author      Geir Storli <geirst@verizonmedia.com>   2019-03-13 13:20:14 +0000
committer   Geir Storli <geirst@verizonmedia.com>   2019-03-14 07:37:48 +0000
commit      71014b3999a3ab01aface05395491606bc6f4fe1
tree        74549203400671aeda42a79f7bf2db58f3aabe8c /searchlib
parent      8eebd56f9a56ca1a2504d75ae3274199790ef125
Rewrite datastore unit tests to gtest.
Diffstat (limited to 'searchlib')
-rw-r--r--   searchlib/src/tests/datastore/datastore/CMakeLists.txt       1
-rw-r--r--   searchlib/src/tests/datastore/datastore/datastore_test.cpp   265
2 files changed, 131 insertions, 135 deletions
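
The conversion is mechanical, and the same patterns repeat through the diff below: testkit's free-text TEST("...") macros become TEST(DataStoreTest, snake_case_name), EXPECT_EQUAL/ASSERT_EQUAL become EXPECT_EQ/ASSERT_EQ, TEST_DO() wrappers are dropped, bool-returning assertion helpers become void, and the testkit entry point is replaced with the gtest runner from vespa/vespalib/gtest/gtest.h. In sketch form (both halves taken verbatim from the diff below):

    // Before (vespalib testkit):
    TEST("require that entry ref is working") {
        EXPECT_EQUAL(4194304u, MyRefType::offsetSize());
    }
    TEST_MAIN() { TEST_RUN_ALL(); }

    // After (GoogleTest):
    TEST(DataStoreTest, require_that_entry_ref_is_working) {
        EXPECT_EQ(4194304u, MyRefType::offsetSize());
    }
    GTEST_MAIN_RUN_ALL_TESTS()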
diff --git a/searchlib/src/tests/datastore/datastore/CMakeLists.txt b/searchlib/src/tests/datastore/datastore/CMakeLists.txt
index b8922fff563..1bc6210a13f 100644
--- a/searchlib/src/tests/datastore/datastore/CMakeLists.txt
+++ b/searchlib/src/tests/datastore/datastore/CMakeLists.txt
@@ -4,5 +4,6 @@ vespa_add_executable(searchlib_datastore_test_app TEST
datastore_test.cpp
DEPENDS
searchlib
+ gtest
)
vespa_add_test(NAME searchlib_datastore_test_app COMMAND searchlib_datastore_test_app)
diff --git a/searchlib/src/tests/datastore/datastore/datastore_test.cpp b/searchlib/src/tests/datastore/datastore/datastore_test.cpp
index b312d2bfe55..1141838d07b 100644
--- a/searchlib/src/tests/datastore/datastore/datastore_test.cpp
+++ b/searchlib/src/tests/datastore/datastore/datastore_test.cpp
@@ -1,15 +1,14 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vespalib/testkit/testapp.h>
#include <vespa/searchlib/datastore/datastore.h>
#include <vespa/searchlib/datastore/datastore.hpp>
+#include <vespa/vespalib/gtest/gtest.h>
#include <vespa/vespalib/test/insertion_operators.h>
#include <vespa/log/log.h>
LOG_SETUP("datastore_test");
-namespace search {
-namespace datastore {
+namespace search::datastore {
using vespalib::alloc::MemoryAllocator;
@@ -136,95 +135,94 @@ public:
using MyRef = MyStore::RefType;
-bool
+void
assertMemStats(const DataStoreBase::MemStats &exp,
const DataStoreBase::MemStats &act)
{
- if (!EXPECT_EQUAL(exp._allocElems, act._allocElems)) return false;
- if (!EXPECT_EQUAL(exp._usedElems, act._usedElems)) return false;
- if (!EXPECT_EQUAL(exp._deadElems, act._deadElems)) return false;
- if (!EXPECT_EQUAL(exp._holdElems, act._holdElems)) return false;
- if (!EXPECT_EQUAL(exp._freeBuffers, act._freeBuffers)) return false;
- if (!EXPECT_EQUAL(exp._activeBuffers, act._activeBuffers)) return false;
- if (!EXPECT_EQUAL(exp._holdBuffers, act._holdBuffers)) return false;
- return true;
+ EXPECT_EQ(exp._allocElems, act._allocElems);
+ EXPECT_EQ(exp._usedElems, act._usedElems);
+ EXPECT_EQ(exp._deadElems, act._deadElems);
+ EXPECT_EQ(exp._holdElems, act._holdElems);
+ EXPECT_EQ(exp._freeBuffers, act._freeBuffers);
+ EXPECT_EQ(exp._activeBuffers, act._activeBuffers);
+ EXPECT_EQ(exp._holdBuffers, act._holdBuffers);
}
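
Note the behavioral shift here: the testkit helper returned bool so callers could assert on it with EXPECT_TRUE, while the gtest version returns void and each EXPECT_EQ records its own non-fatal failure. One side effect is that a failure now points at the helper rather than the call site; a common gtest idiom (not part of this commit) is to wrap each call in a SCOPED_TRACE so the failure output also names the invocation:

    // Hypothetical gtest idiom, not in this commit: the trace (with its
    // file/line) is appended to any EXPECT_EQ failure inside the helper.
    {
        SCOPED_TRACE("after addEntry");
        assertMemStats(m, s.getMemStats());
    }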
-TEST("require that entry ref is working")
+TEST(DataStoreTest, require_that_entry_ref_is_working)
{
using MyRefType = EntryRefT<22>;
- EXPECT_EQUAL(4194304u, MyRefType::offsetSize());
- EXPECT_EQUAL(1024u, MyRefType::numBuffers());
+ EXPECT_EQ(4194304u, MyRefType::offsetSize());
+ EXPECT_EQ(1024u, MyRefType::numBuffers());
{
MyRefType r(0, 0);
- EXPECT_EQUAL(0u, r.offset());
- EXPECT_EQUAL(0u, r.bufferId());
+ EXPECT_EQ(0u, r.offset());
+ EXPECT_EQ(0u, r.bufferId());
}
{
MyRefType r(237, 13);
- EXPECT_EQUAL(237u, r.offset());
- EXPECT_EQUAL(13u, r.bufferId());
+ EXPECT_EQ(237u, r.offset());
+ EXPECT_EQ(13u, r.bufferId());
}
{
MyRefType r(4194303, 1023);
- EXPECT_EQUAL(4194303u, r.offset());
- EXPECT_EQUAL(1023u, r.bufferId());
+ EXPECT_EQ(4194303u, r.offset());
+ EXPECT_EQ(1023u, r.bufferId());
}
{
MyRefType r1(6498, 76);
MyRefType r2(r1);
- EXPECT_EQUAL(r1.offset(), r2.offset());
- EXPECT_EQUAL(r1.bufferId(), r2.bufferId());
+ EXPECT_EQ(r1.offset(), r2.offset());
+ EXPECT_EQ(r1.bufferId(), r2.bufferId());
}
}
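
The constants asserted above fall out of the template parameter: with 22 of the 32 reference bits spent on the offset, 10 remain for the buffer id. A minimal sketch of the arithmetic (the packing shown is an assumption for illustration, not lifted from the implementation):

    // EntryRefT<22>: 32-bit reference = 10 buffer-id bits + 22 offset bits
    //   offsetSize() = 1u << 22       == 4194304  (offsets per buffer)
    //   numBuffers() = 1u << (32 - 22) == 1024
    uint32_t pack(uint32_t offset, uint32_t bufferId) {
        return (bufferId << 22) | offset;   // assumed bit layout
    }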
-TEST("require that aligned entry ref is working")
+TEST(DataStoreTest, require_that_aligned_entry_ref_is_working)
{
using MyRefType = AlignedEntryRefT<22, 2>; // 4-byte alignment
- EXPECT_EQUAL(4 * 4194304u, MyRefType::offsetSize());
- EXPECT_EQUAL(1024u, MyRefType::numBuffers());
- EXPECT_EQUAL(0u, MyRefType::align(0));
- EXPECT_EQUAL(4u, MyRefType::align(1));
- EXPECT_EQUAL(4u, MyRefType::align(2));
- EXPECT_EQUAL(4u, MyRefType::align(3));
- EXPECT_EQUAL(4u, MyRefType::align(4));
- EXPECT_EQUAL(8u, MyRefType::align(5));
+ EXPECT_EQ(4 * 4194304u, MyRefType::offsetSize());
+ EXPECT_EQ(1024u, MyRefType::numBuffers());
+ EXPECT_EQ(0u, MyRefType::align(0));
+ EXPECT_EQ(4u, MyRefType::align(1));
+ EXPECT_EQ(4u, MyRefType::align(2));
+ EXPECT_EQ(4u, MyRefType::align(3));
+ EXPECT_EQ(4u, MyRefType::align(4));
+ EXPECT_EQ(8u, MyRefType::align(5));
{
MyRefType r(0, 0);
- EXPECT_EQUAL(0u, r.offset());
- EXPECT_EQUAL(0u, r.bufferId());
+ EXPECT_EQ(0u, r.offset());
+ EXPECT_EQ(0u, r.bufferId());
}
{
MyRefType r(237, 13);
- EXPECT_EQUAL(MyRefType::align(237), r.offset());
- EXPECT_EQUAL(13u, r.bufferId());
+ EXPECT_EQ(MyRefType::align(237), r.offset());
+ EXPECT_EQ(13u, r.bufferId());
}
{
MyRefType r(MyRefType::offsetSize() - 4, 1023);
- EXPECT_EQUAL(MyRefType::align(MyRefType::offsetSize() - 4), r.offset());
- EXPECT_EQUAL(1023u, r.bufferId());
+ EXPECT_EQ(MyRefType::align(MyRefType::offsetSize() - 4), r.offset());
+ EXPECT_EQ(1023u, r.bufferId());
}
}
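
AlignedEntryRefT<22, 2> stores offsets in units of 2^2 = 4 elements, which both forces the rounding asserted above (align(1..4) == 4, align(5) == 8) and multiplies the addressable range to 4 * 4194304. One standard way to express the round-up, consistent with those expectations (a sketch, not the actual implementation):

    uint32_t align4(uint32_t v) {
        return (v + 3u) & ~3u;   // round v up to the next multiple of 4
    }
    // align4(0) == 0, align4(1) == align4(4) == 4, align4(5) == 8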
-TEST("require that entries can be added and retrieved")
+TEST(DataStoreTest, require_that_entries_can_be_added_and_retrieved)
{
using IntStore = DataStore<int>;
IntStore ds;
EntryRef r1 = ds.addEntry(10);
EntryRef r2 = ds.addEntry(20);
EntryRef r3 = ds.addEntry(30);
- EXPECT_EQUAL(1u, IntStore::RefType(r1).offset());
- EXPECT_EQUAL(2u, IntStore::RefType(r2).offset());
- EXPECT_EQUAL(3u, IntStore::RefType(r3).offset());
- EXPECT_EQUAL(0u, IntStore::RefType(r1).bufferId());
- EXPECT_EQUAL(0u, IntStore::RefType(r2).bufferId());
- EXPECT_EQUAL(0u, IntStore::RefType(r3).bufferId());
- EXPECT_EQUAL(10, ds.getEntry(r1));
- EXPECT_EQUAL(20, ds.getEntry(r2));
- EXPECT_EQUAL(30, ds.getEntry(r3));
+ EXPECT_EQ(1u, IntStore::RefType(r1).offset());
+ EXPECT_EQ(2u, IntStore::RefType(r2).offset());
+ EXPECT_EQ(3u, IntStore::RefType(r3).offset());
+ EXPECT_EQ(0u, IntStore::RefType(r1).bufferId());
+ EXPECT_EQ(0u, IntStore::RefType(r2).bufferId());
+ EXPECT_EQ(0u, IntStore::RefType(r3).bufferId());
+ EXPECT_EQ(10, ds.getEntry(r1));
+ EXPECT_EQ(20, ds.getEntry(r2));
+ EXPECT_EQ(30, ds.getEntry(r3));
}
-TEST("require that add entry triggers change of buffer")
+TEST(DataStoreTest, require_that_add_entry_triggers_change_of_buffer)
{
using Store = DataStore<uint64_t, EntryRefT<10, 10> >;
Store s;
@@ -233,12 +231,11 @@ TEST("require that add entry triggers change of buffer")
uint64_t lastNum = 0;
for (;;++num) {
EntryRef r = s.addEntry(num);
- EXPECT_EQUAL(num, s.getEntry(r));
+ EXPECT_EQ(num, s.getEntry(r));
uint32_t bufferId = Store::RefType(r).bufferId();
if (bufferId > lastId) {
LOG(info, "Changed to bufferId %u after %" PRIu64 " nums", bufferId, num);
- EXPECT_EQUAL(Store::RefType::offsetSize() - (lastId == 0),
- num - lastNum);
+ EXPECT_EQ(Store::RefType::offsetSize() - (lastId == 0), num - lastNum);
lastId = bufferId;
lastNum = num;
}
@@ -246,32 +243,32 @@ TEST("require that add entry triggers change of buffer")
break;
}
}
- EXPECT_EQUAL(Store::RefType::offsetSize() * 2 - 1, num);
+ EXPECT_EQ(Store::RefType::offsetSize() * 2 - 1, num);
LOG(info, "Added %" PRIu64 " nums in 2 buffers", num);
}
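
The numbers in this test are worth spelling out. EntryRefT<10, 10> gives offsetSize() = 2^10 = 1024 entries per buffer, and the (lastId == 0) correction reflects that the first buffer holds one entry fewer, consistent with the earlier test where the first addEntry() landed at offset 1 rather than 0 (offset 0 presumably being reserved as the null reference):

    // EntryRefT<10, 10>: offsetSize() = 1u << 10 == 1024
    // first buffer:  1024 - 1 == 1023 entries (offset 0 presumably reserved)
    // second buffer: 1024 entries
    // total when the loop stops: 1023 + 1024 == 2047 == offsetSize() * 2 - 1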
-TEST("require that we can hold and trim buffers")
+TEST(DataStoreTest, require_that_we_can_hold_and_trim_buffers)
{
MyStore s;
- EXPECT_EQUAL(0u, MyRef(s.addEntry(1)).bufferId());
+ EXPECT_EQ(0u, MyRef(s.addEntry(1)).bufferId());
s.switchActiveBuffer();
- EXPECT_EQUAL(1u, s.activeBufferId());
+ EXPECT_EQ(1u, s.activeBufferId());
s.holdBuffer(0); // hold last buffer
s.transferHoldLists(10);
- EXPECT_EQUAL(1u, MyRef(s.addEntry(2)).bufferId());
+ EXPECT_EQ(1u, MyRef(s.addEntry(2)).bufferId());
s.switchActiveBuffer();
- EXPECT_EQUAL(2u, s.activeBufferId());
+ EXPECT_EQ(2u, s.activeBufferId());
s.holdBuffer(1); // hold last buffer
s.transferHoldLists(20);
- EXPECT_EQUAL(2u, MyRef(s.addEntry(3)).bufferId());
+ EXPECT_EQ(2u, MyRef(s.addEntry(3)).bufferId());
s.switchActiveBuffer();
- EXPECT_EQUAL(3u, s.activeBufferId());
+ EXPECT_EQ(3u, s.activeBufferId());
s.holdBuffer(2); // hold last buffer
s.transferHoldLists(30);
- EXPECT_EQUAL(3u, MyRef(s.addEntry(4)).bufferId());
+ EXPECT_EQ(3u, MyRef(s.addEntry(4)).bufferId());
s.holdBuffer(3); // hold current buffer
s.transferHoldLists(40);
@@ -286,8 +283,8 @@ TEST("require that we can hold and trim buffers")
EXPECT_TRUE(s.getBufferState(3).size() != 0);
s.switchActiveBuffer();
- EXPECT_EQUAL(0u, s.activeBufferId());
- EXPECT_EQUAL(0u, MyRef(s.addEntry(5)).bufferId());
+ EXPECT_EQ(0u, s.activeBufferId());
+ EXPECT_EQ(0u, MyRef(s.addEntry(5)).bufferId());
s.trimHoldLists(41);
EXPECT_TRUE(s.getBufferState(0).size() != 0);
EXPECT_TRUE(s.getBufferState(1).size() == 0);
@@ -295,7 +292,7 @@ TEST("require that we can hold and trim buffers")
EXPECT_TRUE(s.getBufferState(3).size() == 0);
}
-TEST("require that we can hold and trim elements")
+TEST(DataStoreTest, require_that_we_can_hold_and_trim_elements)
{
MyStore s;
MyRef r1 = s.addEntry(1);
@@ -307,17 +304,17 @@ TEST("require that we can hold and trim elements")
MyRef r3 = s.addEntry(3);
s.holdElem(r3, 1);
s.transferHoldLists(30);
- EXPECT_EQUAL(1, s.getEntry(r1));
- EXPECT_EQUAL(2, s.getEntry(r2));
- EXPECT_EQUAL(3, s.getEntry(r3));
+ EXPECT_EQ(1, s.getEntry(r1));
+ EXPECT_EQ(2, s.getEntry(r2));
+ EXPECT_EQ(3, s.getEntry(r3));
s.trimElemHoldList(11);
- EXPECT_EQUAL(0, s.getEntry(r1));
- EXPECT_EQUAL(2, s.getEntry(r2));
- EXPECT_EQUAL(3, s.getEntry(r3));
+ EXPECT_EQ(0, s.getEntry(r1));
+ EXPECT_EQ(2, s.getEntry(r2));
+ EXPECT_EQ(3, s.getEntry(r3));
s.trimElemHoldList(31);
- EXPECT_EQUAL(0, s.getEntry(r1));
- EXPECT_EQUAL(0, s.getEntry(r2));
- EXPECT_EQUAL(0, s.getEntry(r3));
+ EXPECT_EQ(0, s.getEntry(r1));
+ EXPECT_EQ(0, s.getEntry(r2));
+ EXPECT_EQ(0, s.getEntry(r3));
}
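
The hold/trim calls exercised here implement generation-based reclamation: holdElem() logically removes an element while keeping it readable, transferHoldLists(gen) stamps everything held so far with a generation, and trimElemHoldList(firstUsedGen) reclaims entries whose generation is below it, after which the slot reads as 0. Annotated against the generations used in this test:

    s.holdElem(r1, 1);        // r1 removed, but still readable by old readers
    s.transferHoldLists(10);  // r1 now held at generation 10
    // ...
    s.trimElemHoldList(11);   // oldest reader is at gen 11: reclaim gen < 11,
                              // freeing r1 while r2 (gen 20) and r3 (gen 30)
                              // remain readable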
MyRef
@@ -326,7 +323,7 @@ toRef(Handle<int> handle)
return MyRef(handle.ref);
}
-TEST("require that we can use free lists")
+TEST(DataStoreTest, require_that_we_can_use_free_lists)
{
MyStore s;
s.enableFreeLists();
@@ -339,25 +336,25 @@ TEST("require that we can use free lists")
s.transferHoldLists(20);
s.trimElemHoldList(11);
auto h3 = allocator.alloc(3); // reuse h1.ref
- EXPECT_EQUAL(toRef(h1).offset(), toRef(h3).offset());
- EXPECT_EQUAL(toRef(h1).bufferId(), toRef(h3).bufferId());
+ EXPECT_EQ(toRef(h1).offset(), toRef(h3).offset());
+ EXPECT_EQ(toRef(h1).bufferId(), toRef(h3).bufferId());
auto h4 = allocator.alloc(4);
- EXPECT_EQUAL(toRef(h2).offset() + 1, toRef(h4).offset());
+ EXPECT_EQ(toRef(h2).offset() + 1, toRef(h4).offset());
s.trimElemHoldList(21);
auto h5 = allocator.alloc(5); // reuse h2.ref
- EXPECT_EQUAL(toRef(h2).offset(), toRef(h5).offset());
- EXPECT_EQUAL(toRef(h2).bufferId(), toRef(h5).bufferId());
+ EXPECT_EQ(toRef(h2).offset(), toRef(h5).offset());
+ EXPECT_EQ(toRef(h2).bufferId(), toRef(h5).bufferId());
auto h6 = allocator.alloc(6);
- EXPECT_EQUAL(toRef(h4).offset() + 1, toRef(h6).offset());
- EXPECT_EQUAL(3, s.getEntry(h1.ref));
- EXPECT_EQUAL(5, s.getEntry(h2.ref));
- EXPECT_EQUAL(3, s.getEntry(h3.ref));
- EXPECT_EQUAL(4, s.getEntry(h4.ref));
- EXPECT_EQUAL(5, s.getEntry(h5.ref));
- EXPECT_EQUAL(6, s.getEntry(h6.ref));
+ EXPECT_EQ(toRef(h4).offset() + 1, toRef(h6).offset());
+ EXPECT_EQ(3, s.getEntry(h1.ref));
+ EXPECT_EQ(5, s.getEntry(h2.ref));
+ EXPECT_EQ(3, s.getEntry(h3.ref));
+ EXPECT_EQ(4, s.getEntry(h4.ref));
+ EXPECT_EQ(5, s.getEntry(h5.ref));
+ EXPECT_EQ(6, s.getEntry(h6.ref));
}
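
The reuse pattern asserted here (h3 lands exactly on h1's old slot, h5 on h2's) suggests the allocation path: with free lists enabled, trimming the element hold list pushes reclaimed slots onto a per-buffer free list, which alloc() pops before falling back to appending. A hedged sketch of that path (names and structure are assumptions for illustration, not the real internals):

    // Assumed shape of the allocator's fast path with free lists enabled:
    EntryRef allocSlot(BufferState &state) {
        if (!state.freeList().empty()) {        // slots released by trim
            EntryRef ref = state.freeList().back();
            state.freeList().pop_back();        // LIFO reuse: h3 gets h1's slot
            return ref;
        }
        return appendSlot(state);               // bump-allocate: h4, h6
    }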
-TEST("require that memory stats are calculated")
+TEST(DataStoreTest, require_that_memory_stats_are_calculated)
{
MyStore s;
DataStoreBase::MemStats m;
@@ -368,17 +365,17 @@ TEST("require that memory stats are calculated")
m._activeBuffers = 1;
m._freeBuffers = MyRef::numBuffers() - 1;
m._holdBuffers = 0;
- EXPECT_TRUE(assertMemStats(m, s.getMemStats()));
+ assertMemStats(m, s.getMemStats());
// add entry
MyRef r = s.addEntry(10);
m._usedElems++;
- EXPECT_TRUE(assertMemStats(m, s.getMemStats()));
+ assertMemStats(m, s.getMemStats());
// inc dead
s.incDead(r, 1);
m._deadElems++;
- EXPECT_TRUE(assertMemStats(m, s.getMemStats()));
+ assertMemStats(m, s.getMemStats());
// hold buffer
s.addEntry(20);
@@ -389,7 +386,7 @@ TEST("require that memory stats are calculated")
m._holdElems += 2; // used - dead
m._activeBuffers--;
m._holdBuffers++;
- EXPECT_TRUE(assertMemStats(m, s.getMemStats()));
+ assertMemStats(m, s.getMemStats());
// new active buffer
s.switchActiveBuffer();
@@ -407,10 +404,10 @@ TEST("require that memory stats are calculated")
m._holdElems = 0;
m._freeBuffers = MyRef::numBuffers() - 1;
m._holdBuffers = 0;
- EXPECT_TRUE(assertMemStats(m, s.getMemStats()));
+ assertMemStats(m, s.getMemStats());
}
-TEST("require that memory usage is calculated")
+TEST(DataStoreTest, require_that_memory_usage_is_calculated)
{
MyStore s;
MyRef r = s.addEntry(10);
@@ -421,14 +418,14 @@ TEST("require that memory usage is calculated")
s.holdBuffer(r.bufferId());
s.transferHoldLists(100);
MemoryUsage m = s.getMemoryUsage();
- EXPECT_EQUAL(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
- EXPECT_EQUAL(5 * sizeof(int), m.usedBytes());
- EXPECT_EQUAL(2 * sizeof(int), m.deadBytes());
- EXPECT_EQUAL(3 * sizeof(int), m.allocatedBytesOnHold());
+ EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
+ EXPECT_EQ(5 * sizeof(int), m.usedBytes());
+ EXPECT_EQ(2 * sizeof(int), m.deadBytes());
+ EXPECT_EQ(3 * sizeof(int), m.allocatedBytesOnHold());
s.trimHoldLists(101);
}
-TEST("require that we can disable elemement hold list")
+TEST(DataStoreTest, require_that_we_can_disable_elemement_hold_list)
{
MyStore s;
MyRef r1 = s.addEntry(10);
@@ -436,23 +433,23 @@ TEST("require that we can disable elemement hold list")
MyRef r3 = s.addEntry(30);
(void) r3;
MemoryUsage m = s.getMemoryUsage();
- EXPECT_EQUAL(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
- EXPECT_EQUAL(4 * sizeof(int), m.usedBytes());
- EXPECT_EQUAL(1 * sizeof(int), m.deadBytes());
- EXPECT_EQUAL(0 * sizeof(int), m.allocatedBytesOnHold());
+ EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
+ EXPECT_EQ(4 * sizeof(int), m.usedBytes());
+ EXPECT_EQ(1 * sizeof(int), m.deadBytes());
+ EXPECT_EQ(0 * sizeof(int), m.allocatedBytesOnHold());
s.holdElem(r1, 1);
m = s.getMemoryUsage();
- EXPECT_EQUAL(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
- EXPECT_EQUAL(4 * sizeof(int), m.usedBytes());
- EXPECT_EQUAL(1 * sizeof(int), m.deadBytes());
- EXPECT_EQUAL(1 * sizeof(int), m.allocatedBytesOnHold());
+ EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
+ EXPECT_EQ(4 * sizeof(int), m.usedBytes());
+ EXPECT_EQ(1 * sizeof(int), m.deadBytes());
+ EXPECT_EQ(1 * sizeof(int), m.allocatedBytesOnHold());
s.disableElemHoldList();
s.holdElem(r2, 1);
m = s.getMemoryUsage();
- EXPECT_EQUAL(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
- EXPECT_EQUAL(4 * sizeof(int), m.usedBytes());
- EXPECT_EQUAL(2 * sizeof(int), m.deadBytes());
- EXPECT_EQUAL(1 * sizeof(int), m.allocatedBytesOnHold());
+ EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
+ EXPECT_EQ(4 * sizeof(int), m.usedBytes());
+ EXPECT_EQ(2 * sizeof(int), m.deadBytes());
+ EXPECT_EQ(1 * sizeof(int), m.allocatedBytesOnHold());
s.transferHoldLists(100);
s.trimHoldLists(101);
}
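
The byte counts in this test track a simple ledger in sizeof(int) units and make the effect of disableElemHoldList() visible: before disabling, holding r1 moves one element onto the hold list; after disabling, holding r2 is accounted dead immediately instead.

    // Ledger in sizeof(int) units (the reserved-slot reading is an inference,
    // consistent with first offsets starting at 1 in the tests above):
    //   used = 4  : reserved slot + r1 + r2 + r3
    //   dead = 1  : the reserved slot
    //   holdElem(r1)                  -> hold = 1
    //   disableElemHoldList();
    //   holdElem(r2)                  -> dead = 2, hold still 1
    //   (hold list disabled: held elements are counted dead immediately)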
@@ -466,39 +463,39 @@ void assertGrowStats(GrowthStats expSizes,
size_t expInitMemUsage,
size_t minClusters, size_t numClustersForNewBuffer, size_t maxClusters = 128)
{
- EXPECT_EQUAL(expSizes, IntGrowStore(1, minClusters, maxClusters, numClustersForNewBuffer).getGrowthStats(expSizes.size()));
- EXPECT_EQUAL(expFirstBufSizes, IntGrowStore(1, minClusters, maxClusters, numClustersForNewBuffer).getFirstBufGrowStats());
- EXPECT_EQUAL(expInitMemUsage, IntGrowStore(1, minClusters, maxClusters, numClustersForNewBuffer).getMemoryUsage().allocatedBytes());
+ EXPECT_EQ(expSizes, IntGrowStore(1, minClusters, maxClusters, numClustersForNewBuffer).getGrowthStats(expSizes.size()));
+ EXPECT_EQ(expFirstBufSizes, IntGrowStore(1, minClusters, maxClusters, numClustersForNewBuffer).getFirstBufGrowStats());
+ EXPECT_EQ(expInitMemUsage, IntGrowStore(1, minClusters, maxClusters, numClustersForNewBuffer).getMemoryUsage().allocatedBytes());
}
}
-TEST("require that buffer growth works")
+TEST(DataStoreTest, require_that_buffer_growth_works)
{
// Always switch to new buffer, min size 4
- TEST_DO(assertGrowStats({ 4, 4, 4, 4, 8, 16, 16, 32, 64, 64 },
- { 4 }, 20, 4, 0));
+ assertGrowStats({ 4, 4, 4, 4, 8, 16, 16, 32, 64, 64 },
+ { 4 }, 20, 4, 0);
// Resize if buffer size is less than 4, min size 0
- TEST_DO(assertGrowStats({ 4, 4, 4, 4, 8, 16, 16, 32, 64, 64 },
- { 0, 1, 2, 4 }, 4, 0, 4));
+ assertGrowStats({ 4, 4, 4, 4, 8, 16, 16, 32, 64, 64 },
+ { 0, 1, 2, 4 }, 4, 0, 4);
// Always switch to new buffer, min size 16
- TEST_DO(assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
- { 16 }, 68, 16, 0));
+ assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
+ { 16 }, 68, 16, 0);
// Resize if buffer size is less than 16, min size 0
- TEST_DO(assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
- { 0, 1, 2, 4, 8, 16 }, 4, 0, 16));
+ assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
+ { 0, 1, 2, 4, 8, 16 }, 4, 0, 16);
// Resize if buffer size is less than 16, min size 4
- TEST_DO(assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
- { 4, 8, 16 }, 20, 4, 16));
+ assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
+ { 4, 8, 16 }, 20, 4, 16);
// Always switch to new buffer, min size 0
- TEST_DO(assertGrowStats({ 1, 1, 1, 1, 1, 2, 2, 4, 8, 8, 16, 32 },
- { 0, 1 }, 4, 0, 0));
+ assertGrowStats({ 1, 1, 1, 1, 1, 2, 2, 4, 8, 8, 16, 32 },
+ { 0, 1 }, 4, 0, 0);
// Buffers with sizes larger than the huge page size of the mmap allocator.
- ASSERT_EQUAL(524288u, HUGE_PAGE_CLUSTER_SIZE);
- TEST_DO(assertGrowStats({ 262144, 262144, 262144, 524288, 524288, 524288 * 2, 524288 * 3, 524288 * 4, 524288 * 5, 524288 * 5 },
- { 0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144 },
- 4, 0, HUGE_PAGE_CLUSTER_SIZE / 2, HUGE_PAGE_CLUSTER_SIZE * 5));
+ ASSERT_EQ(524288u, HUGE_PAGE_CLUSTER_SIZE);
+ assertGrowStats({ 262144, 262144, 262144, 524288, 524288, 524288 * 2, 524288 * 3, 524288 * 4, 524288 * 5, 524288 * 5 },
+ { 0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144 },
+ 4, 0, HUGE_PAGE_CLUSTER_SIZE / 2, HUGE_PAGE_CLUSTER_SIZE * 5);
}
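
The magic constant pinned by the ASSERT_EQ above checks out against the mmap allocator's huge page size: for 4-byte ints, 524288 elements is exactly one 2 MiB huge page, which is presumably how HUGE_PAGE_CLUSTER_SIZE is derived.

    // 524288 * sizeof(int) == 524288 * 4 == 2097152 bytes == 2 MiB
    static_assert(524288u * sizeof(int) == 2u * 1024 * 1024,
                  "one huge page worth of ints");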
using RefType15 = EntryRefT<15>; // offsetSize=32768
@@ -512,12 +509,12 @@ void assertGrowStats(GrowthStats expSizes, uint32_t clusterSize)
uint32_t maxClusters = RefType15::offsetSize();
uint32_t numClustersForNewBuffer = 2048;
GrowStore<DataType, RefType15> store(clusterSize, minClusters, maxClusters, numClustersForNewBuffer);
- EXPECT_EQUAL(expSizes, store.getGrowthStats(expSizes.size()));
+ EXPECT_EQ(expSizes, store.getGrowthStats(expSizes.size()));
}
}
-TEST("require that offset in EntryRefT is within bounds when allocating memory buffers where wanted number of bytes is not a power of 2 and less than huge page size")
+TEST(DataStoreTest, require_that_offset_in_EntryRefT_is_within_bounds_when_allocating_memory_buffers_where_wanted_number_of_bytes_is_not_a_power_of_2_and_less_than_huge_page_size)
{
/*
* When allocating new memory buffers for the data store the following happens (ref. calcAllocation() in bufferstate.cpp):
@@ -539,7 +536,5 @@ TEST("require that offset in EntryRefT is within bounds when allocating memory b
}
}
-}
-
-TEST_MAIN() { TEST_RUN_ALL(); }
+GTEST_MAIN_RUN_ALL_TESTS()
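
With the gtest runner in place, the binary built by the CMakeLists.txt change above accepts the standard GoogleTest command-line flags, e.g. listing the tests and filtering on their new names:

    ./searchlib_datastore_test_app --gtest_list_tests
    ./searchlib_datastore_test_app --gtest_filter='DataStoreTest.require_that_we_can_use_free_lists'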