Diffstat (limited to 'vespalib/src/tests/datastore')
-rw-r--r--  vespalib/src/tests/datastore/array_store/CMakeLists.txt                        |   8
-rw-r--r--  vespalib/src/tests/datastore/array_store/array_store_test.cpp                  | 360
-rw-r--r--  vespalib/src/tests/datastore/array_store_config/CMakeLists.txt                 |   8
-rw-r--r--  vespalib/src/tests/datastore/array_store_config/array_store_config_test.cpp    |  84
-rw-r--r--  vespalib/src/tests/datastore/buffer_type/CMakeLists.txt                        |   8
-rw-r--r--  vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp                  | 116
-rw-r--r--  vespalib/src/tests/datastore/datastore/CMakeLists.txt                          |   9
-rw-r--r--  vespalib/src/tests/datastore/datastore/datastore_test.cpp                      | 584
-rw-r--r--  vespalib/src/tests/datastore/unique_store/CMakeLists.txt                       |   8
-rw-r--r--  vespalib/src/tests/datastore/unique_store/unique_store_test.cpp                | 267
10 files changed, 1452 insertions, 0 deletions
diff --git a/vespalib/src/tests/datastore/array_store/CMakeLists.txt b/vespalib/src/tests/datastore/array_store/CMakeLists.txt
new file mode 100644
index 00000000000..a6f0ef31b1a
--- /dev/null
+++ b/vespalib/src/tests/datastore/array_store/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_array_store_test_app TEST
+ SOURCES
+ array_store_test.cpp
+ DEPENDS
+ vespalib
+)
+vespa_add_test(NAME vespalib_array_store_test_app COMMAND vespalib_array_store_test_app)
diff --git a/vespalib/src/tests/datastore/array_store/array_store_test.cpp b/vespalib/src/tests/datastore/array_store/array_store_test.cpp
new file mode 100644
index 00000000000..1eb71d36fe7
--- /dev/null
+++ b/vespalib/src/tests/datastore/array_store/array_store_test.cpp
@@ -0,0 +1,360 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/test/datastore/memstats.h>
+#include <vespa/vespalib/datastore/array_store.hpp>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/test/insertion_operators.h>
+#include <vespa/vespalib/util/traits.h>
+#include <vector>
+
+using namespace search::datastore;
+using vespalib::MemoryUsage;
+using vespalib::ArrayRef;
+using generation_t = vespalib::GenerationHandler::generation_t;
+using MemStats = search::datastore::test::MemStats;
+
+constexpr float ALLOC_GROW_FACTOR = 0.2;
+
+template <typename EntryT, typename RefT = EntryRefT<19> >
+struct Fixture
+{
+ using EntryRefType = RefT;
+ using ArrayStoreType = ArrayStore<EntryT, RefT>;
+ using LargeArray = typename ArrayStoreType::LargeArray;
+ using ConstArrayRef = typename ArrayStoreType::ConstArrayRef;
+ using EntryVector = std::vector<EntryT>;
+ using value_type = EntryT;
+ using ReferenceStore = std::map<EntryRef, EntryVector>;
+
+ ArrayStoreType store;
+ ReferenceStore refStore;
+ generation_t generation;
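+ // Default allocation spec used by this fixture: minArrays=16, maxArrays=RefT::offsetSize(), numArraysForNewBuffer=8 Ki.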
+ Fixture(uint32_t maxSmallArraySize)
+ : store(ArrayStoreConfig(maxSmallArraySize,
+ ArrayStoreConfig::AllocSpec(16, RefT::offsetSize(), 8 * 1024,
+ ALLOC_GROW_FACTOR))),
+ refStore(),
+ generation(1)
+ {}
+ Fixture(const ArrayStoreConfig &storeCfg)
+ : store(storeCfg),
+ refStore(),
+ generation(1)
+ {}
+ void assertAdd(const EntryVector &input) {
+ EntryRef ref = add(input);
+ assertGet(ref, input);
+ }
+ EntryRef add(const EntryVector &input) {
+ EntryRef result = store.add(ConstArrayRef(input));
+ ASSERT_EQUAL(0u, refStore.count(result));
+ refStore.insert(std::make_pair(result, input));
+ return result;
+ }
+ void assertGet(EntryRef ref, const EntryVector &exp) const {
+ ConstArrayRef act = store.get(ref);
+ EXPECT_EQUAL(exp, EntryVector(act.begin(), act.end()));
+ }
+ void remove(EntryRef ref) {
+ ASSERT_EQUAL(1u, refStore.count(ref));
+ store.remove(ref);
+ refStore.erase(ref);
+ }
+ void remove(const EntryVector &input) {
+ remove(getEntryRef(input));
+ }
+ uint32_t getBufferId(EntryRef ref) const {
+ return EntryRefType(ref).bufferId();
+ }
+ void assertBufferState(EntryRef ref, const MemStats expStats) const {
+ EXPECT_EQUAL(expStats._used, store.bufferState(ref).size());
+ EXPECT_EQUAL(expStats._hold, store.bufferState(ref).getHoldElems());
+ EXPECT_EQUAL(expStats._dead, store.bufferState(ref).getDeadElems());
+ }
+ void assertMemoryUsage(const MemStats expStats) const {
+ MemoryUsage act = store.getMemoryUsage();
+ EXPECT_EQUAL(expStats._used, act.usedBytes());
+ EXPECT_EQUAL(expStats._hold, act.allocatedBytesOnHold());
+ EXPECT_EQUAL(expStats._dead, act.deadBytes());
+ }
+ void assertStoreContent() const {
+ for (const auto &elem : refStore) {
+ TEST_DO(assertGet(elem.first, elem.second));
+ }
+ }
+ EntryRef getEntryRef(const EntryVector &input) {
+ for (auto itr = refStore.begin(); itr != refStore.end(); ++itr) {
+ if (itr->second == input) {
+ return itr->first;
+ }
+ }
+ return EntryRef();
+ }
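+ // Transfer hold lists at the current generation, then trim with the next generation so all held data is freed.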
+ void trimHoldLists() {
+ store.transferHoldLists(generation++);
+ store.trimHoldLists(generation);
+ }
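+ // Compact the worst buffer(s), remap all known refs, and rebuild refStore with the remapped refs.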
+ void compactWorst(bool compactMemory, bool compactAddressSpace) {
+ ICompactionContext::UP ctx = store.compactWorst(compactMemory, compactAddressSpace);
+ std::vector<EntryRef> refs;
+ for (auto itr = refStore.begin(); itr != refStore.end(); ++itr) {
+ refs.push_back(itr->first);
+ }
+ std::vector<EntryRef> compactedRefs = refs;
+ ctx->compact(ArrayRef<EntryRef>(compactedRefs));
+ ReferenceStore compactedRefStore;
+ for (size_t i = 0; i < refs.size(); ++i) {
+ ASSERT_EQUAL(0u, compactedRefStore.count(compactedRefs[i]));
+ ASSERT_EQUAL(1u, refStore.count(refs[i]));
+ compactedRefStore.insert(std::make_pair(compactedRefs[i], refStore[refs[i]]));
+ }
+ refStore = compactedRefStore;
+ }
+ size_t entrySize() const { return sizeof(EntryT); }
+ size_t largeArraySize() const { return sizeof(LargeArray); }
+};
+
+using NumberFixture = Fixture<uint32_t>;
+using StringFixture = Fixture<std::string>;
+using SmallOffsetNumberFixture = Fixture<uint32_t, EntryRefT<10>>;
+using ByteFixture = Fixture<uint8_t>;
+
+
+
+TEST("require that we test with trivial and non-trivial types")
+{
+ EXPECT_TRUE(vespalib::can_skip_destruction<NumberFixture::value_type>::value);
+ EXPECT_FALSE(vespalib::can_skip_destruction<StringFixture::value_type>::value);
+}
+
+TEST_F("require that we can add and get small arrays of trivial type", NumberFixture(3))
+{
+ TEST_DO(f.assertAdd({}));
+ TEST_DO(f.assertAdd({1}));
+ TEST_DO(f.assertAdd({2,3}));
+ TEST_DO(f.assertAdd({3,4,5}));
+}
+
+TEST_F("require that we can add and get small arrays of non-trivial type", StringFixture(3))
+{
+ TEST_DO(f.assertAdd({}));
+ TEST_DO(f.assertAdd({"aa"}));
+ TEST_DO(f.assertAdd({"bbb", "ccc"}));
+ TEST_DO(f.assertAdd({"ddd", "eeee", "fffff"}));
+}
+
+TEST_F("require that we can add and get large arrays of simple type", NumberFixture(3))
+{
+ TEST_DO(f.assertAdd({1,2,3,4}));
+ TEST_DO(f.assertAdd({2,3,4,5,6}));
+}
+
+TEST_F("require that we can add and get large arrays of non-trivial type", StringFixture(3))
+{
+ TEST_DO(f.assertAdd({"aa", "bb", "cc", "dd"}));
+ TEST_DO(f.assertAdd({"ddd", "eee", "ffff", "gggg", "hhhh"}));
+}
+
+TEST_F("require that elements are put on hold when a small array is removed", NumberFixture(3))
+{
+ EntryRef ref = f.add({1,2,3});
+ TEST_DO(f.assertBufferState(ref, MemStats().used(3).hold(0)));
+ f.store.remove(ref);
+ TEST_DO(f.assertBufferState(ref, MemStats().used(3).hold(3)));
+}
+
+TEST_F("require that elements are put on hold when a large array is removed", NumberFixture(3))
+{
+ EntryRef ref = f.add({1,2,3,4});
+ // Note: The first buffer has the first element reserved -> we expect 2 elements used here.
+ TEST_DO(f.assertBufferState(ref, MemStats().used(2).hold(0).dead(1)));
+ f.store.remove(ref);
+ TEST_DO(f.assertBufferState(ref, MemStats().used(2).hold(1).dead(1)));
+}
+
+TEST_F("require that new underlying buffer is allocated when current is full", SmallOffsetNumberFixture(3))
+{
+ uint32_t firstBufferId = f.getBufferId(f.add({1,1}));
+ for (uint32_t i = 0; i < (F1::EntryRefType::offsetSize() - 1); ++i) {
+ uint32_t bufferId = f.getBufferId(f.add({i, i+1}));
+ EXPECT_EQUAL(firstBufferId, bufferId);
+ }
+ TEST_DO(f.assertStoreContent());
+
+ uint32_t secondBufferId = f.getBufferId(f.add({2,2}));
+ EXPECT_NOT_EQUAL(firstBufferId, secondBufferId);
+ for (uint32_t i = 0; i < 10u; ++i) {
+ uint32_t bufferId = f.getBufferId(f.add({i+2,i}));
+ EXPECT_EQUAL(secondBufferId, bufferId);
+ }
+ TEST_DO(f.assertStoreContent());
+}
+
+TEST_F("require that the buffer with most dead space is compacted", NumberFixture(2))
+{
+ EntryRef size1Ref = f.add({1});
+ EntryRef size2Ref = f.add({2,2});
+ EntryRef size3Ref = f.add({3,3,3});
+ f.remove(f.add({5,5}));
+ f.trimHoldLists();
+ TEST_DO(f.assertBufferState(size1Ref, MemStats().used(1).dead(0)));
+ TEST_DO(f.assertBufferState(size2Ref, MemStats().used(4).dead(2)));
+ TEST_DO(f.assertBufferState(size3Ref, MemStats().used(2).dead(1))); // Note: First element is reserved
+ uint32_t size1BufferId = f.getBufferId(size1Ref);
+ uint32_t size2BufferId = f.getBufferId(size2Ref);
+ uint32_t size3BufferId = f.getBufferId(size3Ref);
+
+ EXPECT_EQUAL(3u, f.refStore.size());
+ f.compactWorst(true, false);
+ EXPECT_EQUAL(3u, f.refStore.size());
+ f.assertStoreContent();
+
+ EXPECT_EQUAL(size1BufferId, f.getBufferId(f.getEntryRef({1})));
+ EXPECT_EQUAL(size3BufferId, f.getBufferId(f.getEntryRef({3,3,3})));
+ // Buffer for size 2 arrays has been compacted
+ EXPECT_NOT_EQUAL(size2BufferId, f.getBufferId(f.getEntryRef({2,2})));
+ f.assertGet(size2Ref, {2,2}); // Old ref should still point to data.
+ EXPECT_TRUE(f.store.bufferState(size2Ref).isOnHold());
+ f.trimHoldLists();
+ EXPECT_TRUE(f.store.bufferState(size2Ref).isFree());
+}
+
+namespace {
+
+void testCompaction(NumberFixture &f, bool compactMemory, bool compactAddressSpace)
+{
+ EntryRef size1Ref = f.add({1});
+ EntryRef size2Ref = f.add({2,2});
+ EntryRef size3Ref = f.add({3,3,3});
+ f.remove(f.add({5,5,5}));
+ f.remove(f.add({6}));
+ f.remove(f.add({7}));
+ f.trimHoldLists();
+ TEST_DO(f.assertBufferState(size1Ref, MemStats().used(3).dead(2)));
+ TEST_DO(f.assertBufferState(size2Ref, MemStats().used(2).dead(0)));
+ TEST_DO(f.assertBufferState(size3Ref, MemStats().used(6).dead(3)));
+ uint32_t size1BufferId = f.getBufferId(size1Ref);
+ uint32_t size2BufferId = f.getBufferId(size2Ref);
+ uint32_t size3BufferId = f.getBufferId(size3Ref);
+
+ EXPECT_EQUAL(3u, f.refStore.size());
+ f.compactWorst(compactMemory, compactAddressSpace);
+ EXPECT_EQUAL(3u, f.refStore.size());
+ f.assertStoreContent();
+
+ if (compactMemory) {
+ EXPECT_NOT_EQUAL(size3BufferId, f.getBufferId(f.getEntryRef({3,3,3})));
+ } else {
+ EXPECT_EQUAL(size3BufferId, f.getBufferId(f.getEntryRef({3,3,3})));
+ }
+ if (compactAddressSpace) {
+ EXPECT_NOT_EQUAL(size1BufferId, f.getBufferId(f.getEntryRef({1})));
+ } else {
+ EXPECT_EQUAL(size1BufferId, f.getBufferId(f.getEntryRef({1})));
+ }
+ EXPECT_EQUAL(size2BufferId, f.getBufferId(f.getEntryRef({2,2})));
+ f.assertGet(size1Ref, {1}); // Old ref should still point to data.
+ f.assertGet(size3Ref, {3,3,3}); // Old ref should still point to data.
+ if (compactMemory) {
+ EXPECT_TRUE(f.store.bufferState(size3Ref).isOnHold());
+ } else {
+ EXPECT_FALSE(f.store.bufferState(size3Ref).isOnHold());
+ }
+ if (compactAddressSpace) {
+ EXPECT_TRUE(f.store.bufferState(size1Ref).isOnHold());
+ } else {
+ EXPECT_FALSE(f.store.bufferState(size1Ref).isOnHold());
+ }
+ EXPECT_FALSE(f.store.bufferState(size2Ref).isOnHold());
+ f.trimHoldLists();
+ if (compactMemory) {
+ EXPECT_TRUE(f.store.bufferState(size3Ref).isFree());
+ } else {
+ EXPECT_FALSE(f.store.bufferState(size3Ref).isFree());
+ }
+ if (compactAddressSpace) {
+ EXPECT_TRUE(f.store.bufferState(size1Ref).isFree());
+ } else {
+ EXPECT_FALSE(f.store.bufferState(size1Ref).isFree());
+ }
+ EXPECT_FALSE(f.store.bufferState(size2Ref).isFree());
+}
+
+}
+
+TEST_F("require that compactWorst selects on only memory", NumberFixture(3)) {
+ testCompaction(f, true, false);
+}
+
+TEST_F("require that compactWorst selects on only address space", NumberFixture(3)) {
+ testCompaction(f, false, true);
+}
+
+TEST_F("require that compactWorst selects on both memory and address space", NumberFixture(3)) {
+ testCompaction(f, true, true);
+}
+
+TEST_F("require that compactWorst selects on neither memory nor address space", NumberFixture(3)) {
+ testCompaction(f, false, false);
+}
+
+TEST_F("require that used, onHold and dead memory usage is tracked for small arrays", NumberFixture(2))
+{
+ MemStats exp(f.store.getMemoryUsage());
+ f.add({2,2});
+ TEST_DO(f.assertMemoryUsage(exp.used(f.entrySize() * 2)));
+ f.remove({2,2});
+ TEST_DO(f.assertMemoryUsage(exp.hold(f.entrySize() * 2)));
+ f.trimHoldLists();
+ TEST_DO(f.assertMemoryUsage(exp.holdToDead(f.entrySize() * 2)));
+}
+
+TEST_F("require that used, onHold and dead memory usage is tracked for large arrays", NumberFixture(2))
+{
+ MemStats exp(f.store.getMemoryUsage());
+ f.add({3,3,3});
+ TEST_DO(f.assertMemoryUsage(exp.used(f.largeArraySize() + f.entrySize() * 3)));
+ f.remove({3,3,3});
+ TEST_DO(f.assertMemoryUsage(exp.hold(f.largeArraySize() + f.entrySize() * 3)));
+ f.trimHoldLists();
+ TEST_DO(f.assertMemoryUsage(exp.decHold(f.largeArraySize() + f.entrySize() * 3).
+ dead(f.largeArraySize())));
+}
+
+TEST_F("require that address space usage is the ratio between used arrays and the number of possible arrays", NumberFixture(3))
+{
+ f.add({2,2});
+ f.add({3,3,3});
+ // 1 array is reserved (buffer 0, offset 0).
+ EXPECT_EQUAL(3u, f.store.addressSpaceUsage().used());
+ EXPECT_EQUAL(1u, f.store.addressSpaceUsage().dead());
+ size_t fourgig = (1ull << 32);
+ /*
+ * Expected limit is sum of allocated arrays for active buffers and
+ * potentially allocated arrays for free buffers. If all buffers were
+ * free then the limit would be 4 Gi.
+ * Then we subtract the arrays for the 4 buffers that are not free (arraySize=1,2,3 + largeArray)
+ * and add back the number of arrays actually allocated in each of them (16 arrays per buffer,
+ * except for arraySize=3, see the note below).
+ * Note: arraySize=3 has 21 arrays as the allocated buffer is rounded up to a power of 2:
+ * 16 * 3 * sizeof(int) = 192 -> 256.
+ * allocated elements = 256 / sizeof(int) = 64.
+ * limit = 64 / 3 = 21.
+ */
+ size_t expLimit = fourgig - 4 * F1::EntryRefType::offsetSize() + 3 * 16 + 21;
+ EXPECT_EQUAL(static_cast<double>(2)/ expLimit, f.store.addressSpaceUsage().usage());
+ EXPECT_EQUAL(expLimit, f.store.addressSpaceUsage().limit());
+}
+
+TEST_F("require that offset in EntryRefT is within bounds when allocating memory buffers where wanted number of bytes is not a power of 2 and less than huge page size",
+ ByteFixture(ByteFixture::ArrayStoreType::optimizedConfigForHugePage(1023, vespalib::alloc::MemoryAllocator::HUGEPAGE_SIZE,
+ 4 * 1024, 8 * 1024, ALLOC_GROW_FACTOR)))
+{
+ // The array store config used in this test is equivalent to the one a multi-value attribute uses
+ // when initializing its multi-value mapping.
+ // See the similar test in datastore_test.cpp for more details on what happens during memory allocation.
+ for (size_t i = 0; i < 1000000; ++i) {
+ f.add({1, 2, 3});
+ }
+ f.assertStoreContent();
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/tests/datastore/array_store_config/CMakeLists.txt b/vespalib/src/tests/datastore/array_store_config/CMakeLists.txt
new file mode 100644
index 00000000000..a5638462748
--- /dev/null
+++ b/vespalib/src/tests/datastore/array_store_config/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_array_store_config_test_app TEST
+ SOURCES
+ array_store_config_test.cpp
+ DEPENDS
+ vespalib
+)
+vespa_add_test(NAME vespalib_array_store_config_test_app COMMAND vespalib_array_store_config_test_app)
diff --git a/vespalib/src/tests/datastore/array_store_config/array_store_config_test.cpp b/vespalib/src/tests/datastore/array_store_config/array_store_config_test.cpp
new file mode 100644
index 00000000000..4779cf1aa70
--- /dev/null
+++ b/vespalib/src/tests/datastore/array_store_config/array_store_config_test.cpp
@@ -0,0 +1,84 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/datastore/entryref.h>
+#include <vespa/vespalib/datastore/array_store_config.h>
+
+using namespace search::datastore;
+using AllocSpec = ArrayStoreConfig::AllocSpec;
+
+constexpr float ALLOC_GROW_FACTOR = 0.2;
+
+struct Fixture
+{
+ using EntryRefType = EntryRefT<18>;
+ ArrayStoreConfig cfg;
+
+ Fixture(uint32_t maxSmallArraySize,
+ const AllocSpec &defaultSpec)
+ : cfg(maxSmallArraySize, defaultSpec) {}
+
+ Fixture(uint32_t maxSmallArraySize,
+ size_t hugePageSize,
+ size_t smallPageSize,
+ size_t minNumArraysForNewBuffer)
+ : cfg(ArrayStoreConfig::optimizeForHugePage(maxSmallArraySize, hugePageSize, smallPageSize,
+ sizeof(int), EntryRefType::offsetSize(),
+ minNumArraysForNewBuffer,
+ ALLOC_GROW_FACTOR)) { }
+ void assertSpec(size_t arraySize, uint32_t numArraysForNewBuffer) {
+ assertSpec(arraySize, AllocSpec(0, EntryRefType::offsetSize(),
+ numArraysForNewBuffer, ALLOC_GROW_FACTOR));
+ }
+ void assertSpec(size_t arraySize, const AllocSpec &expSpec) {
+ const ArrayStoreConfig::AllocSpec &actSpec = cfg.specForSize(arraySize);
+ EXPECT_EQUAL(expSpec.minArraysInBuffer, actSpec.minArraysInBuffer);
+ EXPECT_EQUAL(expSpec.maxArraysInBuffer, actSpec.maxArraysInBuffer);
+ EXPECT_EQUAL(expSpec.numArraysForNewBuffer, actSpec.numArraysForNewBuffer);
+ EXPECT_EQUAL(expSpec.allocGrowFactor, actSpec.allocGrowFactor);
+ }
+};
+
+AllocSpec
+makeSpec(size_t minArraysInBuffer,
+ size_t maxArraysInBuffer,
+ size_t numArraysForNewBuffer)
+{
+ return AllocSpec(minArraysInBuffer, maxArraysInBuffer, numArraysForNewBuffer, ALLOC_GROW_FACTOR);
+}
+
+constexpr size_t KB = 1024;
+constexpr size_t MB = KB * KB;
+
+TEST_F("require that default allocation spec is given for all array sizes", Fixture(3, makeSpec(4, 32, 8)))
+{
+ EXPECT_EQUAL(3u, f.cfg.maxSmallArraySize());
+ TEST_DO(f.assertSpec(0, makeSpec(4, 32, 8)));
+ TEST_DO(f.assertSpec(1, makeSpec(4, 32, 8)));
+ TEST_DO(f.assertSpec(2, makeSpec(4, 32, 8)));
+ TEST_DO(f.assertSpec(3, makeSpec(4, 32, 8)));
+}
+
+TEST_F("require that we can generate config optimized for a given huge page", Fixture(1024,
+ 2 * MB,
+ 4 * KB,
+ 8 * KB))
+{
+ EXPECT_EQUAL(1024u, f.cfg.maxSmallArraySize());
+ TEST_DO(f.assertSpec(0, 8 * KB)); // large arrays
+ TEST_DO(f.assertSpec(1, 256 * KB));
+ TEST_DO(f.assertSpec(2, 256 * KB));
+ TEST_DO(f.assertSpec(3, 168 * KB));
+ TEST_DO(f.assertSpec(4, 128 * KB));
+ TEST_DO(f.assertSpec(5, 100 * KB));
+ TEST_DO(f.assertSpec(6, 84 * KB));
+
+ TEST_DO(f.assertSpec(32, 16 * KB));
+ TEST_DO(f.assertSpec(33, 12 * KB));
+ TEST_DO(f.assertSpec(42, 12 * KB));
+ TEST_DO(f.assertSpec(43, 8 * KB));
+ TEST_DO(f.assertSpec(1022, 8 * KB));
+ TEST_DO(f.assertSpec(1023, 8 * KB));
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/tests/datastore/buffer_type/CMakeLists.txt b/vespalib/src/tests/datastore/buffer_type/CMakeLists.txt
new file mode 100644
index 00000000000..d3618c998e3
--- /dev/null
+++ b/vespalib/src/tests/datastore/buffer_type/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_buffer_type_test_app TEST
+ SOURCES
+ buffer_type_test.cpp
+ DEPENDS
+ vespalib
+)
+vespa_add_test(NAME vespalib_buffer_type_test_app COMMAND vespalib_buffer_type_test_app)
diff --git a/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp b/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp
new file mode 100644
index 00000000000..6a51b9192ce
--- /dev/null
+++ b/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp
@@ -0,0 +1,116 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/datastore/buffer_type.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using namespace search::datastore;
+
+using IntBufferType = BufferType<int>;
+constexpr uint32_t ARRAYS_SIZE(4);
+constexpr uint32_t MAX_ARRAYS(128);
+constexpr uint32_t NUM_ARRAYS_FOR_NEW_BUFFER(0);
+
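+// Builder-style parameters describing a single calcArraysToAlloc() scenario.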
+struct Setup {
+ uint32_t _minArrays;
+ size_t _usedElems;
+ size_t _neededElems;
+ uint32_t _bufferId;
+ float _allocGrowFactor;
+ bool _resizing;
+ Setup()
+ : _minArrays(0),
+ _usedElems(0),
+ _neededElems(0),
+ _bufferId(1),
+ _allocGrowFactor(0.5),
+ _resizing(false)
+ {}
+ Setup &minArrays(uint32_t value) { _minArrays = value; return *this; }
+ Setup &used(size_t value) { _usedElems = value; return *this; }
+ Setup &needed(size_t value) { _neededElems = value; return *this; }
+ Setup &bufferId(uint32_t value) { _bufferId = value; return *this; }
+ Setup &resizing(bool value) { _resizing = value; return *this; }
+};
+
+struct Fixture {
+ Setup setup;
+ IntBufferType bufferType;
+ size_t deadElems;
+ int buffer;
+ Fixture(const Setup &setup_)
+ : setup(setup_),
+ bufferType(ARRAYS_SIZE, setup._minArrays, MAX_ARRAYS, NUM_ARRAYS_FOR_NEW_BUFFER, setup._allocGrowFactor),
+ deadElems(0),
+ buffer(0)
+ {}
+ ~Fixture() {
+ bufferType.onHold(&setup._usedElems);
+ bufferType.onFree(setup._usedElems);
+ }
+ void onActive() {
+ bufferType.onActive(setup._bufferId, &setup._usedElems, deadElems, &buffer);
+ }
+ size_t arraysToAlloc() {
+ return bufferType.calcArraysToAlloc(setup._bufferId, setup._neededElems, setup._resizing);
+ }
+};
+
+void
+assertArraysToAlloc(size_t exp, const Setup &setup)
+{
+ Fixture f(setup);
+ f.onActive();
+ EXPECT_EQUAL(exp, f.arraysToAlloc());
+}
+
+TEST("require that complete arrays are allocated")
+{
+ TEST_DO(assertArraysToAlloc(1, Setup().needed(1)));
+ TEST_DO(assertArraysToAlloc(1, Setup().needed(2)));
+ TEST_DO(assertArraysToAlloc(1, Setup().needed(3)));
+ TEST_DO(assertArraysToAlloc(1, Setup().needed(4)));
+ TEST_DO(assertArraysToAlloc(2, Setup().needed(5)));
+}
+
+TEST("require that reserved elements are taken into account when not resizing")
+{
+ TEST_DO(assertArraysToAlloc(2, Setup().needed(1).bufferId(0)));
+ TEST_DO(assertArraysToAlloc(2, Setup().needed(4).bufferId(0)));
+ TEST_DO(assertArraysToAlloc(3, Setup().needed(5).bufferId(0)));
+}
+
+TEST("require that arrays to alloc is based on currently used elements (no resizing)")
+{
+ TEST_DO(assertArraysToAlloc(2, Setup().used(4 * 4).needed(4)));
+ TEST_DO(assertArraysToAlloc(4, Setup().used(8 * 4).needed(4)));
+}
+
+TEST("require that arrays to alloc is based on currently used elements (with resizing)")
+{
+ TEST_DO(assertArraysToAlloc(4 + 2, Setup().used(4 * 4).needed(4).resizing(true)));
+ TEST_DO(assertArraysToAlloc(8 + 4, Setup().used(8 * 4).needed(4).resizing(true)));
+ TEST_DO(assertArraysToAlloc(4 + 3, Setup().used(4 * 4).needed(3 * 4).resizing(true)));
+}
+
+TEST("require that arrays to alloc always contain elements needed")
+{
+ TEST_DO(assertArraysToAlloc(2, Setup().used(4 * 4).needed(2 * 4)));
+ TEST_DO(assertArraysToAlloc(3, Setup().used(4 * 4).needed(3 * 4)));
+ TEST_DO(assertArraysToAlloc(4, Setup().used(4 * 4).needed(4 * 4)));
+}
+
+TEST("require that arrays to alloc is capped to max arrays")
+{
+ TEST_DO(assertArraysToAlloc(127, Setup().used(254 * 4).needed(4)));
+ TEST_DO(assertArraysToAlloc(128, Setup().used(256 * 4).needed(4)));
+ TEST_DO(assertArraysToAlloc(128, Setup().used(258 * 4).needed(8)));
+}
+
+TEST("require that arrays to alloc is capped to min arrays")
+{
+ TEST_DO(assertArraysToAlloc(16, Setup().used(30 * 4).needed(4).minArrays(16)));
+ TEST_DO(assertArraysToAlloc(16, Setup().used(32 * 4).needed(4).minArrays(16)));
+ TEST_DO(assertArraysToAlloc(17, Setup().used(34 * 4).needed(4).minArrays(16)));
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/tests/datastore/datastore/CMakeLists.txt b/vespalib/src/tests/datastore/datastore/CMakeLists.txt
new file mode 100644
index 00000000000..eb3e0a4576a
--- /dev/null
+++ b/vespalib/src/tests/datastore/datastore/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_datastore_test_app TEST
+ SOURCES
+ datastore_test.cpp
+ DEPENDS
+ vespalib
+ gtest
+)
+vespa_add_test(NAME vespalib_datastore_test_app COMMAND vespalib_datastore_test_app)
diff --git a/vespalib/src/tests/datastore/datastore/datastore_test.cpp b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
new file mode 100644
index 00000000000..0981280ac23
--- /dev/null
+++ b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
@@ -0,0 +1,584 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/datastore/datastore.h>
+#include <vespa/vespalib/datastore/datastore.hpp>
+#include <vespa/vespalib/gtest/gtest.h>
+#include <vespa/vespalib/test/insertion_operators.h>
+
+#include <vespa/log/log.h>
+LOG_SETUP("datastore_test");
+
+namespace search::datastore {
+
+using vespalib::alloc::MemoryAllocator;
+
+struct IntReclaimer
+{
+ static void reclaim(int *) {}
+};
+
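+// Test store exposing protected DataStore hooks (hold/trim, free lists, buffer switching);
+// EntryRefT<3, 2> gives tiny buffers so buffer switching is easy to trigger.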
+class MyStore : public DataStore<int, EntryRefT<3, 2> > {
+private:
+ using ParentType = DataStore<int, EntryRefT<3, 2> >;
+ using ParentType::_activeBufferIds;
+public:
+ MyStore() {}
+
+ void
+ holdBuffer(uint32_t bufferId)
+ {
+ ParentType::holdBuffer(bufferId);
+ }
+
+ void
+ holdElem(EntryRef ref, uint64_t len)
+ {
+ ParentType::holdElem(ref, len);
+ }
+
+ void
+ transferHoldLists(generation_t generation)
+ {
+ ParentType::transferHoldLists(generation);
+ }
+
+ void trimElemHoldList(generation_t usedGen) override {
+ ParentType::trimElemHoldList(usedGen);
+ }
+ void incDead(EntryRef ref, uint64_t dead) {
+ ParentType::incDead(ref, dead);
+ }
+ void ensureBufferCapacity(size_t sizeNeeded) {
+ ParentType::ensureBufferCapacity(0, sizeNeeded);
+ }
+ void enableFreeLists() {
+ ParentType::enableFreeLists();
+ }
+
+ void
+ switchActiveBuffer()
+ {
+ ParentType::switchActiveBuffer(0, 0u);
+ }
+ size_t activeBufferId() const { return _activeBufferIds[0]; }
+};
+
+
+using GrowthStats = std::vector<int>;
+
+constexpr float ALLOC_GROW_FACTOR = 0.4;
+constexpr size_t HUGE_PAGE_ARRAY_SIZE = (MemoryAllocator::HUGEPAGE_SIZE / sizeof(int));
+
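+// Helper that keeps allocating entries and records buffer capacities (and resize points for the
+// first buffer) so the buffer growth strategy can be verified.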
+template <typename DataType, typename RefType>
+class GrowStore
+{
+ using Store = DataStoreT<RefType>;
+ Store _store;
+ BufferType<DataType> _firstType;
+ BufferType<DataType> _type;
+ uint32_t _typeId;
+public:
+ GrowStore(size_t arraySize, size_t minArrays, size_t maxArrays, size_t numArraysForNewBuffer)
+ : _store(),
+ _firstType(1, 1, maxArrays, 0, ALLOC_GROW_FACTOR),
+ _type(arraySize, minArrays, maxArrays, numArraysForNewBuffer, ALLOC_GROW_FACTOR),
+ _typeId(0)
+ {
+ (void) _store.addType(&_firstType);
+ _typeId = _store.addType(&_type);
+ _store.initActiveBuffers();
+ }
+ ~GrowStore() { _store.dropBuffers(); }
+
+ Store &store() { return _store; }
+ uint32_t typeId() const { return _typeId; }
+
+ GrowthStats getGrowthStats(size_t bufs) {
+ GrowthStats sizes;
+ int prevBufferId = -1;
+ while (sizes.size() < bufs) {
+ RefType iRef = (_type.getArraySize() == 1) ?
+ (_store.template allocator<DataType>(_typeId).alloc().ref) :
+ (_store.template allocator<DataType>(_typeId).allocArray(_type.getArraySize()).ref);
+ int bufferId = iRef.bufferId();
+ if (bufferId != prevBufferId) {
+ if (prevBufferId >= 0) {
+ const auto &state = _store.getBufferState(prevBufferId);
+ sizes.push_back(state.capacity());
+ }
+ prevBufferId = bufferId;
+ }
+ }
+ return sizes;
+ }
+ GrowthStats getFirstBufGrowStats() {
+ GrowthStats sizes;
+ int i = 0;
+ int prevBuffer = -1;
+ size_t prevAllocated = _store.getMemoryUsage().allocatedBytes();
+ for (;;) {
+ RefType iRef = _store.template allocator<DataType>(_typeId).alloc().ref;
+ size_t allocated = _store.getMemoryUsage().allocatedBytes();
+ if (allocated != prevAllocated) {
+ sizes.push_back(i);
+ prevAllocated = allocated;
+ }
+ int buffer = iRef.bufferId();
+ if (buffer != prevBuffer) {
+ if (prevBuffer >= 0) {
+ return sizes;
+ }
+ prevBuffer = buffer;
+ }
+ ++i;
+ }
+ }
+ vespalib::MemoryUsage getMemoryUsage() const { return _store.getMemoryUsage(); }
+};
+
+using MyRef = MyStore::RefType;
+
+void
+assertMemStats(const DataStoreBase::MemStats &exp,
+ const DataStoreBase::MemStats &act)
+{
+ EXPECT_EQ(exp._allocElems, act._allocElems);
+ EXPECT_EQ(exp._usedElems, act._usedElems);
+ EXPECT_EQ(exp._deadElems, act._deadElems);
+ EXPECT_EQ(exp._holdElems, act._holdElems);
+ EXPECT_EQ(exp._freeBuffers, act._freeBuffers);
+ EXPECT_EQ(exp._activeBuffers, act._activeBuffers);
+ EXPECT_EQ(exp._holdBuffers, act._holdBuffers);
+}
+
+TEST(DataStoreTest, require_that_entry_ref_is_working)
+{
+ using MyRefType = EntryRefT<22>;
+ EXPECT_EQ(4194304u, MyRefType::offsetSize());
+ EXPECT_EQ(1024u, MyRefType::numBuffers());
+ {
+ MyRefType r(0, 0);
+ EXPECT_EQ(0u, r.offset());
+ EXPECT_EQ(0u, r.bufferId());
+ }
+ {
+ MyRefType r(237, 13);
+ EXPECT_EQ(237u, r.offset());
+ EXPECT_EQ(13u, r.bufferId());
+ }
+ {
+ MyRefType r(4194303, 1023);
+ EXPECT_EQ(4194303u, r.offset());
+ EXPECT_EQ(1023u, r.bufferId());
+ }
+ {
+ MyRefType r1(6498, 76);
+ MyRefType r2(r1);
+ EXPECT_EQ(r1.offset(), r2.offset());
+ EXPECT_EQ(r1.bufferId(), r2.bufferId());
+ }
+}
+
+TEST(DataStoreTest, require_that_aligned_entry_ref_is_working)
+{
+ using MyRefType = AlignedEntryRefT<22, 2>; // 4-byte alignment
+ EXPECT_EQ(4 * 4194304u, MyRefType::offsetSize());
+ EXPECT_EQ(1024u, MyRefType::numBuffers());
+ EXPECT_EQ(0u, MyRefType::align(0));
+ EXPECT_EQ(4u, MyRefType::align(1));
+ EXPECT_EQ(4u, MyRefType::align(2));
+ EXPECT_EQ(4u, MyRefType::align(3));
+ EXPECT_EQ(4u, MyRefType::align(4));
+ EXPECT_EQ(8u, MyRefType::align(5));
+ {
+ MyRefType r(0, 0);
+ EXPECT_EQ(0u, r.offset());
+ EXPECT_EQ(0u, r.bufferId());
+ }
+ {
+ MyRefType r(237, 13);
+ EXPECT_EQ(MyRefType::align(237), r.offset());
+ EXPECT_EQ(13u, r.bufferId());
+ }
+ {
+ MyRefType r(MyRefType::offsetSize() - 4, 1023);
+ EXPECT_EQ(MyRefType::align(MyRefType::offsetSize() - 4), r.offset());
+ EXPECT_EQ(1023u, r.bufferId());
+ }
+}
+
+TEST(DataStoreTest, require_that_entries_can_be_added_and_retrieved)
+{
+ using IntStore = DataStore<int>;
+ IntStore ds;
+ EntryRef r1 = ds.addEntry(10);
+ EntryRef r2 = ds.addEntry(20);
+ EntryRef r3 = ds.addEntry(30);
+ EXPECT_EQ(1u, IntStore::RefType(r1).offset());
+ EXPECT_EQ(2u, IntStore::RefType(r2).offset());
+ EXPECT_EQ(3u, IntStore::RefType(r3).offset());
+ EXPECT_EQ(0u, IntStore::RefType(r1).bufferId());
+ EXPECT_EQ(0u, IntStore::RefType(r2).bufferId());
+ EXPECT_EQ(0u, IntStore::RefType(r3).bufferId());
+ EXPECT_EQ(10, ds.getEntry(r1));
+ EXPECT_EQ(20, ds.getEntry(r2));
+ EXPECT_EQ(30, ds.getEntry(r3));
+}
+
+TEST(DataStoreTest, require_that_add_entry_triggers_change_of_buffer)
+{
+ using Store = DataStore<uint64_t, EntryRefT<10, 10> >;
+ Store s;
+ uint64_t num = 0;
+ uint32_t lastId = 0;
+ uint64_t lastNum = 0;
+ for (;;++num) {
+ EntryRef r = s.addEntry(num);
+ EXPECT_EQ(num, s.getEntry(r));
+ uint32_t bufferId = Store::RefType(r).bufferId();
+ if (bufferId > lastId) {
+ LOG(info, "Changed to bufferId %u after %" PRIu64 " nums", bufferId, num);
+ EXPECT_EQ(Store::RefType::offsetSize() - (lastId == 0), num - lastNum);
+ lastId = bufferId;
+ lastNum = num;
+ }
+ if (bufferId == 2) {
+ break;
+ }
+ }
+ EXPECT_EQ(Store::RefType::offsetSize() * 2 - 1, num);
+ LOG(info, "Added %" PRIu64 " nums in 2 buffers", num);
+}
+
+TEST(DataStoreTest, require_that_we_can_hold_and_trim_buffers)
+{
+ MyStore s;
+ EXPECT_EQ(0u, MyRef(s.addEntry(1)).bufferId());
+ s.switchActiveBuffer();
+ EXPECT_EQ(1u, s.activeBufferId());
+ s.holdBuffer(0); // hold last buffer
+ s.transferHoldLists(10);
+
+ EXPECT_EQ(1u, MyRef(s.addEntry(2)).bufferId());
+ s.switchActiveBuffer();
+ EXPECT_EQ(2u, s.activeBufferId());
+ s.holdBuffer(1); // hold last buffer
+ s.transferHoldLists(20);
+
+ EXPECT_EQ(2u, MyRef(s.addEntry(3)).bufferId());
+ s.switchActiveBuffer();
+ EXPECT_EQ(3u, s.activeBufferId());
+ s.holdBuffer(2); // hold last buffer
+ s.transferHoldLists(30);
+
+ EXPECT_EQ(3u, MyRef(s.addEntry(4)).bufferId());
+ s.holdBuffer(3); // hold current buffer
+ s.transferHoldLists(40);
+
+ EXPECT_TRUE(s.getBufferState(0).size() != 0);
+ EXPECT_TRUE(s.getBufferState(1).size() != 0);
+ EXPECT_TRUE(s.getBufferState(2).size() != 0);
+ EXPECT_TRUE(s.getBufferState(3).size() != 0);
+ s.trimHoldLists(11);
+ EXPECT_TRUE(s.getBufferState(0).size() == 0);
+ EXPECT_TRUE(s.getBufferState(1).size() != 0);
+ EXPECT_TRUE(s.getBufferState(2).size() != 0);
+ EXPECT_TRUE(s.getBufferState(3).size() != 0);
+
+ s.switchActiveBuffer();
+ EXPECT_EQ(0u, s.activeBufferId());
+ EXPECT_EQ(0u, MyRef(s.addEntry(5)).bufferId());
+ s.trimHoldLists(41);
+ EXPECT_TRUE(s.getBufferState(0).size() != 0);
+ EXPECT_TRUE(s.getBufferState(1).size() == 0);
+ EXPECT_TRUE(s.getBufferState(2).size() == 0);
+ EXPECT_TRUE(s.getBufferState(3).size() == 0);
+}
+
+TEST(DataStoreTest, require_that_we_can_hold_and_trim_elements)
+{
+ MyStore s;
+ MyRef r1 = s.addEntry(1);
+ s.holdElem(r1, 1);
+ s.transferHoldLists(10);
+ MyRef r2 = s.addEntry(2);
+ s.holdElem(r2, 1);
+ s.transferHoldLists(20);
+ MyRef r3 = s.addEntry(3);
+ s.holdElem(r3, 1);
+ s.transferHoldLists(30);
+ EXPECT_EQ(1, s.getEntry(r1));
+ EXPECT_EQ(2, s.getEntry(r2));
+ EXPECT_EQ(3, s.getEntry(r3));
+ s.trimElemHoldList(11);
+ EXPECT_EQ(0, s.getEntry(r1));
+ EXPECT_EQ(2, s.getEntry(r2));
+ EXPECT_EQ(3, s.getEntry(r3));
+ s.trimElemHoldList(31);
+ EXPECT_EQ(0, s.getEntry(r1));
+ EXPECT_EQ(0, s.getEntry(r2));
+ EXPECT_EQ(0, s.getEntry(r3));
+}
+
+using IntHandle = Handle<int>;
+
+MyRef
+to_ref(IntHandle handle)
+{
+ return MyRef(handle.ref);
+}
+
+std::ostream&
+operator<<(std::ostream &os, const IntHandle &rhs)
+{
+ MyRef ref(rhs.ref);
+ os << "{ref.bufferId=" << ref.bufferId() << ", ref.offset=" << ref.offset() << ", data=" << rhs.data << "}";
+ return os;
+}
+
+void
+expect_successive_handles(const IntHandle &first, const IntHandle &second)
+{
+ EXPECT_EQ(to_ref(first).offset() + 1, to_ref(second).offset());
+}
+
+TEST(DataStoreTest, require_that_we_can_use_free_lists)
+{
+ MyStore s;
+ s.enableFreeLists();
+ auto allocator = s.freeListAllocator<IntReclaimer>();
+ auto h1 = allocator.alloc(1);
+ s.holdElem(h1.ref, 1);
+ s.transferHoldLists(10);
+ auto h2 = allocator.alloc(2);
+ expect_successive_handles(h1, h2);
+ s.holdElem(h2.ref, 1);
+ s.transferHoldLists(20);
+ s.trimElemHoldList(11);
+ auto h3 = allocator.alloc(3); // reuse h1.ref
+ EXPECT_EQ(h1, h3);
+ auto h4 = allocator.alloc(4);
+ expect_successive_handles(h2, h4);
+ s.trimElemHoldList(21);
+ auto h5 = allocator.alloc(5); // reuse h2.ref
+ EXPECT_EQ(h2, h5);
+ auto h6 = allocator.alloc(6);
+ expect_successive_handles(h4, h6);
+ EXPECT_EQ(3, s.getEntry(h1.ref));
+ EXPECT_EQ(5, s.getEntry(h2.ref));
+ EXPECT_EQ(3, s.getEntry(h3.ref));
+ EXPECT_EQ(4, s.getEntry(h4.ref));
+ EXPECT_EQ(5, s.getEntry(h5.ref));
+ EXPECT_EQ(6, s.getEntry(h6.ref));
+}
+
+TEST(DataStoreTest, require_that_we_can_use_free_lists_with_raw_allocator)
+{
+ GrowStore<int, MyRef> grow_store(3, 64, 64, 64);
+ auto &s = grow_store.store();
+ s.enableFreeLists();
+ auto allocator = s.freeListRawAllocator<int>(grow_store.typeId());
+
+ auto h1 = allocator.alloc(3);
+ auto h2 = allocator.alloc(3);
+ expect_successive_handles(h1, h2);
+ s.holdElem(h1.ref, 3);
+ s.holdElem(h2.ref, 3);
+ s.transferHoldLists(10);
+ s.trimElemHoldList(11);
+
+ auto h3 = allocator.alloc(3); // reuse h2.ref from free list
+ EXPECT_EQ(h2, h3);
+
+ auto h4 = allocator.alloc(3); // reuse h1.ref from free list
+ EXPECT_EQ(h1, h4);
+
+ auto h5 = allocator.alloc(3);
+ expect_successive_handles(h2, h5);
+ expect_successive_handles(h3, h5);
+}
+
+TEST(DataStoreTest, require_that_memory_stats_are_calculated)
+{
+ MyStore s;
+ DataStoreBase::MemStats m;
+ m._allocElems = MyRef::offsetSize();
+ m._usedElems = 1; // ref = 0 is reserved
+ m._deadElems = 1; // ref = 0 is reserved
+ m._holdElems = 0;
+ m._activeBuffers = 1;
+ m._freeBuffers = MyRef::numBuffers() - 1;
+ m._holdBuffers = 0;
+ assertMemStats(m, s.getMemStats());
+
+ // add entry
+ MyRef r = s.addEntry(10);
+ m._usedElems++;
+ assertMemStats(m, s.getMemStats());
+
+ // inc dead
+ s.incDead(r, 1);
+ m._deadElems++;
+ assertMemStats(m, s.getMemStats());
+
+ // hold buffer
+ s.addEntry(20);
+ s.addEntry(30);
+ s.holdBuffer(r.bufferId());
+ s.transferHoldLists(100);
+ m._usedElems += 2;
+ m._holdElems += 2; // used - dead
+ m._activeBuffers--;
+ m._holdBuffers++;
+ assertMemStats(m, s.getMemStats());
+
+ // new active buffer
+ s.switchActiveBuffer();
+ s.addEntry(40);
+ m._allocElems += MyRef::offsetSize();
+ m._usedElems++;
+ m._activeBuffers++;
+ m._freeBuffers--;
+
+ // trim hold buffer
+ s.trimHoldLists(101);
+ m._allocElems -= MyRef::offsetSize();
+ m._usedElems = 1;
+ m._deadElems = 0;
+ m._holdElems = 0;
+ m._freeBuffers = MyRef::numBuffers() - 1;
+ m._holdBuffers = 0;
+ assertMemStats(m, s.getMemStats());
+}
+
+TEST(DataStoreTest, require_that_memory_usage_is_calculated)
+{
+ MyStore s;
+ MyRef r = s.addEntry(10);
+ s.addEntry(20);
+ s.addEntry(30);
+ s.addEntry(40);
+ s.incDead(r, 1);
+ s.holdBuffer(r.bufferId());
+ s.transferHoldLists(100);
+ vespalib::MemoryUsage m = s.getMemoryUsage();
+ EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
+ EXPECT_EQ(5 * sizeof(int), m.usedBytes());
+ EXPECT_EQ(2 * sizeof(int), m.deadBytes());
+ EXPECT_EQ(3 * sizeof(int), m.allocatedBytesOnHold());
+ s.trimHoldLists(101);
+}
+
+TEST(DataStoreTest, require_that_we_can_disable_element_hold_list)
+{
+ MyStore s;
+ MyRef r1 = s.addEntry(10);
+ MyRef r2 = s.addEntry(20);
+ MyRef r3 = s.addEntry(30);
+ (void) r3;
+ vespalib::MemoryUsage m = s.getMemoryUsage();
+ EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
+ EXPECT_EQ(4 * sizeof(int), m.usedBytes());
+ EXPECT_EQ(1 * sizeof(int), m.deadBytes());
+ EXPECT_EQ(0 * sizeof(int), m.allocatedBytesOnHold());
+ s.holdElem(r1, 1);
+ m = s.getMemoryUsage();
+ EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
+ EXPECT_EQ(4 * sizeof(int), m.usedBytes());
+ EXPECT_EQ(1 * sizeof(int), m.deadBytes());
+ EXPECT_EQ(1 * sizeof(int), m.allocatedBytesOnHold());
+ s.disableElemHoldList();
+ s.holdElem(r2, 1);
+ m = s.getMemoryUsage();
+ EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
+ EXPECT_EQ(4 * sizeof(int), m.usedBytes());
+ EXPECT_EQ(2 * sizeof(int), m.deadBytes());
+ EXPECT_EQ(1 * sizeof(int), m.allocatedBytesOnHold());
+ s.transferHoldLists(100);
+ s.trimHoldLists(101);
+}
+
+using IntGrowStore = GrowStore<int, EntryRefT<24>>;
+
+namespace {
+
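+// Checks buffer capacities across buffer switches, the resize points within the first buffer,
+// and the initial allocated memory for the given allocation parameters.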
+void assertGrowStats(GrowthStats expSizes,
+ GrowthStats expFirstBufSizes,
+ size_t expInitMemUsage,
+ size_t minArrays, size_t numArraysForNewBuffer, size_t maxArrays = 128)
+{
+ EXPECT_EQ(expSizes, IntGrowStore(1, minArrays, maxArrays, numArraysForNewBuffer).getGrowthStats(expSizes.size()));
+ EXPECT_EQ(expFirstBufSizes, IntGrowStore(1, minArrays, maxArrays, numArraysForNewBuffer).getFirstBufGrowStats());
+ EXPECT_EQ(expInitMemUsage, IntGrowStore(1, minArrays, maxArrays, numArraysForNewBuffer).getMemoryUsage().allocatedBytes());
+}
+
+}
+
+TEST(DataStoreTest, require_that_buffer_growth_works)
+{
+ // Always switch to new buffer, min size 4
+ assertGrowStats({ 4, 4, 4, 4, 8, 16, 16, 32, 64, 64 },
+ { 4 }, 20, 4, 0);
+ // Resize if buffer size is less than 4, min size 0
+ assertGrowStats({ 4, 4, 4, 4, 8, 16, 16, 32, 64, 64 },
+ { 0, 1, 2, 4 }, 4, 0, 4);
+ // Always switch to new buffer, min size 16
+ assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
+ { 16 }, 68, 16, 0);
+ // Resize if buffer size is less than 16, min size 0
+ assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
+ { 0, 1, 2, 4, 8, 16 }, 4, 0, 16);
+ // Resize if buffer size is less than 16, min size 4
+ assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
+ { 4, 8, 16 }, 20, 4, 16);
+ // Always switch to new buffer, min size 0
+ assertGrowStats({ 1, 1, 1, 1, 1, 2, 2, 4, 8, 8, 16, 32 },
+ { 0, 1 }, 4, 0, 0);
+
+ // Buffers with sizes larger than the huge page size of the mmap allocator.
+ ASSERT_EQ(524288u, HUGE_PAGE_ARRAY_SIZE);
+ assertGrowStats({ 262144, 262144, 262144, 524288, 524288, 524288 * 2, 524288 * 3, 524288 * 4, 524288 * 5, 524288 * 5 },
+ { 0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144 },
+ 4, 0, HUGE_PAGE_ARRAY_SIZE / 2, HUGE_PAGE_ARRAY_SIZE * 5);
+}
+
+using RefType15 = EntryRefT<15>; // offsetSize=32768
+
+namespace {
+
+template <typename DataType>
+void assertGrowStats(GrowthStats expSizes, uint32_t arraySize)
+{
+ uint32_t minArrays = 2048;
+ uint32_t maxArrays = RefType15::offsetSize();
+ uint32_t numArraysForNewBuffer = 2048;
+ GrowStore<DataType, RefType15> store(arraySize, minArrays, maxArrays, numArraysForNewBuffer);
+ EXPECT_EQ(expSizes, store.getGrowthStats(expSizes.size()));
+}
+
+}
+
+TEST(DataStoreTest, require_that_offset_in_EntryRefT_is_within_bounds_when_allocating_memory_buffers_where_wanted_number_of_bytes_is_not_a_power_of_2_and_less_than_huge_page_size)
+{
+ /*
+ * When allocating new memory buffers for the data store, the following happens (ref. calcAllocation() in bufferstate.cpp):
+ * 1) Calculate how many arrays to alloc.
+ * In this case we alloc a minimum of 2048 and a maximum of 32768.
+ * 2) Calculate how many bytes to alloc: arraysToAlloc * arraySize * elementSize.
+ * In this case elementSize is (1 or 4) and arraySize varies (3, 5, 7).
+ * 3) Round up bytes to alloc to match the underlying allocator (power of 2 if less than huge page size):
+ * After this we might end up with more bytes than the offset in EntryRef can handle; in this case that limit is 32768.
+ * 4) Cap bytes to alloc to the max offset EntryRef can handle.
+ * The max bytes to alloc is: maxArrays * arraySize * elementSize.
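+ * A rough worked example (assuming the expected growth stats below are buffer capacities in elements):
+ * for uint8_t with arraySize=3, the first buffer wants 2048 * 3 * 1 = 6144 bytes, which is rounded
+ * up to 8192 bytes, i.e. the first expected value of 8192 below.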
+ */
+ assertGrowStats<uint8_t>({8192,8192,8192,16384,16384,32768,65536,65536,98304,98304,98304,98304}, 3);
+ assertGrowStats<uint8_t>({16384,16384,16384,32768,32768,65536,131072,131072,163840,163840,163840,163840}, 5);
+ assertGrowStats<uint8_t>({16384,16384,16384,32768,32768,65536,131072,131072,229376,229376,229376,229376}, 7);
+ assertGrowStats<uint32_t>({8192,8192,8192,16384,16384,32768,65536,65536,98304,98304,98304,98304}, 3);
+ assertGrowStats<uint32_t>({16384,16384,16384,32768,32768,65536,131072,131072,163840,163840,163840,163840}, 5);
+ assertGrowStats<uint32_t>({16384,16384,16384,32768,32768,65536,131072,131072,229376,229376,229376,229376}, 7);
+}
+
+}
+
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/vespalib/src/tests/datastore/unique_store/CMakeLists.txt b/vespalib/src/tests/datastore/unique_store/CMakeLists.txt
new file mode 100644
index 00000000000..dd200018448
--- /dev/null
+++ b/vespalib/src/tests/datastore/unique_store/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_unique_store_test_app TEST
+ SOURCES
+ unique_store_test.cpp
+ DEPENDS
+ vespalib
+)
+vespa_add_test(NAME vespalib_unique_store_test_app COMMAND vespalib_unique_store_test_app)
diff --git a/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp b/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp
new file mode 100644
index 00000000000..ec3a3040167
--- /dev/null
+++ b/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp
@@ -0,0 +1,267 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/log/log.h>
+LOG_SETUP("unique_store_test");
+#include <vespa/vespalib/datastore/unique_store.hpp>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/test/datastore/memstats.h>
+#include <vespa/vespalib/test/insertion_operators.h>
+#include <vespa/vespalib/util/traits.h>
+#include <vector>
+
+using namespace search::datastore;
+using vespalib::MemoryUsage;
+using vespalib::ArrayRef;
+using generation_t = vespalib::GenerationHandler::generation_t;
+using MemStats = search::datastore::test::MemStats;
+
+template <typename EntryT, typename RefT = EntryRefT<22> >
+struct Fixture
+{
+ using EntryRefType = RefT;
+ using UniqueStoreType = UniqueStore<EntryT, RefT>;
+ using UniqueStoreAddResult = typename UniqueStoreType::AddResult;
+ using value_type = EntryT;
+ using ReferenceStore = std::map<EntryRef, std::pair<EntryT,uint32_t>>;
+
+ UniqueStoreType store;
+ ReferenceStore refStore;
+ generation_t generation;
+ Fixture()
+ : store(),
+ refStore(),
+ generation(1)
+ {}
+ void assertAdd(const EntryT &input) {
+ EntryRef ref = add(input);
+ assertGet(ref, input);
+ }
+ EntryRef add(const EntryT &input) {
+ UniqueStoreAddResult addResult = store.add(input);
+ EntryRef result = addResult.ref();
+ auto insres = refStore.insert(std::make_pair(result, std::make_pair(input, 1u)));
+ EXPECT_EQUAL(insres.second, addResult.inserted());
+ if (!insres.second) {
+ ++insres.first->second.second;
+ }
+ return result;
+ }
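+ // Force refStore to the given ref count for entries created outside add() (e.g. by the builder),
+ // or drop the entry when refcnt is 0.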
+ void alignRefStore(EntryRef ref, const EntryT &input, uint32_t refcnt) {
+ if (refcnt > 0) {
+ auto insres = refStore.insert(std::make_pair(ref, std::make_pair(input, refcnt)));
+ if (!insres.second) {
+ insres.first->second.second = refcnt;
+ }
+ } else {
+ refStore.erase(ref);
+ }
+ }
+ void assertGet(EntryRef ref, const EntryT &exp) const {
+ EntryT act = store.get(ref);
+ EXPECT_EQUAL(exp, act);
+ }
+ void remove(EntryRef ref) {
+ ASSERT_EQUAL(1u, refStore.count(ref));
+ store.remove(ref);
+ if (refStore[ref].second > 1) {
+ --refStore[ref].second;
+ } else {
+ refStore.erase(ref);
+ }
+ }
+ void remove(const EntryT &input) {
+ remove(getEntryRef(input));
+ }
+ uint32_t getBufferId(EntryRef ref) const {
+ return EntryRefType(ref).bufferId();
+ }
+ void assertBufferState(EntryRef ref, const MemStats expStats) const {
+ EXPECT_EQUAL(expStats._used, store.bufferState(ref).size());
+ EXPECT_EQUAL(expStats._hold, store.bufferState(ref).getHoldElems());
+ EXPECT_EQUAL(expStats._dead, store.bufferState(ref).getDeadElems());
+ }
+ void assertMemoryUsage(const MemStats expStats) const {
+ MemoryUsage act = store.getMemoryUsage();
+ EXPECT_EQUAL(expStats._used, act.usedBytes());
+ EXPECT_EQUAL(expStats._hold, act.allocatedBytesOnHold());
+ EXPECT_EQUAL(expStats._dead, act.deadBytes());
+ }
+ void assertStoreContent() const {
+ for (const auto &elem : refStore) {
+ TEST_DO(assertGet(elem.first, elem.second.first));
+ }
+ }
+ EntryRef getEntryRef(const EntryT &input) {
+ for (const auto &elem : refStore) {
+ if (elem.second.first == input) {
+ return elem.first;
+ }
+ }
+ return EntryRef();
+ }
+ void trimHoldLists() {
+ store.freeze();
+ store.transferHoldLists(generation++);
+ store.trimHoldLists(generation);
+ }
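+ // Compact the worst buffer, remap all known refs (an invalid ref is included and must stay invalid),
+ // and rebuild refStore with the remapped refs.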
+ void compactWorst() {
+ ICompactionContext::UP ctx = store.compactWorst();
+ std::vector<EntryRef> refs;
+ for (const auto &elem : refStore) {
+ refs.push_back(elem.first);
+ }
+ refs.push_back(EntryRef());
+ std::vector<EntryRef> compactedRefs = refs;
+ ctx->compact(ArrayRef<EntryRef>(compactedRefs));
+ ASSERT_FALSE(refs.back().valid());
+ refs.pop_back();
+ ReferenceStore compactedRefStore;
+ for (size_t i = 0; i < refs.size(); ++i) {
+ ASSERT_EQUAL(0u, compactedRefStore.count(compactedRefs[i]));
+ ASSERT_EQUAL(1u, refStore.count(refs[i]));
+ compactedRefStore.insert(std::make_pair(compactedRefs[i], refStore[refs[i]]));
+ }
+ refStore = compactedRefStore;
+ }
+ size_t entrySize() const { return sizeof(EntryT); }
+ auto getBuilder(uint32_t uniqueValuesHint) { return store.getBuilder(uniqueValuesHint); }
+ auto getSaver() { return store.getSaver(); }
+};
+
+using NumberFixture = Fixture<uint32_t>;
+using StringFixture = Fixture<std::string>;
+using SmallOffsetNumberFixture = Fixture<uint32_t, EntryRefT<10>>;
+
+TEST("require that we test with trivial and non-trivial types")
+{
+ EXPECT_TRUE(vespalib::can_skip_destruction<NumberFixture::value_type>::value);
+ EXPECT_FALSE(vespalib::can_skip_destruction<StringFixture::value_type>::value);
+}
+
+TEST_F("require that we can add and get values of trivial type", NumberFixture)
+{
+ TEST_DO(f.assertAdd(1));
+ TEST_DO(f.assertAdd(2));
+ TEST_DO(f.assertAdd(3));
+ TEST_DO(f.assertAdd(1));
+}
+
+TEST_F("require that we can add and get values of non-trivial type", StringFixture)
+{
+ TEST_DO(f.assertAdd("aa"));
+ TEST_DO(f.assertAdd("bbb"));
+ TEST_DO(f.assertAdd("ccc"));
+ TEST_DO(f.assertAdd("aa"));
+}
+
+TEST_F("require that elements are put on hold when a value is removed", NumberFixture)
+{
+ EntryRef ref = f.add(1);
+ // Note: The first buffer has the first element reserved -> we expect 2 elements used here.
+ TEST_DO(f.assertBufferState(ref, MemStats().used(2).hold(0).dead(1)));
+ f.store.remove(ref);
+ TEST_DO(f.assertBufferState(ref, MemStats().used(2).hold(1).dead(1)));
+}
+
+TEST_F("require that elements are reference counted", NumberFixture)
+{
+ EntryRef ref = f.add(1);
+ EntryRef ref2 = f.add(1);
+ EXPECT_EQUAL(ref.ref(), ref2.ref());
+ // Note: The first buffer has the first element reserved -> we expect 2 elements used here.
+ TEST_DO(f.assertBufferState(ref, MemStats().used(2).hold(0).dead(1)));
+ f.store.remove(ref);
+ TEST_DO(f.assertBufferState(ref, MemStats().used(2).hold(0).dead(1)));
+ f.store.remove(ref);
+ TEST_DO(f.assertBufferState(ref, MemStats().used(2).hold(1).dead(1)));
+}
+
+TEST_F("require that new underlying buffer is allocated when current is full", SmallOffsetNumberFixture)
+{
+ uint32_t firstBufferId = f.getBufferId(f.add(1));
+ for (uint32_t i = 0; i < (F1::EntryRefType::offsetSize() - 2); ++i) {
+ uint32_t bufferId = f.getBufferId(f.add(i + 2));
+ EXPECT_EQUAL(firstBufferId, bufferId);
+ }
+ TEST_DO(f.assertStoreContent());
+
+ uint32_t bias = F1::EntryRefType::offsetSize();
+ uint32_t secondBufferId = f.getBufferId(f.add(bias + 1));
+ EXPECT_NOT_EQUAL(firstBufferId, secondBufferId);
+ for (uint32_t i = 0; i < 10u; ++i) {
+ uint32_t bufferId = f.getBufferId(f.add(bias + i + 2));
+ EXPECT_EQUAL(secondBufferId, bufferId);
+ }
+ TEST_DO(f.assertStoreContent());
+}
+
+TEST_F("require that compaction works", NumberFixture)
+{
+ EntryRef val1Ref = f.add(1);
+ EntryRef val2Ref = f.add(2);
+ f.remove(f.add(4));
+ f.trimHoldLists();
+ TEST_DO(f.assertBufferState(val1Ref, MemStats().used(4).dead(2))); // Note: First element is reserved
+ uint32_t val1BufferId = f.getBufferId(val1Ref);
+
+ EXPECT_EQUAL(2u, f.refStore.size());
+ f.compactWorst();
+ EXPECT_EQUAL(2u, f.refStore.size());
+ TEST_DO(f.assertStoreContent());
+
+ // Buffer has been compacted
+ EXPECT_NOT_EQUAL(val1BufferId, f.getBufferId(f.getEntryRef(1)));
+ // Old ref should still point to data.
+ f.assertGet(val1Ref, 1);
+ f.assertGet(val2Ref, 2);
+ EXPECT_TRUE(f.store.bufferState(val1Ref).isOnHold());
+ f.trimHoldLists();
+ EXPECT_TRUE(f.store.bufferState(val1Ref).isFree());
+ TEST_DO(f.assertStoreContent());
+}
+
+TEST_F("require that builder works", NumberFixture)
+{
+ auto builder = f.getBuilder(2);
+ builder.add(10);
+ builder.add(20);
+ builder.setupRefCounts();
+ EntryRef val10Ref = builder.mapEnumValueToEntryRef(1);
+ EntryRef val20Ref = builder.mapEnumValueToEntryRef(2);
+ TEST_DO(f.assertBufferState(val10Ref, MemStats().used(3).dead(1))); // Note: First element is reserved
+ EXPECT_TRUE(val10Ref.valid());
+ EXPECT_TRUE(val20Ref.valid());
+ EXPECT_NOT_EQUAL(val10Ref.ref(), val20Ref.ref());
+ f.assertGet(val10Ref, 10);
+ f.assertGet(val20Ref, 20);
+ builder.makeDictionary();
+ // Align refStore with the two entries added by the builder.
+ f.alignRefStore(val10Ref, 10, 1);
+ f.alignRefStore(val20Ref, 20, 1);
+ EXPECT_EQUAL(val10Ref.ref(), f.add(10).ref());
+ EXPECT_EQUAL(val20Ref.ref(), f.add(20).ref());
+}
+
+TEST_F("require that saver works", NumberFixture)
+{
+ EntryRef val10Ref = f.add(10);
+ EntryRef val20Ref = f.add(20);
+ f.remove(f.add(40));
+ f.trimHoldLists();
+
+ auto saver = f.getSaver();
+ std::vector<uint32_t> refs;
+ saver.foreach_key([&](EntryRef ref) { refs.push_back(ref.ref()); });
+ std::vector<uint32_t> expRefs;
+ expRefs.push_back(val10Ref.ref());
+ expRefs.push_back(val20Ref.ref());
+ EXPECT_EQUAL(expRefs, refs);
+ saver.enumerateValues();
+ uint32_t invalidEnum = saver.mapEntryRefToEnumValue(EntryRef());
+ uint32_t enumValue10 = saver.mapEntryRefToEnumValue(val10Ref);
+ uint32_t enumValue20 = saver.mapEntryRefToEnumValue(val20Ref);
+ EXPECT_EQUAL(0u, invalidEnum);
+ EXPECT_EQUAL(1u, enumValue10);
+ EXPECT_EQUAL(2u, enumValue20);
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }