Diffstat (limited to 'vespalib/src/tests')
-rw-r--r--  vespalib/src/tests/assert/.gitignore                                         |    1
-rw-r--r--  vespalib/src/tests/assert/CMakeLists.txt                                     |   16
-rw-r--r--  vespalib/src/tests/assert/assert_test.cpp                                    |   39
-rw-r--r--  vespalib/src/tests/assert/asserter.cpp                                       |   25
-rw-r--r--  vespalib/src/tests/btree/.gitignore                                          |    5
-rw-r--r--  vespalib/src/tests/btree/CMakeLists.txt                                      |   29
-rw-r--r--  vespalib/src/tests/btree/btree_test.cpp                                      | 1526
-rw-r--r--  vespalib/src/tests/btree/btreeaggregation_test.cpp                           | 1157
-rw-r--r--  vespalib/src/tests/btree/frozenbtree_test.cpp                                |  469
-rw-r--r--  vespalib/src/tests/btree/iteratespeed.cpp                                    |  212
-rw-r--r--  vespalib/src/tests/datastore/array_store/CMakeLists.txt                      |    8
-rw-r--r--  vespalib/src/tests/datastore/array_store/array_store_test.cpp                |  360
-rw-r--r--  vespalib/src/tests/datastore/array_store_config/CMakeLists.txt               |    8
-rw-r--r--  vespalib/src/tests/datastore/array_store_config/array_store_config_test.cpp  |   84
-rw-r--r--  vespalib/src/tests/datastore/buffer_type/CMakeLists.txt                      |    8
-rw-r--r--  vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp                |  116
-rw-r--r--  vespalib/src/tests/datastore/datastore/CMakeLists.txt                        |    9
-rw-r--r--  vespalib/src/tests/datastore/datastore/datastore_test.cpp                    |  584
-rw-r--r--  vespalib/src/tests/datastore/unique_store/CMakeLists.txt                     |    8
-rw-r--r--  vespalib/src/tests/datastore/unique_store/unique_store_test.cpp              |  267
20 files changed, 4931 insertions(+), 0 deletions(-)
diff --git a/vespalib/src/tests/assert/.gitignore b/vespalib/src/tests/assert/.gitignore
new file mode 100644
index 00000000000..605b8273f92
--- /dev/null
+++ b/vespalib/src/tests/assert/.gitignore
@@ -0,0 +1 @@
+vespalib_asserter_app
diff --git a/vespalib/src/tests/assert/CMakeLists.txt b/vespalib/src/tests/assert/CMakeLists.txt
new file mode 100644
index 00000000000..3c9780f1ec4
--- /dev/null
+++ b/vespalib/src/tests/assert/CMakeLists.txt
@@ -0,0 +1,16 @@
+# Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+vespa_add_executable(vespalib_assert_test_app TEST
+ SOURCES
+ assert_test.cpp
+ DEPENDS
+ vespalib
+)
+vespa_add_test(NAME vespalib_assert_test_app COMMAND vespalib_assert_test_app)
+
+vespa_add_executable(vespalib_asserter_app TEST
+ SOURCES
+ asserter.cpp
+ DEPENDS
+ vespalib
+)
diff --git a/vespalib/src/tests/assert/assert_test.cpp b/vespalib/src/tests/assert/assert_test.cpp
new file mode 100644
index 00000000000..454c0957974
--- /dev/null
+++ b/vespalib/src/tests/assert/assert_test.cpp
@@ -0,0 +1,39 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/vespalib/util/slaveproc.h>
+#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/vespalib/util/assert.h>
+#include <vespa/vespalib/io/fileutil.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <vespa/defaults.h>
+
+using namespace vespalib;
+
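+// The asserter child process hits ASSERT_ONCE_OR_LOG on a fresh key:
+// the first run records the assert and aborts (SIGABRT, signal 6), a rerun
+// finds the recorded assert, only logs it and exits cleanly after printing
+// the record file name, and removing the record file makes the next run
+// abort and re-create it.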
+TEST("that it borks the first time.") {
+ std::string assertName;
+ const char * assertDir = "var/db/vespa/tmp";
+ vespalib::rmdir("var", true);
+ ASSERT_TRUE(vespalib::mkdir(assertDir, true));
+ {
+        SlaveProc proc("ulimit -c 0 && exec env VESPA_HOME=./ ./vespalib_asserter_app myassert 10000");
+ proc.wait();
+ ASSERT_EQUAL(proc.getExitCode() & 0x7f, 6);
+ }
+ {
+        SlaveProc proc("ulimit -c 0 && exec env VESPA_HOME=./ ./vespalib_asserter_app myassert 10000");
+ proc.readLine(assertName);
+ proc.wait();
+ ASSERT_EQUAL(proc.getExitCode() & 0x7f, 0);
+ }
+ ASSERT_EQUAL(0, unlink(assertName.c_str()));
+ {
+        SlaveProc proc("ulimit -c 0 && exec env VESPA_HOME=./ ./vespalib_asserter_app myassert 10000");
+ proc.wait();
+ ASSERT_EQUAL(proc.getExitCode() & 0x7f, 6);
+ }
+ ASSERT_EQUAL(0, unlink(assertName.c_str()));
+ ASSERT_TRUE(vespalib::rmdir("var", true));
+}
+
+TEST_MAIN_WITH_PROCESS_PROXY() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/tests/assert/asserter.cpp b/vespalib/src/tests/assert/asserter.cpp
new file mode 100644
index 00000000000..640464889c0
--- /dev/null
+++ b/vespalib/src/tests/assert/asserter.cpp
@@ -0,0 +1,25 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/vespalib/util/assert.h>
+#include <cassert>
+#include <cstdlib>
+#include <fstream>
+#include <string>
+
+int main(int argc, char *argv[]) {
+ assert(argc == 3);
+ const char * assertKey = argv[1];
+ size_t assertCount = strtoul(argv[2], nullptr, 0);
+ for (size_t i(0); i < assertCount; i++) {
+ ASSERT_ONCE_OR_LOG(true, assertKey, 100);
+ ASSERT_ONCE_OR_LOG(false, assertKey, 100);
+ }
+ std::string filename = vespalib::assert::getAssertLogFileName(assertKey);
+ std::ifstream is(filename.c_str());
+ assert(is);
+ std::string line;
+ std::getline(is, line);
+ printf("%s\n", filename.c_str());
+ assert(line.find(assertKey) != std::string::npos);
+ assert(assertCount == vespalib::assert::getNumAsserts(assertKey));
+ return 0;
+}
diff --git a/vespalib/src/tests/btree/.gitignore b/vespalib/src/tests/btree/.gitignore
new file mode 100644
index 00000000000..4dc55d5704a
--- /dev/null
+++ b/vespalib/src/tests/btree/.gitignore
@@ -0,0 +1,5 @@
+iteratespeed
+vespalib_btree_test_app
+vespalib_btreeaggregation_test_app
+vespalib_frozenbtree_test_app
+vespalib_iteratespeed_app
diff --git a/vespalib/src/tests/btree/CMakeLists.txt b/vespalib/src/tests/btree/CMakeLists.txt
new file mode 100644
index 00000000000..b6bdcb5160e
--- /dev/null
+++ b/vespalib/src/tests/btree/CMakeLists.txt
@@ -0,0 +1,29 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_btree_test_app TEST
+ SOURCES
+ btree_test.cpp
+ DEPENDS
+ vespalib
+)
+vespa_add_test(NAME vespalib_btree_test_app COMMAND vespalib_btree_test_app)
+vespa_add_executable(vespalib_frozenbtree_test_app TEST
+ SOURCES
+ frozenbtree_test.cpp
+ DEPENDS
+ vespalib
+)
+vespa_add_test(NAME vespalib_frozenbtree_test_app COMMAND vespalib_frozenbtree_test_app)
+vespa_add_executable(vespalib_btreeaggregation_test_app TEST
+ SOURCES
+ btreeaggregation_test.cpp
+ DEPENDS
+ vespalib
+)
+vespa_add_test(NAME vespalib_btreeaggregation_test_app COMMAND vespalib_btreeaggregation_test_app)
+vespa_add_executable(vespalib_iteratespeed_app
+ SOURCES
+ iteratespeed.cpp
+ DEPENDS
+ vespalib
+)
+vespa_add_test(NAME vespalib_iteratespeed_app COMMAND vespalib_iteratespeed_app BENCHMARK)
diff --git a/vespalib/src/tests/btree/btree_test.cpp b/vespalib/src/tests/btree/btree_test.cpp
new file mode 100644
index 00000000000..4090283c10f
--- /dev/null
+++ b/vespalib/src/tests/btree/btree_test.cpp
@@ -0,0 +1,1526 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/log/log.h>
+LOG_SETUP("btree_test");
+#include <vespa/vespalib/testkit/testapp.h>
+#include <string>
+#include <vespa/vespalib/btree/btreeroot.h>
+#include <vespa/vespalib/btree/btreebuilder.h>
+#include <vespa/vespalib/btree/btreenodeallocator.h>
+#include <vespa/vespalib/btree/btree.h>
+#include <vespa/vespalib/btree/btreestore.h>
+#include <vespa/vespalib/util/rand48.h>
+
+#include <vespa/vespalib/btree/btreenodeallocator.hpp>
+#include <vespa/vespalib/btree/btreenode.hpp>
+#include <vespa/vespalib/btree/btreenodestore.hpp>
+#include <vespa/vespalib/btree/btreeiterator.hpp>
+#include <vespa/vespalib/btree/btreeroot.hpp>
+#include <vespa/vespalib/btree/btreebuilder.hpp>
+#include <vespa/vespalib/btree/btree.hpp>
+#include <vespa/vespalib/btree/btreestore.hpp>
+#include <vespa/vespalib/test/btree/btree_printer.h>
+
+using vespalib::GenerationHandler;
+using search::datastore::EntryRef;
+
+namespace search {
+namespace btree {
+
+namespace {
+
+template <typename T>
+std::string
+toStr(const T & v)
+{
+ std::stringstream ss;
+ ss << v;
+ return ss.str();
+}
+
+}
+
+typedef BTreeTraits<4, 4, 31, false> MyTraits;
+
+#define KEYWRAP
+
+#ifdef KEYWRAP
+
+// Force use of functor to compare keys.
+class WrapInt
+{
+public:
+ int _val;
+ WrapInt(int val) : _val(val) {}
+ WrapInt() : _val(0) {}
+ bool operator==(const WrapInt & rhs) const { return _val == rhs._val; }
+};
+
+std::ostream &
+operator<<(std::ostream &s, const WrapInt &i)
+{
+ s << i._val;
+ return s;
+}
+
+typedef WrapInt MyKey;
+class MyComp
+{
+public:
+ bool
+ operator()(const WrapInt &a, const WrapInt &b) const
+ {
+ return a._val < b._val;
+ }
+};
+
+#define UNWRAP(key) (key._val)
+#else
+typedef int MyKey;
+typedef std::less<int> MyComp;
+#define UNWRAP(key) (key)
+#endif
+
+typedef BTree<MyKey, std::string,
+ btree::NoAggregated,
+ MyComp, MyTraits> MyTree;
+typedef BTreeStore<MyKey, std::string,
+ btree::NoAggregated,
+ MyComp, MyTraits> MyTreeStore;
+typedef MyTree::Builder MyTreeBuilder;
+typedef MyTree::LeafNodeType MyLeafNode;
+typedef MyTree::InternalNodeType MyInternalNode;
+typedef MyTree::NodeAllocatorType MyNodeAllocator;
+typedef std::pair<MyKey, std::string> LeafPair;
+typedef MyTreeStore::KeyDataType MyKeyData;
+typedef MyTreeStore::KeyDataTypeRefPair MyKeyDataRefPair;
+
+typedef BTree<int, BTreeNoLeafData, btree::NoAggregated> SetTreeB;
+
+typedef BTreeTraits<16, 16, 10, false> LSeekTraits;
+typedef BTree<int, BTreeNoLeafData, btree::NoAggregated,
+ std::less<int>, LSeekTraits> SetTreeL;
+
+struct LeafPairLess {
+ bool operator()(const LeafPair & lhs, const LeafPair & rhs) const {
+ return UNWRAP(lhs.first) < UNWRAP(rhs.first);
+ }
+};
+
+template <typename ManagerType>
+void
+cleanup(GenerationHandler & g, ManagerType & m)
+{
+ m.freeze();
+ m.transferHoldLists(g.getCurrentGeneration());
+ g.incGeneration();
+ m.trimHoldLists(g.getFirstUsedGeneration());
+}
+
+template <typename ManagerType, typename NodeType>
+void
+cleanup(GenerationHandler & g,
+ ManagerType & m,
+ BTreeNode::Ref n1Ref, NodeType * n1,
+ BTreeNode::Ref n2Ref = BTreeNode::Ref(), NodeType * n2 = NULL)
+{
+ assert(ManagerType::isValidRef(n1Ref));
+ m.holdNode(n1Ref, n1);
+ if (n2 != NULL) {
+ assert(ManagerType::isValidRef(n2Ref));
+ m.holdNode(n2Ref, n2);
+ } else {
+ assert(!ManagerType::isValidRef(n2Ref));
+ }
+ cleanup(g, m);
+}
+
+template<typename Tree>
+bool
+assertTree(const std::string &exp, const Tree &t)
+{
+ std::stringstream ss;
+ test::BTreePrinter<std::stringstream, typename Tree::NodeAllocatorType> printer(ss, t.getAllocator());
+ printer.print(t.getRoot());
+ if (!EXPECT_EQUAL(exp, ss.str())) return false;
+ return true;
+}
+
+template <typename Tree>
+void
+populateTree(Tree &t, uint32_t count, uint32_t delta)
+{
+ uint32_t key = 1;
+ int32_t value = 101;
+ for (uint32_t i = 0; i < count; ++i) {
+ t.insert(key, value);
+ key += delta;
+ value += delta;
+ }
+}
+
+template <typename Tree>
+void
+populateLeafNode(Tree &t)
+{
+ populateTree(t, 4, 2);
+}
+
+
+class Test : public vespalib::TestApp {
+private:
+ template <typename LeafNodeType>
+ bool assertLeafNode(const std::string & exp, const LeafNodeType & n);
+ bool assertSeek(int skey, int ekey, const MyTree & tree);
+ bool assertSeek(int skey, int ekey, MyTree::Iterator & itr);
+ bool assertMemoryUsage(const vespalib::MemoryUsage & exp, const vespalib::MemoryUsage & act);
+
+ void
+ buildSubTree(const std::vector<LeafPair> &sub,
+ size_t numEntries);
+
+ void requireThatNodeInsertWorks();
+ void requireThatTreeInsertWorks();
+ void requireThatNodeSplitInsertWorks();
+ void requireThatNodeStealWorks();
+ void requireThatTreeRemoveStealWorks();
+ void requireThatNodeRemoveWorks();
+ void requireThatNodeLowerBoundWorks();
+ void requireThatWeCanInsertAndRemoveFromTree();
+ void requireThatSortedTreeInsertWorks();
+ void requireThatCornerCaseTreeFindWorks();
+ void requireThatBasicTreeIteratorWorks();
+ void requireThatTreeIteratorSeekWorks();
+ void requireThatTreeIteratorAssignWorks();
+ void requireThatMemoryUsageIsCalculated();
+ template <typename TreeType>
+ void requireThatLowerBoundWorksT();
+ void requireThatLowerBoundWorks();
+ template <typename TreeType>
+ void requireThatUpperBoundWorksT();
+ void requireThatUpperBoundWorks();
+ void requireThatUpdateOfKeyWorks();
+
+ void
+ requireThatSmallNodesWorks();
+
+ void
+ requireThatApplyWorks();
+
+ void
+ requireThatIteratorDistanceWorks(int numEntries);
+
+ void
+ requireThatIteratorDistanceWorks();
+public:
+ int Main() override;
+};
+
+template <typename LeafNodeType>
+bool
+Test::assertLeafNode(const std::string & exp, const LeafNodeType & n)
+{
+ std::stringstream ss;
+ ss << "[";
+ for (uint32_t i = 0; i < n.validSlots(); ++i) {
+ if (i > 0) ss << ",";
+ ss << n.getKey(i) << ":" << n.getData(i);
+ }
+ ss << "]";
+ if (!EXPECT_EQUAL(exp, ss.str())) return false;
+ return true;
+}
+
+bool
+Test::assertSeek(int skey, int ekey, const MyTree & tree)
+{
+ MyTree::Iterator itr = tree.begin();
+ return assertSeek(skey, ekey, itr);
+}
+
+bool
+Test::assertSeek(int skey, int ekey, MyTree::Iterator & itr)
+{
+ MyTree::Iterator bseekItr = itr;
+ MyTree::Iterator lseekItr = itr;
+ bseekItr.binarySeek(skey);
+ lseekItr.linearSeek(skey);
+ if (!EXPECT_EQUAL(ekey, UNWRAP(bseekItr.getKey()))) return false;
+ if (!EXPECT_EQUAL(ekey, UNWRAP(lseekItr.getKey()))) return false;
+ itr = bseekItr;
+ return true;
+}
+
+bool
+Test::assertMemoryUsage(const vespalib::MemoryUsage & exp, const vespalib::MemoryUsage & act)
+{
+ if (!EXPECT_EQUAL(exp.allocatedBytes(), act.allocatedBytes())) return false;
+ if (!EXPECT_EQUAL(exp.usedBytes(), act.usedBytes())) return false;
+ if (!EXPECT_EQUAL(exp.deadBytes(), act.deadBytes())) return false;
+ if (!EXPECT_EQUAL(exp.allocatedBytesOnHold(), act.allocatedBytesOnHold())) return false;
+ return true;
+}
+
+void
+Test::requireThatNodeInsertWorks()
+{
+ GenerationHandler g;
+ MyNodeAllocator m;
+ MyLeafNode::RefPair nPair = m.allocLeafNode();
+ MyLeafNode *n = nPair.data;
+ EXPECT_TRUE(n->isLeaf());
+ EXPECT_EQUAL(0u, n->validSlots());
+ n->insert(0, 20, "b");
+ EXPECT_TRUE(!n->isFull());
+ EXPECT_TRUE(!n->isAtLeastHalfFull());
+ EXPECT_TRUE(assertLeafNode("[20:b]", *n));
+ n->insert(0, 10, "a");
+ EXPECT_TRUE(!n->isFull());
+ EXPECT_TRUE(n->isAtLeastHalfFull());
+ EXPECT_TRUE(assertLeafNode("[10:a,20:b]", *n));
+ EXPECT_EQUAL(20, UNWRAP(n->getLastKey()));
+ EXPECT_EQUAL("b", n->getLastData());
+ n->insert(2, 30, "c");
+ EXPECT_TRUE(!n->isFull());
+ n->insert(3, 40, "d");
+ EXPECT_TRUE(n->isFull());
+ EXPECT_TRUE(n->isAtLeastHalfFull());
+ EXPECT_TRUE(assertLeafNode("[10:a,20:b,30:c,40:d]", *n));
+ cleanup(g, m, nPair.ref, n);
+}
+
+void
+Test::requireThatTreeInsertWorks()
+{
+ using Tree = BTree<MyKey, int32_t, btree::NoAggregated, MyComp, MyTraits>;
+ {
+ Tree t;
+ EXPECT_TRUE(assertTree("{}", t));
+ t.insert(20, 102);
+ EXPECT_TRUE(assertTree("{{20:102}}", t));
+ t.insert(10, 101);
+ EXPECT_TRUE(assertTree("{{10:101,20:102}}", t));
+ t.insert(30, 103);
+ t.insert(40, 104);
+ EXPECT_TRUE(assertTree("{{10:101,20:102,30:103,40:104}}", t));
+ }
+ { // new entry in current node
+ Tree t;
+ populateLeafNode(t);
+ t.insert(4, 104);
+ EXPECT_TRUE(assertTree("{{4,7}} -> "
+ "{{1:101,3:103,4:104},"
+ "{5:105,7:107}}", t));
+ }
+ { // new entry in split node
+ Tree t;
+ populateLeafNode(t);
+ t.insert(6, 106);
+ EXPECT_TRUE(assertTree("{{5,7}} -> "
+ "{{1:101,3:103,5:105},"
+ "{6:106,7:107}}", t));
+ }
+ { // new entry at end
+ Tree t;
+ populateLeafNode(t);
+ t.insert(8, 108);
+ EXPECT_TRUE(assertTree("{{5,8}} -> "
+ "{{1:101,3:103,5:105},"
+ "{7:107,8:108}}", t));
+ }
+ { // multi level node split
+ Tree t;
+ populateTree(t, 16, 2);
+ EXPECT_TRUE(assertTree("{{7,15,23,31}} -> "
+ "{{1:101,3:103,5:105,7:107},"
+ "{9:109,11:111,13:113,15:115},"
+ "{17:117,19:119,21:121,23:123},"
+ "{25:125,27:127,29:129,31:131}}", t));
+ t.insert(33, 133);
+ EXPECT_TRUE(assertTree("{{23,33}} -> "
+ "{{7,15,23},{29,33}} -> "
+ "{{1:101,3:103,5:105,7:107},"
+ "{9:109,11:111,13:113,15:115},"
+ "{17:117,19:119,21:121,23:123},"
+ "{25:125,27:127,29:129},"
+ "{31:131,33:133}}", t));
+ }
+ { // give to left node to avoid split
+ Tree t;
+ populateTree(t, 8, 2);
+ t.remove(5);
+ EXPECT_TRUE(assertTree("{{7,15}} -> "
+ "{{1:101,3:103,7:107},"
+ "{9:109,11:111,13:113,15:115}}", t));
+ t.insert(10, 110);
+ EXPECT_TRUE(assertTree("{{9,15}} -> "
+ "{{1:101,3:103,7:107,9:109},"
+ "{10:110,11:111,13:113,15:115}}", t));
+ }
+ { // give to left node to avoid split, and move to left node
+ Tree t;
+ populateTree(t, 8, 2);
+ t.remove(3);
+ t.remove(5);
+ EXPECT_TRUE(assertTree("{{7,15}} -> "
+ "{{1:101,7:107},"
+ "{9:109,11:111,13:113,15:115}}", t));
+ t.insert(8, 108);
+ EXPECT_TRUE(assertTree("{{9,15}} -> "
+ "{{1:101,7:107,8:108,9:109},"
+ "{11:111,13:113,15:115}}", t));
+ }
+    { // don't move an existing key to the left node to avoid a split; instead insert the new key at the end of the left node
+ Tree t;
+ populateTree(t, 8, 2);
+ t.remove(5);
+ EXPECT_TRUE(assertTree("{{7,15}} -> "
+ "{{1:101,3:103,7:107},"
+ "{9:109,11:111,13:113,15:115}}", t));
+ t.insert(8, 108);
+ EXPECT_TRUE(assertTree("{{8,15}} -> "
+ "{{1:101,3:103,7:107,8:108},"
+ "{9:109,11:111,13:113,15:115}}", t));
+ }
+ { // give to right node to avoid split
+ Tree t;
+ populateTree(t, 8, 2);
+ t.remove(13);
+ EXPECT_TRUE(assertTree("{{7,15}} -> "
+ "{{1:101,3:103,5:105,7:107},"
+ "{9:109,11:111,15:115}}", t));
+ t.insert(4, 104);
+ EXPECT_TRUE(assertTree("{{5,15}} -> "
+ "{{1:101,3:103,4:104,5:105},"
+ "{7:107,9:109,11:111,15:115}}", t));
+ }
+ { // give to right node to avoid split and move to right node
+ using MyTraits6 = BTreeTraits<6, 6, 31, false>;
+ using Tree6 = BTree<MyKey, int32_t, btree::NoAggregated, MyComp, MyTraits6>;
+
+ Tree6 t;
+ populateTree(t, 12, 2);
+ t.remove(19);
+ t.remove(21);
+ t.remove(23);
+ EXPECT_TRUE(assertTree("{{11,17}} -> "
+ "{{1:101,3:103,5:105,7:107,9:109,11:111},"
+ "{13:113,15:115,17:117}}", t));
+ t.insert(10, 110);
+ EXPECT_TRUE(assertTree("{{7,17}} -> "
+ "{{1:101,3:103,5:105,7:107},"
+ "{9:109,10:110,11:111,13:113,15:115,17:117}}", t));
+ }
+}
+
+MyLeafNode::RefPair
+getLeafNode(MyNodeAllocator &allocator)
+{
+ MyLeafNode::RefPair nPair = allocator.allocLeafNode();
+ MyLeafNode *n = nPair.data;
+ n->insert(0, 1, "a");
+ n->insert(1, 3, "c");
+ n->insert(2, 5, "e");
+ n->insert(3, 7, "g");
+ return nPair;
+}
+
+void
+Test::requireThatNodeSplitInsertWorks()
+{
+ { // new entry in current node
+ GenerationHandler g;
+ MyNodeAllocator m;
+ MyLeafNode::RefPair nPair = getLeafNode(m);
+ MyLeafNode *n = nPair.data;
+ MyLeafNode::RefPair sPair = m.allocLeafNode();
+ MyLeafNode *s = sPair.data;
+ n->splitInsert(s, 2, 4, "d");
+ EXPECT_TRUE(assertLeafNode("[1:a,3:c,4:d]", *n));
+ EXPECT_TRUE(assertLeafNode("[5:e,7:g]", *s));
+ cleanup(g, m, nPair.ref, n, sPair.ref, s);
+ }
+ { // new entry in split node
+ GenerationHandler g;
+ MyNodeAllocator m;
+ MyLeafNode::RefPair nPair = getLeafNode(m);
+ MyLeafNode *n = nPair.data;
+ MyLeafNode::RefPair sPair = m.allocLeafNode();
+ MyLeafNode *s = sPair.data;
+ n->splitInsert(s, 3, 6, "f");
+ EXPECT_TRUE(assertLeafNode("[1:a,3:c,5:e]", *n));
+ EXPECT_TRUE(assertLeafNode("[6:f,7:g]", *s));
+ cleanup(g, m, nPair.ref, n, sPair.ref, s);
+ }
+ { // new entry at end
+ GenerationHandler g;
+ MyNodeAllocator m;
+ MyLeafNode::RefPair nPair = getLeafNode(m);
+ MyLeafNode *n = nPair.data;
+ MyLeafNode::RefPair sPair = m.allocLeafNode();
+ MyLeafNode *s = sPair.data;
+ n->splitInsert(s, 4, 8, "h");
+ EXPECT_TRUE(assertLeafNode("[1:a,3:c,5:e]", *n));
+ EXPECT_TRUE(assertLeafNode("[7:g,8:h]", *s));
+ cleanup(g, m, nPair.ref, n, sPair.ref, s);
+ }
+}
+
+struct BTreeStealTraits
+{
+ static const size_t LEAF_SLOTS = 6;
+ static const size_t INTERNAL_SLOTS = 6;
+ static const size_t PATH_SIZE = 20;
+ static const bool BINARY_SEEK = true;
+};
+
+void
+Test::requireThatNodeStealWorks()
+{
+ typedef BTreeLeafNode<int, std::string,
+ btree::NoAggregated, 6> MyStealNode;
+ typedef BTreeNodeAllocator<int, std::string,
+ btree::NoAggregated,
+ BTreeStealTraits::INTERNAL_SLOTS, BTreeStealTraits::LEAF_SLOTS>
+ MyStealManager;
+ { // steal all from left
+ GenerationHandler g;
+ MyStealManager m;
+ MyStealNode::RefPair nPair = m.allocLeafNode();
+ MyStealNode *n = nPair.data;
+ n->insert(0, 4, "d");
+ n->insert(1, 5, "e");
+ EXPECT_TRUE(!n->isAtLeastHalfFull());
+ MyStealNode::RefPair vPair = m.allocLeafNode();
+ MyStealNode *v = vPair.data;
+ v->insert(0, 1, "a");
+ v->insert(1, 2, "b");
+ v->insert(2, 3, "c");
+ n->stealAllFromLeftNode(v);
+ EXPECT_TRUE(n->isAtLeastHalfFull());
+ EXPECT_TRUE(assertLeafNode("[1:a,2:b,3:c,4:d,5:e]", *n));
+ cleanup(g, m, nPair.ref, n, vPair.ref, v);
+ }
+ { // steal all from right
+ GenerationHandler g;
+ MyStealManager m;
+ MyStealNode::RefPair nPair = m.allocLeafNode();
+ MyStealNode *n = nPair.data;
+ n->insert(0, 1, "a");
+ n->insert(1, 2, "b");
+ EXPECT_TRUE(!n->isAtLeastHalfFull());
+ MyStealNode::RefPair vPair = m.allocLeafNode();
+ MyStealNode *v = vPair.data;
+ v->insert(0, 3, "c");
+ v->insert(1, 4, "d");
+ v->insert(2, 5, "e");
+ n->stealAllFromRightNode(v);
+ EXPECT_TRUE(n->isAtLeastHalfFull());
+ EXPECT_TRUE(assertLeafNode("[1:a,2:b,3:c,4:d,5:e]", *n));
+ cleanup(g, m, nPair.ref, n, vPair.ref, v);
+ }
+ { // steal some from left
+ GenerationHandler g;
+ MyStealManager m;
+ MyStealNode::RefPair nPair = m.allocLeafNode();
+ MyStealNode *n = nPair.data;
+ n->insert(0, 5, "e");
+ n->insert(1, 6, "f");
+ EXPECT_TRUE(!n->isAtLeastHalfFull());
+ MyStealNode::RefPair vPair = m.allocLeafNode();
+ MyStealNode *v = vPair.data;
+ v->insert(0, 1, "a");
+ v->insert(1, 2, "b");
+ v->insert(2, 3, "c");
+ v->insert(3, 4, "d");
+ n->stealSomeFromLeftNode(v);
+ EXPECT_TRUE(n->isAtLeastHalfFull());
+ EXPECT_TRUE(v->isAtLeastHalfFull());
+ EXPECT_TRUE(assertLeafNode("[4:d,5:e,6:f]", *n));
+ EXPECT_TRUE(assertLeafNode("[1:a,2:b,3:c]", *v));
+ cleanup(g, m, nPair.ref, n, vPair.ref, v);
+ }
+ { // steal some from right
+ GenerationHandler g;
+ MyStealManager m;
+ MyStealNode::RefPair nPair = m.allocLeafNode();
+ MyStealNode *n = nPair.data;
+ n->insert(0, 1, "a");
+ n->insert(1, 2, "b");
+ EXPECT_TRUE(!n->isAtLeastHalfFull());
+ MyStealNode::RefPair vPair = m.allocLeafNode();
+ MyStealNode *v = vPair.data;
+ v->insert(0, 3, "c");
+ v->insert(1, 4, "d");
+ v->insert(2, 5, "e");
+ v->insert(3, 6, "f");
+ n->stealSomeFromRightNode(v);
+ EXPECT_TRUE(n->isAtLeastHalfFull());
+ EXPECT_TRUE(v->isAtLeastHalfFull());
+ EXPECT_TRUE(assertLeafNode("[1:a,2:b,3:c]", *n));
+ EXPECT_TRUE(assertLeafNode("[4:d,5:e,6:f]", *v));
+ cleanup(g, m, nPair.ref, n, vPair.ref, v);
+ }
+}
+
+void
+Test::requireThatTreeRemoveStealWorks()
+{
+ using MyStealTree = BTree<MyKey, int32_t,btree::NoAggregated, MyComp, BTreeStealTraits, NoAggrCalc>;
+ { // steal all from left
+ MyStealTree t;
+ t.insert(10, 110);
+ t.insert(20, 120);
+ t.insert(30, 130);
+ t.insert(40, 140);
+ t.insert(50, 150);
+ t.insert(60, 160);
+ t.insert(35, 135);
+ t.remove(35);
+ EXPECT_TRUE(assertTree("{{30,60}} -> "
+ "{{10:110,20:120,30:130},"
+ "{40:140,50:150,60:160}}", t));
+ t.remove(50);
+ EXPECT_TRUE(assertTree("{{10:110,20:120,30:130,40:140,60:160}}", t));
+ }
+ { // steal all from right
+ MyStealTree t;
+ t.insert(10, 110);
+ t.insert(20, 120);
+ t.insert(30, 130);
+ t.insert(40, 140);
+ t.insert(50, 150);
+ t.insert(60, 160);
+ t.insert(35, 135);
+ t.remove(35);
+ EXPECT_TRUE(assertTree("{{30,60}} -> "
+ "{{10:110,20:120,30:130},"
+ "{40:140,50:150,60:160}}", t));
+ t.remove(20);
+ EXPECT_TRUE(assertTree("{{10:110,30:130,40:140,50:150,60:160}}", t));
+ }
+ { // steal some from left
+ MyStealTree t;
+ t.insert(10, 110);
+ t.insert(20, 120);
+ t.insert(30, 130);
+ t.insert(60, 160);
+ t.insert(70, 170);
+ t.insert(80, 180);
+ t.insert(50, 150);
+ t.insert(40, 140);
+ EXPECT_TRUE(assertTree("{{50,80}} -> "
+ "{{10:110,20:120,30:130,40:140,50:150},"
+ "{60:160,70:170,80:180}}", t));
+ t.remove(60);
+ EXPECT_TRUE(assertTree("{{30,80}} -> "
+ "{{10:110,20:120,30:130},"
+ "{40:140,50:150,70:170,80:180}}", t));
+ }
+ { // steal some from right
+ MyStealTree t;
+ t.insert(10, 110);
+ t.insert(20, 120);
+ t.insert(30, 130);
+ t.insert(40, 140);
+ t.insert(50, 150);
+ t.insert(60, 160);
+ t.insert(70, 170);
+ t.insert(80, 180);
+ t.insert(90, 190);
+ t.remove(40);
+ EXPECT_TRUE(assertTree("{{30,90}} -> "
+ "{{10:110,20:120,30:130},"
+ "{50:150,60:160,70:170,80:180,90:190}}", t));
+ t.remove(20);
+ EXPECT_TRUE(assertTree("{{60,90}} -> "
+ "{{10:110,30:130,50:150,60:160},"
+ "{70:170,80:180,90:190}}", t));
+ }
+}
+
+void
+Test::requireThatNodeRemoveWorks()
+{
+ GenerationHandler g;
+ MyNodeAllocator m;
+ MyLeafNode::RefPair nPair = getLeafNode(m);
+ MyLeafNode *n = nPair.data;
+ n->remove(1);
+ EXPECT_TRUE(assertLeafNode("[1:a,5:e,7:g]", *n));
+ cleanup(g, m, nPair.ref, n);
+}
+
+void
+Test::requireThatNodeLowerBoundWorks()
+{
+ GenerationHandler g;
+ MyNodeAllocator m;
+ MyLeafNode::RefPair nPair = getLeafNode(m);
+ MyLeafNode *n = nPair.data;
+ EXPECT_EQUAL(1u, n->lower_bound(3, MyComp()));
+ EXPECT_FALSE(MyComp()(3, n->getKey(1u)));
+ EXPECT_EQUAL(0u, n->lower_bound(0, MyComp()));
+ EXPECT_TRUE(MyComp()(0, n->getKey(0u)));
+ EXPECT_EQUAL(1u, n->lower_bound(2, MyComp()));
+ EXPECT_TRUE(MyComp()(2, n->getKey(1u)));
+ EXPECT_EQUAL(3u, n->lower_bound(6, MyComp()));
+ EXPECT_TRUE(MyComp()(6, n->getKey(3u)));
+ EXPECT_EQUAL(4u, n->lower_bound(8, MyComp()));
+ cleanup(g, m, nPair.ref, n);
+}
+
+void
+generateData(std::vector<LeafPair> & data, size_t numEntries)
+{
+ data.reserve(numEntries);
+ Rand48 rnd;
+ rnd.srand48(10);
+ for (size_t i = 0; i < numEntries; ++i) {
+ int num = rnd.lrand48() % 10000000;
+ std::string str = toStr(num);
+ data.push_back(std::make_pair(num, str));
+ }
+}
+
+
+void
+Test::buildSubTree(const std::vector<LeafPair> &sub,
+ size_t numEntries)
+{
+ GenerationHandler g;
+ MyTree tree;
+ MyTreeBuilder builder(tree.getAllocator());
+
+ std::vector<LeafPair> sorted(sub.begin(), sub.begin() + numEntries);
+ std::sort(sorted.begin(), sorted.end(), LeafPairLess());
+ for (size_t i = 0; i < numEntries; ++i) {
+ int num = UNWRAP(sorted[i].first);
+ const std::string & str = sorted[i].second;
+ builder.insert(num, str);
+ }
+ tree.assign(builder);
+ assert(numEntries == tree.size());
+ assert(tree.isValid());
+ EXPECT_EQUAL(numEntries, tree.size());
+ EXPECT_TRUE(tree.isValid());
+ MyTree::Iterator itr = tree.begin();
+ MyTree::Iterator ritr = itr;
+ if (numEntries > 0) {
+ EXPECT_TRUE(ritr.valid());
+ EXPECT_EQUAL(0u, ritr.position());
+ --ritr;
+ EXPECT_TRUE(!ritr.valid());
+ EXPECT_EQUAL(numEntries, ritr.position());
+ --ritr;
+ EXPECT_TRUE(ritr.valid());
+ EXPECT_EQUAL(numEntries - 1, ritr.position());
+ } else {
+ EXPECT_TRUE(!ritr.valid());
+ EXPECT_EQUAL(0u, ritr.position());
+ --ritr;
+ EXPECT_TRUE(!ritr.valid());
+ EXPECT_EQUAL(0u, ritr.position());
+ }
+ for (size_t i = 0; i < numEntries; ++i) {
+ EXPECT_TRUE(itr.valid());
+ EXPECT_EQUAL(sorted[i].first, itr.getKey());
+ EXPECT_EQUAL(sorted[i].second, itr.getData());
+ ++itr;
+ }
+ EXPECT_TRUE(!itr.valid());
+ ritr = itr;
+ EXPECT_TRUE(!ritr.valid());
+ --ritr;
+ for (size_t i = 0; i < numEntries; ++i) {
+ EXPECT_TRUE(ritr.valid());
+ EXPECT_EQUAL(sorted[numEntries - 1 - i].first, ritr.getKey());
+ EXPECT_EQUAL(sorted[numEntries - 1 - i].second, ritr.getData());
+ --ritr;
+ }
+ EXPECT_TRUE(!ritr.valid());
+}
+
+void
+Test::requireThatWeCanInsertAndRemoveFromTree()
+{
+ GenerationHandler g;
+ MyTree tree;
+ std::vector<LeafPair> exp;
+ std::vector<LeafPair> sorted;
+ size_t numEntries = 1000;
+ generateData(exp, numEntries);
+ sorted = exp;
+ std::sort(sorted.begin(), sorted.end(), LeafPairLess());
+ // insert entries
+ for (size_t i = 0; i < numEntries; ++i) {
+ int num = UNWRAP(exp[i].first);
+ const std::string & str = exp[i].second;
+ EXPECT_TRUE(!tree.find(num).valid());
+ //LOG(info, "insert[%zu](%d, %s)", i, num, str.c_str());
+ EXPECT_TRUE(tree.insert(num, str));
+ EXPECT_TRUE(!tree.insert(num, str));
+ for (size_t j = 0; j <= i; ++j) {
+ //LOG(info, "find[%zu](%d)", j, exp[j].first._val);
+ MyTree::Iterator itr = tree.find(exp[j].first);
+ EXPECT_TRUE(itr.valid());
+ EXPECT_EQUAL(exp[j].first, itr.getKey());
+ EXPECT_EQUAL(exp[j].second, itr.getData());
+ }
+ EXPECT_EQUAL(i + 1u, tree.size());
+ EXPECT_TRUE(tree.isValid());
+ buildSubTree(exp, i + 1);
+ }
+ //std::cout << "tree: " << tree.toString() << std::endl;
+
+ {
+ MyTree::Iterator itr = tree.begin();
+ MyTree::Iterator itre = itr;
+ MyTree::Iterator itre2;
+ MyTree::Iterator ritr = itr;
+ while (itre.valid())
+ ++itre;
+ if (numEntries > 0) {
+ EXPECT_TRUE(ritr.valid());
+ EXPECT_EQUAL(0u, ritr.position());
+ --ritr;
+ EXPECT_TRUE(!ritr.valid());
+ EXPECT_EQUAL(numEntries, ritr.position());
+ --ritr;
+ EXPECT_TRUE(ritr.valid());
+ EXPECT_EQUAL(numEntries - 1, ritr.position());
+ } else {
+ EXPECT_TRUE(!ritr.valid());
+ EXPECT_EQUAL(0u, ritr.position());
+ --ritr;
+ EXPECT_TRUE(!ritr.valid());
+ EXPECT_EQUAL(0u, ritr.position());
+ }
+ MyTree::Iterator pitr = itr;
+ for (size_t i = 0; i < numEntries; ++i) {
+ ssize_t si = i;
+ ssize_t sileft = numEntries - i;
+ EXPECT_TRUE(itr.valid());
+ EXPECT_EQUAL(i, itr.position());
+ EXPECT_EQUAL(sileft, itre - itr);
+ EXPECT_EQUAL(-sileft, itr - itre);
+ EXPECT_EQUAL(sileft, itre2 - itr);
+ EXPECT_EQUAL(-sileft, itr - itre2);
+ EXPECT_EQUAL(si, itr - tree.begin());
+ EXPECT_EQUAL(-si, tree.begin() - itr);
+ EXPECT_EQUAL(i != 0, itr - pitr);
+ EXPECT_EQUAL(-(i != 0), pitr - itr);
+ EXPECT_EQUAL(sorted[i].first, itr.getKey());
+ EXPECT_EQUAL(sorted[i].second, itr.getData());
+ pitr = itr;
+ ++itr;
+ ritr = itr;
+ --ritr;
+ EXPECT_TRUE(ritr.valid());
+ EXPECT_TRUE(ritr == pitr);
+ }
+ EXPECT_TRUE(!itr.valid());
+ EXPECT_EQUAL(numEntries, itr.position());
+ ssize_t sNumEntries = numEntries;
+ EXPECT_EQUAL(sNumEntries, itr - tree.begin());
+ EXPECT_EQUAL(-sNumEntries, tree.begin() - itr);
+ EXPECT_EQUAL(1, itr - pitr);
+ EXPECT_EQUAL(-1, pitr - itr);
+ }
+ // compact full tree by calling incremental compaction methods in a loop
+ {
+ MyTree::NodeAllocatorType &manager = tree.getAllocator();
+ std::vector<uint32_t> toHold = manager.startCompact();
+ MyTree::Iterator itr = tree.begin();
+ tree.setRoot(itr.moveFirstLeafNode(tree.getRoot()));
+ while (itr.valid()) {
+ // LOG(info, "Leaf moved to %d", UNWRAP(itr.getKey()));
+ itr.moveNextLeafNode();
+ }
+ manager.finishCompact(toHold);
+ manager.freeze();
+ manager.transferHoldLists(g.getCurrentGeneration());
+ g.incGeneration();
+ manager.trimHoldLists(g.getFirstUsedGeneration());
+ }
+ // remove entries
+ for (size_t i = 0; i < numEntries; ++i) {
+ int num = UNWRAP(exp[i].first);
+ //LOG(info, "remove[%zu](%d)", i, num);
+ //std::cout << "tree: " << tree.toString() << std::endl;
+ EXPECT_TRUE(tree.remove(num));
+ EXPECT_TRUE(!tree.find(num).valid());
+ EXPECT_TRUE(!tree.remove(num));
+ EXPECT_TRUE(tree.isValid());
+ for (size_t j = i + 1; j < numEntries; ++j) {
+ MyTree::Iterator itr = tree.find(exp[j].first);
+ EXPECT_TRUE(itr.valid());
+ EXPECT_EQUAL(exp[j].first, itr.getKey());
+ EXPECT_EQUAL(exp[j].second, itr.getData());
+ }
+ EXPECT_EQUAL(numEntries - 1 - i, tree.size());
+ }
+}
+
+void
+Test::requireThatSortedTreeInsertWorks()
+{
+ {
+ GenerationHandler g;
+ MyTree tree;
+ for (int i = 0; i < 1000; ++i) {
+ EXPECT_TRUE(tree.insert(i, toStr(i)));
+ MyTree::Iterator itr = tree.find(i);
+ EXPECT_TRUE(itr.valid());
+ EXPECT_EQUAL(toStr(i), itr.getData());
+ EXPECT_TRUE(tree.isValid());
+ }
+ }
+ {
+ GenerationHandler g;
+ MyTree tree;
+ for (int i = 1000; i > 0; --i) {
+ EXPECT_TRUE(tree.insert(i, toStr(i)));
+ MyTree::Iterator itr = tree.find(i);
+ EXPECT_TRUE(itr.valid());
+ EXPECT_EQUAL(toStr(i), itr.getData());
+ EXPECT_TRUE(tree.isValid());
+ }
+ }
+}
+
+void
+Test::requireThatCornerCaseTreeFindWorks()
+{
+ GenerationHandler g;
+ MyTree tree;
+ for (int i = 1; i < 100; ++i) {
+ tree.insert(i, toStr(i));
+ }
+ EXPECT_TRUE(!tree.find(0).valid()); // lower than lowest
+ EXPECT_TRUE(!tree.find(1000).valid()); // higher than highest
+}
+
+void
+Test::requireThatBasicTreeIteratorWorks()
+{
+ GenerationHandler g;
+ MyTree tree;
+ EXPECT_TRUE(!tree.begin().valid());
+ std::vector<LeafPair> exp;
+ size_t numEntries = 1000;
+ generateData(exp, numEntries);
+ for (size_t i = 0; i < numEntries; ++i) {
+ tree.insert(exp[i].first, exp[i].second);
+ }
+ std::sort(exp.begin(), exp.end(), LeafPairLess());
+ size_t ei = 0;
+ MyTree::Iterator itr = tree.begin();
+ MyTree::Iterator ritr;
+ EXPECT_EQUAL(1000u, itr.size());
+ for (; itr.valid(); ++itr) {
+ //LOG(info, "itr(%d, %s)", itr.getKey(), itr.getData().c_str());
+ EXPECT_EQUAL(UNWRAP(exp[ei].first), UNWRAP(itr.getKey()));
+ EXPECT_EQUAL(exp[ei].second, itr.getData());
+ ei++;
+ ritr = itr;
+ }
+ EXPECT_EQUAL(numEntries, ei);
+ for (; ritr.valid(); --ritr) {
+ --ei;
+ //LOG(info, "itr(%d, %s)", itr.getKey(), itr.getData().c_str());
+ EXPECT_EQUAL(UNWRAP(exp[ei].first), UNWRAP(ritr.getKey()));
+ EXPECT_EQUAL(exp[ei].second, ritr.getData());
+ }
+}
+
+void
+Test::requireThatTreeIteratorSeekWorks()
+{
+ GenerationHandler g;
+ MyTree tree;
+ for (int i = 0; i < 40; i += 2) {
+ tree.insert(i, toStr(i));
+ }
+ //std::cout << tree.toString() << std::endl;
+ EXPECT_TRUE(assertSeek(2, 2, tree)); // next key
+ EXPECT_TRUE(assertSeek(10, 10, tree)); // skip to existing
+ EXPECT_TRUE(assertSeek(26, 26, tree)); // skip to existing
+ EXPECT_TRUE(assertSeek(11, 12, tree)); // skip to non-existing
+ EXPECT_TRUE(assertSeek(23, 24, tree)); // skip to non-existing
+ {
+ MyTree::Iterator itr = tree.begin();
+ EXPECT_TRUE(assertSeek(4, 4, itr));
+ EXPECT_TRUE(assertSeek(14, 14, itr));
+ EXPECT_TRUE(assertSeek(18, 18, itr));
+ EXPECT_TRUE(assertSeek(36, 36, itr));
+ }
+ {
+ MyTree::Iterator itr = tree.begin();
+ EXPECT_TRUE(assertSeek(3, 4, itr));
+ EXPECT_TRUE(assertSeek(13, 14, itr));
+ EXPECT_TRUE(assertSeek(17, 18, itr));
+ EXPECT_TRUE(assertSeek(35, 36, itr));
+ }
+ {
+ MyTree::Iterator itr = tree.begin();
+ MyTree::Iterator itr2 = tree.begin();
+ itr.binarySeek(40); // outside
+ itr2.linearSeek(40); // outside
+ EXPECT_TRUE(!itr.valid());
+ EXPECT_TRUE(!itr2.valid());
+ }
+ {
+ MyTree::Iterator itr = tree.begin();
+ EXPECT_TRUE(assertSeek(8, 8, itr));
+ for (int i = 10; i < 40; i += 2) {
+ ++itr;
+ EXPECT_EQUAL(i, UNWRAP(itr.getKey()));
+ }
+ }
+ {
+ MyTree::Iterator itr = tree.begin();
+ EXPECT_TRUE(assertSeek(26, 26, itr));
+ for (int i = 28; i < 40; i += 2) {
+ ++itr;
+ EXPECT_EQUAL(i, UNWRAP(itr.getKey()));
+ }
+ }
+ GenerationHandler g2;
+ MyTree tree2; // only leaf node
+ tree2.insert(0, "0");
+ tree2.insert(2, "2");
+ tree2.insert(4, "4");
+ EXPECT_TRUE(assertSeek(1, 2, tree2));
+ EXPECT_TRUE(assertSeek(2, 2, tree2));
+ {
+ MyTree::Iterator itr = tree2.begin();
+ MyTree::Iterator itr2 = tree2.begin();
+ itr.binarySeek(5); // outside
+ itr2.linearSeek(5); // outside
+ EXPECT_TRUE(!itr.valid());
+ EXPECT_TRUE(!itr2.valid());
+ }
+}
+
+void
+Test::requireThatTreeIteratorAssignWorks()
+{
+ GenerationHandler g;
+ MyTree tree;
+ for (int i = 0; i < 1000; ++i) {
+ tree.insert(i, toStr(i));
+ }
+ for (int i = 0; i < 1000; ++i) {
+ MyTree::Iterator itr = tree.find(i);
+ MyTree::Iterator itr2 = itr;
+ EXPECT_TRUE(itr == itr2);
+ int expNum = i;
+ for (; itr2.valid(); ++itr2) {
+ EXPECT_EQUAL(expNum++, UNWRAP(itr2.getKey()));
+ }
+ EXPECT_EQUAL(1000, expNum);
+ }
+}
+
+size_t
+adjustAllocatedBytes(size_t nodeCount, size_t nodeSize)
+{
+    // Note: Sizes of underlying data store buffers are powers of 2.
+ size_t allocatedBytes = vespalib::roundUp2inN(nodeCount * nodeSize);
+ size_t adjustedNodeCount = allocatedBytes / nodeSize;
+ return adjustedNodeCount * nodeSize;
+}
+
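+// The expected numbers below assume the allocator's initial buffers hold 128
+// internal and 128 leaf nodes, and that the first internal node slot is
+// reserved (counted as both used and dead).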
+void
+Test::requireThatMemoryUsageIsCalculated()
+{
+ typedef BTreeNodeAllocator<int32_t, int8_t,
+ btree::NoAggregated,
+ MyTraits::INTERNAL_SLOTS, MyTraits::LEAF_SLOTS> NodeAllocator;
+ typedef NodeAllocator::InternalNodeType INode;
+ typedef NodeAllocator::LeafNodeType LNode;
+ typedef NodeAllocator::InternalNodeTypeRefPair IRef;
+ typedef NodeAllocator::LeafNodeTypeRefPair LRef;
+ LOG(info, "sizeof(BTreeNode)=%zu, sizeof(INode)=%zu, sizeof(LNode)=%zu",
+ sizeof(BTreeNode), sizeof(INode), sizeof(LNode));
+ EXPECT_GREATER(sizeof(INode), sizeof(LNode));
+ GenerationHandler gh;
+ gh.incGeneration();
+ NodeAllocator tm;
+ vespalib::MemoryUsage mu;
+ const uint32_t initialInternalNodes = 128u;
+ const uint32_t initialLeafNodes = 128u;
+ mu.incAllocatedBytes(adjustAllocatedBytes(initialInternalNodes, sizeof(INode)));
+ mu.incAllocatedBytes(adjustAllocatedBytes(initialLeafNodes, sizeof(LNode)));
+ mu.incUsedBytes(sizeof(INode));
+ mu.incDeadBytes(sizeof(INode));
+ EXPECT_TRUE(assertMemoryUsage(mu, tm.getMemoryUsage()));
+
+ // add internal node
+ IRef ir = tm.allocInternalNode(1);
+ mu.incUsedBytes(sizeof(INode));
+ EXPECT_TRUE(assertMemoryUsage(mu, tm.getMemoryUsage()));
+
+ // add leaf node
+ LRef lr = tm.allocLeafNode();
+ mu.incUsedBytes(sizeof(LNode));
+ EXPECT_TRUE(assertMemoryUsage(mu, tm.getMemoryUsage()));
+
+ // move nodes to hold list
+ tm.freeze(); // mark allocated nodes as frozen so we can hold them later on
+ tm.holdNode(ir.ref, ir.data);
+ mu.incAllocatedBytesOnHold(sizeof(INode));
+ EXPECT_TRUE(assertMemoryUsage(mu, tm.getMemoryUsage()));
+ tm.holdNode(lr.ref, lr.data);
+ mu.incAllocatedBytesOnHold(sizeof(LNode));
+ EXPECT_TRUE(assertMemoryUsage(mu, tm.getMemoryUsage()));
+
+ // trim hold lists
+ tm.transferHoldLists(gh.getCurrentGeneration());
+ gh.incGeneration();
+ tm.trimHoldLists(gh.getFirstUsedGeneration());
+ mu = vespalib::MemoryUsage();
+ mu.incAllocatedBytes(adjustAllocatedBytes(initialInternalNodes, sizeof(INode)));
+ mu.incAllocatedBytes(adjustAllocatedBytes(initialLeafNodes, sizeof(LNode)));
+ mu.incUsedBytes(sizeof(INode) * 2);
+ mu.incDeadBytes(sizeof(INode) * 2);
+ mu.incUsedBytes(sizeof(LNode));
+ mu.incDeadBytes(sizeof(LNode));
+ EXPECT_TRUE(assertMemoryUsage(mu, tm.getMemoryUsage()));
+}
+
+template <typename TreeType>
+void
+Test::requireThatLowerBoundWorksT()
+{
+ GenerationHandler g;
+ TreeType t;
+ EXPECT_TRUE(t.insert(10, BTreeNoLeafData()));
+ EXPECT_TRUE(t.insert(20, BTreeNoLeafData()));
+ EXPECT_TRUE(t.insert(30, BTreeNoLeafData()));
+ EXPECT_EQUAL(10, t.lowerBound(9).getKey());
+ EXPECT_EQUAL(20, t.lowerBound(20).getKey());
+ EXPECT_EQUAL(30, t.lowerBound(21).getKey());
+ EXPECT_EQUAL(30, t.lowerBound(30).getKey());
+ EXPECT_TRUE(!t.lowerBound(31).valid());
+ for (int i = 40; i < 1000; i+=10) {
+ EXPECT_TRUE(t.insert(i, BTreeNoLeafData()));
+ }
+ for (int i = 9; i < 990; i+=10) {
+ EXPECT_EQUAL(i + 1, t.lowerBound(i).getKey());
+ EXPECT_EQUAL(i + 1, t.lowerBound(i + 1).getKey());
+ }
+ EXPECT_TRUE(!t.lowerBound(991).valid());
+}
+
+void
+Test::requireThatLowerBoundWorks()
+{
+ requireThatLowerBoundWorksT<SetTreeB>();
+ requireThatLowerBoundWorksT<SetTreeL>();
+}
+
+template <typename TreeType>
+void
+Test::requireThatUpperBoundWorksT()
+{
+ GenerationHandler g;
+ TreeType t;
+ EXPECT_TRUE(t.insert(10, BTreeNoLeafData()));
+ EXPECT_TRUE(t.insert(20, BTreeNoLeafData()));
+ EXPECT_TRUE(t.insert(30, BTreeNoLeafData()));
+ EXPECT_EQUAL(10, t.upperBound(9).getKey());
+ EXPECT_EQUAL(30, t.upperBound(20).getKey());
+ EXPECT_EQUAL(30, t.upperBound(21).getKey());
+ EXPECT_TRUE(!t.upperBound(30).valid());
+ for (int i = 40; i < 1000; i+=10) {
+ EXPECT_TRUE(t.insert(i, BTreeNoLeafData()));
+ }
+ for (int i = 9; i < 980; i+=10) {
+ EXPECT_EQUAL(i + 1, t.upperBound(i).getKey());
+ EXPECT_EQUAL(i + 11, t.upperBound(i + 1).getKey());
+ }
+ EXPECT_TRUE(!t.upperBound(990).valid());
+}
+
+void
+Test::requireThatUpperBoundWorks()
+{
+ requireThatUpperBoundWorksT<SetTreeB>();
+ requireThatUpperBoundWorksT<SetTreeL>();
+}
+
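+// Comparator that counts keys with unexpected parity; used to verify that
+// writeKey() updates become visible to later lookups.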
+struct UpdKeyComp {
+ int _remainder;
+ mutable size_t _numErrors;
+ UpdKeyComp(int remainder) : _remainder(remainder), _numErrors(0) {}
+ bool operator() (const int & lhs, const int & rhs) const {
+ if (lhs % 2 != _remainder) ++_numErrors;
+ if (rhs % 2 != _remainder) ++_numErrors;
+ return lhs < rhs;
+ }
+};
+
+void
+Test::requireThatUpdateOfKeyWorks()
+{
+ typedef BTree<int, BTreeNoLeafData,
+ btree::NoAggregated,
+ UpdKeyComp &> UpdKeyTree;
+ typedef UpdKeyTree::Iterator UpdKeyTreeIterator;
+ GenerationHandler g;
+ UpdKeyTree t;
+ UpdKeyComp cmp1(0);
+ for (int i = 0; i < 1000; i+=2) {
+ EXPECT_TRUE(t.insert(i, BTreeNoLeafData(), cmp1));
+ }
+ EXPECT_EQUAL(0u, cmp1._numErrors);
+ for (int i = 0; i < 1000; i+=2) {
+ UpdKeyTreeIterator itr = t.find(i, cmp1);
+ itr.writeKey(i + 1);
+ }
+ UpdKeyComp cmp2(1);
+ for (int i = 1; i < 1000; i+=2) {
+ UpdKeyTreeIterator itr = t.find(i, cmp2);
+ EXPECT_TRUE(itr.valid());
+ }
+ EXPECT_EQUAL(0u, cmp2._numErrors);
+}
+
+
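+// BTreeStore keeps small entry sets inline as plain arrays (here up to 8
+// entries) and switches to a real b-tree beyond that; the isSmallArray()
+// checks track that transition in both directions.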
+void
+Test::requireThatSmallNodesWorks()
+{
+ typedef BTreeStore<MyKey, std::string, btree::NoAggregated, MyComp,
+ BTreeDefaultTraits> TreeStore;
+ GenerationHandler g;
+ TreeStore s;
+
+ EntryRef root;
+ EXPECT_EQUAL(0u, s.size(root));
+ EXPECT_TRUE(s.isSmallArray(root));
+ EXPECT_TRUE(s.insert(root, 40, "fourty"));
+ EXPECT_TRUE(!s.insert(root, 40, "fourty.not"));
+ EXPECT_EQUAL(1u, s.size(root));
+ EXPECT_TRUE(s.isSmallArray(root));
+ EXPECT_TRUE(s.insert(root, 20, "twenty"));
+ EXPECT_TRUE(!s.insert(root, 20, "twenty.not"));
+ EXPECT_TRUE(!s.insert(root, 40, "fourty.not"));
+ EXPECT_EQUAL(2u, s.size(root));
+ EXPECT_TRUE(s.isSmallArray(root));
+ EXPECT_TRUE(s.insert(root, 60, "sixty"));
+ EXPECT_TRUE(!s.insert(root, 60, "sixty.not"));
+ EXPECT_TRUE(!s.insert(root, 20, "twenty.not"));
+ EXPECT_TRUE(!s.insert(root, 40, "fourty.not"));
+ EXPECT_EQUAL(3u, s.size(root));
+ EXPECT_TRUE(s.isSmallArray(root));
+ EXPECT_TRUE(s.insert(root, 50, "fifty"));
+ EXPECT_TRUE(!s.insert(root, 50, "fifty.not"));
+ EXPECT_TRUE(!s.insert(root, 60, "sixty.not"));
+ EXPECT_TRUE(!s.insert(root, 20, "twenty.not"));
+ EXPECT_TRUE(!s.insert(root, 40, "fourty.not"));
+ EXPECT_EQUAL(4u, s.size(root));
+ EXPECT_TRUE(s.isSmallArray(root));
+
+ for (uint32_t i = 0; i < 100; ++i) {
+ EXPECT_TRUE(s.insert(root, 1000 + i, "big"));
+ if (i > 0) {
+ EXPECT_TRUE(!s.insert(root, 1000 + i - 1, "big"));
+ }
+ EXPECT_EQUAL(5u + i, s.size(root));
+ EXPECT_EQUAL(5u + i <= 8u, s.isSmallArray(root));
+ }
+ EXPECT_TRUE(s.remove(root, 40));
+ EXPECT_TRUE(!s.remove(root, 40));
+ EXPECT_EQUAL(103u, s.size(root));
+ EXPECT_TRUE(!s.isSmallArray(root));
+ EXPECT_TRUE(s.remove(root, 20));
+ EXPECT_TRUE(!s.remove(root, 20));
+ EXPECT_EQUAL(102u, s.size(root));
+ EXPECT_TRUE(!s.isSmallArray(root));
+ EXPECT_TRUE(s.remove(root, 50));
+ EXPECT_TRUE(!s.remove(root, 50));
+ EXPECT_EQUAL(101u, s.size(root));
+ EXPECT_TRUE(!s.isSmallArray(root));
+ for (uint32_t i = 0; i < 100; ++i) {
+ EXPECT_TRUE(s.remove(root, 1000 + i));
+ if (i > 0) {
+ EXPECT_TRUE(!s.remove(root, 1000 + i - 1));
+ }
+ EXPECT_EQUAL(100 - i, s.size(root));
+ EXPECT_EQUAL(100 - i <= 8u, s.isSmallArray(root));
+ }
+ EXPECT_EQUAL(1u, s.size(root));
+ EXPECT_TRUE(s.isSmallArray(root));
+
+ s.clear(root);
+ s.clearBuilder();
+ s.freeze();
+ s.transferHoldLists(g.getCurrentGeneration());
+ g.incGeneration();
+ s.trimHoldLists(g.getFirstUsedGeneration());
+}
+
+
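+// Same small-array/b-tree transition as above, exercised through the batched
+// apply(additions, removals) interface.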
+void
+Test::requireThatApplyWorks()
+{
+ typedef BTreeStore<MyKey, std::string, btree::NoAggregated, MyComp,
+ BTreeDefaultTraits> TreeStore;
+ typedef TreeStore::KeyType KeyType;
+ typedef TreeStore::KeyDataType KeyDataType;
+ GenerationHandler g;
+ TreeStore s;
+ std::vector<KeyDataType> additions;
+ std::vector<KeyType> removals;
+
+ EntryRef root;
+ EXPECT_EQUAL(0u, s.size(root));
+ EXPECT_TRUE(s.isSmallArray(root));
+
+ additions.clear();
+ removals.clear();
+ additions.push_back(KeyDataType(40, "fourty"));
+ s.apply(root, &additions[0], &additions[0] + additions.size(),
+ &removals[0], &removals[0] + removals.size());
+ EXPECT_EQUAL(1u, s.size(root));
+ EXPECT_TRUE(s.isSmallArray(root));
+
+ additions.clear();
+ removals.clear();
+ additions.push_back(KeyDataType(20, "twenty"));
+ s.apply(root, &additions[0], &additions[0] + additions.size(),
+ &removals[0], &removals[0] + removals.size());
+ EXPECT_EQUAL(2u, s.size(root));
+ EXPECT_TRUE(s.isSmallArray(root));
+
+ additions.clear();
+ removals.clear();
+ additions.push_back(KeyDataType(60, "sixty"));
+ s.apply(root, &additions[0], &additions[0] + additions.size(),
+ &removals[0], &removals[0] + removals.size());
+ EXPECT_EQUAL(3u, s.size(root));
+ EXPECT_TRUE(s.isSmallArray(root));
+
+ additions.clear();
+ removals.clear();
+ additions.push_back(KeyDataType(50, "fifty"));
+ s.apply(root, &additions[0], &additions[0] + additions.size(),
+ &removals[0], &removals[0] + removals.size());
+ EXPECT_EQUAL(4u, s.size(root));
+ EXPECT_TRUE(s.isSmallArray(root));
+
+ for (uint32_t i = 0; i < 100; ++i) {
+ additions.clear();
+ removals.clear();
+ additions.push_back(KeyDataType(1000 + i, "big"));
+ s.apply(root, &additions[0], &additions[0] + additions.size(),
+ &removals[0], &removals[0] + removals.size());
+ EXPECT_EQUAL(5u + i, s.size(root));
+ EXPECT_EQUAL(5u + i <= 8u, s.isSmallArray(root));
+ }
+
+ additions.clear();
+ removals.clear();
+ removals.push_back(40);
+ s.apply(root, &additions[0], &additions[0] + additions.size(),
+ &removals[0], &removals[0] + removals.size());
+ EXPECT_EQUAL(103u, s.size(root));
+ EXPECT_TRUE(!s.isSmallArray(root));
+
+ additions.clear();
+ removals.clear();
+ removals.push_back(20);
+ s.apply(root, &additions[0], &additions[0] + additions.size(),
+ &removals[0], &removals[0] + removals.size());
+ EXPECT_EQUAL(102u, s.size(root));
+ EXPECT_TRUE(!s.isSmallArray(root));
+
+ additions.clear();
+ removals.clear();
+ removals.push_back(50);
+ s.apply(root, &additions[0], &additions[0] + additions.size(),
+ &removals[0], &removals[0] + removals.size());
+ EXPECT_EQUAL(101u, s.size(root));
+ EXPECT_TRUE(!s.isSmallArray(root));
+ for (uint32_t i = 0; i < 100; ++i) {
+ additions.clear();
+ removals.clear();
+ removals.push_back(1000 +i);
+ s.apply(root, &additions[0], &additions[0] + additions.size(),
+ &removals[0], &removals[0] + removals.size());
+ EXPECT_EQUAL(100 - i, s.size(root));
+ EXPECT_EQUAL(100 - i <= 8u, s.isSmallArray(root));
+ }
+ EXPECT_EQUAL(1u, s.size(root));
+ EXPECT_TRUE(s.isSmallArray(root));
+
+ additions.clear();
+ removals.clear();
+ for (uint32_t i = 0; i < 20; ++i)
+ additions.push_back(KeyDataType(1000 + i, "big"));
+ removals.push_back(60);
+ removals.push_back(1002);
+ s.apply(root, &additions[0], &additions[0] + additions.size(),
+ &removals[0], &removals[0] + removals.size());
+ EXPECT_EQUAL(20u, s.size(root));
+ EXPECT_TRUE(!s.isSmallArray(root));
+
+ additions.clear();
+ s.apply(root, &additions[0], &additions[0] + additions.size(),
+ &removals[0], &removals[0] + removals.size());
+ EXPECT_EQUAL(19u, s.size(root));
+ EXPECT_TRUE(!s.isSmallArray(root));
+
+ additions.clear();
+ removals.clear();
+ for (uint32_t i = 0; i < 20; ++i)
+ additions.push_back(KeyDataType(1100 + i, "big"));
+ for (uint32_t i = 0; i < 10; ++i)
+ removals.push_back(1000 + i);
+ s.apply(root, &additions[0], &additions[0] + additions.size(),
+ &removals[0], &removals[0] + removals.size());
+ EXPECT_EQUAL(30u, s.size(root));
+ EXPECT_TRUE(!s.isSmallArray(root));
+
+ s.clear(root);
+ s.clearBuilder();
+ s.freeze();
+ s.transferHoldLists(g.getCurrentGeneration());
+ g.incGeneration();
+ s.trimHoldLists(g.getFirstUsedGeneration());
+}
+
+class MyTreeTestIterator : public MyTree::Iterator
+{
+public:
+ MyTreeTestIterator(const MyTree::Iterator &rhs)
+ : MyTree::Iterator(rhs)
+ {
+ }
+
+ int getPathSize() const { return _pathSize; }
+};
+
+
+void
+Test::requireThatIteratorDistanceWorks(int numEntries)
+{
+ GenerationHandler g;
+ MyTree tree;
+ typedef MyTree::Iterator Iterator;
+ for (int i = 0; i < numEntries; ++i) {
+ tree.insert(i, toStr(i));
+ }
+ MyTreeTestIterator tit = tree.begin();
+ LOG(info,
+ "numEntries=%d, iterator pathSize=%d",
+ numEntries, tit.getPathSize());
+ Iterator it = tree.begin();
+ for (int i = 0; i <= numEntries; ++i) {
+ Iterator iit = tree.lowerBound(i);
+ Iterator iitn = tree.lowerBound(i + 1);
+ Iterator iitu = tree.upperBound(i);
+ Iterator iitls = tree.begin();
+ Iterator iitbs = tree.begin();
+ Iterator iitlsp = tree.begin();
+ Iterator iitbsp = tree.begin();
+ Iterator iitlb(tree.getRoot(), tree.getAllocator());
+ iitlb.lower_bound(i);
+ Iterator iitlb2(BTreeNode::Ref(), tree.getAllocator());
+ iitlb2.lower_bound(tree.getRoot(), i);
+ if (i > 0) {
+ iitls.linearSeek(i);
+ iitbs.binarySeek(i);
+ ++it;
+ }
+ iitlsp.linearSeekPast(i);
+ iitbsp.binarySeekPast(i);
+ Iterator iitlsp2 = iitls;
+ Iterator iitbsp2 = iitbs;
+ Iterator iitnr = i < numEntries ? iitn : tree.begin();
+ --iitnr;
+ if (i < numEntries) {
+ iitlsp2.linearSeekPast(i);
+ iitbsp2.binarySeekPast(i);
+ }
+ EXPECT_EQUAL(i, static_cast<int>(iit.position()));
+ EXPECT_EQUAL(i < numEntries, iit.valid());
+ EXPECT_TRUE(iit.identical(it));
+ EXPECT_TRUE(iit.identical(iitls));
+ EXPECT_TRUE(iit.identical(iitbs));
+ EXPECT_TRUE(iit.identical(iitnr));
+ EXPECT_TRUE(iit.identical(iitlb));
+ EXPECT_TRUE(iit.identical(iitlb2));
+ EXPECT_TRUE(iitn.identical(iitu));
+ EXPECT_TRUE(iitn.identical(iitlsp));
+ EXPECT_TRUE(iitn.identical(iitbsp));
+ EXPECT_TRUE(iitn.identical(iitlsp2));
+ EXPECT_TRUE(iitn.identical(iitbsp2));
+ if (i < numEntries) {
+ EXPECT_EQUAL(i + 1, static_cast<int>(iitn.position()));
+ EXPECT_EQUAL(i + 1 < numEntries, iitn.valid());
+ }
+ for (int j = 0; j <= numEntries; ++j) {
+ Iterator jit = tree.lowerBound(j);
+ EXPECT_EQUAL(j, static_cast<int>(jit.position()));
+ EXPECT_EQUAL(j < numEntries, jit.valid());
+ EXPECT_EQUAL(i - j, iit - jit);
+ EXPECT_EQUAL(j - i, jit - iit);
+
+ Iterator jit2 = jit;
+ jit2.setupEnd();
+ EXPECT_EQUAL(numEntries - j, jit2 - jit);
+ EXPECT_EQUAL(numEntries - i, jit2 - iit);
+ EXPECT_EQUAL(j - numEntries, jit - jit2);
+ EXPECT_EQUAL(i - numEntries, iit - jit2);
+ }
+ }
+}
+
+
+void
+Test::requireThatIteratorDistanceWorks()
+{
+ requireThatIteratorDistanceWorks(1);
+ requireThatIteratorDistanceWorks(3);
+ requireThatIteratorDistanceWorks(8);
+ requireThatIteratorDistanceWorks(20);
+ requireThatIteratorDistanceWorks(100);
+ requireThatIteratorDistanceWorks(400);
+}
+
+
+int
+Test::Main()
+{
+ TEST_INIT("btree_test");
+
+ requireThatNodeInsertWorks();
+ requireThatTreeInsertWorks();
+ requireThatNodeSplitInsertWorks();
+ requireThatNodeStealWorks();
+ requireThatTreeRemoveStealWorks();
+ requireThatNodeRemoveWorks();
+ requireThatNodeLowerBoundWorks();
+ requireThatWeCanInsertAndRemoveFromTree();
+ requireThatSortedTreeInsertWorks();
+ requireThatCornerCaseTreeFindWorks();
+ requireThatBasicTreeIteratorWorks();
+ requireThatTreeIteratorSeekWorks();
+ requireThatTreeIteratorAssignWorks();
+ requireThatMemoryUsageIsCalculated();
+ requireThatLowerBoundWorks();
+ requireThatUpperBoundWorks();
+ requireThatUpdateOfKeyWorks();
+ requireThatSmallNodesWorks();
+ requireThatApplyWorks();
+ requireThatIteratorDistanceWorks();
+
+ TEST_DONE();
+}
+
+}
+}
+
+TEST_APPHOOK(search::btree::Test);
diff --git a/vespalib/src/tests/btree/btreeaggregation_test.cpp b/vespalib/src/tests/btree/btreeaggregation_test.cpp
new file mode 100644
index 00000000000..0ce3d2c7d04
--- /dev/null
+++ b/vespalib/src/tests/btree/btreeaggregation_test.cpp
@@ -0,0 +1,1157 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/log/log.h>
+LOG_SETUP("btreeaggregation_test");
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/btree/btreeroot.h>
+#include <vespa/vespalib/btree/btreebuilder.h>
+#include <vespa/vespalib/btree/btreenodeallocator.h>
+#include <vespa/vespalib/btree/btree.h>
+#include <vespa/vespalib/btree/btreestore.h>
+#include <vespa/vespalib/btree/btreenodeallocator.hpp>
+#include <vespa/vespalib/btree/btreenode.hpp>
+#include <vespa/vespalib/btree/btreenodestore.hpp>
+#include <vespa/vespalib/btree/btreeiterator.hpp>
+#include <vespa/vespalib/btree/btreeroot.hpp>
+#include <vespa/vespalib/btree/btreebuilder.hpp>
+#include <vespa/vespalib/btree/btree.hpp>
+#include <vespa/vespalib/btree/btreestore.hpp>
+#include <vespa/vespalib/btree/btreeaggregator.hpp>
+#include <vespa/vespalib/test/btree/btree_printer.h>
+#include <vespa/vespalib/util/rand48.h>
+
+#include <iostream>
+#include <map>
+#include <set>
+#include <string>
+
+using vespalib::GenerationHandler;
+using search::datastore::EntryRef;
+
+namespace search {
+namespace btree {
+
+namespace {
+
+int32_t
+toVal(uint32_t key)
+{
+ return key + 1000;
+}
+
+int32_t
+toHighVal(uint32_t key)
+{
+ return toVal(key) + 1000;
+}
+
+int32_t
+toLowVal(uint32_t key)
+{
+ return toVal(key) - 1000000;
+}
+
+int32_t
+toNotVal(uint32_t key)
+{
+ return key + 2000;
+}
+
+}
+
+typedef BTreeTraits<4, 4, 31, false> MyTraits;
+
+#define KEYWRAP
+
+#ifdef KEYWRAP
+
+// Force use of functor to compare keys.
+class WrapInt
+{
+public:
+ int _val;
+ WrapInt(int val) : _val(val) {}
+ WrapInt() : _val(0) {}
+ bool operator==(const WrapInt & rhs) const { return _val == rhs._val; }
+};
+
+std::ostream &
+operator<<(std::ostream &s, const WrapInt &i)
+{
+ s << i._val;
+ return s;
+}
+
+typedef WrapInt MyKey;
+class MyComp
+{
+public:
+ bool
+ operator()(const WrapInt &a, const WrapInt &b) const
+ {
+ return a._val < b._val;
+ }
+};
+
+#define UNWRAP(key) (key._val)
+#else
+typedef int MyKey;
+typedef std::less<int> MyComp;
+#define UNWRAP(key) (key)
+#endif
+
+typedef BTree<MyKey, int32_t,
+ btree::MinMaxAggregated,
+ MyComp, MyTraits,
+ MinMaxAggrCalc> MyTree;
+typedef BTreeStore<MyKey, int32_t,
+ btree::MinMaxAggregated,
+ MyComp,
+ BTreeDefaultTraits,
+ MinMaxAggrCalc> MyTreeStore;
+typedef MyTree::Builder MyTreeBuilder;
+typedef MyTree::LeafNodeType MyLeafNode;
+typedef MyTree::InternalNodeType MyInternalNode;
+typedef MyTree::NodeAllocatorType MyNodeAllocator;
+typedef MyTree::Builder::Aggregator MyAggregator;
+typedef MyTree::AggrCalcType MyAggrCalc;
+typedef std::pair<MyKey, int32_t> LeafPair;
+typedef MyTreeStore::KeyDataType MyKeyData;
+typedef MyTreeStore::KeyDataTypeRefPair MyKeyDataRefPair;
+
+typedef BTree<int, BTreeNoLeafData, btree::NoAggregated> SetTreeB;
+
+typedef BTreeTraits<16, 16, 10, false> LSeekTraits;
+typedef BTree<int, BTreeNoLeafData, btree::NoAggregated,
+ std::less<int>, LSeekTraits> SetTreeL;
+
+struct LeafPairLess {
+ bool operator()(const LeafPair & lhs, const LeafPair & rhs) const {
+ return UNWRAP(lhs.first) < UNWRAP(rhs.first);
+ }
+};
+
+
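+// Reference model for the aggregation checks: a forward map (key -> value)
+// and a reverse map (value -> set of keys) from which the expected min/max
+// aggregates are derived.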
+class MockTree
+{
+public:
+ typedef std::map<uint32_t, int32_t> MTree;
+ typedef std::map<int32_t, std::set<uint32_t> > MRTree;
+ MTree _tree;
+ MRTree _rtree;
+
+ MockTree();
+ ~MockTree();
+
+
+ void
+ erase(uint32_t key)
+ {
+ MTree::iterator it(_tree.find(key));
+ if (it == _tree.end())
+ return;
+ int32_t oval = it->second;
+ MRTree::iterator rit(_rtree.find(oval));
+ assert(rit != _rtree.end());
+ size_t ecount = rit->second.erase(key);
+ assert(ecount == 1);
+ (void) ecount;
+ if (rit->second.empty()) {
+ _rtree.erase(oval);
+ }
+ _tree.erase(key);
+ }
+
+ void
+ insert(uint32_t key, int32_t val)
+ {
+ erase(key);
+ _tree[key] = val;
+ _rtree[val].insert(key);
+ }
+};
+
+
+MockTree::MockTree()
+ : _tree(),
+ _rtree()
+{}
+MockTree::~MockTree() {}
+
+class MyTreeForceApplyStore : public MyTreeStore
+{
+public:
+ typedef MyComp CompareT;
+
+ bool
+ insert(EntryRef &ref, const KeyType &key, const DataType &data,
+ CompareT comp = CompareT());
+
+ bool
+ remove(EntryRef &ref, const KeyType &key, CompareT comp = CompareT());
+};
+
+
+bool
+MyTreeForceApplyStore::insert(EntryRef &ref,
+ const KeyType &key, const DataType &data,
+ CompareT comp)
+{
+ bool retVal = true;
+ if (ref.valid()) {
+ RefType iRef(ref);
+ uint32_t clusterSize = getClusterSize(iRef);
+ if (clusterSize == 0) {
+ const BTreeType *tree = getTreeEntry(iRef);
+ const NodeAllocatorType &allocator = getAllocator();
+ Iterator itr = tree->find(key, allocator, comp);
+ if (itr.valid())
+ retVal = false;
+ } else {
+ const KeyDataType *old = getKeyDataEntry(iRef, clusterSize);
+ const KeyDataType *olde = old + clusterSize;
+ const KeyDataType *oldi = lower_bound(old, olde, key, comp);
+ if (oldi < olde && !comp(key, oldi->_key))
+ retVal = false; // key already present
+ }
+ }
+ KeyDataType addition(key, data);
+ if (retVal) {
+ apply(ref, &addition, &addition+1, NULL, NULL, comp);
+ }
+ return retVal;
+}
+
+
+bool
+MyTreeForceApplyStore::remove(EntryRef &ref, const KeyType &key,
+ CompareT comp)
+{
+ bool retVal = true;
+ if (!ref.valid())
+ retVal = false; // not found
+ else {
+ RefType iRef(ref);
+ uint32_t clusterSize = getClusterSize(iRef);
+ if (clusterSize == 0) {
+ const BTreeType *tree = getTreeEntry(iRef);
+ const NodeAllocatorType &allocator = getAllocator();
+ Iterator itr = tree->find(key, allocator, comp);
+ if (!itr.valid())
+ retVal = false;
+ } else {
+ const KeyDataType *old = getKeyDataEntry(iRef, clusterSize);
+ const KeyDataType *olde = old + clusterSize;
+ const KeyDataType *oldi = lower_bound(old, olde, key, comp);
+ if (oldi == olde || comp(key, oldi->_key))
+ retVal = false; // not found
+ }
+ }
+ std::vector<KeyDataType> additions;
+ std::vector<KeyType> removals;
+ removals.push_back(key);
+ apply(ref,
+          additions.data(), additions.data() + additions.size(),
+          removals.data(), removals.data() + removals.size(),
+ comp);
+ return retVal;
+}
+
+
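+// Freeze the allocator, hand retired nodes to the current generation, bump the generation, then trim hold lists up to the first generation still in use.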
+template <typename ManagerType>
+void
+freezeTree(GenerationHandler &g, ManagerType &m)
+{
+ m.freeze();
+ m.transferHoldLists(g.getCurrentGeneration());
+ g.incGeneration();
+ m.trimHoldLists(g.getFirstUsedGeneration());
+}
+
+template <typename ManagerType>
+void
+cleanup(GenerationHandler &g, ManagerType &m)
+{
+ freezeTree(g, m);
+}
+
+template <typename ManagerType, typename NodeType>
+void
+cleanup(GenerationHandler & g,
+ ManagerType & m,
+ BTreeNode::Ref n1Ref, NodeType * n1,
+ BTreeNode::Ref n2Ref = BTreeNode::Ref(), NodeType * n2 = NULL)
+{
+ assert(ManagerType::isValidRef(n1Ref));
+ m.holdNode(n1Ref, n1);
+ if (n2 != NULL) {
+ assert(ManagerType::isValidRef(n2Ref));
+ m.holdNode(n2Ref, n2);
+ } else {
+ assert(!ManagerType::isValidRef(n2Ref));
+ }
+ cleanup(g, m);
+}
+
+class Test : public vespalib::TestApp {
+private:
+ template <typename Tree>
+ bool
+ assertTree(const std::string & exp, const Tree &t);
+
+ template <typename Tree>
+ bool
+ assertAggregated(const MockTree &m, const Tree &t);
+
+ template <typename TreeStore>
+ bool
+ assertAggregated(const MockTree &m, const TreeStore &s, EntryRef ref);
+
+ void
+ buildSubTree(const std::vector<LeafPair> &sub,
+ size_t numEntries);
+
+ void requireThatNodeInsertWorks();
+ void requireThatNodeSplitInsertWorks();
+ void requireThatTreeInsertWorks();
+ void requireThatNodeStealWorks();
+ void requireThatNodeRemoveWorks();
+ void requireThatWeCanInsertAndRemoveFromTree();
+ void requireThatSortedTreeInsertWorks();
+ void requireThatCornerCaseTreeFindWorks();
+ void requireThatBasicTreeIteratorWorks();
+ void requireThatTreeIteratorAssignWorks();
+ void requireThatUpdateOfKeyWorks();
+ void requireThatUpdateOfDataWorks();
+
+ template <typename TreeStore>
+ void
+ requireThatSmallNodesWorks();
+public:
+ int Main() override;
+};
+
+
+template<typename Tree>
+bool
+Test::assertTree(const std::string &exp, const Tree &t)
+{
+ std::stringstream ss;
+ test::BTreePrinter<std::stringstream, typename Tree::NodeAllocatorType> printer(ss, t.getAllocator());
+ printer.print(t.getRoot());
+ if (!EXPECT_EQUAL(exp, ss.str())) return false;
+ return true;
+}
+
+
+template <typename Tree>
+bool
+Test::assertAggregated(const MockTree &m, const Tree &t)
+{
+ const MinMaxAggregated &ta(t.getAggregated());
+ if (t.getRoot().valid()) {
+ return
+ EXPECT_FALSE(m._rtree.empty()) &&
+ EXPECT_EQUAL(m._rtree.rbegin()->first,
+ ta.getMax()) &&
+ EXPECT_EQUAL(m._rtree.begin()->first,
+ ta.getMin());
+ } else {
+ return EXPECT_TRUE(m._rtree.empty()) &&
+ EXPECT_EQUAL(std::numeric_limits<int32_t>::min(),
+ ta.getMax()) &&
+ EXPECT_EQUAL(std::numeric_limits<int32_t>::max(),
+ ta.getMin());
+ }
+}
+
+template <typename TreeStore>
+bool
+Test::assertAggregated(const MockTree &m, const TreeStore &s, EntryRef ref)
+{
+ typename TreeStore::Iterator i(s.begin(ref));
+ MinMaxAggregated sa(s.getAggregated(ref));
+ const MinMaxAggregated &ia(i.getAggregated());
+ if (ref.valid()) {
+ return
+ EXPECT_FALSE(m._rtree.empty()) &&
+ EXPECT_EQUAL(m._rtree.rbegin()->first,
+ ia.getMax()) &&
+ EXPECT_EQUAL(m._rtree.begin()->first,
+ ia.getMin()) &&
+ EXPECT_EQUAL(m._rtree.rbegin()->first,
+ sa.getMax()) &&
+ EXPECT_EQUAL(m._rtree.begin()->first,
+ sa.getMin());
+ } else {
+ return EXPECT_TRUE(m._rtree.empty()) &&
+ EXPECT_EQUAL(std::numeric_limits<int32_t>::min(),
+ ia.getMax()) &&
+ EXPECT_EQUAL(std::numeric_limits<int32_t>::max(),
+ ia.getMin()) &&
+ EXPECT_EQUAL(std::numeric_limits<int32_t>::min(),
+ sa.getMax()) &&
+ EXPECT_EQUAL(std::numeric_limits<int32_t>::max(),
+ sa.getMin());
+ }
+}
+
+
+void
+Test::requireThatNodeInsertWorks()
+{
+ MyTree t;
+ t.insert(20, 102);
+ EXPECT_TRUE(assertTree("{{20:102[min=102,max=102]}}", t));
+ t.insert(10, 101);
+ EXPECT_TRUE(assertTree("{{10:101,20:102[min=101,max=102]}}", t));
+ t.insert(30, 103);
+ t.insert(40, 104);
+ EXPECT_TRUE(assertTree("{{10:101,20:102,30:103,40:104[min=101,max=104]}}", t));
+}
+
+template <typename Tree>
+void
+populateTree(Tree &t, uint32_t count, uint32_t delta)
+{
+ uint32_t key = 1;
+ int32_t value = 101;
+ for (uint32_t i = 0; i < count; ++i) {
+ t.insert(key, value);
+ key += delta;
+ value += delta;
+ }
+}
+
+void
+populateLeafNode(MyTree &t)
+{
+ populateTree(t, 4, 2);
+}
+
+void
+Test::requireThatNodeSplitInsertWorks()
+{
+ { // new entry in current node
+ MyTree t;
+ populateLeafNode(t);
+ t.insert(4, 104);
+ EXPECT_TRUE(assertTree("{{4,7[min=101,max=107]}} -> "
+ "{{1:101,3:103,4:104[min=101,max=104]},"
+ "{5:105,7:107[min=105,max=107]}}", t));
+ }
+ { // new entry in split node
+ MyTree t;
+ populateLeafNode(t);
+ t.insert(6, 106);
+ EXPECT_TRUE(assertTree("{{5,7[min=101,max=107]}} -> "
+ "{{1:101,3:103,5:105[min=101,max=105]},"
+ "{6:106,7:107[min=106,max=107]}}", t));
+ }
+ { // new entry at end
+ MyTree t;
+ populateLeafNode(t);
+ t.insert(8, 108);
+ EXPECT_TRUE(assertTree("{{5,8[min=101,max=108]}} -> "
+ "{{1:101,3:103,5:105[min=101,max=105]},"
+ "{7:107,8:108[min=107,max=108]}}", t));
+ }
+}
+
+void
+Test::requireThatTreeInsertWorks()
+{
+ { // multi level node split
+ MyTree t;
+ populateTree(t, 16, 2);
+ EXPECT_TRUE(assertTree("{{7,15,23,31[min=101,max=131]}} -> "
+ "{{1:101,3:103,5:105,7:107[min=101,max=107]},"
+ "{9:109,11:111,13:113,15:115[min=109,max=115]},"
+ "{17:117,19:119,21:121,23:123[min=117,max=123]},"
+ "{25:125,27:127,29:129,31:131[min=125,max=131]}}", t));
+ t.insert(33, 133);
+ EXPECT_TRUE(assertTree("{{23,33[min=101,max=133]}} -> "
+ "{{7,15,23[min=101,max=123]},{29,33[min=125,max=133]}} -> "
+ "{{1:101,3:103,5:105,7:107[min=101,max=107]},"
+ "{9:109,11:111,13:113,15:115[min=109,max=115]},"
+ "{17:117,19:119,21:121,23:123[min=117,max=123]},"
+ "{25:125,27:127,29:129[min=125,max=129]},"
+ "{31:131,33:133[min=131,max=133]}}", t));
+ }
+ { // give to left node to avoid split
+ MyTree t;
+ populateTree(t, 8, 2);
+ t.remove(5);
+ EXPECT_TRUE(assertTree("{{7,15[min=101,max=115]}} -> "
+ "{{1:101,3:103,7:107[min=101,max=107]},"
+ "{9:109,11:111,13:113,15:115[min=109,max=115]}}", t));
+ t.insert(10, 110);
+ EXPECT_TRUE(assertTree("{{9,15[min=101,max=115]}} -> "
+ "{{1:101,3:103,7:107,9:109[min=101,max=109]},"
+ "{10:110,11:111,13:113,15:115[min=110,max=115]}}", t));
+ }
+ { // give to left node to avoid split, and move to left node
+ MyTree t;
+ populateTree(t, 8, 2);
+ t.remove(3);
+ t.remove(5);
+ EXPECT_TRUE(assertTree("{{7,15[min=101,max=115]}} -> "
+ "{{1:101,7:107[min=101,max=107]},"
+ "{9:109,11:111,13:113,15:115[min=109,max=115]}}", t));
+ t.insert(8, 108);
+ EXPECT_TRUE(assertTree("{{9,15[min=101,max=115]}} -> "
+ "{{1:101,7:107,8:108,9:109[min=101,max=109]},"
+ "{11:111,13:113,15:115[min=111,max=115]}}", t));
+ }
+    { // do not give to left node to avoid split, but insert at end of left node
+ MyTree t;
+ populateTree(t, 8, 2);
+ t.remove(5);
+ EXPECT_TRUE(assertTree("{{7,15[min=101,max=115]}} -> "
+ "{{1:101,3:103,7:107[min=101,max=107]},"
+ "{9:109,11:111,13:113,15:115[min=109,max=115]}}", t));
+ t.insert(8, 108);
+ EXPECT_TRUE(assertTree("{{8,15[min=101,max=115]}} -> "
+ "{{1:101,3:103,7:107,8:108[min=101,max=108]},"
+ "{9:109,11:111,13:113,15:115[min=109,max=115]}}", t));
+ }
+ { // give to right node to avoid split
+ MyTree t;
+ populateTree(t, 8, 2);
+ t.remove(13);
+ EXPECT_TRUE(assertTree("{{7,15[min=101,max=115]}} -> "
+ "{{1:101,3:103,5:105,7:107[min=101,max=107]},"
+ "{9:109,11:111,15:115[min=109,max=115]}}", t));
+ t.insert(4, 104);
+ EXPECT_TRUE(assertTree("{{5,15[min=101,max=115]}} -> "
+ "{{1:101,3:103,4:104,5:105[min=101,max=105]},"
+ "{7:107,9:109,11:111,15:115[min=107,max=115]}}", t));
+ }
+ { // give to right node to avoid split and move to right node
+ using MyTraits6 = BTreeTraits<6, 6, 31, false>;
+ using Tree6 = BTree<MyKey, int32_t, btree::MinMaxAggregated, MyComp, MyTraits6, MinMaxAggrCalc>;
+
+ Tree6 t;
+ populateTree(t, 12, 2);
+ t.remove(19);
+ t.remove(21);
+ t.remove(23);
+ EXPECT_TRUE(assertTree("{{11,17[min=101,max=117]}} -> "
+ "{{1:101,3:103,5:105,7:107,9:109,11:111[min=101,max=111]},"
+ "{13:113,15:115,17:117[min=113,max=117]}}", t));
+ t.insert(10, 110);
+ EXPECT_TRUE(assertTree("{{7,17[min=101,max=117]}} -> "
+ "{{1:101,3:103,5:105,7:107[min=101,max=107]},"
+ "{9:109,10:110,11:111,13:113,15:115,17:117[min=109,max=117]}}", t));
+ }
+}
+
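+// Small fan-out traits (6 slots per node) used to provoke node stealing between siblings on removal.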
+struct BTreeStealTraits
+{
+ static const size_t LEAF_SLOTS = 6;
+ static const size_t INTERNAL_SLOTS = 6;
+ static const size_t PATH_SIZE = 20;
+ static const bool BINARY_SEEK = true;
+};
+
+void
+Test::requireThatNodeStealWorks()
+{
+ typedef BTree<MyKey, int32_t,
+ btree::MinMaxAggregated,
+ MyComp, BTreeStealTraits,
+ MinMaxAggrCalc> MyStealTree;
+ { // steal all from left
+ MyStealTree t;
+ t.insert(10, 110);
+ t.insert(20, 120);
+ t.insert(30, 130);
+ t.insert(40, 140);
+ t.insert(50, 150);
+ t.insert(60, 160);
+ t.insert(35, 135);
+ t.remove(35);
+ EXPECT_TRUE(assertTree("{{30,60[min=110,max=160]}} -> "
+ "{{10:110,20:120,30:130[min=110,max=130]},"
+ "{40:140,50:150,60:160[min=140,max=160]}}", t));
+ t.remove(50);
+ EXPECT_TRUE(assertTree("{{10:110,20:120,30:130,40:140,60:160[min=110,max=160]}}", t));
+ }
+ { // steal all from right
+ MyStealTree t;
+ t.insert(10, 110);
+ t.insert(20, 120);
+ t.insert(30, 130);
+ t.insert(40, 140);
+ t.insert(50, 150);
+ t.insert(60, 160);
+ t.insert(35, 135);
+ t.remove(35);
+ EXPECT_TRUE(assertTree("{{30,60[min=110,max=160]}} -> "
+ "{{10:110,20:120,30:130[min=110,max=130]},"
+ "{40:140,50:150,60:160[min=140,max=160]}}", t));
+ t.remove(20);
+ EXPECT_TRUE(assertTree("{{10:110,30:130,40:140,50:150,60:160[min=110,max=160]}}", t));
+ }
+ { // steal some from left
+ MyStealTree t;
+ t.insert(10, 110);
+ t.insert(20, 120);
+ t.insert(30, 130);
+ t.insert(60, 160);
+ t.insert(70, 170);
+ t.insert(80, 180);
+ t.insert(50, 150);
+ t.insert(40, 140);
+ EXPECT_TRUE(assertTree("{{50,80[min=110,max=180]}} -> "
+ "{{10:110,20:120,30:130,40:140,50:150[min=110,max=150]},"
+ "{60:160,70:170,80:180[min=160,max=180]}}", t));
+ t.remove(60);
+ EXPECT_TRUE(assertTree("{{30,80[min=110,max=180]}} -> "
+ "{{10:110,20:120,30:130[min=110,max=130]},"
+ "{40:140,50:150,70:170,80:180[min=140,max=180]}}", t));
+ }
+ { // steal some from right
+ MyStealTree t;
+ t.insert(10, 110);
+ t.insert(20, 120);
+ t.insert(30, 130);
+ t.insert(40, 140);
+ t.insert(50, 150);
+ t.insert(60, 160);
+ t.insert(70, 170);
+ t.insert(80, 180);
+ t.insert(90, 190);
+ t.remove(40);
+ EXPECT_TRUE(assertTree("{{30,90[min=110,max=190]}} -> "
+ "{{10:110,20:120,30:130[min=110,max=130]},"
+ "{50:150,60:160,70:170,80:180,90:190[min=150,max=190]}}", t));
+ t.remove(20);
+ EXPECT_TRUE(assertTree("{{60,90[min=110,max=190]}} -> "
+ "{{10:110,30:130,50:150,60:160[min=110,max=160]},"
+ "{70:170,80:180,90:190[min=170,max=190]}}", t));
+ }
+}
+
+void
+Test::requireThatNodeRemoveWorks()
+{
+ MyTree t;
+ populateLeafNode(t);
+ t.remove(3);
+ EXPECT_TRUE(assertTree("{{1:101,5:105,7:107[min=101,max=107]}}", t));
+ t.remove(1);
+ EXPECT_TRUE(assertTree("{{5:105,7:107[min=105,max=107]}}", t));
+ t.remove(7);
+ EXPECT_TRUE(assertTree("{{5:105[min=105,max=105]}}", t));
+}
+
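+// Fill 'data' with pseudo-random (key, value) pairs; the fixed seed keeps runs reproducible.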
+void
+generateData(std::vector<LeafPair> & data, size_t numEntries)
+{
+ data.reserve(numEntries);
+ Rand48 rnd;
+ rnd.srand48(10);
+ for (size_t i = 0; i < numEntries; ++i) {
+ int num = rnd.lrand48() % 10000000;
+ uint32_t val = toVal(num);
+ data.push_back(std::make_pair(num, val));
+ }
+}
+
+void
+Test::buildSubTree(const std::vector<LeafPair> &sub,
+ size_t numEntries)
+{
+ GenerationHandler g;
+ MyTree tree;
+ MyTreeBuilder builder(tree.getAllocator());
+ MockTree mock;
+
+ std::vector<LeafPair> sorted(sub.begin(), sub.begin() + numEntries);
+ std::sort(sorted.begin(), sorted.end(), LeafPairLess());
+ for (size_t i = 0; i < numEntries; ++i) {
+ int num = UNWRAP(sorted[i].first);
+ const uint32_t & val = sorted[i].second;
+ builder.insert(num, val);
+ mock.insert(num, val);
+ }
+ tree.assign(builder);
+ assert(numEntries == tree.size());
+ assert(tree.isValid());
+
+ TEST_DO(EXPECT_TRUE(assertAggregated(mock, tree)));
+ EXPECT_EQUAL(numEntries, tree.size());
+ EXPECT_TRUE(tree.isValid());
+ MyTree::Iterator itr = tree.begin();
+ MyTree::Iterator ritr = itr;
+ if (numEntries > 0) {
+ EXPECT_TRUE(ritr.valid());
+ EXPECT_EQUAL(0u, ritr.position());
+ --ritr;
+ EXPECT_TRUE(!ritr.valid());
+ EXPECT_EQUAL(numEntries, ritr.position());
+ --ritr;
+ EXPECT_TRUE(ritr.valid());
+ EXPECT_EQUAL(numEntries - 1, ritr.position());
+ } else {
+ EXPECT_TRUE(!ritr.valid());
+ EXPECT_EQUAL(0u, ritr.position());
+ --ritr;
+ EXPECT_TRUE(!ritr.valid());
+ EXPECT_EQUAL(0u, ritr.position());
+ }
+ for (size_t i = 0; i < numEntries; ++i) {
+ EXPECT_TRUE(itr.valid());
+ EXPECT_EQUAL(sorted[i].first, itr.getKey());
+ EXPECT_EQUAL(sorted[i].second, itr.getData());
+ ++itr;
+ }
+ EXPECT_TRUE(!itr.valid());
+ ritr = itr;
+ EXPECT_TRUE(!ritr.valid());
+ --ritr;
+ for (size_t i = 0; i < numEntries; ++i) {
+ EXPECT_TRUE(ritr.valid());
+ EXPECT_EQUAL(sorted[numEntries - 1 - i].first, ritr.getKey());
+ EXPECT_EQUAL(sorted[numEntries - 1 - i].second, ritr.getData());
+ --ritr;
+ }
+ EXPECT_TRUE(!ritr.valid());
+}
+
+void
+Test::requireThatWeCanInsertAndRemoveFromTree()
+{
+ GenerationHandler g;
+ MyTree tree;
+ MockTree mock;
+ std::vector<LeafPair> exp;
+ std::vector<LeafPair> sorted;
+ TEST_DO(EXPECT_TRUE(assertAggregated(mock, tree)));
+ size_t numEntries = 1000;
+ generateData(exp, numEntries);
+ sorted = exp;
+ std::sort(sorted.begin(), sorted.end(), LeafPairLess());
+ // insert entries
+ for (size_t i = 0; i < numEntries; ++i) {
+ int num = UNWRAP(exp[i].first);
+ const uint32_t & val = exp[i].second;
+ EXPECT_TRUE(!tree.find(num).valid());
+ //LOG(info, "insert[%zu](%d, %s)", i, num, str.c_str());
+ EXPECT_TRUE(tree.insert(num, val));
+ EXPECT_TRUE(!tree.insert(num, val));
+ mock.insert(num, val);
+ TEST_DO(EXPECT_TRUE(assertAggregated(mock, tree)));
+ for (size_t j = 0; j <= i; ++j) {
+ //LOG(info, "find[%zu](%d)", j, exp[j].first._val);
+ MyTree::Iterator itr = tree.find(exp[j].first);
+ EXPECT_TRUE(itr.valid());
+ EXPECT_EQUAL(exp[j].first, itr.getKey());
+ EXPECT_EQUAL(exp[j].second, itr.getData());
+ }
+ EXPECT_EQUAL(i + 1u, tree.size());
+ EXPECT_TRUE(tree.isValid());
+ buildSubTree(exp, i + 1);
+ }
+ //std::cout << "tree: " << tree.toString() << std::endl;
+
+ {
+ MyTree::Iterator itr = tree.begin();
+ MyTree::Iterator itre = itr;
+ MyTree::Iterator itre2;
+ MyTree::Iterator ritr = itr;
+ while (itre.valid())
+ ++itre;
+ if (numEntries > 0) {
+ EXPECT_TRUE(ritr.valid());
+ EXPECT_EQUAL(0u, ritr.position());
+ --ritr;
+ EXPECT_TRUE(!ritr.valid());
+ EXPECT_EQUAL(numEntries, ritr.position());
+ --ritr;
+ EXPECT_TRUE(ritr.valid());
+ EXPECT_EQUAL(numEntries - 1, ritr.position());
+ } else {
+ EXPECT_TRUE(!ritr.valid());
+ EXPECT_EQUAL(0u, ritr.position());
+ --ritr;
+ EXPECT_TRUE(!ritr.valid());
+ EXPECT_EQUAL(0u, ritr.position());
+ }
+ MyTree::Iterator pitr = itr;
+ for (size_t i = 0; i < numEntries; ++i) {
+ ssize_t si = i;
+ ssize_t sileft = numEntries - i;
+ EXPECT_TRUE(itr.valid());
+ EXPECT_EQUAL(i, itr.position());
+ EXPECT_EQUAL(sileft, itre - itr);
+ EXPECT_EQUAL(-sileft, itr - itre);
+ EXPECT_EQUAL(sileft, itre2 - itr);
+ EXPECT_EQUAL(-sileft, itr - itre2);
+ EXPECT_EQUAL(si, itr - tree.begin());
+ EXPECT_EQUAL(-si, tree.begin() - itr);
+ EXPECT_EQUAL(i != 0, itr - pitr);
+ EXPECT_EQUAL(-(i != 0), pitr - itr);
+ EXPECT_EQUAL(sorted[i].first, itr.getKey());
+ EXPECT_EQUAL(sorted[i].second, itr.getData());
+ pitr = itr;
+ ++itr;
+ ritr = itr;
+ --ritr;
+ EXPECT_TRUE(ritr.valid());
+ EXPECT_TRUE(ritr == pitr);
+ }
+ EXPECT_TRUE(!itr.valid());
+ EXPECT_EQUAL(numEntries, itr.position());
+ ssize_t sNumEntries = numEntries;
+ EXPECT_EQUAL(sNumEntries, itr - tree.begin());
+ EXPECT_EQUAL(-sNumEntries, tree.begin() - itr);
+ EXPECT_EQUAL(1, itr - pitr);
+ EXPECT_EQUAL(-1, pitr - itr);
+ }
+ // compact full tree by calling incremental compaction methods in a loop
+ {
+ MyTree::NodeAllocatorType &manager = tree.getAllocator();
+ std::vector<uint32_t> toHold = manager.startCompact();
+ MyTree::Iterator itr = tree.begin();
+ tree.setRoot(itr.moveFirstLeafNode(tree.getRoot()));
+ while (itr.valid()) {
+ // LOG(info, "Leaf moved to %d", UNWRAP(itr.getKey()));
+ itr.moveNextLeafNode();
+ }
+ manager.finishCompact(toHold);
+ manager.freeze();
+ manager.transferHoldLists(g.getCurrentGeneration());
+ g.incGeneration();
+ manager.trimHoldLists(g.getFirstUsedGeneration());
+ }
+ // remove entries
+ for (size_t i = 0; i < numEntries; ++i) {
+ int num = UNWRAP(exp[i].first);
+ //LOG(info, "remove[%zu](%d)", i, num);
+ //std::cout << "tree: " << tree.toString() << std::endl;
+ EXPECT_TRUE(tree.remove(num));
+ EXPECT_TRUE(!tree.find(num).valid());
+ EXPECT_TRUE(!tree.remove(num));
+ EXPECT_TRUE(tree.isValid());
+ mock.erase(num);
+ TEST_DO(EXPECT_TRUE(assertAggregated(mock, tree)));
+ for (size_t j = i + 1; j < numEntries; ++j) {
+ MyTree::Iterator itr = tree.find(exp[j].first);
+ EXPECT_TRUE(itr.valid());
+ EXPECT_EQUAL(exp[j].first, itr.getKey());
+ EXPECT_EQUAL(exp[j].second, itr.getData());
+ }
+ EXPECT_EQUAL(numEntries - 1 - i, tree.size());
+ }
+}
+
+void
+Test::requireThatSortedTreeInsertWorks()
+{
+ {
+ MyTree tree;
+ MockTree mock;
+ TEST_DO(EXPECT_TRUE(assertAggregated(mock, tree)));
+ for (int i = 0; i < 1000; ++i) {
+ EXPECT_TRUE(tree.insert(i, toVal(i)));
+ mock.insert(i, toVal(i));
+ MyTree::Iterator itr = tree.find(i);
+ EXPECT_TRUE(itr.valid());
+ EXPECT_EQUAL(toVal(i), itr.getData());
+ EXPECT_TRUE(tree.isValid());
+ TEST_DO(EXPECT_TRUE(assertAggregated(mock, tree)));
+ }
+ }
+ {
+ MyTree tree;
+ MockTree mock;
+ TEST_DO(EXPECT_TRUE(assertAggregated(mock, tree)));
+ for (int i = 1000; i > 0; --i) {
+ EXPECT_TRUE(tree.insert(i, toVal(i)));
+ mock.insert(i, toVal(i));
+ MyTree::Iterator itr = tree.find(i);
+ EXPECT_TRUE(itr.valid());
+ EXPECT_EQUAL(toVal(i), itr.getData());
+ EXPECT_TRUE(tree.isValid());
+ TEST_DO(EXPECT_TRUE(assertAggregated(mock, tree)));
+ }
+ }
+}
+
+void
+Test::requireThatCornerCaseTreeFindWorks()
+{
+ GenerationHandler g;
+ MyTree tree;
+ for (int i = 1; i < 100; ++i) {
+ tree.insert(i, toVal(i));
+ }
+ EXPECT_TRUE(!tree.find(0).valid()); // lower than lowest
+ EXPECT_TRUE(!tree.find(1000).valid()); // higher than highest
+}
+
+void
+Test::requireThatBasicTreeIteratorWorks()
+{
+ GenerationHandler g;
+ MyTree tree;
+ EXPECT_TRUE(!tree.begin().valid());
+ std::vector<LeafPair> exp;
+ size_t numEntries = 1000;
+ generateData(exp, numEntries);
+ for (size_t i = 0; i < numEntries; ++i) {
+ tree.insert(exp[i].first, exp[i].second);
+ }
+ std::sort(exp.begin(), exp.end(), LeafPairLess());
+ size_t ei = 0;
+ MyTree::Iterator itr = tree.begin();
+ MyTree::Iterator ritr;
+ EXPECT_EQUAL(1000u, itr.size());
+ for (; itr.valid(); ++itr) {
+ //LOG(info, "itr(%d, %s)", itr.getKey(), itr.getData().c_str());
+ EXPECT_EQUAL(UNWRAP(exp[ei].first), UNWRAP(itr.getKey()));
+ EXPECT_EQUAL(exp[ei].second, itr.getData());
+ ei++;
+ ritr = itr;
+ }
+ EXPECT_EQUAL(numEntries, ei);
+ for (; ritr.valid(); --ritr) {
+ --ei;
+ //LOG(info, "itr(%d, %s)", itr.getKey(), itr.getData().c_str());
+ EXPECT_EQUAL(UNWRAP(exp[ei].first), UNWRAP(ritr.getKey()));
+ EXPECT_EQUAL(exp[ei].second, ritr.getData());
+ }
+}
+
+
+
+void
+Test::requireThatTreeIteratorAssignWorks()
+{
+ GenerationHandler g;
+ MyTree tree;
+ for (int i = 0; i < 1000; ++i) {
+ tree.insert(i, toVal(i));
+ }
+ for (int i = 0; i < 1000; ++i) {
+ MyTree::Iterator itr = tree.find(i);
+ MyTree::Iterator itr2 = itr;
+ EXPECT_TRUE(itr == itr2);
+ int expNum = i;
+ for (; itr2.valid(); ++itr2) {
+ EXPECT_EQUAL(expNum++, UNWRAP(itr2.getKey()));
+ }
+ EXPECT_EQUAL(1000, expNum);
+ }
+}
+
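+// Comparator that counts keys with unexpected parity, used to verify that writeKey() rewrites keys in place.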
+struct UpdKeyComp {
+ int _remainder;
+ mutable size_t _numErrors;
+ UpdKeyComp(int remainder) : _remainder(remainder), _numErrors(0) {}
+ bool operator() (const int & lhs, const int & rhs) const {
+ if (lhs % 2 != _remainder) ++_numErrors;
+ if (rhs % 2 != _remainder) ++_numErrors;
+ return lhs < rhs;
+ }
+};
+
+void
+Test::requireThatUpdateOfKeyWorks()
+{
+ typedef BTree<int, BTreeNoLeafData,
+ btree::NoAggregated,
+ UpdKeyComp &> UpdKeyTree;
+ typedef UpdKeyTree::Iterator UpdKeyTreeIterator;
+ GenerationHandler g;
+ UpdKeyTree t;
+ UpdKeyComp cmp1(0);
+ for (int i = 0; i < 1000; i+=2) {
+ EXPECT_TRUE(t.insert(i, BTreeNoLeafData(), cmp1));
+ }
+ EXPECT_EQUAL(0u, cmp1._numErrors);
+ for (int i = 0; i < 1000; i+=2) {
+ UpdKeyTreeIterator itr = t.find(i, cmp1);
+ itr.writeKey(i + 1);
+ }
+ UpdKeyComp cmp2(1);
+ for (int i = 1; i < 1000; i+=2) {
+ UpdKeyTreeIterator itr = t.find(i, cmp2);
+ EXPECT_TRUE(itr.valid());
+ }
+ EXPECT_EQUAL(0u, cmp2._numErrors);
+}
+
+
+void
+Test::requireThatUpdateOfDataWorks()
+{
+ // typedef MyTree::Iterator Iterator;
+ GenerationHandler g;
+ MyTree t;
+ MockTree mock;
+ MyAggrCalc ac;
+ MyTree::NodeAllocatorType &manager = t.getAllocator();
+ TEST_DO(EXPECT_TRUE(assertAggregated(mock, t)));
+ for (int i = 0; i < 1000; i+=2) {
+ EXPECT_TRUE(t.insert(i, toVal(i)));
+ mock.insert(i, toVal(i));
+ TEST_DO(EXPECT_TRUE(assertAggregated(mock, t)));
+ }
+ freezeTree(g, manager);
+ for (int i = 0; i < 1000; i+=2) {
+ MyTree::Iterator itr = t.find(i);
+ MyTree::Iterator itr2 = itr;
+ t.thaw(itr);
+ itr.updateData(toHighVal(i), ac);
+ EXPECT_EQUAL(toHighVal(i), itr.getData());
+ EXPECT_EQUAL(toVal(i), itr2.getData());
+ mock.erase(i);
+ mock.insert(i, toHighVal(i));
+ TEST_DO(EXPECT_TRUE(assertAggregated(mock, t)));
+ freezeTree(g, manager);
+ itr = t.find(i);
+ itr2 = itr;
+ t.thaw(itr);
+ itr.updateData(toLowVal(i), ac);
+ EXPECT_EQUAL(toLowVal(i), itr.getData());
+ EXPECT_EQUAL(toHighVal(i), itr2.getData());
+ mock.erase(i);
+ mock.insert(i, toLowVal(i));
+ TEST_DO(EXPECT_TRUE(assertAggregated(mock, t)));
+ freezeTree(g, manager);
+ itr = t.find(i);
+ itr2 = itr;
+ t.thaw(itr);
+ itr.updateData(toVal(i), ac);
+ EXPECT_EQUAL(toVal(i), itr.getData());
+ EXPECT_EQUAL(toLowVal(i), itr2.getData());
+ mock.erase(i);
+ mock.insert(i, toVal(i));
+ TEST_DO(EXPECT_TRUE(assertAggregated(mock, t)));
+ freezeTree(g, manager);
+ }
+}
+
+
+template <typename TreeStore>
+void
+Test::requireThatSmallNodesWorks()
+{
+ GenerationHandler g;
+ TreeStore s;
+ MockTree mock;
+
+ EntryRef root;
+ EXPECT_EQUAL(0u, s.size(root));
+ EXPECT_TRUE(s.isSmallArray(root));
+ TEST_DO(EXPECT_TRUE(assertAggregated(mock, s, root)));
+ EXPECT_TRUE(s.insert(root, 40, toVal(40)));
+ mock.insert(40, toVal(40));
+ EXPECT_TRUE(!s.insert(root, 40, toNotVal(40)));
+ EXPECT_EQUAL(1u, s.size(root));
+ EXPECT_TRUE(s.isSmallArray(root));
+ TEST_DO(EXPECT_TRUE(assertAggregated(mock, s, root)));
+ EXPECT_TRUE(s.insert(root, 20, toVal(20)));
+ mock.insert(20, toVal(20));
+ EXPECT_TRUE(!s.insert(root, 20, toNotVal(20)));
+ EXPECT_TRUE(!s.insert(root, 40, toNotVal(40)));
+ EXPECT_EQUAL(2u, s.size(root));
+ EXPECT_TRUE(s.isSmallArray(root));
+ TEST_DO(EXPECT_TRUE(assertAggregated(mock, s, root)));
+ EXPECT_TRUE(s.insert(root, 60, toVal(60)));
+ mock.insert(60, toVal(60));
+ EXPECT_TRUE(!s.insert(root, 60, toNotVal(60)));
+ EXPECT_TRUE(!s.insert(root, 20, toNotVal(20)));
+ EXPECT_TRUE(!s.insert(root, 40, toNotVal(40)));
+ EXPECT_EQUAL(3u, s.size(root));
+ EXPECT_TRUE(s.isSmallArray(root));
+ TEST_DO(EXPECT_TRUE(assertAggregated(mock, s, root)));
+ EXPECT_TRUE(s.insert(root, 50, toVal(50)));
+ mock.insert(50, toVal(50));
+ EXPECT_TRUE(!s.insert(root, 50, toNotVal(50)));
+ EXPECT_TRUE(!s.insert(root, 60, toNotVal(60)));
+ EXPECT_TRUE(!s.insert(root, 20, toNotVal(20)));
+ EXPECT_TRUE(!s.insert(root, 40, toNotVal(40)));
+ EXPECT_EQUAL(4u, s.size(root));
+ EXPECT_TRUE(s.isSmallArray(root));
+ TEST_DO(EXPECT_TRUE(assertAggregated(mock, s, root)));
+
+ for (uint32_t i = 0; i < 100; ++i) {
+ EXPECT_TRUE(s.insert(root, 1000 + i, 42));
+ mock.insert(1000 + i, 42);
+ if (i > 0) {
+ EXPECT_TRUE(!s.insert(root, 1000 + i - 1, 42));
+ }
+ EXPECT_EQUAL(5u + i, s.size(root));
+ EXPECT_EQUAL(5u + i <= 8u, s.isSmallArray(root));
+ TEST_DO(EXPECT_TRUE(assertAggregated(mock, s, root)));
+ }
+ EXPECT_TRUE(s.remove(root, 40));
+ mock.erase(40);
+ EXPECT_TRUE(!s.remove(root, 40));
+ EXPECT_EQUAL(103u, s.size(root));
+ EXPECT_TRUE(!s.isSmallArray(root));
+ TEST_DO(EXPECT_TRUE(assertAggregated(mock, s, root)));
+ EXPECT_TRUE(s.remove(root, 20));
+ mock.erase(20);
+ EXPECT_TRUE(!s.remove(root, 20));
+ EXPECT_EQUAL(102u, s.size(root));
+ EXPECT_TRUE(!s.isSmallArray(root));
+ TEST_DO(EXPECT_TRUE(assertAggregated(mock, s, root)));
+ EXPECT_TRUE(s.remove(root, 50));
+ mock.erase(50);
+ EXPECT_TRUE(!s.remove(root, 50));
+ EXPECT_EQUAL(101u, s.size(root));
+ EXPECT_TRUE(!s.isSmallArray(root));
+ TEST_DO(EXPECT_TRUE(assertAggregated(mock, s, root)));
+ for (uint32_t i = 0; i < 100; ++i) {
+ EXPECT_TRUE(s.remove(root, 1000 + i));
+ mock.erase(1000 + i);
+ if (i > 0) {
+ EXPECT_TRUE(!s.remove(root, 1000 + i - 1));
+ }
+ EXPECT_EQUAL(100 - i, s.size(root));
+ EXPECT_EQUAL(100 - i <= 8u, s.isSmallArray(root));
+ TEST_DO(EXPECT_TRUE(assertAggregated(mock, s, root)));
+ }
+ EXPECT_EQUAL(1u, s.size(root));
+ EXPECT_TRUE(s.isSmallArray(root));
+
+ s.clear(root);
+ s.clearBuilder();
+ s.freeze();
+ s.transferHoldLists(g.getCurrentGeneration());
+ g.incGeneration();
+ s.trimHoldLists(g.getFirstUsedGeneration());
+}
+
+
+int
+Test::Main()
+{
+ TEST_INIT("btreeaggregation_test");
+
+ requireThatNodeInsertWorks();
+ requireThatNodeSplitInsertWorks();
+ requireThatTreeInsertWorks();
+ requireThatNodeStealWorks();
+ requireThatNodeRemoveWorks();
+ requireThatWeCanInsertAndRemoveFromTree();
+ requireThatSortedTreeInsertWorks();
+ requireThatCornerCaseTreeFindWorks();
+ requireThatBasicTreeIteratorWorks();
+ requireThatTreeIteratorAssignWorks();
+ requireThatUpdateOfKeyWorks();
+ requireThatUpdateOfDataWorks();
+ TEST_DO(requireThatSmallNodesWorks<MyTreeStore>());
+ TEST_DO(requireThatSmallNodesWorks<MyTreeForceApplyStore>());
+
+ TEST_DONE();
+}
+
+}
+}
+
+TEST_APPHOOK(search::btree::Test);
diff --git a/vespalib/src/tests/btree/frozenbtree_test.cpp b/vespalib/src/tests/btree/frozenbtree_test.cpp
new file mode 100644
index 00000000000..597c2ffdc90
--- /dev/null
+++ b/vespalib/src/tests/btree/frozenbtree_test.cpp
@@ -0,0 +1,469 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#define DEBUG_FROZENBTREE
+#define LOG_FROZENBTREEXX
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/btree/btreeroot.h>
+#include <vespa/vespalib/btree/btreeiterator.hpp>
+#include <vespa/vespalib/btree/btreeroot.hpp>
+#include <vespa/vespalib/btree/btreenodeallocator.hpp>
+#include <vespa/vespalib/util/rand48.h>
+#include <map>
+
+#include <vespa/log/log.h>
+LOG_SETUP("frozenbtree_test");
+
+using search::btree::BTreeRoot;
+using search::btree::BTreeNode;
+using search::btree::BTreeInternalNode;
+using search::btree::BTreeLeafNode;
+using search::btree::BTreeDefaultTraits;
+using vespalib::GenerationHandler;
+
+namespace search {
+
+
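+// Exercises BTreeRoot with an external node allocator, checking lookups and iteration against both thawed and frozen views.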
+class FrozenBTreeTest : public vespalib::TestApp
+{
+public:
+ typedef int KeyType;
+private:
+ std::vector<KeyType> _randomValues;
+ std::vector<KeyType> _sortedRandomValues;
+
+public:
+ typedef int DataType;
+ typedef BTreeRoot<KeyType, DataType,
+ btree::NoAggregated,
+ std::less<KeyType>,
+ BTreeDefaultTraits> Tree;
+ typedef Tree::NodeAllocatorType NodeAllocator;
+ typedef Tree::InternalNodeType InternalNodeType;
+ typedef Tree::LeafNodeType LeafNodeType;
+ typedef Tree::Iterator Iterator;
+ typedef Tree::ConstIterator ConstIterator;
+private:
+ GenerationHandler *_generationHandler;
+ NodeAllocator *_allocator;
+ Tree *_tree;
+
+ Rand48 _randomGenerator;
+
+ void allocTree();
+ void freeTree(bool verbose);
+ void fillRandomValues(unsigned int count);
+ void insertRandomValues(Tree &tree, NodeAllocator &allocator, const std::vector<KeyType> &values);
+ void removeRandomValues(Tree &tree, NodeAllocator &allocator, const std::vector<KeyType> &values);
+ void lookupRandomValues(const Tree &tree, NodeAllocator &allocator, const std::vector<KeyType> &values);
+ void lookupGoneRandomValues(const Tree &tree, NodeAllocator &allocator, const std::vector<KeyType> &values);
+ void lookupFrozenRandomValues(const Tree &tree, NodeAllocator &allocator, const std::vector<KeyType> &values);
+ void sortRandomValues();
+ void traverseTreeIterator(const Tree &tree, NodeAllocator &allocator,
+ const std::vector<KeyType> &sorted, bool frozen);
+
+ void printSubEnumTree(BTreeNode::Ref node, NodeAllocator &allocator, int indent) const;
+ void printEnumTree(const Tree *tree, NodeAllocator &allocator);
+
+ static const char *frozenName(bool frozen) {
+ return frozen ? "frozen" : "thawed";
+ }
+public:
+ FrozenBTreeTest();
+ ~FrozenBTreeTest();
+
+ int Main() override;
+};
+
+FrozenBTreeTest::FrozenBTreeTest()
+ : vespalib::TestApp(),
+ _randomValues(),
+ _sortedRandomValues(),
+ _generationHandler(NULL),
+ _allocator(NULL),
+ _tree(NULL),
+ _randomGenerator()
+{}
+FrozenBTreeTest::~FrozenBTreeTest() {}
+
+void
+FrozenBTreeTest::allocTree()
+{
+ assert(_generationHandler == NULL);
+ assert(_allocator == NULL);
+ assert(_tree == NULL);
+ _generationHandler = new GenerationHandler;
+ _allocator = new NodeAllocator();
+ _tree = new Tree;
+}
+
+
+void
+FrozenBTreeTest::freeTree(bool verbose)
+{
+#if 0
+ LOG(info,
+ "freeTree before clear: %" PRIu64 " (%" PRIu64 " held)"
+ ", %" PRIu32 " leaves",
+ static_cast<uint64_t>(_intTree->getUsedMemory()),
+ static_cast<uint64_t>(_intTree->getHeldMemory()),
+ _intTree->validLeaves());
+ _intTree->clear();
+ LOG(info,
+ "freeTree before unhold: %" PRIu64 " (%" PRIu64 " held)",
+ static_cast<uint64_t>(_intTree->getUsedMemory()),
+ static_cast<uint64_t>(_intTree->getHeldMemory()));
+ _intTree->dropFrozen();
+ _intTree->removeOldGenerations(_intTree->getGeneration() + 1);
+ LOG(info,
+ "freeTree after unhold: %" PRIu64 " (%" PRIu64 " held)",
+ static_cast<uint64_t>(_intTree->getUsedMemory()),
+ static_cast<uint64_t>(_intTree->getHeldMemory()));
+ if (verbose)
+ LOG(info,
+ "%d+%d leftover tree nodes",
+ _intTree->getNumInternalNodes(),
+ _intTree->getNumLeafNodes());
+ EXPECT_TRUE(_intTree->getNumInternalNodes() == 0 &&
+ _intTree->getNumLeafNodes() == 0);
+ delete _intTree;
+ _intTree = NULL;
+ delete _intKeyStore;
+ _intKeyStore = NULL;
+#endif
+ (void) verbose;
+ _tree->clear(*_allocator);
+ _allocator->freeze();
+ _allocator->transferHoldLists(_generationHandler->getCurrentGeneration());
+ _generationHandler->incGeneration();
+ _allocator->trimHoldLists(_generationHandler->getFirstUsedGeneration());
+ delete _tree;
+ _tree = NULL;
+ delete _allocator;
+ _allocator = NULL;
+ delete _generationHandler;
+ _generationHandler = NULL;
+}
+
+
+void
+FrozenBTreeTest::fillRandomValues(unsigned int count)
+{
+ unsigned int i;
+
+ LOG(info, "Filling %u random values", count);
+ _randomValues.clear();
+ _randomValues.reserve(count);
+ _randomGenerator.srand48(42);
+    for (i = 0; i < count; i++)
+ _randomValues.push_back(_randomGenerator.lrand48());
+
+ EXPECT_TRUE(_randomValues.size() == count);
+}
+
+
+void
+FrozenBTreeTest::
+insertRandomValues(Tree &tree,
+ NodeAllocator &allocator,
+ const std::vector<KeyType> &values)
+{
+ std::vector<KeyType>::const_iterator i(values.begin());
+ std::vector<KeyType>::const_iterator ie(values.end());
+ Iterator p;
+
+ LOG(info, "insertRandomValues start");
+ for (; i != ie; ++i) {
+#ifdef LOG_FROZENBTREE
+ LOG(info, "Try lookup %d before insert", *i);
+#endif
+ p = tree.find(*i, allocator);
+ if (!p.valid()) {
+ DataType val = *i + 42;
+ if (tree.insert(*i, val, allocator))
+ p = tree.find(*i, allocator);
+ }
+ ASSERT_TRUE(p.valid() && p.getKey() == *i && p.getData() == *i + 42);
+#ifdef DEBUG_FROZENBTREEX
+ printEnumTree(&tree);
+#endif
+ }
+ ASSERT_TRUE(tree.isValid(allocator));
+ ASSERT_TRUE(tree.isValidFrozen(allocator));
+ LOG(info, "insertRandomValues done");
+}
+
+
+void
+FrozenBTreeTest::
+removeRandomValues(Tree &tree,
+ NodeAllocator &allocator,
+ const std::vector<KeyType> & values)
+{
+ std::vector<KeyType>::const_iterator i(values.begin());
+ std::vector<KeyType>::const_iterator ie(values.end());
+ Iterator p;
+
+ LOG(info, "removeRandomValues start");
+ for (; i != ie; ++i) {
+#ifdef LOG_FROZENBTREE
+ LOG(info, "Try lookup %d before remove", *i);
+#endif
+ p = tree.find(*i, allocator);
+ if (p.valid()) {
+ if (tree.remove(*i, allocator))
+ p = tree.find(*i, allocator);
+ }
+ ASSERT_TRUE(!p.valid());
+#ifdef DEBUG_FROZENBTREEX
+ tree.printTree();
+#endif
+ }
+ ASSERT_TRUE(tree.isValid(allocator));
+ ASSERT_TRUE(tree.isValidFrozen(allocator));
+ LOG(info, "removeRandomValues done");
+}
+
+
+void
+FrozenBTreeTest::
+lookupRandomValues(const Tree &tree,
+ NodeAllocator &allocator,
+ const std::vector<KeyType> &values)
+{
+ std::vector<KeyType>::const_iterator i(values.begin());
+ std::vector<KeyType>::const_iterator ie(values.end());
+ Iterator p;
+
+ LOG(info, "lookupRandomValues start");
+ for (; i != ie; ++i) {
+ p = tree.find(*i, allocator);
+ ASSERT_TRUE(p.valid() && p.getKey() == *i);
+ }
+ LOG(info, "lookupRandomValues done");
+}
+
+
+void
+FrozenBTreeTest::
+lookupGoneRandomValues(const Tree &tree,
+ NodeAllocator &allocator,
+ const std::vector<KeyType> &values)
+{
+ std::vector<KeyType>::const_iterator i(values.begin());
+ std::vector<KeyType>::const_iterator ie(values.end());
+ Iterator p;
+
+ LOG(info, "lookupGoneRandomValues start");
+ for (; i != ie; ++i) {
+ p = tree.find(*i, allocator);
+ ASSERT_TRUE(!p.valid());
+ }
+ LOG(info, "lookupGoneRandomValues done");
+}
+
+
+void
+FrozenBTreeTest::
+lookupFrozenRandomValues(const Tree &tree,
+ NodeAllocator &allocator,
+ const std::vector<KeyType> &values)
+{
+ std::vector<KeyType>::const_iterator i(values.begin());
+ std::vector<KeyType>::const_iterator ie(values.end());
+ ConstIterator p;
+
+ LOG(info, "lookupFrozenRandomValues start");
+ for (; i != ie; ++i) {
+ p = tree.getFrozenView(allocator).find(*i, std::less<int>());
+ ASSERT_TRUE(p.valid() && p.getKey() == *i && p.getData() == *i + 42);
+ }
+ LOG(info, "lookupFrozenRandomValues done");
+}
+
+
+void
+FrozenBTreeTest::sortRandomValues()
+{
+ std::vector<KeyType>::iterator i;
+ std::vector<KeyType>::iterator ie;
+ uint32_t okcnt;
+ int prevVal;
+ std::vector<KeyType> sorted;
+
+ LOG(info, "sortRandomValues start");
+ sorted = _randomValues;
+ std::sort(sorted.begin(), sorted.end());
+ _sortedRandomValues.clear();
+ _sortedRandomValues.reserve(sorted.size());
+
+ okcnt = 0;
+ prevVal = 0;
+ ie = sorted.end();
+ for (i = sorted.begin(); i != ie; ++i) {
+        if (i == sorted.begin() || *i > prevVal) {
+ okcnt++;
+ _sortedRandomValues.push_back(*i);
+ } else if (*i == prevVal)
+ okcnt++;
+ else
+ LOG_ABORT("should not be reached");
+ prevVal = *i;
+ }
+ EXPECT_TRUE(okcnt == sorted.size());
+ LOG(info, "sortRandomValues done");
+}
+
+
+void
+FrozenBTreeTest::
+traverseTreeIterator(const Tree &tree,
+ NodeAllocator &allocator,
+ const std::vector<KeyType> &sorted,
+ bool frozen)
+{
+ LOG(info,
+ "traverseTreeIterator %s start",
+ frozenName(frozen));
+
+ std::vector<KeyType>::const_iterator i;
+
+ i = sorted.begin();
+ if (frozen) {
+ ConstIterator ai;
+ ai = tree.getFrozenView(allocator).begin();
+ for (;ai.valid(); ++ai, ++i)
+ {
+ ASSERT_TRUE(ai.getKey() == *i);
+ }
+ } else {
+ Iterator ai;
+ ai = tree.begin(allocator);
+ for (;ai.valid(); ++ai, ++i)
+ {
+ ASSERT_TRUE(ai.getKey() == *i);
+ }
+ }
+
+
+ ASSERT_TRUE(i == sorted.end());
+
+ LOG(info,
+ "traverseTreeIterator %s done",
+ frozenName(frozen));
+}
+
+
+void
+FrozenBTreeTest::
+printSubEnumTree(BTreeNode::Ref node,
+ NodeAllocator &allocator,
+ int indent) const
+{
+ // typedef BTreeNode Node;
+ typedef LeafNodeType LeafNode;
+ typedef InternalNodeType InternalNode;
+ BTreeNode::Ref subNode;
+ unsigned int i;
+
+ if (allocator.isLeafRef(node)) {
+ const LeafNode *lnode = allocator.mapLeafRef(node);
+ printf("%*s LeafNode %s valid=%d\n",
+ indent, "",
+ lnode->getFrozen() ? "frozen" : "thawed",
+ lnode->validSlots());
+ for (i = 0; i < lnode->validSlots(); i++) {
+
+ KeyType k = lnode->getKey(i);
+ DataType d = lnode->getData(i);
+ printf("leaf value %3d %d %d\n",
+ (int) i,
+ (int) k,
+ (int) d);
+ }
+ return;
+ }
+ const InternalNode *inode = allocator.mapInternalRef(node);
+ printf("%*s IntermediteNode %s valid=%d\n",
+ indent, "",
+ inode->getFrozen() ? "frozen" : "thawed",
+ inode->validSlots());
+ for (i = 0; i < inode->validSlots(); i++) {
+ subNode = inode->getChild(i);
+ assert(subNode != BTreeNode::Ref());
+ printSubEnumTree(subNode, allocator, indent + 4);
+ }
+}
+
+
+void
+FrozenBTreeTest::printEnumTree(const Tree *tree,
+ NodeAllocator &allocator)
+{
+ printf("Tree Dump start\n");
+ if (!NodeAllocator::isValidRef(tree->getRoot())) {
+ printf("EMPTY\n");
+ } else {
+ printSubEnumTree(tree->getRoot(), allocator, 0);
+ }
+ printf("Tree Dump done\n");
+}
+
+
+
+int
+FrozenBTreeTest::Main()
+{
+ TEST_INIT("frozenbtree_test");
+
+ fillRandomValues(1000);
+ sortRandomValues();
+
+ allocTree();
+ insertRandomValues(*_tree, *_allocator, _randomValues);
+ lookupRandomValues(*_tree, *_allocator, _randomValues);
+ _allocator->freeze();
+ _allocator->transferHoldLists(_generationHandler->getCurrentGeneration());
+ lookupFrozenRandomValues(*_tree, *_allocator, _randomValues);
+ traverseTreeIterator(*_tree,
+ *_allocator,
+ _sortedRandomValues,
+ false);
+ traverseTreeIterator(*_tree,
+ *_allocator,
+ _sortedRandomValues,
+ true);
+ traverseTreeIterator(*_tree,
+ *_allocator,
+ _sortedRandomValues,
+ false);
+ traverseTreeIterator(*_tree,
+ *_allocator,
+ _sortedRandomValues,
+ true);
+ removeRandomValues(*_tree, *_allocator, _randomValues);
+ lookupGoneRandomValues(*_tree, *_allocator, _randomValues);
+    lookupFrozenRandomValues(*_tree, *_allocator, _randomValues);
+ traverseTreeIterator(*_tree,
+ *_allocator,
+ _sortedRandomValues,
+ true);
+ insertRandomValues(*_tree, *_allocator, _randomValues);
+ freeTree(true);
+
+ fillRandomValues(1000000);
+ sortRandomValues();
+
+ allocTree();
+ insertRandomValues(*_tree, *_allocator, _randomValues);
+ traverseTreeIterator(*_tree,
+ *_allocator,
+ _sortedRandomValues,
+ false);
+ freeTree(false);
+
+ TEST_DONE();
+}
+
+}
+
+TEST_APPHOOK(search::FrozenBTreeTest);
diff --git a/vespalib/src/tests/btree/iteratespeed.cpp b/vespalib/src/tests/btree/iteratespeed.cpp
new file mode 100644
index 00000000000..d1b9b3d7b1d
--- /dev/null
+++ b/vespalib/src/tests/btree/iteratespeed.cpp
@@ -0,0 +1,212 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/btree/btreeroot.h>
+#include <vespa/vespalib/btree/btreebuilder.h>
+#include <vespa/vespalib/btree/btreenodeallocator.h>
+#include <vespa/vespalib/btree/btree.h>
+#include <vespa/vespalib/btree/btreestore.h>
+#include <vespa/vespalib/btree/btreenodeallocator.hpp>
+#include <vespa/vespalib/btree/btreenode.hpp>
+#include <vespa/vespalib/btree/btreenodestore.hpp>
+#include <vespa/vespalib/btree/btreeiterator.hpp>
+#include <vespa/vespalib/btree/btreeroot.hpp>
+#include <vespa/vespalib/btree/btreebuilder.hpp>
+#include <vespa/vespalib/btree/btree.hpp>
+#include <vespa/vespalib/btree/btreestore.hpp>
+#include <vespa/vespalib/util/rand48.h>
+
+#include <vespa/fastos/app.h>
+#include <vespa/fastos/timestamp.h>
+
+#include <vespa/log/log.h>
+LOG_SETUP("iteratespeed");
+
+namespace search {
+namespace btree {
+
+enum class IterateMethod
+{
+ FORWARD,
+ BACKWARDS,
+ LAMBDA
+};
+
+class IterateSpeed : public FastOS_Application
+{
+ template <typename Traits, IterateMethod iterateMethod>
+ void
+ workLoop(int loops, bool enableForward, bool enableBackwards,
+ bool enableLambda, int leafSlots);
+ void usage();
+ int Main() override;
+};
+
+
+namespace {
+
+const char *iterateMethodName(IterateMethod iterateMethod)
+{
+ switch (iterateMethod) {
+ case IterateMethod::FORWARD:
+ return "forward";
+ case IterateMethod::BACKWARDS:
+ return "backwards";
+ default:
+ return "lambda";
+ }
+}
+
+}
+
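+// Build a tree of one million sequential keys and time repeated full iterations for the given traits and iteration method.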
+template <typename Traits, IterateMethod iterateMethod>
+void
+IterateSpeed::workLoop(int loops, bool enableForward, bool enableBackwards,
+ bool enableLambda, int leafSlots)
+{
+ if ((iterateMethod == IterateMethod::FORWARD && !enableForward) ||
+ (iterateMethod == IterateMethod::BACKWARDS && !enableBackwards) ||
+ (iterateMethod == IterateMethod::LAMBDA && !enableLambda) ||
+ (leafSlots != 0 &&
+ leafSlots != static_cast<int>(Traits::LEAF_SLOTS)))
+ return;
+ vespalib::GenerationHandler g;
+ using Tree = BTree<int, int, btree::NoAggregated, std::less<int>, Traits>;
+ using Builder = typename Tree::Builder;
+ using ConstIterator = typename Tree::ConstIterator;
+ Tree tree;
+ Builder builder(tree.getAllocator());
+ size_t numEntries = 1000000;
+ size_t numInnerLoops = 1000;
+ for (size_t i = 0; i < numEntries; ++i) {
+ builder.insert(i, 0);
+ }
+ tree.assign(builder);
+ assert(numEntries == tree.size());
+ assert(tree.isValid());
+ for (int l = 0; l < loops; ++l) {
+ fastos::TimeStamp before = fastos::ClockSystem::now();
+ uint64_t sum = 0;
+ for (size_t innerl = 0; innerl < numInnerLoops; ++innerl) {
+ if (iterateMethod == IterateMethod::FORWARD) {
+ ConstIterator itr(BTreeNode::Ref(), tree.getAllocator());
+ itr.begin(tree.getRoot());
+ while (itr.valid()) {
+ sum += itr.getKey();
+ ++itr;
+ }
+ } else if (iterateMethod == IterateMethod::BACKWARDS) {
+ ConstIterator itr(BTreeNode::Ref(), tree.getAllocator());
+ itr.end(tree.getRoot());
+ --itr;
+ while (itr.valid()) {
+ sum += itr.getKey();
+ --itr;
+ }
+ } else {
+ tree.getAllocator().foreach_key(tree.getRoot(),
+ [&](int key) { sum += key; } );
+ }
+ }
+ fastos::TimeStamp after = fastos::ClockSystem::now();
+ double used = after.sec() - before.sec();
+ printf("Elapsed time for iterating %ld steps is %8.5f, "
+ "direction=%s, fanout=%u,%u, sum=%" PRIu64 "\n",
+ numEntries * numInnerLoops,
+ used,
+ iterateMethodName(iterateMethod),
+ static_cast<int>(Traits::LEAF_SLOTS),
+ static_cast<int>(Traits::INTERNAL_SLOTS),
+ sum);
+ fflush(stdout);
+ }
+}
+
+
+void
+IterateSpeed::usage()
+{
+ printf("iteratspeed "
+ "[-F <leafSlots>] "
+ "[-b] "
+ "[-c <numLoops>] "
+ "[-f] "
+ "[-l]\n");
+}
+
+int
+IterateSpeed::Main()
+{
+ int argi;
+    int c;
+ const char *optArg;
+ argi = 1;
+ int loops = 1;
+ bool backwards = false;
+ bool forwards = false;
+ bool lambda = false;
+ int leafSlots = 0;
+ while ((c = GetOpt("F:bc:fl", optArg, argi)) != -1) {
+ switch (c) {
+ case 'F':
+ leafSlots = atoi(optArg);
+ break;
+ case 'b':
+ backwards = true;
+ break;
+ case 'c':
+ loops = atoi(optArg);
+ break;
+ case 'f':
+ forwards = true;
+ break;
+ case 'l':
+ lambda = true;
+ break;
+ default:
+ usage();
+ return 1;
+ }
+ }
+ if (!backwards && !forwards && !lambda) {
+ backwards = true;
+ forwards = true;
+ lambda = true;
+ }
+
+ using SmallTraits = BTreeTraits<4, 4, 31, false>;
+ using DefTraits = BTreeDefaultTraits;
+ using LargeTraits = BTreeTraits<32, 16, 10, true>;
+ using HugeTraits = BTreeTraits<64, 16, 10, true>;
+ workLoop<SmallTraits, IterateMethod::FORWARD>(loops, forwards, backwards,
+ lambda, leafSlots);
+ workLoop<DefTraits, IterateMethod::FORWARD>(loops, forwards, backwards,
+ lambda, leafSlots);
+ workLoop<LargeTraits, IterateMethod::FORWARD>(loops, forwards, backwards,
+ lambda, leafSlots);
+ workLoop<HugeTraits, IterateMethod::FORWARD>(loops, forwards, backwards,
+ lambda, leafSlots);
+ workLoop<SmallTraits, IterateMethod::BACKWARDS>(loops, forwards, backwards,
+ lambda, leafSlots);
+ workLoop<DefTraits, IterateMethod::BACKWARDS>(loops, forwards, backwards,
+ lambda, leafSlots);
+ workLoop<LargeTraits, IterateMethod::BACKWARDS>(loops, forwards, backwards,
+ lambda, leafSlots);
+ workLoop<HugeTraits, IterateMethod::BACKWARDS>(loops, forwards, backwards,
+ lambda, leafSlots);
+ workLoop<SmallTraits, IterateMethod::LAMBDA>(loops, forwards, backwards,
+ lambda, leafSlots);
+ workLoop<DefTraits, IterateMethod::LAMBDA>(loops, forwards, backwards,
+ lambda, leafSlots);
+ workLoop<LargeTraits, IterateMethod::LAMBDA>(loops, forwards, backwards,
+ lambda, leafSlots);
+ workLoop<HugeTraits, IterateMethod::LAMBDA>(loops, forwards, backwards,
+ lambda, leafSlots);
+ return 0;
+}
+
+}
+}
+
+FASTOS_MAIN(search::btree::IterateSpeed);
+
+
diff --git a/vespalib/src/tests/datastore/array_store/CMakeLists.txt b/vespalib/src/tests/datastore/array_store/CMakeLists.txt
new file mode 100644
index 00000000000..a6f0ef31b1a
--- /dev/null
+++ b/vespalib/src/tests/datastore/array_store/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_array_store_test_app TEST
+ SOURCES
+ array_store_test.cpp
+ DEPENDS
+ vespalib
+)
+vespa_add_test(NAME vespalib_array_store_test_app COMMAND vespalib_array_store_test_app)
diff --git a/vespalib/src/tests/datastore/array_store/array_store_test.cpp b/vespalib/src/tests/datastore/array_store/array_store_test.cpp
new file mode 100644
index 00000000000..1eb71d36fe7
--- /dev/null
+++ b/vespalib/src/tests/datastore/array_store/array_store_test.cpp
@@ -0,0 +1,360 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/test/datastore/memstats.h>
+#include <vespa/vespalib/datastore/array_store.hpp>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/test/insertion_operators.h>
+#include <vespa/vespalib/util/traits.h>
+#include <vector>
+
+using namespace search::datastore;
+using vespalib::MemoryUsage;
+using vespalib::ArrayRef;
+using generation_t = vespalib::GenerationHandler::generation_t;
+using MemStats = search::datastore::test::MemStats;
+
+constexpr float ALLOC_GROW_FACTOR = 0.2;
+
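+// Fixture pairing an ArrayStore with a reference map from EntryRef to expected contents, used to cross-check add/remove/compaction.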
+template <typename EntryT, typename RefT = EntryRefT<19> >
+struct Fixture
+{
+ using EntryRefType = RefT;
+ using ArrayStoreType = ArrayStore<EntryT, RefT>;
+ using LargeArray = typename ArrayStoreType::LargeArray;
+ using ConstArrayRef = typename ArrayStoreType::ConstArrayRef;
+ using EntryVector = std::vector<EntryT>;
+ using value_type = EntryT;
+ using ReferenceStore = std::map<EntryRef, EntryVector>;
+
+ ArrayStoreType store;
+ ReferenceStore refStore;
+ generation_t generation;
+ Fixture(uint32_t maxSmallArraySize)
+ : store(ArrayStoreConfig(maxSmallArraySize,
+ ArrayStoreConfig::AllocSpec(16, RefT::offsetSize(), 8 * 1024,
+ ALLOC_GROW_FACTOR))),
+ refStore(),
+ generation(1)
+ {}
+ Fixture(const ArrayStoreConfig &storeCfg)
+ : store(storeCfg),
+ refStore(),
+ generation(1)
+ {}
+ void assertAdd(const EntryVector &input) {
+ EntryRef ref = add(input);
+ assertGet(ref, input);
+ }
+ EntryRef add(const EntryVector &input) {
+ EntryRef result = store.add(ConstArrayRef(input));
+ ASSERT_EQUAL(0u, refStore.count(result));
+ refStore.insert(std::make_pair(result, input));
+ return result;
+ }
+ void assertGet(EntryRef ref, const EntryVector &exp) const {
+ ConstArrayRef act = store.get(ref);
+ EXPECT_EQUAL(exp, EntryVector(act.begin(), act.end()));
+ }
+ void remove(EntryRef ref) {
+ ASSERT_EQUAL(1u, refStore.count(ref));
+ store.remove(ref);
+ refStore.erase(ref);
+ }
+ void remove(const EntryVector &input) {
+ remove(getEntryRef(input));
+ }
+ uint32_t getBufferId(EntryRef ref) const {
+ return EntryRefType(ref).bufferId();
+ }
+ void assertBufferState(EntryRef ref, const MemStats expStats) const {
+ EXPECT_EQUAL(expStats._used, store.bufferState(ref).size());
+ EXPECT_EQUAL(expStats._hold, store.bufferState(ref).getHoldElems());
+ EXPECT_EQUAL(expStats._dead, store.bufferState(ref).getDeadElems());
+ }
+ void assertMemoryUsage(const MemStats expStats) const {
+ MemoryUsage act = store.getMemoryUsage();
+ EXPECT_EQUAL(expStats._used, act.usedBytes());
+ EXPECT_EQUAL(expStats._hold, act.allocatedBytesOnHold());
+ EXPECT_EQUAL(expStats._dead, act.deadBytes());
+ }
+ void assertStoreContent() const {
+ for (const auto &elem : refStore) {
+ TEST_DO(assertGet(elem.first, elem.second));
+ }
+ }
+ EntryRef getEntryRef(const EntryVector &input) {
+ for (auto itr = refStore.begin(); itr != refStore.end(); ++itr) {
+ if (itr->second == input) {
+ return itr->first;
+ }
+ }
+ return EntryRef();
+ }
+ void trimHoldLists() {
+ store.transferHoldLists(generation++);
+ store.trimHoldLists(generation);
+ }
+ void compactWorst(bool compactMemory, bool compactAddressSpace) {
+ ICompactionContext::UP ctx = store.compactWorst(compactMemory, compactAddressSpace);
+ std::vector<EntryRef> refs;
+ for (auto itr = refStore.begin(); itr != refStore.end(); ++itr) {
+ refs.push_back(itr->first);
+ }
+ std::vector<EntryRef> compactedRefs = refs;
+ ctx->compact(ArrayRef<EntryRef>(compactedRefs));
+ ReferenceStore compactedRefStore;
+ for (size_t i = 0; i < refs.size(); ++i) {
+ ASSERT_EQUAL(0u, compactedRefStore.count(compactedRefs[i]));
+ ASSERT_EQUAL(1u, refStore.count(refs[i]));
+ compactedRefStore.insert(std::make_pair(compactedRefs[i], refStore[refs[i]]));
+ }
+ refStore = compactedRefStore;
+ }
+ size_t entrySize() const { return sizeof(EntryT); }
+ size_t largeArraySize() const { return sizeof(LargeArray); }
+};
+
+using NumberFixture = Fixture<uint32_t>;
+using StringFixture = Fixture<std::string>;
+using SmallOffsetNumberFixture = Fixture<uint32_t, EntryRefT<10>>;
+using ByteFixture = Fixture<uint8_t>;
+
+
+
+TEST("require that we test with trivial and non-trivial types")
+{
+ EXPECT_TRUE(vespalib::can_skip_destruction<NumberFixture::value_type>::value);
+ EXPECT_FALSE(vespalib::can_skip_destruction<StringFixture::value_type>::value);
+}
+
+TEST_F("require that we can add and get small arrays of trivial type", NumberFixture(3))
+{
+ TEST_DO(f.assertAdd({}));
+ TEST_DO(f.assertAdd({1}));
+ TEST_DO(f.assertAdd({2,3}));
+ TEST_DO(f.assertAdd({3,4,5}));
+}
+
+TEST_F("require that we can add and get small arrays of non-trivial type", StringFixture(3))
+{
+ TEST_DO(f.assertAdd({}));
+ TEST_DO(f.assertAdd({"aa"}));
+ TEST_DO(f.assertAdd({"bbb", "ccc"}));
+ TEST_DO(f.assertAdd({"ddd", "eeee", "fffff"}));
+}
+
+TEST_F("require that we can add and get large arrays of simple type", NumberFixture(3))
+{
+ TEST_DO(f.assertAdd({1,2,3,4}));
+ TEST_DO(f.assertAdd({2,3,4,5,6}));
+}
+
+TEST_F("require that we can add and get large arrays of non-trivial type", StringFixture(3))
+{
+ TEST_DO(f.assertAdd({"aa", "bb", "cc", "dd"}));
+ TEST_DO(f.assertAdd({"ddd", "eee", "ffff", "gggg", "hhhh"}));
+}
+
+TEST_F("require that elements are put on hold when a small array is removed", NumberFixture(3))
+{
+ EntryRef ref = f.add({1,2,3});
+ TEST_DO(f.assertBufferState(ref, MemStats().used(3).hold(0)));
+ f.store.remove(ref);
+ TEST_DO(f.assertBufferState(ref, MemStats().used(3).hold(3)));
+}
+
+TEST_F("require that elements are put on hold when a large array is removed", NumberFixture(3))
+{
+ EntryRef ref = f.add({1,2,3,4});
+    // Note: The first buffer has the first element reserved -> we expect 2 elements used here.
+ TEST_DO(f.assertBufferState(ref, MemStats().used(2).hold(0).dead(1)));
+ f.store.remove(ref);
+ TEST_DO(f.assertBufferState(ref, MemStats().used(2).hold(1).dead(1)));
+}
+
+TEST_F("require that new underlying buffer is allocated when current is full", SmallOffsetNumberFixture(3))
+{
+ uint32_t firstBufferId = f.getBufferId(f.add({1,1}));
+ for (uint32_t i = 0; i < (F1::EntryRefType::offsetSize() - 1); ++i) {
+ uint32_t bufferId = f.getBufferId(f.add({i, i+1}));
+ EXPECT_EQUAL(firstBufferId, bufferId);
+ }
+ TEST_DO(f.assertStoreContent());
+
+ uint32_t secondBufferId = f.getBufferId(f.add({2,2}));
+ EXPECT_NOT_EQUAL(firstBufferId, secondBufferId);
+ for (uint32_t i = 0; i < 10u; ++i) {
+ uint32_t bufferId = f.getBufferId(f.add({i+2,i}));
+ EXPECT_EQUAL(secondBufferId, bufferId);
+ }
+ TEST_DO(f.assertStoreContent());
+}
+
+TEST_F("require that the buffer with most dead space is compacted", NumberFixture(2))
+{
+ EntryRef size1Ref = f.add({1});
+ EntryRef size2Ref = f.add({2,2});
+ EntryRef size3Ref = f.add({3,3,3});
+ f.remove(f.add({5,5}));
+ f.trimHoldLists();
+ TEST_DO(f.assertBufferState(size1Ref, MemStats().used(1).dead(0)));
+ TEST_DO(f.assertBufferState(size2Ref, MemStats().used(4).dead(2)));
+ TEST_DO(f.assertBufferState(size3Ref, MemStats().used(2).dead(1))); // Note: First element is reserved
+ uint32_t size1BufferId = f.getBufferId(size1Ref);
+ uint32_t size2BufferId = f.getBufferId(size2Ref);
+ uint32_t size3BufferId = f.getBufferId(size3Ref);
+
+ EXPECT_EQUAL(3u, f.refStore.size());
+ f.compactWorst(true, false);
+ EXPECT_EQUAL(3u, f.refStore.size());
+ f.assertStoreContent();
+
+ EXPECT_EQUAL(size1BufferId, f.getBufferId(f.getEntryRef({1})));
+ EXPECT_EQUAL(size3BufferId, f.getBufferId(f.getEntryRef({3,3,3})));
+ // Buffer for size 2 arrays has been compacted
+ EXPECT_NOT_EQUAL(size2BufferId, f.getBufferId(f.getEntryRef({2,2})));
+ f.assertGet(size2Ref, {2,2}); // Old ref should still point to data.
+ EXPECT_TRUE(f.store.bufferState(size2Ref).isOnHold());
+ f.trimHoldLists();
+ EXPECT_TRUE(f.store.bufferState(size2Ref).isFree());
+}
+
+namespace {
+
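+// Shared helper: create buffers with differing amounts of dead space and verify which ones compactWorst() moves and frees.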
+void testCompaction(NumberFixture &f, bool compactMemory, bool compactAddressSpace)
+{
+ EntryRef size1Ref = f.add({1});
+ EntryRef size2Ref = f.add({2,2});
+ EntryRef size3Ref = f.add({3,3,3});
+ f.remove(f.add({5,5,5}));
+ f.remove(f.add({6}));
+ f.remove(f.add({7}));
+ f.trimHoldLists();
+ TEST_DO(f.assertBufferState(size1Ref, MemStats().used(3).dead(2)));
+ TEST_DO(f.assertBufferState(size2Ref, MemStats().used(2).dead(0)));
+ TEST_DO(f.assertBufferState(size3Ref, MemStats().used(6).dead(3)));
+ uint32_t size1BufferId = f.getBufferId(size1Ref);
+ uint32_t size2BufferId = f.getBufferId(size2Ref);
+ uint32_t size3BufferId = f.getBufferId(size3Ref);
+
+ EXPECT_EQUAL(3u, f.refStore.size());
+ f.compactWorst(compactMemory, compactAddressSpace);
+ EXPECT_EQUAL(3u, f.refStore.size());
+ f.assertStoreContent();
+
+ if (compactMemory) {
+ EXPECT_NOT_EQUAL(size3BufferId, f.getBufferId(f.getEntryRef({3,3,3})));
+ } else {
+ EXPECT_EQUAL(size3BufferId, f.getBufferId(f.getEntryRef({3,3,3})));
+ }
+ if (compactAddressSpace) {
+ EXPECT_NOT_EQUAL(size1BufferId, f.getBufferId(f.getEntryRef({1})));
+ } else {
+ EXPECT_EQUAL(size1BufferId, f.getBufferId(f.getEntryRef({1})));
+ }
+ EXPECT_EQUAL(size2BufferId, f.getBufferId(f.getEntryRef({2,2})));
+ f.assertGet(size1Ref, {1}); // Old ref should still point to data.
+ f.assertGet(size3Ref, {3,3,3}); // Old ref should still point to data.
+ if (compactMemory) {
+ EXPECT_TRUE(f.store.bufferState(size3Ref).isOnHold());
+ } else {
+ EXPECT_FALSE(f.store.bufferState(size3Ref).isOnHold());
+ }
+ if (compactAddressSpace) {
+ EXPECT_TRUE(f.store.bufferState(size1Ref).isOnHold());
+ } else {
+ EXPECT_FALSE(f.store.bufferState(size1Ref).isOnHold());
+ }
+ EXPECT_FALSE(f.store.bufferState(size2Ref).isOnHold());
+ f.trimHoldLists();
+ if (compactMemory) {
+ EXPECT_TRUE(f.store.bufferState(size3Ref).isFree());
+ } else {
+ EXPECT_FALSE(f.store.bufferState(size3Ref).isFree());
+ }
+ if (compactAddressSpace) {
+ EXPECT_TRUE(f.store.bufferState(size1Ref).isFree());
+ } else {
+ EXPECT_FALSE(f.store.bufferState(size1Ref).isFree());
+ }
+ EXPECT_FALSE(f.store.bufferState(size2Ref).isFree());
+}
+
+}
+
+TEST_F("require that compactWorst selects on only memory", NumberFixture(3)) {
+ testCompaction(f, true, false);
+}
+
+TEST_F("require that compactWorst selects on only address space", NumberFixture(3)) {
+ testCompaction(f, false, true);
+}
+
+TEST_F("require that compactWorst selects on both memory and address space", NumberFixture(3)) {
+ testCompaction(f, true, true);
+}
+
+TEST_F("require that compactWorst selects on neither memory nor address space", NumberFixture(3)) {
+ testCompaction(f, false, false);
+}
+
+TEST_F("require that used, onHold and dead memory usage is tracked for small arrays", NumberFixture(2))
+{
+ MemStats exp(f.store.getMemoryUsage());
+ f.add({2,2});
+ TEST_DO(f.assertMemoryUsage(exp.used(f.entrySize() * 2)));
+ f.remove({2,2});
+ TEST_DO(f.assertMemoryUsage(exp.hold(f.entrySize() * 2)));
+ f.trimHoldLists();
+ TEST_DO(f.assertMemoryUsage(exp.holdToDead(f.entrySize() * 2)));
+}
+
+TEST_F("require that used, onHold and dead memory usage is tracked for large arrays", NumberFixture(2))
+{
+ MemStats exp(f.store.getMemoryUsage());
+ f.add({3,3,3});
+ TEST_DO(f.assertMemoryUsage(exp.used(f.largeArraySize() + f.entrySize() * 3)));
+ f.remove({3,3,3});
+ TEST_DO(f.assertMemoryUsage(exp.hold(f.largeArraySize() + f.entrySize() * 3)));
+ f.trimHoldLists();
+ TEST_DO(f.assertMemoryUsage(exp.decHold(f.largeArraySize() + f.entrySize() * 3).
+ dead(f.largeArraySize())));
+}
+
+TEST_F("require that address space usage is ratio between used arrays and number of possible arrays", NumberFixture(3))
+{
+ f.add({2,2});
+ f.add({3,3,3});
+ // 1 array is reserved (buffer 0, offset 0).
+ EXPECT_EQUAL(3u, f.store.addressSpaceUsage().used());
+ EXPECT_EQUAL(1u, f.store.addressSpaceUsage().dead());
+ size_t fourgig = (1ull << 32);
+ /*
+     * The expected limit is the sum of allocated arrays for active buffers and
+ * potentially allocated arrays for free buffers. If all buffers were
+ * free then the limit would be 4 Gi.
+ * Then we subtract arrays for 4 buffers that are not free (arraySize=1,2,3 + largeArray),
+ * and add their actual number of allocated arrays (16 arrays per buffer).
+     * Note: arraySize=3 has 21 arrays as the allocated buffer is rounded up to a power of 2:
+ * 16 * 3 * sizeof(int) = 192 -> 256.
+ * allocated elements = 256 / sizeof(int) = 64.
+ * limit = 64 / 3 = 21.
+ */
+ size_t expLimit = fourgig - 4 * F1::EntryRefType::offsetSize() + 3 * 16 + 21;
+ EXPECT_EQUAL(static_cast<double>(2)/ expLimit, f.store.addressSpaceUsage().usage());
+ EXPECT_EQUAL(expLimit, f.store.addressSpaceUsage().limit());
+}
+
+TEST_F("require that offset in EntryRefT is within bounds when allocating memory buffers where wanted number of bytes is not a power of 2 and less than huge page size",
+ ByteFixture(ByteFixture::ArrayStoreType::optimizedConfigForHugePage(1023, vespalib::alloc::MemoryAllocator::HUGEPAGE_SIZE,
+ 4 * 1024, 8 * 1024, ALLOC_GROW_FACTOR)))
+{
+    // The array store config used in this test is equivalent to the one a multi-value attribute uses when initializing its multi-value mapping.
+ // See similar test in datastore_test.cpp for more details on what happens during memory allocation.
+ for (size_t i = 0; i < 1000000; ++i) {
+ f.add({1, 2, 3});
+ }
+ f.assertStoreContent();
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/tests/datastore/array_store_config/CMakeLists.txt b/vespalib/src/tests/datastore/array_store_config/CMakeLists.txt
new file mode 100644
index 00000000000..a5638462748
--- /dev/null
+++ b/vespalib/src/tests/datastore/array_store_config/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_array_store_config_test_app TEST
+ SOURCES
+ array_store_config_test.cpp
+ DEPENDS
+ vespalib
+)
+vespa_add_test(NAME vespalib_array_store_config_test_app COMMAND vespalib_array_store_config_test_app)
diff --git a/vespalib/src/tests/datastore/array_store_config/array_store_config_test.cpp b/vespalib/src/tests/datastore/array_store_config/array_store_config_test.cpp
new file mode 100644
index 00000000000..4779cf1aa70
--- /dev/null
+++ b/vespalib/src/tests/datastore/array_store_config/array_store_config_test.cpp
@@ -0,0 +1,84 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/datastore/entryref.h>
+#include <vespa/vespalib/datastore/array_store_config.h>
+
+using namespace search::datastore;
+using AllocSpec = ArrayStoreConfig::AllocSpec;
+
+constexpr float ALLOC_GROW_FACTOR = 0.2;
+
+struct Fixture
+{
+ using EntryRefType = EntryRefT<18>;
+ ArrayStoreConfig cfg;
+
+ Fixture(uint32_t maxSmallArraySize,
+ const AllocSpec &defaultSpec)
+ : cfg(maxSmallArraySize, defaultSpec) {}
+
+ Fixture(uint32_t maxSmallArraySize,
+ size_t hugePageSize,
+ size_t smallPageSize,
+ size_t minNumArraysForNewBuffer)
+ : cfg(ArrayStoreConfig::optimizeForHugePage(maxSmallArraySize, hugePageSize, smallPageSize,
+ sizeof(int), EntryRefType::offsetSize(),
+ minNumArraysForNewBuffer,
+ ALLOC_GROW_FACTOR)) { }
+ void assertSpec(size_t arraySize, uint32_t numArraysForNewBuffer) {
+ assertSpec(arraySize, AllocSpec(0, EntryRefType::offsetSize(),
+ numArraysForNewBuffer, ALLOC_GROW_FACTOR));
+ }
+ void assertSpec(size_t arraySize, const AllocSpec &expSpec) {
+ const ArrayStoreConfig::AllocSpec &actSpec = cfg.specForSize(arraySize);
+ EXPECT_EQUAL(expSpec.minArraysInBuffer, actSpec.minArraysInBuffer);
+ EXPECT_EQUAL(expSpec.maxArraysInBuffer, actSpec.maxArraysInBuffer);
+ EXPECT_EQUAL(expSpec.numArraysForNewBuffer, actSpec.numArraysForNewBuffer);
+ EXPECT_EQUAL(expSpec.allocGrowFactor, actSpec.allocGrowFactor);
+ }
+};
+
+AllocSpec
+makeSpec(size_t minArraysInBuffer,
+ size_t maxArraysInBuffer,
+ size_t numArraysForNewBuffer)
+{
+ return AllocSpec(minArraysInBuffer, maxArraysInBuffer, numArraysForNewBuffer, ALLOC_GROW_FACTOR);
+}
+
+constexpr size_t KB = 1024;
+constexpr size_t MB = KB * KB;
+
+TEST_F("require that default allocation spec is given for all array sizes", Fixture(3, makeSpec(4, 32, 8)))
+{
+ EXPECT_EQUAL(3u, f.cfg.maxSmallArraySize());
+ TEST_DO(f.assertSpec(0, makeSpec(4, 32, 8)));
+ TEST_DO(f.assertSpec(1, makeSpec(4, 32, 8)));
+ TEST_DO(f.assertSpec(2, makeSpec(4, 32, 8)));
+ TEST_DO(f.assertSpec(3, makeSpec(4, 32, 8)));
+}
+
+TEST_F("require that we can generate config optimized for a given huge page", Fixture(1024,
+ 2 * MB,
+ 4 * KB,
+ 8 * KB))
+{
+ EXPECT_EQUAL(1024u, f.cfg.maxSmallArraySize());
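+    // The expected numArraysForNewBuffer shrinks roughly so that a new buffer fills the 2 MiB huge page,
+    // but never goes above EntryRefType::offsetSize() (256 * KB) or below minNumArraysForNewBuffer (8 * KB).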
+ TEST_DO(f.assertSpec(0, 8 * KB)); // large arrays
+ TEST_DO(f.assertSpec(1, 256 * KB));
+ TEST_DO(f.assertSpec(2, 256 * KB));
+ TEST_DO(f.assertSpec(3, 168 * KB));
+ TEST_DO(f.assertSpec(4, 128 * KB));
+ TEST_DO(f.assertSpec(5, 100 * KB));
+ TEST_DO(f.assertSpec(6, 84 * KB));
+
+ TEST_DO(f.assertSpec(32, 16 * KB));
+ TEST_DO(f.assertSpec(33, 12 * KB));
+ TEST_DO(f.assertSpec(42, 12 * KB));
+ TEST_DO(f.assertSpec(43, 8 * KB));
+ TEST_DO(f.assertSpec(1022, 8 * KB));
+ TEST_DO(f.assertSpec(1023, 8 * KB));
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/tests/datastore/buffer_type/CMakeLists.txt b/vespalib/src/tests/datastore/buffer_type/CMakeLists.txt
new file mode 100644
index 00000000000..d3618c998e3
--- /dev/null
+++ b/vespalib/src/tests/datastore/buffer_type/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_buffer_type_test_app TEST
+ SOURCES
+ buffer_type_test.cpp
+ DEPENDS
+ vespalib
+)
+vespa_add_test(NAME vespalib_buffer_type_test_app COMMAND vespalib_buffer_type_test_app)
diff --git a/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp b/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp
new file mode 100644
index 00000000000..6a51b9192ce
--- /dev/null
+++ b/vespalib/src/tests/datastore/buffer_type/buffer_type_test.cpp
@@ -0,0 +1,116 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/datastore/buffer_type.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using namespace search::datastore;
+
+using IntBufferType = BufferType<int>;
+constexpr uint32_t ARRAYS_SIZE(4);
+constexpr uint32_t MAX_ARRAYS(128);
+constexpr uint32_t NUM_ARRAYS_FOR_NEW_BUFFER(0);
+
+struct Setup {
+ uint32_t _minArrays;
+ size_t _usedElems;
+ size_t _neededElems;
+ uint32_t _bufferId;
+ float _allocGrowFactor;
+ bool _resizing;
+ Setup()
+ : _minArrays(0),
+ _usedElems(0),
+ _neededElems(0),
+ _bufferId(1),
+ _allocGrowFactor(0.5),
+ _resizing(false)
+ {}
+ Setup &minArrays(uint32_t value) { _minArrays = value; return *this; }
+ Setup &used(size_t value) { _usedElems = value; return *this; }
+ Setup &needed(size_t value) { _neededElems = value; return *this; }
+ Setup &bufferId(uint32_t value) { _bufferId = value; return *this; }
+ Setup &resizing(bool value) { _resizing = value; return *this; }
+};
+
+struct Fixture {
+ Setup setup;
+ IntBufferType bufferType;
+ size_t deadElems;
+ int buffer;
+ Fixture(const Setup &setup_)
+ : setup(setup_),
+ bufferType(ARRAYS_SIZE, setup._minArrays, MAX_ARRAYS, NUM_ARRAYS_FOR_NEW_BUFFER, setup._allocGrowFactor),
+ deadElems(0),
+ buffer(0)
+ {}
+ ~Fixture() {
+ bufferType.onHold(&setup._usedElems);
+ bufferType.onFree(setup._usedElems);
+ }
+ void onActive() {
+ bufferType.onActive(setup._bufferId, &setup._usedElems, deadElems, &buffer);
+ }
+ size_t arraysToAlloc() {
+ return bufferType.calcArraysToAlloc(setup._bufferId, setup._neededElems, setup._resizing);
+ }
+};
+
+void
+assertArraysToAlloc(size_t exp, const Setup &setup)
+{
+ Fixture f(setup);
+ f.onActive();
+ EXPECT_EQUAL(exp, f.arraysToAlloc());
+}
+
+TEST("require that complete arrays are allocated")
+{
+ TEST_DO(assertArraysToAlloc(1, Setup().needed(1)));
+ TEST_DO(assertArraysToAlloc(1, Setup().needed(2)));
+ TEST_DO(assertArraysToAlloc(1, Setup().needed(3)));
+ TEST_DO(assertArraysToAlloc(1, Setup().needed(4)));
+ TEST_DO(assertArraysToAlloc(2, Setup().needed(5)));
+}
+
+TEST("require that reserved elements are taken into account when not resizing")
+{
+ TEST_DO(assertArraysToAlloc(2, Setup().needed(1).bufferId(0)));
+ TEST_DO(assertArraysToAlloc(2, Setup().needed(4).bufferId(0)));
+ TEST_DO(assertArraysToAlloc(3, Setup().needed(5).bufferId(0)));
+}
+
+TEST("require that arrays to alloc is based on currently used elements (no resizing)")
+{
+ TEST_DO(assertArraysToAlloc(2, Setup().used(4 * 4).needed(4)));
+ TEST_DO(assertArraysToAlloc(4, Setup().used(8 * 4).needed(4)));
+}
+
+TEST("require that arrays to alloc is based on currently used elements (with resizing)")
+{
+ TEST_DO(assertArraysToAlloc(4 + 2, Setup().used(4 * 4).needed(4).resizing(true)));
+ TEST_DO(assertArraysToAlloc(8 + 4, Setup().used(8 * 4).needed(4).resizing(true)));
+ TEST_DO(assertArraysToAlloc(4 + 3, Setup().used(4 * 4).needed(3 * 4).resizing(true)));
+}
+
+TEST("require that arrays to alloc always contain elements needed")
+{
+ TEST_DO(assertArraysToAlloc(2, Setup().used(4 * 4).needed(2 * 4)));
+ TEST_DO(assertArraysToAlloc(3, Setup().used(4 * 4).needed(3 * 4)));
+ TEST_DO(assertArraysToAlloc(4, Setup().used(4 * 4).needed(4 * 4)));
+}
+
+TEST("require that arrays to alloc is capped to max arrays")
+{
+ TEST_DO(assertArraysToAlloc(127, Setup().used(254 * 4).needed(4)));
+ TEST_DO(assertArraysToAlloc(128, Setup().used(256 * 4).needed(4)));
+ TEST_DO(assertArraysToAlloc(128, Setup().used(258 * 4).needed(8)));
+}
+
+TEST("require that arrays to alloc is capped to min arrays")
+{
+ TEST_DO(assertArraysToAlloc(16, Setup().used(30 * 4).needed(4).minArrays(16)));
+ TEST_DO(assertArraysToAlloc(16, Setup().used(32 * 4).needed(4).minArrays(16)));
+ TEST_DO(assertArraysToAlloc(17, Setup().used(34 * 4).needed(4).minArrays(16)));
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/tests/datastore/datastore/CMakeLists.txt b/vespalib/src/tests/datastore/datastore/CMakeLists.txt
new file mode 100644
index 00000000000..eb3e0a4576a
--- /dev/null
+++ b/vespalib/src/tests/datastore/datastore/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_datastore_test_app TEST
+ SOURCES
+ datastore_test.cpp
+ DEPENDS
+ vespalib
+ gtest
+)
+vespa_add_test(NAME vespalib_datastore_test_app COMMAND vespalib_datastore_test_app)
diff --git a/vespalib/src/tests/datastore/datastore/datastore_test.cpp b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
new file mode 100644
index 00000000000..0981280ac23
--- /dev/null
+++ b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
@@ -0,0 +1,584 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/datastore/datastore.h>
+#include <vespa/vespalib/datastore/datastore.hpp>
+#include <vespa/vespalib/gtest/gtest.h>
+#include <vespa/vespalib/test/insertion_operators.h>
+
+#include <vespa/log/log.h>
+LOG_SETUP("datastore_test");
+
+namespace search::datastore {
+
+using vespalib::alloc::MemoryAllocator;
+
+struct IntReclaimer
+{
+ static void reclaim(int *) {}
+};
+
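+// Test store that re-exposes selected protected DataStore operations (hold, trim, free lists, buffer switching).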
+class MyStore : public DataStore<int, EntryRefT<3, 2> > {
+private:
+ using ParentType = DataStore<int, EntryRefT<3, 2> >;
+ using ParentType::_activeBufferIds;
+public:
+ MyStore() {}
+
+ void
+ holdBuffer(uint32_t bufferId)
+ {
+ ParentType::holdBuffer(bufferId);
+ }
+
+ void
+ holdElem(EntryRef ref, uint64_t len)
+ {
+ ParentType::holdElem(ref, len);
+ }
+
+ void
+ transferHoldLists(generation_t generation)
+ {
+ ParentType::transferHoldLists(generation);
+ }
+
+ void trimElemHoldList(generation_t usedGen) override {
+ ParentType::trimElemHoldList(usedGen);
+ }
+ void incDead(EntryRef ref, uint64_t dead) {
+ ParentType::incDead(ref, dead);
+ }
+ void ensureBufferCapacity(size_t sizeNeeded) {
+ ParentType::ensureBufferCapacity(0, sizeNeeded);
+ }
+ void enableFreeLists() {
+ ParentType::enableFreeLists();
+ }
+
+ void
+ switchActiveBuffer()
+ {
+ ParentType::switchActiveBuffer(0, 0u);
+ }
+ size_t activeBufferId() const { return _activeBufferIds[0]; }
+};
+
+
+using GrowthStats = std::vector<int>;
+
+constexpr float ALLOC_GROW_FACTOR = 0.4;
+constexpr size_t HUGE_PAGE_ARRAY_SIZE = (MemoryAllocator::HUGEPAGE_SIZE / sizeof(int));
+
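+// Test helper that keeps allocating entries and records how buffer capacities grow,
+// both across consecutive buffers and within the first buffer.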
+template <typename DataType, typename RefType>
+class GrowStore
+{
+ using Store = DataStoreT<RefType>;
+ Store _store;
+ BufferType<DataType> _firstType;
+ BufferType<DataType> _type;
+ uint32_t _typeId;
+public:
+ GrowStore(size_t arraySize, size_t minArrays, size_t maxArrays, size_t numArraysForNewBuffer)
+ : _store(),
+ _firstType(1, 1, maxArrays, 0, ALLOC_GROW_FACTOR),
+ _type(arraySize, minArrays, maxArrays, numArraysForNewBuffer, ALLOC_GROW_FACTOR),
+ _typeId(0)
+ {
+ (void) _store.addType(&_firstType);
+ _typeId = _store.addType(&_type);
+ _store.initActiveBuffers();
+ }
+ ~GrowStore() { _store.dropBuffers(); }
+
+ Store &store() { return _store; }
+ uint32_t typeId() const { return _typeId; }
+
+ GrowthStats getGrowthStats(size_t bufs) {
+ GrowthStats sizes;
+ int prevBufferId = -1;
+ while (sizes.size() < bufs) {
+ RefType iRef = (_type.getArraySize() == 1) ?
+ (_store.template allocator<DataType>(_typeId).alloc().ref) :
+ (_store.template allocator<DataType>(_typeId).allocArray(_type.getArraySize()).ref);
+ int bufferId = iRef.bufferId();
+ if (bufferId != prevBufferId) {
+ if (prevBufferId >= 0) {
+ const auto &state = _store.getBufferState(prevBufferId);
+ sizes.push_back(state.capacity());
+ }
+ prevBufferId = bufferId;
+ }
+ }
+ return sizes;
+ }
+ GrowthStats getFirstBufGrowStats() {
+ GrowthStats sizes;
+ int i = 0;
+ int prevBuffer = -1;
+ size_t prevAllocated = _store.getMemoryUsage().allocatedBytes();
+ for (;;) {
+ RefType iRef = _store.template allocator<DataType>(_typeId).alloc().ref;
+ size_t allocated = _store.getMemoryUsage().allocatedBytes();
+ if (allocated != prevAllocated) {
+ sizes.push_back(i);
+ prevAllocated = allocated;
+ }
+ int buffer = iRef.bufferId();
+ if (buffer != prevBuffer) {
+ if (prevBuffer >= 0) {
+ return sizes;
+ }
+ prevBuffer = buffer;
+ }
+ ++i;
+ }
+ }
+ vespalib::MemoryUsage getMemoryUsage() const { return _store.getMemoryUsage(); }
+};
+
+using MyRef = MyStore::RefType;
+
+void
+assertMemStats(const DataStoreBase::MemStats &exp,
+ const DataStoreBase::MemStats &act)
+{
+ EXPECT_EQ(exp._allocElems, act._allocElems);
+ EXPECT_EQ(exp._usedElems, act._usedElems);
+ EXPECT_EQ(exp._deadElems, act._deadElems);
+ EXPECT_EQ(exp._holdElems, act._holdElems);
+ EXPECT_EQ(exp._freeBuffers, act._freeBuffers);
+ EXPECT_EQ(exp._activeBuffers, act._activeBuffers);
+ EXPECT_EQ(exp._holdBuffers, act._holdBuffers);
+}
+
+TEST(DataStoreTest, require_that_entry_ref_is_working)
+{
+ using MyRefType = EntryRefT<22>;
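+    // EntryRefT<22> uses 22 bits for the offset and the remaining 10 bits for the buffer id:
+    // 2^22 = 4194304 offsets per buffer, 2^10 = 1024 buffers.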
+ EXPECT_EQ(4194304u, MyRefType::offsetSize());
+ EXPECT_EQ(1024u, MyRefType::numBuffers());
+ {
+ MyRefType r(0, 0);
+ EXPECT_EQ(0u, r.offset());
+ EXPECT_EQ(0u, r.bufferId());
+ }
+ {
+ MyRefType r(237, 13);
+ EXPECT_EQ(237u, r.offset());
+ EXPECT_EQ(13u, r.bufferId());
+ }
+ {
+ MyRefType r(4194303, 1023);
+ EXPECT_EQ(4194303u, r.offset());
+ EXPECT_EQ(1023u, r.bufferId());
+ }
+ {
+ MyRefType r1(6498, 76);
+ MyRefType r2(r1);
+ EXPECT_EQ(r1.offset(), r2.offset());
+ EXPECT_EQ(r1.bufferId(), r2.bufferId());
+ }
+}
+
+TEST(DataStoreTest, require_that_aligned_entry_ref_is_working)
+{
+    using MyRefType = AlignedEntryRefT<22, 2>; // 4 byte alignment
+ EXPECT_EQ(4 * 4194304u, MyRefType::offsetSize());
+ EXPECT_EQ(1024u, MyRefType::numBuffers());
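+    // With 4 byte alignment offsets are rounded up to multiples of 4, extending the addressable range to 4 * 4194304.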
+ EXPECT_EQ(0u, MyRefType::align(0));
+ EXPECT_EQ(4u, MyRefType::align(1));
+ EXPECT_EQ(4u, MyRefType::align(2));
+ EXPECT_EQ(4u, MyRefType::align(3));
+ EXPECT_EQ(4u, MyRefType::align(4));
+ EXPECT_EQ(8u, MyRefType::align(5));
+ {
+ MyRefType r(0, 0);
+ EXPECT_EQ(0u, r.offset());
+ EXPECT_EQ(0u, r.bufferId());
+ }
+ {
+ MyRefType r(237, 13);
+ EXPECT_EQ(MyRefType::align(237), r.offset());
+ EXPECT_EQ(13u, r.bufferId());
+ }
+ {
+ MyRefType r(MyRefType::offsetSize() - 4, 1023);
+ EXPECT_EQ(MyRefType::align(MyRefType::offsetSize() - 4), r.offset());
+ EXPECT_EQ(1023u, r.bufferId());
+ }
+}
+
+TEST(DataStoreTest, require_that_entries_can_be_added_and_retrieved)
+{
+ using IntStore = DataStore<int>;
+ IntStore ds;
+ EntryRef r1 = ds.addEntry(10);
+ EntryRef r2 = ds.addEntry(20);
+ EntryRef r3 = ds.addEntry(30);
+ EXPECT_EQ(1u, IntStore::RefType(r1).offset());
+ EXPECT_EQ(2u, IntStore::RefType(r2).offset());
+ EXPECT_EQ(3u, IntStore::RefType(r3).offset());
+ EXPECT_EQ(0u, IntStore::RefType(r1).bufferId());
+ EXPECT_EQ(0u, IntStore::RefType(r2).bufferId());
+ EXPECT_EQ(0u, IntStore::RefType(r3).bufferId());
+ EXPECT_EQ(10, ds.getEntry(r1));
+ EXPECT_EQ(20, ds.getEntry(r2));
+ EXPECT_EQ(30, ds.getEntry(r3));
+}
+
+TEST(DataStoreTest, require_that_add_entry_triggers_change_of_buffer)
+{
+ using Store = DataStore<uint64_t, EntryRefT<10, 10> >;
+ Store s;
+ uint64_t num = 0;
+ uint32_t lastId = 0;
+ uint64_t lastNum = 0;
+ for (;;++num) {
+ EntryRef r = s.addEntry(num);
+ EXPECT_EQ(num, s.getEntry(r));
+ uint32_t bufferId = Store::RefType(r).bufferId();
+ if (bufferId > lastId) {
+ LOG(info, "Changed to bufferId %u after %" PRIu64 " nums", bufferId, num);
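+            // The first buffer holds one entry less than offsetSize() because the ref at offset 0 is reserved.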
+ EXPECT_EQ(Store::RefType::offsetSize() - (lastId == 0), num - lastNum);
+ lastId = bufferId;
+ lastNum = num;
+ }
+ if (bufferId == 2) {
+ break;
+ }
+ }
+ EXPECT_EQ(Store::RefType::offsetSize() * 2 - 1, num);
+ LOG(info, "Added %" PRIu64 " nums in 2 buffers", num);
+}
+
+TEST(DataStoreTest, require_that_we_can_hold_and_trim_buffers)
+{
+ MyStore s;
+ EXPECT_EQ(0u, MyRef(s.addEntry(1)).bufferId());
+ s.switchActiveBuffer();
+ EXPECT_EQ(1u, s.activeBufferId());
+ s.holdBuffer(0); // hold last buffer
+ s.transferHoldLists(10);
+
+ EXPECT_EQ(1u, MyRef(s.addEntry(2)).bufferId());
+ s.switchActiveBuffer();
+ EXPECT_EQ(2u, s.activeBufferId());
+ s.holdBuffer(1); // hold last buffer
+ s.transferHoldLists(20);
+
+ EXPECT_EQ(2u, MyRef(s.addEntry(3)).bufferId());
+ s.switchActiveBuffer();
+ EXPECT_EQ(3u, s.activeBufferId());
+ s.holdBuffer(2); // hold last buffer
+ s.transferHoldLists(30);
+
+ EXPECT_EQ(3u, MyRef(s.addEntry(4)).bufferId());
+ s.holdBuffer(3); // hold current buffer
+ s.transferHoldLists(40);
+
+ EXPECT_TRUE(s.getBufferState(0).size() != 0);
+ EXPECT_TRUE(s.getBufferState(1).size() != 0);
+ EXPECT_TRUE(s.getBufferState(2).size() != 0);
+ EXPECT_TRUE(s.getBufferState(3).size() != 0);
+ s.trimHoldLists(11);
+ EXPECT_TRUE(s.getBufferState(0).size() == 0);
+ EXPECT_TRUE(s.getBufferState(1).size() != 0);
+ EXPECT_TRUE(s.getBufferState(2).size() != 0);
+ EXPECT_TRUE(s.getBufferState(3).size() != 0);
+
+ s.switchActiveBuffer();
+ EXPECT_EQ(0u, s.activeBufferId());
+ EXPECT_EQ(0u, MyRef(s.addEntry(5)).bufferId());
+ s.trimHoldLists(41);
+ EXPECT_TRUE(s.getBufferState(0).size() != 0);
+ EXPECT_TRUE(s.getBufferState(1).size() == 0);
+ EXPECT_TRUE(s.getBufferState(2).size() == 0);
+ EXPECT_TRUE(s.getBufferState(3).size() == 0);
+}
+
+TEST(DataStoreTest, require_that_we_can_hold_and_trim_elements)
+{
+ MyStore s;
+ MyRef r1 = s.addEntry(1);
+ s.holdElem(r1, 1);
+ s.transferHoldLists(10);
+ MyRef r2 = s.addEntry(2);
+ s.holdElem(r2, 1);
+ s.transferHoldLists(20);
+ MyRef r3 = s.addEntry(3);
+ s.holdElem(r3, 1);
+ s.transferHoldLists(30);
+ EXPECT_EQ(1, s.getEntry(r1));
+ EXPECT_EQ(2, s.getEntry(r2));
+ EXPECT_EQ(3, s.getEntry(r3));
+ s.trimElemHoldList(11);
+ EXPECT_EQ(0, s.getEntry(r1));
+ EXPECT_EQ(2, s.getEntry(r2));
+ EXPECT_EQ(3, s.getEntry(r3));
+ s.trimElemHoldList(31);
+ EXPECT_EQ(0, s.getEntry(r1));
+ EXPECT_EQ(0, s.getEntry(r2));
+ EXPECT_EQ(0, s.getEntry(r3));
+}
+
+using IntHandle = Handle<int>;
+
+MyRef
+to_ref(IntHandle handle)
+{
+ return MyRef(handle.ref);
+}
+
+std::ostream&
+operator<<(std::ostream &os, const IntHandle &rhs)
+{
+ MyRef ref(rhs.ref);
+ os << "{ref.bufferId=" << ref.bufferId() << ", ref.offset=" << ref.offset() << ", data=" << rhs.data << "}";
+ return os;
+}
+
+void
+expect_successive_handles(const IntHandle &first, const IntHandle &second)
+{
+ EXPECT_EQ(to_ref(first).offset() + 1, to_ref(second).offset());
+}
+
+TEST(DataStoreTest, require_that_we_can_use_free_lists)
+{
+ MyStore s;
+ s.enableFreeLists();
+ auto allocator = s.freeListAllocator<IntReclaimer>();
+ auto h1 = allocator.alloc(1);
+ s.holdElem(h1.ref, 1);
+ s.transferHoldLists(10);
+ auto h2 = allocator.alloc(2);
+ expect_successive_handles(h1, h2);
+ s.holdElem(h2.ref, 1);
+ s.transferHoldLists(20);
+ s.trimElemHoldList(11);
+ auto h3 = allocator.alloc(3); // reuse h1.ref
+ EXPECT_EQ(h1, h3);
+ auto h4 = allocator.alloc(4);
+ expect_successive_handles(h2, h4);
+ s.trimElemHoldList(21);
+ auto h5 = allocator.alloc(5); // reuse h2.ref
+ EXPECT_EQ(h2, h5);
+ auto h6 = allocator.alloc(6);
+ expect_successive_handles(h4, h6);
+ EXPECT_EQ(3, s.getEntry(h1.ref));
+ EXPECT_EQ(5, s.getEntry(h2.ref));
+ EXPECT_EQ(3, s.getEntry(h3.ref));
+ EXPECT_EQ(4, s.getEntry(h4.ref));
+ EXPECT_EQ(5, s.getEntry(h5.ref));
+ EXPECT_EQ(6, s.getEntry(h6.ref));
+}
+
+TEST(DataStoreTest, require_that_we_can_use_free_lists_with_raw_allocator)
+{
+ GrowStore<int, MyRef> grow_store(3, 64, 64, 64);
+ auto &s = grow_store.store();
+ s.enableFreeLists();
+ auto allocator = s.freeListRawAllocator<int>(grow_store.typeId());
+
+ auto h1 = allocator.alloc(3);
+ auto h2 = allocator.alloc(3);
+ expect_successive_handles(h1, h2);
+ s.holdElem(h1.ref, 3);
+ s.holdElem(h2.ref, 3);
+ s.transferHoldLists(10);
+ s.trimElemHoldList(11);
+
+ auto h3 = allocator.alloc(3); // reuse h2.ref from free list
+ EXPECT_EQ(h2, h3);
+
+ auto h4 = allocator.alloc(3); // reuse h1.ref from free list
+ EXPECT_EQ(h1, h4);
+
+ auto h5 = allocator.alloc(3);
+ expect_successive_handles(h2, h5);
+ expect_successive_handles(h3, h5);
+}
+
+TEST(DataStoreTest, require_that_memory_stats_are_calculated)
+{
+ MyStore s;
+ DataStoreBase::MemStats m;
+ m._allocElems = MyRef::offsetSize();
+ m._usedElems = 1; // ref = 0 is reserved
+ m._deadElems = 1; // ref = 0 is reserved
+ m._holdElems = 0;
+ m._activeBuffers = 1;
+ m._freeBuffers = MyRef::numBuffers() - 1;
+ m._holdBuffers = 0;
+ assertMemStats(m, s.getMemStats());
+
+ // add entry
+ MyRef r = s.addEntry(10);
+ m._usedElems++;
+ assertMemStats(m, s.getMemStats());
+
+ // inc dead
+ s.incDead(r, 1);
+ m._deadElems++;
+ assertMemStats(m, s.getMemStats());
+
+ // hold buffer
+ s.addEntry(20);
+ s.addEntry(30);
+ s.holdBuffer(r.bufferId());
+ s.transferHoldLists(100);
+ m._usedElems += 2;
+ m._holdElems += 2; // used - dead
+ m._activeBuffers--;
+ m._holdBuffers++;
+ assertMemStats(m, s.getMemStats());
+
+ // new active buffer
+ s.switchActiveBuffer();
+ s.addEntry(40);
+ m._allocElems += MyRef::offsetSize();
+ m._usedElems++;
+ m._activeBuffers++;
+ m._freeBuffers--;
+
+ // trim hold buffer
+ s.trimHoldLists(101);
+ m._allocElems -= MyRef::offsetSize();
+ m._usedElems = 1;
+ m._deadElems = 0;
+ m._holdElems = 0;
+ m._freeBuffers = MyRef::numBuffers() - 1;
+ m._holdBuffers = 0;
+ assertMemStats(m, s.getMemStats());
+}
+
+TEST(DataStoreTest, require_that_memory_usage_is_calculated)
+{
+ MyStore s;
+ MyRef r = s.addEntry(10);
+ s.addEntry(20);
+ s.addEntry(30);
+ s.addEntry(40);
+ s.incDead(r, 1);
+ s.holdBuffer(r.bufferId());
+ s.transferHoldLists(100);
+ vespalib::MemoryUsage m = s.getMemoryUsage();
+ EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
+ EXPECT_EQ(5 * sizeof(int), m.usedBytes());
+ EXPECT_EQ(2 * sizeof(int), m.deadBytes());
+ EXPECT_EQ(3 * sizeof(int), m.allocatedBytesOnHold());
+ s.trimHoldLists(101);
+}
+
+TEST(DataStoreTest, require_that_we_can_disable_element_hold_list)
+{
+ MyStore s;
+ MyRef r1 = s.addEntry(10);
+ MyRef r2 = s.addEntry(20);
+ MyRef r3 = s.addEntry(30);
+ (void) r3;
+ vespalib::MemoryUsage m = s.getMemoryUsage();
+ EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
+ EXPECT_EQ(4 * sizeof(int), m.usedBytes());
+ EXPECT_EQ(1 * sizeof(int), m.deadBytes());
+ EXPECT_EQ(0 * sizeof(int), m.allocatedBytesOnHold());
+ s.holdElem(r1, 1);
+ m = s.getMemoryUsage();
+ EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
+ EXPECT_EQ(4 * sizeof(int), m.usedBytes());
+ EXPECT_EQ(1 * sizeof(int), m.deadBytes());
+ EXPECT_EQ(1 * sizeof(int), m.allocatedBytesOnHold());
+ s.disableElemHoldList();
+ s.holdElem(r2, 1);
+ m = s.getMemoryUsage();
+ EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
+ EXPECT_EQ(4 * sizeof(int), m.usedBytes());
+ EXPECT_EQ(2 * sizeof(int), m.deadBytes());
+ EXPECT_EQ(1 * sizeof(int), m.allocatedBytesOnHold());
+ s.transferHoldLists(100);
+ s.trimHoldLists(101);
+}
+
+using IntGrowStore = GrowStore<int, EntryRefT<24>>;
+
+namespace {
+
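+// Grows an int store with array size 1 and verifies the capacity of each new buffer, the points at
+// which the first buffer grows, and the initial number of allocated bytes (which in the cases below
+// equals (minArrays + 1) * sizeof(int), the extra element coming from the first buffer type).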
+void assertGrowStats(GrowthStats expSizes,
+ GrowthStats expFirstBufSizes,
+ size_t expInitMemUsage,
+ size_t minArrays, size_t numArraysForNewBuffer, size_t maxArrays = 128)
+{
+ EXPECT_EQ(expSizes, IntGrowStore(1, minArrays, maxArrays, numArraysForNewBuffer).getGrowthStats(expSizes.size()));
+ EXPECT_EQ(expFirstBufSizes, IntGrowStore(1, minArrays, maxArrays, numArraysForNewBuffer).getFirstBufGrowStats());
+ EXPECT_EQ(expInitMemUsage, IntGrowStore(1, minArrays, maxArrays, numArraysForNewBuffer).getMemoryUsage().allocatedBytes());
+}
+
+}
+
+TEST(DataStoreTest, require_that_buffer_growth_works)
+{
+ // Always switch to new buffer, min size 4
+ assertGrowStats({ 4, 4, 4, 4, 8, 16, 16, 32, 64, 64 },
+ { 4 }, 20, 4, 0);
+ // Resize if buffer size is less than 4, min size 0
+ assertGrowStats({ 4, 4, 4, 4, 8, 16, 16, 32, 64, 64 },
+ { 0, 1, 2, 4 }, 4, 0, 4);
+ // Always switch to new buffer, min size 16
+ assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
+ { 16 }, 68, 16, 0);
+ // Resize if buffer size is less than 16, min size 0
+ assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
+ { 0, 1, 2, 4, 8, 16 }, 4, 0, 16);
+ // Resize if buffer size is less than 16, min size 4
+ assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
+ { 4, 8, 16 }, 20, 4, 16);
+ // Always switch to new buffer, min size 0
+ assertGrowStats({ 1, 1, 1, 1, 1, 2, 2, 4, 8, 8, 16, 32 },
+ { 0, 1 }, 4, 0, 0);
+
+ // Buffers with sizes larger than the huge page size of the mmap allocator.
+ ASSERT_EQ(524288u, HUGE_PAGE_ARRAY_SIZE);
+ assertGrowStats({ 262144, 262144, 262144, 524288, 524288, 524288 * 2, 524288 * 3, 524288 * 4, 524288 * 5, 524288 * 5 },
+ { 0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144 },
+ 4, 0, HUGE_PAGE_ARRAY_SIZE / 2, HUGE_PAGE_ARRAY_SIZE * 5);
+}
+
+using RefType15 = EntryRefT<15>; // offsetSize=32768
+
+namespace {
+
+template <typename DataType>
+void assertGrowStats(GrowthStats expSizes, uint32_t arraySize)
+{
+ uint32_t minArrays = 2048;
+ uint32_t maxArrays = RefType15::offsetSize();
+ uint32_t numArraysForNewBuffer = 2048;
+ GrowStore<DataType, RefType15> store(arraySize, minArrays, maxArrays, numArraysForNewBuffer);
+ EXPECT_EQ(expSizes, store.getGrowthStats(expSizes.size()));
+}
+
+}
+
+TEST(DataStoreTest, require_that_offset_in_EntryRefT_is_within_bounds_when_allocating_memory_buffers_where_wanted_number_of_bytes_is_not_a_power_of_2_and_less_than_huge_page_size)
+{
+ /*
+ * When allocating new memory buffers for the data store the following happens (ref. calcAllocation() in bufferstate.cpp):
+ * 1) Calculate how many arrays to alloc.
+ * In this case we alloc a minimum of 2048 and a maximum of 32768.
+ * 2) Calculate how many bytes to alloc: arraysToAlloc * arraySize * elementSize.
+ * In this case elementSize is (1 or 4) and arraySize varies (3, 5, 7).
+ * 3) Round up bytes to alloc to match the underlying allocator (power of 2 if less than huge page size):
+ * After this we might end up with more bytes than the offset in EntryRef can handle. In this case this is 32768.
+ * 4) Cap bytes to alloc to the max offset EntryRef can handle.
+ * The max bytes to alloc is: maxArrays * arraySize * elementSize.
+ */
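+    // The capped sizes at the end correspond to maxArrays * arraySize:
+    // 32768 * 3 = 98304, 32768 * 5 = 163840 and 32768 * 7 = 229376.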
+ assertGrowStats<uint8_t>({8192,8192,8192,16384,16384,32768,65536,65536,98304,98304,98304,98304}, 3);
+ assertGrowStats<uint8_t>({16384,16384,16384,32768,32768,65536,131072,131072,163840,163840,163840,163840}, 5);
+ assertGrowStats<uint8_t>({16384,16384,16384,32768,32768,65536,131072,131072,229376,229376,229376,229376}, 7);
+ assertGrowStats<uint32_t>({8192,8192,8192,16384,16384,32768,65536,65536,98304,98304,98304,98304}, 3);
+ assertGrowStats<uint32_t>({16384,16384,16384,32768,32768,65536,131072,131072,163840,163840,163840,163840}, 5);
+ assertGrowStats<uint32_t>({16384,16384,16384,32768,32768,65536,131072,131072,229376,229376,229376,229376}, 7);
+}
+
+}
+
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/vespalib/src/tests/datastore/unique_store/CMakeLists.txt b/vespalib/src/tests/datastore/unique_store/CMakeLists.txt
new file mode 100644
index 00000000000..dd200018448
--- /dev/null
+++ b/vespalib/src/tests/datastore/unique_store/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_unique_store_test_app TEST
+ SOURCES
+ unique_store_test.cpp
+ DEPENDS
+ vespalib
+)
+vespa_add_test(NAME vespalib_unique_store_test_app COMMAND vespalib_unique_store_test_app)
diff --git a/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp b/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp
new file mode 100644
index 00000000000..ec3a3040167
--- /dev/null
+++ b/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp
@@ -0,0 +1,267 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/log/log.h>
+LOG_SETUP("unique_store_test");
+#include <vespa/vespalib/datastore/unique_store.hpp>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/test/datastore/memstats.h>
+#include <vespa/vespalib/test/insertion_operators.h>
+#include <vespa/vespalib/util/traits.h>
+#include <vector>
+
+using namespace search::datastore;
+using vespalib::MemoryUsage;
+using vespalib::ArrayRef;
+using generation_t = vespalib::GenerationHandler::generation_t;
+using MemStats = search::datastore::test::MemStats;
+
+template <typename EntryT, typename RefT = EntryRefT<22> >
+struct Fixture
+{
+ using EntryRefType = RefT;
+ using UniqueStoreType = UniqueStore<EntryT, RefT>;
+ using UniqueStoreAddResult = typename UniqueStoreType::AddResult;
+ using value_type = EntryT;
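+    // Reference store mirroring the expected content: maps EntryRef -> (value, reference count).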
+ using ReferenceStore = std::map<EntryRef, std::pair<EntryT,uint32_t>>;
+
+ UniqueStoreType store;
+ ReferenceStore refStore;
+ generation_t generation;
+ Fixture()
+ : store(),
+ refStore(),
+ generation(1)
+ {}
+ void assertAdd(const EntryT &input) {
+ EntryRef ref = add(input);
+ assertGet(ref, input);
+ }
+ EntryRef add(const EntryT &input) {
+ UniqueStoreAddResult addResult = store.add(input);
+ EntryRef result = addResult.ref();
+ auto insres = refStore.insert(std::make_pair(result, std::make_pair(input, 1u)));
+ EXPECT_EQUAL(insres.second, addResult.inserted());
+ if (!insres.second) {
+ ++insres.first->second.second;
+ }
+ return result;
+ }
+ void alignRefStore(EntryRef ref, const EntryT &input, uint32_t refcnt) {
+ if (refcnt > 0) {
+ auto insres = refStore.insert(std::make_pair(ref, std::make_pair(input, refcnt)));
+ if (!insres.second) {
+ insres.first->second.second = refcnt;
+ }
+ } else {
+ refStore.erase(ref);
+ }
+ }
+ void assertGet(EntryRef ref, const EntryT &exp) const {
+ EntryT act = store.get(ref);
+ EXPECT_EQUAL(exp, act);
+ }
+ void remove(EntryRef ref) {
+ ASSERT_EQUAL(1u, refStore.count(ref));
+ store.remove(ref);
+ if (refStore[ref].second > 1) {
+ --refStore[ref].second;
+ } else {
+ refStore.erase(ref);
+ }
+ }
+ void remove(const EntryT &input) {
+ remove(getEntryRef(input));
+ }
+ uint32_t getBufferId(EntryRef ref) const {
+ return EntryRefType(ref).bufferId();
+ }
+ void assertBufferState(EntryRef ref, const MemStats expStats) const {
+ EXPECT_EQUAL(expStats._used, store.bufferState(ref).size());
+ EXPECT_EQUAL(expStats._hold, store.bufferState(ref).getHoldElems());
+ EXPECT_EQUAL(expStats._dead, store.bufferState(ref).getDeadElems());
+ }
+ void assertMemoryUsage(const MemStats expStats) const {
+ MemoryUsage act = store.getMemoryUsage();
+ EXPECT_EQUAL(expStats._used, act.usedBytes());
+ EXPECT_EQUAL(expStats._hold, act.allocatedBytesOnHold());
+ EXPECT_EQUAL(expStats._dead, act.deadBytes());
+ }
+ void assertStoreContent() const {
+ for (const auto &elem : refStore) {
+ TEST_DO(assertGet(elem.first, elem.second.first));
+ }
+ }
+ EntryRef getEntryRef(const EntryT &input) {
+ for (const auto &elem : refStore) {
+ if (elem.second.first == input) {
+ return elem.first;
+ }
+ }
+ return EntryRef();
+ }
+ void trimHoldLists() {
+ store.freeze();
+ store.transferHoldLists(generation++);
+ store.trimHoldLists(generation);
+ }
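+    // Compact the worst buffer(s), remap all known refs and update refStore accordingly.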
+ void compactWorst() {
+ ICompactionContext::UP ctx = store.compactWorst();
+ std::vector<EntryRef> refs;
+ for (const auto &elem : refStore) {
+ refs.push_back(elem.first);
+ }
+ refs.push_back(EntryRef());
+ std::vector<EntryRef> compactedRefs = refs;
+ ctx->compact(ArrayRef<EntryRef>(compactedRefs));
+ ASSERT_FALSE(refs.back().valid());
+ refs.pop_back();
+ ReferenceStore compactedRefStore;
+ for (size_t i = 0; i < refs.size(); ++i) {
+ ASSERT_EQUAL(0u, compactedRefStore.count(compactedRefs[i]));
+ ASSERT_EQUAL(1u, refStore.count(refs[i]));
+ compactedRefStore.insert(std::make_pair(compactedRefs[i], refStore[refs[i]]));
+ }
+ refStore = compactedRefStore;
+ }
+ size_t entrySize() const { return sizeof(EntryT); }
+ auto getBuilder(uint32_t uniqueValuesHint) { return store.getBuilder(uniqueValuesHint); }
+ auto getSaver() { return store.getSaver(); }
+};
+
+using NumberFixture = Fixture<uint32_t>;
+using StringFixture = Fixture<std::string>;
+using SmallOffsetNumberFixture = Fixture<uint32_t, EntryRefT<10>>;
+
+TEST("require that we test with trivial and non-trivial types")
+{
+ EXPECT_TRUE(vespalib::can_skip_destruction<NumberFixture::value_type>::value);
+ EXPECT_FALSE(vespalib::can_skip_destruction<StringFixture::value_type>::value);
+}
+
+TEST_F("require that we can add and get values of trivial type", NumberFixture)
+{
+ TEST_DO(f.assertAdd(1));
+ TEST_DO(f.assertAdd(2));
+ TEST_DO(f.assertAdd(3));
+ TEST_DO(f.assertAdd(1));
+}
+
+TEST_F("require that we can add and get values of non-trivial type", StringFixture)
+{
+ TEST_DO(f.assertAdd("aa"));
+ TEST_DO(f.assertAdd("bbb"));
+ TEST_DO(f.assertAdd("ccc"));
+ TEST_DO(f.assertAdd("aa"));
+}
+
+TEST_F("require that elements are put on hold when value is removed", NumberFixture)
+{
+ EntryRef ref = f.add(1);
+    // Note: The first buffer has the first element reserved -> we expect 2 elements used here.
+ TEST_DO(f.assertBufferState(ref, MemStats().used(2).hold(0).dead(1)));
+ f.store.remove(ref);
+ TEST_DO(f.assertBufferState(ref, MemStats().used(2).hold(1).dead(1)));
+}
+
+TEST_F("require that elements are reference counted", NumberFixture)
+{
+ EntryRef ref = f.add(1);
+ EntryRef ref2 = f.add(1);
+ EXPECT_EQUAL(ref.ref(), ref2.ref());
+    // Note: The first buffer has the first element reserved -> we expect 2 elements used here.
+ TEST_DO(f.assertBufferState(ref, MemStats().used(2).hold(0).dead(1)));
+ f.store.remove(ref);
+ TEST_DO(f.assertBufferState(ref, MemStats().used(2).hold(0).dead(1)));
+ f.store.remove(ref);
+ TEST_DO(f.assertBufferState(ref, MemStats().used(2).hold(1).dead(1)));
+}
+
+TEST_F("require that new underlying buffer is allocated when current is full", SmallOffsetNumberFixture)
+{
+ uint32_t firstBufferId = f.getBufferId(f.add(1));
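+    // Offset 0 is reserved and one entry was added above, so offsetSize() - 2 more additions fill the first buffer.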
+ for (uint32_t i = 0; i < (F1::EntryRefType::offsetSize() - 2); ++i) {
+ uint32_t bufferId = f.getBufferId(f.add(i + 2));
+ EXPECT_EQUAL(firstBufferId, bufferId);
+ }
+ TEST_DO(f.assertStoreContent());
+
+ uint32_t bias = F1::EntryRefType::offsetSize();
+ uint32_t secondBufferId = f.getBufferId(f.add(bias + 1));
+ EXPECT_NOT_EQUAL(firstBufferId, secondBufferId);
+ for (uint32_t i = 0; i < 10u; ++i) {
+ uint32_t bufferId = f.getBufferId(f.add(bias + i + 2));
+ EXPECT_EQUAL(secondBufferId, bufferId);
+ }
+ TEST_DO(f.assertStoreContent());
+}
+
+TEST_F("require that compaction works", NumberFixture)
+{
+ EntryRef val1Ref = f.add(1);
+ EntryRef val2Ref = f.add(2);
+ f.remove(f.add(4));
+ f.trimHoldLists();
+ TEST_DO(f.assertBufferState(val1Ref, MemStats().used(4).dead(2))); // Note: First element is reserved
+ uint32_t val1BufferId = f.getBufferId(val1Ref);
+
+ EXPECT_EQUAL(2u, f.refStore.size());
+ f.compactWorst();
+ EXPECT_EQUAL(2u, f.refStore.size());
+ TEST_DO(f.assertStoreContent());
+
+ // Buffer has been compacted
+ EXPECT_NOT_EQUAL(val1BufferId, f.getBufferId(f.getEntryRef(1)));
+ // Old ref should still point to data.
+ f.assertGet(val1Ref, 1);
+ f.assertGet(val2Ref, 2);
+ EXPECT_TRUE(f.store.bufferState(val1Ref).isOnHold());
+ f.trimHoldLists();
+ EXPECT_TRUE(f.store.bufferState(val1Ref).isFree());
+ TEST_DO(f.assertStoreContent());
+}
+
+TEST_F("require that builder works", NumberFixture)
+{
+ auto builder = f.getBuilder(2);
+ builder.add(10);
+ builder.add(20);
+ builder.setupRefCounts();
+ EntryRef val10Ref = builder.mapEnumValueToEntryRef(1);
+ EntryRef val20Ref = builder.mapEnumValueToEntryRef(2);
+ TEST_DO(f.assertBufferState(val10Ref, MemStats().used(3).dead(1))); // Note: First element is reserved
+ EXPECT_TRUE(val10Ref.valid());
+ EXPECT_TRUE(val20Ref.valid());
+ EXPECT_NOT_EQUAL(val10Ref.ref(), val20Ref.ref());
+ f.assertGet(val10Ref, 10);
+ f.assertGet(val20Ref, 20);
+ builder.makeDictionary();
+ // Align refstore with the two entries added by builder.
+ f.alignRefStore(val10Ref, 10, 1);
+ f.alignRefStore(val20Ref, 20, 1);
+ EXPECT_EQUAL(val10Ref.ref(), f.add(10).ref());
+ EXPECT_EQUAL(val20Ref.ref(), f.add(20).ref());
+}
+
+TEST_F("require that saver works", NumberFixture)
+{
+ EntryRef val10Ref = f.add(10);
+ EntryRef val20Ref = f.add(20);
+ f.remove(f.add(40));
+ f.trimHoldLists();
+
+ auto saver = f.getSaver();
+ std::vector<uint32_t> refs;
+ saver.foreach_key([&](EntryRef ref) { refs.push_back(ref.ref()); });
+ std::vector<uint32_t> expRefs;
+ expRefs.push_back(val10Ref.ref());
+ expRefs.push_back(val20Ref.ref());
+ EXPECT_EQUAL(expRefs, refs);
+ saver.enumerateValues();
+ uint32_t invalidEnum = saver.mapEntryRefToEnumValue(EntryRef());
+ uint32_t enumValue10 = saver.mapEntryRefToEnumValue(val10Ref);
+ uint32_t enumValue20 = saver.mapEntryRefToEnumValue(val20Ref);
+ EXPECT_EQUAL(0u, invalidEnum);
+ EXPECT_EQUAL(1u, enumValue10);
+ EXPECT_EQUAL(2u, enumValue20);
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }