aboutsummaryrefslogtreecommitdiffstats
path: root/storage/src/tests
diff options
context:
space:
mode:
authorJon Bratseth <bratseth@yahoo-inc.com>2016-06-15 23:09:44 +0200
committerJon Bratseth <bratseth@yahoo-inc.com>2016-06-15 23:09:44 +0200
commit72231250ed81e10d66bfe70701e64fa5fe50f712 (patch)
tree2728bba1131a6f6e5bdf95afec7d7ff9358dac50 /storage/src/tests
Publish
Diffstat (limited to 'storage/src/tests')
-rw-r--r--storage/src/tests/.gitignore22
-rw-r--r--storage/src/tests/CMakeLists.txt22
-rw-r--r--storage/src/tests/bucketdb/.gitignore12
-rw-r--r--storage/src/tests/bucketdb/CMakeLists.txt14
-rw-r--r--storage/src/tests/bucketdb/bucketinfotest.cpp201
-rw-r--r--storage/src/tests/bucketdb/bucketmanagertest.cpp1323
-rw-r--r--storage/src/tests/bucketdb/distribution_hash_normalizer_test.cpp114
-rw-r--r--storage/src/tests/bucketdb/initializertest.cpp924
-rw-r--r--storage/src/tests/bucketdb/judyarraytest.cpp287
-rw-r--r--storage/src/tests/bucketdb/judymultimaptest.cpp172
-rw-r--r--storage/src/tests/bucketdb/lockablemaptest.cpp1262
-rw-r--r--storage/src/tests/bucketmover/CMakeLists.txt9
-rw-r--r--storage/src/tests/bucketmover/bucketmovertest.cpp190
-rw-r--r--storage/src/tests/bucketmover/htmltabletest.cpp100
-rw-r--r--storage/src/tests/common/.gitignore8
-rw-r--r--storage/src/tests/common/CMakeLists.txt12
-rw-r--r--storage/src/tests/common/dummystoragelink.cpp191
-rw-r--r--storage/src/tests/common/dummystoragelink.h121
-rw-r--r--storage/src/tests/common/hostreporter/CMakeLists.txt14
-rw-r--r--storage/src/tests/common/hostreporter/cpureportertest.cpp40
-rw-r--r--storage/src/tests/common/hostreporter/diskreportertest.cpp33
-rw-r--r--storage/src/tests/common/hostreporter/hostinfotest.cpp60
-rw-r--r--storage/src/tests/common/hostreporter/memreportertest.cpp44
-rw-r--r--storage/src/tests/common/hostreporter/networkreportertest.cpp40
-rw-r--r--storage/src/tests/common/hostreporter/util.cpp34
-rw-r--r--storage/src/tests/common/hostreporter/util.h16
-rw-r--r--storage/src/tests/common/hostreporter/versionreportertest.cpp39
-rw-r--r--storage/src/tests/common/metricstest.cpp393
-rw-r--r--storage/src/tests/common/storagelinktest.cpp57
-rw-r--r--storage/src/tests/common/storagelinktest.h46
-rw-r--r--storage/src/tests/common/testhelper.cpp209
-rw-r--r--storage/src/tests/common/testhelper.h58
-rw-r--r--storage/src/tests/common/testnodestateupdater.h50
-rw-r--r--storage/src/tests/common/teststorageapp.cpp292
-rw-r--r--storage/src/tests/common/teststorageapp.h161
-rw-r--r--storage/src/tests/config-doctypes.cfg158
-rw-r--r--storage/src/tests/config-document.cfg78
-rw-r--r--storage/src/tests/config-testdocman-document.cfg138
-rw-r--r--storage/src/tests/distributor/.gitignore8
-rw-r--r--storage/src/tests/distributor/CMakeLists.txt44
-rw-r--r--storage/src/tests/distributor/blockingoperationstartertest.cpp78
-rw-r--r--storage/src/tests/distributor/bucketdatabasetest.cpp550
-rw-r--r--storage/src/tests/distributor/bucketdatabasetest.h63
-rw-r--r--storage/src/tests/distributor/bucketdbmetricupdatertest.cpp361
-rw-r--r--storage/src/tests/distributor/bucketdbupdatertest.cpp2296
-rw-r--r--storage/src/tests/distributor/bucketgctimecalculatortest.cpp114
-rw-r--r--storage/src/tests/distributor/bucketstateoperationtest.cpp251
-rw-r--r--storage/src/tests/distributor/distributor_host_info_reporter_test.cpp225
-rw-r--r--storage/src/tests/distributor/distributortest.cpp691
-rw-r--r--storage/src/tests/distributor/distributortestutil.cpp298
-rw-r--r--storage/src/tests/distributor/distributortestutil.h200
-rw-r--r--storage/src/tests/distributor/externaloperationhandlertest.cpp176
-rw-r--r--storage/src/tests/distributor/garbagecollectiontest.cpp77
-rw-r--r--storage/src/tests/distributor/getoperationtest.cpp567
-rw-r--r--storage/src/tests/distributor/idealstatemanagertest.cpp268
-rw-r--r--storage/src/tests/distributor/joinbuckettest.cpp127
-rw-r--r--storage/src/tests/distributor/maintenancemocks.h123
-rw-r--r--storage/src/tests/distributor/maintenanceschedulertest.cpp108
-rw-r--r--storage/src/tests/distributor/mapbucketdatabasetest.cpp26
-rw-r--r--storage/src/tests/distributor/mergelimitertest.cpp161
-rw-r--r--storage/src/tests/distributor/mergeoperationtest.cpp430
-rw-r--r--storage/src/tests/distributor/messagesenderstub.cpp88
-rw-r--r--storage/src/tests/distributor/messagesenderstub.h71
-rw-r--r--storage/src/tests/distributor/nodeinfotest.cpp83
-rw-r--r--storage/src/tests/distributor/nodemaintenancestatstrackertest.cpp102
-rw-r--r--storage/src/tests/distributor/operationtargetresolvertest.cpp316
-rw-r--r--storage/src/tests/distributor/pendingmessagetrackertest.cpp674
-rw-r--r--storage/src/tests/distributor/putoperationtest.cpp704
-rw-r--r--storage/src/tests/distributor/removebucketoperationtest.cpp150
-rw-r--r--storage/src/tests/distributor/removelocationtest.cpp84
-rw-r--r--storage/src/tests/distributor/removeoperationtest.cpp203
-rw-r--r--storage/src/tests/distributor/simplebucketprioritydatabasetest.cpp143
-rw-r--r--storage/src/tests/distributor/simplemaintenancescannertest.cpp220
-rw-r--r--storage/src/tests/distributor/splitbuckettest.cpp353
-rw-r--r--storage/src/tests/distributor/statecheckerstest.cpp1838
-rw-r--r--storage/src/tests/distributor/statoperationtest.cpp115
-rw-r--r--storage/src/tests/distributor/statusreporterdelegatetest.cpp87
-rw-r--r--storage/src/tests/distributor/throttlingoperationstartertest.cpp142
-rw-r--r--storage/src/tests/distributor/twophaseupdateoperationtest.cpp1194
-rw-r--r--storage/src/tests/distributor/updateoperationtest.cpp210
-rw-r--r--storage/src/tests/distributor/visitoroperationtest.cpp1646
-rw-r--r--storage/src/tests/fastos.project.newcore80
-rw-r--r--storage/src/tests/frameworkimpl/memory/CMakeLists.txt8
-rw-r--r--storage/src/tests/frameworkimpl/memory/memorystatusviewertest.cpp168
-rw-r--r--storage/src/tests/frameworkimpl/status/CMakeLists.txt8
-rw-r--r--storage/src/tests/frameworkimpl/status/statustest.cpp222
-rw-r--r--storage/src/tests/persistence/.gitignore12
-rw-r--r--storage/src/tests/persistence/CMakeLists.txt19
-rw-r--r--storage/src/tests/persistence/bucketownershipnotifiertest.cpp162
-rw-r--r--storage/src/tests/persistence/diskmoveoperationhandlertest.cpp57
-rw-r--r--storage/src/tests/persistence/filestorage/.gitignore13
-rw-r--r--storage/src/tests/persistence/filestorage/CMakeLists.txt17
-rw-r--r--storage/src/tests/persistence/filestorage/deactivatebucketstest.cpp66
-rw-r--r--storage/src/tests/persistence/filestorage/deletebuckettest.cpp63
-rw-r--r--storage/src/tests/persistence/filestorage/filestormanagertest.cpp3150
-rw-r--r--storage/src/tests/persistence/filestorage/filestormodifiedbucketstest.cpp142
-rw-r--r--storage/src/tests/persistence/filestorage/filestortestfixture.cpp143
-rw-r--r--storage/src/tests/persistence/filestorage/filestortestfixture.h112
-rw-r--r--storage/src/tests/persistence/filestorage/forwardingmessagesender.h26
-rw-r--r--storage/src/tests/persistence/filestorage/mergeblockingtest.cpp239
-rw-r--r--storage/src/tests/persistence/filestorage/modifiedbucketcheckertest.cpp214
-rw-r--r--storage/src/tests/persistence/filestorage/operationabortingtest.cpp470
-rw-r--r--storage/src/tests/persistence/filestorage/sanitycheckeddeletetest.cpp78
-rw-r--r--storage/src/tests/persistence/filestorage/singlebucketjointest.cpp51
-rw-r--r--storage/src/tests/persistence/legacyoperationhandlertest.cpp190
-rw-r--r--storage/src/tests/persistence/mergehandlertest.cpp1494
-rw-r--r--storage/src/tests/persistence/persistenceproviderwrapper.cpp222
-rw-r--r--storage/src/tests/persistence/persistenceproviderwrapper.h153
-rw-r--r--storage/src/tests/persistence/persistencequeuetest.cpp103
-rw-r--r--storage/src/tests/persistence/persistencetestutils.cpp412
-rw-r--r--storage/src/tests/persistence/persistencetestutils.h214
-rw-r--r--storage/src/tests/persistence/persistencethread_splittest.cpp234
-rw-r--r--storage/src/tests/persistence/processalltest.cpp262
-rw-r--r--storage/src/tests/persistence/providershutdownwrappertest.cpp87
-rw-r--r--storage/src/tests/persistence/splitbitdetectortest.cpp363
-rw-r--r--storage/src/tests/persistence/testandsettest.cpp331
-rwxr-xr-xstorage/src/tests/pstack_testrunner14
-rw-r--r--storage/src/tests/serverapp/.gitignore8
-rw-r--r--storage/src/tests/storageserver/.gitignore13
-rw-r--r--storage/src/tests/storageserver/CMakeLists.txt17
-rw-r--r--storage/src/tests/storageserver/bouncertest.cpp285
-rw-r--r--storage/src/tests/storageserver/bucketintegritycheckertest.cpp302
-rw-r--r--storage/src/tests/storageserver/changedbucketownershiphandlertest.cpp648
-rw-r--r--storage/src/tests/storageserver/communicationmanagertest.cpp235
-rw-r--r--storage/src/tests/storageserver/documentapiconvertertest.cpp529
-rw-r--r--storage/src/tests/storageserver/dummystoragelink.cpp182
-rw-r--r--storage/src/tests/storageserver/dummystoragelink.h115
-rw-r--r--storage/src/tests/storageserver/mergethrottlertest.cpp1566
-rw-r--r--storage/src/tests/storageserver/priorityconvertertest.cpp104
-rw-r--r--storage/src/tests/storageserver/statemanagertest.cpp264
-rw-r--r--storage/src/tests/storageserver/statereportertest.cpp279
-rw-r--r--storage/src/tests/storageserver/testvisitormessagesession.cpp78
-rw-r--r--storage/src/tests/storageserver/testvisitormessagesession.h79
-rw-r--r--storage/src/tests/storageutil/.gitignore13
-rw-r--r--storage/src/tests/storageutil/CMakeLists.txt10
-rw-r--r--storage/src/tests/storageutil/charttest.cpp66
-rw-r--r--storage/src/tests/storageutil/functortest.cpp55
-rw-r--r--storage/src/tests/storageutil/palettetest.cpp33
-rw-r--r--storage/src/tests/storageutil/recordflatfiletest.cpp314
-rw-r--r--storage/src/tests/subscriptions/.gitignore8
-rw-r--r--storage/src/tests/systemtests/.gitignore0
-rw-r--r--storage/src/tests/testhelper.cpp175
-rw-r--r--storage/src/tests/testhelper.h58
-rw-r--r--storage/src/tests/testrunner.cpp15
-rw-r--r--storage/src/tests/visiting/.gitignore12
-rw-r--r--storage/src/tests/visiting/CMakeLists.txt11
-rw-r--r--storage/src/tests/visiting/commandqueuetest.cpp223
-rw-r--r--storage/src/tests/visiting/memory_bounded_trace_test.cpp131
-rw-r--r--storage/src/tests/visiting/visitormanagertest.cpp1172
-rw-r--r--storage/src/tests/visiting/visitortest.cpp1023
150 files changed, 40231 insertions, 0 deletions
diff --git a/storage/src/tests/.gitignore b/storage/src/tests/.gitignore
new file mode 100644
index 00000000000..9023e5da3b4
--- /dev/null
+++ b/storage/src/tests/.gitignore
@@ -0,0 +1,22 @@
+*.o
+*.lo
+.depend.NEW
+.depend
+.deps
+.libs
+.config.log
+Makefile
+testrunner
+vdsroot
+*.core
+state
+*.So
+test.vlog
+dirconfig.tmp
+.*.swp
+metricsreport.html
+piefile.html
+piefile-customcols.html
+palette.html
+use_new_storage_core
+storage_testrunner_app
diff --git a/storage/src/tests/CMakeLists.txt b/storage/src/tests/CMakeLists.txt
new file mode 100644
index 00000000000..894ea7b4d25
--- /dev/null
+++ b/storage/src/tests/CMakeLists.txt
@@ -0,0 +1,22 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(storage_testrunner_app
+ SOURCES
+ testrunner.cpp
+ DEPENDS
+ storage_teststorageserver
+ storage_testbucketmover
+ storage_teststorageutil
+ storage_testvisiting
+ storage_testbucketdb
+ storage_testcommon
+ storage_testhostreporter
+ storage_testdistributor
+ storage_testpersistence
+ storage_testfilestorage
+ storage_testmemory
+ storage_teststatus
+ storage
+ AFTER
+ storage_storageconfig
+)
+vespa_add_test(NAME storage_testrunner_app COMMAND storage_testrunner_app)
diff --git a/storage/src/tests/bucketdb/.gitignore b/storage/src/tests/bucketdb/.gitignore
new file mode 100644
index 00000000000..4e71c44a596
--- /dev/null
+++ b/storage/src/tests/bucketdb/.gitignore
@@ -0,0 +1,12 @@
+*.So
+*.core
+*.lo
+*.o
+.*.swp
+.config.log
+.depend
+.depend.NEW
+.deps
+.libs
+Makefile
+testrunner
diff --git a/storage/src/tests/bucketdb/CMakeLists.txt b/storage/src/tests/bucketdb/CMakeLists.txt
new file mode 100644
index 00000000000..95228966589
--- /dev/null
+++ b/storage/src/tests/bucketdb/CMakeLists.txt
@@ -0,0 +1,14 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_library(storage_testbucketdb
+ SOURCES
+ initializertest.cpp
+ bucketmanagertest.cpp
+ judyarraytest.cpp
+ judymultimaptest.cpp
+ lockablemaptest.cpp
+ bucketinfotest.cpp
+ distribution_hash_normalizer_test.cpp
+ DEPENDS
+ AFTER
+ storage_storageconfig
+)
diff --git a/storage/src/tests/bucketdb/bucketinfotest.cpp b/storage/src/tests/bucketdb/bucketinfotest.cpp
new file mode 100644
index 00000000000..eef4c6d7739
--- /dev/null
+++ b/storage/src/tests/bucketdb/bucketinfotest.cpp
@@ -0,0 +1,201 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <boost/assign.hpp>
+#include <boost/random.hpp>
+#include <cppunit/extensions/HelperMacros.h>
+#include <map>
+#include <vector>
+#include <vespa/vespalib/text/stringtokenizer.h>
+#include <vespa/storage/distributor/bucketdb/bucketinfo.h>
+
+namespace storage {
+
+namespace distributor {
+
+struct BucketInfoTest : public CppUnit::TestFixture {
+ void testBucketInfoEntriesWithNewestTimestampsAreKept();
+ void testOrder();
+ void testHasInvalidCopy();
+ void testAddNodeSetsTrustedWhenConsistent();
+ void testTrustedResetWhenCopiesBecomeInconsistent();
+ void testTrustedResetWhenTrustedCopiesGoOutOfSync();
+ void testTrustedNotResetWhenNonTrustedCopiesStillOutOfSync();
+
+ CPPUNIT_TEST_SUITE(BucketInfoTest);
+ CPPUNIT_TEST(testBucketInfoEntriesWithNewestTimestampsAreKept);
+ CPPUNIT_TEST(testOrder);
+ CPPUNIT_TEST(testHasInvalidCopy);
+ CPPUNIT_TEST(testAddNodeSetsTrustedWhenConsistent);
+ CPPUNIT_TEST_IGNORED(testTrustedResetWhenCopiesBecomeInconsistent);
+ CPPUNIT_TEST(testTrustedResetWhenTrustedCopiesGoOutOfSync);
+ CPPUNIT_TEST(testTrustedNotResetWhenNonTrustedCopiesStillOutOfSync);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(BucketInfoTest);
+
+BucketInfo
+getBucketInfo(std::string nodeList, std::string order) {
+ BucketInfo info;
+
+ std::vector<uint16_t> ordering;
+ {
+ vespalib::StringTokenizer tokenizer(order, ",");
+ for (uint32_t i = 0; i < tokenizer.size(); i++) {
+ ordering.push_back(atoi(tokenizer[i].c_str()));
+ }
+ }
+
+ vespalib::StringTokenizer tokenizer(nodeList, ",");
+ for (uint32_t i = 0; i < tokenizer.size(); i++) {
+ info.addNode(BucketCopy(0,
+ atoi(tokenizer[i].c_str()),
+ api::BucketInfo(1,1,1)),
+ ordering);
+ }
+
+ return info;
+}
+
+std::string
+nodeList(const BucketInfo& info) {
+ std::ostringstream ost;
+ for (uint32_t i = 0; i < info.getNodeCount(); i++) {
+ if (i != 0) {
+ ost << ",";
+ }
+ ost << (int)info.getNodeRef(i).getNode();
+ }
+ return ost.str();
+}
+
+// Since we keep bucket info in memory for a period of time before applying
+// to bucket db, we maintain timestamps to prevent external load happening
+// in the meantime from having their updates lost when we perform a batch
+// insert. This also applies for when we postpone db updates in persistence
+// message tracker until we've received a reply from all copies.
+void
+BucketInfoTest::testBucketInfoEntriesWithNewestTimestampsAreKept()
+{
+ BucketInfo bi;
+ std::vector<uint16_t> idealState;
+ idealState.push_back(0);
+
+ bi.addNode(BucketCopy(5, 0, api::BucketInfo(1,1,1)), idealState);
+ CPPUNIT_ASSERT_EQUAL(api::BucketInfo(1,1,1),
+ bi.getNode(0)->getBucketInfo());
+
+ bi.addNode(BucketCopy(5, 0, api::BucketInfo(2,2,2)), idealState);
+ CPPUNIT_ASSERT_EQUAL(api::BucketInfo(1,1,1),
+ bi.getNode(0)->getBucketInfo());
+
+ bi.addNode(BucketCopy(4, 0, api::BucketInfo(3,3,3)), idealState);
+ CPPUNIT_ASSERT_EQUAL(api::BucketInfo(1,1,1),
+ bi.getNode(0)->getBucketInfo());
+
+ bi.addNode(BucketCopy(7, 0, api::BucketInfo(4,4,4)), idealState);
+ CPPUNIT_ASSERT_EQUAL(api::BucketInfo(4,4,4),
+ bi.getNode(0)->getBucketInfo());
+
+ bi.addNode(BucketCopy(2, 1, api::BucketInfo(4,4,4)), idealState);
+ CPPUNIT_ASSERT_EQUAL(api::BucketInfo(4,4,4),
+ bi.getNode(1)->getBucketInfo());
+}
+
+void
+BucketInfoTest::testOrder() {
+
+ CPPUNIT_ASSERT_EQUAL(std::string("2,0,1"), nodeList(getBucketInfo("0,1,2", "2,0,1")));
+ CPPUNIT_ASSERT_EQUAL(std::string("2,0,1"), nodeList(getBucketInfo("1,0,2", "2,0,1")));
+ CPPUNIT_ASSERT_EQUAL(std::string("1,0,2"), nodeList(getBucketInfo("1,2,0", "1")));
+ CPPUNIT_ASSERT_EQUAL(std::string("2,1,0,3,4"), nodeList(getBucketInfo("0,1,2,3,4", "2,1")));
+}
+
+void
+BucketInfoTest::testHasInvalidCopy()
+{
+ std::vector<uint16_t> order;
+
+ BucketInfo info;
+ info.addNode(BucketCopy(0, 0, api::BucketInfo(10, 100, 1000)), order);
+ info.addNode(BucketCopy(0, 1, api::BucketInfo(10, 100, 1000)), order);
+ CPPUNIT_ASSERT(!info.hasInvalidCopy());
+
+ info.addNode(BucketCopy(0, 2, api::BucketInfo()), order);
+ CPPUNIT_ASSERT(info.hasInvalidCopy());
+
+}
+
+void
+BucketInfoTest::testAddNodeSetsTrustedWhenConsistent()
+{
+ std::vector<uint16_t> order;
+
+ {
+ BucketInfo info;
+ info.addNode(BucketCopy(0, 0, api::BucketInfo(0x1, 2, 144)).setTrusted(), order);
+ info.addNode(BucketCopy(0, 1, api::BucketInfo(0x1, 2, 144)), order);
+ CPPUNIT_ASSERT(info.getNode(1)->trusted());
+ }
+
+ {
+ BucketInfo info;
+ info.addNode(BucketCopy(0, 0, api::BucketInfo(0x1, 1, 2)).setTrusted(), order);
+ info.addNode(BucketCopy(0, 1, api::BucketInfo(0x2, 2, 3)), order);
+ info.addNode(BucketCopy(0, 2, api::BucketInfo(0x3, 3, 4)), order);
+
+ BucketCopy copy(1, 1, api::BucketInfo(0x1, 1, 2));
+ info.updateNode(copy);
+ CPPUNIT_ASSERT(info.getNode(1)->trusted());
+ CPPUNIT_ASSERT(!info.getNode(2)->trusted());
+ }
+}
+
+void
+BucketInfoTest::testTrustedResetWhenCopiesBecomeInconsistent()
+{
+ CPPUNIT_FAIL("TODO: test this!");
+}
+
+void
+BucketInfoTest::testTrustedResetWhenTrustedCopiesGoOutOfSync()
+{
+ std::vector<uint16_t> order;
+
+ BucketInfo info;
+ info.addNode(BucketCopy(0, 0, api::BucketInfo(10, 100, 1000)).setTrusted(), order);
+ info.addNode(BucketCopy(0, 1, api::BucketInfo(10, 100, 1000)), order);
+ CPPUNIT_ASSERT(info.getNode(0)->trusted());
+ CPPUNIT_ASSERT(info.getNode(1)->trusted());
+
+ info.updateNode(BucketCopy(0, 1, api::BucketInfo(20, 200, 2000)).setTrusted());
+ CPPUNIT_ASSERT(!info.getNode(0)->trusted());
+ CPPUNIT_ASSERT(!info.getNode(1)->trusted());
+}
+
+void
+BucketInfoTest::testTrustedNotResetWhenNonTrustedCopiesStillOutOfSync()
+{
+ std::vector<uint16_t> order;
+
+ BucketInfo info;
+ info.addNode(BucketCopy(0, 0, api::BucketInfo(10, 100, 1000)).setTrusted(), order);
+ info.addNode(BucketCopy(0, 1, api::BucketInfo(20, 200, 2000)), order);
+ info.addNode(BucketCopy(0, 2, api::BucketInfo(30, 300, 3000)), order);
+ CPPUNIT_ASSERT(info.getNode(0)->trusted());
+ CPPUNIT_ASSERT(!info.getNode(1)->trusted());
+ CPPUNIT_ASSERT(!info.getNode(2)->trusted());
+
+ info.updateNode(BucketCopy(0, 1, api::BucketInfo(21, 201, 2001)));
+
+ CPPUNIT_ASSERT(info.getNode(0)->trusted());
+ CPPUNIT_ASSERT(!info.getNode(1)->trusted());
+ CPPUNIT_ASSERT(!info.getNode(2)->trusted());
+}
+
+}
+
+} // storage
+
diff --git a/storage/src/tests/bucketdb/bucketmanagertest.cpp b/storage/src/tests/bucketdb/bucketmanagertest.cpp
new file mode 100644
index 00000000000..ee2e3f6ef7f
--- /dev/null
+++ b/storage/src/tests/bucketdb/bucketmanagertest.cpp
@@ -0,0 +1,1323 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+LOG_SETUP(".test.bucketdb.bucketmanager");
+
+#include <vespa/config/helper/configgetter.h>
+#include <cppunit/extensions/HelperMacros.h>
+#include <vespa/document/config/config-documenttypes.h>
+#include <vespa/document/datatype/documenttype.h>
+#include <vespa/document/fieldvalue/document.h>
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/storage/bucketdb/bucketmanager.h>
+#include <vespa/storage/common/bucketmessages.h>
+#include <vespa/storageframework/defaultimplementation/clock/realclock.h>
+#include <vespa/storage/persistence/filestorage/filestormanager.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storageapi/message/state.h>
+#include <vespa/storageapi/message/bucketsplitting.h>
+#include <tests/common/teststorageapp.h>
+#include <tests/common/dummystoragelink.h>
+#include <tests/common/testhelper.h>
+#include <vespa/vdslib/state/clusterstate.h>
+#include <vespa/vdslib/state/random.h>
+#include <vespa/vespalib/io/fileutil.h>
+#include <vespa/vespalib/stllike/string.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <thread>
+#include <future>
+
+using config::ConfigGetter;
+using document::DocumenttypesConfig;
+using config::FileSpec;
+using document::DocumentType;
+using document::DocumentTypeRepo;
+
+namespace storage {
+
+struct TestBucketInfo {
+ uint32_t crc;
+ uint32_t size;
+ uint32_t count;
+ uint32_t partition;
+
+ api::BucketInfo getInfo() const
+ { return api::BucketInfo(crc, count, size); }
+};
+
+std::ostream& operator<<(std::ostream& out, const TestBucketInfo& info) {
+ out << "TestBucketInfo(" << info.crc << ", " << info.size
+ << ", " << info.count << ", " << info.partition << ")";
+ return out;
+}
+
+class ConcurrentOperationFixture;
+struct TestParams;
+
+struct BucketManagerTest : public CppUnit::TestFixture {
+public:
+ CPPUNIT_TEST_SUITE(BucketManagerTest);
+ CPPUNIT_TEST(testRequestBucketInfoWithList);
+ CPPUNIT_TEST(testDistributionBitGenerationEmpty);
+ CPPUNIT_TEST(testDistributionBitChangeOnCreateBucket);
+ CPPUNIT_TEST(testMinUsedBitsFromComponentIsHonored);
+ CPPUNIT_TEST(testRemoveLastModifiedOK);
+ CPPUNIT_TEST(testRemoveLastModifiedFailed);
+ CPPUNIT_TEST(testSwallowNotifyBucketChangeReply);
+ CPPUNIT_TEST(testMetricsGeneration);
+ CPPUNIT_TEST(testSplitReplyOrderedAfterBucketReply);
+ CPPUNIT_TEST(testJoinReplyOrderedAfterBucketReply);
+ CPPUNIT_TEST(testDeleteReplyOrderedAfterBucketReply);
+ CPPUNIT_TEST(testOnlyEnqueueWhenProcessingRequest);
+ CPPUNIT_TEST(testOrderRepliesAfterBucketSpecificRequest);
+ CPPUNIT_TEST(testQueuedRepliesOnlyDispatchedWhenAllProcessingDone);
+ CPPUNIT_TEST(testMutationRepliesForSplitBucketAreEnqueued);
+ CPPUNIT_TEST(testMutationRepliesForDeletedBucketAreEnqueued);
+ CPPUNIT_TEST(testMutationRepliesForJoinedBucketAreEnqueued);
+ CPPUNIT_TEST(testConflictingPutRepliesAreEnqueued);
+ CPPUNIT_TEST(testConflictingUpdateRepliesAreEnqueued);
+ CPPUNIT_TEST(testRemappedMutationIsCheckedAgainstOriginalBucket);
+ CPPUNIT_TEST(testBucketConflictSetIsClearedBetweenBlockingRequests);
+ CPPUNIT_TEST(testConflictSetOnlyClearedAfterAllBucketRequestsDone);
+ CPPUNIT_TEST(testRejectRequestWithMismatchingDistributionHash);
+ CPPUNIT_TEST(testDbNotIteratedWhenAllRequestsRejected);
+ CPPUNIT_TEST(testReceivedDistributionHashIsNormalized);
+
+ // FIXME(vekterli): test is not deterministic and enjoys failing
+ // sporadically when running under Valgrind. See bug 5932891.
+ CPPUNIT_TEST_IGNORED(testRequestBucketInfoWithState);
+ CPPUNIT_TEST_SUITE_END();
+
+ std::unique_ptr<TestServiceLayerApp> _node;
+ std::unique_ptr<DummyStorageLink> _top;
+ BucketManager *_manager;
+ DummyStorageLink* _bottom;
+ FileStorManager* _filestorManager;
+ std::map<document::BucketId, TestBucketInfo> _bucketInfo;
+ uint32_t _emptyBuckets;
+ document::Document::SP _document;
+
+ void setupTestEnvironment(bool fakePersistenceLayer = true,
+ bool noDelete = false);
+ void addBucketsToDB(uint32_t count);
+ bool wasBlockedDueToLastModified(api::StorageMessage* msg,
+ uint64_t lastModified);
+ bool wasBlockedDueToLastModified(api::StorageMessage::SP msg);
+ void insertSingleBucket(const document::BucketId& bucket,
+ const api::BucketInfo& info);
+ void waitUntilRequestsAreProcessing(size_t nRequests = 1);
+ void doTestMutationOrdering(
+ ConcurrentOperationFixture& fixture,
+ const TestParams& params);
+ void doTestConflictingReplyIsEnqueued(
+ const document::BucketId& bucket,
+ const api::StorageCommand::SP& treeMutationCmd,
+ const api::MessageType& treeMutationReplyType);
+
+ void scheduleBucketInfoRequestWithConcurrentOps(
+ ConcurrentOperationFixture& fixture,
+ const document::BucketId& bucketForRemove,
+ const document::BucketId& bucketForSplit,
+ api::Timestamp mutationTimestamp);
+ void sendSingleBucketInfoRequest(const document::BucketId& id);
+ void assertRequestWithBadHashIsRejected(
+ ConcurrentOperationFixture& fixture);
+
+
+ void testRequestBucketInfoWithState();
+ void testRequestBucketInfoWithList();
+ void testDistributionBitGenerationEmpty();
+ void testDistributionBitChangeOnCreateBucket();
+ void testMinUsedBitsFromComponentIsHonored();
+
+ void testRemoveLastModifiedOK();
+ void testRemoveLastModifiedFailed();
+
+ void testSwallowNotifyBucketChangeReply();
+ void testMetricsGeneration();
+ void testSplitReplyOrderedAfterBucketReply();
+ void testJoinReplyOrderedAfterBucketReply();
+ void testDeleteReplyOrderedAfterBucketReply();
+ void testOnlyEnqueueWhenProcessingRequest();
+ void testOrderRepliesAfterBucketSpecificRequest();
+ void testQueuedRepliesOnlyDispatchedWhenAllProcessingDone();
+ void testMutationRepliesForSplitBucketAreEnqueued();
+ void testMutationRepliesForDeletedBucketAreEnqueued();
+ void testMutationRepliesForJoinedBucketAreEnqueued();
+ void testConflictingPutRepliesAreEnqueued();
+ void testConflictingUpdateRepliesAreEnqueued();
+ void testRemappedMutationIsCheckedAgainstOriginalBucket();
+ void testBucketConflictSetIsClearedBetweenBlockingRequests();
+ void testConflictSetOnlyClearedAfterAllBucketRequestsDone();
+ void testRejectRequestWithMismatchingDistributionHash();
+ void testDbNotIteratedWhenAllRequestsRejected();
+ void testReceivedDistributionHashIsNormalized();
+
+public:
+ static constexpr uint32_t DIR_SPREAD = 3;
+ static constexpr uint32_t MESSAGE_WAIT_TIME = 60*2;
+
+ void setUp() {
+ _emptyBuckets = 0;
+ }
+
+ void tearDown() {
+ }
+
+ friend class ConcurrentOperationFixture;
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(BucketManagerTest);
+
+#define ASSERT_DUMMYLINK_REPLY_COUNT(link, count) \
+ if (link->getNumReplies() != count) { \
+ std::ostringstream ost; \
+ ost << "Expected there to be " << count << " replies in link, but " \
+ << "found " << link->getNumReplies() << ":\n"; \
+ for (uint32_t i=0; i<link->getNumReplies(); ++i) { \
+ ost << link->getReply(i)->getType() << "\n"; \
+ } \
+ CPPUNIT_FAIL(ost.str()); \
+ }
+
+void BucketManagerTest::setupTestEnvironment(bool fakePersistenceLayer,
+ bool noDelete)
+{
+ if (!noDelete) {
+ assert(system("rm -rf vdsroot") == 0);
+ }
+ assert(system("mkdir -p vdsroot/disks/d0") == 0);
+ assert(system("mkdir -p vdsroot/disks/d1") == 0);
+ vdstestlib::DirConfig config(getStandardConfig(true));
+
+ DocumentTypeRepo::SP repo(new DocumentTypeRepo(
+ *ConfigGetter<DocumenttypesConfig>::getConfig("config-doctypes",
+ FileSpec("config-doctypes.cfg"))));
+ _top.reset(new DummyStorageLink);
+ _node.reset(new TestServiceLayerApp(
+ DiskCount(2), NodeIndex(0), config.getConfigId()));
+ _node->setTypeRepo(repo);
+ _node->setupDummyPersistence();
+ // Set up the 3 links
+ StorageLink::UP manager(new BucketManager("", _node->getComponentRegister()));
+ _manager = (BucketManager*) manager.get();
+ _top->push_back(std::move(manager));
+ if (fakePersistenceLayer) {
+ StorageLink::UP bottom(new DummyStorageLink);
+ _bottom = (DummyStorageLink*) bottom.get();
+ _top->push_back(std::move(bottom));
+ } else {
+ StorageLink::UP bottom(new FileStorManager(
+ config.getConfigId(), _node->getPartitions(),
+ _node->getPersistenceProvider(), _node->getComponentRegister()));
+ _filestorManager = (FileStorManager*) bottom.get();
+ _top->push_back(std::move(bottom));
+ }
+ // Generate a doc to use for testing..
+ const DocumentType &type(*_node->getTypeRepo()
+ ->getDocumentType("text/html"));
+ _document.reset(new document::Document(type, document::DocumentId(
+ document::DocIdString("test", "ntnu"))));
+}
+
+void BucketManagerTest::addBucketsToDB(uint32_t count)
+{
+ _bucketInfo.clear();
+ _emptyBuckets = 0;
+ lib::RandomGen randomizer(25423);
+ while (_bucketInfo.size() < count) {
+ document::BucketId id(16, randomizer.nextUint32());
+ id = id.stripUnused();
+ if (_bucketInfo.size() == 0) {
+ id = _node->getBucketIdFactory().getBucketId(
+ _document->getId()).stripUnused();
+ }
+ TestBucketInfo info;
+ info.crc = randomizer.nextUint32();
+ info.size = randomizer.nextUint32();
+ info.count = randomizer.nextUint32(1, 0xFFFF);
+
+ info.partition = _node->getPartition(id);
+ _bucketInfo[id] = info;
+ }
+
+ // Make sure we have at least one empty bucket
+ TestBucketInfo& info = (++_bucketInfo.begin())->second;
+ CPPUNIT_ASSERT(info.size != 0);
+ info.size = 0;
+ info.count = 0;
+ info.crc = 0;
+ ++_emptyBuckets;
+ for (std::map<document::BucketId, TestBucketInfo>::iterator it
+ = _bucketInfo.begin(); it != _bucketInfo.end(); ++it)
+ {
+ bucketdb::StorageBucketInfo entry;
+ entry.disk = it->second.partition;
+ entry.setBucketInfo(api::BucketInfo(it->second.crc,
+ it->second.count,
+ it->second.size));
+ _node->getStorageBucketDatabase().insert(it->first, entry, "foo");
+ }
+}
+
+bool
+BucketManagerTest::wasBlockedDueToLastModified(api::StorageMessage* msg,
+ uint64_t lastModified)
+{
+ setupTestEnvironment();
+ document::BucketId id(16, 1);
+ api::BucketInfo info(1, 2, 3);
+ info.setLastModified(api::Timestamp(1234));
+
+ {
+ bucketdb::StorageBucketInfo entry;
+ entry.setBucketInfo(info);
+ entry.disk = 0;
+ _node->getStorageBucketDatabase().insert(id, entry, "foo");
+ }
+
+ _top->open();
+
+ _top->sendDown(api::StorageMessage::SP(msg));
+ if (_top->getNumReplies() == 1) {
+ CPPUNIT_ASSERT_EQUAL(0, (int)_bottom->getNumCommands());
+ CPPUNIT_ASSERT(!static_cast<api::StorageReply&>(
+ *_top->getReply(0)).getResult().success());
+ return true;
+ } else {
+ CPPUNIT_ASSERT_EQUAL(0, (int)_top->getNumReplies());
+
+ // Check that bucket database now has the operation's timestamp as last modified.
+ {
+ StorBucketDatabase::WrappedEntry entry(
+ _node->getStorageBucketDatabase().get(id, "foo"));
+ CPPUNIT_ASSERT_EQUAL(lastModified, entry->info.getLastModified());
+ }
+
+ return false;
+ }
+}
+
+void BucketManagerTest::testRemoveLastModifiedOK()
+{
+ CPPUNIT_ASSERT(!wasBlockedDueToLastModified(
+ new api::RemoveCommand(document::BucketId(16, 1),
+ document::DocumentId("userdoc:m:1:foo"),
+ api::Timestamp(1235)),
+ 1235));
+}
+
+
+void BucketManagerTest::testRemoveLastModifiedFailed()
+{
+ CPPUNIT_ASSERT(wasBlockedDueToLastModified(
+ new api::RemoveCommand(document::BucketId(16, 1),
+ document::DocumentId("userdoc:m:1:foo"),
+ api::Timestamp(1233)),
+ 1233));
+}
+
+void BucketManagerTest::testDistributionBitGenerationEmpty()
+{
+ TestName("BucketManagerTest::testDistributionBitGenerationEmpty()");
+ setupTestEnvironment();
+ _manager->doneInit();
+ vespalib::Monitor l;
+ _manager->updateMetrics(BucketManager::MetricLockGuard(l));
+ CPPUNIT_ASSERT_EQUAL(58u, _node->getStateUpdater().getReportedNodeState()->getMinUsedBits());
+}
+
+void BucketManagerTest::testDistributionBitChangeOnCreateBucket()
+{
+ TestName("BucketManagerTest::testDistributionBitChangeOnCreateBucket()");
+ setupTestEnvironment();
+ addBucketsToDB(30);
+ _top->open();
+ _node->getDoneInitializeHandler().notifyDoneInitializing();
+ _manager->doneInit();
+ _manager->updateMinUsedBits();
+ CPPUNIT_ASSERT_EQUAL(16u, _node->getStateUpdater().getReportedNodeState()->getMinUsedBits());
+
+ std::shared_ptr<api::CreateBucketCommand> cmd(
+ new api::CreateBucketCommand(document::BucketId(4, 5678)));
+ _top->sendDown(cmd);
+ CPPUNIT_ASSERT_EQUAL(4u, _node->getStateUpdater().getReportedNodeState()->getMinUsedBits());
+}
+
+void BucketManagerTest::testMinUsedBitsFromComponentIsHonored()
+{
+ TestName("BucketManagerTest::testMinUsedBitsFromComponentIsHonored()");
+ setupTestEnvironment();
+ // Let these differ in order to test state update behavior.
+ _node->getComponentRegister().getMinUsedBitsTracker().setMinUsedBits(10);
+ lib::NodeState ns(
+ *_node->getStateUpdater().getReportedNodeState());
+ ns.setMinUsedBits(13);
+ _node->getStateUpdater().setReportedNodeState(ns);
+ addBucketsToDB(30);
+ _top->open();
+ // Don't update metrics, as these will always overwrite the min used bits
+ // if it differs from the db.
+
+ // 12 >= 10, so no update of reported state (left at 13; this should of
+ // course not happen in practice, but used for faking in the test)
+ std::shared_ptr<api::CreateBucketCommand> cmd(
+ new api::CreateBucketCommand(document::BucketId(12, 5678)));
+ _top->sendDown(cmd);
+ CPPUNIT_ASSERT_EQUAL(13u, _node->getStateUpdater().getReportedNodeState()->getMinUsedBits());
+}
+
+void BucketManagerTest::testRequestBucketInfoWithState()
+{
+ TestName("BucketManagerTest::testRequestBucketInfoWithState()");
+ // Test prior to building bucket cache
+ setupTestEnvironment();
+ addBucketsToDB(30);
+ /* Currently this is just queued up
+ {
+ std::shared_ptr<api::RequestBucketInfoCommand> cmd(
+ new api::RequestBucketInfoCommand(
+ 0, lib::ClusterState("distributor:3 .2.s:d storage:1")));
+ _top->sendDown(cmd);
+ _top->waitForMessages(1, 5);
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, _top->getNumReplies());
+ std::shared_ptr<api::RequestBucketInfoReply> reply(
+ std::dynamic_pointer_cast<api::RequestBucketInfoReply>(
+ _top->getReply(0)));
+ _top->reset();
+ CPPUNIT_ASSERT(reply.get());
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::NOT_READY),
+ reply->getResult());
+ } */
+ std::vector<lib::ClusterState> states;
+ states.push_back(lib::ClusterState("version:0"));
+ states.push_back(lib::ClusterState("version:1 distributor:1 storage:1"));
+ states.push_back(lib::ClusterState(
+ "version:2 distributor:3 .1.s:i .2.s:d storage:4"));
+ states.push_back(lib::ClusterState(
+ "version:3 distributor:3 .1.s:i .2.s:d storage:4 .3.s:d"));
+ states.push_back(lib::ClusterState(
+ "version:4 distributor:3 .1.s:i .2.s:d storage:4"));
+
+ _node->setClusterState(states.back());
+ for (uint32_t i=0; i<states.size(); ++i) {
+ api::SetSystemStateCommand::SP cmd(
+ new api::SetSystemStateCommand(states[i]));
+ _manager->onDown(cmd);
+ }
+
+ // Send a request bucket info command that will be outdated and failed.
+ std::shared_ptr<api::RequestBucketInfoCommand> cmd1(
+ new api::RequestBucketInfoCommand(0, states[1]));
+ // Send two request bucket info commands that will be processed together
+ // when the bucket manager is idle, as states are equivalent
+ std::shared_ptr<api::RequestBucketInfoCommand> cmd2(
+ new api::RequestBucketInfoCommand(0, states[2]));
+ std::shared_ptr<api::RequestBucketInfoCommand> cmd3(
+ new api::RequestBucketInfoCommand(0, states[3]));
+
+ // Tag server initialized before starting
+ _top->open();
+ _manager->startWorkerThread();
+ _node->getDoneInitializeHandler().notifyDoneInitializing();
+ _manager->doneInit();
+
+ LOG(info, "Sending 3 different request bucket info messages");
+ _top->sendDown(cmd1);
+ _top->sendDown(cmd2);
+ _top->sendDown(cmd3);
+
+ {
+ LOG(info, "Waiting for response from 3 request bucket info messages");
+ _top->waitForMessages(3, 5);
+ ASSERT_DUMMYLINK_REPLY_COUNT(_top, 3);
+ std::map<uint64_t, api::RequestBucketInfoReply::SP> replies;
+ for (uint32_t i=0; i<3; ++i) {
+ replies[_top->getReply(i)->getMsgId()]
+ = std::dynamic_pointer_cast<api::RequestBucketInfoReply>(
+ _top->getReply(i));
+ }
+ std::shared_ptr<api::RequestBucketInfoReply> reply1(
+ replies[cmd1->getMsgId()]);
+ std::shared_ptr<api::RequestBucketInfoReply> reply2(
+ replies[cmd2->getMsgId()]);
+ std::shared_ptr<api::RequestBucketInfoReply> reply3(
+ replies[cmd3->getMsgId()]);
+ _top->reset();
+ CPPUNIT_ASSERT(reply1.get());
+ CPPUNIT_ASSERT(reply2.get());
+ CPPUNIT_ASSERT(reply3.get());
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::REJECTED,
+ "Ignoring bucket info request for cluster state version 1 as "
+ "versions from version 2 differs from this state."),
+ reply1->getResult());
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::REJECTED,
+ "There is already a newer bucket info request for "
+ "this node from distributor 0"),
+ reply2->getResult());
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::OK),
+ reply3->getResult());
+ api::RequestBucketInfoReply::Entry entry;
+
+ CPPUNIT_ASSERT_EQUAL((size_t) 18, reply3->getBucketInfo().size());
+ entry = api::RequestBucketInfoReply::Entry(
+ document::BucketId(16, 0xe8c8), api::BucketInfo(0x79d04f78, 11153, 1851385240u));
+ CPPUNIT_ASSERT_EQUAL(entry, reply3->getBucketInfo()[0]);
+ }
+}
+
+namespace {
+ struct PopenWrapper {
+ FILE* _file;
+ std::vector<char> _buffer;
+ uint32_t _index;
+ uint32_t _size;
+ bool _eof;
+
+ PopenWrapper(const std::string& cmd)
+ : _buffer(65536, '\0'), _index(0), _size(0), _eof(false)
+ {
+ _file = popen(cmd.c_str(), "r");
+ if (_file == 0) {
+ throw vespalib::Exception("Failed to run '" + cmd
+ + "' in popen: " + strerror(errno), VESPA_STRLOC);
+ }
+ }
+
+ const char* getNextLine() {
+ if (_eof && _size == 0) return 0;
+ // Check if we have a newline waiting
+ char* newline = strchr(&_buffer[_index], '\n');
+ // If not try to get one
+ if (_eof) {
+ newline = &_buffer[_index + _size];
+ } else if (newline == 0) {
+ // If the index is past half the buffer, reposition
+ if (_index > _buffer.size() / 2) {
+ memcpy(&_buffer[0], &_buffer[_index], _size);
+ _index = 0;
+ }
+ // Verify we have space to write to
+ if (_index + _size >= _buffer.size()) {
+ throw vespalib::Exception("No newline could be find in "
+ "half the buffer size. Wrapper not designed to "
+ "handle that long lines (1)", VESPA_STRLOC);
+ }
+ // Fill up buffer
+ size_t bytesRead = fread(&_buffer[_index + _size],
+ 1, _buffer.size() - _index - _size - 1,
+ _file);
+ if (bytesRead == 0) {
+ if (!feof(_file)) {
+ throw vespalib::Exception("Failed to run fgets: "
+ + std::string(strerror(errno)), VESPA_STRLOC);
+ } else {
+ _eof = true;
+ }
+ } else {
+ _size += bytesRead;
+ }
+ newline = strchr(&_buffer[_index], '\n');
+ if (newline == 0) {
+ if (_eof) {
+ if (_size == 0) return 0;
+ } else {
+ throw vespalib::Exception("No newline could be find in "
+ "half the buffer size. Wrapper not designed to "
+ "handle that long lines (2)", VESPA_STRLOC);
+ }
+ }
+ }
+ *newline = '\0';
+ ++newline;
+ const char* line = &_buffer[_index];
+ uint32_t strlen = (newline - line);
+ _index += strlen;
+ _size -= strlen;
+ return line;
+ }
+ };
+}
+
+void BucketManagerTest::testRequestBucketInfoWithList()
+{
+ TestName("BucketManagerTest::testRequestBucketInfoWithList()");
+ setupTestEnvironment();
+ addBucketsToDB(30);
+ _top->open();
+ _node->getDoneInitializeHandler().notifyDoneInitializing();
+ _top->doneInit();
+ {
+ std::vector<document::BucketId> bids;
+ bids.push_back(document::BucketId(16, 0xe8c8));
+
+ std::shared_ptr<api::RequestBucketInfoCommand> cmd(
+ new api::RequestBucketInfoCommand(bids));
+
+ _top->sendDown(cmd);
+ _top->waitForMessages(1, 5);
+ ASSERT_DUMMYLINK_REPLY_COUNT(_top, 1);
+ std::shared_ptr<api::RequestBucketInfoReply> reply(
+ std::dynamic_pointer_cast<api::RequestBucketInfoReply>(
+ _top->getReply(0)));
+ _top->reset();
+ CPPUNIT_ASSERT(reply.get());
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::OK),
+ reply->getResult());
+ if (reply->getBucketInfo().size() > 1) {
+ std::cerr << "Too many replies found\n";
+ for (uint32_t i=0; i<reply->getBucketInfo().size(); ++i) {
+ std::cerr << reply->getBucketInfo()[i] << "\n";
+ }
+ }
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, reply->getBucketInfo().size());
+ api::RequestBucketInfoReply::Entry entry(
+ document::BucketId(16, 0xe8c8),
+ api::BucketInfo(0x79d04f78, 11153, 1851385240u));
+ CPPUNIT_ASSERT_EQUAL(entry, reply->getBucketInfo()[0]);
+ }
+}
+
+void
+BucketManagerTest::testSwallowNotifyBucketChangeReply()
+{
+ TestName("BucketManagerTest::testSwallowNotifyBucketChangeReply()");
+ setupTestEnvironment();
+ addBucketsToDB(30);
+ _top->open();
+ _node->getDoneInitializeHandler().notifyDoneInitializing();
+ _top->doneInit();
+
+ api::NotifyBucketChangeCommand cmd(document::BucketId(1, 16),
+ api::BucketInfo());
+ std::shared_ptr<api::NotifyBucketChangeReply> reply(
+ new api::NotifyBucketChangeReply(cmd));
+
+ _top->sendDown(reply);
+ // Should not leave the bucket manager.
+ CPPUNIT_ASSERT_EQUAL(0, (int)_bottom->getNumCommands());
+}
+
+void
+BucketManagerTest::testMetricsGeneration()
+{
+ setupTestEnvironment();
+ _top->open();
+ // Add 3 buckets; 2 ready, 1 active. 300 docs total, 600 bytes total.
+ for (int i = 0; i < 3; ++i) {
+ bucketdb::StorageBucketInfo entry;
+ entry.disk = 0;
+ api::BucketInfo info(50, 100, 200);
+ if (i > 0) {
+ info.setReady();
+ if (i == 2) {
+ info.setActive();
+ }
+ }
+ entry.setBucketInfo(info);
+ _node->getStorageBucketDatabase().insert(document::BucketId(16, i),
+ entry, "foo");
+ }
+ _node->getDoneInitializeHandler().notifyDoneInitializing();
+ _top->doneInit();
+ vespalib::Monitor l;
+ _manager->updateMetrics(BucketManager::MetricLockGuard(l));
+
+ CPPUNIT_ASSERT_EQUAL(size_t(2), _manager->_metrics->disks.size());
+ const DataStoredMetrics& m(*_manager->_metrics->disks[0]);
+ CPPUNIT_ASSERT_EQUAL(int64_t(3), m.buckets.getLast());
+ CPPUNIT_ASSERT_EQUAL(int64_t(300), m.docs.getLast());
+ CPPUNIT_ASSERT_EQUAL(int64_t(600), m.bytes.getLast());
+ CPPUNIT_ASSERT_EQUAL(int64_t(1), m.active.getLast());
+ CPPUNIT_ASSERT_EQUAL(int64_t(2), m.ready.getLast());
+}
+
+void
+BucketManagerTest::insertSingleBucket(const document::BucketId& bucket,
+ const api::BucketInfo& info)
+{
+ bucketdb::StorageBucketInfo entry;
+ entry.disk = 0;
+ entry.setBucketInfo(info);
+ _node->getStorageBucketDatabase().insert(bucket, entry, "foo");
+}
+
+void
+BucketManagerTest::waitUntilRequestsAreProcessing(size_t nRequests)
+{
+ while (_manager->bucketInfoRequestsCurrentlyProcessing() != nRequests) {
+ std::this_thread::yield();
+ }
+}
+
+namespace {
+
+struct WithBuckets {
+ std::map<document::BucketId, api::BucketInfo> _bucketsAndInfo;
+
+ WithBuckets& add(const document::BucketId& id,
+ const api::BucketInfo& info)
+ {
+ _bucketsAndInfo[id] = info;
+ return *this;
+ }
+};
+
+} // anon ns
+
+class ConcurrentOperationFixture {
+public:
+ ConcurrentOperationFixture(BucketManagerTest& self)
+ : _self(self),
+ _state("distributor:1 storage:1")
+ {
+ _self.setupTestEnvironment();
+ _self._top->open();
+ _self._node->getDoneInitializeHandler().notifyDoneInitializing();
+ _self._manager->startWorkerThread();
+ _self._top->doneInit();
+
+ // Need a cluster state to work with initially, so that processing
+ // bucket requests can calculate a target distributor.
+ _self._node->setClusterState(_state);
+ _self._manager->onDown(
+ std::make_shared<api::SetSystemStateCommand>(_state));
+ }
+
+ void setUp(const WithBuckets& buckets) {
+ for (auto& b : buckets._bucketsAndInfo) {
+ _self.insertSingleBucket(b.first, b.second);
+ }
+ }
+
+ auto acquireBucketLock(const document::BucketId& bucket) {
+ return _self._node->getStorageBucketDatabase().get(bucket, "foo");
+ }
+
+ auto createRemoveCommand(const document::BucketId& bucket,
+ api::Timestamp timestamp = 123456) const
+ {
+ // Note: this is a dummy message; its contained document ID will not
+ // map to the provided bucket ID (at least it's extremely unlikely..)
+ return std::make_shared<api::RemoveCommand>(
+ bucket,
+ document::DocumentId("id:foo:testdoctype1::bar"),
+ timestamp);
+ }
+
+ auto createPutCommand(const document::BucketId& bucket) const {
+ auto doc = _self._node->getTestDocMan().createDocument(
+ "a foo walks into a bar", "id:foo:testdoctype1::bar1");
+ return std::make_shared<api::PutCommand>(
+ bucket, std::move(doc), api::Timestamp(123456));
+ }
+
+ auto createUpdateCommand(const document::BucketId& bucket) const {
+ auto update = std::make_shared<document::DocumentUpdate>(
+ *_self._node->getTestDocMan().getTypeRepo()
+ .getDocumentType("testdoctype1"),
+ document::DocumentId("id:foo:testdoctype1::bar2"));
+ return std::make_shared<api::UpdateCommand>(
+ bucket, update, api::Timestamp(123456));
+ }
+
+ auto createFullFetchCommand() const {
+ return std::make_shared<api::RequestBucketInfoCommand>(0, _state);
+ }
+
+ auto createFullFetchCommandWithHash(vespalib::stringref hash) const {
+ return std::make_shared<api::RequestBucketInfoCommand>(0, _state, hash);
+ }
+
+ auto acquireBucketLockAndSendInfoRequest(const document::BucketId& bucket) {
+ auto guard = acquireBucketLock(bucket);
+ // Send down processing command which will block.
+ _self._top->sendDown(createFullFetchCommand());
+ // Have to wait until worker thread has started chewing on request
+ // before we can continue, or we can end up in a race where processing
+ // does not start until _after_ we've sent up our bucket-deleting
+ // message. Since we hold a bucket lock, the below function can never
+ // transition false->true->false under our feet, only false->true.
+ _self.waitUntilRequestsAreProcessing(1);
+ return guard;
+ }
+
+ // Currently assumes there is only 1 command of cmd's message type in
+ // the bottom storage link.
+ void bounceWithReply(api::StorageCommand& cmd,
+ api::ReturnCode::Result code = api::ReturnCode::OK,
+ const document::BucketId& remapTo = document::BucketId())
+ {
+ _self._bottom->waitForMessages(1, BucketManagerTest::MESSAGE_WAIT_TIME);
+ // Bounce it back up with an implicitly OK status. This should cause the
+ // bucket manager to avoid reporting deleted buckets in its result set
+ // since these have been "tainted" by a concurrent removal.
+ std::unique_ptr<api::StorageReply> reply(cmd.makeReply());
+ if (remapTo.getRawId() != 0) {
+ dynamic_cast<api::BucketReply&>(*reply).remapBucketId(remapTo);
+ }
+ reply->setResult(code);
+ _self._bottom->getAndRemoveMessage(cmd.getType());
+ _self._bottom->sendUp(std::move(reply));
+ }
+
+ auto awaitAndGetReplies(size_t nReplies) {
+ _self._top->waitForMessages(
+ nReplies, BucketManagerTest::MESSAGE_WAIT_TIME);
+ return _self._top->getReplies();
+ }
+
+ void assertOrderedAfterBucketReply(size_t nBucketReplies,
+ const api::MessageType& msgType)
+ {
+ const size_t nTotal = nBucketReplies + 1;
+ auto replies = awaitAndGetReplies(nTotal);
+ CPPUNIT_ASSERT_EQUAL(nTotal, replies.size());
+ for (size_t i = 0; i < nBucketReplies; ++i) {
+ CPPUNIT_ASSERT_EQUAL(api::MessageType::REQUESTBUCKETINFO_REPLY,
+ replies[i]->getType());
+ }
+ CPPUNIT_ASSERT_EQUAL(msgType, replies[nBucketReplies]->getType());
+ }
+
+ void assertReplyOrdering(
+ const std::vector<const api::MessageType*>& replyTypes)
+ {
+ auto replies = awaitAndGetReplies(replyTypes.size());
+ CPPUNIT_ASSERT_EQUAL(replyTypes.size(), replies.size());
+ for (size_t i = 0; i < replyTypes.size(); ++i) {
+ CPPUNIT_ASSERT_EQUAL(*replyTypes[i], replies[i]->getType());
+ }
+ }
+
+ void clearReceivedReplies() {
+ _self._top->getRepliesOnce();
+ }
+
+private:
+ BucketManagerTest& _self;
+ lib::ClusterState _state;
+};
+
+void
+BucketManagerTest::testSplitReplyOrderedAfterBucketReply()
+{
+ ConcurrentOperationFixture fixture(*this);
+ document::BucketId bucketA(17, 0);
+ document::BucketId bucketB(17, 1);
+ fixture.setUp(WithBuckets()
+ .add(bucketA, api::BucketInfo(50, 100, 200))
+ .add(bucketB, api::BucketInfo(100, 200, 400)));
+ auto guard = fixture.acquireBucketLockAndSendInfoRequest(bucketB);
+
+ // Split bucket A to model a concurrent modification to an already fetched
+ // bucket.
+ auto splitCmd = std::make_shared<api::SplitBucketCommand>(bucketA);
+ _top->sendDown(splitCmd);
+ fixture.bounceWithReply(*splitCmd);
+ // Let bucket manager breathe again.
+ guard.unlock();
+
+ fixture.assertOrderedAfterBucketReply(
+ 1, api::MessageType::SPLITBUCKET_REPLY);
+}
+
+void
+BucketManagerTest::testJoinReplyOrderedAfterBucketReply()
+{
+ ConcurrentOperationFixture fixture(*this);
+ document::BucketId bucketA(17, 0);
+ document::BucketId bucketB(17, 1 << 16);
+ document::BucketId parent(16, 0);
+ fixture.setUp(WithBuckets()
+ .add(bucketA, api::BucketInfo(50, 100, 200))
+ .add(bucketB, api::BucketInfo(100, 200, 400)));
+ auto guard = fixture.acquireBucketLockAndSendInfoRequest(bucketB);
+
+ auto joinCmd = std::make_shared<api::JoinBucketsCommand>(parent);
+ joinCmd->getSourceBuckets().assign({bucketA, bucketB});
+ _top->sendDown(joinCmd);
+ fixture.bounceWithReply(*joinCmd);
+
+ guard.unlock();
+ fixture.assertOrderedAfterBucketReply(
+ 1, api::MessageType::JOINBUCKETS_REPLY);
+}
+
+// Technically, deletes being ordered after bucket info replies won't help
+// correctness since buckets are removed from the distributor DB upon _sending_
+// the delete and not receiving it.
+void
+BucketManagerTest::testDeleteReplyOrderedAfterBucketReply()
+{
+ ConcurrentOperationFixture fixture(*this);
+ document::BucketId bucketA(17, 0);
+ document::BucketId bucketB(17, 1);
+ fixture.setUp(WithBuckets()
+ .add(bucketA, api::BucketInfo(50, 100, 200))
+ .add(bucketB, api::BucketInfo(100, 200, 400)));
+ auto guard = fixture.acquireBucketLockAndSendInfoRequest(bucketB);
+
+ auto deleteCmd = std::make_shared<api::DeleteBucketCommand>(bucketA);
+ _top->sendDown(deleteCmd);
+ fixture.bounceWithReply(*deleteCmd);
+
+ guard.unlock();
+
+ fixture.assertOrderedAfterBucketReply(
+ 1, api::MessageType::DELETEBUCKET_REPLY);
+}
+
+void
+BucketManagerTest::testOnlyEnqueueWhenProcessingRequest()
+{
+ ConcurrentOperationFixture fixture(*this);
+ document::BucketId bucketA(17, 0);
+ fixture.setUp(WithBuckets()
+ .add(bucketA, api::BucketInfo(50, 100, 200)));
+
+ // Process delete command _before_ processing bucket requests.
+ auto deleteCmd = std::make_shared<api::DeleteBucketCommand>(bucketA);
+ _top->sendDown(deleteCmd);
+ fixture.bounceWithReply(*deleteCmd);
+ // Should arrive happily on the top.
+ _top->waitForMessages(1, MESSAGE_WAIT_TIME);
+}
+
+// Bucket info requests that contain a specific set of buckets are handled
+// differently than full bucket info fetches and are not delegated to the
+// worker thread. We still require that any split/joins etc are ordered after
+// this reply if their reply is sent up concurrently.
+void
+BucketManagerTest::testOrderRepliesAfterBucketSpecificRequest()
+{
+ ConcurrentOperationFixture fixture(*this);
+ document::BucketId bucketA(17, 0);
+ fixture.setUp(WithBuckets()
+ .add(bucketA, api::BucketInfo(50, 100, 200)));
+
+ auto guard = fixture.acquireBucketLock(bucketA);
+
+ auto infoRoundtrip = std::async(std::launch::async, [&]() {
+ std::vector<document::BucketId> buckets{bucketA};
+ auto infoCmd = std::make_shared<api::RequestBucketInfoCommand>(buckets);
+ // Can't complete until `guard` has been unlocked.
+ _top->sendDown(infoCmd);
+ // Barrier: bucket reply and subsequent split reply
+ _top->waitForMessages(2, MESSAGE_WAIT_TIME);
+ });
+ waitUntilRequestsAreProcessing();
+ // Barrier: roundtrip thread now blocked. Send a split whose reply shall
+ // be enqueued since there's a RequestBucketInfo currently doing its thing.
+ auto splitCmd = std::make_shared<api::SplitBucketCommand>(bucketA);
+ _top->sendDown(splitCmd);
+ // Enqueuing happens synchronously in this thread, so no need for further
+ // synchronization.
+ fixture.bounceWithReply(*splitCmd);
+
+ guard.unlock();
+ infoRoundtrip.get();
+ // At this point, we know 2 messages are in the top queue since the
+ // async future guarantees this for completion.
+ fixture.assertOrderedAfterBucketReply(
+ 1, api::MessageType::SPLITBUCKET_REPLY);
+}
+
+// Test is similar to testOrderRepliesAfterBucketSpecificRequest, but has
+// two concurrent bucket info request processing instances going on; one in
+// the worker thread and one in the message chain itself. Since we only have
+// one queue, we must wait with dispatching replies until _all_ processing
+// has ceased.
+void
+BucketManagerTest::testQueuedRepliesOnlyDispatchedWhenAllProcessingDone()
+{
+ ConcurrentOperationFixture fixture(*this);
+ document::BucketId bucketA(17, 0);
+ fixture.setUp(WithBuckets()
+ .add(bucketA, api::BucketInfo(50, 100, 200)));
+
+ auto guard = fixture.acquireBucketLock(bucketA);
+
+ auto singleBucketInfo = std::async(std::launch::async, [&]() {
+ std::vector<document::BucketId> buckets{bucketA};
+ auto infoCmd = std::make_shared<api::RequestBucketInfoCommand>(buckets);
+ _top->sendDown(infoCmd);
+ _top->waitForMessages(3, MESSAGE_WAIT_TIME);
+ });
+ waitUntilRequestsAreProcessing(1);
+ auto fullFetch = std::async(std::launch::async, [&]() {
+ _top->sendDown(fixture.createFullFetchCommand());
+ _top->waitForMessages(3, MESSAGE_WAIT_TIME);
+ });
+ waitUntilRequestsAreProcessing(2);
+ auto splitCmd = std::make_shared<api::SplitBucketCommand>(bucketA);
+ _top->sendDown(splitCmd);
+ fixture.bounceWithReply(*splitCmd);
+
+ guard.unlock();
+ singleBucketInfo.get();
+ fullFetch.get();
+
+ fixture.assertOrderedAfterBucketReply(
+ 2, api::MessageType::SPLITBUCKET_REPLY);
+}
+
+// Hide boring, repetitive code to allow for chaining of setters (and auto-
+// generation of getters and member vars) behind a macro.
+#ifdef BUILDER_PARAM
+# error "Redefinition of existing macro `BUILDER_PARAM`"
+#endif
+#define BUILDER_PARAM(type, name) \
+ type _ ## name; \
+ auto& name(const type& name ## _) { _ ## name = name ## _; return *this; } \
+ const type & name() const { return _ ## name; }
+
+struct TestParams {
+ BUILDER_PARAM(document::BucketId, bucket);
+ BUILDER_PARAM(document::BucketId, remappedTo);
+ BUILDER_PARAM(api::StorageCommand::SP, documentMutation);
+ BUILDER_PARAM(api::StorageCommand::SP, treeMutation);
+ BUILDER_PARAM(std::vector<const api::MessageType*>, expectedOrdering);
+};
+
+void
+BucketManagerTest::doTestMutationOrdering(
+ ConcurrentOperationFixture& fixture,
+ const TestParams& params)
+{
+ fixture.setUp(WithBuckets()
+ .add(params.bucket(), api::BucketInfo(50, 100, 200)));
+ // Have to send down mutating command _before_ we take bucket lock, as the
+ // bucket manager acquires a lock for bucket on the way down in order to
+ // check the timestamp of the message vs the last modified timestamp of
+ // the bucket itself (offers some time travelling clock protection).
+ _top->sendDown(params.documentMutation());
+ auto guard = fixture.acquireBucketLockAndSendInfoRequest(params.bucket());
+
+ _top->sendDown(params.treeMutation());
+ // Unless "conflicting" mutation replies are enqueued after splits et al,
+ // they will bypass the lock and arrive in an inverse order of execution
+ // at the distributor. Note that we send replies in the opposite order their
+ // commands were sent down, but this is an artifact of ordering commands
+ // to avoid test deadlocks, and priorities may alter the execution order
+ // anyway. The important thing is that reply orders are not altered.
+ fixture.bounceWithReply(*params.treeMutation());
+ fixture.bounceWithReply(*params.documentMutation(),
+ api::ReturnCode::OK,
+ params.remappedTo());
+ guard.unlock();
+
+ fixture.assertReplyOrdering(params.expectedOrdering());
+}
+
+void
+BucketManagerTest::doTestConflictingReplyIsEnqueued(
+ const document::BucketId& bucket,
+ const api::StorageCommand::SP& treeMutationCmd,
+ const api::MessageType& treeMutationReplyType)
+{
+ ConcurrentOperationFixture fixture(*this);
+
+ // We don't check all combinations of document operation replies vs
+ // bucket operation replies, just RemoveReply vs all bucket ops.
+ auto params = TestParams()
+ .bucket(bucket)
+ .documentMutation(fixture.createRemoveCommand(bucket))
+ .treeMutation(treeMutationCmd)
+ .expectedOrdering({&api::MessageType::REQUESTBUCKETINFO_REPLY,
+ &treeMutationReplyType,
+ &api::MessageType::REMOVE_REPLY});
+
+ doTestMutationOrdering(fixture, params);
+}
+
+void
+BucketManagerTest::testMutationRepliesForSplitBucketAreEnqueued()
+{
+ document::BucketId bucket(17, 0);
+ doTestConflictingReplyIsEnqueued(
+ bucket,
+ std::make_shared<api::SplitBucketCommand>(bucket),
+ api::MessageType::SPLITBUCKET_REPLY);
+}
+
+void
+BucketManagerTest::testMutationRepliesForDeletedBucketAreEnqueued()
+{
+ document::BucketId bucket(17, 0);
+ doTestConflictingReplyIsEnqueued(
+ bucket,
+ std::make_shared<api::DeleteBucketCommand>(bucket),
+ api::MessageType::DELETEBUCKET_REPLY);
+}
+
+void
+BucketManagerTest::testMutationRepliesForJoinedBucketAreEnqueued()
+{
+ ConcurrentOperationFixture fixture(*this);
+ document::BucketId bucketA(17, 0);
+ document::BucketId bucketB(17, 1 << 16);
+ document::BucketId parent(16, 0);
+ // We only test for the parent bucket, since that's what queued operations
+ // will be remapped to after a successful join.
+ auto joinCmd = std::make_shared<api::JoinBucketsCommand>(parent);
+ joinCmd->getSourceBuckets().assign({bucketA, bucketB});
+
+ auto params = TestParams()
+ .bucket(parent)
+ .documentMutation(fixture.createRemoveCommand(parent))
+ .treeMutation(joinCmd)
+ .expectedOrdering({&api::MessageType::REQUESTBUCKETINFO_REPLY,
+ &api::MessageType::JOINBUCKETS_REPLY,
+ &api::MessageType::REMOVE_REPLY});
+
+ doTestMutationOrdering(fixture, params);
+}
+
+void
+BucketManagerTest::testConflictingPutRepliesAreEnqueued()
+{
+ ConcurrentOperationFixture fixture(*this);
+ document::BucketId bucket(17, 0);
+
+ auto params = TestParams()
+ .bucket(bucket)
+ .documentMutation(fixture.createPutCommand(bucket))
+ .treeMutation(std::make_shared<api::SplitBucketCommand>(bucket))
+ .expectedOrdering({&api::MessageType::REQUESTBUCKETINFO_REPLY,
+ &api::MessageType::SPLITBUCKET_REPLY,
+ &api::MessageType::PUT_REPLY});
+
+ doTestMutationOrdering(fixture, params);
+}
+
+void
+BucketManagerTest::testConflictingUpdateRepliesAreEnqueued()
+{
+ ConcurrentOperationFixture fixture(*this);
+ document::BucketId bucket(17, 0);
+
+ auto params = TestParams()
+ .bucket(bucket)
+ .documentMutation(fixture.createUpdateCommand(bucket))
+ .treeMutation(std::make_shared<api::SplitBucketCommand>(bucket))
+ .expectedOrdering({&api::MessageType::REQUESTBUCKETINFO_REPLY,
+ &api::MessageType::SPLITBUCKET_REPLY,
+ &api::MessageType::UPDATE_REPLY});
+
+ doTestMutationOrdering(fixture, params);
+}
+
+/**
+ * After a split or join, any messages bound for the original bucket(s) that
+ * are currently in the persistence queues will be remapped to the bucket
+ * resulting from the operation. We have to make sure remapped operations are
+ * enqueued as well.
+ */
+void
+BucketManagerTest::testRemappedMutationIsCheckedAgainstOriginalBucket()
+{
+ ConcurrentOperationFixture fixture(*this);
+ document::BucketId bucket(17, 0);
+ document::BucketId remappedToBucket(18, 0);
+
+ auto params = TestParams()
+ .bucket(bucket)
+ .documentMutation(fixture.createRemoveCommand(bucket))
+ .remappedTo(remappedToBucket)
+ .treeMutation(std::make_shared<api::SplitBucketCommand>(bucket))
+ .expectedOrdering({&api::MessageType::REQUESTBUCKETINFO_REPLY,
+ &api::MessageType::SPLITBUCKET_REPLY,
+ &api::MessageType::REMOVE_REPLY});
+
+ doTestMutationOrdering(fixture, params);
+}
+
+void
+BucketManagerTest::scheduleBucketInfoRequestWithConcurrentOps(
+ ConcurrentOperationFixture& fixture,
+ const document::BucketId& bucketForRemove,
+ const document::BucketId& bucketForSplit,
+ api::Timestamp mutationTimestamp)
+{
+ auto mutation(
+ fixture.createRemoveCommand(bucketForRemove, mutationTimestamp));
+ _top->sendDown(mutation);
+ auto guard = fixture.acquireBucketLockAndSendInfoRequest(
+ bucketForRemove);
+
+ auto conflictingOp(
+ std::make_shared<api::SplitBucketCommand>(bucketForSplit));
+ _top->sendDown(conflictingOp);
+ fixture.bounceWithReply(*conflictingOp);
+ fixture.bounceWithReply(*mutation);
+ guard.unlock();
+}
+
+void
+BucketManagerTest::testBucketConflictSetIsClearedBetweenBlockingRequests()
+{
+ ConcurrentOperationFixture fixture(*this);
+ document::BucketId firstConflictBucket(17, 0);
+ document::BucketId secondConflictBucket(18, 0);
+
+ fixture.setUp(WithBuckets()
+ .add(firstConflictBucket, api::BucketInfo(50, 100, 200))
+ .add(secondConflictBucket, api::BucketInfo(60, 200, 300)));
+
+ // Do a single round of starting and completing a request bucket info
+ // command with queueing and adding of `firstConflictBucket` to the set
+ // of conflicting buckets.
+ scheduleBucketInfoRequestWithConcurrentOps(
+ fixture, firstConflictBucket,
+ firstConflictBucket, api::Timestamp(1000));
+
+ // Barrier for completion of first round of replies. Subsequently remove
+ // all replies to get a clean slate.
+ fixture.awaitAndGetReplies(3);
+ fixture.clearReceivedReplies();
+
+ // Do a second round with a different bucket as the conflict. The
+ // mutation towards the first conflict bucket should now _not_ be queued
+ // as it was for an entirely different request bucket round.
+ scheduleBucketInfoRequestWithConcurrentOps(
+ fixture, firstConflictBucket,
+ secondConflictBucket, api::Timestamp(1001));
+
+ // Remove is not ordered after the split here since it should not be
+ // queued.
+ fixture.assertReplyOrdering({&api::MessageType::REMOVE_REPLY,
+ &api::MessageType::REQUESTBUCKETINFO_REPLY,
+ &api::MessageType::SPLITBUCKET_REPLY});
+}
+
+void
+BucketManagerTest::sendSingleBucketInfoRequest(const document::BucketId& id)
+{
+ std::vector<document::BucketId> buckets{id};
+ auto infoCmd = std::make_shared<api::RequestBucketInfoCommand>(buckets);
+ _top->sendDown(infoCmd);
+}
+
+void
+BucketManagerTest::testConflictSetOnlyClearedAfterAllBucketRequestsDone()
+{
+ ConcurrentOperationFixture fixture(*this);
+ document::BucketId bucketA(16, 0);
+ document::BucketId bucketB(16, 1);
+
+ fixture.setUp(WithBuckets()
+ .add(bucketA, api::BucketInfo(50, 100, 200))
+ .add(bucketB, api::BucketInfo(60, 200, 300)));
+
+ auto mutation = fixture.createRemoveCommand(bucketA);
+ _top->sendDown(mutation);
+
+ auto guardA = fixture.acquireBucketLock(bucketA);
+ auto guardB = fixture.acquireBucketLock(bucketB);
+
+ auto singleBucketInfoA = std::async(std::launch::async, [&]() {
+ sendSingleBucketInfoRequest(bucketA);
+ _top->waitForMessages(4, MESSAGE_WAIT_TIME);
+ });
+ waitUntilRequestsAreProcessing(1);
+ auto singleBucketInfoB = std::async(std::launch::async, [&]() {
+ sendSingleBucketInfoRequest(bucketB);
+ _top->waitForMessages(4, MESSAGE_WAIT_TIME);
+ });
+ // Barrier: after this point, both tasks are in the protected section.
+ // Neither async bucket info request can proceed as long as there are
+ // guards holding their desired bucket locks.
+ waitUntilRequestsAreProcessing(2);
+
+ auto conflictingOp = std::make_shared<api::SplitBucketCommand>(bucketA);
+ _top->sendDown(conflictingOp);
+ fixture.bounceWithReply(*conflictingOp);
+ // Releasing guard A (and allowing the request for A to go through) should
+ // _not_ clear the conflict set. I.e. if we send a mutation reply for a
+ // conflicted bucket up at this point, it should be enqueued after the
+ // split reply.
+ guardA.unlock();
+ _top->waitForMessages(1, MESSAGE_WAIT_TIME); // Completion barrier for A.
+ fixture.bounceWithReply(*mutation);
+ // Allow B to go through. This _should_ clear the conflict set and dequeue
+ // any conflicted mutations after their conflicting ops.
+ guardB.unlock();
+ singleBucketInfoA.get();
+ singleBucketInfoB.get();
+ // Note: request bucket info reply is dispatched up _before_ protected
+ // section guard goes out of scope, so reply is ordered before conflicts.
+ fixture.assertReplyOrdering({&api::MessageType::REQUESTBUCKETINFO_REPLY,
+ &api::MessageType::REQUESTBUCKETINFO_REPLY,
+ &api::MessageType::SPLITBUCKET_REPLY,
+ &api::MessageType::REMOVE_REPLY});
+}
+
+void
+BucketManagerTest::assertRequestWithBadHashIsRejected(
+ ConcurrentOperationFixture& fixture)
+{
+ // Test by default sets up 10 nodes in config. Pretend we only know of 3.
+ auto infoCmd = fixture.createFullFetchCommandWithHash("(0;0;1;2)");
+ _top->sendDown(infoCmd);
+ auto replies = fixture.awaitAndGetReplies(1);
+ auto& reply = dynamic_cast<api::RequestBucketInfoReply&>(*replies[0]);
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode::REJECTED,
+ reply.getResult().getResult());
+}
+
+void
+BucketManagerTest::testRejectRequestWithMismatchingDistributionHash()
+{
+ ConcurrentOperationFixture fixture(*this);
+ document::BucketId bucket(17, 0);
+ fixture.setUp(WithBuckets().add(bucket, api::BucketInfo(50, 100, 200)));
+ assertRequestWithBadHashIsRejected(fixture);
+}
+
+void
+BucketManagerTest::testDbNotIteratedWhenAllRequestsRejected()
+{
+ ConcurrentOperationFixture fixture(*this);
+ document::BucketId bucket(17, 0);
+ fixture.setUp(WithBuckets().add(bucket, api::BucketInfo(50, 100, 200)));
+ auto guard = fixture.acquireBucketLock(bucket);
+ // We've got a bucket locked, so iff the manager actually starts processing
+ // buckets even though it has no requests active, it will stall while
+ // waiting for the lock to be released. When we then send down an additional
+ // bucket info request, this request will either be rejected immediately (if
+ // the db is NOT processed) or time out and fail the test.
+ assertRequestWithBadHashIsRejected(fixture);
+ fixture.clearReceivedReplies();
+
+ auto infoCmd = fixture.createFullFetchCommandWithHash("(0;0;1;2)");
+ _top->sendDown(infoCmd);
+ auto replies = fixture.awaitAndGetReplies(1);
+}
+
+/**
+ * Accept bucket info requests if their distribution hash is a valid permutation
+ * of our own config (i.e. they are set-wise identical even though the
+ * ordering of nodes may differ). See VESPA-1980 for context.
+ */
+void
+BucketManagerTest::testReceivedDistributionHashIsNormalized()
+{
+ ConcurrentOperationFixture fixture(*this);
+ document::BucketId bucket(17, 0);
+ fixture.setUp(WithBuckets().add(bucket, api::BucketInfo(50, 100, 200)));
+
+ // Test is configured with 10 nodes in increasing order. Jumble the order
+ // around.
+ auto infoCmd = fixture.createFullFetchCommandWithHash(
+ "(0;2;1;3;9;6;4;5;8;7;0)");
+ _top->sendDown(infoCmd);
+ auto replies = fixture.awaitAndGetReplies(1);
+ auto& reply = dynamic_cast<api::RequestBucketInfoReply&>(*replies[0]);
+ // Should NOT have been rejected despite hash not matching config order
+ // verbatim.
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode::OK, reply.getResult().getResult());
+}
+
+} // storage
diff --git a/storage/src/tests/bucketdb/distribution_hash_normalizer_test.cpp b/storage/src/tests/bucketdb/distribution_hash_normalizer_test.cpp
new file mode 100644
index 00000000000..7734e1054ff
--- /dev/null
+++ b/storage/src/tests/bucketdb/distribution_hash_normalizer_test.cpp
@@ -0,0 +1,114 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/storage/bucketdb/distribution_hash_normalizer.h>
+#include <string>
+
+namespace storage {
+
+using Normalizer = DistributionHashNormalizer;
+
+class DistributionHashNormalizerTest : public CppUnit::TestFixture {
+public:
+ CPPUNIT_TEST_SUITE(DistributionHashNormalizerTest);
+ CPPUNIT_TEST(orderNonHierarchicRootGroupNodesByDistributionKey);
+ CPPUNIT_TEST(mayHaveSameGroupIndexAsNodeIndex);
+ CPPUNIT_TEST(emitOptionalCapacityForRootGroup);
+ CPPUNIT_TEST(emitOptionalCapacityForSubGroups);
+ CPPUNIT_TEST(hierarchicGroupsAreOrderedByGroupIndex);
+ CPPUNIT_TEST(subgroupsOrderedOnEachNestingLevel);
+ CPPUNIT_TEST(distributionSpecIsCopiedVerbatim);
+ CPPUNIT_TEST(emptyInputYieldsEmptyOutput);
+ CPPUNIT_TEST(parseFailureReturnsInputVerbatim);
+ CPPUNIT_TEST_SUITE_END();
+
+ void orderNonHierarchicRootGroupNodesByDistributionKey();
+ void mayHaveSameGroupIndexAsNodeIndex();
+ void emitOptionalCapacityForRootGroup();
+ void emitOptionalCapacityForSubGroups();
+ void hierarchicGroupsAreOrderedByGroupIndex();
+ void subgroupsOrderedOnEachNestingLevel();
+ void distributionSpecIsCopiedVerbatim();
+ void emptyInputYieldsEmptyOutput();
+ void parseFailureReturnsInputVerbatim();
+
+private:
+ DistributionHashNormalizer _normalizer;
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(DistributionHashNormalizerTest);
+
+void
+DistributionHashNormalizerTest::orderNonHierarchicRootGroupNodesByDistributionKey()
+{
+ // Group index is first in list.
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("(1;0;2;3;4;7)"),
+ _normalizer.normalize("(1;4;7;2;0;3)"));
+}
+
+void
+DistributionHashNormalizerTest::mayHaveSameGroupIndexAsNodeIndex()
+{
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("(0;0;2;3;4;7)"),
+ _normalizer.normalize("(0;4;7;2;0;3)"));
+}
+
+void
+DistributionHashNormalizerTest::emitOptionalCapacityForRootGroup()
+{
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("(0c12.5;1;2;3;4;7)"),
+ _normalizer.normalize("(0c12.5;1;4;7;2;3)"));
+}
+
+void
+DistributionHashNormalizerTest::emitOptionalCapacityForSubGroups()
+{
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("(0d1|*(1c5.5;1)(2;2)(3c7;3))"),
+ _normalizer.normalize("(0d1|*(2;2)(1c5.5;1)(3c7;3))"));
+}
+
+void
+DistributionHashNormalizerTest::hierarchicGroupsAreOrderedByGroupIndex()
+{
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("(0d1|*(0;0)(1;1)(3;3))"),
+ _normalizer.normalize("(0d1|*(3;3)(1;1)(0;0))"));
+}
+
+void
+DistributionHashNormalizerTest::subgroupsOrderedOnEachNestingLevel()
+{
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("(0d1|*(1d3|*(2;2)(3;3))"
+ "(4;1)(7d2|*(5;5)(6;6)))"),
+ _normalizer.normalize("(0d1|*(7d2|*(6;6)(5;5))"
+ "(1d3|*(2;2)(3;3))(4;1))"));
+}
+
+void
+DistributionHashNormalizerTest::distributionSpecIsCopiedVerbatim()
+{
+ // Definitely don't want to do any ordering of the distribution spec.
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("(0d3|2|1|*(0;0)(1;1)(3;3))"),
+ _normalizer.normalize("(0d3|2|1|*(3;3)(1;1)(0;0))"));
+}
+
+void
+DistributionHashNormalizerTest::emptyInputYieldsEmptyOutput()
+{
+ // Technically a parse failure (only 4.2 has this behavior), but it's
+ // explicitly checked for in BucketManager, so let's test it explicitly
+ // here as well.
+ CPPUNIT_ASSERT_EQUAL(vespalib::string(""), _normalizer.normalize(""));
+}
+
+// In the (unlikely) case that the parser somehow fails to capture all possible
+// valid values of the distribution hash, fall back to returning the non-
+// normalized string. A log warning will also be emitted (though that's not
+// testable).
+void
+DistributionHashNormalizerTest::parseFailureReturnsInputVerbatim()
+{
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("onkel skrue"),
+ _normalizer.normalize("onkel skrue"));
+}
+
+} // storage
+
diff --git a/storage/src/tests/bucketdb/initializertest.cpp b/storage/src/tests/bucketdb/initializertest.cpp
new file mode 100644
index 00000000000..169150a7ff9
--- /dev/null
+++ b/storage/src/tests/bucketdb/initializertest.cpp
@@ -0,0 +1,924 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * Tests storage initialization without depending on persistence layer.
+ */
+#include <vespa/fastos/fastos.h>
+#include <vespa/storage/bucketdb/storagebucketdbinitializer.h>
+
+#include <vespa/document/base/testdocman.h>
+#include <vespa/log/log.h>
+#include <vespa/storage/persistence/filestorage/filestormanager.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storageapi/message/state.h>
+#include <tests/common/teststorageapp.h>
+#include <tests/common/dummystoragelink.h>
+#include <tests/common/testhelper.h>
+#include <vespa/vdstestlib/cppunit/dirconfig.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+
+LOG_SETUP(".test.bucketdb.initializing");
+
+namespace storage {
+
+typedef uint16_t PartitionId;
+
+struct InitializerTest : public CppUnit::TestFixture {
+
+ class InitParams {
+ vdstestlib::DirConfig config;
+ bool configFinalized;
+
+ public:
+ uint32_t bucketBitsUsed;
+ NodeIndex nodeIndex;
+ NodeCount nodeCount;
+ Redundancy redundancy;
+ uint32_t docsPerDisk;
+ DiskCount diskCount;
+ std::set<uint32_t> disksDown;
+ bool bucketWrongDisk;
+ bool bucketMultipleDisks;
+ bool failingListRequest;
+ bool failingInfoRequest;
+
+ InitParams()
+ : config(getStandardConfig(true)),
+ configFinalized(false),
+ bucketBitsUsed(4),
+ nodeIndex(0),
+ nodeCount(10),
+ redundancy(2),
+ docsPerDisk(10),
+ diskCount(5),
+ bucketWrongDisk(false),
+ bucketMultipleDisks(false),
+ failingListRequest(false),
+ failingInfoRequest(false) {}
+
+ void setAllFailures() {
+ bucketWrongDisk = true;
+ bucketMultipleDisks = true;
+ failingListRequest = true;
+ failingInfoRequest = true;
+ }
+
+ vdstestlib::DirConfig& getConfig() {
+ if (!configFinalized) {
+ config.getConfig("stor-server")
+ .setValue("node_index", nodeIndex);
+ config.getConfig("stor-distribution")
+ .setValue("redundancy", redundancy);
+ configFinalized = true;
+ }
+ return config;
+ }
+
+ };
+
+ document::TestDocMan _docMan;
+
+ void testInitialization(InitParams& params);
+
+ /**
+ * Test that the status page can be shown during init without a deadlock
+ * or crash or anything. Don't validate much output, it might change.
+ */
+ void testStatusPage();
+
+ /** Test initializing with an empty node. */
+ void testInitEmptyNode() {
+ InitParams params;
+ params.docsPerDisk = 0;
+ testInitialization(params);
+ }
+ /** Test initializing with some data on single disk. */
+ void testInitSingleDisk() {
+ InitParams params;
+ params.diskCount = DiskCount(1);
+ testInitialization(params);
+ }
+ /** Test initializing with multiple disks. */
+ void testInitMultiDisk() {
+ InitParams params;
+ testInitialization(params);
+ }
+ /** Test initializing with one of the disks being bad. */
+ void testInitFailingMiddleDisk() {
+ InitParams params;
+ params.disksDown.insert(1);
+ testInitialization(params);
+ }
+ /** Test initializing with last disk being bad. */
+ void testInitFailingLastDisk() {
+ InitParams params;
+ params.disksDown.insert(params.diskCount - 1);
+ testInitialization(params);
+ }
+ /** Test initializing with bucket on wrong disk. */
+ void testInitBucketOnWrongDisk() {
+ InitParams params;
+ params.bucketWrongDisk = true;
+ params.bucketBitsUsed = 58;
+ testInitialization(params);
+ }
+ /** Test initializing with bucket on multiple disks. */
+ void testInitBucketOnMultipleDisks() {
+ InitParams params;
+ params.bucketMultipleDisks = true;
+ params.bucketBitsUsed = 58;
+ testInitialization(params);
+ }
+ /** Test initializing with failing list request. */
+ void testInitFailingListRequest() {
+ InitParams params;
+ params.failingListRequest = true;
+ testInitialization(params);
+ }
+ void testInitFailingInfoRequest() {
+ InitParams params;
+ params.failingInfoRequest = true;
+ testInitialization(params);
+ }
+ /** Test initializing with everything being wrong at once. */
+ void testAllFailures() {
+ InitParams params;
+ params.docsPerDisk = 100;
+ params.diskCount = DiskCount(10);
+ params.disksDown.insert(0);
+ params.disksDown.insert(2);
+ params.disksDown.insert(3);
+ params.disksDown.insert(9);
+ params.setAllFailures();
+ testInitialization(params);
+ }
+ void testCommandBlockingDuringInit();
+
+ void testBucketProgressCalculator();
+
+ void testBucketsInitializedByLoad();
+
+ CPPUNIT_TEST_SUITE(InitializerTest);
+ CPPUNIT_TEST(testInitEmptyNode);
+ CPPUNIT_TEST(testInitSingleDisk);
+ CPPUNIT_TEST(testInitMultiDisk);
+ CPPUNIT_TEST(testInitFailingMiddleDisk);
+ CPPUNIT_TEST(testInitFailingLastDisk);
+ CPPUNIT_TEST(testInitBucketOnWrongDisk);
+ //CPPUNIT_TEST(testInitBucketOnMultipleDisks);
+ //CPPUNIT_TEST(testStatusPage);
+ //CPPUNIT_TEST(testCommandBlockingDuringInit);
+ //CPPUNIT_TEST(testAllFailures);
+ CPPUNIT_TEST(testBucketProgressCalculator);
+ CPPUNIT_TEST(testBucketsInitializedByLoad);
+ CPPUNIT_TEST_SUITE_END();
+
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(InitializerTest);
+
+namespace {
+// Data kept on buckets we're using in test.
+struct BucketData {
+ api::BucketInfo info;
+
+ BucketData() : info(0, 0, 0, 0, 0) {
+ }
+
+ BucketData operator+(const BucketData& other) const {
+ BucketData copy;
+ copy.info.setDocumentCount(
+ info.getDocumentCount() + other.info.getDocumentCount());
+ copy.info.setTotalDocumentSize(
+ info.getTotalDocumentSize()
+ + other.info.getTotalDocumentSize());
+ copy.info.setChecksum(
+ info.getChecksum() * other.info.getChecksum());
+ return copy;
+ }
+};
+// Data residing on one disk
+typedef std::map<document::BucketId, BucketData> DiskData;
+struct BucketInfoLogger {
+ std::map<PartitionId, DiskData>& map;
+
+ BucketInfoLogger(std::map<PartitionId, DiskData>& m)
+ : map(m) {}
+
+ StorBucketDatabase::Decision operator()(
+ uint64_t revBucket, StorBucketDatabase::Entry& entry)
+ {
+ document::BucketId bucket(
+ document::BucketId::keyToBucketId(revBucket));
+ CPPUNIT_ASSERT(bucket.getRawId() != 0);
+ CPPUNIT_ASSERT_MSG(
+ "Found invalid bucket in database: " + bucket.toString()
+ + " " + entry.getBucketInfo().toString(),
+ entry.getBucketInfo().valid());
+ DiskData& ddata(map[entry.disk]);
+ BucketData& bdata(ddata[bucket]);
+ bdata.info = entry.getBucketInfo();
+ return StorBucketDatabase::CONTINUE;
+ }
+};
+std::map<PartitionId, DiskData>
+createMapFromBucketDatabase(StorBucketDatabase& db) {
+ std::map<PartitionId, DiskData> result;
+ BucketInfoLogger infoLogger(result);
+ db.all(infoLogger, "createmap");
+ return result;
+}
+// Create data we want to have in this test
+std::map<PartitionId, DiskData>
+buildBucketInfo(const document::TestDocMan& docMan,
+ InitializerTest::InitParams& params)
+{
+ std::map<PartitionId, DiskData> result;
+ for (uint32_t i=0; i<params.diskCount; ++i) {
+ if (params.disksDown.find(i) == params.disksDown.end()) {
+ result[i];
+ }
+ }
+ lib::Distribution distribution(
+ lib::Distribution::getDefaultDistributionConfig(
+ params.redundancy, params.nodeCount));
+ document::BucketIdFactory bucketIdFactory;
+ lib::NodeState nodeState;
+ nodeState.setDiskCount(params.diskCount);
+
+ uint64_t totalDocs = params.docsPerDisk * params.diskCount;
+ for (uint32_t i=0, n=totalDocs; i<n; ++i) {
+ bool useWrongDisk = false;
+ if (i == 1 && params.bucketWrongDisk) {
+ useWrongDisk = true;
+ }
+ document::Document::SP doc(docMan.createRandomDocument(i));
+ if (i == 3 && params.bucketMultipleDisks) {
+ doc = docMan.createRandomDocument(i - 1);
+ useWrongDisk = true;
+ }
+ document::BucketId bid(bucketIdFactory.getBucketId(doc->getId()));
+ bid.setUsedBits(params.bucketBitsUsed);
+ bid = bid.stripUnused();
+ uint32_t partition(distribution.getIdealDisk(
+ nodeState, params.nodeIndex, bid,
+ lib::Distribution::IDEAL_DISK_EVEN_IF_DOWN));
+ if (params.disksDown.find(partition) != params.disksDown.end()) {
+ continue;
+ }
+ if (useWrongDisk) {
+ int correctPart = partition;
+ partition = (partition + 1) % params.diskCount;;
+ while (params.disksDown.find(partition) != params.disksDown.end()) {
+ partition = (partition + 1) % params.diskCount;;
+ }
+ LOG(info, "Putting bucket %s on wrong disk %u instead of %u",
+ bid.toString().c_str(), partition, correctPart);
+ }
+ LOG(info, "Putting bucket %s on disk %u",
+ bid.toString().c_str(), partition);
+ BucketData& data(result[partition][bid]);
+ data.info.setDocumentCount(data.info.getDocumentCount() + 1);
+ data.info.setTotalDocumentSize(
+ data.info.getTotalDocumentSize() + 100);
+ data.info.setChecksum(data.info.getChecksum() * 3);
+ }
+ return result;
+}
+void verifyEqual(std::map<PartitionId, DiskData>& org,
+ std::map<PartitionId, DiskData>& existing)
+{
+ uint32_t equalCount = 0;
+ std::map<PartitionId, DiskData>::const_iterator part1(org.begin());
+ std::map<PartitionId, DiskData>::const_iterator part2(existing.begin());
+ while (part1 != org.end() && part2 != existing.end()) {
+ if (part1->first < part2->first) {
+ if (!part1->second.empty()) {
+ std::ostringstream ost;
+ ost << "No data in partition " << part1->first << " found.";
+ CPPUNIT_FAIL(ost.str());
+ }
+ ++part1;
+ } else if (part1->first > part2->first) {
+ if (!part2->second.empty()) {
+ std::ostringstream ost;
+ ost << "Found data in partition " << part2->first
+ << " which should not exist.";
+ CPPUNIT_FAIL(ost.str());
+ }
+ ++part2;
+ } else {
+ DiskData::const_iterator bucket1(part1->second.begin());
+ DiskData::const_iterator bucket2(part2->second.begin());
+ while (bucket1 != part1->second.end()
+ && bucket2 != part2->second.end())
+ {
+ if (bucket1->first < bucket2->first) {
+ std::ostringstream ost;
+ ost << "No data in partition " << part1->first
+ << " for bucket " << bucket1->first << " found.";
+ CPPUNIT_FAIL(ost.str());
+ } else if (bucket1->first.getId() > bucket2->first.getId())
+ {
+ std::ostringstream ost;
+ ost << "Found data in partition " << part2->first
+ << " for bucket " << bucket2->first
+ << " which should not exist.";
+ CPPUNIT_FAIL(ost.str());
+ } else if (!(bucket1->second.info == bucket2->second.info)) {
+ std::ostringstream ost;
+ ost << "Bucket " << bucket1->first << " on partition "
+ << part1->first << " has bucket info "
+ << bucket2->second.info << " and not "
+ << bucket1->second.info << " as expected.";
+ CPPUNIT_FAIL(ost.str());
+ }
+ ++bucket1;
+ ++bucket2;
+ ++equalCount;
+ }
+ if (bucket1 != part1->second.end()) {
+ std::ostringstream ost;
+ ost << "No data in partition " << part1->first
+ << " for bucket " << bucket1->first << " found.";
+ CPPUNIT_FAIL(ost.str());
+ }
+ if (bucket2 != part2->second.end()) {
+ std::ostringstream ost;
+ ost << "Found data in partition " << part2->first
+ << " for bucket " << bucket2->first
+ << " which should not exist.";
+ CPPUNIT_FAIL(ost.str());
+ }
+ ++part1;
+ ++part2;
+ }
+ }
+ if (part1 != org.end() && !part1->second.empty()) {
+ std::ostringstream ost;
+ ost << "No data in partition " << part1->first << " found.";
+ CPPUNIT_FAIL(ost.str());
+ }
+ if (part2 != existing.end() && !part2->second.empty()) {
+ std::ostringstream ost;
+ ost << "Found data in partition " << part2->first
+ << " which should not exist.";
+ CPPUNIT_FAIL(ost.str());
+ }
+ //std::cerr << "\n " << equalCount << " buckets were matched. ";
+}
+
+struct MessageCallback
+{
+public:
+ virtual ~MessageCallback() {}
+ virtual void onMessage(const api::StorageMessage&) = 0;
+};
+
+struct FakePersistenceLayer : public StorageLink {
+ StorBucketDatabase& bucketDatabase;
+ std::map<PartitionId, DiskData>& data;
+ std::string firstFatal;
+ std::string fatalError;
+ MessageCallback* messageCallback;
+
+ FakePersistenceLayer(std::map<PartitionId, DiskData>& d,
+ StorBucketDatabase& db)
+ : StorageLink("fakepersistencelayer"),
+ bucketDatabase(db),
+ data(d),
+ messageCallback(0)
+ {
+ }
+
+ void fatal(vespalib::stringref error) {
+ fatalError = error;
+ if (firstFatal.empty()) firstFatal = fatalError;
+ }
+ const BucketData* getBucketData(PartitionId partition,
+ const document::BucketId& bucket,
+ vespalib::stringref opname)
+ {
+ std::map<PartitionId, DiskData>::const_iterator it(
+ data.find(partition));
+ if (it == data.end()) {
+ std::ostringstream ost;
+ ost << bucket << " is stated to be on partition " << partition
+ << " in operation " << opname << ", but we have no data for "
+ << "it there.";
+ fatal(ost.str());
+ } else {
+ DiskData::const_iterator it2(it->second.find(bucket));
+ if (it2 == it->second.end()) {
+ std::ostringstream ost;
+ ost << "Have no data for " << bucket << " on disk " << partition
+ << " in operation " << opname;
+ fatal(ost.str());
+ } else {
+ const BucketData& bucketData(it2->second);
+ return &bucketData;
+ }
+ }
+ return 0;
+ }
+ virtual bool onDown(const api::StorageMessage::SP& msg) {
+ fatalError = "";
+ if (messageCallback) {
+ messageCallback->onMessage(*msg);
+ }
+ if (msg->getType() == api::MessageType::INTERNAL) {
+ api::InternalCommand& cmd(
+ dynamic_cast<api::InternalCommand&>(*msg));
+ if (cmd.getType() == ReadBucketList::ID) {
+ ReadBucketList& rbl(dynamic_cast<ReadBucketList&>(cmd));
+ ReadBucketListReply::SP reply(new ReadBucketListReply(rbl));
+ std::map<PartitionId, DiskData>::const_iterator it(
+ data.find(rbl.getPartition()));
+ if (it == data.end()) {
+ std::ostringstream ost;
+ ost << "Got list request to partition "
+ << rbl.getPartition()
+ << " for which we should not get a request";
+ fatal(ost.str());
+ } else {
+ for (DiskData::const_iterator it2 = it->second.begin();
+ it2 != it->second.end(); ++it2)
+ {
+ reply->getBuckets().push_back(it2->first);
+ }
+ }
+ if (!fatalError.empty()) {
+ reply->setResult(api::ReturnCode(
+ api::ReturnCode::INTERNAL_FAILURE, fatalError));
+ }
+ sendUp(reply);
+ } else if (cmd.getType() == ReadBucketInfo::ID) {
+ ReadBucketInfo& rbi(dynamic_cast<ReadBucketInfo&>(cmd));
+ ReadBucketInfoReply::SP reply(new ReadBucketInfoReply(rbi));
+ StorBucketDatabase::WrappedEntry entry(
+ bucketDatabase.get(rbi.getBucketId(), "fakelayer"));
+ if (!entry.exist()) {
+ fatal("Bucket " + rbi.getBucketId().toString()
+ + " did not exist in bucket database but we got "
+ + "read bucket info request for it.");
+ } else {
+ const BucketData* bucketData(getBucketData(
+ entry->disk, rbi.getBucketId(), "readbucketinfo"));
+ if (bucketData != 0) {
+ entry->setBucketInfo(bucketData->info);
+ entry.write();
+ }
+ }
+ if (!fatalError.empty()) {
+ reply->setResult(api::ReturnCode(
+ api::ReturnCode::INTERNAL_FAILURE, fatalError));
+ }
+ sendUp(reply);
+ } else if (cmd.getType() == InternalBucketJoinCommand::ID) {
+ InternalBucketJoinCommand& ibj(
+ dynamic_cast<InternalBucketJoinCommand&>(cmd));
+ InternalBucketJoinReply::SP reply(
+ new InternalBucketJoinReply(ibj));
+ StorBucketDatabase::WrappedEntry entry(
+ bucketDatabase.get(ibj.getBucketId(), "fakelayer"));
+ if (!entry.exist()) {
+ fatal("Bucket " + ibj.getBucketId().toString()
+ + " did not exist in bucket database but we got "
+ + "read bucket info request for it.");
+ } else {
+ const BucketData* source(getBucketData(
+ ibj.getDiskOfInstanceToJoin(), ibj.getBucketId(),
+ "internaljoinsource"));
+ const BucketData* target(getBucketData(
+ ibj.getDiskOfInstanceToKeep(), ibj.getBucketId(),
+ "internaljointarget"));
+ if (source != 0 && target != 0) {
+ entry->setBucketInfo((*source + *target).info);
+ entry.write();
+ }
+ }
+ if (!fatalError.empty()) {
+ reply->setResult(api::ReturnCode(
+ api::ReturnCode::INTERNAL_FAILURE, fatalError));
+ }
+ sendUp(reply);
+ } else {
+ return false;
+ }
+ return true;
+ }
+ return false;
+ }
+};
+
+} // end of anonymous namespace
+
+#define CPPUNIT_ASSERT_METRIC_SET(x) \
+ CPPUNIT_ASSERT(initializer->getMetrics().x.getValue() > 0);
+
+void
+InitializerTest::testInitialization(InitParams& params)
+{
+ std::map<PartitionId, DiskData> data(buildBucketInfo(_docMan, params));
+
+ spi::PartitionStateList partitions(params.diskCount);
+ for (std::set<uint32_t>::const_iterator it = params.disksDown.begin();
+ it != params.disksDown.end(); ++it)
+ {
+ partitions[*it] = spi::PartitionState(
+ spi::PartitionState::DOWN, "Set down in test");
+ }
+ TestServiceLayerApp node(params.diskCount, params.nodeIndex,
+ params.getConfig().getConfigId());
+ DummyStorageLink top;
+ StorageBucketDBInitializer* initializer;
+ FakePersistenceLayer* bottom;
+ top.push_back(StorageLink::UP(initializer = new StorageBucketDBInitializer(
+ params.getConfig().getConfigId(),
+ partitions,
+ node.getDoneInitializeHandler(),
+ node.getComponentRegister())));
+ top.push_back(StorageLink::UP(bottom = new FakePersistenceLayer(
+ data, node.getStorageBucketDatabase())));
+
+ LOG(info, "STARTING INITIALIZATION");
+ top.open();
+
+ /*
+ FileChanger updater(config, nodeIndex, params, orgBucketDatabase);
+ if (params.bucketWrongDisk) updater.moveBucketWrongDisk();
+ if (params.bucketMultipleDisks) updater.copyBucketWrongDisk();
+ if (params.failingListRequest) {
+ updater.removeDirPermission(6, 'r');
+ updater.removeBucketsFromDBAtPath(6);
+ }
+ if (params.failingInfoRequest) {
+ updater.removeFilePermission();
+ orgBucketDatabase.erase(updater.getBucket(8));
+ }
+ */
+
+ node.waitUntilInitialized(initializer);
+
+ std::map<PartitionId, DiskData> initedBucketDatabase(
+ createMapFromBucketDatabase(node.getStorageBucketDatabase()));
+ verifyEqual(data, initedBucketDatabase);
+ /*
+ if (params.bucketWrongDisk) {
+ CPPUNIT_ASSERT_METRIC_SET(_wrongDisk);
+ }
+ if (params.bucketMultipleDisks) {
+ CPPUNIT_ASSERT_METRIC_SET(_joinedCount);
+ }
+ */
+}
+
+/*
+namespace {
+ enum State { LISTING, INFO, DONE };
+ void verifyStatusContent(StorageBucketDBInitializer& initializer,
+ State state)
+ {
+ std::ostringstream ost;
+ initializer.reportStatus(ost, framework::HttpUrlPath(""));
+ std::string status = ost.str();
+
+ if (state == LISTING) {
+ CPPUNIT_ASSERT_CONTAIN("List phase completed: false", status);
+ CPPUNIT_ASSERT_CONTAIN("Initialization completed: false", status);
+ } else if (state == INFO) {
+ CPPUNIT_ASSERT_CONTAIN("List phase completed: true", status);
+ CPPUNIT_ASSERT_CONTAIN("Initialization completed: false", status);
+ } else if (state == DONE) {
+ CPPUNIT_ASSERT_CONTAIN("List phase completed: true", status);
+ CPPUNIT_ASSERT_CONTAIN("Initialization completed: true", status);
+ }
+ }
+}
+
+void
+InitializerTest::testStatusPage()
+{
+ // Set up surrounding system to create a single bucket for us to
+ // do init on.
+ vdstestlib::DirConfig config(getStandardConfig(true));
+ uint16_t nodeIndex(
+ config.getConfig("stor-server").getValue("node_index", 0));
+ InitParams params;
+ params.docsPerDisk = 1;
+ params.diskCount = 1;
+ std::map<document::BucketId, api::BucketInfo> orgBucketDatabase(
+ buildBucketInfo(_docMan, config, nodeIndex, 1, 1, params.disksDown));
+ FileChanger updater(config, nodeIndex, params, orgBucketDatabase);
+
+ // Set up the initializer.
+ DummyStorageServer server(config.getConfigId());
+ DummyStorageLink top;
+ DummyStorageLink *bottom;
+ StorageBucketDBInitializer* initializer;
+ top.push_back(StorageLink::UP(initializer = new StorageBucketDBInitializer(
+ config.getConfigId(), server)));
+ top.push_back(StorageLink::UP(bottom = new DummyStorageLink));
+
+ // Grab bucket database lock for bucket to init to lock the initializer
+ // in the init stage
+ StorBucketDatabase::WrappedEntry entry(
+ server.getStorageBucketDatabase().get(
+ updater.getBucket(0), "testCommandBlocking",
+ StorBucketDatabase::LOCK_IF_NONEXISTING_AND_NOT_CREATING));
+ // Start the initializer
+ top.open();
+ bottom->waitForMessages(1, 30);
+ verifyStatusContent(*initializer, LISTING);
+ // Attempt to send put. Should be blocked
+ // Attempt to send request bucket info. Should be blocked.
+ // Attempt to send getNodeState. Should not be blocked.
+
+ // Unlock bucket in bucket database so listing step can complete.
+ // Await read info request being sent down.
+ entry.unlock();
+ bottom->waitForMessages(1, 30);
+ verifyStatusContent(*initializer, INFO);
+
+ ReadBucketInfo& cmd(dynamic_cast<ReadBucketInfo&>(*bottom->getCommand(0)));
+ ReadBucketInfoReply::SP reply(new ReadBucketInfoReply(cmd));
+ bottom->sendUp(reply);
+
+ node.waitUntilInitialized(initializer);
+ verifyStatusContent(*initializer, DONE);
+
+}
+
+#define ASSERT_BLOCKED(top, bottom, blocks) \
+ if (blocks) { \
+ top.waitForMessages(1, 30); \
+ CPPUNIT_ASSERT_EQUAL(size_t(1), top.getReplies().size()); \
+ CPPUNIT_ASSERT_EQUAL(size_t(0), bottom.getCommands().size()); \
+ api::StorageReply& reply(dynamic_cast<api::StorageReply&>( \
+ *top.getReply(0))); \
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode::ABORTED, \
+ reply.getResult().getResult()); \
+ top.reset(); \
+ } else { \
+ bottom.waitForMessages(1, 30); \
+ CPPUNIT_ASSERT_EQUAL(size_t(0), top.getReplies().size()); \
+ CPPUNIT_ASSERT_EQUAL(size_t(1), bottom.getCommands().size()); \
+ api::StorageCommand& command(dynamic_cast<api::StorageCommand&>( \
+ *bottom.getCommand(0))); \
+ (void) command; \
+ bottom.reset(); \
+ }
+
+namespace {
+ void verifyBlockingOn(DummyStorageLink& top,
+ DummyStorageLink& bottom,
+ bool blockEnabled)
+ {
+ // Attempt to send get. Should be blocked if block enabled
+ {
+ api::GetCommand::SP cmd(new api::GetCommand(
+ document::BucketId(16, 4),
+ document::DocumentId("userdoc:ns:4:test"), true));
+ top.sendDown(cmd);
+ ASSERT_BLOCKED(top, bottom, blockEnabled);
+ }
+ // Attempt to send request bucket info. Should be blocked if enabled.
+ {
+ api::RequestBucketInfoCommand::SP cmd(
+ new api::RequestBucketInfoCommand(
+ 0, lib::ClusterState("")));
+ top.sendDown(cmd);
+ ASSERT_BLOCKED(top, bottom, blockEnabled);
+ }
+ // Attempt to send getNodeState. Should not be blocked.
+ {
+ api::GetNodeStateCommand::SP cmd(new api::GetNodeStateCommand(
+ lib::NodeState::UP(0)));
+ top.sendDown(cmd);
+ ASSERT_BLOCKED(top, bottom, false);
+ }
+ }
+}
+
+void
+InitializerTest::testCommandBlockingDuringInit()
+{
+ // Set up surrounding system to create a single bucket for us to
+ // do init on.
+ vdstestlib::DirConfig config(getStandardConfig(true));
+ uint16_t nodeIndex(
+ config.getConfig("stor-server").getValue("node_index", 0));
+ InitParams params;
+ params.docsPerDisk = 1;
+ params.diskCount = 1;
+ std::map<document::BucketId, api::BucketInfo> orgBucketDatabase(
+ buildBucketInfo(_docMan, config, nodeIndex, 1, 1, params.disksDown));
+ FileChanger updater(config, nodeIndex, params, orgBucketDatabase);
+
+ // Set up the initializer.
+ DummyStorageServer server(config.getConfigId());
+ DummyStorageLink top;
+ DummyStorageLink *bottom;
+ StorageBucketDBInitializer* initializer;
+ top.push_back(StorageLink::UP(initializer = new StorageBucketDBInitializer(
+ config.getConfigId(), server)));
+ top.push_back(StorageLink::UP(bottom = new DummyStorageLink));
+
+ // Grab bucket database lock for bucket to init to lock the initializer
+ // in the init stage
+ StorBucketDatabase::WrappedEntry entry(
+ server.getStorageBucketDatabase().get(
+ updater.getBucket(0), "testCommandBlocking",
+ StorBucketDatabase::LOCK_IF_NONEXISTING_AND_NOT_CREATING));
+ // Start the initializer
+ top.open();
+ verifyBlockingOn(top, *bottom, true);
+ // Attempt to send put. Should be blocked
+ // Attempt to send request bucket info. Should be blocked.
+ // Attempt to send getNodeState. Should not be blocked.
+
+ // Unlock bucket in bucket database so listing step can complete.
+ // Await read info request being sent down.
+ entry.unlock();
+ bottom->waitForMessages(1, 30);
+ dynamic_cast<ReadBucketInfo&>(*bottom->getCommand(0));
+ CPPUNIT_ASSERT(!server.isInitialized());
+ bottom->reset();
+
+ // Retry - Should now not block
+ verifyBlockingOn(top, *bottom, false);
+}
+*/
+
+void
+InitializerTest::testBucketProgressCalculator()
+{
+ using document::BucketId;
+ StorageBucketDBInitializer::BucketProgressCalculator calc;
+ // We consider the given bucket as not being completed, so progress
+ // will be _up to_, not _including_ the bucket. This means we can never
+ // reach 1.0, so progress completion must be handled by other logic!
+ CPPUNIT_ASSERT_EQUAL(0.0, calc.calculateProgress(BucketId(1, 0)));
+ CPPUNIT_ASSERT_EQUAL(0.0, calc.calculateProgress(BucketId(32, 0)));
+
+ CPPUNIT_ASSERT_EQUAL(0.5, calc.calculateProgress(BucketId(1, 1)));
+
+ CPPUNIT_ASSERT_EQUAL(0.25, calc.calculateProgress(BucketId(2, 2)));
+ CPPUNIT_ASSERT_EQUAL(0.5, calc.calculateProgress(BucketId(2, 1)));
+ CPPUNIT_ASSERT_EQUAL(0.75, calc.calculateProgress(BucketId(2, 3)));
+
+ CPPUNIT_ASSERT_EQUAL(0.875, calc.calculateProgress(BucketId(3, 7)));
+}
+
+struct DatabaseInsertCallback : MessageCallback
+{
+ DiskData& _data;
+ StorBucketDatabase& _database;
+ TestServiceLayerApp& _app;
+ const InitializerTest::InitParams& _params;
+ bool _invoked;
+ double _lastSeenProgress;
+ uint8_t _expectedReadBucketPriority;
+ std::ostringstream _errors;
+ DatabaseInsertCallback(DiskData& data,
+ StorBucketDatabase& db,
+ TestServiceLayerApp& app,
+ const InitializerTest::InitParams& params)
+ : _data(data),
+ _database(db),
+ _app(app),
+ _params(params),
+ _invoked(false),
+ _lastSeenProgress(0),
+ _expectedReadBucketPriority(255)
+ {}
+
+ void onMessage(const api::StorageMessage& msg)
+ {
+ // Always make sure we're not set as initialized while we're still
+ // processing messages! Also ensure progress never goes down.
+ lib::NodeState::CSP reportedState(
+ _app.getStateUpdater().getReportedNodeState());
+ double progress(reportedState->getInitProgress().getValue());
+ LOG(debug, "reported progress is now %g", progress);
+ // CppUnit exceptions are swallowed...
+ if (progress >= 1.0) {
+ _errors << "progress exceeded 1.0: " << progress << "\n";
+ }
+ if (progress < _lastSeenProgress) {
+ _errors << "progress went down! "
+ << _lastSeenProgress << " -> " << progress
+ << "\n";
+ }
+ // 16 bits is allowed before we have listed any buckets at all
+ // since we at that point have no idea and have not reported anything
+ // back to the fleetcontroller.
+ if (_params.bucketBitsUsed != reportedState->getMinUsedBits()
+ && !(reportedState->getMinUsedBits() == 16 && !_invoked))
+ {
+ _errors << "reported state contains wrong min used bits. "
+ << "expected " << _params.bucketBitsUsed
+ << ", but got " << reportedState->getMinUsedBits()
+ << "\n";
+ }
+ _lastSeenProgress = progress;
+ if (_invoked) {
+ return;
+ }
+
+ if (msg.getType() == api::MessageType::INTERNAL) {
+ const api::InternalCommand& cmd(
+ dynamic_cast<const api::InternalCommand&>(msg));
+ if (cmd.getType() == ReadBucketInfo::ID) {
+ if (cmd.getPriority() != _expectedReadBucketPriority) {
+ _errors << "expected ReadBucketInfo priority of "
+ << static_cast<int>(_expectedReadBucketPriority)
+ << ", was " << static_cast<int>(cmd.getPriority());
+ }
+ // As soon as we get the first ReadBucketInfo, we insert new buckets
+            // into the bucket database in order to simulate external
+ // load init. Kinda hacky, but should work as long as initializer
+ // always does at least 1 extra iteration pass (which we use
+ // config overrides to ensure happens).
+ _invoked = true;
+ for (int i = 0; i < 4; ++i) {
+ document::BucketId bid(16 + i, 8); // not the first, nor the last bucket
+ BucketData d;
+ StorBucketDatabase::WrappedEntry entry(
+ _database.get(bid, "DatabaseInsertCallback::onMessage",
+ StorBucketDatabase::LOCK_IF_NONEXISTING_AND_NOT_CREATING));
+ if (entry.exist()) {
+ _errors << "db entry for " << bid << " already existed";
+ }
+ if (i < 5) {
+ d.info = api::BucketInfo(3+i, 4+i, 5+i, 6+i, 7+i);
+ }
+ _data[bid] = d;
+ entry->disk = 0;
+ entry->setBucketInfo(d.info);
+ entry.write();
+ }
+ }
+ }
+ }
+};
+
+void
+InitializerTest::testBucketsInitializedByLoad()
+{
+ InitParams params;
+ params.docsPerDisk = 100;
+ params.diskCount = DiskCount(1);
+ params.getConfig().getConfig("stor-bucket-init").setValue("max_pending_info_reads_per_disk", 1);
+ params.getConfig().getConfig("stor-bucket-init").setValue("min_pending_info_reads_per_disk", 1);
+ params.getConfig().getConfig("stor-bucket-init")
+ .setValue("info_read_priority", 231);
+
+ std::map<PartitionId, DiskData> data(buildBucketInfo(_docMan, params));
+
+ spi::PartitionStateList partitions(params.diskCount);
+ TestServiceLayerApp node(params.diskCount, params.nodeIndex,
+ params.getConfig().getConfigId());
+ DummyStorageLink top;
+ StorageBucketDBInitializer* initializer;
+ FakePersistenceLayer* bottom;
+ top.push_back(StorageLink::UP(initializer = new StorageBucketDBInitializer(
+ params.getConfig().getConfigId(),
+ partitions,
+ node.getDoneInitializeHandler(),
+ node.getComponentRegister())));
+ top.push_back(StorageLink::UP(bottom = new FakePersistenceLayer(
+ data, node.getStorageBucketDatabase())));
+
+ DatabaseInsertCallback callback(data[0], node.getStorageBucketDatabase(),
+ node, params);
+ callback._expectedReadBucketPriority = 231;
+
+ bottom->messageCallback = &callback;
+
+ top.open();
+
+ node.waitUntilInitialized(initializer);
+ // Must explicitly wait until initializer has closed to ensure node state
+ // has been set.
+ top.close();
+
+ CPPUNIT_ASSERT(callback._invoked);
+ CPPUNIT_ASSERT_EQUAL(std::string(), callback._errors.str());
+
+ std::map<PartitionId, DiskData> initedBucketDatabase(
+ createMapFromBucketDatabase(node.getStorageBucketDatabase()));
+ verifyEqual(data, initedBucketDatabase);
+
+ lib::NodeState::CSP reportedState(
+ node.getStateUpdater().getReportedNodeState());
+
+ double progress(reportedState->getInitProgress().getValue());
+ CPPUNIT_ASSERT(progress >= 1.0);
+ CPPUNIT_ASSERT(progress < 1.0001);
+
+ CPPUNIT_ASSERT_EQUAL(params.bucketBitsUsed,
+ reportedState->getMinUsedBits());
+}
+
+} // storage
diff --git a/storage/src/tests/bucketdb/judyarraytest.cpp b/storage/src/tests/bucketdb/judyarraytest.cpp
new file mode 100644
index 00000000000..235c0c9eb5c
--- /dev/null
+++ b/storage/src/tests/bucketdb/judyarraytest.cpp
@@ -0,0 +1,287 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/storage/bucketdb/judyarray.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <boost/assign.hpp>
+#include <boost/random.hpp>
+#include <cppunit/extensions/HelperMacros.h>
+#include <map>
+#include <vector>
+
+namespace storage {
+
+struct JudyArrayTest : public CppUnit::TestFixture {
+ void testIterating();
+ void testDualArrayFunctions();
+ void testComparing();
+ void testSize();
+ void testStress();
+
+ CPPUNIT_TEST_SUITE(JudyArrayTest);
+ CPPUNIT_TEST(testIterating);
+ CPPUNIT_TEST(testDualArrayFunctions);
+ CPPUNIT_TEST(testSize);
+ CPPUNIT_TEST(testStress);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(JudyArrayTest);
+
+namespace {
+ std::vector<std::pair<JudyArray::key_type, JudyArray::data_type> >
+ getJudyArrayContents(const JudyArray& array) {
+ std::vector<std::pair<JudyArray::key_type, JudyArray::data_type> > vals;
+ for (JudyArray::const_iterator it = array.begin();
+ it != array.end(); ++it)
+ {
+ vals.push_back(std::make_pair(it.key(), it.value()));
+ }
+ return vals;
+ }
+}
+
+void
+JudyArrayTest::testIterating()
+{
+ JudyArray array;
+    // Test that things are sane for an empty array
+ CPPUNIT_ASSERT_EQUAL(array.begin(), array.end());
+ // Add some values
+ using namespace boost::assign;
+ std::vector<std::pair<JudyArray::key_type, JudyArray::data_type> > values
+ = map_list_of(3,2)(5,12)(15,8)(13,10)(7,6)(9,4);
+ for (uint32_t i=0; i<values.size(); ++i) {
+ array.insert(values[i].first, values[i].second);
+ }
+ // Create expected result
+ std::sort(values.begin(), values.end());
+ // Test that we can iterate through const iterator
+ std::vector<std::pair<JudyArray::key_type, JudyArray::data_type> >
+ foundVals = getJudyArrayContents(array);
+ CPPUNIT_ASSERT_EQUAL(values, foundVals);
+
+    { // Test that both prefix and postfix operators work
+ JudyArray::iterator it = array.begin();
+ JudyArray::iterator it2 = it++;
+ CPPUNIT_ASSERT_EQUAL(JudyArray::value_type(values[0]), *it2);
+ CPPUNIT_ASSERT_EQUAL(JudyArray::value_type(values[1]), *it);
+
+ // And that iterator comparisons work
+ CPPUNIT_ASSERT_EQUAL(it2, array.begin());
+ CPPUNIT_ASSERT_EQUAL(it, ++array.begin());
+ CPPUNIT_ASSERT(!(it == it2));
+ CPPUNIT_ASSERT(it != it2);
+ }
+ { // Test that we can alter through non-const iterator
+ JudyArray::iterator it = array.begin();
+ ++it;
+ ++it;
+ it.setValue(20);
+ CPPUNIT_ASSERT_EQUAL((JudyArray::key_type) 7, it.key());
+ CPPUNIT_ASSERT_EQUAL((JudyArray::data_type) 20, array[7]);
+ it.remove();
+ CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 5,
+ getJudyArrayContents(array).size());
+ CPPUNIT_ASSERT_EQUAL(array.end(), array.find(7));
+ values.erase(values.begin() + 2);
+ CPPUNIT_ASSERT_EQUAL(values, getJudyArrayContents(array));
+ // And that we can continue iterating after removing.
+ ++it;
+ CPPUNIT_ASSERT_EQUAL((JudyArray::key_type) 9, it.key());
+ CPPUNIT_ASSERT_EQUAL((JudyArray::data_type) 4, array[9]);
+ }
+ { // Test printing of iterators
+ JudyArray::ConstIterator cit = array.begin();
+ CPPUNIT_ASSERT_MATCH_REGEX(
+ "^ConstIterator\\(Key: 3, Valp: 0x[0-9a-f]{1,16}, Val: 2\\)$",
+ cit.toString());
+ JudyArray::Iterator it = array.end();
+ CPPUNIT_ASSERT_MATCH_REGEX(
+ "^Iterator\\(Key: 0, Valp: 0\\)$",
+ it.toString());
+ }
+}
+
+void
+JudyArrayTest::testDualArrayFunctions()
+{
+ JudyArray array1;
+ JudyArray array2;
+ // Add values to array1
+ using namespace boost::assign;
+ std::vector<std::pair<JudyArray::key_type, JudyArray::data_type> > values1
+ = map_list_of(3,2)(5,12)(15,8)(13,10)(7,6)(9,4);
+ for (uint32_t i=0; i<values1.size(); ++i) {
+ array1.insert(values1[i].first, values1[i].second);
+ }
+ // Add values to array2
+ std::vector<std::pair<JudyArray::key_type, JudyArray::data_type> > values2
+ = map_list_of(4,5)(9,40);
+ for (uint32_t i=0; i<values2.size(); ++i) {
+ array2.insert(values2[i].first, values2[i].second);
+ }
+ // Create expected result
+ std::sort(values1.begin(), values1.end());
+ std::sort(values2.begin(), values2.end());
+
+ CPPUNIT_ASSERT_EQUAL(values1, getJudyArrayContents(array1));
+ CPPUNIT_ASSERT_EQUAL(values2, getJudyArrayContents(array2));
+ CPPUNIT_ASSERT(array1 > array2);
+ CPPUNIT_ASSERT(array1 != array2);
+ array1.swap(array2);
+ CPPUNIT_ASSERT_EQUAL(values1, getJudyArrayContents(array2));
+ CPPUNIT_ASSERT_EQUAL(values2, getJudyArrayContents(array1));
+ CPPUNIT_ASSERT(array1 < array2);
+ CPPUNIT_ASSERT(array1 != array2);
+
+ // Test some operators
+ JudyArray array3;
+ for (uint32_t i=0; i<values1.size(); ++i) {
+ array3.insert(values1[i].first, values1[i].second);
+ }
+ CPPUNIT_ASSERT(array1 != array3);
+ CPPUNIT_ASSERT_EQUAL(array2, array3);
+ CPPUNIT_ASSERT(array2 >= array3);
+ CPPUNIT_ASSERT(array2 <= array3);
+ CPPUNIT_ASSERT(!(array2 < array3));
+ CPPUNIT_ASSERT(!(array2 > array3));
+}
+
+void
+JudyArrayTest::testSize()
+{
+ JudyArray array;
+ CPPUNIT_ASSERT_EQUAL(array.begin(), array.end());
+ CPPUNIT_ASSERT(array.empty());
+ CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 0, array.size());
+ CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 0, array.getMemoryUsage());
+
+        // Test each method one can use to insert entries into the array
+ array.insert(4, 3);
+ CPPUNIT_ASSERT_EQUAL(getJudyArrayContents(array).size(), array.size());
+ array.insert(4, 7);
+ CPPUNIT_ASSERT_EQUAL(getJudyArrayContents(array).size(), array.size());
+ if (sizeof(JudyArray::size_type) == 4) {
+ CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 12, array.getMemoryUsage());
+ } else if (sizeof(JudyArray::size_type) == 8) {
+ CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 24, array.getMemoryUsage());
+ } else CPPUNIT_FAIL("Unknown size of type");
+
+ array[6] = 8;
+ CPPUNIT_ASSERT_EQUAL(getJudyArrayContents(array).size(), array.size());
+ array[6] = 10;
+ CPPUNIT_ASSERT_EQUAL(getJudyArrayContents(array).size(), array.size());
+ if (sizeof(JudyArray::size_type) == 4) {
+ CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 20, array.getMemoryUsage());
+ } else if (sizeof(JudyArray::size_type) == 8) {
+ CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 40, array.getMemoryUsage());
+ } else CPPUNIT_FAIL("Unknown size of type");
+
+ bool preExisted;
+ array.find(8, true, preExisted);
+ CPPUNIT_ASSERT_EQUAL(false, preExisted);
+ CPPUNIT_ASSERT_EQUAL(getJudyArrayContents(array).size(), array.size());
+ array.find(8, true, preExisted);
+ CPPUNIT_ASSERT_EQUAL(true, preExisted);
+ CPPUNIT_ASSERT_EQUAL(getJudyArrayContents(array).size(), array.size());
+ CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 3, array.size());
+ if (sizeof(JudyArray::size_type) == 4) {
+ CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 28, array.getMemoryUsage());
+ } else if (sizeof(JudyArray::size_type) == 8) {
+ CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 56, array.getMemoryUsage());
+ } else CPPUNIT_FAIL("Unknown size of type");
+
+        // Test each method one can use to remove entries from the array
+ array.erase(8);
+ CPPUNIT_ASSERT_EQUAL(getJudyArrayContents(array).size(), array.size());
+ array.erase(8);
+ CPPUNIT_ASSERT_EQUAL(getJudyArrayContents(array).size(), array.size());
+ CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 2, array.size());
+ if (sizeof(JudyArray::size_type) == 4) {
+ CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 20, array.getMemoryUsage());
+ } else if (sizeof(JudyArray::size_type) == 8) {
+ CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 40, array.getMemoryUsage());
+ } else CPPUNIT_FAIL("Unknown size of type");
+}
+
+namespace {
+ template<typename T>
+ std::string toString(const T& m) {
+ std::cerr << "#";
+ std::ostringstream ost;
+ ost << m;
+ return ost.str();
+ }
+}
+
+void
+JudyArrayTest::testStress()
+{
+ // Do a lot of random stuff to both judy array and std::map. Ensure equal
+ // behaviour
+
+ JudyArray judyArray;
+ typedef std::map<JudyArray::key_type, JudyArray::data_type> StdMap;
+ StdMap stdMap;
+
+ boost::rand48 rnd(55);
+
+ for (uint32_t checkpoint=0; checkpoint<50; ++checkpoint) {
+ for (uint32_t opnr=0; opnr<500; ++opnr) {
+ int optype = rnd() % 100;
+ if (optype < 30) { // Insert
+ JudyArray::key_type key(rnd() % 500);
+ JudyArray::key_type value(rnd());
+ judyArray.insert(key, value);
+ stdMap[key] = value;
+ //std::pair<StdMap::iterator, bool> result
+ // = stdMap.insert(std::make_pair(key, value));
+ //if (!result.second) result.first->second = value;
+ } else if (optype < 50) { // operator[]
+ JudyArray::key_type key(rnd() % 500);
+ JudyArray::key_type value(rnd());
+ judyArray[key] = value;
+ stdMap[key] = value;
+ } else if (optype < 70) { // erase()
+ JudyArray::key_type key(rnd() % 500);
+ CPPUNIT_ASSERT_EQUAL_MSG(
+ toString(judyArray) + toString(stdMap),
+ stdMap.erase(key), judyArray.erase(key));
+ } else if (optype < 75) { // size()
+ CPPUNIT_ASSERT_EQUAL_MSG(
+ toString(judyArray) + toString(stdMap),
+ stdMap.size(), judyArray.size());
+ } else if (optype < 78) { // empty()
+ CPPUNIT_ASSERT_EQUAL_MSG(
+ toString(judyArray) + toString(stdMap),
+ stdMap.empty(), judyArray.empty());
+ } else { // find()
+ JudyArray::key_type key(rnd() % 500);
+ JudyArray::iterator it = judyArray.find(key);
+ StdMap::iterator it2 = stdMap.find(key);
+ CPPUNIT_ASSERT_EQUAL_MSG(
+ toString(judyArray) + toString(stdMap),
+ it2 == stdMap.end(), it == judyArray.end());
+ if (it != judyArray.end()) {
+ CPPUNIT_ASSERT_EQUAL_MSG(
+ toString(judyArray) + toString(stdMap),
+ it.key(), it2->first);
+ CPPUNIT_ASSERT_EQUAL_MSG(
+ toString(judyArray) + toString(stdMap),
+ it.value(), it2->second);
+ }
+ }
+ }
+        // Ensure judy array contents are equal to std::map's at this point
+ StdMap tmpMap;
+ for (JudyArray::const_iterator it = judyArray.begin();
+ it != judyArray.end(); ++it)
+ {
+ tmpMap[it.key()] = it.value();
+ }
+ CPPUNIT_ASSERT_EQUAL(stdMap, tmpMap);
+ }
+}
+
+} // storage
diff --git a/storage/src/tests/bucketdb/judymultimaptest.cpp b/storage/src/tests/bucketdb/judymultimaptest.cpp
new file mode 100644
index 00000000000..f63fad9aa06
--- /dev/null
+++ b/storage/src/tests/bucketdb/judymultimaptest.cpp
@@ -0,0 +1,172 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/storage/bucketdb/judymultimap.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <boost/assign.hpp>
+#include <boost/random.hpp>
+#include <cppunit/extensions/HelperMacros.h>
+#include <map>
+#include <vector>
+
+namespace storage {
+
+struct JudyMultiMapTest : public CppUnit::TestFixture {
+ void testSimpleUsage();
+ void testIterator();
+
+ CPPUNIT_TEST_SUITE(JudyMultiMapTest);
+ CPPUNIT_TEST(testSimpleUsage);
+ CPPUNIT_TEST(testIterator);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(JudyMultiMapTest);
+
+namespace {
+ struct B;
+ struct C;
+
+ struct A {
+ int _val1;
+ int _val2;
+ int _val3;
+
+ A() {}
+ A(const B& b);
+ A(const C& c);
+ A(int val1, int val2, int val3)
+ : _val1(val1), _val2(val2), _val3(val3) {}
+
+ static bool mayContain(const A&) { return true; }
+
+ bool operator==(const A& a) const {
+ return (_val1 == a._val1 && _val2 == a._val2 && _val3 == a._val3);
+ }
+ };
+
+ struct B {
+ int _val1;
+ int _val2;
+
+ B() {}
+ B(const A& a) : _val1(a._val1), _val2(a._val2) {}
+ B(int val1, int val2) : _val1(val1), _val2(val2) {}
+
+ static bool mayContain(const A& a) { return (a._val3 == 0); }
+ };
+
+ struct C {
+ int _val1;
+
+ C() {}
+ C(const A& a) : _val1(a._val1) {}
+ C(int val1) : _val1(val1) {}
+
+ static bool mayContain(const A& a)
+ { return (a._val2 == 0 && a._val3 == 0); }
+ };
+
+ A::A(const B& b) : _val1(b._val1), _val2(b._val2), _val3(0) {}
+ A::A(const C& c) : _val1(c._val1), _val2(0), _val3(0) {}
+
+ std::ostream& operator<<(std::ostream& out, const A& a) {
+ return out << "A(" << a._val1 << ", " << a._val2 << ", "
+ << a._val3 << ")";
+ }
+ std::ostream& operator<<(std::ostream& out, const B& b) {
+ return out << "B(" << b._val1 << ", " << b._val2 << ")";
+ }
+ std::ostream& operator<<(std::ostream& out, const C& c) {
+ return out << "C(" << c._val1 << ")";
+ }
+}
+
+void
+JudyMultiMapTest::testSimpleUsage() {
+ typedef JudyMultiMap<C, B, A> MultiMap;
+ MultiMap multiMap;
+ // Do some insertions
+ bool preExisted;
+ CPPUNIT_ASSERT(multiMap.empty());
+ multiMap.insert(16, A(1, 2, 3), preExisted);
+ CPPUNIT_ASSERT_EQUAL(false, preExisted);
+ multiMap.insert(11, A(4, 6, 0), preExisted);
+ CPPUNIT_ASSERT_EQUAL(false, preExisted);
+ multiMap.insert(14, A(42, 0, 0), preExisted);
+ CPPUNIT_ASSERT_EQUAL(false, preExisted);
+ CPPUNIT_ASSERT_EQUAL_MSG(multiMap.toString(),
+ (MultiMap::size_type) 3, multiMap.size());
+
+ multiMap.insert(11, A(4, 7, 0), preExisted);
+ CPPUNIT_ASSERT_EQUAL(true, preExisted);
+ CPPUNIT_ASSERT_EQUAL((MultiMap::size_type) 3, multiMap.size());
+ CPPUNIT_ASSERT(!multiMap.empty());
+
+ // Access some elements
+ CPPUNIT_ASSERT_EQUAL(A(4, 7, 0), multiMap[11]);
+ CPPUNIT_ASSERT_EQUAL(A(1, 2, 3), multiMap[16]);
+ CPPUNIT_ASSERT_EQUAL(A(42,0, 0), multiMap[14]);
+
+ // Do removes
+ CPPUNIT_ASSERT(multiMap.erase(12) == 0);
+ CPPUNIT_ASSERT_EQUAL((MultiMap::size_type) 3, multiMap.size());
+
+ CPPUNIT_ASSERT(multiMap.erase(14) == 1);
+ CPPUNIT_ASSERT_EQUAL((MultiMap::size_type) 2, multiMap.size());
+
+ CPPUNIT_ASSERT(multiMap.erase(11) == 1);
+ CPPUNIT_ASSERT(multiMap.erase(16) == 1);
+ CPPUNIT_ASSERT_EQUAL((MultiMap::size_type) 0, multiMap.size());
+ CPPUNIT_ASSERT(multiMap.empty());
+}
+
+void
+JudyMultiMapTest::testIterator()
+{
+ typedef JudyMultiMap<C, B, A> MultiMap;
+ MultiMap multiMap;
+ bool preExisted;
+ // Do some insertions
+ multiMap.insert(16, A(1, 2, 3), preExisted);
+ multiMap.insert(11, A(4, 6, 0), preExisted);
+ multiMap.insert(14, A(42, 0, 0), preExisted);
+
+ MultiMap::Iterator iter = multiMap.begin();
+ CPPUNIT_ASSERT_EQUAL((uint64_t)11, (uint64_t)iter.key());
+ CPPUNIT_ASSERT_EQUAL(A(4, 6, 0), iter.value());
+ iter++;
+ CPPUNIT_ASSERT_EQUAL((uint64_t)14, (uint64_t)iter.key());
+ CPPUNIT_ASSERT_EQUAL(A(42, 0, 0), iter.value());
+ iter++;
+ CPPUNIT_ASSERT_EQUAL((uint64_t)16, (uint64_t)iter.key());
+ CPPUNIT_ASSERT_EQUAL(A(1, 2, 3), iter.value());
+ iter--;
+ CPPUNIT_ASSERT_EQUAL((uint64_t)14, (uint64_t)iter.key());
+ CPPUNIT_ASSERT_EQUAL(A(42, 0, 0), iter.value());
+ iter++;
+ CPPUNIT_ASSERT_EQUAL((uint64_t)16, (uint64_t)iter.key());
+ CPPUNIT_ASSERT_EQUAL(A(1, 2, 3), iter.value());
+ iter--;
+ iter--;
+ CPPUNIT_ASSERT_EQUAL((uint64_t)11,(uint64_t) iter.key());
+ CPPUNIT_ASSERT_EQUAL(A(4, 6, 0), iter.value());
+ iter++;
+ iter++;
+ iter++;
+ CPPUNIT_ASSERT_EQUAL(multiMap.end(), iter);
+ iter--;
+ CPPUNIT_ASSERT_EQUAL((uint64_t)16, (uint64_t)iter.key());
+ CPPUNIT_ASSERT_EQUAL(A(1, 2, 3), iter.value());
+ iter--;
+ CPPUNIT_ASSERT_EQUAL((uint64_t)14, (uint64_t)iter.key());
+ CPPUNIT_ASSERT_EQUAL(A(42, 0, 0), iter.value());
+ iter--;
+ CPPUNIT_ASSERT_EQUAL((uint64_t)11,(uint64_t) iter.key());
+ CPPUNIT_ASSERT_EQUAL(A(4, 6, 0), iter.value());
+
+
+}
+
+} // storage
+
diff --git a/storage/src/tests/bucketdb/lockablemaptest.cpp b/storage/src/tests/bucketdb/lockablemaptest.cpp
new file mode 100644
index 00000000000..0f35f51afbd
--- /dev/null
+++ b/storage/src/tests/bucketdb/lockablemaptest.cpp
@@ -0,0 +1,1262 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/vespalib/util/document_runnable.h>
+#include <vespa/storage/bucketdb/judymultimap.h>
+#include <vespa/storage/bucketdb/lockablemap.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <cppunit/extensions/HelperMacros.h>
+
+namespace storage {
+
+struct LockableMapTest : public CppUnit::TestFixture {
+ void testSimpleUsage();
+ void testComparison();
+ void testIterating();
+ void testChunkedIterationIsTransparentAcrossChunkSizes();
+ void testCanAbortDuringChunkedIteration();
+ void testThreadSafetyStress();
+ void testFindBuckets();
+ void testFindBuckets2();
+ void testFindBuckets3();
+ void testFindBuckets4();
+ void testFindBuckets5();
+ void testFindBucketsSimple();
+ void testFindNoBuckets();
+ void testFindAll();
+ void testFindAll2();
+ void testFindAllUnusedBitIsSet();
+ void testFindAllInconsistentlySplit();
+ void testFindAllInconsistentlySplit2();
+ void testFindAllInconsistentlySplit3();
+ void testFindAllInconsistentlySplit4();
+ void testFindAllInconsistentlySplit5();
+ void testFindAllInconsistentlySplit6();
+ void testFindAllInconsistentBelow16Bits();
+ void testCreate();
+ void testCreate2();
+ void testCreate3();
+ void testCreate4();
+ void testCreate5();
+ void testCreate6();
+ void testCreateEmpty();
+ void testIsConsistent();
+
+ CPPUNIT_TEST_SUITE(LockableMapTest);
+ CPPUNIT_TEST(testSimpleUsage);
+ CPPUNIT_TEST(testComparison);
+ CPPUNIT_TEST(testIterating);
+ CPPUNIT_TEST(testChunkedIterationIsTransparentAcrossChunkSizes);
+ CPPUNIT_TEST(testCanAbortDuringChunkedIteration);
+ CPPUNIT_TEST(testThreadSafetyStress);
+ CPPUNIT_TEST(testFindBuckets);
+ CPPUNIT_TEST(testFindBuckets2);
+ CPPUNIT_TEST(testFindBuckets3);
+ CPPUNIT_TEST(testFindBuckets4);
+ CPPUNIT_TEST(testFindBuckets5);
+ CPPUNIT_TEST(testFindBucketsSimple);
+ CPPUNIT_TEST(testFindNoBuckets);
+ CPPUNIT_TEST(testFindAll);
+ CPPUNIT_TEST(testFindAll2);
+ CPPUNIT_TEST(testFindAllUnusedBitIsSet);
+ CPPUNIT_TEST(testFindAllInconsistentlySplit);
+ CPPUNIT_TEST(testFindAllInconsistentlySplit2);
+ CPPUNIT_TEST(testFindAllInconsistentlySplit3);
+ CPPUNIT_TEST(testFindAllInconsistentlySplit4);
+ CPPUNIT_TEST(testFindAllInconsistentlySplit5);
+ CPPUNIT_TEST(testFindAllInconsistentlySplit6);
+ CPPUNIT_TEST(testFindAllInconsistentBelow16Bits);
+ CPPUNIT_TEST(testCreate);
+ CPPUNIT_TEST(testCreate2);
+ CPPUNIT_TEST(testCreate3);
+ CPPUNIT_TEST(testCreate4);
+ CPPUNIT_TEST(testCreate5);
+ CPPUNIT_TEST(testCreate6);
+ CPPUNIT_TEST(testCreateEmpty);
+ CPPUNIT_TEST(testIsConsistent);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(LockableMapTest);
+
+namespace {
+ struct A : public boost::operators<A> {
+ int _val1;
+ int _val2;
+ int _val3;
+
+ A() : _val1(0), _val2(0), _val3(0) {}
+ A(int val1, int val2, int val3)
+ : _val1(val1), _val2(val2), _val3(val3) {}
+
+ static bool mayContain(const A&) { return true; }
+
+ bool operator==(const A& a) const {
+ return (_val1 == a._val1 && _val2 == a._val2 && _val3 == a._val3);
+ }
+ bool operator<(const A& a) const {
+ if (_val1 != a._val1) return (_val1 < a._val1);
+ if (_val2 != a._val2) return (_val2 < a._val2);
+ return (_val3 < a._val3);
+ }
+ };
+
+ std::ostream& operator<<(std::ostream& out, const A& a) {
+ return out << "A(" << a._val1 << ", " << a._val2 << ", "
+ << a._val3 << ")";
+ }
+
+ typedef LockableMap<JudyMultiMap<A> > Map;
+}
+
+void
+LockableMapTest::testSimpleUsage() {
+ // Tests insert, erase, size, empty, operator[]
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+ // Do some insertions
+ CPPUNIT_ASSERT(map.empty());
+ bool preExisted;
+ map.insert(16, A(1, 2, 3), "foo", preExisted);
+ CPPUNIT_ASSERT_EQUAL(false, preExisted);
+ map.insert(11, A(4, 6, 0), "foo", preExisted);
+ CPPUNIT_ASSERT_EQUAL(false, preExisted);
+ map.insert(14, A(42, 0, 0), "foo", preExisted);
+ CPPUNIT_ASSERT_EQUAL(false, preExisted);
+ CPPUNIT_ASSERT_EQUAL_MSG(map.toString(),
+ (Map::size_type) 3, map.size());
+
+ map.insert(11, A(4, 7, 0), "foo", preExisted);
+ CPPUNIT_ASSERT_EQUAL(true, preExisted);
+ CPPUNIT_ASSERT_EQUAL((Map::size_type) 3, map.size());
+ CPPUNIT_ASSERT(!map.empty());
+
+ // Access some elements
+ CPPUNIT_ASSERT_EQUAL(A(4, 7, 0), *map.get(11, "foo"));
+ CPPUNIT_ASSERT_EQUAL(A(1, 2, 3), *map.get(16, "foo"));
+ CPPUNIT_ASSERT_EQUAL(A(42,0, 0), *map.get(14, "foo"));
+
+ // Do removes
+ CPPUNIT_ASSERT(map.erase(12, "foo") == 0);
+ CPPUNIT_ASSERT_EQUAL((Map::size_type) 3, map.size());
+
+ CPPUNIT_ASSERT(map.erase(14, "foo") == 1);
+ CPPUNIT_ASSERT_EQUAL((Map::size_type) 2, map.size());
+
+ CPPUNIT_ASSERT(map.erase(11, "foo") == 1);
+ CPPUNIT_ASSERT(map.erase(16, "foo") == 1);
+ CPPUNIT_ASSERT_EQUAL((Map::size_type) 0, map.size());
+ CPPUNIT_ASSERT(map.empty());
+}
+
+void
+LockableMapTest::testComparison() {
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map1;
+ Map map2;
+ bool preExisted;
+
+ // Check empty state is correct
+ CPPUNIT_ASSERT_EQUAL(map1, map2);
+ CPPUNIT_ASSERT(map1 <= map2);
+ CPPUNIT_ASSERT(map1 >= map2);
+ CPPUNIT_ASSERT(!(map1 < map2));
+ CPPUNIT_ASSERT(!(map1 > map2));
+ CPPUNIT_ASSERT(!(map1 != map2));
+
+        // Check that different lengths are ok
+ map1.insert(4, A(1, 2, 3), "foo", preExisted);
+ CPPUNIT_ASSERT(!(map1 == map2));
+ CPPUNIT_ASSERT(!(map1 <= map2));
+ CPPUNIT_ASSERT(!(map1 < map2));
+ CPPUNIT_ASSERT(map1 >= map2);
+ CPPUNIT_ASSERT(map1 > map2);
+ CPPUNIT_ASSERT(map1 != map2);
+
+        // Check that equal elements are ok
+ map2.insert(4, A(1, 2, 3), "foo", preExisted);
+ CPPUNIT_ASSERT_EQUAL(map1, map2);
+ CPPUNIT_ASSERT(map1 <= map2);
+ CPPUNIT_ASSERT(map1 >= map2);
+ CPPUNIT_ASSERT(!(map1 < map2));
+ CPPUNIT_ASSERT(!(map1 > map2));
+ CPPUNIT_ASSERT(!(map1 != map2));
+
+        // Check that non-equal values are ok
+ map1.insert(6, A(1, 2, 6), "foo", preExisted);
+ map2.insert(6, A(1, 2, 3), "foo", preExisted);
+ CPPUNIT_ASSERT(!(map1 == map2));
+ CPPUNIT_ASSERT(!(map1 <= map2));
+ CPPUNIT_ASSERT(!(map1 < map2));
+ CPPUNIT_ASSERT(map1 >= map2);
+ CPPUNIT_ASSERT(map1 > map2);
+ CPPUNIT_ASSERT(map1 != map2);
+
+        // Check that non-equal keys are ok
+ map1.erase(6, "foo");
+ map1.insert(7, A(1, 2, 3), "foo", preExisted);
+ CPPUNIT_ASSERT(!(map1 == map2));
+ CPPUNIT_ASSERT(!(map1 <= map2));
+ CPPUNIT_ASSERT(!(map1 < map2));
+ CPPUNIT_ASSERT(map1 >= map2);
+ CPPUNIT_ASSERT(map1 > map2);
+ CPPUNIT_ASSERT(map1 != map2);
+}
+
+namespace {
+ struct NonConstProcessor {
+ Map::Decision operator()(int key, A& a) {
+ (void) key;
+ ++a._val2;
+ return Map::UPDATE;
+ }
+ };
+ struct EntryProcessor {
+ mutable uint32_t count;
+ mutable std::vector<std::string> log;
+ mutable std::vector<Map::Decision> behaviour;
+
+ EntryProcessor() : count(0), log(), behaviour() {}
+ EntryProcessor(const std::vector<Map::Decision>& decisions)
+ : count(0), log(), behaviour(decisions) {}
+
+ Map::Decision operator()(uint64_t key, A& a) const {
+ std::ostringstream ost;
+ ost << key << " - " << a;
+ log.push_back(ost.str());
+ Map::Decision d = Map::CONTINUE;
+ if (behaviour.size() > count) {
+ d = behaviour[count++];
+ }
+ if (d == Map::UPDATE) {
+ ++a._val3;
+ }
+ return d;
+ }
+
+ std::string toString() {
+ std::ostringstream ost;
+ for (uint32_t i=0; i<log.size(); ++i) ost << log[i] << "\n";
+ return ost.str();
+ }
+ };
+}
+
+void
+LockableMapTest::testIterating() {
+ Map map;
+ bool preExisted;
+ map.insert(16, A(1, 2, 3), "foo", preExisted);
+ map.insert(11, A(4, 6, 0), "foo", preExisted);
+ map.insert(14, A(42, 0, 0), "foo", preExisted);
+ // Test that we can use functor with non-const function
+ {
+ NonConstProcessor ncproc;
+ map.each(ncproc, "foo"); // Locking both for each element
+ CPPUNIT_ASSERT_EQUAL(A(4, 7, 0), *map.get(11, "foo"));
+ CPPUNIT_ASSERT_EQUAL(A(42,1, 0), *map.get(14, "foo"));
+ CPPUNIT_ASSERT_EQUAL(A(1, 3, 3), *map.get(16, "foo"));
+ map.all(ncproc, "foo"); // And for all
+ CPPUNIT_ASSERT_EQUAL(A(4, 8, 0), *map.get(11, "foo"));
+ CPPUNIT_ASSERT_EQUAL(A(42,2, 0), *map.get(14, "foo"));
+ CPPUNIT_ASSERT_EQUAL(A(1, 4, 3), *map.get(16, "foo"));
+ }
+ // Test that we can use const functors directly..
+ map.each(EntryProcessor(), "foo");
+
+ // Test iterator bounds
+ {
+ EntryProcessor proc;
+ map.each(proc, "foo", 11, 16);
+ std::string expected("11 - A(4, 8, 0)\n"
+ "14 - A(42, 2, 0)\n"
+ "16 - A(1, 4, 3)\n");
+ CPPUNIT_ASSERT_EQUAL(expected, proc.toString());
+
+ EntryProcessor proc2;
+ map.each(proc2, "foo", 12, 15);
+ expected = "14 - A(42, 2, 0)\n";
+ CPPUNIT_ASSERT_EQUAL(expected, proc2.toString());
+ }
+ // Test that we can abort iterating
+ {
+ std::vector<Map::Decision> decisions;
+ decisions.push_back(Map::CONTINUE);
+ decisions.push_back(Map::ABORT);
+ EntryProcessor proc(decisions);
+ map.each(proc, "foo");
+ std::string expected("11 - A(4, 8, 0)\n"
+ "14 - A(42, 2, 0)\n");
+ CPPUNIT_ASSERT_EQUAL(expected, proc.toString());
+ }
+ // Test that we can remove during iteration
+ {
+ std::vector<Map::Decision> decisions;
+ decisions.push_back(Map::CONTINUE);
+ decisions.push_back(Map::REMOVE);
+ EntryProcessor proc(decisions);
+ map.each(proc, "foo");
+ std::string expected("11 - A(4, 8, 0)\n"
+ "14 - A(42, 2, 0)\n"
+ "16 - A(1, 4, 3)\n");
+ CPPUNIT_ASSERT_EQUAL(expected, proc.toString());
+ CPPUNIT_ASSERT_EQUAL_MSG(map.toString(),
+ (Map::size_type) 2, map.size());
+ CPPUNIT_ASSERT_EQUAL(A(4, 8, 0), *map.get(11, "foo"));
+ CPPUNIT_ASSERT_EQUAL(A(1, 4, 3), *map.get(16, "foo"));
+ Map::WrappedEntry entry = map.get(14, "foo");
+ CPPUNIT_ASSERT(!entry.exist());
+ }
+}
+
+void
+LockableMapTest::testChunkedIterationIsTransparentAcrossChunkSizes()
+{
+ Map map;
+ bool preExisted;
+ map.insert(16, A(1, 2, 3), "foo", preExisted);
+ map.insert(11, A(4, 6, 0), "foo", preExisted);
+ map.insert(14, A(42, 0, 0), "foo", preExisted);
+ NonConstProcessor ncproc; // Increments 2nd value in all entries.
+ // chunkedAll with chunk size of 1
+ map.chunkedAll(ncproc, "foo", 1);
+ CPPUNIT_ASSERT_EQUAL(A(4, 7, 0), *map.get(11, "foo"));
+ CPPUNIT_ASSERT_EQUAL(A(42, 1, 0), *map.get(14, "foo"));
+ CPPUNIT_ASSERT_EQUAL(A(1, 3, 3), *map.get(16, "foo"));
+ // chunkedAll with chunk size larger than db size
+ map.chunkedAll(ncproc, "foo", 100);
+ CPPUNIT_ASSERT_EQUAL(A(4, 8, 0), *map.get(11, "foo"));
+ CPPUNIT_ASSERT_EQUAL(A(42, 2, 0), *map.get(14, "foo"));
+ CPPUNIT_ASSERT_EQUAL(A(1, 4, 3), *map.get(16, "foo"));
+}
+
+void
+LockableMapTest::testCanAbortDuringChunkedIteration()
+{
+ Map map;
+ bool preExisted;
+ map.insert(16, A(1, 2, 3), "foo", preExisted);
+ map.insert(11, A(4, 6, 0), "foo", preExisted);
+ map.insert(14, A(42, 0, 0), "foo", preExisted);
+
+ std::vector<Map::Decision> decisions;
+ decisions.push_back(Map::CONTINUE);
+ decisions.push_back(Map::ABORT);
+ EntryProcessor proc(decisions);
+ map.chunkedAll(proc, "foo", 100);
+ std::string expected("11 - A(4, 6, 0)\n"
+ "14 - A(42, 0, 0)\n");
+ CPPUNIT_ASSERT_EQUAL(expected, proc.toString());
+}
+
+namespace {
+ struct LoadGiver : public document::Runnable {
+ typedef std::shared_ptr<LoadGiver> SP;
+ Map& _map;
+ uint32_t _counter;
+
+ LoadGiver(Map& map) : _map(map), _counter(0) {}
+ };
+
+ struct InsertEraseLoadGiver : public LoadGiver {
+ InsertEraseLoadGiver(Map& map) : LoadGiver(map) {}
+
+ void run() {
+ // Screws up order of buckets by xor'ing with 12345.
+ // Only operate on last 32k super buckets.
+ while (running()) {
+ uint32_t bucket = ((_counter ^ 12345) % 0x8000) + 0x8000;
+ if (bucket % 7 < 3) {
+ bool preExisted;
+ _map.insert(bucket, A(bucket, 0, _counter), "foo",
+ preExisted);
+ }
+ if (bucket % 5 < 2) {
+ _map.erase(bucket, "foo");
+ }
+ ++_counter;
+ }
+ }
+ };
+
+ struct GetLoadGiver : public LoadGiver {
+ GetLoadGiver(Map& map) : LoadGiver(map) {}
+
+ void run() {
+ // It's legal to keep entries as long as you only request higher
+ // buckets. So, to test this, keep entries until you request one
+ // that is smaller than those stored.
+ std::vector<std::pair<uint32_t, Map::WrappedEntry> > stored;
+ while (running()) {
+ uint32_t bucket = (_counter ^ 52721) % 0x10000;
+ if (!stored.empty() && stored.back().first > bucket) {
+ stored.clear();
+ }
+ stored.push_back(std::pair<uint32_t, Map::WrappedEntry>(
+ bucket, _map.get(bucket, "foo", _counter % 3 == 0)));
+ ++_counter;
+ }
+ }
+ };
+
+ struct AllLoadGiver : public LoadGiver {
+ AllLoadGiver(Map& map) : LoadGiver(map) {}
+
+ void run() {
+ while (running()) {
+ _map.all(*this, "foo");
+ ++_counter;
+ }
+ }
+
+ Map::Decision operator()(int key, A& a) {
+ //std::cerr << (void*) this << " - " << key << "\n";
+ (void) key;
+ ++a._val2;
+ return Map::CONTINUE;
+ }
+ };
+
+ struct EachLoadGiver : public LoadGiver {
+ EachLoadGiver(Map& map) : LoadGiver(map) {}
+
+ void run() {
+ while (running()) {
+ _map.each(*this, "foo");
+ ++_counter;
+ }
+ }
+
+ Map::Decision operator()(int key, A& a) {
+ //std::cerr << (void*) this << " - " << key << "\n";
+ (void) key;
+ ++a._val2;
+ return Map::CONTINUE;
+ }
+ };
+
+ struct RandomRangeLoadGiver : public LoadGiver {
+ RandomRangeLoadGiver(Map& map) : LoadGiver(map) {}
+
+ void run() {
+ while (running()) {
+ uint32_t min = (_counter ^ 23426) % 0x10000;
+ uint32_t max = (_counter ^ 40612) % 0x10000;
+ if (min > max) {
+ uint32_t tmp = min;
+ min = max;
+ max = tmp;
+ }
+ if (_counter % 7 < 5) {
+ _map.each(*this, "foo", min, max);
+ } else {
+ _map.all(*this, "foo", min, max);
+ }
+ ++_counter;
+ }
+ }
+
+ Map::Decision operator()(int key, A& a) {
+ //std::cerr << ".";
+ (void) key;
+ ++a._val2;
+ return Map::CONTINUE;
+ }
+ };
+
+ struct GetNextLoadGiver : public LoadGiver {
+ GetNextLoadGiver(Map& map) : LoadGiver(map) {}
+
+ void run() {
+ while (running()) {
+ uint32_t bucket = (_counter ^ 60417) % 0xffff;
+ if (_counter % 7 < 5) {
+ _map.each(*this, "foo", bucket + 1, 0xffff);
+ } else {
+ _map.all(*this, "foo", bucket + 1, 0xffff);
+ }
+ ++_counter;
+ }
+ }
+
+ Map::Decision operator()(int key, A& a) {
+ //std::cerr << ".";
+ (void) key;
+ ++a._val2;
+ return Map::ABORT;
+ }
+ };
+}
+
+void
+LockableMapTest::testThreadSafetyStress() {
+ uint32_t duration = 2 * 1000;
+ std::cerr << "\nRunning LockableMap threadsafety test for "
+ << (duration / 1000) << " seconds.\n";
+ // Set up multiple threads going through the bucket database at the same
+ // time. Ensuring all works and there are no deadlocks.
+
+ // Initial database of 32k elements which should always be present.
+        // Next 32k elements may exist (loadgivers may erase and create them)
+ Map map;
+ for (uint32_t i=0; i<65536; ++i) {
+ bool preExisted;
+ map.insert(i, A(i, 0, i ^ 12345), "foo", preExisted);
+ }
+ std::vector<LoadGiver::SP> loadgivers;
+ for (uint32_t i=0; i<8; ++i) {
+ loadgivers.push_back(LoadGiver::SP(new InsertEraseLoadGiver(map)));
+ }
+ for (uint32_t i=0; i<2; ++i) {
+ loadgivers.push_back(LoadGiver::SP(new GetLoadGiver(map)));
+ }
+ for (uint32_t i=0; i<2; ++i) {
+ loadgivers.push_back(LoadGiver::SP(new AllLoadGiver(map)));
+ }
+ for (uint32_t i=0; i<2; ++i) {
+ loadgivers.push_back(LoadGiver::SP(new EachLoadGiver(map)));
+ }
+ for (uint32_t i=0; i<2; ++i) {
+ loadgivers.push_back(LoadGiver::SP(new RandomRangeLoadGiver(map)));
+ }
+ for (uint32_t i=0; i<2; ++i) {
+ loadgivers.push_back(LoadGiver::SP(new GetNextLoadGiver(map)));
+ }
+
+ FastOS_ThreadPool pool(128 * 1024);
+ for (uint32_t i=0; i<loadgivers.size(); ++i) {
+ CPPUNIT_ASSERT(loadgivers[i]->start(pool));
+ }
+ FastOS_Thread::Sleep(duration);
+ std::cerr << "Closing down test\n";
+ for (uint32_t i=0; i<loadgivers.size(); ++i) {
+ CPPUNIT_ASSERT(loadgivers[i]->stop());
+ }
+// FastOS_Thread::Sleep(duration);
+// std::cerr << "Didn't manage to shut down\n";
+// map._lockedKeys.print(std::cerr, true, "");
+
+ for (uint32_t i=0; i<loadgivers.size(); ++i) {
+ CPPUNIT_ASSERT(loadgivers[i]->join());
+ }
+ std::cerr << "Loadgiver counts:";
+ for (uint32_t i=0; i<loadgivers.size(); ++i) {
+ std::cerr << " " << loadgivers[i]->_counter;
+ }
+ std::cerr << "\nTest completed\n";
+}
+
+#if 0
+// Debug helpers (compiled out) for dumping bucket lookup results while
+// diagnosing getContained()/getAll() behavior.
+namespace {
+// Wraps a raw bucket key so stream output prints it in hexadecimal.
+struct Hex {
+ document::BucketId::Type val;
+
+ Hex(document::BucketId::Type v) : val(v) {}
+ bool operator==(const Hex& h) const { return val == h.val; }
+};
+
+std::ostream& operator<<(std::ostream& out, const Hex& h) {
+ out << std::hex << h.val << std::dec;
+ return out;
+}
+
+// Prints a bucket id plus its reversed key form, in decimal and hex.
+void
+printBucket(const std::string s, const document::BucketId& b) {
+ std::cerr << s << "bucket=" << b << ", reversed=" << b.stripUnused().toKey() << ", hex=" << Hex(b.stripUnused().toKey()) << "\n";
+}
+
+// Prints every bucket id contained in a lookup result map.
+void
+printBuckets(const std::map<document::BucketId, Map::WrappedEntry>& results) {
+ for (std::map<document::BucketId, Map::WrappedEntry>::const_iterator iter = results.begin();
+ iter != results.end();
+ iter++) {
+ printBucket("Returned ", iter->first);
+ }
+}
+
+}
+#endif
+
+// Inserts three pre-stripped buckets and checks that getContained() on a
+// deeper (22-bit) bucket returns exactly the one entry the asserts expect.
+// Body is only compiled on 64-bit targets (Judy-backed map).
+void
+LockableMapTest::testFindBucketsSimple() {
+#if __WORDSIZE == 64
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+
+ document::BucketId id1(17, 0x0ffff);
+ id1 = id1.stripUnused();
+
+ document::BucketId id2(18, 0x1ffff);
+ id2 = id2.stripUnused();
+
+ document::BucketId id3(18, 0x3ffff);
+ id3 = id3.stripUnused();
+
+ bool preExisted;
+ map.insert(id1.toKey(), A(1,2,3), "foo", preExisted);
+ map.insert(id2.toKey(), A(2,3,4), "foo", preExisted);
+ map.insert(id3.toKey(), A(3,4,5), "foo", preExisted);
+
+ document::BucketId id(22, 0xfffff);
+ std::map<document::BucketId, Map::WrappedEntry> results =
+ map.getContained(id, "foo");
+
+ // Only id3 contains the queried bucket.
+ CPPUNIT_ASSERT_EQUAL((size_t)1, results.size());
+ CPPUNIT_ASSERT_EQUAL(A(3,4,5), *results[id3]);
+#endif
+}
+
+// getContained() for a 22-bit bucket should return all stored buckets on the
+// containment path (16, 17 and 19 bits here), not just one level.
+void
+LockableMapTest::testFindBuckets() {
+#if __WORDSIZE == 64
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+
+ document::BucketId id1(16, 0x0ffff);
+ document::BucketId id2(17, 0x0ffff);
+ document::BucketId id3(17, 0x1ffff);
+ document::BucketId id4(19, 0xfffff);
+
+ bool preExisted;
+ map.insert(id1.stripUnused().toKey(), A(1,2,3), "foo", preExisted);
+ map.insert(id2.stripUnused().toKey(), A(2,3,4), "foo", preExisted);
+ map.insert(id3.stripUnused().toKey(), A(3,4,5), "foo", preExisted);
+ map.insert(id4.stripUnused().toKey(), A(4,5,6), "foo", preExisted);
+
+ document::BucketId id(22, 0xfffff);
+ std::map<document::BucketId, Map::WrappedEntry> results =
+ map.getContained(id, "foo");
+
+ CPPUNIT_ASSERT_EQUAL((size_t)3, results.size());
+
+ CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]);
+ CPPUNIT_ASSERT_EQUAL(A(4,5,6), *results[id4.stripUnused()]);
+ CPPUNIT_ASSERT_EQUAL(A(3,4,5), *results[id3.stripUnused()]);
+#endif
+}
+
+// Regression for ticket 3121525: same as testFindBuckets but with an 18-bit
+// sub bucket (id4) and a different query value; expects the three containing
+// entries id1, id3 and id4.
+void
+LockableMapTest::testFindBuckets2() { // ticket 3121525
+#if __WORDSIZE == 64
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+
+ document::BucketId id1(16, 0x0ffff);
+ document::BucketId id2(17, 0x0ffff);
+ document::BucketId id3(17, 0x1ffff);
+ document::BucketId id4(18, 0x1ffff);
+
+ bool preExisted;
+ map.insert(id1.stripUnused().toKey(), A(1,2,3), "foo", preExisted);
+ map.insert(id2.stripUnused().toKey(), A(2,3,4), "foo", preExisted);
+ map.insert(id3.stripUnused().toKey(), A(3,4,5), "foo", preExisted);
+ map.insert(id4.stripUnused().toKey(), A(4,5,6), "foo", preExisted);
+
+ document::BucketId id(22, 0x1ffff);
+ std::map<document::BucketId, Map::WrappedEntry> results =
+ map.getContained(id, "foo");
+
+ CPPUNIT_ASSERT_EQUAL((size_t)3, results.size());
+
+ CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]);
+ CPPUNIT_ASSERT_EQUAL(A(4,5,6), *results[id4.stripUnused()]);
+ CPPUNIT_ASSERT_EQUAL(A(3,4,5), *results[id3.stripUnused()]);
+#endif
+}
+
+// Regression for ticket 3121525: with only 16- and 17-bit buckets stored,
+// a query under the other 17-bit branch must match id1 alone.
+void
+LockableMapTest::testFindBuckets3() { // ticket 3121525
+#if __WORDSIZE == 64
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+
+ document::BucketId id1(16, 0x0ffff);
+ document::BucketId id2(17, 0x0ffff);
+
+ bool preExisted;
+ map.insert(id1.stripUnused().toKey(), A(1,2,3), "foo", preExisted);
+ map.insert(id2.stripUnused().toKey(), A(2,3,4), "foo", preExisted);
+
+ document::BucketId id(22, 0x1ffff);
+ std::map<document::BucketId, Map::WrappedEntry> results =
+ map.getContained(id, "foo");
+
+ CPPUNIT_ASSERT_EQUAL((size_t)1, results.size());
+
+ CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]);
+#endif
+}
+
+// Regression for ticket 3121525: a 19-bit bucket deeper than the 18-bit
+// query bucket must not be returned by getContained(); only id1 matches.
+void
+LockableMapTest::testFindBuckets4() { // ticket 3121525
+#if __WORDSIZE == 64
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+
+ document::BucketId id1(16, 0x0ffff);
+ document::BucketId id2(17, 0x0ffff);
+ document::BucketId id3(19, 0x1ffff);
+
+ bool preExisted;
+ map.insert(id1.stripUnused().toKey(), A(1,2,3), "foo", preExisted);
+ map.insert(id2.stripUnused().toKey(), A(2,3,4), "foo", preExisted);
+ map.insert(id3.stripUnused().toKey(), A(3,4,5), "foo", preExisted);
+
+ document::BucketId id(18, 0x1ffff);
+ std::map<document::BucketId, Map::WrappedEntry> results =
+ map.getContained(id, "foo");
+
+ CPPUNIT_ASSERT_EQUAL((size_t)1, results.size());
+
+ CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]);
+#endif
+}
+
+// Regression for ticket 3121525: variant of testFindBuckets4 where the
+// 19-bit bucket (0x5ffff) sits in an unrelated branch; only id1 matches.
+void
+LockableMapTest::testFindBuckets5() { // ticket 3121525
+#if __WORDSIZE == 64
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+
+ document::BucketId id1(16, 0x0ffff);
+ document::BucketId id2(17, 0x0ffff);
+ document::BucketId id3(19, 0x5ffff);
+
+ bool preExisted;
+ map.insert(id1.stripUnused().toKey(), A(1,2,3), "foo", preExisted);
+ map.insert(id2.stripUnused().toKey(), A(2,3,4), "foo", preExisted);
+ map.insert(id3.stripUnused().toKey(), A(3,4,5), "foo", preExisted);
+
+ document::BucketId id(18, 0x1ffff);
+ std::map<document::BucketId, Map::WrappedEntry> results =
+ map.getContained(id, "foo");
+
+ CPPUNIT_ASSERT_EQUAL((size_t)1, results.size());
+
+ CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]);
+#endif
+}
+
+// getAll() on an empty map must return an empty result set.
+void
+LockableMapTest::testFindNoBuckets() {
+#if __WORDSIZE == 64
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+
+ document::BucketId id(16, 0x0ffff);
+ std::map<document::BucketId, Map::WrappedEntry> results =
+ map.getAll(id, "foo");
+
+ CPPUNIT_ASSERT_EQUAL((size_t)0, results.size());
+#endif
+}
+
+// getAll() should return super buckets, the exact match, and sub buckets of
+// the queried bucket, in one result set. Exercises two queries against a
+// nine-bucket hierarchy.
+void
+LockableMapTest::testFindAll() {
+#if __WORDSIZE == 64
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+
+ document::BucketId id1(16, 0x0aaaa); // contains id2-id7
+ document::BucketId id2(17, 0x0aaaa); // contains id3-id4
+ document::BucketId id3(20, 0xcaaaa);
+ document::BucketId id4(20, 0xeaaaa);
+ document::BucketId id5(17, 0x1aaaa); // contains id6-id7
+ document::BucketId id6(20, 0xdaaaa);
+ document::BucketId id7(20, 0xfaaaa);
+ document::BucketId id8(20, 0xceaaa);
+ document::BucketId id9(17, 0x1ffff);
+
+ bool preExisted;
+ map.insert(id1.stripUnused().toKey(), A(1,2,3), "foo", preExisted);
+ map.insert(id2.stripUnused().toKey(), A(2,3,4), "foo", preExisted);
+ map.insert(id3.stripUnused().toKey(), A(3,4,5), "foo", preExisted);
+ map.insert(id4.stripUnused().toKey(), A(4,5,6), "foo", preExisted);
+ map.insert(id5.stripUnused().toKey(), A(5,6,7), "foo", preExisted);
+ map.insert(id6.stripUnused().toKey(), A(6,7,8), "foo", preExisted);
+ map.insert(id7.stripUnused().toKey(), A(7,8,9), "foo", preExisted);
+ map.insert(id8.stripUnused().toKey(), A(8,9,10), "foo", preExisted);
+ map.insert(id9.stripUnused().toKey(), A(9,10,11), "foo", preExisted);
+ //printBucket("Inserted ", id1);
+ //printBucket("Inserted ", id2);
+ //printBucket("Inserted ", id3);
+ //printBucket("Inserted ", id4);
+ //printBucket("Inserted ", id5);
+ //printBucket("Inserted ", id6);
+ //printBucket("Inserted ", id7);
+ //printBucket("Inserted ", id8);
+ //printBucket("Inserted ", id9);
+
+ // First query: exact match exists (id5); expect its super bucket, itself
+ // and its two sub buckets.
+ document::BucketId id(17, 0x1aaaa);
+ std::map<document::BucketId, Map::WrappedEntry> results =
+ map.getAll(id, "foo");
+
+ //std::cerr << "Done: getAll() for bucket " << id << "\n";
+ //printBuckets(results);
+
+ CPPUNIT_ASSERT_EQUAL((size_t)4, results.size());
+
+ CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]); // super bucket
+ CPPUNIT_ASSERT_EQUAL(A(5,6,7), *results[id5.stripUnused()]); // most specific match (exact match)
+ CPPUNIT_ASSERT_EQUAL(A(6,7,8), *results[id6.stripUnused()]); // sub bucket
+ CPPUNIT_ASSERT_EQUAL(A(7,8,9), *results[id7.stripUnused()]); // sub bucket
+
+ // Second query: no exact match; only the 17-bit sub bucket id9 remains.
+ id = document::BucketId(16, 0xffff);
+ results = map.getAll(id, "foo");
+
+ //std::cerr << "Done: getAll() for bucket " << id << "\n";
+ //printBuckets(results);
+
+ CPPUNIT_ASSERT_EQUAL((size_t)1, results.size());
+
+ CPPUNIT_ASSERT_EQUAL(A(9,10,11), *results[id9.stripUnused()]); // sub bucket
+#endif
+}
+
+// Regression for ticket 3121525: getAll() on a 16-bit bucket must return
+// both of its 17-bit sub buckets.
+void
+LockableMapTest::testFindAll2() { // Ticket 3121525
+#if __WORDSIZE == 64
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+
+ document::BucketId id1(17, 0x00001);
+ document::BucketId id2(17, 0x10001);
+
+ bool preExisted;
+ map.insert(id1.stripUnused().toKey(), A(1,2,3), "foo", preExisted);
+ map.insert(id2.stripUnused().toKey(), A(2,3,4), "foo", preExisted);
+
+ document::BucketId id(16, 0x00001);
+ std::map<document::BucketId, Map::WrappedEntry> results =
+ map.getAll(id, "foo");
+
+ CPPUNIT_ASSERT_EQUAL((size_t)2, results.size());
+
+ CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]); // sub bucket
+ CPPUNIT_ASSERT_EQUAL(A(2,3,4), *results[id2.stripUnused()]); // sub bucket
+#endif
+}
+
+// Regression for ticket 2938896: a set-but-unused bit (bit 33 while only 32
+// bits are used) in the query bucket must not affect which sub buckets
+// getAll() returns.
+void
+LockableMapTest::testFindAllUnusedBitIsSet() { // ticket 2938896
+#if __WORDSIZE == 64
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+
+ document::BucketId id1(24, 0x000dc7089);
+ document::BucketId id2(33, 0x0053c7089);
+ document::BucketId id3(33, 0x1053c7089);
+ document::BucketId id4(24, 0x000bc7089);
+
+ bool preExisted;
+ map.insert(id1.stripUnused().toKey(), A(1,2,3), "foo", preExisted);
+ map.insert(id2.stripUnused().toKey(), A(2,3,4), "foo", preExisted);
+ map.insert(id3.stripUnused().toKey(), A(3,4,5), "foo", preExisted);
+ map.insert(id4.stripUnused().toKey(), A(4,5,6), "foo", preExisted);
+
+ document::BucketId id(33, 0x1053c7089);
+ id.setUsedBits(32); // Bit 33 is set, but unused
+ std::map<document::BucketId, Map::WrappedEntry> results =
+ map.getAll(id, "foo");
+
+ CPPUNIT_ASSERT_EQUAL((size_t)2, results.size());
+
+ CPPUNIT_ASSERT_EQUAL(A(2,3,4), *results[id2.stripUnused()]); // sub bucket
+ CPPUNIT_ASSERT_EQUAL(A(3,4,5), *results[id3.stripUnused()]); // sub bucket
+#endif
+}
+
+// Regression for ticket 2938896: with an inconsistently split bucket (both
+// the 16-bit parent and its 17-bit children stored), getAll() on the parent
+// must return the exact match plus both sub buckets.
+void
+LockableMapTest::testFindAllInconsistentlySplit() { // Ticket 2938896
+#if __WORDSIZE == 64
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+
+ document::BucketId id1(16, 0x00001); // contains id2-id3
+ document::BucketId id2(17, 0x00001);
+ document::BucketId id3(17, 0x10001);
+
+ bool preExisted;
+ map.insert(id1.stripUnused().toKey(), A(1,2,3), "foo", preExisted);
+ map.insert(id2.stripUnused().toKey(), A(2,3,4), "foo", preExisted);
+ map.insert(id3.stripUnused().toKey(), A(3,4,5), "foo", preExisted);
+
+ document::BucketId id(16, 0x00001);
+ std::map<document::BucketId, Map::WrappedEntry> results =
+ map.getAll(id, "foo");
+
+ CPPUNIT_ASSERT_EQUAL((size_t)3, results.size());
+
+ CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]); // most specific match (exact match)
+ CPPUNIT_ASSERT_EQUAL(A(2,3,4), *results[id2.stripUnused()]); // sub bucket
+ CPPUNIT_ASSERT_EQUAL(A(3,4,5), *results[id3.stripUnused()]); // sub bucket
+#endif
+}
+
+// Regression for ticket 3121525: querying a 32-bit bucket must return its
+// 27- and 29-bit super buckets and ignore unrelated 17-bit entries.
+void
+LockableMapTest::testFindAllInconsistentlySplit2() { // ticket 3121525
+#if __WORDSIZE == 64
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+
+ document::BucketId id1(17, 0x10000);
+ document::BucketId id2(27, 0x007228034); // contains id3
+ document::BucketId id3(29, 0x007228034);
+ document::BucketId id4(17, 0x1ffff);
+
+ bool preExisted;
+ map.insert(id1.stripUnused().toKey(), A(1,2,3), "foo", preExisted);
+ map.insert(id2.stripUnused().toKey(), A(2,3,4), "foo", preExisted);
+ map.insert(id3.stripUnused().toKey(), A(3,4,5), "foo", preExisted);
+ map.insert(id4.stripUnused().toKey(), A(4,5,6), "foo", preExisted);
+
+ document::BucketId id(32, 0x027228034);
+ std::map<document::BucketId, Map::WrappedEntry> results =
+ map.getAll(id, "foo");
+
+ CPPUNIT_ASSERT_EQUAL((size_t)2, results.size());
+
+ CPPUNIT_ASSERT_EQUAL(A(2,3,4), *results[id2.stripUnused()]); // super bucket
+ CPPUNIT_ASSERT_EQUAL(A(3,4,5), *results[id3.stripUnused()]); // most specific match (super bucket)
+#endif
+}
+
+// Regression for ticket 3121525: a 22-bit query in the 0x1ffff branch should
+// only match the 16-bit super bucket, not the sibling 17-bit bucket.
+void
+LockableMapTest::testFindAllInconsistentlySplit3() { // ticket 3121525
+#if __WORDSIZE == 64
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+
+ document::BucketId id1(16, 0x0ffff); // contains id2
+ document::BucketId id2(17, 0x0ffff);
+
+ bool preExisted;
+ map.insert(id1.stripUnused().toKey(), A(1,2,3), "foo", preExisted);
+ map.insert(id2.stripUnused().toKey(), A(2,3,4), "foo", preExisted);
+
+ document::BucketId id(22, 0x1ffff);
+ std::map<document::BucketId, Map::WrappedEntry> results =
+ map.getAll(id, "foo");
+
+ CPPUNIT_ASSERT_EQUAL((size_t)1, results.size());
+
+ CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]); // super bucket
+#endif
+}
+
+// Regression for ticket 3121525: an 18-bit query should match its 16-bit
+// super bucket and its 19-bit sub bucket, skipping the sibling 17-bit entry.
+void
+LockableMapTest::testFindAllInconsistentlySplit4() { // ticket 3121525
+#if __WORDSIZE == 64
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+
+ document::BucketId id1(16, 0x0ffff); // contains id2-id3
+ document::BucketId id2(17, 0x0ffff);
+ document::BucketId id3(19, 0x1ffff);
+
+ bool preExisted;
+ map.insert(id1.stripUnused().toKey(), A(1,2,3), "foo", preExisted);
+ map.insert(id2.stripUnused().toKey(), A(2,3,4), "foo", preExisted);
+ map.insert(id3.stripUnused().toKey(), A(3,4,5), "foo", preExisted);
+
+ document::BucketId id(18, 0x1ffff);
+ std::map<document::BucketId, Map::WrappedEntry> results =
+ map.getAll(id, "foo");
+
+ CPPUNIT_ASSERT_EQUAL((size_t)2, results.size());
+
+ CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]); // super bucket
+ CPPUNIT_ASSERT_EQUAL(A(3,4,5), *results[id3.stripUnused()]); // sub bucket
+#endif
+}
+
+// Regression for ticket 3121525: variant of ...Split4 with the 19-bit sub
+// bucket at 0x5ffff; same expected result set.
+void
+LockableMapTest::testFindAllInconsistentlySplit5() { // ticket 3121525
+#if __WORDSIZE == 64
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+
+ document::BucketId id1(16, 0x0ffff); // contains id2-id3
+ document::BucketId id2(17, 0x0ffff);
+ document::BucketId id3(19, 0x5ffff);
+
+ bool preExisted;
+ map.insert(id1.stripUnused().toKey(), A(1,2,3), "foo", preExisted);
+ map.insert(id2.stripUnused().toKey(), A(2,3,4), "foo", preExisted);
+ map.insert(id3.stripUnused().toKey(), A(3,4,5), "foo", preExisted);
+
+ document::BucketId id(18, 0x1ffff);
+ std::map<document::BucketId, Map::WrappedEntry> results =
+ map.getAll(id, "foo");
+
+ CPPUNIT_ASSERT_EQUAL((size_t)2, results.size());
+
+ CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]); // super bucket
+ CPPUNIT_ASSERT_EQUAL(A(3,4,5), *results[id3.stripUnused()]); // sub bucket
+#endif
+}
+
+// Variant with 18- and 19-bit sub buckets; the 18-bit query at 0x3ffff must
+// match the 16-bit super bucket and the 19-bit sub bucket only.
+// Note: unlike its siblings, this test has no 64-bit guard.
+void
+LockableMapTest::testFindAllInconsistentlySplit6() {
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+
+ document::BucketId id1(16, 0x0ffff); // contains id2-id3
+ document::BucketId id2(18, 0x1ffff);
+ document::BucketId id3(19, 0x7ffff);
+
+ bool preExisted;
+ map.insert(id1.stripUnused().toKey(), A(1,2,3), "foo", preExisted);
+ map.insert(id2.stripUnused().toKey(), A(2,3,4), "foo", preExisted);
+ map.insert(id3.stripUnused().toKey(), A(3,4,5), "foo", preExisted);
+
+ document::BucketId id(18, 0x3ffff);
+ std::map<document::BucketId, Map::WrappedEntry> results =
+ map.getAll(id, "foo");
+
+ CPPUNIT_ASSERT_EQUAL((size_t)2, results.size());
+
+ CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]); // super bucket
+ CPPUNIT_ASSERT_EQUAL(A(3,4,5), *results[id3.stripUnused()]); // sub bucket
+}
+
+// getAll() must also resolve containment correctly for buckets using fewer
+// than 16 bits (1-, 3- and 4-bit buckets here).
+void
+LockableMapTest::testFindAllInconsistentBelow16Bits()
+{
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+
+ document::BucketId id1(1, 0x1); // contains id2-id3
+ document::BucketId id2(3, 0x1);
+ document::BucketId id3(4, 0xD);
+
+ bool preExisted;
+ map.insert(id1.stripUnused().toKey(), A(1,2,3), "foo", preExisted);
+ map.insert(id2.stripUnused().toKey(), A(2,3,4), "foo", preExisted);
+ map.insert(id3.stripUnused().toKey(), A(3,4,5), "foo", preExisted);
+
+ document::BucketId id(3, 0x5);
+
+ std::map<document::BucketId, Map::WrappedEntry> results =
+ map.getAll(id, "foo");
+
+ CPPUNIT_ASSERT_EQUAL(size_t(2), results.size());
+
+ CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]); // super bucket
+ CPPUNIT_ASSERT_EQUAL(A(3,4,5), *results[id3.stripUnused()]); // sub bucket
+}
+
+// createAppropriateBucket() with a minimum of 36 used bits should produce a
+// 36-bit bucket derived from the 58-bit input id; two creations leave two
+// entries in the map.
+void
+LockableMapTest::testCreate() {
+#if __WORDSIZE == 64
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+ {
+ document::BucketId id1(58, 0x43d6c878000004d2ull);
+
+ std::map<document::BucketId, Map::WrappedEntry> entries(
+ map.getContained(id1, "foo"));
+
+ CPPUNIT_ASSERT_EQUAL((size_t)0, entries.size());
+
+ Map::WrappedEntry entry = map.createAppropriateBucket(36, "", id1);
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(36,0x8000004d2ull),
+ entry.getBucketId());
+ }
+ {
+ document::BucketId id1(58, 0x423bf1e0000004d2ull);
+
+ std::map<document::BucketId, Map::WrappedEntry> entries(
+ map.getContained(id1, "foo"));
+ CPPUNIT_ASSERT_EQUAL((size_t)0, entries.size());
+
+ Map::WrappedEntry entry = map.createAppropriateBucket(36, "", id1);
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(36,0x0000004d2ull),
+ entry.getBucketId());
+ }
+
+ CPPUNIT_ASSERT_EQUAL((size_t)2, map.size());
+#endif
+}
+
+// With an existing 58-bit entry sharing the low bits, creating a bucket with
+// minimum 16 used bits should split until it no longer overlaps, ending at
+// 34 bits here.
+void
+LockableMapTest::testCreate2() {
+#if __WORDSIZE == 64
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+ {
+ document::BucketId id1(58, 0xeaf77782000004d2);
+ Map::WrappedEntry entry(
+ map.get(id1.stripUnused().toKey(), "foo", true));
+ }
+ {
+ document::BucketId id1(58, 0x00000000000004d2);
+ std::map<document::BucketId, Map::WrappedEntry> entries(
+ map.getContained(id1, "foo"));
+
+ CPPUNIT_ASSERT_EQUAL((size_t)0, entries.size());
+
+ Map::WrappedEntry entry = map.createAppropriateBucket(16, "", id1);
+
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(34, 0x0000004d2ull),
+ entry.getBucketId());
+ }
+
+ CPPUNIT_ASSERT_EQUAL((size_t)2, map.size());
+#endif
+}
+
+// As testCreate2 but with two pre-existing entries that first diverge at a
+// deeper bit; the created bucket ends up using 40 bits.
+void
+LockableMapTest::testCreate3() {
+#if __WORDSIZE == 64
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+ {
+ document::BucketId id1(58, 0xeaf77780000004d2);
+ Map::WrappedEntry entry(
+ map.get(id1.stripUnused().toKey(), "foo", true));
+ }
+ {
+ document::BucketId id1(58, 0xeaf77782000004d2);
+ Map::WrappedEntry entry(
+ map.get(id1.stripUnused().toKey(), "foo", true));
+ }
+ {
+ document::BucketId id1(58, 0x00000000000004d2);
+ std::map<document::BucketId, Map::WrappedEntry> entries(
+ map.getContained(id1, "foo"));
+
+ CPPUNIT_ASSERT_EQUAL((size_t)0, entries.size());
+
+ Map::WrappedEntry entry = map.createAppropriateBucket(16, "", id1);
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(40, 0x0000004d2ull),
+ entry.getBucketId());
+ }
+#endif
+}
+
+// Pre-existing 16- and 40-bit entries force the new bucket to 25 used bits,
+// the first level that separates it from both.
+void
+LockableMapTest::testCreate4() {
+#if __WORDSIZE == 64
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+ {
+ document::BucketId id1(16, 0x00000000000004d1);
+ Map::WrappedEntry entry(
+ map.get(id1.stripUnused().toKey(), "foo", true));
+ }
+ {
+ document::BucketId id1(40, 0x00000000000004d2);
+ Map::WrappedEntry entry(
+ map.get(id1.stripUnused().toKey(), "foo", true));
+ }
+ {
+ document::BucketId id1(58, 0x00000000010004d2);
+ Map::WrappedEntry entry = map.createAppropriateBucket(16, "", id1);
+
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(25, 0x0010004d2ull),
+ entry.getBucketId());
+ }
+#endif
+}
+
+// Uses raw 64-bit bucket keys (used-bits encoded in the high bits) rather
+// than (usedBits, value) pairs; checks the created bucket's full raw id.
+void
+LockableMapTest::testCreate6() {
+#if __WORDSIZE == 64
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+ {
+ document::BucketId id1(0x8c000000000004d2);
+ Map::WrappedEntry entry(
+ map.get(id1.stripUnused().toKey(), "foo", true));
+ }
+
+ {
+ document::BucketId id1(0xeb54b3ac000004d2);
+ Map::WrappedEntry entry(
+ map.get(id1.stripUnused().toKey(), "foo", true));
+ }
+
+ {
+ document::BucketId id1(0x88000002000004d2);
+ Map::WrappedEntry entry(
+ map.get(id1.stripUnused().toKey(), "foo", true));
+ }
+ {
+ document::BucketId id1(0x84000001000004d2);
+ Map::WrappedEntry entry(
+ map.get(id1.stripUnused().toKey(), "foo", true));
+ }
+ {
+ document::BucketId id1(0xe9944a44000004d2);
+ Map::WrappedEntry entry = map.createAppropriateBucket(16, "", id1);
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(0x90000004000004d2),
+ entry.getBucketId());
+ }
+#endif
+}
+
+
+// As testCreate4 but with a 58-bit entry instead of the 16-bit one; the
+// created bucket still ends up at 25 used bits. (Defined after testCreate6
+// in this file.)
+void
+LockableMapTest::testCreate5() {
+#if __WORDSIZE == 64
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+ {
+ document::BucketId id1(58, 0xeaf77780000004d2);
+ Map::WrappedEntry entry(
+ map.get(id1.stripUnused().toKey(), "foo", true));
+ }
+ {
+ document::BucketId id1(40, 0x00000000000004d1);
+
+ Map::WrappedEntry entry(
+ map.get(id1.stripUnused().toKey(), "foo", true));
+ }
+ {
+ document::BucketId id1(58, 0x00000000010004d2);
+ Map::WrappedEntry entry = map.createAppropriateBucket(16, "", id1);
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(25, 0x0010004d2ull),
+ entry.getBucketId());
+ }
+#endif
+}
+
+// On an empty map, createAppropriateBucket() should fall back to the
+// requested minimum of 16 used bits.
+void
+LockableMapTest::testCreateEmpty() {
+#if __WORDSIZE == 64
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+ {
+ document::BucketId id1(58, 0x00000000010004d2);
+ Map::WrappedEntry entry = map.createAppropriateBucket(16, "", id1);
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 0x0000004d2ull),
+ entry.getBucketId());
+ }
+#endif
+}
+
+// isConsistent() should hold for a lone bucket and fail once a sub bucket of
+// it is also present in the map.
+void
+LockableMapTest::testIsConsistent()
+{
+ typedef LockableMap<JudyMultiMap<A> > Map;
+ Map map;
+ document::BucketId id1(16, 0x00001); // contains id2
+ document::BucketId id2(17, 0x00001);
+
+ bool preExisted;
+ map.insert(id1.stripUnused().toKey(), A(1,2,3), "foo", preExisted);
+ {
+ Map::WrappedEntry entry(
+ map.get(id1.stripUnused().toKey(), "foo", true));
+ CPPUNIT_ASSERT(map.isConsistent(entry));
+ }
+ map.insert(id2.stripUnused().toKey(), A(1,2,3), "foo", preExisted);
+ {
+ Map::WrappedEntry entry(
+ map.get(id1.stripUnused().toKey(), "foo", true));
+ CPPUNIT_ASSERT(!map.isConsistent(entry));
+ }
+}
+
+} // storage
diff --git a/storage/src/tests/bucketmover/CMakeLists.txt b/storage/src/tests/bucketmover/CMakeLists.txt
new file mode 100644
index 00000000000..2d02cdc4942
--- /dev/null
+++ b/storage/src/tests/bucketmover/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_library(storage_testbucketmover
+ SOURCES
+ bucketmovertest.cpp
+ htmltabletest.cpp
+ DEPENDS
+ AFTER
+ storage_storageconfig
+)
diff --git a/storage/src/tests/bucketmover/bucketmovertest.cpp b/storage/src/tests/bucketmover/bucketmovertest.cpp
new file mode 100644
index 00000000000..2720e6bac2a
--- /dev/null
+++ b/storage/src/tests/bucketmover/bucketmovertest.cpp
@@ -0,0 +1,190 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/storage/bucketdb/storbucketdb.h>
+#include <vespa/storage/common/bucketmessages.h>
+#include <vespa/storage/bucketmover/bucketmover.h>
+#include <tests/common/dummystoragelink.h>
+#include <tests/common/testhelper.h>
+#include <tests/common/teststorageapp.h>
+#include <vespa/storage/bucketdb/storbucketdb.h>
+
+bool debug = false;
+
+namespace storage {
+namespace bucketmover {
+
+// CppUnit fixture for the BucketMover component: builds a 4-disk test node
+// with a dummy link below the mover and verifies which move commands it
+// emits.
+struct BucketMoverTest : public CppUnit::TestFixture {
+public:
+    void setUp();
+    void tearDown();
+
+    void testNormalUsage();
+    void testMaxPending();
+    void testErrorHandling();
+
+    CPPUNIT_TEST_SUITE(BucketMoverTest);
+    CPPUNIT_TEST(testNormalUsage);
+    CPPUNIT_TEST(testMaxPending);
+    CPPUNIT_TEST(testErrorHandling);
+    CPPUNIT_TEST_SUITE_END();
+
+    // Owned test harness; 'after' is owned by _bucketMover via push_back.
+    std::unique_ptr<TestServiceLayerApp> _node;
+    std::unique_ptr<ServiceLayerComponent> _component;
+    std::unique_ptr<BucketMover> _bucketMover;
+    DummyStorageLink* after;
+
+private:
+    // Registers a bucket whose disk is offset idealDiff from its ideal disk.
+    void addBucket(const document::BucketId& id, uint16_t idealDiff);
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(BucketMoverTest);
+
+// Destroys the test node (and the chain hanging off it) between tests.
+void
+BucketMoverTest::tearDown()
+{
+    _node.reset(0);
+}
+
+// Builds a 4-disk service layer node with dummy persistence, a BucketMover
+// under test, and a DummyStorageLink below it to capture emitted commands.
+void
+BucketMoverTest::setUp()
+{
+    try {
+        _node.reset(new TestServiceLayerApp(DiskCount(4)));
+        _node->setupDummyPersistence();
+    } catch (config::InvalidConfigException& e) {
+        // Config failure is only reported; the test will fail later anyway.
+        fprintf(stderr, "%s\n", e.what());
+    }
+
+    _component.reset(new ServiceLayerComponent(_node->getComponentRegister(), "foo"));
+    _bucketMover.reset(new BucketMover("raw:", _node->getComponentRegister()));
+    after = new DummyStorageLink();
+    _bucketMover->push_back(StorageLink::UP(after));
+}
+
+// Inserts a bucket into the database on the disk that is idealDiff steps
+// (mod disk count) away from its ideal partition; idealDiff == 0 means the
+// bucket is already correctly placed.
+void
+BucketMoverTest::addBucket(const document::BucketId& id,
+                           uint16_t idealDiff)
+{
+    StorBucketDatabase::WrappedEntry entry(
+            _component->getBucketDatabase().get(
+                    id,
+                    "",
+                    StorBucketDatabase::CREATE_IF_NONEXISTING));
+
+    entry->setBucketInfo(api::BucketInfo(1,1,1));
+
+    uint16_t idealDisk = _component->getIdealPartition(id);
+    entry->disk = (idealDisk + idealDiff) % _component->getDiskCount();
+    entry.write();
+}
+
+// Three misplaced buckets should each produce a BucketDiskMoveCommand after
+// one tick; replying to two of them and ticking again yields no further
+// commands before the run is finished.
+void
+BucketMoverTest::testNormalUsage()
+{
+    // Buckets 1-3 are one disk off their ideal placement; 4-5 are correct.
+    for (uint32_t i = 1; i < 4; ++i) {
+        addBucket(document::BucketId(16, i), 1);
+    }
+    for (uint32_t i = 4; i < 6; ++i) {
+        addBucket(document::BucketId(16, i), 0);
+    }
+
+    _bucketMover->open();
+    _bucketMover->tick();
+
+    std::vector<api::StorageMessage::SP> msgs = after->getCommandsOnce();
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("BucketDiskMoveCommand("
+                        "BucketId(0x4000000000000002), source 3, target 2)"),
+            msgs[0]->toString());
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("BucketDiskMoveCommand("
+                        "BucketId(0x4000000000000001), source 2, target 1)"),
+            msgs[1]->toString());
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("BucketDiskMoveCommand("
+                        "BucketId(0x4000000000000003), source 1, target 0)"),
+            msgs[2]->toString());
+
+    // Acknowledge the first two moves only.
+    for (uint32_t i = 0; i < 2; ++i) {
+        after->sendUp(std::shared_ptr<api::StorageMessage>(
+                              ((api::StorageCommand*)msgs[i].get())->
+                              makeReply().release()));
+    }
+
+    _bucketMover->tick();
+    CPPUNIT_ASSERT_EQUAL(0, (int)after->getNumCommands());
+
+    _bucketMover->finishCurrentRun();
+}
+
+// With ~100 misplaced buckets, the mover must cap outstanding moves at the
+// default max-pending of 5, and issue exactly one new move per reply.
+void
+BucketMoverTest::testMaxPending()
+{
+    for (uint32_t i = 1; i < 100; ++i) {
+        addBucket(document::BucketId(16, i), 1);
+    }
+    for (uint32_t i = 101; i < 200; ++i) {
+        addBucket(document::BucketId(16, i), 0);
+    }
+
+    _bucketMover->open();
+    _bucketMover->tick();
+
+    std::vector<api::StorageMessage::SP> msgs = after->getCommandsOnce();
+    // 5 is the max pending default config.
+    CPPUNIT_ASSERT_EQUAL(5, (int)msgs.size());
+
+    // One reply frees one pending slot...
+    after->sendUp(std::shared_ptr<api::StorageMessage>(
+                          ((api::StorageCommand*)msgs[3].get())->
+                          makeReply().release()));
+
+    _bucketMover->tick();
+
+    // ...so exactly one new move command should be issued.
+    std::vector<api::StorageMessage::SP> msgs2 = after->getCommandsOnce();
+    CPPUNIT_ASSERT_EQUAL(1, (int)msgs2.size());
+}
+
+// After a move fails with INTERNAL_FAILURE, subsequent move commands in the
+// same run must avoid the disk that failed as a target.
+void
+BucketMoverTest::testErrorHandling()
+{
+    for (uint32_t i = 1; i < 100; ++i) {
+        addBucket(document::BucketId(16, i), 1);
+    }
+    for (uint32_t i = 101; i < 200; ++i) {
+        addBucket(document::BucketId(16, i), 0);
+    }
+
+    _bucketMover->open();
+    _bucketMover->tick();
+
+    std::vector<api::StorageMessage::SP> msgs = after->getCommandsOnce();
+    // 5 is the max pending default config.
+    CPPUNIT_ASSERT_EQUAL(5, (int)msgs.size());
+
+    // Fail the first move and remember which disk it targeted.
+    BucketDiskMoveCommand& cmd = static_cast<BucketDiskMoveCommand&>(*msgs[0]);
+    uint32_t targetDisk = cmd.getDstDisk();
+
+    std::unique_ptr<api::StorageReply> reply(cmd.makeReply().release());
+    reply->setResult(api::ReturnCode(api::ReturnCode::INTERNAL_FAILURE, "foobar"));
+    after->sendUp(std::shared_ptr<api::StorageMessage>(reply.release()));
+
+    // Acknowledge the remaining moves normally.
+    for (uint32_t i = 1; i < msgs.size(); ++i) {
+        after->sendUp(std::shared_ptr<api::StorageMessage>(
+                              ((api::StorageCommand*)msgs[i].get())->
+                              makeReply().release()));
+    }
+
+    _bucketMover->tick();
+
+    std::vector<api::StorageMessage::SP> msgs2 = after->getCommandsOnce();
+    CPPUNIT_ASSERT_EQUAL(5, (int)msgs2.size());
+
+    // No new move may target the disk that just failed.
+    for (uint32_t i = 0; i < msgs2.size(); ++i) {
+        BucketDiskMoveCommand& bdm = static_cast<BucketDiskMoveCommand&>(*msgs2[i]);
+        CPPUNIT_ASSERT(bdm.getDstDisk() != targetDisk);
+    }
+}
+
+} // bucketmover
+} // storage
diff --git a/storage/src/tests/bucketmover/htmltabletest.cpp b/storage/src/tests/bucketmover/htmltabletest.cpp
new file mode 100644
index 00000000000..98cf68d489a
--- /dev/null
+++ b/storage/src/tests/bucketmover/htmltabletest.cpp
@@ -0,0 +1,100 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/storage/bucketmover/htmltable.h>
+#include <tests/common/testhelper.h>
+
+namespace storage {
+
+// CppUnit fixture exercising HtmlTable's percentage and byte-size column
+// rendering against exact expected HTML output.
+struct HtmlTableTest : public CppUnit::TestFixture {
+
+    void testPercentageColumn();
+    void testByteSizeColumn();
+
+    CPPUNIT_TEST_SUITE(HtmlTableTest);
+    CPPUNIT_TEST(testPercentageColumn);
+    CPPUNIT_TEST(testByteSizeColumn);
+    CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(HtmlTableTest);
+
+// PercentageColumn with an explicit total of 100 should render raw values
+// with color limits applied; with no total given, percentages are computed
+// from the sum of all cells (30+80+100 = 210) and no colors are set.
+void HtmlTableTest::testPercentageColumn()
+{
+    // With total hardcoded to 100
+    {
+        HtmlTable table("disk");
+        PercentageColumn perc("fillrate", 100);
+        perc.addColorLimit(70, Column::LIGHT_GREEN);
+        perc.addColorLimit(85, Column::LIGHT_YELLOW);
+        perc.addColorLimit(100, Column::LIGHT_RED);
+        table.addColumn(perc);
+        table.addRow(0);
+        table.addRow(1);
+        table.addRow(2);
+        perc[0] = 30;
+        perc[1] = 80;
+        perc[2] = 100;
+        std::ostringstream ost;
+        table.print(ost);
+        std::string expected(
+"<table border=\"1\" cellpadding=\"2\" cellspacing=\"0\">\n"
+"<tr><th>disk</th><th>fillrate</th></tr>\n"
+"<tr><td>0</td><td bgcolor=\"#a0ffa0\" align=\"right\">30.00 %</td></tr>\n"
+"<tr><td>1</td><td bgcolor=\"#ffffa0\" align=\"right\">80.00 %</td></tr>\n"
+"<tr><td>2</td><td bgcolor=\"#ffa0a0\" align=\"right\">100.00 %</td></tr>\n"
+"</table>\n");
+        CPPUNIT_ASSERT_EQUAL(expected, ost.str());
+    }
+    // With automatically gathered total
+    {
+        HtmlTable table("disk");
+        PercentageColumn perc("fillrate");
+        table.addColumn(perc);
+        table.addRow(0);
+        table.addRow(1);
+        table.addRow(2);
+        perc[0] = 30;
+        perc[1] = 80;
+        perc[2] = 100;
+        std::ostringstream ost;
+        table.print(ost);
+        std::string expected(
+                "<table border=\"1\" cellpadding=\"2\" cellspacing=\"0\">\n"
+                "<tr><th>disk</th><th>fillrate</th></tr>\n"
+                "<tr><td>0</td><td align=\"right\">14.29 %</td></tr>\n"
+                "<tr><td>1</td><td align=\"right\">38.10 %</td></tr>\n"
+                "<tr><td>2</td><td align=\"right\">47.62 %</td></tr>\n"
+                "</table>\n");
+        CPPUNIT_ASSERT_EQUAL(expected, ost.str());
+    }
+}
+
+// ByteSizeColumn should pick one denomination (MB here, driven by the
+// largest value) and render every cell in that unit.
+void HtmlTableTest::testByteSizeColumn()
+{
+    {
+        HtmlTable table("disk");
+        ByteSizeColumn size("size");
+        table.addColumn(size);
+        table.addRow(0);
+        table.addRow(1);
+        table.addRow(2);
+        // Biggest value enforce the denomination
+        size[0] = 42123;
+        size[1] = 124123151;
+        size[2] = 6131231;
+        std::ostringstream ost;
+        table.print(ost);
+        std::string expected(
+                "<table border=\"1\" cellpadding=\"2\" cellspacing=\"0\">\n"
+                "<tr><th>disk</th><th>size</th></tr>\n"
+                "<tr><td>0</td><td align=\"right\">0 MB</td></tr>\n"
+                "<tr><td>1</td><td align=\"right\">118 MB</td></tr>\n"
+                "<tr><td>2</td><td align=\"right\">5 MB</td></tr>\n"
+                "</table>\n");
+        CPPUNIT_ASSERT_EQUAL(expected, ost.str());
+    }
+
+}
+
+} // storage
diff --git a/storage/src/tests/common/.gitignore b/storage/src/tests/common/.gitignore
new file mode 100644
index 00000000000..333f254ba10
--- /dev/null
+++ b/storage/src/tests/common/.gitignore
@@ -0,0 +1,8 @@
+*.So
+*.lo
+.*.swp
+.depend
+.depend.NEW
+.deps
+.libs
+Makefile
diff --git a/storage/src/tests/common/CMakeLists.txt b/storage/src/tests/common/CMakeLists.txt
new file mode 100644
index 00000000000..309308473e1
--- /dev/null
+++ b/storage/src/tests/common/CMakeLists.txt
@@ -0,0 +1,12 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_library(storage_testcommon
+ SOURCES
+ dummystoragelink.cpp
+ testhelper.cpp
+ metricstest.cpp
+ storagelinktest.cpp
+ teststorageapp.cpp
+ DEPENDS
+ AFTER
+ storage_storageconfig
+)
diff --git a/storage/src/tests/common/dummystoragelink.cpp b/storage/src/tests/common/dummystoragelink.cpp
new file mode 100644
index 00000000000..d05241cb5b5
--- /dev/null
+++ b/storage/src/tests/common/dummystoragelink.cpp
@@ -0,0 +1,191 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/storageframework/defaultimplementation/clock/realclock.h>
+#include <sys/time.h>
+#include "dummystoragelink.h"
+
+namespace storage {
+
+DummyStorageLink* DummyStorageLink::_last(0);
+
+DummyStorageLink::DummyStorageLink()
+ : StorageLink("Dummy storage link"),
+ _commands(),
+ _replies(),
+ _injected(),
+ _autoReply(false),
+ _useDispatch(false),
+ _ignore(false),
+ _waitMonitor()
+{
+ _last = this;
+}
+
+DummyStorageLink::~DummyStorageLink()
+{
+ // Often a chain with dummy link on top is deleted in unit tests.
+ // If they haven't been closed already, close them for a cleaner
+ // shutdown
+ if (getState() == OPENED) {
+ close();
+ flush();
+ }
+ closeNextLink();
+ reset();
+}
+
+bool
+DummyStorageLink::handleInjectedReply()
+{
+ vespalib::LockGuard guard(_lock);
+ if (!_injected.empty()) {
+ sendUp(*_injected.begin());
+ _injected.pop_front();
+ return true;
+ }
+ return false;
+}
+
+bool DummyStorageLink::onDown(const api::StorageMessage::SP& cmd)
+{
+ if (_ignore) {
+ return false;
+ }
+ bool injected = handleInjectedReply();
+ if (!injected && _autoReply) {
+ if (!cmd->getType().isReply()) {
+ std::shared_ptr<api::StorageReply> reply(
+ std::dynamic_pointer_cast<api::StorageCommand>(cmd)
+ ->makeReply().release());
+ reply->setResult(api::ReturnCode(
+ api::ReturnCode::OK, "Automatically generated reply"));
+ sendUp(reply);
+ }
+ }
+ if (isBottom()) {
+ vespalib::MonitorGuard lock(_waitMonitor);
+ {
+ vespalib::LockGuard guard(_lock);
+ _commands.push_back(cmd);
+ }
+ lock.broadcast();
+ return true;
+ }
+ return StorageLink::onDown(cmd);
+}
+
+bool DummyStorageLink::onUp(const api::StorageMessage::SP& reply) {
+ if (isTop()) {
+ vespalib::MonitorGuard lock(_waitMonitor);
+ {
+ vespalib::LockGuard guard(_lock);
+ _replies.push_back(reply);
+ }
+ lock.broadcast();
+ return true;
+ }
+ return StorageLink::onUp(reply);
+
+}
+
+void DummyStorageLink::injectReply(api::StorageReply* reply)
+{
+ assert(reply);
+ vespalib::LockGuard guard(_lock);
+ _injected.push_back(std::shared_ptr<api::StorageReply>(reply));
+}
+
+void DummyStorageLink::reset() {
+ vespalib::MonitorGuard lock(_waitMonitor);
+ vespalib::LockGuard guard(_lock);
+ _commands.clear();
+ _replies.clear();
+ _injected.clear();
+}
+
+void DummyStorageLink::waitForMessages(unsigned int msgCount, int timeout)
+{
+ framework::defaultimplementation::RealClock clock;
+ framework::MilliSecTime endTime(
+ clock.getTimeInMillis() + framework::MilliSecTime(timeout * 1000));
+ vespalib::MonitorGuard lock(_waitMonitor);
+ while (_commands.size() + _replies.size() < msgCount) {
+ if (timeout != 0 && clock.getTimeInMillis() > endTime) {
+ std::ostringstream ost;
+ ost << "Timed out waiting for " << msgCount << " messages to "
+ << "arrive in dummy storage link. Only "
+ << (_commands.size() + _replies.size()) << " messages seen "
+                << "after timeout of " << timeout << " seconds was reached.";
+ throw vespalib::IllegalStateException(ost.str(), VESPA_STRLOC);
+ }
+ if (timeout >= 0) {
+ lock.wait((endTime - clock.getTimeInMillis()).getTime());
+ } else {
+ lock.wait();
+ }
+ }
+}
+
+void DummyStorageLink::waitForMessage(const api::MessageType& type, int timeout)
+{
+ framework::defaultimplementation::RealClock clock;
+ framework::MilliSecTime endTime(
+ clock.getTimeInMillis() + framework::MilliSecTime(timeout * 1000));
+ vespalib::MonitorGuard lock(_waitMonitor);
+ while (true) {
+ for (uint32_t i=0; i<_commands.size(); ++i) {
+ if (_commands[i]->getType() == type) return;
+ }
+ for (uint32_t i=0; i<_replies.size(); ++i) {
+ if (_replies[i]->getType() == type) return;
+ }
+ if (timeout != 0 && clock.getTimeInMillis() > endTime) {
+ std::ostringstream ost;
+ ost << "Timed out waiting for " << type << " message to "
+ << "arrive in dummy storage link. Only "
+ << (_commands.size() + _replies.size()) << " messages seen "
+                << "after timeout of " << timeout << " seconds was reached.";
+ if (_commands.size() == 1) {
+ ost << " Found command of type " << _commands[0]->getType();
+ }
+ if (_replies.size() == 1) {
+                ost << " Found reply of type " << _replies[0]->getType();
+ }
+ throw vespalib::IllegalStateException(ost.str(), VESPA_STRLOC);
+ }
+ if (timeout >= 0) {
+ lock.wait((endTime - clock.getTimeInMillis()).getTime());
+ } else {
+ lock.wait();
+ }
+ }
+}
+
+api::StorageMessage::SP
+DummyStorageLink::getAndRemoveMessage(const api::MessageType& type)
+{
+ vespalib::MonitorGuard lock(_waitMonitor);
+ for (std::vector<api::StorageMessage::SP>::iterator it = _commands.begin();
+ it != _commands.end(); ++it)
+ {
+ if ((*it)->getType() == type) {
+ api::StorageMessage::SP result(*it);
+ _commands.erase(it);
+ return result;
+ }
+ }
+ for (std::vector<api::StorageMessage::SP>::iterator it = _replies.begin();
+ it != _replies.end(); ++it)
+ {
+ if ((*it)->getType() == type) {
+ api::StorageMessage::SP result(*it);
+ _replies.erase(it);
+ return result;
+ }
+ }
+ std::ostringstream ost;
+ ost << "No message of type " << type << " found.";
+ throw vespalib::IllegalStateException(ost.str(), VESPA_STRLOC);
+}
+
+} // storage
diff --git a/storage/src/tests/common/dummystoragelink.h b/storage/src/tests/common/dummystoragelink.h
new file mode 100644
index 00000000000..072d961cbc0
--- /dev/null
+++ b/storage/src/tests/common/dummystoragelink.h
@@ -0,0 +1,121 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/vespalib/util/sync.h>
+#include <list>
+#include <sstream>
+#include <vespa/storageapi/messageapi/storagecommand.h>
+#include <string>
+#include <vector>
+#include <vespa/storage/common/storagelink.h>
+#include <vespa/storage/common/bucketmessages.h>
+#include <vespa/storageapi/message/internal.h>
+
+class FastOS_ThreadPool;
+
+namespace storage {
+
+class DummyStorageLink : public StorageLink {
+
+ mutable vespalib::Lock _lock; // to protect below containers:
+ std::vector<api::StorageMessage::SP> _commands;
+ std::vector<api::StorageMessage::SP> _replies;
+ std::list<api::StorageMessage::SP> _injected;
+
+ bool _autoReply;
+ bool _useDispatch;
+ bool _ignore;
+ static DummyStorageLink* _last;
+ vespalib::Monitor _waitMonitor;
+
+public:
+ DummyStorageLink();
+ ~DummyStorageLink();
+
+ bool onDown(const api::StorageMessage::SP&);
+ bool onUp(const api::StorageMessage::SP&);
+
+ void addOnTopOfChain(StorageLink& link) {
+ link.addTestLinkOnTop(this);
+ }
+
+ void print(std::ostream& ost, bool verbose, const std::string& indent) const
+ {
+ (void) verbose;
+ ost << indent << "DummyStorageLink("
+ << "autoreply = " << (_autoReply ? "on" : "off")
+ << ", dispatch = " << (_useDispatch ? "on" : "off")
+ << ", " << _commands.size() << " commands"
+ << ", " << _replies.size() << " replies";
+ if (_injected.size() > 0)
+ ost << ", " << _injected.size() << " injected";
+ ost << ")";
+ }
+
+ void injectReply(api::StorageReply* reply);
+ void reset();
+ void setAutoreply(bool autoReply) { _autoReply = autoReply; }
+ void setIgnore(bool ignore) { _ignore = ignore; }
+ // Timeout is given in seconds
+ void waitForMessages(unsigned int msgCount = 1, int timeout = -1);
+ // Wait for a single message of a given type
+ void waitForMessage(const api::MessageType&, int timeout = -1);
+
+ api::StorageMessage::SP getCommand(size_t i) const {
+ vespalib::LockGuard guard(_lock);
+ api::StorageMessage::SP ret = _commands[i];
+ return ret;
+ }
+ api::StorageMessage::SP getReply(size_t i) const {
+ vespalib::LockGuard guard(_lock);
+ api::StorageMessage::SP ret = _replies[i];
+ return ret;
+ }
+ size_t getNumCommands() const {
+ vespalib::LockGuard guard(_lock);
+ return _commands.size();
+ }
+ size_t getNumReplies() const {
+ vespalib::LockGuard guard(_lock);
+ return _replies.size();
+ }
+
+ const std::vector<api::StorageMessage::SP>& getCommands() const
+ { return _commands; }
+ const std::vector<api::StorageMessage::SP>& getReplies() const
+ { return _replies; }
+
+ std::vector<api::StorageMessage::SP> getCommandsOnce() {
+ vespalib::MonitorGuard lock(_waitMonitor);
+ std::vector<api::StorageMessage::SP> retval;
+ {
+ vespalib::LockGuard guard(_lock);
+ retval.swap(_commands);
+ }
+ return retval;
+ }
+
+ std::vector<api::StorageMessage::SP> getRepliesOnce() {
+ vespalib::MonitorGuard lock(_waitMonitor);
+ std::vector<api::StorageMessage::SP> retval;
+ {
+ vespalib::LockGuard guard(_lock);
+ retval.swap(_replies);
+ }
+ return retval;
+ }
+
+ api::StorageMessage::SP getAndRemoveMessage(const api::MessageType&);
+
+ static DummyStorageLink* getLast() { return _last; }
+private:
+ /**
+ * Auto-reply with an injected message if one is available and return
+ * whether such an injection took place.
+ */
+ bool handleInjectedReply();
+};
+
+}
+
diff --git a/storage/src/tests/common/hostreporter/CMakeLists.txt b/storage/src/tests/common/hostreporter/CMakeLists.txt
new file mode 100644
index 00000000000..f0cb197c5e2
--- /dev/null
+++ b/storage/src/tests/common/hostreporter/CMakeLists.txt
@@ -0,0 +1,14 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_library(storage_testhostreporter
+ SOURCES
+ cpureportertest.cpp
+ memreportertest.cpp
+ networkreportertest.cpp
+ versionreportertest.cpp
+ diskreportertest.cpp
+ util.cpp
+ hostinfotest.cpp
+ DEPENDS
+ AFTER
+ storage_storageconfig
+)
diff --git a/storage/src/tests/common/hostreporter/cpureportertest.cpp b/storage/src/tests/common/hostreporter/cpureportertest.cpp
new file mode 100644
index 00000000000..56a929c3aff
--- /dev/null
+++ b/storage/src/tests/common/hostreporter/cpureportertest.cpp
@@ -0,0 +1,40 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+#include <vespa/storage/common/hostreporter/cpureporter.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/vespalib/data/slime/slime.h>
+#include <vespa/vespalib/util/jsonstream.h>
+#include "util.h"
+
+LOG_SETUP(".test.cpureporter");
+
+namespace storage {
+namespace {
+using Object = vespalib::JsonStream::Object;
+using End = vespalib::JsonStream::End;
+}
+
+struct CpuReporterTest : public CppUnit::TestFixture
+{
+ void testCpuReporter();
+
+ CPPUNIT_TEST_SUITE(CpuReporterTest);
+ CPPUNIT_TEST(testCpuReporter);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(CpuReporterTest);
+
+void
+CpuReporterTest::testCpuReporter()
+{
+ CpuReporter cpuReporter;
+ vespalib::Slime slime;
+ util::reporterToSlime(cpuReporter, slime);
+ CPPUNIT_ASSERT(1.0 <= slime.get()["cpu"]["context switches"].asDouble());
+ CPPUNIT_ASSERT(1.0 <= slime.get()["cpu"]["cputotal"]["user"].asDouble());
+ CPPUNIT_ASSERT(1.0 <= slime.get()["cpu"]["cputotal"]["user"].asDouble());
+ CPPUNIT_ASSERT(1.0 <= slime.get()["cpu"]["cputotal"]["user"].asDouble());
+}
+} // storage
diff --git a/storage/src/tests/common/hostreporter/diskreportertest.cpp b/storage/src/tests/common/hostreporter/diskreportertest.cpp
new file mode 100644
index 00000000000..158a77c2e7e
--- /dev/null
+++ b/storage/src/tests/common/hostreporter/diskreportertest.cpp
@@ -0,0 +1,33 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+#include <vespa/storage/common/hostreporter/diskreporter.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/vespalib/data/slime/slime.h>
+#include <vespa/vespalib/util/jsonstream.h>
+#include "util.h"
+
+LOG_SETUP(".test.diskreporter");
+
+namespace storage {
+
+struct DiskReporterTest : public CppUnit::TestFixture
+{
+ void testDiskReporter();
+
+ CPPUNIT_TEST_SUITE(DiskReporterTest);
+ CPPUNIT_TEST(testDiskReporter);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(DiskReporterTest);
+
+void
+DiskReporterTest::testDiskReporter()
+{
+ DiskReporter diskReporter;
+ vespalib::Slime slime;
+ util::reporterToSlime(diskReporter, slime);
+ CPPUNIT_ASSERT(0 < slime.get()["disk"].toString().size());
+}
+} // storage
diff --git a/storage/src/tests/common/hostreporter/hostinfotest.cpp b/storage/src/tests/common/hostreporter/hostinfotest.cpp
new file mode 100644
index 00000000000..99954c19840
--- /dev/null
+++ b/storage/src/tests/common/hostreporter/hostinfotest.cpp
@@ -0,0 +1,60 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+#include <vespa/storage/common/hostreporter/hostinfo.h>
+#include <vespa/storage/common/hostreporter/hostreporter.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/vespalib/data/slime/slime.h>
+#include <vespa/vespalib/util/jsonstream.h>
+#include "util.h"
+
+LOG_SETUP(".test.hostinforeporter");
+
+namespace storage {
+namespace {
+using Object = vespalib::JsonStream::Object;
+using End = vespalib::JsonStream::End;
+using JsonFormat = vespalib::slime::JsonFormat;
+using Memory = vespalib::slime::Memory;
+
+class DummyReporter: public HostReporter {
+public:
+ void report(vespalib::JsonStream& jsonreport) override {
+ jsonreport << "dummy" << Object() << "foo" << "bar" << End();
+ }
+};
+}
+
+struct HostInfoReporterTest : public CppUnit::TestFixture
+{
+ void testHostInfoReporter();
+
+ CPPUNIT_TEST_SUITE(HostInfoReporterTest);
+ CPPUNIT_TEST(testHostInfoReporter);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(HostInfoReporterTest);
+
+void
+HostInfoReporterTest::testHostInfoReporter()
+{
+ HostInfo hostinfo;
+ DummyReporter dummyReporter;
+ hostinfo.registerReporter(&dummyReporter);
+ vespalib::asciistream json;
+ vespalib::JsonStream stream(json, true);
+
+ stream << Object();
+ hostinfo.printReport(stream);
+ stream << End();
+
+ std::string jsonData = json.str();
+ vespalib::Slime slime;
+ JsonFormat::decode(Memory(jsonData), slime);
+ CPPUNIT_ASSERT(slime.get()["dummy"]["foo"].asString() == "bar");
+ CPPUNIT_ASSERT(0 < slime.get()["network"]["lo"]["input"]["packets"].asLong());
+ CPPUNIT_ASSERT(1.0 <= slime.get()["cpu"]["context switches"].asDouble());
+}
+} // storage
+
diff --git a/storage/src/tests/common/hostreporter/memreportertest.cpp b/storage/src/tests/common/hostreporter/memreportertest.cpp
new file mode 100644
index 00000000000..3eedfd48a3c
--- /dev/null
+++ b/storage/src/tests/common/hostreporter/memreportertest.cpp
@@ -0,0 +1,44 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+#include <vespa/storage/common/hostreporter/memreporter.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/vespalib/data/slime/slime.h>
+#include <vespa/vespalib/util/jsonstream.h>
+#include "util.h"
+
+LOG_SETUP(".test.memreporter");
+
+namespace storage {
+namespace {
+using Object = vespalib::JsonStream::Object;
+using End = vespalib::JsonStream::End;
+}
+
+struct MemReporterTest : public CppUnit::TestFixture
+{
+ void testMemReporter();
+
+ CPPUNIT_TEST_SUITE(MemReporterTest);
+ CPPUNIT_TEST(testMemReporter);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(MemReporterTest);
+
+void
+MemReporterTest::testMemReporter()
+{
+ MemReporter memReporter;
+ vespalib::Slime slime;
+ util::reporterToSlime(memReporter, slime);
+ CPPUNIT_ASSERT(0 < slime.get()["memory"]["total memory"].asLong());
+ CPPUNIT_ASSERT(0 < slime.get()["memory"]["free memory"].asLong());
+ CPPUNIT_ASSERT(0 < slime.get()["memory"]["disk cache"].asLong());
+ CPPUNIT_ASSERT(0 < slime.get()["memory"]["active memory"].asLong());
+ CPPUNIT_ASSERT(0 < slime.get()["memory"]["inactive memory"].asLong());
+ CPPUNIT_ASSERT(0 <= slime.get()["memory"]["swap total"].asLong());
+ CPPUNIT_ASSERT(0 <= slime.get()["memory"]["swap free"].asLong());
+ CPPUNIT_ASSERT(0 < slime.get()["memory"]["dirty"].asLong());
+}
+} // storage
diff --git a/storage/src/tests/common/hostreporter/networkreportertest.cpp b/storage/src/tests/common/hostreporter/networkreportertest.cpp
new file mode 100644
index 00000000000..cba5717adce
--- /dev/null
+++ b/storage/src/tests/common/hostreporter/networkreportertest.cpp
@@ -0,0 +1,40 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+#include <vespa/storage/common/hostreporter/networkreporter.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/vespalib/data/slime/slime.h>
+#include <vespa/vespalib/util/jsonstream.h>
+#include "util.h"
+
+LOG_SETUP(".test.networkreporter");
+
+namespace storage {
+namespace {
+using Object = vespalib::JsonStream::Object;
+using End = vespalib::JsonStream::End;
+}
+
+struct NetworkReporterTest : public CppUnit::TestFixture
+{
+ void testNetworkReporter();
+
+ CPPUNIT_TEST_SUITE(NetworkReporterTest);
+ CPPUNIT_TEST(testNetworkReporter);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(NetworkReporterTest);
+
+void
+NetworkReporterTest::testNetworkReporter()
+{
+ NetworkReporter networkReporter;
+ vespalib::Slime slime;
+ util::reporterToSlime(networkReporter, slime);
+ CPPUNIT_ASSERT(0 < slime.get()["network"]["lo"]["input"]["bytes"].asLong());
+ CPPUNIT_ASSERT(0 < slime.get()["network"]["lo"]["input"]["packets"].asLong());
+ CPPUNIT_ASSERT(0 < slime.get()["network"]["lo"]["output"]["bytes"].asLong());
+ CPPUNIT_ASSERT(0 < slime.get()["network"]["lo"]["output"]["packets"].asLong());
+}
+} // storage
diff --git a/storage/src/tests/common/hostreporter/util.cpp b/storage/src/tests/common/hostreporter/util.cpp
new file mode 100644
index 00000000000..37d5803070d
--- /dev/null
+++ b/storage/src/tests/common/hostreporter/util.cpp
@@ -0,0 +1,34 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include "util.h"
+#include <vespa/storage/common/hostreporter/hostreporter.h>
+#include <vespa/vespalib/data/slime/slime.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/vespalib/util/jsonstream.h>
+
+namespace storage {
+namespace util {
+namespace {
+using Object = vespalib::JsonStream::Object;
+using End = vespalib::JsonStream::End;
+using JsonFormat = vespalib::slime::JsonFormat;
+using Memory = vespalib::slime::Memory;
+}
+
+void
+reporterToSlime(HostReporter &hostReporter, vespalib::Slime &slime) {
+ vespalib::asciistream json;
+ vespalib::JsonStream stream(json, true);
+
+ stream << Object();
+ hostReporter.report(stream);
+ stream << End();
+ std::string jsonData = json.str();
+ size_t parsedSize = JsonFormat::decode(Memory(jsonData), slime);
+
+ if (jsonData.size() != parsedSize) {
+ CPPUNIT_FAIL("Sizes of jsonData mismatched, probably not json:\n" + jsonData);
+ }
+}
+}
+}
diff --git a/storage/src/tests/common/hostreporter/util.h b/storage/src/tests/common/hostreporter/util.h
new file mode 100644
index 00000000000..e7fcf418bd3
--- /dev/null
+++ b/storage/src/tests/common/hostreporter/util.h
@@ -0,0 +1,16 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#ifndef VESPA_STORAGE_COMMON_UTIL
+#define VESPA_STORAGE_COMMON_UTIL
+
+#include <vespa/storage/common/hostreporter/hostreporter.h>
+#include <vespa/vespalib/data/slime/slime.h>
+
+namespace storage {
+namespace util {
+
+void
+reporterToSlime(HostReporter &hostReporter, vespalib::Slime &slime);
+}
+}
+
+#endif // VESPA_STORAGE_COMMON_UTIL
diff --git a/storage/src/tests/common/hostreporter/versionreportertest.cpp b/storage/src/tests/common/hostreporter/versionreportertest.cpp
new file mode 100644
index 00000000000..43c6e64b0de
--- /dev/null
+++ b/storage/src/tests/common/hostreporter/versionreportertest.cpp
@@ -0,0 +1,39 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+#include <vespa/storage/common/hostreporter/versionreporter.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/vespalib/data/slime/slime.h>
+#include <vespa/vespalib/util/jsonstream.h>
+#include "util.h"
+
+LOG_SETUP(".test.versionreporter");
+
+namespace storage {
+namespace {
+using Object = vespalib::JsonStream::Object;
+using End = vespalib::JsonStream::End;
+}
+
+struct VersionReporterTest : public CppUnit::TestFixture
+{
+ void testVersionReporter();
+
+ CPPUNIT_TEST_SUITE(VersionReporterTest);
+ CPPUNIT_TEST(testVersionReporter);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(VersionReporterTest);
+
+void
+VersionReporterTest::testVersionReporter()
+{
+ VersionReporter versionReporter;
+ vespalib::Slime slime;
+ util::reporterToSlime(versionReporter, slime);
+ std::string version = slime.get()["vtag"]["version"].asString().make_string().c_str();
+ CPPUNIT_ASSERT(version.length() > 2);
+    CPPUNIT_ASSERT(version.find(".") != std::string::npos);
+}
+} // storage
diff --git a/storage/src/tests/common/metricstest.cpp b/storage/src/tests/common/metricstest.cpp
new file mode 100644
index 00000000000..e06b2183380
--- /dev/null
+++ b/storage/src/tests/common/metricstest.cpp
@@ -0,0 +1,393 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <cppunit/extensions/HelperMacros.h>
+#include <vespa/document/fieldvalue/document.h>
+#include <vespa/document/bucket/bucketid.h>
+#include <vespa/log/log.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storageframework/defaultimplementation/clock/fakeclock.h>
+#include <vespa/storage/bucketdb/bucketmanager.h>
+#include <vespa/storageframework/storageframework.h>
+#include <vespa/storage/common/statusmetricconsumer.h>
+#include <vespa/storage/persistence/filestorage/filestormanager.h>
+#include <vespa/storage/visiting/visitormetrics.h>
+#include <vespa/documentapi/loadtypes/loadtype.h>
+#include <tests/common/teststorageapp.h>
+#include <tests/common/testhelper.h>
+#include <tests/common/dummystoragelink.h>
+#include <iostream>
+#include <string>
+#include <chrono>
+
+LOG_SETUP(".test.metrics");
+
+namespace storage {
+
+struct MetricsTest : public CppUnit::TestFixture {
+ FastOS_ThreadPool _threadPool;
+ framework::defaultimplementation::FakeClock* _clock;
+ std::unique_ptr<TestServiceLayerApp> _node;
+ std::unique_ptr<DummyStorageLink> _top;
+ std::unique_ptr<StatusMetricConsumer> _metricsConsumer;
+ std::unique_ptr<vdstestlib::DirConfig> _config;
+ std::unique_ptr<metrics::MetricSet> _topSet;
+ std::unique_ptr<metrics::MetricManager> _metricManager;
+ std::shared_ptr<FileStorMetrics> _filestorMetrics;
+ std::shared_ptr<BucketManagerMetrics> _bucketManagerMetrics;
+ std::shared_ptr<VisitorMetrics> _visitorMetrics;
+
+ void createSnapshotForPeriod(std::chrono::seconds secs);
+ void assertMetricLastValue(const std::string& name,
+ int interval,
+ uint64_t expected);
+
+ MetricsTest();
+
+ void setUp();
+ void tearDown();
+ void runLoad(uint32_t count = 1);
+ void createFakeLoad();
+
+ void testFileStorMetrics();
+ void testSnapshotPresenting();
+ void testHtmlMetricsReport();
+ void testCurrentGaugeValuesOverrideSnapshotValues();
+ void testVerboseReportIncludesNonSetMetricsEvenAfterSnapshot();
+
+ CPPUNIT_TEST_SUITE(MetricsTest);
+ CPPUNIT_TEST(testFileStorMetrics);
+ CPPUNIT_TEST(testSnapshotPresenting);
+ CPPUNIT_TEST(testHtmlMetricsReport);
+ CPPUNIT_TEST(testCurrentGaugeValuesOverrideSnapshotValues);
+ CPPUNIT_TEST(testVerboseReportIncludesNonSetMetricsEvenAfterSnapshot);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(MetricsTest);
+
+namespace {
+ struct MetricClock : public metrics::MetricManager::Timer
+ {
+ framework::Clock& _clock;
+ MetricClock(framework::Clock& c) : _clock(c) {}
+ virtual time_t getTime() const
+ { return _clock.getTimeInSeconds().getTime(); }
+ virtual time_t getTimeInMilliSecs() const
+ { return _clock.getTimeInMillis().getTime(); }
+ };
+}
+
+MetricsTest::MetricsTest()
+ : _threadPool(256*1024),
+ _clock(0),
+ _top(),
+ _metricsConsumer()
+{
+}
+
+void MetricsTest::setUp() {
+ assert(system("rm -rf vdsroot") == 0);
+ _config.reset(new vdstestlib::DirConfig(getStandardConfig(true)));
+ try {
+ _node.reset(new TestServiceLayerApp(DiskCount(4), NodeIndex(0),
+ _config->getConfigId()));
+ _node->setupDummyPersistence();
+ _clock = &_node->getClock();
+ _clock->setAbsoluteTimeInSeconds(1000000);
+ _top.reset(new DummyStorageLink);
+ } catch (config::InvalidConfigException& e) {
+ fprintf(stderr, "%s\n", e.what());
+ }
+ _metricManager.reset(new metrics::MetricManager(
+ std::unique_ptr<metrics::MetricManager::Timer>(
+ new MetricClock(*_clock))));
+ _topSet.reset(new metrics::MetricSet("vds", "", ""));
+ {
+ metrics::MetricLockGuard guard(_metricManager->getMetricLock());
+ _metricManager->registerMetric(guard, *_topSet);
+ }
+
+ _metricsConsumer.reset(new StatusMetricConsumer(
+ _node->getComponentRegister(),
+ *_metricManager,
+ "status"));
+
+ uint16_t diskCount = _node->getPartitions().size();
+ documentapi::LoadTypeSet::SP loadTypes(_node->getLoadTypes());
+
+ _filestorMetrics.reset(new FileStorMetrics(
+ _node->getLoadTypes()->getMetricLoadTypes()));
+ _filestorMetrics->initDiskMetrics(
+ diskCount, loadTypes->getMetricLoadTypes(), 1);
+ _topSet->registerMetric(*_filestorMetrics);
+
+ _bucketManagerMetrics.reset(new BucketManagerMetrics);
+ _bucketManagerMetrics->setDisks(diskCount);
+ _topSet->registerMetric(*_bucketManagerMetrics);
+
+ _visitorMetrics.reset(new VisitorMetrics);
+ _visitorMetrics->initThreads(4, loadTypes->getMetricLoadTypes());
+ _topSet->registerMetric(*_visitorMetrics);
+ _metricManager->init(_config->getConfigId(), _node->getThreadPool());
+}
+
+void MetricsTest::tearDown() {
+ _metricManager->stop();
+ _metricsConsumer.reset(0);
+ _topSet.reset(0);
+ _metricManager.reset(0);
+ _top.reset(0);
+ _node.reset(0);
+ _config.reset(0);
+ _filestorMetrics.reset();
+ _bucketManagerMetrics.reset();
+ _visitorMetrics.reset();
+}
+
+void MetricsTest::createFakeLoad()
+{
+ _clock->addSecondsToTime(1);
+ _metricManager->timeChangedNotification();
+ uint32_t n = 5;
+ for (uint32_t i=0; i<_bucketManagerMetrics->disks.size(); ++i) {
+ DataStoredMetrics& metrics(*_bucketManagerMetrics->disks[i]);
+ metrics.docs.inc(10 * n);
+ metrics.bytes.inc(10240 * n);
+ }
+ _filestorMetrics->directoryEvents.inc(5);
+ _filestorMetrics->partitionEvents.inc(4);
+ _filestorMetrics->diskEvents.inc(3);
+ for (uint32_t i=0; i<_filestorMetrics->disks.size(); ++i) {
+ FileStorDiskMetrics& disk(*_filestorMetrics->disks[i]);
+ disk.queueSize.addValue(4 * n);
+ disk.averageQueueWaitingTime[documentapi::LoadType::DEFAULT].addValue(10 * n);
+ disk.pendingMerges.addValue(4 * n);
+ for (uint32_t j=0; j<disk.threads.size(); ++j) {
+ FileStorThreadMetrics& thread(*disk.threads[j]);
+ thread.operations.inc(120 * n);
+ thread.failedOperations.inc(2 * n);
+
+ using documentapi::LoadType;
+
+ thread.put[LoadType::DEFAULT].count.inc(10 * n);
+ thread.put[LoadType::DEFAULT].latency.addValue(5 * n);
+ thread.get[LoadType::DEFAULT].count.inc(12 * n);
+ thread.get[LoadType::DEFAULT].notFound.inc(2 * n);
+ thread.get[LoadType::DEFAULT].latency.addValue(3 * n);
+ thread.remove[LoadType::DEFAULT].count.inc(6 * n);
+ thread.remove[LoadType::DEFAULT].notFound.inc(1 * n);
+ thread.remove[LoadType::DEFAULT].latency.addValue(2 * n);
+ thread.update[LoadType::DEFAULT].count.inc(2 * n);
+ thread.update[LoadType::DEFAULT].notFound.inc(1 * n);
+ thread.update[LoadType::DEFAULT].latencyRead.addValue(2 * n);
+ thread.update[LoadType::DEFAULT].latency.addValue(7 * n);
+ thread.revert[LoadType::DEFAULT].count.inc(2 * n);
+ thread.revert[LoadType::DEFAULT].notFound.inc(n / 2);
+ thread.revert[LoadType::DEFAULT].latency.addValue(2 * n);
+ thread.visit[LoadType::DEFAULT].count.inc(6 * n);
+
+ thread.deleteBuckets.count.inc(1 * n);
+ thread.repairs.count.inc(3 * n);
+ thread.repairFixed.inc(1 * n);
+ thread.splitBuckets.count.inc(20 * n);
+ thread.movedBuckets.count.inc(1 * n);
+ thread.readBucketInfo.count.inc(2 * n);
+ thread.internalJoin.count.inc(3 * n);
+
+ thread.mergeBuckets.count.inc(2 * n);
+ thread.bytesMerged.inc(1000 * n);
+ thread.getBucketDiff.count.inc(4 * n);
+ thread.getBucketDiffReply.inc(4 * n);
+ thread.applyBucketDiff.count.inc(4 * n);
+ thread.applyBucketDiffReply.inc(4 * n);
+ thread.mergeLatencyTotal.addValue(300 * n);
+ thread.mergeMetadataReadLatency.addValue(20 * n);
+ thread.mergeDataReadLatency.addValue(40 * n);
+ thread.mergeDataWriteLatency.addValue(50 * n);
+ thread.mergeAverageDataReceivedNeeded.addValue(0.8);
+ }
+ }
+ for (uint32_t i=0; i<_visitorMetrics->threads.size(); ++i) {
+ VisitorThreadMetrics& thread(*_visitorMetrics->threads[i]);
+ thread.queueSize.addValue(2);
+ thread.averageQueueWaitingTime[documentapi::LoadType::DEFAULT].addValue(10);
+ thread.averageVisitorLifeTime[documentapi::LoadType::DEFAULT].addValue(1000);
+ thread.createdVisitors[documentapi::LoadType::DEFAULT].inc(5 * n);
+ thread.abortedVisitors[documentapi::LoadType::DEFAULT].inc(1 * n);
+ thread.completedVisitors[documentapi::LoadType::DEFAULT].inc(4 * n);
+ thread.failedVisitors[documentapi::LoadType::DEFAULT].inc(2 * n);
+ }
+ _clock->addSecondsToTime(60);
+ _metricManager->timeChangedNotification();
+ while (uint64_t(_metricManager->getLastProcessedTime())
+ < _clock->getTimeInSeconds().getTime())
+ {
+ FastOS_Thread::Sleep(5);
+ _metricManager->timeChangedNotification();
+ }
+}
+
+void MetricsTest::testFileStorMetrics() {
+ createFakeLoad();
+ std::ostringstream ost;
+ framework::HttpUrlPath path("metrics?interval=-1&format=text");
+ bool retVal = _metricsConsumer->reportStatus(ost, path);
+ CPPUNIT_ASSERT_MESSAGE("_metricsConsumer->reportStatus failed", retVal);
+ std::string s = ost.str();
+ CPPUNIT_ASSERT_MESSAGE("No get statistics in:\n" + s,
+ s.find("vds.filestor.alldisks.allthreads.get.sum.count count=240") != std::string::npos);
+ CPPUNIT_ASSERT_MESSAGE("No put statistics in:\n" + s,
+ s.find("vds.filestor.alldisks.allthreads.put.sum.count count=200") != std::string::npos);
+ CPPUNIT_ASSERT_MESSAGE("No remove statistics in:\n" + s,
+ s.find("vds.filestor.alldisks.allthreads.remove.sum.count count=120") != std::string::npos);
+ CPPUNIT_ASSERT_MESSAGE("No removenotfound stats in:\n" + s,
+ s.find("vds.filestor.alldisks.allthreads.remove.sum.not_found count=20") != std::string::npos);
+}
+
+#define ASSERT_METRIC(interval, metric, count) \
+{ \
+ std::ostringstream pathost; \
+ pathost << "metrics?interval=" << interval << "&format=text"; \
+ std::ostringstream ost;\
+ framework::HttpUrlPath path(pathost.str()); \
+ bool retVal = _metricsConsumer->reportStatus(ost, path); \
+ CPPUNIT_ASSERT_MESSAGE("_metricsConsumer->reportStatus failed", retVal); \
+ std::string s = ost.str(); \
+ if (count == -1) { \
+ CPPUNIT_ASSERT_MESSAGE(std::string("Metric ") + metric + " was set", \
+ s.find(metric) == std::string::npos); \
+ } else { \
+ std::ostringstream valueost; \
+ valueost << metric << " count=" << count; \
+ CPPUNIT_ASSERT_MESSAGE("Did not find value " + valueost.str() \
+ + " in metric dump " + s, \
+ s.find(valueost.str()) != std::string::npos); \
+ } \
+}
+
+// Verifies that each snapshot interval presents the expected view of the
+// metrics: a get is counted before the 5 minute snapshot is taken, a put
+// after, so active (-2), 5 minute (300), total (0) and total+active (-1)
+// reports must each show exactly the counts that belong to their window.
+void MetricsTest::testSnapshotPresenting() {
+    FileStorDiskMetrics& disk0(*_filestorMetrics->disks[0]);
+    FileStorThreadMetrics& thread0(*disk0.threads[0]);
+
+    LOG(info, "Adding to get metric");
+
+    using documentapi::LoadType;
+    thread0.get[LoadType::DEFAULT].count.inc(1);
+
+    LOG(info, "Waiting for 5 minute snapshot to be taken");
+    // Wait until active metrics have been added to 5 min snapshot and reset
+    for (uint32_t i=0; i<6; ++i) {
+        _clock->addSecondsToTime(60);
+        _metricManager->timeChangedNotification();
+        // Busy-wait until the manager has caught up with the faked clock.
+        while (
+            uint64_t(_metricManager->getLastProcessedTime())
+                    < _clock->getTimeInSeconds().getTime())
+        {
+            FastOS_Thread::Sleep(1);
+        }
+    }
+    LOG(info, "5 minute snapshot should have been taken. Adding put count");
+
+    thread0.put[LoadType::DEFAULT].count.inc(1);
+
+    // Verify that active metrics have set put count but not get count
+    ASSERT_METRIC(-2, "vds.filestor.alldisks.allthreads.put.sum.count", 1);
+    ASSERT_METRIC(-2, "vds.filestor.alldisks.allthreads.get.sum.count", -1);
+
+    // Verify that 5 min metrics have set get count but not put count
+    ASSERT_METRIC(300, "vds.filestor.alldisks.allthreads.put.sum.count", -1);
+    ASSERT_METRIC(300, "vds.filestor.alldisks.allthreads.get.sum.count", 1);
+
+    // Verify that the total metrics is equal to 5 minute
+    ASSERT_METRIC(0, "vds.filestor.alldisks.allthreads.put.sum.count", -1);
+    ASSERT_METRIC(0, "vds.filestor.alldisks.allthreads.get.sum.count", 1);
+
+    // Verify that total + active have set both
+    ASSERT_METRIC(-1, "vds.filestor.alldisks.allthreads.put.sum.count", 1);
+    ASSERT_METRIC(-1, "vds.filestor.alldisks.allthreads.get.sum.count", 1);
+}
+
+// Smoke test for the HTML metric report: only checks that reporting succeeds,
+// then writes the page to metricsreport.html for manual inspection.
+void MetricsTest::testHtmlMetricsReport() {
+    createFakeLoad();
+    _clock->addSecondsToTime(6 * 60);
+    _metricManager->timeChangedNotification();
+    _metricsConsumer->waitUntilTimeProcessed(_clock->getTimeInSeconds());
+    // Add load again so both the snapshot and the active metrics have data.
+    createFakeLoad();
+    std::ostringstream ost;
+    framework::HttpUrlPath path("metrics?interval=300&format=html");
+    bool retVal = _metricsConsumer->reportStatus(ost, path);
+    CPPUNIT_ASSERT_MESSAGE("_metricsConsumer->reportStatus failed", retVal);
+    std::string s = ost.str();
+    // Not actually testing against content. Better to manually verify that
+    // HTML look sane after changes.
+    //std::cerr << s << "\n";
+    {
+        std::ofstream out("metricsreport.html");
+        out << s;
+        out.close();
+    }
+}
+
+/**
+ * Queries the metric consumer for a single metric (by name pattern) at the
+ * given snapshot interval and asserts that the report contains
+ * " last=<expected>" for it. Uses verbosity=2 so metrics that have never
+ * been set are still included in the dump.
+ */
+void
+MetricsTest::assertMetricLastValue(const std::string& name,
+                                   int interval,
+                                   uint64_t expected)
+{
+    std::ostringstream path;
+    path << "metrics?interval=" << interval
+         << "&format=text&pattern=" << name
+         << "&verbosity=2";
+    std::ostringstream report;
+    framework::HttpUrlPath uri(path.str());
+    CPPUNIT_ASSERT(_metricsConsumer->reportStatus(report, uri));
+    std::ostringstream expectedSubstr;
+    expectedSubstr << " last=" << expected;
+    auto str = report.str();
+    CPPUNIT_ASSERT_MESSAGE("Did not find value " + expectedSubstr.str()
+                           + " in metric dump " + str,
+                           str.find(expectedSubstr.str()) != std::string::npos);
+}
+
+using namespace std::chrono_literals;
+
+/**
+ * Advances the faked clock by `secs`, notifies the metric manager and blocks
+ * (polling in 100ms steps) until the manager has processed up to the new
+ * clock time, i.e. until the corresponding snapshot has been taken.
+ */
+void
+MetricsTest::createSnapshotForPeriod(std::chrono::seconds secs)
+{
+    _clock->addSecondsToTime(secs.count());
+    _metricManager->timeChangedNotification();
+    while (uint64_t(_metricManager->getLastProcessedTime())
+           < _clock->getTimeInSeconds().getTime())
+    {
+        std::this_thread::sleep_for(100ms);
+    }
+}
+
+// Gauge metrics report point-in-time values, so when snapshots and active
+// metrics are merged the current (active) gauge value must win over the
+// older snapshot value.
+void
+MetricsTest::testCurrentGaugeValuesOverrideSnapshotValues()
+{
+    auto& metrics(*_bucketManagerMetrics->disks[0]);
+    metrics.docs.set(1000);
+    // Take a 5 minute snapshot of active metrics (1000 docs).
+    createSnapshotForPeriod(5min);
+    metrics.docs.set(2000);
+    // Active metrics are now 2000 docs. Asking for metric snapshots with
+    // an interval of -1 implies that the _active_ metric values should
+    // be added to the total snapshot, which in the case of gauge metrics
+    // only makes sense if the _active_ gauge value gets reported back.
+    // In this case it means we should observe 2000 docs, not 1000.
+    assertMetricLastValue("vds.datastored.alldisks.docs", -1, 2000);
+}
+
+// A metric that has never been explicitly set must still show up (as zero)
+// in a verbosity=2 report, even after a snapshot has been taken.
+void
+MetricsTest::testVerboseReportIncludesNonSetMetricsEvenAfterSnapshot()
+{
+    createSnapshotForPeriod(5min);
+    // When using verbosity=2 (which is what the system test framework invokes),
+    // all metrics should be included regardless of whether they've been set or
+    // not. In this case, the bytes gauge metric has not been set explicitly
+    // but should be reported as zero.
+    assertMetricLastValue("vds.datastored.alldisks.bytes", -1, 0);
+}
+
+} // storage
diff --git a/storage/src/tests/common/storagelinktest.cpp b/storage/src/tests/common/storagelinktest.cpp
new file mode 100644
index 00000000000..34b774ac424
--- /dev/null
+++ b/storage/src/tests/common/storagelinktest.cpp
@@ -0,0 +1,57 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <tests/common/storagelinktest.h>
+#include <iostream>
+#include <string>
+#include <vespa/storageapi/message/stat.h>
+
+namespace storage {
+
+CPPUNIT_TEST_SUITE_REGISTRATION(StorageLinkTest);
+
+// Members start out null; the actual link chain is built in setUp(), where
+// ownership of _middle and _replier is transferred to _feeder's chain.
+StorageLinkTest::StorageLinkTest()
+    : _threadPool(1024),
+      _feeder(),
+      _middle(0),
+      _replier(0) {}
+
+void StorageLinkTest::setUp() {
+    // Build a three-link chain: _feeder -> _middle -> _replier. The raw
+    // pointers are retained only for test access; the chain owns the links.
+    _feeder.reset(new DummyStorageLink());
+    _middle = new DummyStorageLink();
+    _replier = new DummyStorageLink();
+    _feeder->push_back(StorageLink::UP(_middle));
+    _feeder->push_back(StorageLink::UP(_replier));
+    // The bottom link answers commands automatically.
+    _replier->setAutoreply(true);
+}
+
+// Verifies the human-readable chain dump produced by streaming a StorageLink
+// chain, including per-link autoreply/dispatch state and queue counts.
+void StorageLinkTest::testPrinting() {
+    std::ostringstream actual;
+    actual << *_feeder;
+    std::string expected =
+"StorageChain(3)\n"
+"  DummyStorageLink(autoreply = off, dispatch = off, 0 commands, 0 replies)\n"
+"  DummyStorageLink(autoreply = off, dispatch = off, 0 commands, 0 replies)\n"
+"  DummyStorageLink(autoreply = on, dispatch = off, 0 commands, 0 replies)";
+
+    CPPUNIT_ASSERT_EQUAL(expected, actual.str());
+}
+
+void StorageLinkTest::testNotImplemented() {
+    _feeder->open();
+    // Test that a message that nobody handles fails with NOT_IMPLEMENTED
+    _replier->setIgnore(true);
+    _feeder->sendDown(api::StorageCommand::SP(
+            new api::StatBucketCommand(document::BucketId(0), "")));
+    _feeder->close();
+    _feeder->flush();
+    CPPUNIT_ASSERT_EQUAL((size_t) 1, _feeder->getNumReplies());
+    // NOTE(review): CPPUNIT_ASSERT_EQUAL conventionally takes (expected,
+    // actual); arguments here are swapped, so a failure message would label
+    // them in reverse. Behavior of the check itself is unaffected.
+    CPPUNIT_ASSERT_EQUAL(
+            dynamic_cast<api::StatBucketReply&>(
+                *_feeder->getReply(0)).getResult(),
+            api::ReturnCode(api::ReturnCode::NOT_IMPLEMENTED, "Statbucket"));
+    _feeder->reset();
+    _replier->setIgnore(false);
+}
+
+} // storage
diff --git a/storage/src/tests/common/storagelinktest.h b/storage/src/tests/common/storagelinktest.h
new file mode 100644
index 00000000000..efeebb1146e
--- /dev/null
+++ b/storage/src/tests/common/storagelinktest.h
@@ -0,0 +1,46 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <cppunit/extensions/HelperMacros.h>
+#include <vespa/fastos/fastos.h>
+#include <tests/common/dummystoragelink.h>
+
+namespace storage {
+
+/**
+ * Exercises StorageLink chaining, printing and default message handling
+ * using DummyStorageLink instances.
+ */
+struct StorageLinkTest : public CppUnit::TestFixture {
+    FastOS_ThreadPool _threadPool;
+    std::unique_ptr<DummyStorageLink> _feeder; // Top of the chain; owns the rest
+    DummyStorageLink* _middle;  // Owned by _feeder's chain
+    DummyStorageLink* _replier; // Owned by _feeder's chain
+
+    StorageLinkTest();
+
+    void setUp();
+
+    void testPrinting();
+    void testNotImplemented();
+
+    // Static forwarders giving other test code access to StorageLink's
+    // onUp/onDown/onFlush hooks (presumably non-public on StorageLink —
+    // verify against its declaration).
+    static bool callOnUp(StorageLink& link,
+                         const api::StorageMessage::SP& msg)
+    {
+        return link.onUp(msg);
+    }
+    static bool callOnDown(StorageLink& link,
+                           const api::StorageMessage::SP& msg)
+    {
+        return link.onDown(msg);
+    }
+    static void callOnFlush(StorageLink& link, bool downwards)
+    {
+        link.onFlush(downwards);
+    }
+
+    CPPUNIT_TEST_SUITE(StorageLinkTest);
+    CPPUNIT_TEST(testPrinting);
+    CPPUNIT_TEST(testNotImplemented);
+    CPPUNIT_TEST_SUITE_END();
+};
+
+}
+
diff --git a/storage/src/tests/common/testhelper.cpp b/storage/src/tests/common/testhelper.cpp
new file mode 100644
index 00000000000..b8b42124d39
--- /dev/null
+++ b/storage/src/tests/common/testhelper.cpp
@@ -0,0 +1,209 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <tests/common/testhelper.h>
+
+#include <vespa/log/log.h>
+#include <vespa/vespalib/io/fileutil.h>
+
+LOG_SETUP(".testhelper");
+
+namespace storage {
+
+namespace {
+    // Returns true if a marker file named use_new_storage_core exists in any
+    // of the known run directories, switching the unit tests over to the new
+    // storage core.
+    bool useNewStorageCore() {
+        if ( // Unit test directory
+            vespalib::fileExists("use_new_storage_core") ||
+             // src/cpp directory
+            vespalib::fileExists("../use_new_storage_core") ||
+             // Top build directory where storage-HEAD remains
+            vespalib::fileExists("../../../../use_new_storage_core"))
+        {
+            std::cerr << "Using new storage core for unit tests\n";
+            return true;
+        }
+        return false;
+    }
+    // Evaluated once at static initialization time; read by getStandardConfig.
+    bool newStorageCore(useNewStorageCore());
+}
+
+// Replaces any existing stor-distribution config in dc with a flat
+// single-group configuration of 50 nodes (indexes 0-49) and redundancy 2.
+void addStorageDistributionConfig(vdstestlib::DirConfig& dc)
+{
+    vdstestlib::DirConfig::Config* config;
+    config = &dc.getConfig("stor-distribution", true);
+    config->clear();
+    config->set("group[1]");
+    // NOTE(review): name/index are set to "invalid" and never overwritten;
+    // presumably placeholders the tests do not read — confirm.
+    config->set("group[0].name", "invalid");
+    config->set("group[0].index", "invalid");
+    config->set("group[0].nodes[50]");
+    config->set("redundancy", "2");
+
+    for (uint32_t i = 0; i < 50; i++) {
+        std::ostringstream key; key << "group[0].nodes[" << i << "].index";
+        std::ostringstream val; val << i;
+        config->set(key.str(), val.str());
+    }
+}
+
+/**
+ * Builds the standard directory-based config set used by the storage unit
+ * tests. All config names/values are overridable by individual tests.
+ *
+ * @param storagenode true for a storage (service layer) node setup, false
+ *                    for a distributor setup (controls is_distributor and
+ *                    the root folder names).
+ * @return a DirConfig populated with all configs the node types consume.
+ */
+vdstestlib::DirConfig getStandardConfig(bool storagenode) {
+    std::string clusterName("storage");
+    vdstestlib::DirConfig dc;
+    vdstestlib::DirConfig::Config* config;
+    config = &dc.addConfig("fleetcontroller");
+    config->set("cluster_name", clusterName);
+    config->set("index", "0");
+    config->set("zookeeper_server", "\"\"");
+    config->set("total_distributor_count", "10");
+    config->set("total_storage_count", "10");
+    config = &dc.addConfig("upgrading");
+    config = &dc.addConfig("load-type");
+    config->set("type[10]");
+    config->set("type[0].id", "1");
+    config->set("type[0].name", "\"maintenance.inconsistent.join\"");
+    config->set("type[0].priority", "\"high_3\"");
+    config->set("type[1].id", "2");
+    config->set("type[1].name", "\"maintenance.inconsistent.split\"");
+    config->set("type[1].priority", "\"normal_1\"");
+    config->set("type[2].id", "3");
+    config->set("type[2].name", "\"maintenance.active.incorrectamount\"");
+    config->set("type[2].priority", "\"normal_2\"");
+    config->set("type[3].id", "4");
+    config->set("type[3].name", "\"maintenance.active.wrongcopy\"");
+    config->set("type[3].priority", "\"normal_3\"");
+    config->set("type[4].id", "5");
+    config->set("type[4].name", "\"maintenance.size.split\"");
+    config->set("type[4].priority", "\"normal_4\"");
+    config->set("type[5].id", "6");
+    config->set("type[5].name", "\"maintenance.size.join\"");
+    config->set("type[5].priority", "\"normal_5\"");
+    config->set("type[6].id", "7");
+    config->set("type[6].name", "\"maintenance.merge.toofewcopies\"");
+    config->set("type[6].priority", "\"normal_6\"");
+    config->set("type[7].id", "8");
+    config->set("type[7].name", "\"maintenance.merge.toomanycopies\"");
+    config->set("type[7].priority", "\"low_1\"");
+    config->set("type[8].id", "9");
+    config->set("type[8].name", "\"maintenance.merge.outofsync\"");
+    config->set("type[8].priority", "\"low_2\"");
+    config->set("type[9].id", "10");
+    config->set("type[9].name", "\"maintenance.move\"");
+    config->set("type[9].priority", "\"low_3\"");
+    config = &dc.addConfig("bucket");
+    config = &dc.addConfig("messagebus");
+    config = &dc.addConfig("stor-prioritymapping");
+    config = &dc.addConfig("stor-bucketdbupdater");
+    config = &dc.addConfig("stor-bucket-init");
+    config = &dc.addConfig("metricsmanager");
+    config->set("consumer[2]");
+    config->set("consumer[0].name", "\"status\"");
+    config->set("consumer[0].addedmetrics[1]");
+    config->set("consumer[0].addedmetrics[0]", "\"*\"");
+    config->set("consumer[1].name", "\"statereporter\"");
+    config->set("consumer[1].addedmetrics[1]");
+    config->set("consumer[1].addedmetrics[0]", "\"*\"");
+    config = &dc.addConfig("stor-communicationmanager");
+    config->set("rpcport", "0");
+    config->set("mbusport", "0");
+    config = &dc.addConfig("stor-bucketdb");
+    config->set("chunklevel", "0");
+    config = &dc.addConfig("stor-distributormanager");
+    config->set("splitcount", "1000");
+    config->set("splitsize", "10000000");
+    config->set("joincount", "500");
+    config->set("joinsize", "5000000");
+    config = &dc.addConfig("stor-opslogger");
+    config = &dc.addConfig("persistence");
+    config->set("abort_operations_with_changed_bucket_ownership", "true");
+    config = &dc.addConfig("stor-filestor");
+    // Easier to see what goes wrong with only 1 thread per disk.
+    config->set("minimum_file_meta_slots", "2");
+    config->set("minimum_file_header_block_size", "368");
+    config->set("minimum_file_size", "4096");
+    config->set("threads[1]");
+    // Fixed: key and value were previously fused into a single key string
+    // ("threads[0].lowestpri 255"), unlike every other key/value call here.
+    config->set("threads[0].lowestpri", "255");
+    config->set("dir_spread", "4");
+    config->set("dir_levels", "0");
+    config->set("use_new_core", newStorageCore ? "true" : "false");
+    config->set("maximum_versions_of_single_document_stored", "0");
+    //config->set("enable_slotfile_cache", "false");
+    // Unit tests typically use fake low time values, so don't complain
+    // about them or compact/delete them by default. Override in tests testing that
+    // behavior
+    config->set("time_future_limit", "5");
+    config->set("time_past_limit", "2000000000");
+    config->set("keep_remove_time_period", "2000000000");
+    config->set("revert_time_period", "2000000000");
+    // Don't want test to call exit()
+    config->set("fail_disk_after_error_count", "0");
+    config = &dc.addConfig("stor-bouncer");
+    config = &dc.addConfig("stor-integritychecker");
+    config = &dc.addConfig("stor-bucketmover");
+    config = &dc.addConfig("stor-messageforwarder");
+    config = &dc.addConfig("stor-server");
+    config->set("cluster_name", clusterName);
+    config->set("enable_dead_lock_detector", "false");
+    config->set("enable_dead_lock_detector_warnings", "false");
+    config->set("max_merges_per_node", "25");
+    config->set("max_merge_queue_size", "20");
+    config->set("root_folder",
+                (storagenode ? "vdsroot" : "vdsroot.distributor"));
+    config->set("is_distributor",
+                (storagenode ? "false" : "true"));
+    config = &dc.addConfig("stor-devices");
+    config->set("root_folder",
+                (storagenode ? "vdsroot" : "vdsroot.distributor"));
+    config = &dc.addConfig("stor-status");
+    config->set("httpport", "0");
+    config = &dc.addConfig("stor-visitor");
+    config->set("defaultdocblocksize", "8192");
+    // By default, need "old" behaviour of maxconcurrent
+    config->set("maxconcurrentvisitors_fixed", "4");
+    config->set("maxconcurrentvisitors_variable", "0");
+    config = &dc.addConfig("stor-visitordispatcher");
+    addFileConfig(dc, "documenttypes", "config-doctypes.cfg");
+    addStorageDistributionConfig(dc);
+    return dc;
+}
+
+// Points the "slobroks" config at the given (typically test-local) slobrok
+// instance, replacing any previous connection spec list with a single entry.
+void addSlobrokConfig(vdstestlib::DirConfig& dc,
+                          const mbus::Slobrok& slobrok)
+{
+    std::ostringstream ost;
+    ost << "tcp/localhost:" << slobrok.port();
+    vdstestlib::DirConfig::Config* config;
+    config = &dc.getConfig("slobroks", true);
+    config->clear();
+    config->set("slobrok[1]");
+    config->set("slobrok[0].connectionspec", ost.str());
+}
+
+// Replaces the config named configDefName with key/value pairs read from the
+// given file. Each line is either "key value" (split on the first space) or
+// a bare key. A missing/unreadable file silently yields an empty config.
+void addFileConfig(vdstestlib::DirConfig& dc,
+                   const std::string& configDefName,
+                   const std::string& fileName)
+{
+    vdstestlib::DirConfig::Config* config;
+    config = &dc.getConfig(configDefName, true);
+    config->clear();
+    std::ifstream in(fileName.c_str());
+    std::string line;
+    while (std::getline(in, line, '\n')) {
+        std::string::size_type pos = line.find(' ');
+        if (pos == std::string::npos) {
+            config->set(line);
+        } else {
+            config->set(line.substr(0, pos), line.substr(pos + 1));
+        }
+    }
+    in.close();
+}
+
+// Logs test start; pair with the destructor to bracket a test in the log.
+TestName::TestName(const std::string& n)
+    : name(n)
+{
+    LOG(debug, "Starting test %s", name.c_str());
+}
+
+// Logs test completion when the TestName goes out of scope.
+TestName::~TestName() {
+    LOG(debug, "Done with test %s", name.c_str());
+}
+
+} // storage
diff --git a/storage/src/tests/common/testhelper.h b/storage/src/tests/common/testhelper.h
new file mode 100644
index 00000000000..be2c3e7ec66
--- /dev/null
+++ b/storage/src/tests/common/testhelper.h
@@ -0,0 +1,58 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+#include <vespa/vdstestlib/cppunit/dirconfig.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+
+
+#include <fstream>
+#include <vespa/fastos/fastos.h>
+#include <vespa/messagebus/testlib/slobrok.h>
+#include <sstream>
+
+// Asserts that dummylink holds exactly `count` replies; on mismatch the
+// failure message lists every reply currently held to ease debugging.
+#define ASSERT_REPLY_COUNT(count, dummylink) \
+    { \
+        std::ostringstream msgost; \
+        if ((dummylink).getNumReplies() != count) { \
+            for (uint32_t ijx=0; ijx<(dummylink).getNumReplies(); ++ijx) { \
+                msgost << (dummylink).getReply(ijx)->toString(true) << "\n"; \
+            } \
+        } \
+        CPPUNIT_ASSERT_EQUAL_MSG(msgost.str(), size_t(count), \
+                                 (dummylink).getNumReplies()); \
+    }
+// Asserts that dummylink holds exactly `count` commands; on mismatch the
+// failure message lists every command currently held.
+#define ASSERT_COMMAND_COUNT(count, dummylink) \
+    { \
+        std::ostringstream msgost; \
+        if ((dummylink).getNumCommands() != count) { \
+            for (uint32_t ijx=0; ijx<(dummylink).getNumCommands(); ++ijx) { \
+                msgost << (dummylink).getCommand(ijx)->toString(true) << "\n"; \
+            } \
+        } \
+        CPPUNIT_ASSERT_EQUAL_MSG(msgost.str(), size_t(count), \
+                                 (dummylink).getNumCommands()); \
+    }
+
+namespace storage {
+
+void addFileConfig(vdstestlib::DirConfig& dc,
+ const std::string& configDefName,
+ const std::string& fileName);
+
+
+void addStorageDistributionConfig(vdstestlib::DirConfig& dc);
+
+vdstestlib::DirConfig getStandardConfig(bool storagenode);
+
+void addSlobrokConfig(vdstestlib::DirConfig& dc,
+ const mbus::Slobrok& slobrok);
+
+// Class used to print start and end of test. Enable debug when you want to see
+// which test creates what output or where we get stuck
+struct TestName {
+ std::string name;
+ TestName(const std::string& n);
+ ~TestName();
+};
+
+} // storage
+
diff --git a/storage/src/tests/common/testnodestateupdater.h b/storage/src/tests/common/testnodestateupdater.h
new file mode 100644
index 00000000000..9f5b2d8ba51
--- /dev/null
+++ b/storage/src/tests/common/testnodestateupdater.h
@@ -0,0 +1,50 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * \class storage::TestNodeStateUpdater
+ * \ingroup common
+ *
+ * \brief Test implementation of the node state updater.
+ */
+
+#pragma once
+
+#include <vespa/storage/common/nodestateupdater.h>
+
+namespace storage {
+
+struct TestNodeStateUpdater : public NodeStateUpdater
+{
+    lib::NodeState::CSP _reported; // State this node reports about itself
+    lib::NodeState::CSP _current;  // State the cluster controller has set
+    lib::ClusterState::CSP _cluster; // Last cluster state set by the test
+    std::vector<StateListener*> _listeners;
+
+public:
+    // Both reported and current state start out as UP for the given node type.
+    TestNodeStateUpdater(const lib::NodeType& type) {
+        _reported.reset(new lib::NodeState(type, lib::State::UP));
+        _current.reset(new lib::NodeState(type, lib::State::UP));
+    }
+
+    lib::NodeState::CSP getReportedNodeState() const { return _reported; }
+    lib::NodeState::CSP getCurrentNodeState() const { return _current; }
+    lib::ClusterState::CSP getSystemState() const { return _cluster; }
+    void addStateListener(StateListener& s) {
+        _listeners.push_back(&s);
+    }
+    // No-op: this test double never unregisters listeners.
+    void removeStateListener(StateListener&) {}
+    // Hands out a fresh dummy lock; no actual locking is performed.
+    Lock::SP grabStateChangeLock() { return Lock::SP(new Lock); }
+    void setReportedNodeState(const lib::NodeState& state)
+        { _reported.reset(new lib::NodeState(state)); }
+    void setCurrentNodeState(const lib::NodeState& state)
+        { _current.reset(new lib::NodeState(state)); }
+
+    // Stores the new cluster state and synchronously notifies all listeners.
+    void setClusterState(lib::ClusterState::CSP c) {
+        _cluster = c;
+        for (uint32_t i = 0; i < _listeners.size(); ++i) {
+            _listeners[i]->handleNewState();
+        }
+    }
+};
+
+} // storage
+
diff --git a/storage/src/tests/common/teststorageapp.cpp b/storage/src/tests/common/teststorageapp.cpp
new file mode 100644
index 00000000000..eb4c1c41c78
--- /dev/null
+++ b/storage/src/tests/common/teststorageapp.cpp
@@ -0,0 +1,292 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <tests/common/teststorageapp.h>
+
+#include <vespa/log/log.h>
+#include <vespa/persistence/dummyimpl/dummypersistence.h>
+#include <vespa/storage/bucketdb/storagebucketdbinitializer.h>
+#include <vespa/storage/config/config-stor-server.h>
+#include <vespa/storageframework/defaultimplementation/clock/realclock.h>
+#include <vespa/storageframework/defaultimplementation/memory/nomemorymanager.h>
+#include <vespa/config-fleetcontroller.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/vespalib/io/fileutil.h>
+
+LOG_SETUP(".test.servicelayerapp");
+
+using storage::framework::defaultimplementation::ComponentRegisterImpl;
+
+namespace storage {
+
+namespace {
+    // Helper that subscribes to the config identified by configId and copies
+    // every received config generation into itself (T is both the config
+    // type and the subscriber base).
+    template<typename T>
+    struct ConfigReader : public T::Subscriber,
+                          public T
+    {
+        ConfigReader(const std::string& configId) {
+            T::subscribe(configId, *this);
+        }
+        void configure(const T& c) { dynamic_cast<T&>(*this) = c; }
+    };
+}
+
+// Sets up a component register with node identity, document type repo, load
+// types and a default distribution. If configId is non-empty, cluster name,
+// node index, redundancy, node count, priority mapping and load types are
+// read from config; otherwise built-in defaults are used.
+TestStorageApp::TestStorageApp(StorageComponentRegisterImpl::UP compReg,
+                               const lib::NodeType& type, NodeIndex index,
+                               vespalib::stringref configId)
+    : TestComponentRegister(ComponentRegisterImpl::UP(std::move(compReg))),
+      _compReg(dynamic_cast<StorageComponentRegisterImpl&>(
+                    TestComponentRegister::getComponentRegister())),
+      _docMan(),
+      _nodeStateUpdater(type),
+      _configId(configId),
+      _initialized(false)
+{
+    // Use config to adjust values
+    vespalib::string clusterName = "mycluster";
+    uint32_t redundancy = 2;
+    uint32_t nodeCount = 10;
+    documentapi::LoadTypeSet::SP loadTypes;
+    if (!configId.empty()) {
+        config::ConfigUri uri(configId);
+        std::unique_ptr<vespa::config::content::core::StorServerConfig> serverConfig = config::ConfigGetter<vespa::config::content::core::StorServerConfig>::getConfig(uri.getConfigId(), uri.getContext());
+        clusterName = serverConfig->clusterName;
+        // 0xffff acts as an "unset" sentinel for the node index.
+        if (index == 0xffff) index = serverConfig->nodeIndex;
+        redundancy = config::ConfigGetter<vespa::config::content::StorDistributionConfig>::getConfig(uri.getConfigId(), uri.getContext())->redundancy;
+        nodeCount = config::ConfigGetter<vespa::config::content::FleetcontrollerConfig>::getConfig(uri.getConfigId(), uri.getContext())->totalStorageCount;
+        _compReg.setPriorityConfig(
+                *config::ConfigGetter<StorageComponent::PriorityConfig>
+                    ::getConfig(uri.getConfigId(), uri.getContext()));
+        loadTypes.reset(new documentapi::LoadTypeSet(
+                *config::ConfigGetter<vespa::config::content::LoadTypeConfig>
+                    ::getConfig(uri.getConfigId(), uri.getContext())));
+    } else {
+        if (index == 0xffff) index = 0;
+        loadTypes.reset(new documentapi::LoadTypeSet);
+    }
+    // Keep the distribution config self-consistent with the chosen index.
+    if (index >= nodeCount) nodeCount = index + 1;
+    if (redundancy > nodeCount) redundancy = nodeCount;
+
+    _compReg.setNodeInfo(clusterName, type, index);
+    _compReg.setNodeStateUpdater(_nodeStateUpdater);
+    _compReg.setDocumentTypeRepo(_docMan.getTypeRepoSP());
+    _compReg.setLoadTypes(loadTypes);
+    _compReg.setBucketIdFactory(document::BucketIdFactory());
+    lib::Distribution::SP distr(new lib::Distribution(
+            lib::Distribution::getDefaultDistributionConfig(
+                redundancy, nodeCount)));
+    _compReg.setDistribution(distr);
+}
+
+// Installs a fresh default distribution with the given redundancy/node count.
+void
+TestStorageApp::setDistribution(Redundancy redundancy, NodeCount nodeCount)
+{
+    lib::Distribution::SP distr(new lib::Distribution(
+            lib::Distribution::getDefaultDistributionConfig(
+                redundancy, nodeCount)));
+    _compReg.setDistribution(distr);
+}
+
+// Replaces the document type repo used by components under test.
+void
+TestStorageApp::setTypeRepo(document::DocumentTypeRepo::SP repo)
+{
+    _compReg.setDocumentTypeRepo(repo);
+}
+
+// Copies the given cluster state into the node state updater, which notifies
+// all registered state listeners synchronously.
+void
+TestStorageApp::setClusterState(const lib::ClusterState& c)
+{
+    _nodeStateUpdater.setClusterState(
+            lib::ClusterState::CSP(new lib::ClusterState(c)));
+}
+
+/**
+ * Blocks until isInitialized() returns true, failing the test if that does
+ * not happen within `timeout`. If an initializer is given, its status report
+ * is appended to the failure message.
+ */
+void
+TestStorageApp::waitUntilInitialized(
+        StorageBucketDBInitializer* initializer, framework::SecondTime timeout)
+{
+    // Always use real clock for wait timeouts. Component clock may be faked
+    // in tests
+    framework::defaultimplementation::RealClock clock;
+    framework::MilliSecTime endTime(
+            clock.getTimeInMillis() + timeout.getMillis());
+    while (!isInitialized()) {
+        FastOS_Thread::Sleep(1);
+        framework::MilliSecTime currentTime(clock.getTimeInMillis());
+        if (currentTime > endTime) {
+            std::ostringstream error;
+            error << "Failed to initialize service layer within timeout of "
+                  << timeout << " seconds.";
+            if (initializer != 0) {
+                error << " ";
+                initializer->reportStatus(error, framework::HttpUrlPath(""));
+            }
+            // Fixed: previously the log + CPPUNIT_FAIL were inside the
+            // initializer null-check, so a timeout without an initializer
+            // would loop forever instead of failing the test.
+            LOG(error, "%s", error.str().c_str());
+            CPPUNIT_FAIL(error.str().c_str());
+        }
+    }
+}
+
+namespace {
+    // Reads the node index from stor-server config when a config id is
+    // given; defaults to index 0 otherwise.
+    NodeIndex getIndexFromConfig(vespalib::stringref configId) {
+        if (!configId.empty()) {
+            config::ConfigUri uri(configId);
+            return NodeIndex(
+                config::ConfigGetter<vespa::config::content::core::StorServerConfig>::getConfig(uri.getConfigId(), uri.getContext())->nodeIndex);
+        }
+        return NodeIndex(0);
+    }
+}
+
+// Convenience constructor for a single-disk service layer app; the node
+// index is taken from config (or 0 when configId is empty).
+TestServiceLayerApp::TestServiceLayerApp(vespalib::stringref configId)
+    : TestStorageApp(
+            StorageComponentRegisterImpl::UP(
+                new ServiceLayerComponentRegisterImpl),
+            lib::NodeType::STORAGE, getIndexFromConfig(configId), configId),
+      _compReg(dynamic_cast<ServiceLayerComponentRegisterImpl&>(
+                    TestStorageApp::getComponentRegister())),
+      _persistenceProvider(),
+      _partitions(1)
+{
+    // Keep disk count consistent between the register and the reported state.
+    _compReg.setDiskCount(1);
+    lib::NodeState ns(*_nodeStateUpdater.getReportedNodeState());
+    ns.setDiskCount(1);
+    _nodeStateUpdater.setReportedNodeState(ns);
+}
+
+// Service layer app with an explicit disk count and node index.
+TestServiceLayerApp::TestServiceLayerApp(DiskCount dc, NodeIndex index,
+                                         vespalib::stringref configId)
+    : TestStorageApp(
+            StorageComponentRegisterImpl::UP(
+                new ServiceLayerComponentRegisterImpl),
+            lib::NodeType::STORAGE, index, configId),
+      _compReg(dynamic_cast<ServiceLayerComponentRegisterImpl&>(
+                    TestStorageApp::getComponentRegister())),
+      _persistenceProvider(),
+      _partitions(dc)
+{
+    _compReg.setDiskCount(dc);
+    lib::NodeState ns(*_nodeStateUpdater.getReportedNodeState());
+    ns.setDiskCount(dc);
+    _nodeStateUpdater.setReportedNodeState(ns);
+    // Tests should know how many disks they want to use. If testing auto
+    // detection, you should not need this utility.
+    CPPUNIT_ASSERT(dc > 0);
+}
+
+// Installs an in-memory dummy persistence provider sized to the register's
+// disk count, for tests that do not exercise real persistence.
+void
+TestServiceLayerApp::setupDummyPersistence()
+{
+    spi::PersistenceProvider::UP provider(new spi::dummy::DummyPersistence(
+            getTypeRepo(), _compReg.getDiskCount()));
+    setPersistenceProvider(std::move(provider));
+}
+
+// Takes ownership of the provider and caches its partition list, asserting
+// that the provider exposes exactly as many partitions as configured disks.
+void
+TestServiceLayerApp::setPersistenceProvider(
+        spi::PersistenceProvider::UP provider)
+{
+    _partitions = provider->getPartitionStates().getList();
+    CPPUNIT_ASSERT_EQUAL(spi::PartitionId(_compReg.getDiskCount()),
+                         _partitions.size());
+    _persistenceProvider = std::move(provider);
+}
+
+// Returns the installed provider; throws IllegalStateException if neither
+// setupDummyPersistence() nor setPersistenceProvider() has been called.
+spi::PersistenceProvider&
+TestServiceLayerApp::getPersistenceProvider()
+{
+    if (_persistenceProvider.get() == 0) {
+        throw vespalib::IllegalStateException(
+                "Persistence provider requested but not initialized.",
+                VESPA_STRLOC);
+    }
+    return *_persistenceProvider;
+}
+
+// Returns the partition list cached from the provider. Guarded by the same
+// provider check since _partitions only reflects reality once a provider
+// has been installed.
+spi::PartitionStateList&
+TestServiceLayerApp::getPartitions()
+{
+    if (_persistenceProvider.get() == 0) {
+        throw vespalib::IllegalStateException(
+                "Partition list requested but not initialized.",
+                VESPA_STRLOC);
+    }
+    return _partitions;
+}
+
+// Computes the ideal disk for a bucket on this node, counting disks that
+// are down as eligible targets.
+uint16_t
+TestServiceLayerApp::getPartition(const document::BucketId& bucket)
+{
+    lib::NodeState state(lib::NodeType::STORAGE, lib::State::UP);
+    state.setDiskCount(_compReg.getDiskCount());
+    return getDistribution()->getIdealDisk(
+            state, _compReg.getIndex(), bucket.stripUnused(),
+            lib::Distribution::IDEAL_DISK_EVEN_IF_DOWN);
+}
+
+namespace {
+    // Fetches and returns (by value) the config of type T for the given id.
+    template<typename T>
+    const T getConfig(vespalib::stringref configId) {
+        config::ConfigUri uri(configId);
+        return *config::ConfigGetter<T>::getConfig(
+                uri.getConfigId(), uri.getContext());
+    }
+}
+
+// Loads distributor and visitor dispatcher config for the given id into the
+// component register; no-op when the id is empty.
+void
+TestDistributorApp::configure(vespalib::stringref id)
+{
+    if (id.empty()) return;
+    DistributorConfig dc(getConfig<vespa::config::content::core::StorDistributormanagerConfig>(id));
+    _compReg.setDistributorConfig(dc);
+    VisitorConfig vc(getConfig<vespa::config::content::core::StorVisitordispatcherConfig>(id));
+    _compReg.setVisitorConfig(vc);
+}
+
+// Distributor test app; the node index is taken from config (or 0 when
+// configId is empty). Registers itself as the unique-time calculator.
+TestDistributorApp::TestDistributorApp(vespalib::stringref configId)
+    : TestStorageApp(
+            StorageComponentRegisterImpl::UP(
+                new DistributorComponentRegisterImpl),
+            lib::NodeType::DISTRIBUTOR, getIndexFromConfig(configId), configId),
+      _compReg(dynamic_cast<DistributorComponentRegisterImpl&>(
+                    TestStorageApp::getComponentRegister())),
+      _lastUniqueTimestampRequested(0),
+      _uniqueTimestampCounter(0)
+{
+    _compReg.setTimeCalculator(*this);
+    configure(configId);
+}
+
+// Distributor test app with an explicit node index.
+// Fixed: this constructor previously instantiated a plain
+// StorageComponentRegisterImpl, making the dynamic_cast to
+// DistributorComponentRegisterImpl& in the member initializer throw
+// std::bad_cast at runtime. It now creates the distributor register,
+// matching the config-id constructor above.
+TestDistributorApp::TestDistributorApp(NodeIndex index,
+                                       vespalib::stringref configId)
+    : TestStorageApp(
+            StorageComponentRegisterImpl::UP(
+                new DistributorComponentRegisterImpl),
+            lib::NodeType::DISTRIBUTOR, index, configId),
+      _compReg(dynamic_cast<DistributorComponentRegisterImpl&>(
+                    TestStorageApp::getComponentRegister())),
+      _lastUniqueTimestampRequested(0),
+      _uniqueTimestampCounter(0)
+{
+    _compReg.setTimeCalculator(*this);
+    configure(configId);
+}
+
+// Returns a strictly increasing timestamp: wall-clock seconds scaled to
+// microseconds, plus a per-second counter for requests within the same
+// second. Thread-safe via _accessLock.
+api::Timestamp
+TestDistributorApp::getUniqueTimestamp()
+{
+    vespalib::Lock lock(_accessLock);
+    uint64_t timeNow(getClock().getTimeInSeconds().getTime());
+    if (timeNow == _lastUniqueTimestampRequested) {
+        // Same second as last request: bump the sub-second counter.
+        // NOTE(review): more than 1,000,000 requests within one second would
+        // overflow into the next second's range — acceptable for tests.
+        ++_uniqueTimestampCounter;
+    } else {
+        if (timeNow < _lastUniqueTimestampRequested) {
+            LOG(error, "Time has moved backwards, from %" PRIu64 " to %" PRIu64 ".",
+                    _lastUniqueTimestampRequested, timeNow);
+        }
+        _lastUniqueTimestampRequested = timeNow;
+        _uniqueTimestampCounter = 0;
+    }
+
+    return _lastUniqueTimestampRequested * 1000000ll + _uniqueTimestampCounter;
+}
+
+} // storage
diff --git a/storage/src/tests/common/teststorageapp.h b/storage/src/tests/common/teststorageapp.h
new file mode 100644
index 00000000000..e7da9178743
--- /dev/null
+++ b/storage/src/tests/common/teststorageapp.h
@@ -0,0 +1,161 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * \class storage::TestServiceLayerApp
+ * \ingroup common
+ *
+ * \brief Helper class for tests involving service layer.
+ *
+ * Some components need some dependencies injected in order to work correctly.
+ * This test class simplifies the process of creating these dependencies.
+ *
+ * Note that the interface between this class and the test class should be as
+ * clean as possible, such that we can change as little as possible when
+ * refactoring later. Also, advanced functionality should not be generated in
+ * here, but rather fixed by tests themselves. Functionality here should be
+ * needed by many tests, and we should avoid instantiating complex instances
+ * here that several tests
+ */
+#pragma once
+
+#include <vespa/document/base/testdocman.h>
+#include <vespa/persistence/spi/persistenceprovider.h>
+#include <vespa/storage/bucketdb/distrbucketdb.h>
+#include <vespa/storage/bucketdb/storbucketdb.h>
+#include <vespa/storage/common/doneinitializehandler.h>
+#include <vespa/storage/common/nodestateupdater.h>
+#include <vespa/storage/storageserver/framework.h>
+#include <vespa/storage/frameworkimpl/component/distributorcomponentregisterimpl.h>
+#include <vespa/storage/frameworkimpl/component/servicelayercomponentregisterimpl.h>
+#include <vespa/storageframework/generic/memory/memorymanagerinterface.h>
+#include <vespa/storageframework/defaultimplementation/clock/realclock.h>
+#include <vespa/storageframework/defaultimplementation/component/testcomponentregister.h>
+#include <tests/common/testnodestateupdater.h>
+
+namespace storage {
+
+class StorageBucketDBInitializer;
+
+DEFINE_PRIMITIVE_WRAPPER(uint16_t, DiskCount);
+DEFINE_PRIMITIVE_WRAPPER(uint16_t, NodeIndex);
+DEFINE_PRIMITIVE_WRAPPER(uint16_t, NodeCount);
+DEFINE_PRIMITIVE_WRAPPER(uint16_t, Redundancy);
+
+class TestStorageApp
+    : public framework::defaultimplementation::TestComponentRegister,
+      private DoneInitializeHandler
+{
+    StorageComponentRegisterImpl& _compReg;
+
+protected:
+    document::TestDocMan _docMan;
+    TestNodeStateUpdater _nodeStateUpdater;
+    vespalib::string _configId;
+    bool _initialized; // Set by notifyDoneInitializing()
+
+public:
+    /**
+     * Set up a storage application. If node index is not set, it will be
+     * fetched from config if config id is given, otherwise it is set to 0.
+     * If configId is given, some critical values are taken from config
+     * (node count, redundancy, node index etc). If configId is not set these
+     * will just have some default values. A non-default node index will
+     * override config, but be careful with this, as components may fetch index
+     * from config themselves.
+     */
+    TestStorageApp(StorageComponentRegisterImpl::UP compReg,
+                   const lib::NodeType&, NodeIndex = NodeIndex(0xffff),
+                   vespalib::stringref configId = "");
+
+    // Set functions, to be able to modify content while running.
+    void setDistribution(Redundancy, NodeCount);
+    void setTypeRepo(document::DocumentTypeRepo::SP repo);
+    void setClusterState(const lib::ClusterState&);
+
+    // Utility functions for getting a hold of currently used bits. Practical
+    // to avoid adding extra components in the tests.
+    StorageComponentRegisterImpl& getComponentRegister() { return _compReg; }
+    document::TestDocMan& getTestDocMan() { return _docMan; }
+    document::DocumentTypeRepo::SP getTypeRepo()
+    { return _compReg.getTypeRepo(); }
+    const document::BucketIdFactory& getBucketIdFactory()
+    { return _compReg.getBucketIdFactory(); }
+    TestNodeStateUpdater& getStateUpdater() { return _nodeStateUpdater; }
+    documentapi::LoadTypeSet::SP getLoadTypes()
+    { return _compReg.getLoadTypes(); }
+    lib::Distribution::SP getDistribution()
+    { return _compReg.getDistribution(); }
+    TestNodeStateUpdater& getNodeStateUpdater() { return _nodeStateUpdater; }
+    uint16_t getIndex() const { return _compReg.getIndex(); }
+
+    // The storage app also implements the done initializer interface, so it can
+    // be sent to components needing this.
+    DoneInitializeHandler& getDoneInitializeHandler() { return *this; }
+    virtual void notifyDoneInitializing() { _initialized = true; }
+    bool isInitialized() const { return _initialized; }
+    void waitUntilInitialized(
+            StorageBucketDBInitializer* initializer = 0,
+            framework::SecondTime timeout = framework::SecondTime(30));
+
+private:
+    // Storage server interface implementation (until we can remove it).
+    // These assert if invoked; subclasses override the ones they support.
+    virtual api::Timestamp getUniqueTimestamp() { assert(0); throw; }
+    virtual StorBucketDatabase& getStorageBucketDatabase() { assert(0); throw; }
+    virtual distributor::BucketDatabase& getBucketDatabase() { assert(0); throw; }
+    virtual uint16_t getDiskCount() const { assert(0); throw; }
+};
+
+/**
+ * Test app for service layer tests: wires up a service layer component
+ * register, a pluggable persistence provider and a partition state list.
+ */
+class TestServiceLayerApp : public TestStorageApp
+{
+    ServiceLayerComponentRegisterImpl& _compReg;
+    spi::PersistenceProvider::UP _persistenceProvider;
+    spi::PartitionStateList _partitions;
+
+public:
+    TestServiceLayerApp(vespalib::stringref configId = "");
+    TestServiceLayerApp(DiskCount diskCount, NodeIndex = NodeIndex(0xffff),
+                        vespalib::stringref configId = "");
+
+    // Inject the persistence provider to use; setupDummyPersistence sets up
+    // a dummy one for tests that do not care which provider is used.
+    void setupDummyPersistence();
+    void setPersistenceProvider(spi::PersistenceProvider::UP);
+
+    // Shadows TestStorageApp::getComponentRegister() with the more specific
+    // service layer register type.
+    ServiceLayerComponentRegisterImpl& getComponentRegister()
+        { return _compReg; }
+
+    spi::PersistenceProvider& getPersistenceProvider();
+    spi::PartitionStateList& getPartitions();
+
+    uint16_t getPartition(const document::BucketId&);
+
+    virtual StorBucketDatabase& getStorageBucketDatabase()
+        { return _compReg.getBucketDatabase(); }
+
+private:
+    // For storage server interface implementation we'll get rid of soon.
+    // Use getPartitions().size() instead.
+    virtual uint16_t getDiskCount() const { return _compReg.getDiskCount(); }
+};
+
+/**
+ * Test app for distributor tests. Also implements UniqueTimeCalculator,
+ * handing out strictly unique timestamps per instance.
+ */
+class TestDistributorApp : public TestStorageApp,
+                           public UniqueTimeCalculator
+{
+    DistributorComponentRegisterImpl& _compReg;
+    vespalib::Lock _accessLock; // Guards the timestamp state below
+    uint64_t _lastUniqueTimestampRequested;
+    uint32_t _uniqueTimestampCounter;
+
+    // Applies distributor and visitor config from the given config id.
+    void configure(vespalib::stringref configId);
+
+public:
+    TestDistributorApp(vespalib::stringref configId = "");
+    TestDistributorApp(NodeIndex index, vespalib::stringref configId = "");
+
+    // Shadows TestStorageApp::getComponentRegister() with the distributor
+    // specific register type.
+    DistributorComponentRegisterImpl& getComponentRegister()
+    { return _compReg; }
+    virtual distributor::BucketDatabase& getBucketDatabase()
+    { return _compReg.getBucketDatabase(); }
+
+    virtual api::Timestamp getUniqueTimestamp();
+};
+
+} // storage
+
diff --git a/storage/src/tests/config-doctypes.cfg b/storage/src/tests/config-doctypes.cfg
new file mode 100644
index 00000000000..f41593ebfc3
--- /dev/null
+++ b/storage/src/tests/config-doctypes.cfg
@@ -0,0 +1,158 @@
+enablecompression false
+documenttype[3]
+documenttype[0].id -519202262
+documenttype[0].name "text/plain"
+documenttype[0].version 0
+documenttype[0].headerstruct 160469461
+documenttype[0].bodystruct 749465898
+documenttype[0].inherits[0]
+documenttype[0].datatype[2]
+documenttype[0].datatype[0].id 160469461
+documenttype[0].datatype[0].type STRUCT
+documenttype[0].datatype[0].array.element.id 0
+documenttype[0].datatype[0].map.key.id 0
+documenttype[0].datatype[0].map.value.id 0
+documenttype[0].datatype[0].wset.key.id 0
+documenttype[0].datatype[0].wset.createifnonexistent false
+documenttype[0].datatype[0].wset.removeifzero false
+documenttype[0].datatype[0].annotationref.annotation.id 0
+documenttype[0].datatype[0].sstruct.name "text/plain.header"
+documenttype[0].datatype[0].sstruct.version 0
+documenttype[0].datatype[0].sstruct.compression.type NONE
+documenttype[0].datatype[0].sstruct.compression.level 0
+documenttype[0].datatype[0].sstruct.compression.threshold 90
+documenttype[0].datatype[0].sstruct.compression.minsize 0
+documenttype[0].datatype[0].sstruct.field[3]
+documenttype[0].datatype[0].sstruct.field[0].name "author"
+documenttype[0].datatype[0].sstruct.field[0].id 644499292
+documenttype[0].datatype[0].sstruct.field[0].id_v6 177126295
+documenttype[0].datatype[0].sstruct.field[0].datatype 2
+documenttype[0].datatype[0].sstruct.field[1].name "date"
+documenttype[0].datatype[0].sstruct.field[1].id 491786523
+documenttype[0].datatype[0].sstruct.field[1].id_v6 916979460
+documenttype[0].datatype[0].sstruct.field[1].datatype 0
+documenttype[0].datatype[0].sstruct.field[2].name "subject"
+documenttype[0].datatype[0].sstruct.field[2].id 1797950813
+documenttype[0].datatype[0].sstruct.field[2].id_v6 943449689
+documenttype[0].datatype[0].sstruct.field[2].datatype 2
+documenttype[0].datatype[1].id 749465898
+documenttype[0].datatype[1].type STRUCT
+documenttype[0].datatype[1].array.element.id 0
+documenttype[0].datatype[1].map.key.id 0
+documenttype[0].datatype[1].map.value.id 0
+documenttype[0].datatype[1].wset.key.id 0
+documenttype[0].datatype[1].wset.createifnonexistent false
+documenttype[0].datatype[1].wset.removeifzero false
+documenttype[0].datatype[1].annotationref.annotation.id 0
+documenttype[0].datatype[1].sstruct.name "text/plain.body"
+documenttype[0].datatype[1].sstruct.version 0
+documenttype[0].datatype[1].sstruct.compression.type NONE
+documenttype[0].datatype[1].sstruct.compression.level 0
+documenttype[0].datatype[1].sstruct.compression.threshold 90
+documenttype[0].datatype[1].sstruct.compression.minsize 0
+documenttype[0].datatype[1].sstruct.field[1]
+documenttype[0].datatype[1].sstruct.field[0].name "content"
+documenttype[0].datatype[1].sstruct.field[0].id 1721764358
+documenttype[0].datatype[1].sstruct.field[0].id_v6 1751481844
+documenttype[0].datatype[1].sstruct.field[0].datatype 3
+documenttype[0].annotationtype[0]
+documenttype[1].id -653677105
+documenttype[1].name "text/html"
+documenttype[1].version 0
+documenttype[1].headerstruct 143329936
+documenttype[1].bodystruct 1473469605
+documenttype[1].inherits[0]
+documenttype[1].datatype[2]
+documenttype[1].datatype[0].id 143329936
+documenttype[1].datatype[0].type STRUCT
+documenttype[1].datatype[0].array.element.id 0
+documenttype[1].datatype[0].map.key.id 0
+documenttype[1].datatype[0].map.value.id 0
+documenttype[1].datatype[0].wset.key.id 0
+documenttype[1].datatype[0].wset.createifnonexistent false
+documenttype[1].datatype[0].wset.removeifzero false
+documenttype[1].datatype[0].annotationref.annotation.id 0
+documenttype[1].datatype[0].sstruct.name "text/html.header"
+documenttype[1].datatype[0].sstruct.version 0
+documenttype[1].datatype[0].sstruct.compression.type NONE
+documenttype[1].datatype[0].sstruct.compression.level 0
+documenttype[1].datatype[0].sstruct.compression.threshold 90
+documenttype[1].datatype[0].sstruct.compression.minsize 0
+documenttype[1].datatype[0].sstruct.field[3]
+documenttype[1].datatype[0].sstruct.field[0].name "author"
+documenttype[1].datatype[0].sstruct.field[0].id 644499292
+documenttype[1].datatype[0].sstruct.field[0].id_v6 177126295
+documenttype[1].datatype[0].sstruct.field[0].datatype 2
+documenttype[1].datatype[0].sstruct.field[1].name "date"
+documenttype[1].datatype[0].sstruct.field[1].id 491786523
+documenttype[1].datatype[0].sstruct.field[1].id_v6 916979460
+documenttype[1].datatype[0].sstruct.field[1].datatype 0
+documenttype[1].datatype[0].sstruct.field[2].name "subject"
+documenttype[1].datatype[0].sstruct.field[2].id 1797950813
+documenttype[1].datatype[0].sstruct.field[2].id_v6 943449689
+documenttype[1].datatype[0].sstruct.field[2].datatype 2
+documenttype[1].datatype[1].id 1473469605
+documenttype[1].datatype[1].type STRUCT
+documenttype[1].datatype[1].array.element.id 0
+documenttype[1].datatype[1].map.key.id 0
+documenttype[1].datatype[1].map.value.id 0
+documenttype[1].datatype[1].wset.key.id 0
+documenttype[1].datatype[1].wset.createifnonexistent false
+documenttype[1].datatype[1].wset.removeifzero false
+documenttype[1].datatype[1].annotationref.annotation.id 0
+documenttype[1].datatype[1].sstruct.name "text/html.body"
+documenttype[1].datatype[1].sstruct.version 0
+documenttype[1].datatype[1].sstruct.compression.type NONE
+documenttype[1].datatype[1].sstruct.compression.level 0
+documenttype[1].datatype[1].sstruct.compression.threshold 90
+documenttype[1].datatype[1].sstruct.compression.minsize 0
+documenttype[1].datatype[1].sstruct.field[1]
+documenttype[1].datatype[1].sstruct.field[0].name "content"
+documenttype[1].datatype[1].sstruct.field[0].id 1721764358
+documenttype[1].datatype[1].sstruct.field[0].id_v6 1751481844
+documenttype[1].datatype[1].sstruct.field[0].datatype 3
+documenttype[1].annotationtype[0]
+documenttype[2].id 238423572
+documenttype[2].name "testdoctype1"
+documenttype[2].version 1
+documenttype[2].headerstruct -226322995
+documenttype[2].bodystruct -1016297758
+documenttype[2].inherits[0]
+documenttype[2].datatype[2]
+documenttype[2].datatype[0].id -226322995
+documenttype[2].datatype[0].type STRUCT
+documenttype[2].datatype[0].array.element.id 0
+documenttype[2].datatype[0].map.key.id 0
+documenttype[2].datatype[0].map.value.id 0
+documenttype[2].datatype[0].wset.key.id 0
+documenttype[2].datatype[0].wset.createifnonexistent false
+documenttype[2].datatype[0].wset.removeifzero false
+documenttype[2].datatype[0].annotationref.annotation.id 0
+documenttype[2].datatype[0].sstruct.name "testdoctype1.header"
+documenttype[2].datatype[0].sstruct.version 1
+documenttype[2].datatype[0].sstruct.compression.type NONE
+documenttype[2].datatype[0].sstruct.compression.level 0
+documenttype[2].datatype[0].sstruct.compression.threshold 90
+documenttype[2].datatype[0].sstruct.compression.minsize 0
+documenttype[2].datatype[0].sstruct.field[0]
+documenttype[2].datatype[1].id -1016297758
+documenttype[2].datatype[1].type STRUCT
+documenttype[2].datatype[1].array.element.id 0
+documenttype[2].datatype[1].map.key.id 0
+documenttype[2].datatype[1].map.value.id 0
+documenttype[2].datatype[1].wset.key.id 0
+documenttype[2].datatype[1].wset.createifnonexistent false
+documenttype[2].datatype[1].wset.removeifzero false
+documenttype[2].datatype[1].annotationref.annotation.id 0
+documenttype[2].datatype[1].sstruct.name "testdoctype1.body"
+documenttype[2].datatype[1].sstruct.version 1
+documenttype[2].datatype[1].sstruct.compression.type NONE
+documenttype[2].datatype[1].sstruct.compression.level 0
+documenttype[2].datatype[1].sstruct.compression.threshold 90
+documenttype[2].datatype[1].sstruct.compression.minsize 0
+documenttype[2].datatype[1].sstruct.field[1]
+documenttype[2].datatype[1].sstruct.field[0].name "content"
+documenttype[2].datatype[1].sstruct.field[0].id 5
+documenttype[2].datatype[1].sstruct.field[0].id_v6 5
+documenttype[2].datatype[1].sstruct.field[0].datatype 2
+documenttype[2].annotationtype[0]
diff --git a/storage/src/tests/config-document.cfg b/storage/src/tests/config-document.cfg
new file mode 100644
index 00000000000..0ec7e881ddf
--- /dev/null
+++ b/storage/src/tests/config-document.cfg
@@ -0,0 +1,78 @@
+enablecompression false
+datatype[6]
+datatype[0].id 143329936
+datatype[0].arraytype[0]
+datatype[0].weightedsettype[0]
+datatype[0].structtype[1]
+datatype[0].structtype[0].name text/html.header
+datatype[0].structtype[0].version 0
+datatype[0].structtype[0].field[3]
+datatype[0].structtype[0].field[0].name author
+datatype[0].structtype[0].field[0].id[0]
+datatype[0].structtype[0].field[0].datatype 2
+datatype[0].structtype[0].field[1].name subject
+datatype[0].structtype[0].field[1].id[0]
+datatype[0].structtype[0].field[1].datatype 2
+datatype[0].structtype[0].field[2].name date
+datatype[0].structtype[0].field[2].id[0]
+datatype[0].structtype[0].field[2].datatype 0
+datatype[0].documenttype[0]
+datatype[1].id 1473469605
+datatype[1].arraytype[0]
+datatype[1].weightedsettype[0]
+datatype[1].structtype[1]
+datatype[1].structtype[0].name text/html.body
+datatype[1].structtype[0].version 0
+datatype[1].structtype[0].field[1]
+datatype[1].structtype[0].field[0].name content
+datatype[1].structtype[0].field[0].id[0]
+datatype[1].structtype[0].field[0].datatype 3
+datatype[1].documenttype[0]
+datatype[2].id -653677105
+datatype[2].arraytype[0]
+datatype[2].weightedsettype[0]
+datatype[2].structtype[0]
+datatype[2].documenttype[1]
+datatype[2].documenttype[0].name text/html
+datatype[2].documenttype[0].version 0
+datatype[2].documenttype[0].inherits[0]
+datatype[2].documenttype[0].headerstruct 143329936
+datatype[2].documenttype[0].bodystruct 1473469605
+datatype[3].id 160469461
+datatype[3].arraytype[0]
+datatype[3].weightedsettype[0]
+datatype[3].structtype[1]
+datatype[3].structtype[0].name text/plain.header
+datatype[3].structtype[0].version 0
+datatype[3].structtype[0].field[3]
+datatype[3].structtype[0].field[0].name author
+datatype[3].structtype[0].field[0].id[0]
+datatype[3].structtype[0].field[0].datatype 2
+datatype[3].structtype[0].field[1].name subject
+datatype[3].structtype[0].field[1].id[0]
+datatype[3].structtype[0].field[1].datatype 2
+datatype[3].structtype[0].field[2].name date
+datatype[3].structtype[0].field[2].id[0]
+datatype[3].structtype[0].field[2].datatype 0
+datatype[3].documenttype[0]
+datatype[4].id 749465898
+datatype[4].arraytype[0]
+datatype[4].weightedsettype[0]
+datatype[4].structtype[1]
+datatype[4].structtype[0].name text/plain.body
+datatype[4].structtype[0].version 0
+datatype[4].structtype[0].field[1]
+datatype[4].structtype[0].field[0].name content
+datatype[4].structtype[0].field[0].id[0]
+datatype[4].structtype[0].field[0].datatype 3
+datatype[4].documenttype[0]
+datatype[5].id -519202262
+datatype[5].arraytype[0]
+datatype[5].weightedsettype[0]
+datatype[5].structtype[0]
+datatype[5].documenttype[1]
+datatype[5].documenttype[0].name text/plain
+datatype[5].documenttype[0].version 0
+datatype[5].documenttype[0].inherits[0]
+datatype[5].documenttype[0].headerstruct 160469461
+datatype[5].documenttype[0].bodystruct 749465898
diff --git a/storage/src/tests/config-testdocman-document.cfg b/storage/src/tests/config-testdocman-document.cfg
new file mode 100644
index 00000000000..c4bf43d9e37
--- /dev/null
+++ b/storage/src/tests/config-testdocman-document.cfg
@@ -0,0 +1,138 @@
+datatype[14]
+datatype[0].id 1001
+datatype[0].arraytype[1]
+datatype[0].arraytype[0].datatype 2
+datatype[1].id 2001
+datatype[1].weightedsettype[1]
+datatype[1].weightedsettype[0].datatype 2
+datatype[1].weightedsettype[0].createifnonexistant false
+datatype[1].weightedsettype[0].removeifzero false
+datatype[2].id -2092985851
+datatype[2].structtype[1]
+datatype[2].structtype[0].name mystruct
+datatype[2].structtype[0].version 2
+datatype[2].structtype[0].field[2]
+datatype[2].structtype[0].field[0].name key
+datatype[2].structtype[0].field[0].id[1]
+datatype[2].structtype[0].field[0].id[0].id 1
+datatype[2].structtype[0].field[0].datatype 0
+datatype[2].structtype[0].field[1].name value
+datatype[2].structtype[0].field[1].id[1]
+datatype[2].structtype[0].field[1].id[0].id 2
+datatype[2].structtype[0].field[1].datatype 2
+datatype[3].id -1244861287
+datatype[3].arraytype[1]
+datatype[3].arraytype[0].datatype 3
+datatype[4].id 759956026
+datatype[4].arraytype[1]
+datatype[4].arraytype[0].datatype -2092985851
+datatype[5].id -226322995
+datatype[5].structtype[1]
+datatype[5].structtype[0].name testdoctype1.header
+datatype[5].structtype[0].version 1
+datatype[5].structtype[0].field[9]
+datatype[5].structtype[0].field[0].name headerval
+datatype[5].structtype[0].field[0].id[1]
+datatype[5].structtype[0].field[0].id[0].id 2
+datatype[5].structtype[0].field[0].datatype 0
+datatype[5].structtype[0].field[1].name hfloatval
+datatype[5].structtype[0].field[1].id[1]
+datatype[5].structtype[0].field[1].id[0].id 3
+datatype[5].structtype[0].field[1].datatype 1
+datatype[5].structtype[0].field[2].name hstringval
+datatype[5].structtype[0].field[2].id[1]
+datatype[5].structtype[0].field[2].id[0].id 4
+datatype[5].structtype[0].field[2].datatype 2
+datatype[5].structtype[0].field[3].name mystruct
+datatype[5].structtype[0].field[3].id[1]
+datatype[5].structtype[0].field[3].id[0].id 513
+datatype[5].structtype[0].field[3].datatype -2092985851
+datatype[5].structtype[0].field[4].name stringweightedset
+datatype[5].structtype[0].field[4].id[1]
+datatype[5].structtype[0].field[4].id[0].id 7
+datatype[5].structtype[0].field[4].datatype 2001
+datatype[5].structtype[0].field[5].name stringweightedset2
+datatype[5].structtype[0].field[5].id[1]
+datatype[5].structtype[0].field[5].id[0].id 8
+datatype[5].structtype[0].field[5].datatype 18
+datatype[5].structtype[0].field[6].name tags
+datatype[5].structtype[0].field[6].id[1]
+datatype[5].structtype[0].field[6].id[0].id 6
+datatype[5].structtype[0].field[6].datatype 1001
+datatype[5].structtype[0].field[7].name title
+datatype[5].structtype[0].field[7].id[1]
+datatype[5].structtype[0].field[7].id[0].id 12
+datatype[5].structtype[0].field[7].datatype 2
+datatype[5].structtype[0].field[8].name headerlongval
+datatype[5].structtype[0].field[8].id[1]
+datatype[5].structtype[0].field[8].id[0].id 9999
+datatype[5].structtype[0].field[8].datatype 4
+datatype[6].id -1016297758
+datatype[6].structtype[1]
+datatype[6].structtype[0].name testdoctype1.body
+datatype[6].structtype[0].version 1
+datatype[6].structtype[0].field[3]
+datatype[6].structtype[0].field[0].name content
+datatype[6].structtype[0].field[0].id[1]
+datatype[6].structtype[0].field[0].id[0].id 5
+datatype[6].structtype[0].field[0].datatype 2
+datatype[6].structtype[0].field[1].name rawarray
+datatype[6].structtype[0].field[1].id[1]
+datatype[6].structtype[0].field[1].id[0].id 10
+datatype[6].structtype[0].field[1].datatype -1244861287
+datatype[6].structtype[0].field[2].name structarray
+datatype[6].structtype[0].field[2].id[1]
+datatype[6].structtype[0].field[2].id[0].id 7123
+datatype[6].structtype[0].field[2].datatype 759956026
+datatype[7].id 238423572
+datatype[7].documenttype[1]
+datatype[7].documenttype[0].name testdoctype1
+datatype[7].documenttype[0].version 1
+datatype[7].documenttype[0].headerstruct -226322995
+datatype[7].documenttype[0].bodystruct -1016297758
+datatype[8].id -422836500
+datatype[8].structtype[1]
+datatype[8].structtype[0].name testdoctype2.header
+datatype[8].structtype[0].version 1
+datatype[8].structtype[0].field[1]
+datatype[8].structtype[0].field[0].name onlyinchild
+datatype[8].structtype[0].field[0].id[1]
+datatype[8].structtype[0].field[0].id[0].id 9
+datatype[8].structtype[0].field[0].datatype 0
+datatype[9].id 726512577
+datatype[9].structtype[1]
+datatype[9].structtype[0].name testdoctype2.body
+datatype[9].structtype[0].version 1
+datatype[9].structtype[0].field[0]
+datatype[10].id 238424533
+datatype[10].documenttype[1]
+datatype[10].documenttype[0].name testdoctype2
+datatype[10].documenttype[0].version 1
+datatype[10].documenttype[0].inherits[1]
+datatype[10].documenttype[0].inherits[0].name testdoctype1
+datatype[10].documenttype[0].inherits[0].version 1
+datatype[10].documenttype[0].headerstruct -422836500
+datatype[10].documenttype[0].bodystruct 726512577
+datatype[11].id -1301366770
+datatype[11].structtype[1]
+datatype[11].structtype[0].name _test_doctype3_.header
+datatype[11].structtype[0].version 1
+datatype[11].structtype[0].field[1]
+datatype[11].structtype[0].field[0].name _only_in_child_
+datatype[11].structtype[0].field[0].id[1]
+datatype[11].structtype[0].field[0].id[0].id 9
+datatype[11].structtype[0].field[0].datatype 0
+datatype[12].id 1422804323
+datatype[12].structtype[1]
+datatype[12].structtype[0].name _test_doctype3_.body
+datatype[12].structtype[0].version 1
+datatype[12].structtype[0].field[0]
+datatype[13].id 1088783091
+datatype[13].documenttype[1]
+datatype[13].documenttype[0].name _test_doctype3_
+datatype[13].documenttype[0].version 1
+datatype[13].documenttype[0].inherits[1]
+datatype[13].documenttype[0].inherits[0].name testdoctype1
+datatype[13].documenttype[0].inherits[0].version 1
+datatype[13].documenttype[0].headerstruct -1301366770
+datatype[13].documenttype[0].bodystruct 1422804323
diff --git a/storage/src/tests/distributor/.gitignore b/storage/src/tests/distributor/.gitignore
new file mode 100644
index 00000000000..333f254ba10
--- /dev/null
+++ b/storage/src/tests/distributor/.gitignore
@@ -0,0 +1,8 @@
+*.So
+*.lo
+.*.swp
+.depend
+.depend.NEW
+.deps
+.libs
+Makefile
diff --git a/storage/src/tests/distributor/CMakeLists.txt b/storage/src/tests/distributor/CMakeLists.txt
new file mode 100644
index 00000000000..6c6ba62ba6e
--- /dev/null
+++ b/storage/src/tests/distributor/CMakeLists.txt
@@ -0,0 +1,44 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+# Library bundling the distributor unit tests; built after the generated
+# storage config library (storage_storageconfig).
+vespa_add_library(storage_testdistributor
+    SOURCES
+    mergelimitertest.cpp
+    bucketdatabasetest.cpp
+    messagesenderstub.cpp
+    externaloperationhandlertest.cpp
+    getoperationtest.cpp
+    idealstatemanagertest.cpp
+    putoperationtest.cpp
+    removeoperationtest.cpp
+    removebucketoperationtest.cpp
+    mergeoperationtest.cpp
+    splitbuckettest.cpp
+    joinbuckettest.cpp
+    visitoroperationtest.cpp
+    twophaseupdateoperationtest.cpp
+    removelocationtest.cpp
+    bucketdbupdatertest.cpp
+    statoperationtest.cpp
+    pendingmessagetrackertest.cpp
+    distributortestutil.cpp
+    simplebucketprioritydatabasetest.cpp
+    simplemaintenancescannertest.cpp
+    maintenanceschedulertest.cpp
+    throttlingoperationstartertest.cpp
+    blockingoperationstartertest.cpp
+    nodeinfotest.cpp
+    updateoperationtest.cpp
+    bucketstateoperationtest.cpp
+    distributortest.cpp
+    mapbucketdatabasetest.cpp
+    operationtargetresolvertest.cpp
+    garbagecollectiontest.cpp
+    statecheckerstest.cpp
+    statusreporterdelegatetest.cpp
+    bucketdbmetricupdatertest.cpp
+    bucketgctimecalculatortest.cpp
+    nodemaintenancestatstrackertest.cpp
+    distributor_host_info_reporter_test.cpp
+    DEPENDS
+    AFTER
+    storage_storageconfig
+)
diff --git a/storage/src/tests/distributor/blockingoperationstartertest.cpp b/storage/src/tests/distributor/blockingoperationstartertest.cpp
new file mode 100644
index 00000000000..ee0058643d9
--- /dev/null
+++ b/storage/src/tests/distributor/blockingoperationstartertest.cpp
@@ -0,0 +1,78 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <string>
+#include <sstream>
+#include <memory>
+#include <vespa/storage/frameworkimpl/component/storagecomponentregisterimpl.h>
+#include <vespa/storage/distributor/blockingoperationstarter.h>
+#include <vespa/storage/distributor/pendingmessagetracker.h>
+#include <tests/distributor/maintenancemocks.h>
+
+namespace storage {
+
+namespace distributor {
+
+using document::BucketId;
+
+// Tests that BlockingOperationStarter forwards operations to the wrapped
+// starter only when they are not blocked.
+class BlockingOperationStarterTest : public CppUnit::TestFixture {
+    CPPUNIT_TEST_SUITE(BlockingOperationStarterTest);
+    CPPUNIT_TEST(testOperationNotBlockedWhenNoMessagesPending);
+    CPPUNIT_TEST(testOperationBlockedWhenMessagesPending);
+    CPPUNIT_TEST_SUITE_END();
+
+    // Creates a mock operation that does not block.
+    std::shared_ptr<Operation> createMockOperation() {
+        return std::shared_ptr<Operation>(new MockOperation(BucketId(16, 1)));
+    }
+    // Creates a mock operation flagged to block (setShouldBlock(true)).
+    std::shared_ptr<Operation> createBlockingMockOperation() {
+        std::shared_ptr<MockOperation> op(new MockOperation(BucketId(16, 1)));
+        op->setShouldBlock(true);
+        return op;
+    }
+
+    framework::defaultimplementation::FakeClock _clock;
+    std::unique_ptr<MockOperationStarter> _starterImpl;
+    std::unique_ptr<StorageComponentRegisterImpl> _compReg;
+    std::unique_ptr<PendingMessageTracker> _messageTracker;
+    std::unique_ptr<BlockingOperationStarter> _operationStarter; // Unit under test
+
+public:
+    void testOperationNotBlockedWhenNoMessagesPending();
+    void testOperationBlockedWhenMessagesPending();
+
+    void setUp();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(BlockingOperationStarterTest);
+
+// Builds a fresh fixture per test: a mock starter, a component register with
+// a fake clock, a pending message tracker over that register, and finally the
+// BlockingOperationStarter under test wrapping the tracker and mock starter.
+void
+BlockingOperationStarterTest::setUp()
+{
+    _starterImpl.reset(new MockOperationStarter());
+    _compReg.reset(new StorageComponentRegisterImpl());
+    _compReg->setClock(_clock);
+    _clock.setAbsoluteTimeInSeconds(1);
+    _messageTracker.reset(new PendingMessageTracker(*_compReg));
+    _operationStarter.reset(new BlockingOperationStarter(*_messageTracker, *_starterImpl));
+}
+
+void
+BlockingOperationStarterTest::testOperationNotBlockedWhenNoMessagesPending()
+{
+    // A non-blocking operation must be forwarded to the underlying starter.
+    std::shared_ptr<Operation> operation(createMockOperation());
+    bool accepted = _operationStarter->start(operation,
+                                             OperationStarter::Priority(0));
+    CPPUNIT_ASSERT(accepted);
+    CPPUNIT_ASSERT_EQUAL(std::string("BucketId(0x4000000000000001), pri 0\n"),
+                         _starterImpl->toString());
+}
+
+void
+BlockingOperationStarterTest::testOperationBlockedWhenMessagesPending()
+{
+    // start should return true but not forward message to underlying starter.
+    std::shared_ptr<Operation> operation(createBlockingMockOperation());
+    bool accepted = _operationStarter->start(operation,
+                                             OperationStarter::Priority(0));
+    CPPUNIT_ASSERT(accepted);
+    CPPUNIT_ASSERT_EQUAL(std::string(""), _starterImpl->toString());
+}
+
+}
+}
diff --git a/storage/src/tests/distributor/bucketdatabasetest.cpp b/storage/src/tests/distributor/bucketdatabasetest.cpp
new file mode 100644
index 00000000000..011b02c8f89
--- /dev/null
+++ b/storage/src/tests/distributor/bucketdatabasetest.cpp
@@ -0,0 +1,550 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <tests/distributor/bucketdatabasetest.h>
+#include <vespa/storageframework/defaultimplementation/clock/realclock.h>
+#include <iostream>
+#include <fstream>
+#include <iomanip>
+
+namespace storage {
+namespace distributor {
+
+using document::BucketId;
+
+void
+BucketDatabaseTest::setUp()
+{
+ db().clear();
+}
+
+namespace {
+ BucketCopy BC(uint32_t nodeIdx) {
+ return BucketCopy(0, nodeIdx, api::BucketInfo());
+ }
+
+ BucketInfo BI(uint32_t nodeIdx) {
+ BucketInfo bi;
+ bi.addNode(BC(nodeIdx), toVector<uint16_t>(0));
+ return bi;
+ }
+}
+
+void
+BucketDatabaseTest::testClear() {
+ db().update(BucketDatabase::Entry(document::BucketId(16, 16), BI(1)));
+ db().update(BucketDatabase::Entry(document::BucketId(16, 11), BI(2)));
+ db().clear();
+ CPPUNIT_ASSERT_EQUAL(size_t(0), db().size());
+}
+
+void
+BucketDatabaseTest::testUpdateGetAndRemove() {
+ // Do some insertions
+ CPPUNIT_ASSERT_EQUAL(0, (int)db().size());
+ db().update(BucketDatabase::Entry(document::BucketId(16, 16), BI(1)));
+ db().update(BucketDatabase::Entry(document::BucketId(16, 11), BI(2)));
+ db().update(BucketDatabase::Entry(document::BucketId(16, 42), BI(3)));
+ CPPUNIT_ASSERT_EQUAL(3, (int)db().size());
+
+ db().update(BucketDatabase::Entry(document::BucketId(16, 11), BI(4)));
+ CPPUNIT_ASSERT_EQUAL(3, (int)db().size());
+
+ // Access some elements
+ CPPUNIT_ASSERT_EQUAL(BI(4), db().get(document::BucketId(16, 11)).getBucketInfo());
+ CPPUNIT_ASSERT_EQUAL(BI(1), db().get(document::BucketId(16, 16)).getBucketInfo());
+ CPPUNIT_ASSERT_EQUAL(BI(3), db().get(document::BucketId(16, 42)).getBucketInfo());
+
+ // Do removes
+ db().remove(document::BucketId(16, 12));
+
+ CPPUNIT_ASSERT_EQUAL(3, (int)db().size());
+
+ db().remove(document::BucketId(16, 11));
+
+ CPPUNIT_ASSERT_EQUAL(2, (int)db().size());
+
+ db().remove(document::BucketId(16, 16));
+ db().remove(document::BucketId(16, 42));
+
+ CPPUNIT_ASSERT_EQUAL(0, (int)db().size());
+}
+
+namespace {
+
+struct ModifyProcessor : public BucketDatabase::MutableEntryProcessor
+{
+ bool process(BucketDatabase::Entry& e) {
+ if (e.getBucketId() == document::BucketId(16, 0x0b)) {
+ e.getBucketInfo() = BI(7);
+ } else if (e.getBucketId() == document::BucketId(16, 0x2a)) {
+ e->clear();
+ e->addNode(BC(4), toVector<uint16_t>(0));
+ e->addNode(BC(5), toVector<uint16_t>(0));
+ }
+
+ return true;
+ }
+};
+
+struct ListAllProcessor : public BucketDatabase::EntryProcessor
+{
+ std::ostringstream ost;
+
+ bool process(const BucketDatabase::Entry& e) {
+ ost << e << "\n";
+ return true;
+ }
+};
+
+struct DummyProcessor : public BucketDatabase::EntryProcessor
+{
+ std::ostringstream ost;
+
+ bool process(const BucketDatabase::Entry&) {
+ return true;
+ }
+};
+
+
+struct StoppingProcessor : public BucketDatabase::EntryProcessor
+{
+ std::ostringstream ost;
+
+ bool process(const BucketDatabase::Entry& e) {
+ ost << e << "\n";
+
+ if (e.getBucketId() == document::BucketId(16, 0x2a)) {
+ return false;
+ }
+
+ return true;
+ }
+};
+
+}
+
+void
+BucketDatabaseTest::testIterating() {
+ // Do some insertions
+ db().update(BucketDatabase::Entry(document::BucketId(16, 0x10), BI(1)));
+ db().update(BucketDatabase::Entry(document::BucketId(16, 0x0b), BI(2)));
+ db().update(BucketDatabase::Entry(document::BucketId(16, 0x2a), BI(3)));
+
+ {
+ ListAllProcessor proc;
+ db().forEach(proc, document::BucketId());
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string(
+ "BucketId(0x4000000000000010) : "
+ "node(idx=1,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false)\n"
+ "BucketId(0x400000000000002a) : "
+ "node(idx=3,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false)\n"
+ "BucketId(0x400000000000000b) : "
+ "node(idx=2,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false)\n"),
+ proc.ost.str());
+ }
+
+ {
+ ListAllProcessor proc;
+ db().forEach(proc, document::BucketId(16, 0x2a));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string(
+ "BucketId(0x400000000000000b) : "
+ "node(idx=2,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false)\n"),
+ proc.ost.str());
+ }
+
+ {
+ StoppingProcessor proc;
+ db().forEach(proc, document::BucketId());
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string(
+ "BucketId(0x4000000000000010) : "
+ "node(idx=1,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false)\n"
+ "BucketId(0x400000000000002a) : "
+ "node(idx=3,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false)\n"),
+ proc.ost.str());
+ }
+
+ {
+ ModifyProcessor alterProc;
+ db().forEach(alterProc, document::BucketId());
+ // Verify content after altering
+ ListAllProcessor proc;
+ db().forEach(proc);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string(
+ "BucketId(0x4000000000000010) : "
+ "node(idx=1,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false)\n"
+ "BucketId(0x400000000000002a) : "
+ "node(idx=4,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false), "
+ "node(idx=5,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false)\n"
+ "BucketId(0x400000000000000b) : "
+ "node(idx=7,crc=0x0,docs=0/0,bytes=1/1,trusted=false,active=false)\n"),
+ proc.ost.str());
+ }
+}
+
+std::string
+BucketDatabaseTest::doFindParents(const std::vector<document::BucketId>& ids,
+ const document::BucketId& searchId)
+{
+ db().clear();
+
+ for (uint32_t i = 0; i < ids.size(); ++i) {
+ db().update(BucketDatabase::Entry(ids[i], BI(i)));
+ }
+
+ std::vector<BucketDatabase::Entry> entries;
+ db().getParents(searchId, entries);
+
+ std::ostringstream ost;
+ for (uint32_t i = 0; i < ids.size(); ++i) {
+ if (std::find(entries.begin(), entries.end(),
+ BucketDatabase::Entry(ids[i], BI(i))) != entries.end()) {
+ if (!ost.str().empty()) {
+ ost << ",";
+ }
+ ost << i;
+ }
+ }
+
+ return ost.str();
+}
+
+void
+BucketDatabaseTest::testFindParents() {
+ // Test which buckets in the DB (specified in vector) are parents of the
+ // specified bucket. Result is a list of indexes into the vector.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("2"),
+ doFindParents(toVector(document::BucketId(17, 0x0ffff),
+ document::BucketId(18, 0x1ffff),
+ document::BucketId(18, 0x3ffff)),
+ document::BucketId(22, 0xfffff)));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("0,2,3"),
+ doFindParents(toVector(document::BucketId(16, 0x0ffff),
+ document::BucketId(17, 0x0ffff),
+ document::BucketId(17, 0x1ffff),
+ document::BucketId(19, 0xfffff)),
+ document::BucketId(22, 0xfffff)));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("0,2,3"),
+ doFindParents(toVector(document::BucketId(16, 0x0ffff),
+ document::BucketId(17, 0x0ffff),
+ document::BucketId(17, 0x1ffff),
+ document::BucketId(18, 0x1ffff)),
+ document::BucketId(22, 0x1ffff)));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("0"),
+ doFindParents(toVector(document::BucketId(16, 0x0ffff),
+ document::BucketId(17, 0x0ffff)),
+ document::BucketId(22, 0x1ffff)));
+
+ CPPUNIT_ASSERT_EQUAL( // ticket 3121525
+ std::string("0"),
+ doFindParents(toVector(document::BucketId(16, 0x0ffff),
+ document::BucketId(17, 0x0ffff),
+ document::BucketId(19, 0x1ffff)),
+ document::BucketId(18, 0x1ffff)));
+
+ CPPUNIT_ASSERT_EQUAL( // ticket 3121525
+ std::string("0"),
+ doFindParents(toVector(document::BucketId(16, 0x0ffff),
+ document::BucketId(17, 0x0ffff),
+ document::BucketId(19, 0x5ffff)),
+ document::BucketId(18, 0x1ffff)));
+}
+
+std::string
+BucketDatabaseTest::doFindAll(const std::vector<document::BucketId>& ids,
+ const document::BucketId& searchId)
+{
+ db().clear();
+
+ for (uint32_t i = 0; i < ids.size(); ++i) {
+ db().update(BucketDatabase::Entry(ids[i], BI(i)));
+ }
+
+ std::vector<BucketDatabase::Entry> entries;
+ db().getAll(searchId, entries);
+
+ std::ostringstream ost;
+ for (uint32_t i = 0; i < ids.size(); ++i) {
+ if (std::find(entries.begin(), entries.end(),
+ BucketDatabase::Entry(ids[i], BI(i))) != entries.end()) {
+ if (!ost.str().empty()) {
+ ost << ",";
+ }
+ ost << i;
+ }
+ }
+
+ return ost.str();
+}
+
+void
+BucketDatabaseTest::testFindAll()
+{
+ std::vector<document::BucketId> buckets;
+ CPPUNIT_ASSERT_EQUAL(
+ std::string(""),
+ doFindAll(buckets, document::BucketId(18, 0x1ffff)));
+
+ buckets.push_back(document::BucketId(16, 0x0aaaa)); // contains bucket 2-7
+ buckets.push_back(document::BucketId(17, 0x0aaaa)); // contains bucket 3-4
+ buckets.push_back(document::BucketId(20, 0xcaaaa));
+ buckets.push_back(document::BucketId(20, 0xeaaaa));
+ buckets.push_back(document::BucketId(17, 0x1aaaa)); // contains bucket 6-7
+ buckets.push_back(document::BucketId(20, 0xdaaaa));
+ buckets.push_back(document::BucketId(20, 0xfaaaa));
+ buckets.push_back(document::BucketId(20, 0xceaaa));
+ buckets.push_back(document::BucketId(17, 0x1ffff));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("0,4,5,6"),
+ doFindAll(buckets, document::BucketId(17, 0x1aaaa)));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("8"),
+ doFindAll(buckets, document::BucketId(16, 0xffff)));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("0,1"),
+ doFindAll(toVector(document::BucketId(17, 0x00001),
+ document::BucketId(17, 0x10001)),
+ document::BucketId(16, 0x00001)));
+
+ document::BucketId id(33, 0x1053c7089); // Bit 32 is set, but unused.
+ id.setUsedBits(32);
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("1,2"),
+ doFindAll(toVector(document::BucketId(24, 0x000dc7089),
+ document::BucketId(33, 0x0053c7089),
+ document::BucketId(33, 0x1053c7089),
+ document::BucketId(24, 0x000bc7089)),
+ id));
+
+ CPPUNIT_ASSERT_EQUAL( // Inconsistent split
+ std::string("0,1,2"),
+ doFindAll(toVector(
+ document::BucketId(16, 0x00001), // contains 2-3
+ document::BucketId(17, 0x00001),
+ document::BucketId(17, 0x10001)),
+ document::BucketId(16, 0x00001)));
+
+ CPPUNIT_ASSERT_EQUAL( // Inconsistent split
+ std::string("1,2"),
+ doFindAll(toVector(
+ document::BucketId(17, 0x10000),
+ document::BucketId(27, 0x007228034), // contains 3
+ document::BucketId(29, 0x007228034),
+ document::BucketId(17, 0x1ffff)),
+ document::BucketId(32, 0x027228034)));
+
+ CPPUNIT_ASSERT_EQUAL( // Inconsistent split
+ std::string("0"),
+ doFindAll(toVector(
+ document::BucketId(16, 0x0ffff),
+ document::BucketId(17, 0x0ffff)),
+ document::BucketId(22, 0x1ffff)));
+
+ CPPUNIT_ASSERT_EQUAL( // Inconsistent split
+ std::string("0,2"),
+ doFindAll(toVector(
+ document::BucketId(16, 0x0ffff),
+ document::BucketId(17, 0x0ffff),
+ document::BucketId(19, 0x1ffff)),
+ document::BucketId(18, 0x1ffff)));
+
+ CPPUNIT_ASSERT_EQUAL( // Inconsistent split, ticket 3121525
+ std::string("0,2"),
+ doFindAll(toVector(
+ document::BucketId(16, 0x0ffff),
+ document::BucketId(17, 0x0ffff),
+ document::BucketId(19, 0x5ffff)),
+ document::BucketId(18, 0x1ffff)));
+}
+
+document::BucketId
+BucketDatabaseTest::doCreate(const std::vector<document::BucketId>& ids,
+ uint32_t minBits,
+ const document::BucketId& wantedId)
+{
+ db().clear();
+
+ for (uint32_t i = 0; i < ids.size(); ++i) {
+ db().update(BucketDatabase::Entry(ids[i], BI(i)));
+ }
+
+ BucketDatabase::Entry entry = db().createAppropriateBucket(minBits, wantedId);
+ return entry.getBucketId();
+}
+
+void
+BucketDatabaseTest::testCreateAppropriateBucket() {
+ // Use min split bits when no relevant bucket exist.
+ CPPUNIT_ASSERT_EQUAL(
+ document::BucketId(36,0x0000004d2),
+ doCreate(toVector(document::BucketId(58, 0x43d6c878000004d2ull)), 36,
+ document::BucketId(58, 0x423bf1e0000004d2ull)));
+ // New bucket has bits in common with existing bucket.
+ // Create bucket with min amount of bits while not being overlapping
+ CPPUNIT_ASSERT_EQUAL(
+ document::BucketId(34,0x0000004d2),
+ doCreate(toVector(document::BucketId(58, 0xeaf77782000004d2)),
+ 16,
+ document::BucketId(58, 0x00000000000004d2)));
+ // Create sibling of existing bucket with most LSB bits in common.
+ CPPUNIT_ASSERT_EQUAL(
+ document::BucketId(40, 0x0000004d2),
+ doCreate(toVector(document::BucketId(58, 0xeaf77780000004d2),
+ document::BucketId(58, 0xeaf77782000004d2)),
+ 16,
+ document::BucketId(58, 0x00000000000004d2)));
+ // Create sibling of existing bucket with most LSB bits in common.
+ CPPUNIT_ASSERT_EQUAL(
+ document::BucketId(25, 0x0010004d2),
+ doCreate(toVector(document::BucketId(16, 0x00000000000004d1),
+ document::BucketId(40, 0x00000000000004d2)),
+ 16,
+ document::BucketId(58, 0x00000000010004d2)));
+
+ CPPUNIT_ASSERT_EQUAL(
+ document::BucketId(36, 0x10000004000004d2),
+ doCreate(toVector(document::BucketId(0x8c000000000004d2),
+ document::BucketId(0xeb54b3ac000004d2),
+ document::BucketId(0x88000002000004d2),
+ document::BucketId(0x84000001000004d2)),
+ 16,
+ document::BucketId(58, 0x1944a44000004d2)));
+ CPPUNIT_ASSERT_EQUAL(
+ document::BucketId(25, 0x0010004d2),
+ doCreate(toVector(document::BucketId(58, 0xeaf77780000004d2),
+ document::BucketId(40, 0x00000000000004d1)),
+ 16,
+ document::BucketId(58,0x00000000010004d2)));
+ // Test empty bucket database case. (Use min split bits)
+ std::vector<document::BucketId> buckets;
+ CPPUNIT_ASSERT_EQUAL(
+ document::BucketId(16, 0x0000004d2ull),
+ doCreate(buckets, 16,
+ document::BucketId(58, 0x00000000010004d2)));
+}
+
+void
+BucketDatabaseTest::testGetNext()
+{
+ db().clear();
+ db().update(BucketDatabase::Entry(document::BucketId(16, 16), BI(1)));
+ db().update(BucketDatabase::Entry(document::BucketId(16, 11), BI(2)));
+ db().update(BucketDatabase::Entry(document::BucketId(16, 42), BI(3)));
+
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 16),
+ db().getNext(document::BucketId()).getBucketId());
+
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 42),
+ db().getNext(document::BucketId(16, 16)).getBucketId());
+
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 11),
+ db().getNext(document::BucketId(16, 42)).getBucketId());
+}
+
+void
+BucketDatabaseTest::doTestUpperBound(const UBoundFunc& f)
+{
+ db().clear();
+ // Tree is rooted at the LSB bit, so the following buckets are in iteration
+ // order based on the reverse of their "normal" bitstring:
+ // 0010:3
+ db().update(BucketDatabase::Entry(document::BucketId(3, 4), BI(2)));
+ // 1000:3
+ db().update(BucketDatabase::Entry(document::BucketId(3, 1), BI(2)));
+ // 1001:4
+ db().update(BucketDatabase::Entry(document::BucketId(4, 9), BI(1)));
+ // 10010:5
+ db().update(BucketDatabase::Entry(document::BucketId(5, 9), BI(1)));
+ // 1100:3
+ db().update(BucketDatabase::Entry(document::BucketId(3, 3), BI(3)));
+
+ // 0000:0 (default constructed) has ubound of 0010:3
+ CPPUNIT_ASSERT_EQUAL(BucketId(3, 4), f(db(), BucketId()));
+ // 0011:4 has ubound of 1000:3
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(3, 1), f(db(), BucketId(4, 12)));
+ // 0000:1 has ubound of 0010:3
+ CPPUNIT_ASSERT_EQUAL(BucketId(3, 4), f(db(), BucketId(1, 0)));
+ CPPUNIT_ASSERT_EQUAL(BucketId(3, 1), f(db(), BucketId(3, 4)));
+ CPPUNIT_ASSERT_EQUAL(BucketId(4, 9), f(db(), BucketId(3, 1)));
+ CPPUNIT_ASSERT_EQUAL(BucketId(5, 9), f(db(), BucketId(4, 9)));
+ CPPUNIT_ASSERT_EQUAL(BucketId(3, 3), f(db(), BucketId(5, 9)));
+ // 100101:6 does not exist, should also return 1100:3
+ CPPUNIT_ASSERT_EQUAL(BucketId(3, 3), f(db(), BucketId(6, 41)));
+
+ // Test extremes.
+ db().clear();
+ db().update(BucketDatabase::Entry(document::BucketId(8, 0), BI(2)));
+ db().update(BucketDatabase::Entry(document::BucketId(8, 0xff), BI(2)));
+
+ CPPUNIT_ASSERT_EQUAL(BucketId(8, 0), f(db(), BucketId()));
+ CPPUNIT_ASSERT_EQUAL(BucketId(8, 0xff), f(db(), BucketId(8, 0)));
+}
+
+void
+BucketDatabaseTest::testUpperBoundReturnsNextInOrderGreaterBucket()
+{
+ doTestUpperBound([](const BucketDatabase& bucketDb,
+ const document::BucketId& id)
+ {
+ return bucketDb.upperBound(id).getBucketId();
+ });
+}
+
+void
+BucketDatabaseTest::testGetNextReturnsUpperBoundBucket()
+{
+ // getNext() would generally be implemented in terms of upperBound(), but
+ // make sure it conforms to the same contract in case this changes.
+ doTestUpperBound([](const BucketDatabase& bucketDb,
+ const document::BucketId& id)
+ {
+ return bucketDb.getNext(id).getBucketId();
+ });
+}
+
+void
+BucketDatabaseTest::testChildCount()
+{
+ db().clear();
+ // Empty tree; inserts cannot create inconsistencies.
+ CPPUNIT_ASSERT_EQUAL(0u, db().childCount(BucketId(3, 1)));
+
+ // Same bucket; cannot be inconsistent with itself.
+ db().update(BucketDatabase::Entry(document::BucketId(3, 1), BI(1)));
+ CPPUNIT_ASSERT_EQUAL(0u, db().childCount(BucketId(3, 1)));
+
+ // (2, 1) has one subtree.
+ CPPUNIT_ASSERT_EQUAL(1u, db().childCount(BucketId(2, 1)));
+
+ // Bucket exists in another subtree from (1, 1); inconsistency would
+ // result if we tried inserting it.
+ db().update(BucketDatabase::Entry(document::BucketId(3, 3), BI(2)));
+ CPPUNIT_ASSERT_EQUAL(2u, db().childCount(BucketId(1, 1)));
+
+ // Inner node with 1 subtree.
+ CPPUNIT_ASSERT_EQUAL(1u, db().childCount(BucketId(2, 3)));
+
+ // Leaves have no subtrees.
+ CPPUNIT_ASSERT_EQUAL(0u, db().childCount(BucketId(3, 1)));
+ CPPUNIT_ASSERT_EQUAL(0u, db().childCount(BucketId(3, 5)));
+}
+
+}
+} // storage
diff --git a/storage/src/tests/distributor/bucketdatabasetest.h b/storage/src/tests/distributor/bucketdatabasetest.h
new file mode 100644
index 00000000000..1eb8bf86add
--- /dev/null
+++ b/storage/src/tests/distributor/bucketdatabasetest.h
@@ -0,0 +1,63 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/vespalib/util/document_runnable.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <cppunit/extensions/HelperMacros.h>
+#include <vespa/storage/distributor/bucketdb/judybucketdatabase.h>
+#include <vespa/storage/storageutil/utils.h>
+
+#define SETUP_DATABASE_TESTS() \
+ CPPUNIT_TEST(testUpdateGetAndRemove); \
+ CPPUNIT_TEST(testClear); \
+ CPPUNIT_TEST(testIterating); \
+ CPPUNIT_TEST(testFindParents); \
+ CPPUNIT_TEST(testFindAll); \
+ CPPUNIT_TEST(testCreateAppropriateBucket); \
+ CPPUNIT_TEST(testGetNext); \
+ CPPUNIT_TEST(testGetNextReturnsUpperBoundBucket); \
+ CPPUNIT_TEST(testUpperBoundReturnsNextInOrderGreaterBucket); \
+ CPPUNIT_TEST(testChildCount);
+
+namespace storage {
+namespace distributor {
+
+struct BucketDatabaseTest : public CppUnit::TestFixture {
+ void setUp();
+
+ void testUpdateGetAndRemove();
+ void testClear();
+ void testIterating();
+ void testFindParents();
+ void testFindAll();
+ void testCreateAppropriateBucket();
+ void testGetNext();
+ void testGetNextReturnsUpperBoundBucket();
+ void testUpperBoundReturnsNextInOrderGreaterBucket();
+ void testChildCount();
+
+ void testBenchmark();
+
+ std::string doFindParents(const std::vector<document::BucketId>& ids,
+ const document::BucketId& searchId);
+ std::string doFindAll(const std::vector<document::BucketId>& ids,
+ const document::BucketId& searchId);
+ document::BucketId doCreate(const std::vector<document::BucketId>& ids,
+ uint32_t minBits,
+ const document::BucketId& wantedId);
+
+ virtual BucketDatabase& db() = 0;
+
+private:
+ using UBoundFunc = std::function<
+ document::BucketId(const BucketDatabase&,
+ const document::BucketId&)>;
+
+ void doTestUpperBound(const UBoundFunc& f);
+};
+
+}
+
+}
+
diff --git a/storage/src/tests/distributor/bucketdbmetricupdatertest.cpp b/storage/src/tests/distributor/bucketdbmetricupdatertest.cpp
new file mode 100644
index 00000000000..6aa9ef3a844
--- /dev/null
+++ b/storage/src/tests/distributor/bucketdbmetricupdatertest.cpp
@@ -0,0 +1,361 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <string>
+#include <sstream>
+#include <vespa/storage/distributor/bucketdb/bucketdbmetricupdater.h>
+#include <vespa/storage/distributor/bucketdb/bucketdatabase.h>
+#include <vespa/storage/distributor/distributormetricsset.h>
+#include <vespa/storage/distributor/idealstatemetricsset.h>
+#include <vespa/storage/config/config-stor-distributormanager.h>
+
+namespace storage {
+namespace distributor {
+
+using document::BucketId;
+
+class BucketDBMetricUpdaterTest : public CppUnit::TestFixture {
+ CPPUNIT_TEST_SUITE(BucketDBMetricUpdaterTest);
+ CPPUNIT_TEST(testDocAndByteCountsAreUpdated);
+ CPPUNIT_TEST(testBucketsWithTooFewAndTooManyCopies);
+ CPPUNIT_TEST(testBucketsWithVaryingTrustedness);
+ CPPUNIT_TEST(testPickCountsFromTrustedCopy);
+ CPPUNIT_TEST(testPickLargestCopyIfNoTrusted);
+ CPPUNIT_TEST(testCompleteRoundClearsWorkingState);
+ CPPUNIT_TEST(testMinBucketReplicaTrackedAndReportedPerNode);
+ CPPUNIT_TEST(nonTrustedReplicasAlsoCountedInModeAny);
+ CPPUNIT_TEST(minimumReplicaCountReturnedForNodeInModeAny);
+ CPPUNIT_TEST_SUITE_END();
+
+ void visitBucketWith2Copies1Trusted(BucketDBMetricUpdater& metricUpdater);
+ void visitBucketWith2CopiesBothTrusted(
+ BucketDBMetricUpdater& metricUpdater);
+ void visitBucketWith1Copy(BucketDBMetricUpdater& metricUpdater);
+
+
+ using NodeToReplicasMap = std::unordered_map<uint16_t, uint32_t>;
+ NodeToReplicasMap replicaStatsOf(BucketDBMetricUpdater& metricUpdater);
+
+ metrics::LoadTypeSet _loadTypes;
+public:
+ BucketDBMetricUpdaterTest();
+
+ void testDocAndByteCountsAreUpdated();
+ void testBucketsWithTooFewAndTooManyCopies();
+ void testBucketsWithVaryingTrustedness();
+ void testPickCountsFromTrustedCopy();
+ void testPickLargestCopyIfNoTrusted();
+ void testCompleteRoundClearsWorkingState();
+ void testMinBucketReplicaTrackedAndReportedPerNode();
+ void nonTrustedReplicasAlsoCountedInModeAny();
+ void minimumReplicaCountReturnedForNodeInModeAny();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(BucketDBMetricUpdaterTest);
+
+BucketDBMetricUpdaterTest::BucketDBMetricUpdaterTest()
+{
+ _loadTypes.push_back(metrics::LoadType(0, "foo"));
+}
+
+namespace {
+
+void addNode(BucketInfo& info, uint16_t node, uint32_t crc) {
+ auto apiInfo = api::BucketInfo(crc, crc + 1, crc + 2);
+ std::vector<uint16_t> order;
+ info.addNode(BucketCopy(1234, node, apiInfo), order);
+}
+
+typedef bool Trusted;
+
+BucketInfo
+makeInfo(uint32_t copy0Crc)
+{
+ BucketInfo info;
+ addNode(info, 0, copy0Crc);
+ return info;
+}
+
+BucketInfo
+makeInfo(uint32_t copy0Crc, uint32_t copy1Crc)
+{
+ BucketInfo info;
+ addNode(info, 0, copy0Crc);
+ addNode(info, 1, copy1Crc);
+ return info;
+}
+
+} // anonymous namespace
+
+void
+BucketDBMetricUpdaterTest::testDocAndByteCountsAreUpdated()
+{
+ BucketDBMetricUpdater metricUpdater;
+ IdealStateMetricSet ims;
+ DistributorMetricSet dms(_loadTypes);
+
+ CPPUNIT_ASSERT_EQUAL(false, metricUpdater.hasCompletedRound());
+
+ metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
+ metricUpdater.completeRound(false);
+
+ CPPUNIT_ASSERT_EQUAL(true, metricUpdater.hasCompletedRound());
+
+ CPPUNIT_ASSERT_EQUAL(int64_t(0), dms.docsStored.getLast());
+ CPPUNIT_ASSERT_EQUAL(int64_t(0), dms.bytesStored.getLast());
+ {
+ BucketDatabase::Entry e(document::BucketId(16, 1), makeInfo(10));
+ metricUpdater.visit(e, 1);
+ }
+
+ metricUpdater.completeRound(false);
+ metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
+
+ CPPUNIT_ASSERT_EQUAL(true, metricUpdater.hasCompletedRound());
+
+ CPPUNIT_ASSERT_EQUAL(int64_t(11), dms.docsStored.getLast());
+ CPPUNIT_ASSERT_EQUAL(int64_t(12), dms.bytesStored.getLast());
+
+ {
+ BucketDatabase::Entry e(document::BucketId(16, 1), makeInfo(20));
+ metricUpdater.visit(e, 1);
+ }
+
+ metricUpdater.completeRound(false);
+ metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
+
+ CPPUNIT_ASSERT_EQUAL(int64_t(32), dms.docsStored.getLast());
+ CPPUNIT_ASSERT_EQUAL(int64_t(34), dms.bytesStored.getLast());
+}
+
+void
+BucketDBMetricUpdaterTest::testBucketsWithTooFewAndTooManyCopies()
+{
+ BucketDBMetricUpdater metricUpdater;
+ IdealStateMetricSet ims;
+ DistributorMetricSet dms(_loadTypes);
+
+ metricUpdater.completeRound();
+ metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
+ CPPUNIT_ASSERT_EQUAL(int64_t(0), ims.buckets_toofewcopies.getLast());
+ CPPUNIT_ASSERT_EQUAL(int64_t(0), ims.buckets_toomanycopies.getLast());
+ CPPUNIT_ASSERT_EQUAL(int64_t(0), ims.buckets.getLast());
+
+ // 1 copy too little
+ {
+ BucketDatabase::Entry e(document::BucketId(16, 1), makeInfo(10));
+ metricUpdater.visit(e, 2);
+ }
+ metricUpdater.completeRound(false);
+ metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
+
+ CPPUNIT_ASSERT_EQUAL(int64_t(1), ims.buckets_toofewcopies.getLast());
+ CPPUNIT_ASSERT_EQUAL(int64_t(0), ims.buckets_toomanycopies.getLast());
+ CPPUNIT_ASSERT_EQUAL(int64_t(1), ims.buckets.getLast());
+
+ // 1 copy too many
+ {
+ BucketDatabase::Entry e(document::BucketId(16, 1), makeInfo(40, 40));
+ metricUpdater.visit(e, 1);
+ }
+ metricUpdater.completeRound(false);
+ metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
+
+ CPPUNIT_ASSERT_EQUAL(int64_t(1), ims.buckets_toofewcopies.getLast());
+ CPPUNIT_ASSERT_EQUAL(int64_t(1), ims.buckets_toomanycopies.getLast());
+ CPPUNIT_ASSERT_EQUAL(int64_t(2), ims.buckets.getLast());
+
+ // Right amount of copies, just inc bucket counter.
+ {
+ BucketDatabase::Entry e(document::BucketId(16, 1), makeInfo(40, 40));
+ metricUpdater.visit(e, 2);
+ }
+ metricUpdater.completeRound(false);
+ metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
+
+ CPPUNIT_ASSERT_EQUAL(int64_t(1), ims.buckets_toofewcopies.getLast());
+ CPPUNIT_ASSERT_EQUAL(int64_t(1), ims.buckets_toomanycopies.getLast());
+ CPPUNIT_ASSERT_EQUAL(int64_t(3), ims.buckets.getLast());
+}
+
+void
+BucketDBMetricUpdaterTest::testBucketsWithVaryingTrustedness()
+{
+ BucketDBMetricUpdater metricUpdater;
+ IdealStateMetricSet ims;
+ DistributorMetricSet dms(_loadTypes);
+
+ metricUpdater.completeRound(false);
+ metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
+ CPPUNIT_ASSERT_EQUAL(int64_t(0), ims.buckets_notrusted.getLast());
+ // Has only trusted (implicit for first added)
+ {
+ BucketDatabase::Entry e(document::BucketId(16, 1), makeInfo(100));
+ metricUpdater.visit(e, 2);
+ }
+ metricUpdater.completeRound(false);
+ metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
+ CPPUNIT_ASSERT_EQUAL(int64_t(0), ims.buckets_notrusted.getLast());
+ // Has at least one trusted (implicit for first added)
+ {
+ BucketDatabase::Entry e(document::BucketId(16, 2), makeInfo(100, 200));
+ metricUpdater.visit(e, 2);
+ }
+ metricUpdater.completeRound(false);
+ metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
+ CPPUNIT_ASSERT_EQUAL(int64_t(0), ims.buckets_notrusted.getLast());
+ // Has no trusted
+ {
+ BucketInfo info(makeInfo(100, 200));
+ info.resetTrusted();
+ BucketDatabase::Entry e(document::BucketId(16, 3), info);
+ metricUpdater.visit(e, 2);
+ }
+ metricUpdater.completeRound(false);
+ metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
+ CPPUNIT_ASSERT_EQUAL(int64_t(1), ims.buckets_notrusted.getLast());
+}
+
+void
+BucketDBMetricUpdaterTest::testPickCountsFromTrustedCopy()
+{
+ BucketDBMetricUpdater metricUpdater;
+ IdealStateMetricSet ims;
+ DistributorMetricSet dms(_loadTypes);
+
+ // First copy added is implicitly trusted, but it is not the largest.
+ BucketDatabase::Entry e(document::BucketId(16, 2), makeInfo(100, 200));
+ metricUpdater.visit(e, 2);
+ metricUpdater.completeRound(false);
+ metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
+
+ CPPUNIT_ASSERT_EQUAL(int64_t(101), dms.docsStored.getLast());
+ CPPUNIT_ASSERT_EQUAL(int64_t(102), dms.bytesStored.getLast());
+}
+
+void
+BucketDBMetricUpdaterTest::testPickLargestCopyIfNoTrusted()
+{
+ BucketDBMetricUpdater metricUpdater;
+ IdealStateMetricSet ims;
+ DistributorMetricSet dms(_loadTypes);
+
+ // No trusted copies, so must pick second copy.
+ BucketInfo info(makeInfo(100, 200));
+ info.resetTrusted();
+ BucketDatabase::Entry e(document::BucketId(16, 2), info);
+ metricUpdater.visit(e, 2);
+ metricUpdater.completeRound(false);
+ metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
+
+ CPPUNIT_ASSERT_EQUAL(int64_t(201), dms.docsStored.getLast());
+ CPPUNIT_ASSERT_EQUAL(int64_t(202), dms.bytesStored.getLast());
+}
+
+void
+BucketDBMetricUpdaterTest::testCompleteRoundClearsWorkingState()
+{
+ BucketDBMetricUpdater metricUpdater;
+ IdealStateMetricSet ims;
+ DistributorMetricSet dms(_loadTypes);
+
+ {
+ BucketDatabase::Entry e(document::BucketId(16, 1), makeInfo(10));
+ metricUpdater.visit(e, 1);
+ }
+ metricUpdater.completeRound();
+ metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
+
+ CPPUNIT_ASSERT_EQUAL(int64_t(11), dms.docsStored.getLast());
+ // Completing the round again with no visits having been done will
+ // propagate an empty working state to the complete state.
+ metricUpdater.completeRound();
+ metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
+
+ CPPUNIT_ASSERT_EQUAL(int64_t(0), dms.docsStored.getLast());
+}
+
+// Replicas on nodes 0 and 1.
+void
+BucketDBMetricUpdaterTest::visitBucketWith2Copies1Trusted(
+ BucketDBMetricUpdater& metricUpdater)
+{
+ BucketInfo info;
+ addNode(info, 0, 100);
+ addNode(info, 1, 101); // Note different checksums => #trusted = 1
+ BucketDatabase::Entry e(document::BucketId(16, 1), info);
+ metricUpdater.visit(e, 2);
+}
+
+// Replicas on nodes 0 and 2.
+void
+BucketDBMetricUpdaterTest::visitBucketWith2CopiesBothTrusted(
+ BucketDBMetricUpdater& metricUpdater)
+{
+ BucketInfo info;
+ addNode(info, 0, 200);
+ addNode(info, 2, 200);
+ BucketDatabase::Entry e(document::BucketId(16, 2), info);
+ metricUpdater.visit(e, 2);
+}
+
+// Single replica on node 2.
+void
+BucketDBMetricUpdaterTest::visitBucketWith1Copy(
+ BucketDBMetricUpdater& metricUpdater)
+{
+ BucketInfo info;
+ addNode(info, 2, 100);
+ BucketDatabase::Entry e(document::BucketId(16, 1), info);
+ metricUpdater.visit(e, 2);
+}
+
+BucketDBMetricUpdaterTest::NodeToReplicasMap
+BucketDBMetricUpdaterTest::replicaStatsOf(BucketDBMetricUpdater& metricUpdater)
+{
+ metricUpdater.completeRound(true);
+ return metricUpdater.getLastCompleteStats()._minBucketReplica;
+}
+
+void BucketDBMetricUpdaterTest::testMinBucketReplicaTrackedAndReportedPerNode()
+{
+ BucketDBMetricUpdater metricUpdater;
+
+ // Node 0 and 1 should have min replica 1, while node 2 should have min
+ // replica 2.
+ visitBucketWith2Copies1Trusted(metricUpdater);
+ visitBucketWith2CopiesBothTrusted(metricUpdater);
+
+ CPPUNIT_ASSERT_EQUAL(NodeToReplicasMap({{0, 1}, {1, 1}, {2, 2}}),
+ replicaStatsOf(metricUpdater));
+}
+
+void
+BucketDBMetricUpdaterTest::nonTrustedReplicasAlsoCountedInModeAny()
+{
+ BucketDBMetricUpdater metricUpdater;
+ using CountingMode = BucketDBMetricUpdater::ReplicaCountingMode;
+ metricUpdater.setMinimumReplicaCountingMode(CountingMode::ANY);
+ visitBucketWith2Copies1Trusted(metricUpdater);
+ visitBucketWith2CopiesBothTrusted(metricUpdater);
+
+ CPPUNIT_ASSERT_EQUAL(NodeToReplicasMap({{0, 2}, {1, 2}, {2, 2}}),
+ replicaStatsOf(metricUpdater));
+}
+
+void
+BucketDBMetricUpdaterTest::minimumReplicaCountReturnedForNodeInModeAny()
+{
+ BucketDBMetricUpdater metricUpdater;
+ using CountingMode = BucketDBMetricUpdater::ReplicaCountingMode;
+ metricUpdater.setMinimumReplicaCountingMode(CountingMode::ANY);
+ visitBucketWith2CopiesBothTrusted(metricUpdater);
+ visitBucketWith1Copy(metricUpdater);
+
+ // Node 2 has a bucket with only 1 replica.
+ CPPUNIT_ASSERT_EQUAL(NodeToReplicasMap({{0, 2}, {2, 1}}),
+ replicaStatsOf(metricUpdater));
+}
+
+} // distributor
+} // storage
diff --git a/storage/src/tests/distributor/bucketdbupdatertest.cpp b/storage/src/tests/distributor/bucketdbupdatertest.cpp
new file mode 100644
index 00000000000..a1c933d2606
--- /dev/null
+++ b/storage/src/tests/distributor/bucketdbupdatertest.cpp
@@ -0,0 +1,2296 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <iomanip>
+#include <iostream>
+#include <memory>
+#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storage/distributor/bucketdbupdater.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <vespa/storageapi/message/state.h>
+#include <vespa/document/fieldvalue/document.h>
+#include <vespa/vdslib/state/random.h>
+#include <vespa/vespalib/io/fileutil.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <vespa/storage/distributor/pendingclusterstate.h>
+#include <vespa/storageframework/defaultimplementation/clock/realclock.h>
+#include <vespa/storageframework/storageframework.h>
+#include <vespa/vespalib/text/stringtokenizer.h>
+#include <vespa/storage/storageutil/distributorstatecache.h>
+#include <tests/distributor/distributortestutil.h>
+#include <tests/distributor/messagesenderstub.h>
+#include <vespa/storage/distributor/simpleclusterinformation.h>
+
+#include <iostream>
+#include <fstream>
+#include <string>
+
+using namespace storage::api;
+using namespace storage::lib;
+
+namespace storage {
+namespace distributor {
+
+class BucketDBUpdaterTest : public CppUnit::TestFixture,
+ public DistributorTestUtil
+{
+ CPPUNIT_TEST_SUITE(BucketDBUpdaterTest);
+ CPPUNIT_TEST(testNormalUsage); // Make sure that bucketdbupdater sends requests to nodes, send responses back for 3 nodes, check that bucketdb is in correct state
+ CPPUNIT_TEST(testDistributorChange);
+ CPPUNIT_TEST(testDistributorChangeWithGrouping);
+ CPPUNIT_TEST(testNormalUsageInitializing); // Check that we send request bucket info when storage node is initializing, and send another when it's up.
+ CPPUNIT_TEST(testFailedRequestBucketInfo);
+ CPPUNIT_TEST(testBitChange); // Check what happens when distribution bits change
+ CPPUNIT_TEST(testNodeDown);
+ CPPUNIT_TEST(testStorageNodeInMaintenanceClearsBucketsForNode);
+ CPPUNIT_TEST(testNodeDownCopiesGetInSync);
+ CPPUNIT_TEST(testDownWhileInit);
+ CPPUNIT_TEST(testInitializingWhileRecheck);
+ CPPUNIT_TEST(testRecheckNode);
+ CPPUNIT_TEST(testRecheckNodeWithFailure);
+ CPPUNIT_TEST(testNotifyBucketChange);
+ CPPUNIT_TEST(testNotifyBucketChangeFromNodeDown);
+ CPPUNIT_TEST(testNotifyChangeWithPendingStateQueuesBucketInfoRequests);
+ CPPUNIT_TEST(testMergeReply);
+ CPPUNIT_TEST(testMergeReplyNodeDown);
+ CPPUNIT_TEST(testMergeReplyNodeDownAfterRequestSent);
+ CPPUNIT_TEST(testFlush);
+ CPPUNIT_TEST(testPendingClusterStateSendMessages);
+ CPPUNIT_TEST(testPendingClusterStateReceive);
+ CPPUNIT_TEST(testPendingClusterStateMerge);
+ CPPUNIT_TEST(testPendingClusterStateMergeReplicaChanged);
+ CPPUNIT_TEST(testPendingClusterStateWithGroupDown);
+ CPPUNIT_TEST(testPendingClusterStateWithGroupDownAndNoHandover);
+ CPPUNIT_TEST(testNoDbResurrectionForBucketNotOwnedInCurrentState);
+ CPPUNIT_TEST(testNoDbResurrectionForBucketNotOwnedInPendingState);
+ CPPUNIT_TEST(testClusterStateAlwaysSendsFullFetchWhenDistributionChangePending);
+ CPPUNIT_TEST(testChangedDistributionConfigTriggersRecoveryMode);
+ CPPUNIT_TEST(testNewlyAddedBucketsHaveCurrentTimeAsGcTimestamp);
+ CPPUNIT_TEST(testNewerMutationsNotOverwrittenByEarlierBucketFetch);
+ CPPUNIT_TEST(preemptedDistrChangeCarriesNodeSetOverToNextStateFetch);
+ CPPUNIT_TEST(preemptedStorChangeCarriesNodeSetOverToNextStateFetch);
+ CPPUNIT_TEST(preemptedStorageNodeDownMustBeReFetched);
+ CPPUNIT_TEST(outdatedNodeSetClearedAfterSuccessfulStateCompletion);
+ CPPUNIT_TEST(doNotSendToPreemptedNodeNowInDownState);
+ CPPUNIT_TEST(doNotSendToPreemptedNodeNotPartOfNewState);
+ CPPUNIT_TEST_DISABLED(clusterConfigDownsizeOnlySendsToAvailableNodes);
+ CPPUNIT_TEST(changedDiskSetTriggersReFetch);
+ CPPUNIT_TEST(nodeMissingFromConfigIsTreatedAsNeedingOwnershipTransfer);
+ CPPUNIT_TEST_SUITE_END();
+
+protected:
+ void testNormalUsage();
+ void testDistributorChange();
+ void testDistributorChangeWithGrouping();
+ void testNormalUsageInitializing();
+ void testFailedRequestBucketInfo();
+ void testNoResponses();
+ void testBitChange();
+ void testInconsistentChecksum();
+ void testAddEmptyNode();
+ void testNodeDown();
+ void testStorageNodeInMaintenanceClearsBucketsForNode();
+ void testNodeDownCopiesGetInSync();
+ void testDownWhileInit();
+ void testInitializingWhileRecheck();
+ void testRecheckNode();
+ void testRecheckNodeWithFailure();
+ void testNotifyBucketChange();
+ void testNotifyBucketChangeFromNodeDown();
+ void testNotifyChangeWithPendingStateQueuesBucketInfoRequests();
+ void testMergeReply();
+ void testMergeReplyNodeDown();
+ void testMergeReplyNodeDownAfterRequestSent();
+ void testFlush();
+ void testPendingClusterStateSendMessages();
+ void testPendingClusterStateReceive();
+ void testPendingClusterStateMerge();
+ void testPendingClusterStateMergeReplicaChanged();
+ void testPendingClusterStateWithGroupDown();
+ void testPendingClusterStateWithGroupDownAndNoHandover();
+ void testNoDbResurrectionForBucketNotOwnedInCurrentState();
+ void testNoDbResurrectionForBucketNotOwnedInPendingState();
+ void testClusterStateAlwaysSendsFullFetchWhenDistributionChangePending();
+ void testChangedDistributionConfigTriggersRecoveryMode();
+ void testNewlyAddedBucketsHaveCurrentTimeAsGcTimestamp();
+ void testNewerMutationsNotOverwrittenByEarlierBucketFetch();
+ void preemptedDistrChangeCarriesNodeSetOverToNextStateFetch();
+ void preemptedStorChangeCarriesNodeSetOverToNextStateFetch();
+ void preemptedStorageNodeDownMustBeReFetched();
+ void outdatedNodeSetClearedAfterSuccessfulStateCompletion();
+ void doNotSendToPreemptedNodeNowInDownState();
+ void doNotSendToPreemptedNodeNotPartOfNewState();
+ void clusterConfigDownsizeOnlySendsToAvailableNodes();
+ void changedDiskSetTriggersReFetch();
+ void nodeMissingFromConfigIsTreatedAsNeedingOwnershipTransfer();
+
+ bool bucketExistsThatHasNode(int bucketCount, uint16_t node) const;
+
+ ClusterInformation::CSP createClusterInfo(const std::string& clusterState) {
+ ClusterInformation::CSP clusterInfo(
+ new SimpleClusterInformation(
+ getBucketDBUpdater().getDistributorComponent().getIndex(),
+ getBucketDBUpdater().getDistributorComponent().getDistribution(),
+ lib::ClusterState(clusterState),
+ "ui"));
+ return clusterInfo;
+ }
+
+public:
+ void setUp() {
+ createLinks();
+ };
+
+ void tearDown() {
+ close();
+ }
+
+ std::shared_ptr<RequestBucketInfoReply> getFakeBucketReply(
+ const lib::ClusterState& state,
+ RequestBucketInfoCommand& cmd,
+ int storageIndex,
+ int bucketCount,
+ int invalidBucketCount = 0)
+ {
+ RequestBucketInfoReply* sreply = new RequestBucketInfoReply(cmd);
+ sreply->setAddress(storageAddress(storageIndex));
+
+ api::RequestBucketInfoReply::EntryVector &vec = sreply->getBucketInfo();
+
+ for (int i=0; i<bucketCount + invalidBucketCount; i++) {
+ if (!getBucketDBUpdater().getDistributorComponent()
+ .ownsBucketInState(state, document::BucketId(16, i))) {
+ continue;
+ }
+
+ std::vector<uint16_t> nodes;
+ getBucketDBUpdater().getDistributorComponent()
+ .getDistribution().getIdealNodes(
+ lib::NodeType::STORAGE,
+ state,
+ document::BucketId(16, i),
+ nodes);
+
+ for (uint32_t j=0; j<nodes.size(); j++) {
+ if (nodes[j] == storageIndex) {
+ if (i >= bucketCount) {
+ vec.push_back(api::RequestBucketInfoReply::Entry(
+ document::BucketId(16, i),
+ api::BucketInfo()));
+ } else {
+ vec.push_back(api::RequestBucketInfoReply::Entry(
+ document::BucketId(16, i),
+ api::BucketInfo(10,1,1)));
+ }
+ }
+ }
+ }
+
+ return std::shared_ptr<api::RequestBucketInfoReply>(sreply);
+ }
+
+ void fakeBucketReply(
+ const lib::ClusterState& state,
+ RequestBucketInfoCommand& cmd,
+ int storageIndex,
+ int bucketCount,
+ int invalidBucketCount = 0)
+ {
+ getBucketDBUpdater().onRequestBucketInfoReply(
+ getFakeBucketReply(state,
+ cmd,
+ storageIndex,
+ bucketCount,
+ invalidBucketCount));
+ }
+
+ void sendFakeReplyForSingleBucketRequest(
+ const api::RequestBucketInfoCommand& rbi)
+ {
+ CPPUNIT_ASSERT_EQUAL(size_t(1), rbi.getBuckets().size());
+ const document::BucketId& bucket(rbi.getBuckets()[0]);
+
+ std::shared_ptr<api::RequestBucketInfoReply> reply(
+ new api::RequestBucketInfoReply(rbi));
+ reply->getBucketInfo().push_back(
+ api::RequestBucketInfoReply::Entry(bucket,
+ api::BucketInfo(20, 10, 12, 50, 60, true, true)));
+ getBucketDBUpdater().onRequestBucketInfoReply(reply);
+ }
+
+ std::string verifyBucket(document::BucketId id, const lib::ClusterState& state) {
+ BucketDatabase::Entry entry = getBucketDatabase().get(id);
+ if (!entry.valid()) {
+ return vespalib::make_string("%s doesn't exist in DB",
+ id.toString().c_str());
+ }
+
+ std::vector<uint16_t> nodes;
+ getBucketDBUpdater().getDistributorComponent().getDistribution().getIdealNodes(
+ lib::NodeType::STORAGE,
+ state,
+ document::BucketId(id),
+ nodes);
+
+ if (nodes.size() != entry->getNodeCount()) {
+ return vespalib::make_string("Bucket Id %s has %d nodes in "
+ "ideal state, but has only %d in DB",
+ id.toString().c_str(),
+ (int)nodes.size(),
+ (int)entry->getNodeCount());
+ }
+
+ for (uint32_t i = 0; i<nodes.size(); i++) {
+ bool found = false;
+
+ for (uint32_t j = 0; j<entry->getNodeCount(); j++) {
+ if (nodes[i] == entry->getNodeRef(j).getNode()) {
+ found = true;
+ }
+ }
+
+ if (!found) {
+ return vespalib::make_string(
+ "Bucket Id %s has no copy from node %d",
+ id.toString().c_str(),
+ nodes[i]);
+ }
+ }
+
+ return "";
+ }
+
+
+ void verifyInvalid(document::BucketId id, int storageNode) {
+ BucketDatabase::Entry entry = getBucketDatabase().get(id);
+
+ CPPUNIT_ASSERT(entry.valid());
+
+ bool found = false;
+ for (uint32_t j = 0; j<entry->getNodeCount(); j++) {
+ if (entry->getNodeRef(j).getNode() == storageNode) {
+ CPPUNIT_ASSERT(!entry->getNodeRef(j).valid());
+ found = true;
+ }
+ }
+
+ CPPUNIT_ASSERT(found);
+ }
+
+ struct OrderByIncreasingNodeIndex {
+ template <typename T>
+ bool operator()(const T& lhs, const T& rhs) {
+ return (lhs->getAddress()->getIndex()
+ < rhs->getAddress()->getIndex());
+ }
+ };
+
+ void sortSentMessagesByIndex(MessageSenderStub& sender,
+ size_t sortFromOffset = 0)
+ {
+ std::sort(sender.commands.begin() + sortFromOffset,
+ sender.commands.end(),
+ OrderByIncreasingNodeIndex());
+ }
+
+ void setSystemState(const lib::ClusterState& state) {
+ const size_t sizeBeforeState = _sender.commands.size();
+ getBucketDBUpdater().onSetSystemState(
+ std::shared_ptr<api::SetSystemStateCommand>(
+ new api::SetSystemStateCommand(state)));
+ // A lot of test logic has the assumption that all messages sent as a
+ // result of cluster state changes will be in increasing index order
+ // (for simplicity, not because this is required for correctness).
+ // Only sort the messages that arrived as a result of the state, don't
+ // jumble the sorting with any existing messages.
+ sortSentMessagesByIndex(_sender, sizeBeforeState);
+ }
+
+ void setAndEnableClusterState(const lib::ClusterState& state,
+ uint32_t expectedMsgs,
+ uint32_t nBuckets) {
+ _sender.clear();
+ setSystemState(state);
+ CPPUNIT_ASSERT_EQUAL(size_t(expectedMsgs), _sender.commands.size());
+
+ for (uint32_t i = 0; i < _sender.commands.size(); i++) {
+ CPPUNIT_ASSERT(_sender.commands[i]->getType() ==
+ MessageType::REQUESTBUCKETINFO);
+
+ const api::StorageMessageAddress& address(
+ *_sender.commands[i]->getAddress());
+ fakeBucketReply(
+ state,
+ dynamic_cast<RequestBucketInfoCommand&>(*_sender.commands[i]),
+ address.getIndex(),
+ nBuckets);
+ }
+ }
+
+
+ void setStorageNodes(uint32_t numStorageNodes) {
+ _sender.clear();
+
+ lib::ClusterState newState(
+ vespalib::make_string("distributor:1 storage:%d", numStorageNodes));
+
+ setSystemState(newState);
+
+ for (uint32_t i=0; i<numStorageNodes; i++) {
+ CPPUNIT_ASSERT(_sender.commands[i]->getType() ==
+ MessageType::REQUESTBUCKETINFO);
+
+ const api::StorageMessageAddress *address = _sender.commands[i]->getAddress();
+ CPPUNIT_ASSERT_EQUAL(i, (uint32_t)address->getIndex());
+ }
+ }
+
+ void initializeNodesAndBuckets(uint32_t numStorageNodes,
+ uint32_t numBuckets)
+ {
+ setStorageNodes(numStorageNodes);
+
+ vespalib::string state(vespalib::make_string(
+ "distributor:1 storage:%d", numStorageNodes));
+ lib::ClusterState newState(state);
+
+ for (uint32_t i=0; i<numStorageNodes; i++) {
+ fakeBucketReply(newState,
+ *((RequestBucketInfoCommand*)_sender.commands[i].get()),
+ i,
+ numBuckets);
+ }
+ assertCorrectBuckets(numBuckets, state);
+ }
+
+ bool bucketHasNode(document::BucketId id, uint16_t node) const {
+ BucketDatabase::Entry entry = getBucket(id);
+ CPPUNIT_ASSERT(entry.valid());
+
+ for (uint32_t j=0; j<entry->getNodeCount(); j++) {
+ if (entry->getNodeRef(j).getNode() == node) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ api::StorageMessageAddress storageAddress(uint16_t node) {
+ return api::StorageMessageAddress("storage", lib::NodeType::STORAGE, node);
+ }
+
+ std::string getSentNodes(const std::string& oldClusterState,
+ const std::string& newClusterState);
+
+ std::string getSentNodesDistributionChanged(
+ const std::string& oldClusterState);
+
+ std::vector<uint16_t> getSentNodesWithPreemption(
+ const std::string& oldClusterState,
+ uint32_t expectedOldStateMessages,
+ const std::string& preemptedClusterState,
+ const std::string& newClusterState);
+
+ std::vector<uint16_t> getSendSet() const;
+
+ std::string mergeBucketLists(
+ const lib::ClusterState& oldState,
+ const std::string& existingData,
+ const lib::ClusterState& newState,
+ const std::string& newData,
+ bool includeBucketInfo = false);
+
+ std::string mergeBucketLists(
+ const std::string& existingData,
+ const std::string& newData,
+ bool includeBucketInfo = false);
+
+ void assertCorrectBuckets(int numBuckets, const std::string& stateStr) {
+ lib::ClusterState state(stateStr);
+ for (int i=0; i<numBuckets; i++) {
+ CPPUNIT_ASSERT_EQUAL(
+ getIdealStr(document::BucketId(16, i), state),
+ getNodes(document::BucketId(16, i)));
+ }
+ }
+
+ void setDistribution(const std::string& distConfig) {
+ lib::Distribution* distribution = new lib::Distribution(distConfig);
+ _node->getComponentRegister().setDistribution(
+ lib::Distribution::SP(distribution));
+ }
+
+ std::string getDistConfig6Nodes3Groups() const {
+ return ("redundancy 2\n"
+ "group[3]\n"
+ "group[0].name \"invalid\"\n"
+ "group[0].index \"invalid\"\n"
+ "group[0].partitions 1|*\n"
+ "group[0].nodes[0]\n"
+ "group[1].name rack0\n"
+ "group[1].index 0\n"
+ "group[1].nodes[3]\n"
+ "group[1].nodes[0].index 0\n"
+ "group[1].nodes[1].index 1\n"
+ "group[1].nodes[2].index 2\n"
+ "group[2].name rack1\n"
+ "group[2].index 1\n"
+ "group[2].nodes[3]\n"
+ "group[2].nodes[0].index 3\n"
+ "group[2].nodes[1].index 4\n"
+ "group[2].nodes[2].index 5\n");
+ }
+
+ std::string getDistConfig6Nodes4Groups() const {
+ return ("redundancy 2\n"
+ "group[4]\n"
+ "group[0].name \"invalid\"\n"
+ "group[0].index \"invalid\"\n"
+ "group[0].partitions 1|*\n"
+ "group[0].nodes[0]\n"
+ "group[1].name rack0\n"
+ "group[1].index 0\n"
+ "group[1].nodes[2]\n"
+ "group[1].nodes[0].index 0\n"
+ "group[1].nodes[1].index 1\n"
+ "group[2].name rack1\n"
+ "group[2].index 1\n"
+ "group[2].nodes[2]\n"
+ "group[2].nodes[0].index 2\n"
+ "group[2].nodes[1].index 3\n"
+ "group[3].name rack2\n"
+ "group[3].index 2\n"
+ "group[3].nodes[2]\n"
+ "group[3].nodes[0].index 4\n"
+ "group[3].nodes[1].index 5\n");
+ }
+
+ std::string getDistConfig3Nodes1Group() const {
+ return ("redundancy 2\n"
+ "group[2]\n"
+ "group[0].name \"invalid\"\n"
+ "group[0].index \"invalid\"\n"
+ "group[0].partitions 1|*\n"
+ "group[0].nodes[0]\n"
+ "group[1].name rack0\n"
+ "group[1].index 0\n"
+ "group[1].nodes[3]\n"
+ "group[1].nodes[0].index 0\n"
+ "group[1].nodes[1].index 1\n"
+ "group[1].nodes[2].index 2\n");
+ }
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(BucketDBUpdaterTest);
+
+void
+BucketDBUpdaterTest::testNormalUsage()
+{
+ setSystemState(lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"));
+
+ CPPUNIT_ASSERT_EQUAL(size_t(3), _sender.commands.size());
+
+ // Ensure distribution hash is set correctly
+ CPPUNIT_ASSERT_EQUAL(
+ getBucketDBUpdater().getDistributorComponent().getDistribution()
+ .getNodeGraph().getDistributionConfigHash(),
+ dynamic_cast<const RequestBucketInfoCommand&>(
+ *_sender.commands[0]).getDistributionHash());
+
+ fakeBucketReply(
+ lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"),
+ dynamic_cast<RequestBucketInfoCommand&>(*_sender.commands[0]),
+ 0, 10);
+
+ _sender.clear();
+
+ // Optimization for not refetching unneeded data after cluster state
+ // change is only implemented after completion of previous cluster state
+ setSystemState(lib::ClusterState("distributor:2 .0.s:i storage:3"));
+
+ CPPUNIT_ASSERT_EQUAL(size_t(3), _sender.commands.size());
+ // Expect reply of first set SystemState request.
+ CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.replies.size());
+
+ for (uint32_t i = 0; i < 3; ++i) {
+ fakeBucketReply(
+ lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"),
+ dynamic_cast<RequestBucketInfoCommand&>(*_sender.commands[i]),
+ i, 10);
+ }
+
+ assertCorrectBuckets(10, "distributor:2 storage:3");
+}
+
+void
+BucketDBUpdaterTest::testDistributorChange()
+{
+ int numBuckets = 100;
+
+ // First sends request
+ setSystemState(lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"));
+ CPPUNIT_ASSERT_EQUAL(size_t(3), _sender.commands.size());
+ for (uint32_t i = 0; i < 3; ++i) {
+ fakeBucketReply(
+ lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"),
+ dynamic_cast<RequestBucketInfoCommand&>(*_sender.commands[i]),
+ i, numBuckets);
+ }
+ _sender.clear();
+
+ // No change from initializing to up (when done with last job)
+ setSystemState(lib::ClusterState("distributor:2 storage:3"));
+ CPPUNIT_ASSERT_EQUAL(size_t(0), _sender.commands.size());
+ _sender.clear();
+
+ // Adding a distributor node: no new bucket info requests are sent, but
+ setSystemState(lib::ClusterState("distributor:3 storage:3"));
+ CPPUNIT_ASSERT_EQUAL(size_t(0), _sender.commands.size());
+ assertCorrectBuckets(numBuckets, "distributor:3 storage:3");
+ _sender.clear();
+
+ // Removing distributor. Need to refetch new data from all nodes.
+ setSystemState(lib::ClusterState("distributor:2 storage:3"));
+ CPPUNIT_ASSERT_EQUAL(size_t(3), _sender.commands.size());
+ for (uint32_t i = 0; i < 3; ++i) {
+ fakeBucketReply(
+ lib::ClusterState("distributor:2 storage:3"),
+ dynamic_cast<RequestBucketInfoCommand&>(*_sender.commands[i]),
+ i, numBuckets);
+ }
+ _sender.clear();
+ assertCorrectBuckets(numBuckets, "distributor:2 storage:3");
+}
+
+void
+BucketDBUpdaterTest::testDistributorChangeWithGrouping()
+{
+ std::string distConfig(getDistConfig6Nodes3Groups());
+ setDistribution(distConfig);
+ _distributor->enableNextDistribution();
+ int numBuckets = 100;
+
+ setSystemState(lib::ClusterState("distributor:6 storage:6"));
+ CPPUNIT_ASSERT_EQUAL(size_t(6), _sender.commands.size());
+ for (uint32_t i = 0; i < 6; ++i) {
+ fakeBucketReply(
+ lib::ClusterState("distributor:6 storage:6"),
+ dynamic_cast<RequestBucketInfoCommand&>(*_sender.commands[i]),
+ i, numBuckets);
+ }
+ _sender.clear();
+
+ // Distributor going down in other group, no change
+ setSystemState(lib::ClusterState("distributor:6 .5.s:d storage:6"));
+ CPPUNIT_ASSERT_EQUAL(size_t(0), _sender.commands.size());
+ _sender.clear();
+
+ setSystemState(lib::ClusterState("distributor:6 storage:6"));
+ CPPUNIT_ASSERT_EQUAL(size_t(0), _sender.commands.size());
+ assertCorrectBuckets(numBuckets, "distributor:6 storage:6");
+ _sender.clear();
+
+ // Unchanged grouping causes no change.
+ setDistribution(distConfig);
+ _distributor->storageDistributionChanged();
+ _distributor->enableNextDistribution();
+ CPPUNIT_ASSERT_EQUAL(size_t(0), _sender.commands.size());
+
+ // Changed grouping causes a change.
+ setDistribution(getDistConfig6Nodes4Groups());
+ _distributor->storageDistributionChanged();
+ _distributor->enableNextDistribution();
+
+ CPPUNIT_ASSERT_EQUAL(size_t(6), _sender.commands.size());
+}
+
+void
+BucketDBUpdaterTest::testNormalUsageInitializing()
+{
+ setSystemState(lib::ClusterState("distributor:1 .0.s:i storage:1 .0.s:i"));
+
+ CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.commands.size());
+
+ // Not yet passing on system state.
+ CPPUNIT_ASSERT_EQUAL(size_t(0), _senderDown.commands.size());
+
+ fakeBucketReply(lib::ClusterState("distributor:1 .0.s:i storage:1"),
+ *((RequestBucketInfoCommand*)_sender.commands[0].get()),
+ 0,
+ 10,
+ 10);
+
+ assertCorrectBuckets(10, "distributor:1 storage:1");
+
+ for (int i=10; i<20; i++) {
+ verifyInvalid(document::BucketId(16, i), 0);
+ }
+
+ // Pass on cluster state and recheck buckets now.
+ CPPUNIT_ASSERT_EQUAL(size_t(1), _senderDown.commands.size());
+
+ _sender.clear();
+ _senderDown.clear();
+
+ setSystemState(lib::ClusterState("distributor:1 .0.s:i storage:1"));
+
+ // A new request bucket info command should be sent up.
+ CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.commands.size());
+
+ fakeBucketReply(lib::ClusterState("distributor:1 .0.s:i storage:1"),
+ *((RequestBucketInfoCommand*)_sender.commands[0].get()),
+ 0,
+ 20);
+
+ // Pass on cluster state and recheck buckets now.
+ CPPUNIT_ASSERT_EQUAL(size_t(1), _senderDown.commands.size());
+
+ assertCorrectBuckets(20, "distributor:1 storage:1");
+}
+
+void
+BucketDBUpdaterTest::testFailedRequestBucketInfo()
+{
+ setSystemState(lib::ClusterState("distributor:1 .0.s:i storage:1"));
+
+ // Two messages go up: one request to the node, plus one reply to the SetSystemState.
+ CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.commands.size());
+
+ {
+ std::shared_ptr<api::RequestBucketInfoReply> reply =
+ getFakeBucketReply(lib::ClusterState("distributor:1 .0.s:i storage:1"),
+ *((RequestBucketInfoCommand*)_sender.commands[0].get()),
+ 0,
+ 10);
+
+ reply->setResult(api::ReturnCode::NOT_CONNECTED);
+ getBucketDBUpdater().onRequestBucketInfoReply(reply);
+ // Trigger that delayed message is sent
+ getClock().addSecondsToTime(10);
+ getBucketDBUpdater().resendDelayedMessages();
+ }
+
+ // Should be resent.
+ CPPUNIT_ASSERT_EQUAL(std::string("Request bucket info,"
+ "Request bucket info"),
+ _sender.getCommands());
+
+ CPPUNIT_ASSERT_EQUAL(size_t(0), _senderDown.commands.size());
+
+ fakeBucketReply(lib::ClusterState("distributor:1 .0.s:i storage:1"),
+ *((RequestBucketInfoCommand*)_sender.commands[1].get()),
+ 0,
+ 10);
+
+ for (int i=0; i<10; i++) {
+ CPPUNIT_ASSERT_EQUAL(
+ std::string(""),
+ verifyBucket(document::BucketId(16, i),
+ lib::ClusterState("distributor:1 storage:1")));
+ }
+
+ // Set system state should now be passed on
+ CPPUNIT_ASSERT_EQUAL(std::string("Set system state"),
+ _senderDown.getCommands());
+}
+
+void
+BucketDBUpdaterTest::testDownWhileInit()
+{
+ setStorageNodes(3);
+
+ fakeBucketReply(lib::ClusterState("distributor:1 storage:3"),
+ *((RequestBucketInfoCommand*)_sender.commands[0].get()),
+ 0,
+ 5);
+
+ setSystemState(lib::ClusterState("distributor:1 storage:3 .1.s:d"));
+
+ fakeBucketReply(lib::ClusterState("distributor:1 storage:3"),
+ *((RequestBucketInfoCommand*)_sender.commands[2].get()),
+ 2,
+ 5);
+
+ fakeBucketReply(lib::ClusterState("distributor:1 storage:3"),
+ *((RequestBucketInfoCommand*)_sender.commands[1].get()),
+ 1,
+ 5);
+}
+
+bool
+BucketDBUpdaterTest::bucketExistsThatHasNode(int bucketCount, uint16_t node) const
+{
+ for (int i=1; i<bucketCount; i++) {
+ if (bucketHasNode(document::BucketId(16, i), node)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void
+BucketDBUpdaterTest::testNodeDown()
+{
+ setStorageNodes(3);
+ _distributor->enableClusterState(lib::ClusterState("distributor:1 storage:3"));
+
+ for (int i=1; i<100; i++) {
+ addIdealNodes(document::BucketId(16, i));
+ }
+
+ CPPUNIT_ASSERT(bucketExistsThatHasNode(100, 1));
+
+ setSystemState(lib::ClusterState("distributor:1 storage:3 .1.s:d"));
+
+ CPPUNIT_ASSERT(!bucketExistsThatHasNode(100, 1));
+}
+
+void
+BucketDBUpdaterTest::testStorageNodeInMaintenanceClearsBucketsForNode()
+{
+ setStorageNodes(3);
+ _distributor->enableClusterState(lib::ClusterState("distributor:1 storage:3"));
+
+ for (int i=1; i<100; i++) {
+ addIdealNodes(document::BucketId(16, i));
+ }
+
+ CPPUNIT_ASSERT(bucketExistsThatHasNode(100, 1));
+
+ setSystemState(lib::ClusterState("distributor:1 storage:3 .1.s:m"));
+
+ CPPUNIT_ASSERT(!bucketExistsThatHasNode(100, 1));
+}
+
+void
+BucketDBUpdaterTest::testNodeDownCopiesGetInSync()
+{
+ setStorageNodes(3);
+
+ lib::ClusterState systemState("distributor:1 storage:3");
+ document::BucketId bid(16, 1);
+
+ addNodesToBucketDB(bid, "0=3,1=2,2=3");
+
+ setSystemState(lib::ClusterState("distributor:1 storage:3 .1.s:d"));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("BucketId(0x4000000000000001) : "
+ "node(idx=0,crc=0x3,docs=3/3,bytes=3/3,trusted=true,active=false), "
+ "node(idx=2,crc=0x3,docs=3/3,bytes=3/3,trusted=true,active=false)"),
+ dumpBucket(bid));
+}
+
+void
+BucketDBUpdaterTest::testInitializingWhileRecheck()
+{
+ lib::ClusterState systemState("distributor:1 storage:2 .0.s:i .0.i:0.1");
+ setSystemState(systemState);
+
+ CPPUNIT_ASSERT_EQUAL(size_t(2), _sender.commands.size());
+ CPPUNIT_ASSERT_EQUAL(size_t(0), _senderDown.commands.size());
+
+ getBucketDBUpdater().recheckBucketInfo(1, document::BucketId(16, 3));
+
+ for (int i=0; i<2; i++) {
+ fakeBucketReply(systemState,
+ *((RequestBucketInfoCommand*)_sender.commands[i].get()),
+ i,
+ 100);
+ }
+
+ // Now we can pass on system state.
+ CPPUNIT_ASSERT_EQUAL(size_t(1), _senderDown.commands.size());
+
+ CPPUNIT_ASSERT_EQUAL(MessageType::SETSYSTEMSTATE,
+ _senderDown.commands[0]->getType());
+}
+
+void
+BucketDBUpdaterTest::testBitChange()
+{
+
+ std::vector<document::BucketId> bucketlist;
+
+ {
+ setSystemState(lib::ClusterState("bits:14 storage:1 distributor:2"));
+
+ CPPUNIT_ASSERT_EQUAL(1, (int)_sender.commands.size());
+
+ CPPUNIT_ASSERT(_sender.commands[0]->getType() == MessageType::REQUESTBUCKETINFO);
+
+ RequestBucketInfoReply* sreply =
+ new RequestBucketInfoReply(*((RequestBucketInfoCommand*)_sender.commands[0].get()));
+ sreply->setAddress(storageAddress(0));
+ api::RequestBucketInfoReply::EntryVector &vec = sreply->getBucketInfo();
+
+
+ int cnt=0;
+ for (int i=0; cnt < 2; i++) {
+ lib::Distribution distribution = getBucketDBUpdater().getDistributorComponent()
+ .getDistribution();
+ std::vector<uint16_t> distributors;
+ if (distribution.getIdealDistributorNode(
+ lib::ClusterState("redundancy:1 bits:14 storage:1 distributor:2"),
+ document::BucketId(16, i))
+ == 0)
+ {
+ vec.push_back(api::RequestBucketInfoReply::Entry(
+ document::BucketId(16, i),
+ api::BucketInfo(10,1,1)));
+
+ bucketlist.push_back(document::BucketId(16, i));
+ cnt++;
+ }
+ }
+
+ getBucketDBUpdater().onRequestBucketInfoReply(std::shared_ptr<RequestBucketInfoReply>(sreply));
+ }
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("BucketId(0x4000000000000001) : "
+ "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false)"),
+ dumpBucket(bucketlist[0]));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("BucketId(0x4000000000000002) : "
+ "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false)"),
+ dumpBucket(bucketlist[1]));
+
+ {
+ _sender.clear();
+ setSystemState(lib::ClusterState("bits:16 storage:1 distributor:2"));
+
+ CPPUNIT_ASSERT_EQUAL(1, (int)_sender.commands.size());
+
+ CPPUNIT_ASSERT(_sender.commands[0]->getType() == MessageType::REQUESTBUCKETINFO);
+
+ RequestBucketInfoReply* sreply =
+ new RequestBucketInfoReply(
+ *((RequestBucketInfoCommand*)_sender.commands[0].get()));
+ sreply->setAddress(storageAddress(0));
+ sreply->setResult(api::ReturnCode::OK);
+ api::RequestBucketInfoReply::EntryVector &vec = sreply->getBucketInfo();
+
+ for (uint32_t i = 0; i < 3; ++i) {
+ vec.push_back(api::RequestBucketInfoReply::Entry(
+ document::BucketId(16, i),
+ api::BucketInfo(10,1,1)));
+ }
+
+ vec.push_back(api::RequestBucketInfoReply::Entry(
+ document::BucketId(16, 4),
+ api::BucketInfo(10,1,1)));
+
+ getBucketDBUpdater().onRequestBucketInfoReply(
+ std::shared_ptr<RequestBucketInfoReply>(sreply));
+ }
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("BucketId(0x4000000000000000) : "
+ "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false)"),
+ dumpBucket(document::BucketId(16, 0)));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("BucketId(0x4000000000000001) : "
+ "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false)"),
+ dumpBucket(document::BucketId(16, 1)));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("BucketId(0x4000000000000002) : "
+ "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false)"),
+ dumpBucket(document::BucketId(16, 2)));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("BucketId(0x4000000000000004) : "
+ "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false)"),
+ dumpBucket(document::BucketId(16, 4)));
+
+ {
+ _sender.clear();
+ setSystemState(lib::ClusterState("storage:1 distributor:2 .1.s:i"));
+ }
+
+ {
+ _sender.clear();
+ setSystemState(lib::ClusterState("storage:1 distributor:2"));
+ }
+};
+
+void
+BucketDBUpdaterTest::testRecheckNodeWithFailure()
+{
+ initializeNodesAndBuckets(3, 5);
+
+ _sender.clear();
+
+ getBucketDBUpdater().recheckBucketInfo(1, document::BucketId(16, 3));
+
+ CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.commands.size());
+
+
+ uint16_t index = 0;
+ {
+ api::RequestBucketInfoCommand& rbi(
+ dynamic_cast<RequestBucketInfoCommand&>(*_sender.commands[0]));
+ CPPUNIT_ASSERT_EQUAL(size_t(1), rbi.getBuckets().size());
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 3), rbi.getBuckets()[0]);
+ auto reply(std::make_shared<api::RequestBucketInfoReply>(rbi));
+
+ const api::StorageMessageAddress *address = _sender.commands[0]->getAddress();
+ index = address->getIndex();
+
+ reply->setResult(api::ReturnCode::NOT_CONNECTED);
+ getBucketDBUpdater().onRequestBucketInfoReply(reply);
+ // Trigger that delayed message is sent
+ getClock().addSecondsToTime(10);
+ getBucketDBUpdater().resendDelayedMessages();
+ }
+
+ CPPUNIT_ASSERT_EQUAL(size_t(2), _sender.commands.size());
+
+ setSystemState(
+ lib::ClusterState(vespalib::make_string("distributor:1 storage:3 .%d.s:d", index)));
+
+ // Recheck bucket.
+ {
+ api::RequestBucketInfoCommand& rbi(dynamic_cast<RequestBucketInfoCommand&>
+ (*_sender.commands[1]));
+ CPPUNIT_ASSERT_EQUAL(size_t(1), rbi.getBuckets().size());
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 3), rbi.getBuckets()[0]);
+ auto reply(std::make_shared<api::RequestBucketInfoReply>(rbi));
+ reply->setResult(api::ReturnCode::NOT_CONNECTED);
+ getBucketDBUpdater().onRequestBucketInfoReply(reply);
+ }
+
+ // Should not retry since node is down.
+ CPPUNIT_ASSERT_EQUAL(size_t(2), _sender.commands.size());
+}
+
+// recheckBucketInfo() must send a single-bucket RequestBucketInfo to the
+// given node and merge the reply into the bucket DB without disturbing the
+// other buckets' ideal-state node assignments.
+void
+BucketDBUpdaterTest::testRecheckNode()
+{
+    // Sets up 3 nodes and buckets (16, 0) .. (16, 4) -- matches the
+    // bucket-id assertions below (presumably; helper defined elsewhere).
+    initializeNodesAndBuckets(3, 5);
+
+    _sender.clear();
+
+    // Ask node 1 to re-report bucket (16, 3).
+    getBucketDBUpdater().recheckBucketInfo(1, document::BucketId(16, 3));
+
+    CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.commands.size());
+
+    api::RequestBucketInfoCommand& rbi(
+            dynamic_cast<RequestBucketInfoCommand&>(*_sender.commands[0]));
+    CPPUNIT_ASSERT_EQUAL(size_t(1), rbi.getBuckets().size());
+    CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 3), rbi.getBuckets()[0]);
+
+    // Fake a reply carrying new info for the rechecked bucket.
+    auto reply(std::make_shared<api::RequestBucketInfoReply>(rbi));
+    reply->getBucketInfo().push_back(
+            api::RequestBucketInfoReply::Entry(document::BucketId(16, 3),
+                    api::BucketInfo(20, 10, 12, 50, 60, true, true)));
+    getBucketDBUpdater().onRequestBucketInfoReply(reply);
+
+    // Buckets other than (16, 3) must keep their ideal-state nodes.
+    lib::ClusterState state("distributor:1 storage:3");
+    for (uint32_t i = 0; i < 3; i++) {
+        CPPUNIT_ASSERT_EQUAL(
+                getIdealStr(document::BucketId(16, i), state),
+                getNodes(document::BucketId(16, i)));
+    }
+
+    // (16, 3) is deliberately skipped here; its updated copy is verified below.
+    for (uint32_t i = 4; i < 5; i++) {
+        CPPUNIT_ASSERT_EQUAL(
+                getIdealStr(document::BucketId(16, i), state),
+                getNodes(document::BucketId(16, i)));
+    }
+
+    BucketDatabase::Entry entry = getBucketDatabase().get(document::BucketId(16, 3));
+    CPPUNIT_ASSERT(entry.valid());
+
+    // Node 1's copy must carry exactly the info from the faked reply.
+    const BucketCopy* copy = entry->getNode(1);
+    CPPUNIT_ASSERT(copy != 0);
+    CPPUNIT_ASSERT_EQUAL(api::BucketInfo(20,10,12, 50, 60, true, true),
+                         copy->getBucketInfo());
+}
+
+// NotifyBucketChange commands must be replied to immediately, but the bucket
+// DB may only be updated once the follow-up RequestBucketInfo replies (sent
+// by the updater to re-verify the change) have been received.
+void
+BucketDBUpdaterTest::testNotifyBucketChange()
+{
+    _distributor->enableClusterState(lib::ClusterState("distributor:1 storage:1"));
+
+    addNodesToBucketDB(document::BucketId(16, 1), "0=1234");
+    _sender.replies.clear();
+
+    // Notify for a bucket already present in the DB...
+    {
+        api::BucketInfo info(1, 2, 3, 4, 5, true, true);
+        auto cmd(std::make_shared<api::NotifyBucketChangeCommand>(
+                document::BucketId(16, 1), info));
+        cmd->setSourceIndex(0);
+        getBucketDBUpdater().onNotifyBucketChange(cmd);
+    }
+
+    // ... and for one not yet in the DB.
+    {
+        api::BucketInfo info(10, 11, 12, 13, 14, false, false);
+        auto cmd(std::make_shared<api::NotifyBucketChangeCommand>(
+                document::BucketId(16, 2), info));
+        cmd->setSourceIndex(0);
+        getBucketDBUpdater().onNotifyBucketChange(cmd);
+    }
+
+    // Must receive reply
+    CPPUNIT_ASSERT_EQUAL(size_t(2), _sender.replies.size());
+
+    for (int i = 0; i < 2; ++i) {
+        CPPUNIT_ASSERT_EQUAL(MessageType::NOTIFYBUCKETCHANGE_REPLY,
+                             _sender.replies[i]->getType());
+    }
+
+    // No database update until request bucket info replies have been received.
+    CPPUNIT_ASSERT_EQUAL(std::string("BucketId(0x4000000000000001) : "
+                                     "node(idx=0,crc=0x4d2,docs=1234/1234,bytes=1234/1234,"
+                                     "trusted=false,active=false)"),
+                         dumpBucket(document::BucketId(16, 1)));
+    CPPUNIT_ASSERT_EQUAL(std::string("NONEXISTING"),
+                         dumpBucket(document::BucketId(16, 2)));
+
+    CPPUNIT_ASSERT_EQUAL(size_t(2), _sender.commands.size());
+
+    // The DB ends up with the info from these replies, not from the notify
+    // commands themselves.
+    std::vector<api::BucketInfo> infos;
+    infos.push_back(api::BucketInfo(4567, 200, 2000, 400, 4000, true, true));
+    infos.push_back(api::BucketInfo(8999, 300, 3000, 500, 5000, false, false));
+
+    for (int i = 0; i < 2; ++i) {
+        api::RequestBucketInfoCommand& rbi(
+                dynamic_cast<RequestBucketInfoCommand&>(*_sender.commands[i]));
+        CPPUNIT_ASSERT_EQUAL(size_t(1), rbi.getBuckets().size());
+        CPPUNIT_ASSERT_EQUAL(document::BucketId(16, i + 1), rbi.getBuckets()[0]);
+
+        auto reply(std::make_shared<api::RequestBucketInfoReply>(rbi));
+        reply->getBucketInfo().push_back(
+                api::RequestBucketInfoReply::Entry(document::BucketId(16, i + 1),
+                        infos[i]));
+        getBucketDBUpdater().onRequestBucketInfoReply(reply);
+    }
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("BucketId(0x4000000000000001) : "
+                        "node(idx=0,crc=0x11d7,docs=200/400,bytes=2000/4000,trusted=true,active=true)"),
+            dumpBucket(document::BucketId(16, 1)));
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("BucketId(0x4000000000000002) : "
+                        "node(idx=0,crc=0x2327,docs=300/500,bytes=3000/5000,trusted=true,active=false)"),
+            dumpBucket(document::BucketId(16, 2)));
+
+}
+
+// A notify from a node that subsequently goes down: the reply to the
+// auto-flushed info request must not alter the existing DB entry.
+void
+BucketDBUpdaterTest::testNotifyBucketChangeFromNodeDown()
+{
+    _distributor->enableClusterState(lib::ClusterState("distributor:1 storage:2"));
+
+    addNodesToBucketDB(document::BucketId(16, 1), "1=1234");
+
+    _sender.replies.clear();
+
+    {
+        api::BucketInfo info(8999, 300, 3000, 500, 5000, false, false);
+        auto cmd(std::make_shared<api::NotifyBucketChangeCommand>(
+                document::BucketId(16, 1), info));
+        cmd->setSourceIndex(0);
+        getBucketDBUpdater().onNotifyBucketChange(cmd);
+    }
+    // Enable here to avoid having request bucket info be silently swallowed
+    // (sendRequestBucketInfo drops message if node is down).
+    _distributor->enableClusterState(lib::ClusterState("distributor:1 storage:2 .0.s:d"));
+
+    // DB still holds node 1's original info; the notify alone changes nothing.
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("BucketId(0x4000000000000001) : "
+                        "node(idx=1,crc=0x4d2,docs=1234/1234,bytes=1234/1234,trusted=false,active=false)"),
+            dumpBucket(document::BucketId(16, 1)));
+
+    // The notify command itself must still be replied to.
+    CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.replies.size());
+    CPPUNIT_ASSERT_EQUAL(MessageType::NOTIFYBUCKETCHANGE_REPLY,
+                         _sender.replies[0]->getType());
+
+    // Currently, this pending operation will be auto-flushed when the cluster state
+    // changes so the behavior is still correct. Keep this test around to prevent
+    // regressions here.
+    CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.commands.size());
+    api::RequestBucketInfoCommand& rbi(
+            dynamic_cast<RequestBucketInfoCommand&>(*_sender.commands[0]));
+    CPPUNIT_ASSERT_EQUAL(size_t(1), rbi.getBuckets().size());
+    CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 1), rbi.getBuckets()[0]);
+
+    auto reply(std::make_shared<api::RequestBucketInfoReply>(rbi));
+    reply->getBucketInfo().push_back(
+            api::RequestBucketInfoReply::Entry(
+                    document::BucketId(16, 1),
+                    api::BucketInfo(8999, 300, 3000, 500, 5000, false, false)));
+    getBucketDBUpdater().onRequestBucketInfoReply(reply);
+
+    // No change
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("BucketId(0x4000000000000001) : "
+                        "node(idx=1,crc=0x4d2,docs=1234/1234,bytes=1234/1234,trusted=false,active=false)"),
+            dumpBucket(document::BucketId(16, 1)));
+}
+
+/**
+ * Test that NotifyBucketChange received while there's a pending cluster state
+ * waits until the cluster state has been enabled as current before it sends off
+ * the single bucket info requests. This is to prevent a race condition where
+ * the replies to bucket info requests for buckets that would be owned by the
+ * distributor in the pending state but not by the current state would be
+ * discarded when attempted inserted into the bucket database.
+ */
+void
+BucketDBUpdaterTest::testNotifyChangeWithPendingStateQueuesBucketInfoRequests()
+{
+    setSystemState(lib::ClusterState("distributor:1 storage:1"));
+    CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.commands.size());
+
+    // Notify arrives while the initial cluster state is still pending.
+    {
+        api::BucketInfo info(8999, 300, 3000, 500, 5000, false, false);
+        auto cmd(std::make_shared<api::NotifyBucketChangeCommand>(
+                document::BucketId(16, 1), info));
+        cmd->setSourceIndex(0);
+        getBucketDBUpdater().onNotifyBucketChange(cmd);
+    }
+
+    // No single-bucket request may be sent while the state is pending.
+    CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.commands.size());
+
+    fakeBucketReply(
+            lib::ClusterState("distributor:1 storage:1"),
+            dynamic_cast<RequestBucketInfoCommand&>(*_sender.commands[0]),
+            0, 10);
+
+    // Completing the pending state releases the queued request.
+    CPPUNIT_ASSERT_EQUAL(size_t(2), _sender.commands.size());
+
+    {
+        api::RequestBucketInfoCommand& rbi(
+                dynamic_cast<RequestBucketInfoCommand&>(*_sender.commands[1]));
+        CPPUNIT_ASSERT_EQUAL(size_t(1), rbi.getBuckets().size());
+        CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 1), rbi.getBuckets()[0]);
+    }
+    _sender.clear();
+
+    // Queue must be cleared once pending state is enabled.
+    {
+        lib::ClusterState state("distributor:1 storage:2");
+        uint32_t expectedMsgs = 1, dummyBucketsToReturn = 1;
+        setAndEnableClusterState(state, expectedMsgs, dummyBucketsToReturn);
+    }
+    // Only the full-fetch command for the new state; no queued single-bucket
+    // request (empty bucket list == full fetch).
+    CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.commands.size());
+    {
+        api::RequestBucketInfoCommand& rbi(
+                dynamic_cast<RequestBucketInfoCommand&>(*_sender.commands[0]));
+        CPPUNIT_ASSERT_EQUAL(size_t(0), rbi.getBuckets().size());
+    }
+}
+
+// A merge reply must trigger one RequestBucketInfo per merge participant,
+// and the replies must be merged into the bucket DB with per-node info
+// intact. (Fixed: stray ';' after the function body removed.)
+void
+BucketDBUpdaterTest::testMergeReply()
+{
+    _distributor->enableClusterState(lib::ClusterState("distributor:1 storage:3"));
+
+    addNodesToBucketDB(document::BucketId(16, 1234),
+                       "0=1234,1=1234,2=1234");
+
+    std::vector<api::MergeBucketCommand::Node> nodes;
+    nodes.push_back(api::MergeBucketCommand::Node(0));
+    nodes.push_back(api::MergeBucketCommand::Node(1));
+    nodes.push_back(api::MergeBucketCommand::Node(2));
+
+    api::MergeBucketCommand cmd(document::BucketId(16, 1234), nodes, 0);
+
+    auto reply(std::make_shared<api::MergeBucketReply>(cmd));
+
+    _sender.clear();
+    getBucketDBUpdater().onMergeBucketReply(reply);
+
+    // One bucket info request per merge participant.
+    CPPUNIT_ASSERT_EQUAL(size_t(3), _sender.commands.size());
+
+    for (uint32_t i = 0; i < 3; i++) {
+        std::shared_ptr<api::RequestBucketInfoCommand>
+            req(std::dynamic_pointer_cast<api::RequestBucketInfoCommand>(
+                        _sender.commands[i]));
+
+        CPPUNIT_ASSERT(req.get());
+        CPPUNIT_ASSERT_EQUAL(size_t(1), req->getBuckets().size());
+        CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 1234), req->getBuckets()[0]);
+
+        // Distinct info per node so the final dump proves each reply landed.
+        auto reqreply(std::make_shared<api::RequestBucketInfoReply>(*req));
+        reqreply->getBucketInfo().push_back(
+                api::RequestBucketInfoReply::Entry(
+                        document::BucketId(16, 1234),
+                        api::BucketInfo(10 * (i + 1), 100 * (i + 1), 1000 * (i + 1))));
+
+        getBucketDBUpdater().onRequestBucketInfoReply(reqreply);
+    }
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("BucketId(0x40000000000004d2) : "
+                        "node(idx=0,crc=0xa,docs=100/100,bytes=1000/1000,trusted=false,active=false), "
+                        "node(idx=1,crc=0x14,docs=200/200,bytes=2000/2000,trusted=false,active=false), "
+                        "node(idx=2,crc=0x1e,docs=300/300,bytes=3000/3000,trusted=false,active=false)"),
+            dumpBucket(document::BucketId(16, 1234)));
+}
+
+// As testMergeReply, but one merge participant goes down before the merge
+// reply is processed; only the two remaining nodes may be asked for info.
+// (Fixed: stray ';' after the function body removed.)
+void
+BucketDBUpdaterTest::testMergeReplyNodeDown()
+{
+    _distributor->enableClusterState(lib::ClusterState("distributor:1 storage:3"));
+    std::vector<api::MergeBucketCommand::Node> nodes;
+
+    addNodesToBucketDB(document::BucketId(16, 1234), "0=1234,1=1234,2=1234");
+
+    for (uint32_t i = 0; i < 3; ++i) {
+        nodes.push_back(api::MergeBucketCommand::Node(i));
+    }
+
+    api::MergeBucketCommand cmd(document::BucketId(16, 1234), nodes, 0);
+
+    auto reply(std::make_shared<api::MergeBucketReply>(cmd));
+
+    // Node 2 goes down before the merge reply is handled.
+    setSystemState(lib::ClusterState("distributor:1 storage:2"));
+
+    _sender.clear();
+    getBucketDBUpdater().onMergeBucketReply(reply);
+
+    CPPUNIT_ASSERT_EQUAL(size_t(2), _sender.commands.size());
+
+    for (uint32_t i = 0; i < 2; i++) {
+        std::shared_ptr<api::RequestBucketInfoCommand> req(
+                std::dynamic_pointer_cast<api::RequestBucketInfoCommand>(
+                        _sender.commands[i]));
+
+        CPPUNIT_ASSERT(req.get());
+        CPPUNIT_ASSERT_EQUAL(size_t(1), req->getBuckets().size());
+        CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 1234), req->getBuckets()[0]);
+
+        auto reqreply(std::make_shared<api::RequestBucketInfoReply>(*req));
+        reqreply->getBucketInfo().push_back(
+                api::RequestBucketInfoReply::Entry(
+                        document::BucketId(16, 1234),
+                        api::BucketInfo(10 * (i + 1), 100 * (i + 1), 1000 * (i + 1))));
+        getBucketDBUpdater().onRequestBucketInfoReply(reqreply);
+    }
+
+    // Only nodes 0 and 1 end up in the DB.
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("BucketId(0x40000000000004d2) : "
+                        "node(idx=0,crc=0xa,docs=100/100,bytes=1000/1000,trusted=false,active=false), "
+                        "node(idx=1,crc=0x14,docs=200/200,bytes=2000/2000,trusted=false,active=false)"),
+            dumpBucket(document::BucketId(16, 1234)));
+}
+
+// As testMergeReplyNodeDown, but the node goes down only after all three
+// info requests have been sent; node 2's reply must still be processed
+// without reintroducing it into the DB.
+// (Fixed: stray ';' after the function body removed.)
+void
+BucketDBUpdaterTest::testMergeReplyNodeDownAfterRequestSent()
+{
+    _distributor->enableClusterState(lib::ClusterState("distributor:1 storage:3"));
+    std::vector<api::MergeBucketCommand::Node> nodes;
+
+    addNodesToBucketDB(document::BucketId(16, 1234), "0=1234,1=1234,2=1234");
+
+    for (uint32_t i = 0; i < 3; ++i) {
+        nodes.push_back(api::MergeBucketCommand::Node(i));
+    }
+
+    api::MergeBucketCommand cmd(document::BucketId(16, 1234), nodes, 0);
+
+    auto reply(std::make_shared<api::MergeBucketReply>(cmd));
+
+    _sender.clear();
+    getBucketDBUpdater().onMergeBucketReply(reply);
+
+    // All three requests already sent when the node goes down.
+    CPPUNIT_ASSERT_EQUAL(size_t(3), _sender.commands.size());
+
+    setSystemState(lib::ClusterState("distributor:1 storage:2"));
+
+    for (uint32_t i = 0; i < 3; i++) {
+        std::shared_ptr<api::RequestBucketInfoCommand> req(
+                std::dynamic_pointer_cast<api::RequestBucketInfoCommand>(
+                        _sender.commands[i]));
+
+        CPPUNIT_ASSERT(req.get());
+        CPPUNIT_ASSERT_EQUAL(size_t(1), req->getBuckets().size());
+        CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 1234), req->getBuckets()[0]);
+
+        auto reqreply(std::make_shared<api::RequestBucketInfoReply>(*req));
+        reqreply->getBucketInfo().push_back(
+                api::RequestBucketInfoReply::Entry(
+                        document::BucketId(16, 1234),
+                        api::BucketInfo(10 * (i + 1), 100 * (i + 1), 1000 * (i + 1))));
+        getBucketDBUpdater().onRequestBucketInfoReply(reqreply);
+    }
+
+    // Node 2's reply was dropped; only nodes 0 and 1 remain.
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("BucketId(0x40000000000004d2) : "
+                        "node(idx=0,crc=0xa,docs=100/100,bytes=1000/1000,trusted=false,active=false), "
+                        "node(idx=1,crc=0x14,docs=200/200,bytes=2000/2000,trusted=false,active=false)"),
+            dumpBucket(document::BucketId(16, 1234)));
+}
+
+
+// flush() must drop the pending single-bucket info requests created by a
+// merge reply without sending anything further downstream.
+void
+BucketDBUpdaterTest::testFlush()
+{
+    _distributor->enableClusterState(lib::ClusterState("distributor:1 storage:3"));
+    _sender.clear();
+
+    addNodesToBucketDB(document::BucketId(16, 1234), "0=1234,1=1234,2=1234");
+
+    std::vector<api::MergeBucketCommand::Node> nodes;
+    for (uint32_t i = 0; i < 3; ++i) {
+        nodes.push_back(api::MergeBucketCommand::Node(i));
+    }
+
+    api::MergeBucketCommand cmd(document::BucketId(16, 1234),
+                                nodes,
+                                0);
+
+    auto reply(std::make_shared<api::MergeBucketReply>(cmd));
+
+    _sender.clear();
+    getBucketDBUpdater().onMergeBucketReply(reply);
+
+    CPPUNIT_ASSERT_EQUAL(size_t(3), _sender.commands.size());
+    CPPUNIT_ASSERT_EQUAL(size_t(0), _senderDown.replies.size());
+
+    getBucketDBUpdater().flush();
+    // Flushing should drop all merge bucket replies
+    // NOTE(review): asserts on _senderDown.commands although the comment
+    // speaks of replies -- presumably flushed requests would surface there;
+    // verify against the flush() implementation.
+    CPPUNIT_ASSERT_EQUAL(size_t(0), _senderDown.commands.size());
+}
+
+/**
+ * Helper: create a pending cluster state for the transition from
+ * oldClusterState to newClusterState and return a comma-separated list of
+ * the node indexes that were sent a RequestBucketInfo, sorted by index.
+ */
+std::string
+BucketDBUpdaterTest::getSentNodes(
+        const std::string& oldClusterState,
+        const std::string& newClusterState)
+{
+    MessageSenderStub sender;
+
+    std::shared_ptr<api::SetSystemStateCommand> cmd(
+            new api::SetSystemStateCommand(
+                    lib::ClusterState(newClusterState)));
+
+    framework::defaultimplementation::FakeClock clock;
+    ClusterInformation::CSP clusterInfo(createClusterInfo(oldClusterState));
+
+    std::unordered_set<uint16_t> outdatedNodes;
+    std::unique_ptr<PendingClusterState> state(
+            PendingClusterState::createForClusterStateChange(
+                    clock, clusterInfo, sender, cmd, outdatedNodes,
+                    api::Timestamp(1)));
+
+    sortSentMessagesByIndex(sender);
+
+    std::ostringstream ost;
+    for (uint32_t i = 0; i < sender.commands.size(); i++) {
+        // Reference cast: fails loudly (std::bad_cast) on an unexpected
+        // command type instead of dereferencing a null pointer, and is
+        // consistent with the casts used in the tests above.
+        RequestBucketInfoCommand& req(
+                dynamic_cast<RequestBucketInfoCommand&>(*sender.commands[i]));
+
+        if (i > 0) {
+            ost << ",";
+        }
+
+        ost << req.getAddress()->getIndex();
+    }
+
+    return ost.str();
+}
+
+/**
+ * Helper: create a pending cluster state for a pure distribution config
+ * change under oldClusterState and return a comma-separated list of the
+ * node indexes that were sent a RequestBucketInfo, sorted by index.
+ */
+std::string
+BucketDBUpdaterTest::getSentNodesDistributionChanged(
+        const std::string& oldClusterState)
+{
+    MessageSenderStub sender;
+
+    framework::defaultimplementation::FakeClock clock;
+    ClusterInformation::CSP clusterInfo(createClusterInfo(oldClusterState));
+    std::unique_ptr<PendingClusterState> state(
+            PendingClusterState::createForDistributionChange(
+                    clock, clusterInfo, sender, api::Timestamp(1)));
+
+    sortSentMessagesByIndex(sender);
+
+    std::ostringstream ost;
+    for (uint32_t i = 0; i < sender.commands.size(); i++) {
+        // Reference cast: fails loudly (std::bad_cast) on an unexpected
+        // command type instead of dereferencing a null pointer.
+        RequestBucketInfoCommand& req(
+                dynamic_cast<RequestBucketInfoCommand&>(*sender.commands[i]));
+
+        if (i > 0) {
+            ost << ",";
+        }
+
+        ost << req.getAddress()->getIndex();
+    }
+
+    return ost.str();
+}
+
+// Table of state transitions -> nodes that must be (re)fetched from.
+// Covers initial states, node add/remove, down/maintenance/stopping/
+// initializing transitions and distributor ownership changes.
+// (Fixed: stray ';' after the function body removed.)
+void
+BucketDBUpdaterTest::testPendingClusterStateSendMessages()
+{
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("0,1,2"),
+            getSentNodes("cluster:d",
+                         "distributor:1 storage:3"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("0,1"),
+            getSentNodes("cluster:d",
+                         "distributor:1 storage:3 .2.s:m"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("2"),
+            getSentNodes("distributor:1 storage:2",
+                         "distributor:1 storage:3"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("2,3,4,5"),
+            getSentNodes("distributor:1 storage:2",
+                         "distributor:1 storage:6"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("0,1,2"),
+            getSentNodes("distributor:4 storage:3",
+                         "distributor:3 storage:3"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("0,1,2,3"),
+            getSentNodes("distributor:4 storage:3",
+                         "distributor:4 .2.s:d storage:4"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string(""),
+            getSentNodes("distributor:4 storage:3",
+                         "distributor:4 .0.s:d storage:4"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string(""),
+            getSentNodes("distributor:3 storage:3",
+                         "distributor:4 storage:3"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("2"),
+            getSentNodes("distributor:3 storage:3 .2.s:i",
+                         "distributor:3 storage:3"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("1"),
+            getSentNodes("distributor:3 storage:3 .1.s:d",
+                         "distributor:3 storage:3"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("1,2,4"),
+            getSentNodes("distributor:3 storage:4 .1.s:d .2.s:i",
+                         "distributor:3 storage:5"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string(""),
+            getSentNodes("distributor:1 storage:3",
+                         "cluster:d"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string(""),
+            getSentNodes("distributor:1 storage:3",
+                         "distributor:1 storage:3"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string(""),
+            getSentNodes("distributor:1 storage:3",
+                         "cluster:d distributor:1 storage:6"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string(""),
+            getSentNodes("distributor:3 storage:3",
+                         "distributor:3 .2.s:m storage:3"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("0,1,2"),
+            getSentNodes("distributor:3 .2.s:m storage:3",
+                         "distributor:3 .2.s:d storage:3"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string(""),
+            getSentNodes("distributor:3 .2.s:m storage:3",
+                         "distributor:3 storage:3"));
+
+    // A distribution change always refetches from every node.
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("0,1,2"),
+            getSentNodesDistributionChanged("distributor:3 storage:3"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("0,1"),
+            getSentNodes("distributor:10 storage:2",
+                         "distributor:10 .1.s:d storage:2"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("1"),
+            getSentNodes("distributor:2 storage:2",
+                         "distributor:2 storage:2 .1.d:3 .1.d.1.s:d"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("1"),
+            getSentNodes("distributor:2 storage:2 .1.s:d",
+                         "distributor:2 storage:2 .1.d:3 .1.d.1.s:d"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string(""),
+            getSentNodes("distributor:2 storage:2",
+                         "distributor:3 .2.s:i storage:2"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("0,1,2"),
+            getSentNodes("distributor:3 storage:3",
+                         "distributor:3 .2.s:s storage:3"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string(""),
+            getSentNodes("distributor:3 .2.s:s storage:3",
+                         "distributor:3 .2.s:d storage:3"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("1"),
+            getSentNodes("distributor:3 storage:3 .1.s:m",
+                         "distributor:3 storage:3"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string(""),
+            getSentNodes("distributor:3 storage:3",
+                         "distributor:3 storage:3 .1.s:m"));
+}
+
+// A PendingClusterState must send one info request per storage node, only
+// report done() after the last reply, and collect all results.
+void
+BucketDBUpdaterTest::testPendingClusterStateReceive()
+{
+    MessageSenderStub sender;
+
+    auto cmd(std::make_shared<api::SetSystemStateCommand>(
+                    lib::ClusterState("distributor:1 storage:3")));
+
+    framework::defaultimplementation::FakeClock clock;
+    ClusterInformation::CSP clusterInfo(createClusterInfo("cluster:d"));
+    std::unordered_set<uint16_t> outdatedNodes;
+    std::unique_ptr<PendingClusterState> state(
+            PendingClusterState::createForClusterStateChange(
+                    clock, clusterInfo, sender, cmd, outdatedNodes,
+                    api::Timestamp(1)));
+
+    CPPUNIT_ASSERT_EQUAL(size_t(3), sender.commands.size());
+
+    sortSentMessagesByIndex(sender);
+
+    std::ostringstream ost;
+    for (uint32_t i = 0; i < sender.commands.size(); i++) {
+        RequestBucketInfoCommand* req =
+            dynamic_cast<RequestBucketInfoCommand*>(sender.commands[i].get());
+        CPPUNIT_ASSERT(req);
+
+        // make_shared instead of naked new: exception safe and consistent
+        // with the rest of the file.
+        auto rep(std::make_shared<RequestBucketInfoReply>(*req));
+
+        rep->getBucketInfo().push_back(
+                RequestBucketInfoReply::Entry(
+                        document::BucketId(16, i),
+                        api::BucketInfo(i, i, i, i, i)));
+
+        CPPUNIT_ASSERT(state->onRequestBucketInfoReply(rep));
+
+        // done() flips to true only once the final reply is in.
+        CPPUNIT_ASSERT_EQUAL(i == sender.commands.size() - 1,
+                             state->done());
+    }
+
+    CPPUNIT_ASSERT_EQUAL(size_t(3), state->results().size());
+}
+
+// With distributor_auto_ownership_transfer_on_whole_group_down enabled, an
+// entire distributor group going down forces a refetch from all nodes.
+void
+BucketDBUpdaterTest::testPendingClusterStateWithGroupDown()
+{
+    std::string config(getDistConfig6Nodes4Groups());
+    config += "distributor_auto_ownership_transfer_on_whole_group_down true\n";
+    setDistribution(config);
+
+    // Group config has nodes {0, 1}, {2, 3}, {4, 5}
+    // We're node index 0.
+
+    // Entire group 1 goes down. Must refetch from all nodes.
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("0,1,2,3,4,5"),
+            getSentNodes("distributor:6 storage:6",
+                         "distributor:6 .2.s:d .3.s:d storage:6"));
+
+    // But don't fetch if not the entire group is down.
+    CPPUNIT_ASSERT_EQUAL(
+            std::string(""),
+            getSentNodes("distributor:6 storage:6",
+                         "distributor:6 .2.s:d storage:6"));
+}
+
+// With whole-group ownership transfer disabled, an entire group going down
+// must not trigger any bucket info fetches at all.
+void
+BucketDBUpdaterTest::testPendingClusterStateWithGroupDownAndNoHandover()
+{
+    std::string distConfig(getDistConfig6Nodes4Groups());
+    distConfig.append(
+            "distributor_auto_ownership_transfer_on_whole_group_down false\n");
+    setDistribution(distConfig);
+
+    // Group is down, but config says to not do anything about it.
+    CPPUNIT_ASSERT_EQUAL(
+            std::string(""),
+            getSentNodes("distributor:6 storage:6",
+                         "distributor:6 .2.s:d .3.s:d storage:6"));
+}
+
+/**
+ * Helper: parse test data on the form "node:bucket,bucket,...|node:..."
+ * (with includeBucketInfo, each bucket is "id/checksum/docs/size") and feed
+ * the resulting bucket copies into the given PendingClusterState.
+ */
+void
+parseInputData(const std::string& data,
+               uint64_t timestamp,
+               PendingClusterState& state,
+               bool includeBucketInfo)
+{
+    vespalib::StringTokenizer tokenizer(data, "|");
+    for (uint32_t i = 0; i < tokenizer.size(); i++) {
+        // Each outer token is "<node index>:<bucket list>".
+        vespalib::StringTokenizer tok2(tokenizer[i], ":");
+
+        uint16_t node = atoi(tok2[0].c_str());
+
+        state.setNodeReplied(node);
+
+        vespalib::StringTokenizer tok3(tok2[1], ",");
+        for (uint32_t j = 0; j < tok3.size(); j++) {
+            if (includeBucketInfo) {
+                vespalib::StringTokenizer tok4(tok3[j], "/");
+
+                state.addNodeInfo(
+                        document::BucketId(16, atoi(tok4[0].c_str())),
+                        BucketCopy(
+                                timestamp,
+                                node,
+                                api::BucketInfo(
+                                        atoi(tok4[1].c_str()),
+                                        atoi(tok4[2].c_str()),
+                                        atoi(tok4[3].c_str()),
+                                        // Meta (used) counts deliberately
+                                        // equal the doc/byte counts.
+                                        atoi(tok4[2].c_str()),
+                                        atoi(tok4[3].c_str()))));
+            } else {
+                // Without explicit info, every copy gets the same dummy
+                // BucketInfo(3, 3, 3, 3, 3).
+                state.addNodeInfo(
+                        document::BucketId(16, atoi(tok3[j].c_str())),
+                        BucketCopy(timestamp,
+                                   node,
+                                   api::BucketInfo(3, 3, 3, 3, 3)));
+            }
+        }
+    }
+}
+
+/**
+ * EntryProcessor that dumps each bucket DB entry on the form
+ * "<raw id>:<node>[/<crc>/<docs>/<size>/<t|u>],...|", matching the expected
+ * strings in the merge tests below.
+ */
+struct BucketDumper : public BucketDatabase::EntryProcessor
+{
+    std::ostringstream ost;
+    bool _includeBucketInfo;
+
+    // explicit: a single-argument constructor should not act as an
+    // implicit bool -> BucketDumper conversion.
+    explicit BucketDumper(bool includeBucketInfo)
+        : _includeBucketInfo(includeBucketInfo)
+    {
+    }
+
+    bool process(const BucketDatabase::Entry& e) {
+        document::BucketId bucketId(e.getBucketId());
+
+        ost << (uint32_t)bucketId.getRawId() << ":";
+        for (uint32_t i = 0; i < e->getNodeCount(); ++i) {
+            if (i > 0) {
+                ost << ",";
+            }
+            const BucketCopy& copy(e->getNodeRef(i));
+            ost << copy.getNode();
+            if (_includeBucketInfo) {
+                ost << "/" << copy.getChecksum()
+                    << "/" << copy.getDocumentCount()
+                    << "/" << copy.getTotalDocumentSize()
+                    << "/" << (copy.trusted() ? "t" : "u");
+            }
+        }
+        ost << "|";
+        return true;  // continue iterating over all entries
+    }
+};
+
+/**
+ * Helper: simulate two consecutive cluster state transitions. First merge
+ * existingData (under oldState) into the bucket DB, then newData (under
+ * newState), and return the resulting DB contents as dumped by
+ * BucketDumper. The DB is cleared before returning.
+ */
+std::string
+BucketDBUpdaterTest::mergeBucketLists(
+        const lib::ClusterState& oldState,
+        const std::string& existingData,
+        const lib::ClusterState& newState,
+        const std::string& newData,
+        bool includeBucketInfo)
+{
+    framework::defaultimplementation::FakeClock clock;
+    framework::MilliSecTimer timer(clock);
+
+    MessageSenderStub sender;
+    std::unordered_set<uint16_t> outdatedNodes;
+
+    // Phase 1: apply the pre-existing bucket data under the old state.
+    {
+        auto cmd(std::make_shared<api::SetSystemStateCommand>(oldState));
+
+        api::Timestamp beforeTime(1);
+
+        ClusterInformation::CSP clusterInfo(createClusterInfo("cluster:d"));
+        std::unique_ptr<PendingClusterState> state(
+                PendingClusterState::createForClusterStateChange(
+                        clock, clusterInfo, sender, cmd, outdatedNodes,
+                        beforeTime));
+
+        parseInputData(existingData, beforeTime, *state, includeBucketInfo);
+        state->mergeInto(getBucketDBUpdater().getDistributorComponent().getBucketDatabase());
+    }
+
+    // NOTE(review): dumper_tmp's output is never used -- presumably a
+    // leftover debugging aid.
+    BucketDumper dumper_tmp(true);
+    getBucketDatabase().forEach(dumper_tmp);
+
+    // Phase 2: apply the new bucket data under the new state (with the old
+    // state as the cluster's prior knowledge).
+    {
+        auto cmd(std::make_shared<api::SetSystemStateCommand>(
+                lib::ClusterState(newState)));
+
+        api::Timestamp afterTime(2);
+
+        ClusterInformation::CSP clusterInfo(createClusterInfo(oldState.toString()));
+        std::unique_ptr<PendingClusterState> state(
+                PendingClusterState::createForClusterStateChange(
+                        clock, clusterInfo, sender, cmd, outdatedNodes,
+                        afterTime));
+
+        parseInputData(newData, afterTime, *state, includeBucketInfo);
+        state->mergeInto(getBucketDBUpdater().getDistributorComponent()
+                         .getBucketDatabase());
+    }
+
+    BucketDumper dumper(includeBucketInfo);
+    getBucketDBUpdater().getDistributorComponent()
+        .getBucketDatabase().forEach(dumper);
+    // Clear so subsequent invocations start from an empty DB.
+    getBucketDBUpdater().getDistributorComponent()
+        .getBucketDatabase().clear();
+    return dumper.ost.str();
+}
+
+// Convenience overload: merge with an unchanged 3-node cluster state on
+// both sides of the transition.
+std::string
+BucketDBUpdaterTest::mergeBucketLists(const std::string& existingData,
+                                      const std::string& newData,
+                                      bool includeBucketInfo)
+{
+    lib::ClusterState threeNodeState("distributor:1 storage:3");
+    return mergeBucketLists(threeNodeState, existingData,
+                            threeNodeState, newData,
+                            includeBucketInfo);
+}
+
+// Table-driven verification of how bucket lists from different cluster
+// state transitions are merged into the DB (node loss, node addition,
+// lost disks, empty re-reports and invalid-to-empty transitions).
+void
+BucketDBUpdaterTest::testPendingClusterStateMerge()
+{
+    // Simple initializing case - ask all nodes for info
+    CPPUNIT_ASSERT_EQUAL(
+            // Result is on the form: [bucket w/o count bits]:[node indexes]|..
+            std::string("4:0,1|2:0,1|6:1,2|1:0,2|5:2,0|3:2,1|"),
+            // Input is on the form: [node]:[bucket w/o count bits]|...
+            mergeBucketLists(
+                    "",
+                    "0:1,2,4,5|1:2,3,4,6|2:1,3,5,6"));
+
+    // Node came up with fewer buckets (lost disk)
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("4:1|2:0,1|6:1,2|1:2,0|5:2|3:2,1|"),
+            mergeBucketLists(
+                    lib::ClusterState("distributor:1 storage:3"),
+                    "0:1,2,4,5|1:2,3,4,6|2:1,3,5,6",
+                    lib::ClusterState("distributor:1 storage:3 .0.d:3 .0.d.1.s:d"),
+                    "0:1,2")
+            );
+
+    // New node came up
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("4:0,1|2:0,1|6:1,2,3|1:0,2,3|5:2,0,3|3:2,1,3|"),
+            mergeBucketLists(
+                    "0:1,2,4,5|1:2,3,4,6|2:1,3,5,6",
+                    "3:1,3,5,6"));
+
+    // Node came up with some buckets removed and some added
+    // Buckets that were removed should not be removed as the node
+    // didn't lose a disk.
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("8:0|4:0,1|2:0,1|6:1,0,2|1:0,2|5:2,0|3:2,1|"),
+            mergeBucketLists(
+                    "0:1,2,4,5|1:2,3,4,6|2:1,3,5,6",
+                    "0:1,2,6,8"));
+
+    // Node came up with no buckets
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("4:1|2:1|6:1,2|1:2|5:2|3:2,1|"),
+            mergeBucketLists(
+                    lib::ClusterState("distributor:1 storage:3"),
+                    "0:1,2,4,5|1:2,3,4,6|2:1,3,5,6",
+                    lib::ClusterState("distributor:1 storage:3 .0.d:3 .0.d.1.s:d"),
+                    "0:")
+            );
+
+    // One node lost a disk, another was just reasked (distributor
+    // change)
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("2:0,1|6:1,2|1:2,0|5:2|3:2,1|"),
+            mergeBucketLists(
+                    lib::ClusterState("distributor:1 storage:3"),
+                    "0:1,2,4,5|1:2,3,6|2:1,3,5,6",
+                    lib::ClusterState("distributor:1 storage:3 .0.d:3 .0.d.1.s:d"),
+                    "0:1,2|1:2,3")
+            );
+
+    // Bucket info format is "bucketid/checksum/count/size"
+    // Node went from initializing to up and invalid bucket went to empty.
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("2:0/0/0/0/t|"),
+            mergeBucketLists(
+                    "0:2/0/0/1",
+                    "0:2/0/0/0",
+                    true));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("5:1/2/3/4/u,0/0/0/0/u|"),
+            mergeBucketLists("", "0:5/0/0/0|1:5/2/3/4", true));
+}
+
+// An initializing node coming up with changed (non-invalid) replica info
+// must have the newer info win in the merged DB.
+void
+BucketDBUpdaterTest::testPendingClusterStateMergeReplicaChanged()
+{
+    // Node went from initializing to up and non-invalid bucket changed.
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("2:0/2/3/4/t|3:0/2/4/6/t|"),
+            mergeBucketLists(
+                    lib::ClusterState("distributor:1 storage:1 .0.s:i"),
+                    "0:2/1/2/3,3/2/4/6",
+                    lib::ClusterState("distributor:1 storage:1"),
+                    "0:2/2/3/4,3/2/4/6",
+                    true));
+}
+
+// A reply to a recheck request arriving after ownership of the bucket has
+// moved away in the *current* cluster state must not resurrect the bucket
+// in the database.
+void
+BucketDBUpdaterTest::testNoDbResurrectionForBucketNotOwnedInCurrentState()
+{
+    document::BucketId bucket(16, 3);
+    lib::ClusterState stateBefore("distributor:1 storage:1");
+    {
+        uint32_t expectedMsgs = 1, dummyBucketsToReturn = 1;
+        setAndEnableClusterState(stateBefore, expectedMsgs, dummyBucketsToReturn);
+    }
+    _sender.clear();
+
+    getBucketDBUpdater().recheckBucketInfo(0, bucket);
+
+    CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.commands.size());
+    std::shared_ptr<api::RequestBucketInfoCommand> rbi(
+            std::dynamic_pointer_cast<RequestBucketInfoCommand>(
+                    _sender.commands[0]));
+
+    // Ownership moves away before the recheck reply arrives.
+    lib::ClusterState stateAfter("distributor:3 storage:3");
+
+    {
+        uint32_t expectedMsgs = 2, dummyBucketsToReturn = 1;
+        setAndEnableClusterState(stateAfter, expectedMsgs, dummyBucketsToReturn);
+    }
+    CPPUNIT_ASSERT(!getBucketDBUpdater().getDistributorComponent()
+                    .ownsBucketInCurrentState(bucket));
+
+    sendFakeReplyForSingleBucketRequest(*rbi);
+
+    // The stale reply must not re-insert the bucket.
+    CPPUNIT_ASSERT_EQUAL(std::string("NONEXISTING"), dumpBucket(bucket));
+}
+
+// Same as above, but ownership is only lost in a *pending* (not yet
+// enabled) state; the stale reply must still not resurrect the bucket.
+void
+BucketDBUpdaterTest::testNoDbResurrectionForBucketNotOwnedInPendingState()
+{
+    document::BucketId bucket(16, 3);
+    lib::ClusterState stateBefore("distributor:1 storage:1");
+    {
+        uint32_t expectedMsgs = 1, dummyBucketsToReturn = 1;
+        setAndEnableClusterState(stateBefore, expectedMsgs, dummyBucketsToReturn);
+    }
+    _sender.clear();
+
+    getBucketDBUpdater().recheckBucketInfo(0, bucket);
+
+    CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.commands.size());
+    std::shared_ptr<api::RequestBucketInfoCommand> rbi(
+            std::dynamic_pointer_cast<RequestBucketInfoCommand>(
+                    _sender.commands[0]));
+
+    lib::ClusterState stateAfter("distributor:3 storage:3");
+    // Set, but _don't_ enable cluster state. We want it to be pending.
+    setSystemState(stateAfter);
+    // Still owned in the current state, but not in the pending one.
+    CPPUNIT_ASSERT(getBucketDBUpdater().getDistributorComponent()
+                   .ownsBucketInCurrentState(bucket));
+    CPPUNIT_ASSERT(!getBucketDBUpdater()
+                    .checkOwnershipInPendingState(bucket).isOwned());
+
+    sendFakeReplyForSingleBucketRequest(*rbi);
+
+    CPPUNIT_ASSERT_EQUAL(std::string("NONEXISTING"), dumpBucket(bucket));
+}
+
+/*
+ * If we get a distribution config change, it's important that cluster states that
+ * arrive after this--but _before_ the pending cluster state has finished--must trigger
+ * a full bucket info fetch no matter what the cluster state change was! Otherwise, we
+ * will with a high likelihood end up not getting the complete view of the buckets in
+ * the cluster.
+ */
+void
+BucketDBUpdaterTest::testClusterStateAlwaysSendsFullFetchWhenDistributionChangePending()
+{
+    lib::ClusterState stateBefore("distributor:6 storage:6");
+    {
+        uint32_t expectedMsgs = 6, dummyBucketsToReturn = 1;
+        setAndEnableClusterState(stateBefore, expectedMsgs, dummyBucketsToReturn);
+    }
+    _sender.clear();
+    // Trigger a distribution change -> pending full fetch from all 6 nodes.
+    std::string distConfig(getDistConfig6Nodes3Groups());
+    {
+        _node->getComponentRegister().setDistribution(
+                std::make_shared<lib::Distribution>(distConfig));
+        _distributor->storageDistributionChanged();
+        _distributor->enableNextDistribution();
+    }
+    sortSentMessagesByIndex(_sender);
+    CPPUNIT_ASSERT_EQUAL(size_t(6), _sender.commands.size());
+    // Suddenly, a wild cluster state change appears! Even though this state
+    // does not in itself imply any bucket changes, it will still overwrite the
+    // pending cluster state and thus its state of pending bucket info requests.
+    setSystemState(lib::ClusterState("distributor:6 .2.t:12345 storage:6"));
+
+    // 6 outdated requests + 6 new full-fetch requests.
+    CPPUNIT_ASSERT_EQUAL(size_t(12), _sender.commands.size());
+
+    // Send replies for first 6 (outdated requests).
+    int numBuckets = 10;
+    for (uint32_t i = 0; i < 6; ++i) {
+        fakeBucketReply(
+                lib::ClusterState("distributor:6 storage:6"),
+                dynamic_cast<RequestBucketInfoCommand&>(*_sender.commands[i]),
+                i, numBuckets);
+    }
+    // No change from these.
+    assertCorrectBuckets(1, "distributor:6 storage:6");
+
+    // Send for current pending.
+    for (uint32_t i = 0; i < 6; ++i) {
+        fakeBucketReply(
+                lib::ClusterState("distributor:6 .2.t:12345 storage:6"),
+                dynamic_cast<RequestBucketInfoCommand&>(*_sender.commands[i + 6]),
+                i, numBuckets);
+    }
+    assertCorrectBuckets(numBuckets, "distributor:6 storage:6");
+    _sender.clear();
+
+    // No more pending global fetch; this should be a no-op state.
+    setSystemState(lib::ClusterState("distributor:6 .3.t:12345 storage:6"));
+    CPPUNIT_ASSERT_EQUAL(size_t(0), _sender.commands.size());
+}
+
+// A distribution config change must put the distributor in recovery mode,
+// but only once all resulting bucket info replies have been processed and
+// the pending state has been enabled.
+void
+BucketDBUpdaterTest::testChangedDistributionConfigTriggersRecoveryMode()
+{
+    setAndEnableClusterState(lib::ClusterState("distributor:6 storage:6"), 6, 20);
+    _sender.clear();
+    // First cluster state; implicit scan of all buckets which does not
+    // use normal recovery mode ticking-path.
+    CPPUNIT_ASSERT(!_distributor->isInRecoveryMode());
+
+    std::string distConfig(getDistConfig6Nodes4Groups());
+    _node->getComponentRegister().setDistribution(
+            std::make_shared<lib::Distribution>(distConfig));
+    _distributor->storageDistributionChanged();
+    _distributor->enableNextDistribution();
+    sortSentMessagesByIndex(_sender);
+    // No replies received yet, still no recovery mode.
+    CPPUNIT_ASSERT(!_distributor->isInRecoveryMode());
+
+    CPPUNIT_ASSERT_EQUAL(size_t(6), _sender.commands.size());
+    uint32_t numBuckets = 10;
+    for (uint32_t i = 0; i < 6; ++i) {
+        fakeBucketReply(
+                lib::ClusterState("distributor:6 storage:6"),
+                dynamic_cast<RequestBucketInfoCommand&>(*_sender.commands[i]),
+                i, numBuckets);
+    }
+
+    // Pending cluster state (i.e. distribution) has been enabled, which should
+    // cause recovery mode to be entered.
+    CPPUNIT_ASSERT(_distributor->isInRecoveryMode());
+}
+
+// Buckets inserted into the DB must get the current wall-clock time as
+// their last-GC timestamp so they are not immediately GC candidates.
+void
+BucketDBUpdaterTest::testNewlyAddedBucketsHaveCurrentTimeAsGcTimestamp()
+{
+    getClock().setAbsoluteTimeInSeconds(101234);
+    lib::ClusterState stateBefore("distributor:1 storage:1");
+    {
+        uint32_t expectedMsgs = 1, dummyBucketsToReturn = 1;
+        setAndEnableClusterState(stateBefore, expectedMsgs, dummyBucketsToReturn);
+    }
+
+    // setAndEnableClusterState adds n buckets with id (16, i)
+    document::BucketId bucket(16, 0);
+    BucketDatabase::Entry e(getBucket(bucket));
+    CPPUNIT_ASSERT(e.valid());
+    // Must equal the fake clock's time at insertion.
+    CPPUNIT_ASSERT_EQUAL(uint32_t(101234), e->getLastGarbageCollectionTime());
+}
+
+void
+BucketDBUpdaterTest::testNewerMutationsNotOverwrittenByEarlierBucketFetch()
+{
+ {
+ lib::ClusterState stateBefore("distributor:1 storage:1 .0.s:i");
+ uint32_t expectedMsgs = 1, dummyBucketsToReturn = 0;
+ // This step is required to make the distributor ready for accepting
+ // the below explicit database insertion towards node 0.
+ setAndEnableClusterState(stateBefore, expectedMsgs,
+ dummyBucketsToReturn);
+ }
+ _sender.clear();
+ getClock().setAbsoluteTimeInSeconds(1000);
+ lib::ClusterState state("distributor:1 storage:1");
+ setSystemState(state);
+ CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.commands.size());
+
+ // Before replying with the bucket info, simulate the arrival of a mutation
+ // reply that alters the state of the bucket with information that will be
+ // more recent than what is returned by the bucket info. This information
+ // must not be lost when the bucket info is later merged into the database.
+ document::BucketId bucket(16, 1);
+ constexpr uint64_t insertionTimestamp = 1001ULL * 1000000;
+ api::BucketInfo wantedInfo(5, 6, 7);
+ getBucketDBUpdater().getDistributorComponent().updateBucketDatabase(
+ bucket,
+ BucketCopy(insertionTimestamp, 0, wantedInfo),
+ DatabaseUpdate::CREATE_IF_NONEXISTING);
+
+ getClock().setAbsoluteTimeInSeconds(1002);
+ constexpr uint32_t bucketsReturned = 10; // Buckets (16, 0) ... (16, 9)
+ // Return bucket information which on the timeline might originate from
+ // anywhere between [1000, 1002]. Our assumption is that any mutations
+ // taking place after t=1000 must have their replies received and processed
+ // by this distributor and timestamped strictly higher than t=1000 (modulo
+ // clock skew, of course, but that is outside the scope of this). A mutation
+ // happening before t=1000 but receiving a reply at t>1000 does not affect
+ // correctness, as this should contain the same bucket info as that
+ // contained in the full bucket reply and the DB update is thus idempotent.
+ fakeBucketReply(
+ state,
+ dynamic_cast<RequestBucketInfoCommand&>(*_sender.commands[0]),
+ 0,
+ bucketsReturned);
+
+ BucketDatabase::Entry e(getBucket(bucket));
+ CPPUNIT_ASSERT_EQUAL(uint32_t(1), e->getNodeCount());
+ CPPUNIT_ASSERT_EQUAL(wantedInfo, e->getNodeRef(0).getBucketInfo());
+
+}
+
+std::vector<uint16_t>
+BucketDBUpdaterTest::getSendSet() const
+{
+ std::vector<uint16_t> nodes;
+ std::transform(_sender.commands.begin(),
+ _sender.commands.end(),
+ std::back_inserter(nodes),
+ [](auto& cmd)
+ {
+ auto& req(dynamic_cast<const api::RequestBucketInfoCommand&>(*cmd));
+ return req.getAddress()->getIndex();
+ });
+ return nodes;
+}
+
+std::vector<uint16_t>
+BucketDBUpdaterTest::getSentNodesWithPreemption(
+ const std::string& oldClusterState,
+ uint32_t expectedOldStateMessages,
+ const std::string& preemptedClusterState,
+ const std::string& newClusterState)
+{
+ lib::ClusterState stateBefore(oldClusterState);
+ uint32_t dummyBucketsToReturn = 10;
+ setAndEnableClusterState(lib::ClusterState(oldClusterState),
+ expectedOldStateMessages,
+ dummyBucketsToReturn);
+ _sender.clear();
+
+ setSystemState(lib::ClusterState(preemptedClusterState));
+ _sender.clear();
+ // Do not allow the pending state to become the active state; trigger a
+ // new transition without ACKing the info requests first. This will
+ // overwrite the pending state entirely.
+ setSystemState(lib::ClusterState(newClusterState));
+ return getSendSet();
+}
+
+using nodeVec = std::vector<uint16_t>;
+
+/*
+ * If we don't carry over the set of nodes that we need to fetch from,
+ * a naive comparison between the active state and the new state will
+ * make it appear to the distributor that nothing has changed, as any
+ * database modifications caused by intermediate states will not be
+ * accounted for (basically the ABA problem in a distributed setting).
+ */
+void
+BucketDBUpdaterTest::preemptedDistrChangeCarriesNodeSetOverToNextStateFetch()
+{
+ CPPUNIT_ASSERT_EQUAL(
+ (nodeVec{0, 1, 2, 3, 4, 5}),
+ getSentNodesWithPreemption("version:1 distributor:6 storage:6", 6,
+ "version:2 distributor:6 .5.s:d storage:6",
+ "version:3 distributor:6 storage:6"));
+}
+
+void
+BucketDBUpdaterTest::preemptedStorChangeCarriesNodeSetOverToNextStateFetch()
+{
+ CPPUNIT_ASSERT_EQUAL(
+ (nodeVec{2, 3}),
+ getSentNodesWithPreemption(
+ "version:1 distributor:6 storage:6 .2.s:d", 5,
+ "version:2 distributor:6 storage:6 .2.s:d .3.s:d",
+ "version:3 distributor:6 storage:6"));
+}
+
+void
+BucketDBUpdaterTest::preemptedStorageNodeDownMustBeReFetched()
+{
+ CPPUNIT_ASSERT_EQUAL(
+ (nodeVec{2}),
+ getSentNodesWithPreemption(
+ "version:1 distributor:6 storage:6", 6,
+ "version:2 distributor:6 storage:6 .2.s:d",
+ "version:3 distributor:6 storage:6"));
+}
+
+void
+BucketDBUpdaterTest::doNotSendToPreemptedNodeNowInDownState()
+{
+ CPPUNIT_ASSERT_EQUAL(
+ nodeVec{},
+ getSentNodesWithPreemption(
+ "version:1 distributor:6 storage:6 .2.s:d", 5,
+ "version:2 distributor:6 storage:6", // Sends to 2.
+ "version:3 distributor:6 storage:6 .2.s:d")); // 2 down again.
+}
+
+void
+BucketDBUpdaterTest::doNotSendToPreemptedNodeNotPartOfNewState()
+{
+ // Even though 100 nodes are preempted, not all of these should be part
+ // of the request afterwards when only 6 are part of the state.
+ CPPUNIT_ASSERT_EQUAL(
+ (nodeVec{0, 1, 2, 3, 4, 5}),
+ getSentNodesWithPreemption(
+ "version:1 distributor:6 storage:100", 100,
+ "version:2 distributor:5 .4.s:d storage:100",
+ "version:3 distributor:6 storage:6"));
+}
+
+void
+BucketDBUpdaterTest::outdatedNodeSetClearedAfterSuccessfulStateCompletion()
+{
+ lib::ClusterState stateBefore(
+ "version:1 distributor:6 storage:6 .1.t:1234");
+ uint32_t expectedMsgs = 6, dummyBucketsToReturn = 10;
+ setAndEnableClusterState(stateBefore, expectedMsgs, dummyBucketsToReturn);
+ _sender.clear();
+ // New cluster state that should not by itself trigger any new fetches,
+ // unless outdated node set is somehow not cleared after an enabled
+ // (completed) cluster state has been set.
+ lib::ClusterState stateAfter("version:3 distributor:6 storage:6");
+ setSystemState(stateAfter);
+ CPPUNIT_ASSERT_EQUAL(size_t(0), _sender.commands.size());
+}
+
+// XXX test currently disabled since distribution config isn't used
+// at all in order to deduce the set of nodes to send to. This might not matter
+// in practice since it is assumed that the cluster state matching the new
+// distribution config will follow very shortly after the config has been
+// applied to the node. The new cluster state will then send out requests to
+// the correct node set.
+void
+BucketDBUpdaterTest::clusterConfigDownsizeOnlySendsToAvailableNodes()
+{
+ uint32_t expectedMsgs = 6, dummyBucketsToReturn = 20;
+ setAndEnableClusterState(lib::ClusterState("distributor:6 storage:6"),
+ expectedMsgs, dummyBucketsToReturn);
+ _sender.clear();
+
+ // Intentionally trigger a racing config change which arrives before the
+ // new cluster state representing it.
+ std::string distConfig(getDistConfig3Nodes1Group());
+ _node->getComponentRegister().setDistribution(
+ std::make_shared<lib::Distribution>(distConfig));
+ _distributor->storageDistributionChanged();
+ _distributor->enableNextDistribution();
+ sortSentMessagesByIndex(_sender);
+
+ CPPUNIT_ASSERT_EQUAL((nodeVec{0, 1, 2}), getSendSet());
+}
+
+void
+BucketDBUpdaterTest::changedDiskSetTriggersReFetch()
+{
+ // Same number of online disks, but the set of disks has changed.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("1"),
+ getSentNodes("distributor:2 storage:2 .1.d:3 .1.d.2.s:d",
+ "distributor:2 storage:2 .1.d:3 .1.d.1.s:d"));
+}
+
+/**
+ * Test scenario where a cluster is downsized by removing a subset of the nodes
+ * from the distribution configuration. The system must be able to deal with
+ * a scenario where the set of nodes between two cluster states across a config
+ * change may differ.
+ *
+ * See VESPA-790 for details.
+ */
+void
+BucketDBUpdaterTest::nodeMissingFromConfigIsTreatedAsNeedingOwnershipTransfer()
+{
+ uint32_t expectedMsgs = 3, dummyBucketsToReturn = 1;
+ setAndEnableClusterState(lib::ClusterState("distributor:3 storage:3"),
+ expectedMsgs, dummyBucketsToReturn);
+ _sender.clear();
+
+ // Cluster goes from {0, 1, 2} -> {0, 1}. This leaves us with a config
+ // that does not contain node 2 while the _active_ cluster state still
+ // contains this node.
+ const char* downsizeCfg =
+ "redundancy 2\n"
+ "distributor_auto_ownership_transfer_on_whole_group_down true\n"
+ "group[2]\n"
+ "group[0].name \"invalid\"\n"
+ "group[0].index \"invalid\"\n"
+ "group[0].partitions 1|*\n"
+ "group[0].nodes[0]\n"
+ "group[1].name rack0\n"
+ "group[1].index 0\n"
+ "group[1].nodes[2]\n"
+ "group[1].nodes[0].index 0\n"
+ "group[1].nodes[1].index 1\n";
+
+ _node->getComponentRegister().setDistribution(
+ std::make_shared<lib::Distribution>(downsizeCfg));
+ _distributor->storageDistributionChanged();
+ _distributor->enableNextDistribution();
+ sortSentMessagesByIndex(_sender);
+ _sender.clear();
+
+ // Attempt to apply state with {0, 1} set. This will compare the new state
+ // with the previous state, which still has node 2.
+ expectedMsgs = 2;
+ setAndEnableClusterState(lib::ClusterState("distributor:2 storage:2"),
+ expectedMsgs, dummyBucketsToReturn);
+
+ CPPUNIT_ASSERT_EQUAL((nodeVec{0, 1}), getSendSet());
+}
+
+} // distributor
+} // storage
diff --git a/storage/src/tests/distributor/bucketgctimecalculatortest.cpp b/storage/src/tests/distributor/bucketgctimecalculatortest.cpp
new file mode 100644
index 00000000000..39bef3ec395
--- /dev/null
+++ b/storage/src/tests/distributor/bucketgctimecalculatortest.cpp
@@ -0,0 +1,114 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <chrono>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/storage/distributor/bucketgctimecalculator.h>
+
+namespace storage {
+namespace distributor {
+
+struct MockBucketIdHasher : public BucketGcTimeCalculator::BucketIdHasher
+{
+ size_t nextGeneratedHash {0};
+
+ size_t doHash(const document::BucketId&) const override {
+ return nextGeneratedHash;
+ }
+};
+
+struct BucketGcTimeCalculatorTest : public CppUnit::TestFixture
+{
+ void noGcIfAlreadyCheckedAfterStartPoint();
+ void gcIfNotRunInCurrentPeriodAndCheckPeriodPassed();
+ void noGcIfNotRunInCurrentPeriodAndCheckPeriodNotPassed();
+ void noGcIfCheckIntervalIsZero();
+ void identityHasherReturnsBucketId();
+
+ BucketGcTimeCalculatorTest();
+
+ CPPUNIT_TEST_SUITE(BucketGcTimeCalculatorTest);
+ CPPUNIT_TEST(noGcIfAlreadyCheckedAfterStartPoint);
+ CPPUNIT_TEST(gcIfNotRunInCurrentPeriodAndCheckPeriodPassed);
+ CPPUNIT_TEST(noGcIfNotRunInCurrentPeriodAndCheckPeriodNotPassed);
+ CPPUNIT_TEST(noGcIfCheckIntervalIsZero);
+ CPPUNIT_TEST(identityHasherReturnsBucketId);
+ CPPUNIT_TEST_SUITE_END();
+
+private:
+ // Ease of reading aliases
+ using CurrentTime = std::chrono::seconds;
+ using LastRunAt = std::chrono::seconds;
+
+ MockBucketIdHasher hasher;
+ std::chrono::seconds checkInterval;
+ BucketGcTimeCalculator calc;
+ document::BucketId b;
+};
+
+BucketGcTimeCalculatorTest::BucketGcTimeCalculatorTest()
+ : checkInterval(1000),
+ calc(hasher, checkInterval),
+ b(16, 1)
+{
+ hasher.nextGeneratedHash = 500;
+}
+
+CPPUNIT_TEST_SUITE_REGISTRATION(BucketGcTimeCalculatorTest);
+
+void
+BucketGcTimeCalculatorTest::noGcIfAlreadyCheckedAfterStartPoint()
+{
+ // Note: LastRun(0) is considered to be within the current period.
+ CPPUNIT_ASSERT(!calc.shouldGc(b, CurrentTime(0), LastRunAt(0)));
+ CPPUNIT_ASSERT(!calc.shouldGc(b, CurrentTime(499), LastRunAt(0)));
+ CPPUNIT_ASSERT(!calc.shouldGc(b, CurrentTime(999), LastRunAt(500)));
+
+ CPPUNIT_ASSERT(!calc.shouldGc(b, CurrentTime(1000), LastRunAt(1000)));
+ CPPUNIT_ASSERT(!calc.shouldGc(b, CurrentTime(1234), LastRunAt(1100)));
+ CPPUNIT_ASSERT(!calc.shouldGc(b, CurrentTime(1600), LastRunAt(1500)));
+}
+
+void
+BucketGcTimeCalculatorTest::gcIfNotRunInCurrentPeriodAndCheckPeriodPassed()
+{
+ CPPUNIT_ASSERT(calc.shouldGc(b, CurrentTime(500), LastRunAt(0)));
+ CPPUNIT_ASSERT(calc.shouldGc(b, CurrentTime(1600), LastRunAt(500)));
+ // Note: this may look wrong, but is correct since GC should have been
+ // scheduled _after_ 1499 so this is most likely the case where a bucket
+ // has been added to the database at this point in time. Not treating
+ // this as a valid GC scenario would mean newly added buckets would have to
+ // wait until the next period to be considered. If the period is long and
+ // the system is unstable (causing many bucket handoffs), we'd risk not
+ // being able to schedule many buckets at all.
+ CPPUNIT_ASSERT(calc.shouldGc(b, CurrentTime(1600), LastRunAt(1499)));
+
+ CPPUNIT_ASSERT(calc.shouldGc(b, CurrentTime(2000), LastRunAt(500)));
+ CPPUNIT_ASSERT(calc.shouldGc(b, CurrentTime(2600), LastRunAt(1500)));
+}
+
+void
+BucketGcTimeCalculatorTest::noGcIfNotRunInCurrentPeriodAndCheckPeriodNotPassed()
+{
+ CPPUNIT_ASSERT(!calc.shouldGc(b, CurrentTime(1000), LastRunAt(500)));
+}
+
+void
+BucketGcTimeCalculatorTest::noGcIfCheckIntervalIsZero()
+{
+ BucketGcTimeCalculator calc2(hasher, std::chrono::seconds(0));
+ CPPUNIT_ASSERT(!calc2.shouldGc(b, CurrentTime(5000), LastRunAt(0)));
+}
+
+void
+BucketGcTimeCalculatorTest::identityHasherReturnsBucketId()
+{
+ BucketGcTimeCalculator::BucketIdIdentityHasher hasher2;
+ document::BucketId bucket(36, 1234);
+
+ CPPUNIT_ASSERT_EQUAL(bucket.getId(), hasher2.hash(bucket));
+}
+
+} // distributor
+} // storage
+
diff --git a/storage/src/tests/distributor/bucketstateoperationtest.cpp b/storage/src/tests/distributor/bucketstateoperationtest.cpp
new file mode 100644
index 00000000000..1477f1d6ed0
--- /dev/null
+++ b/storage/src/tests/distributor/bucketstateoperationtest.cpp
@@ -0,0 +1,251 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <cppunit/extensions/HelperMacros.h>
+#include <tests/distributor/distributortestutil.h>
+#include <vespa/storage/distributor/operations/idealstate/setbucketstateoperation.h>
+#include <vespa/storageapi/messageapi/storagemessage.h>
+
+namespace storage {
+
+namespace distributor {
+
+class BucketStateOperationTest : public CppUnit::TestFixture,
+ public DistributorTestUtil
+{
+ CPPUNIT_TEST_SUITE(BucketStateOperationTest);
+ CPPUNIT_TEST(testActiveStateSupportedInBucketDb);
+ CPPUNIT_TEST(testActivateSingleNode);
+ CPPUNIT_TEST(testActivateAndDeactivateNodes);
+ CPPUNIT_TEST(testDoNotDeactivateIfActivateFails);
+ CPPUNIT_TEST(testBucketDbNotUpdatedOnFailure);
+ CPPUNIT_TEST_SUITE_END();
+
+private:
+ void testActiveStateSupportedInBucketDb();
+ void testActivateSingleNode();
+ void testActivateAndDeactivateNodes();
+ void testDoNotDeactivateIfActivateFails();
+ void testBucketDbNotUpdatedOnFailure();
+
+public:
+ void setUp()
+ {
+ createLinks();
+ }
+
+ void tearDown()
+ {
+ close();
+ }
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(BucketStateOperationTest);
+
+void
+BucketStateOperationTest::testActiveStateSupportedInBucketDb()
+{
+ document::BucketId bid(16, 1);
+ insertBucketInfo(bid, 0, 0xabc, 10, 1100, true, true);
+
+ BucketDatabase::Entry entry = getBucket(bid);
+ CPPUNIT_ASSERT(entry.valid());
+ CPPUNIT_ASSERT(entry->getNode(0)->active());
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("node(idx=0,crc=0xabc,docs=10/10,bytes=1100/1100,"
+ "trusted=true,active=true)"),
+ entry->getNode(0)->toString());
+}
+
+void
+BucketStateOperationTest::testActivateSingleNode()
+{
+ document::BucketId bid(16, 1);
+ insertBucketInfo(bid, 0, 0xabc, 10, 1100, true, false);
+
+ BucketAndNodes bucketAndNodes(bid, toVector<uint16_t>(0));
+ std::vector<uint16_t> active;
+ active.push_back(0);
+ SetBucketStateOperation op("storage", bucketAndNodes, active);
+
+ op.setIdealStateManager(&getIdealStateManager());
+ op.start(_sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL((size_t)1, _sender.commands.size());
+
+ std::shared_ptr<api::StorageCommand> msg = _sender.commands[0];
+ CPPUNIT_ASSERT(msg->getType() == api::MessageType::SETBUCKETSTATE);
+ CPPUNIT_ASSERT_EQUAL(
+ api::StorageMessageAddress(
+ "storage", lib::NodeType::STORAGE, 0).toString(),
+ msg->getAddress()->toString());
+
+ const api::SetBucketStateCommand& cmd(
+ dynamic_cast<const api::SetBucketStateCommand&>(*msg));
+ CPPUNIT_ASSERT_EQUAL(bid, cmd.getBucketId());
+ CPPUNIT_ASSERT_EQUAL(api::SetBucketStateCommand::ACTIVE, cmd.getState());
+
+ std::shared_ptr<api::StorageReply> reply(msg->makeReply().release());
+ op.receive(_sender, reply);
+
+ BucketDatabase::Entry entry = getBucket(bid);
+ CPPUNIT_ASSERT(entry.valid());
+ CPPUNIT_ASSERT(entry->getNodeRef(0).active());
+
+ CPPUNIT_ASSERT(op.ok());
+
+ // TODO: check that it's done
+}
+
+void
+BucketStateOperationTest::testActivateAndDeactivateNodes()
+{
+ document::BucketId bid(16, 1);
+ insertBucketInfo(bid, 0, 0xabc, 10, 1100, false, true);
+ insertBucketInfo(bid, 1, 0xdef, 15, 1500, false, false);
+
+ BucketAndNodes bucketAndNodes(bid, toVector<uint16_t>(0, 1));
+ std::vector<uint16_t> active;
+ active.push_back(1);
+ SetBucketStateOperation op("storage", bucketAndNodes, active);
+
+ op.setIdealStateManager(&getIdealStateManager());
+ op.start(_sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL((size_t)1, _sender.commands.size());
+ {
+ std::shared_ptr<api::StorageCommand> msg = _sender.commands[0];
+ CPPUNIT_ASSERT(msg->getType() == api::MessageType::SETBUCKETSTATE);
+ CPPUNIT_ASSERT_EQUAL(
+ api::StorageMessageAddress(
+ "storage", lib::NodeType::STORAGE, 1).toString(),
+ msg->getAddress()->toString());
+
+ const api::SetBucketStateCommand& cmd(
+ dynamic_cast<const api::SetBucketStateCommand&>(*msg));
+ CPPUNIT_ASSERT_EQUAL(bid, cmd.getBucketId());
+ CPPUNIT_ASSERT_EQUAL(api::SetBucketStateCommand::ACTIVE, cmd.getState());
+
+ std::shared_ptr<api::StorageReply> reply(msg->makeReply().release());
+ op.receive(_sender, reply);
+ }
+
+ CPPUNIT_ASSERT_EQUAL((size_t)2, _sender.commands.size());
+ {
+ std::shared_ptr<api::StorageCommand> msg = _sender.commands[1];
+ CPPUNIT_ASSERT(msg->getType() == api::MessageType::SETBUCKETSTATE);
+ CPPUNIT_ASSERT_EQUAL(
+ api::StorageMessageAddress(
+ "storage", lib::NodeType::STORAGE, 0).toString(),
+ msg->getAddress()->toString());
+
+ const api::SetBucketStateCommand& cmd(
+ dynamic_cast<const api::SetBucketStateCommand&>(*msg));
+ CPPUNIT_ASSERT_EQUAL(bid, cmd.getBucketId());
+ CPPUNIT_ASSERT_EQUAL(api::SetBucketStateCommand::INACTIVE, cmd.getState());
+
+ std::shared_ptr<api::StorageReply> reply(msg->makeReply().release());
+ op.receive(_sender, reply);
+ }
+
+ BucketDatabase::Entry entry = getBucket(bid);
+ CPPUNIT_ASSERT(entry.valid());
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("node(idx=0,crc=0xabc,docs=10/10,bytes=1100/1100,"
+ "trusted=true,active=false)"),
+ entry->getNodeRef(0).toString());
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("node(idx=1,crc=0xdef,docs=15/15,bytes=1500/1500,"
+ "trusted=false,active=true)"),
+ entry->getNodeRef(1).toString());
+
+ CPPUNIT_ASSERT(op.ok());
+}
+
+void
+BucketStateOperationTest::testDoNotDeactivateIfActivateFails()
+{
+ document::BucketId bid(16, 1);
+ insertBucketInfo(bid, 0, 0xabc, 10, 1100, false, true);
+ insertBucketInfo(bid, 1, 0xdef, 15, 1500, false, false);
+
+ BucketAndNodes bucketAndNodes(bid, toVector<uint16_t>(0, 1));
+ std::vector<uint16_t> active;
+ active.push_back(1);
+ SetBucketStateOperation op("storage", bucketAndNodes, active);
+
+ op.setIdealStateManager(&getIdealStateManager());
+ op.start(_sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL((size_t)1, _sender.commands.size());
+ {
+ std::shared_ptr<api::StorageCommand> msg = _sender.commands[0];
+ CPPUNIT_ASSERT(msg->getType() == api::MessageType::SETBUCKETSTATE);
+ CPPUNIT_ASSERT_EQUAL(
+ api::StorageMessageAddress(
+ "storage", lib::NodeType::STORAGE, 1).toString(),
+ msg->getAddress()->toString());
+
+ const api::SetBucketStateCommand& cmd(
+ dynamic_cast<const api::SetBucketStateCommand&>(*msg));
+ CPPUNIT_ASSERT_EQUAL(bid, cmd.getBucketId());
+ CPPUNIT_ASSERT_EQUAL(api::SetBucketStateCommand::ACTIVE, cmd.getState());
+
+ std::shared_ptr<api::StorageReply> reply(msg->makeReply().release());
+ reply->setResult(api::ReturnCode(api::ReturnCode::ABORTED, "aaarg!"));
+ op.receive(_sender, reply);
+ }
+
+ CPPUNIT_ASSERT_EQUAL((size_t)1, _sender.commands.size());
+
+ BucketDatabase::Entry entry = getBucket(bid);
+ CPPUNIT_ASSERT(entry.valid());
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("node(idx=0,crc=0xabc,docs=10/10,bytes=1100/1100,"
+ "trusted=true,active=true)"),
+ entry->getNodeRef(0).toString());
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("node(idx=1,crc=0xdef,docs=15/15,bytes=1500/1500,"
+ "trusted=false,active=false)"),
+ entry->getNodeRef(1).toString());
+
+ CPPUNIT_ASSERT(!op.ok());
+}
+
+void
+BucketStateOperationTest::testBucketDbNotUpdatedOnFailure()
+{
+ document::BucketId bid(16, 1);
+ insertBucketInfo(bid, 0, 0xabc, 10, 1100, true, false);
+
+ BucketAndNodes bucketAndNodes(bid, toVector<uint16_t>(0));
+ std::vector<uint16_t> active;
+ active.push_back(0);
+ SetBucketStateOperation op("storage", bucketAndNodes, active);
+
+ op.setIdealStateManager(&getIdealStateManager());
+ op.start(_sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL((size_t)1, _sender.commands.size());
+
+ std::shared_ptr<api::StorageCommand> msg = _sender.commands[0];
+ CPPUNIT_ASSERT(msg->getType() == api::MessageType::SETBUCKETSTATE);
+ CPPUNIT_ASSERT_EQUAL(
+ api::StorageMessageAddress(
+ "storage", lib::NodeType::STORAGE, 0).toString(),
+ msg->getAddress()->toString());
+
+ std::shared_ptr<api::StorageReply> reply(msg->makeReply().release());
+ reply->setResult(api::ReturnCode(api::ReturnCode::ABORTED, "aaarg!"));
+ op.receive(_sender, reply);
+
+ BucketDatabase::Entry entry = getBucket(bid);
+ CPPUNIT_ASSERT(entry.valid());
+ // Should not be updated
+ CPPUNIT_ASSERT(!entry->getNodeRef(0).active());
+
+ CPPUNIT_ASSERT(!op.ok());
+}
+
+} // namespace distributor
+
+} // namespace storage
diff --git a/storage/src/tests/distributor/distributor_host_info_reporter_test.cpp b/storage/src/tests/distributor/distributor_host_info_reporter_test.cpp
new file mode 100644
index 00000000000..65ccc65bdcf
--- /dev/null
+++ b/storage/src/tests/distributor/distributor_host_info_reporter_test.cpp
@@ -0,0 +1,225 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/storage/distributor/distributor_host_info_reporter.h>
+#include <vespa/storage/distributor/latency_statistics_provider.h>
+#include <vespa/storage/distributor/min_replica_provider.h>
+#include <vespa/vespalib/data/slime/slime.h>
+#include <vespa/vespalib/io/fileutil.h>
+#include <vespa/vespalib/util/jsonstream.h>
+#include <tests/common/hostreporter/util.h>
+
+namespace storage {
+namespace distributor {
+
+using End = vespalib::JsonStream::End;
+using File = vespalib::File;
+using Object = vespalib::JsonStream::Object;
+
+class DistributorHostInfoReporterTest : public CppUnit::TestFixture
+{
+ CPPUNIT_TEST_SUITE(DistributorHostInfoReporterTest);
+ CPPUNIT_TEST(hostInfoWithPutLatenciesOnly);
+ CPPUNIT_TEST(hostInfoAllInfo);
+ CPPUNIT_TEST(generateExampleJson);
+ CPPUNIT_TEST(noReportGeneratedIfDisabled);
+ CPPUNIT_TEST_SUITE_END();
+
+ void hostInfoWithPutLatenciesOnly();
+ void hostInfoAllInfo();
+ void verifyReportedNodeLatencies(
+ const vespalib::Slime& root,
+ uint16_t node,
+ int64_t latencySum,
+ int64_t count);
+ void generateExampleJson();
+ void noReportGeneratedIfDisabled();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(DistributorHostInfoReporterTest);
+
+using ms = std::chrono::milliseconds;
+
+namespace {
+
+OperationStats
+makeOpStats(std::chrono::milliseconds totalLatency, uint64_t numRequests)
+{
+ OperationStats stats;
+ stats.totalLatency = totalLatency;
+ stats.numRequests = numRequests;
+ return stats;
+}
+
+// My kingdom for GoogleMock!
+struct MockedLatencyStatisticsProvider : LatencyStatisticsProvider
+{
+ NodeStatsSnapshot returnedSnapshot;
+
+ NodeStatsSnapshot doGetLatencyStatistics() const {
+ return returnedSnapshot;
+ }
+};
+
+struct MockedMinReplicaProvider : MinReplicaProvider
+{
+ std::unordered_map<uint16_t, uint32_t> minReplica;
+
+ std::unordered_map<uint16_t, uint32_t> getMinReplica() const override {
+ return minReplica;
+ }
+};
+
+const vespalib::slime::Inspector&
+getNode(const vespalib::Slime& root, uint16_t nodeIndex)
+{
+ auto& storage_nodes = root.get()["distributor"]["storage-nodes"];
+ const size_t n = storage_nodes.entries();
+ for (size_t i = 0; i < n; ++i) {
+ if (storage_nodes[i]["node-index"].asLong() == nodeIndex) {
+ return storage_nodes[i];
+ }
+ }
+ throw std::runtime_error("No node found with index "
+ + std::to_string(nodeIndex));
+}
+
+int
+getMinReplica(const vespalib::Slime& root, uint16_t nodeIndex)
+{
+ return getNode(root, nodeIndex)["min-current-replication-factor"].asLong();
+}
+
+const vespalib::slime::Inspector&
+getLatenciesForNode(const vespalib::Slime& root, uint16_t nodeIndex)
+{
+ return getNode(root, nodeIndex)["ops-latency"];
+}
+
+} // anon ns
+
+void
+DistributorHostInfoReporterTest::verifyReportedNodeLatencies(
+ const vespalib::Slime& root,
+ uint16_t node,
+ int64_t latencySum,
+ int64_t count)
+{
+ auto& latencies = getLatenciesForNode(root, node);
+ CPPUNIT_ASSERT_EQUAL(latencySum,
+ latencies["put"]["latency-ms-sum"].asLong());
+ CPPUNIT_ASSERT_EQUAL(count, latencies["put"]["count"].asLong());
+}
+
+void
+DistributorHostInfoReporterTest::hostInfoWithPutLatenciesOnly()
+{
+ MockedLatencyStatisticsProvider latencyStatsProvider;
+ MockedMinReplicaProvider minReplicaProvider;
+ DistributorHostInfoReporter reporter(latencyStatsProvider,
+ minReplicaProvider);
+
+ NodeStatsSnapshot snapshot;
+ snapshot.nodeToStats[0] = { makeOpStats(ms(10000), 3) };
+ snapshot.nodeToStats[5] = { makeOpStats(ms(25000), 7) };
+
+ latencyStatsProvider.returnedSnapshot = snapshot;
+
+ vespalib::Slime root;
+ util::reporterToSlime(reporter, root);
+ verifyReportedNodeLatencies(root, 0, 10000, 3);
+ verifyReportedNodeLatencies(root, 5, 25000, 7);
+}
+
+void
+DistributorHostInfoReporterTest::hostInfoAllInfo()
+{
+ MockedLatencyStatisticsProvider latencyStatsProvider;
+ MockedMinReplicaProvider minReplicaProvider;
+ DistributorHostInfoReporter reporter(latencyStatsProvider,
+ minReplicaProvider);
+
+ NodeStatsSnapshot latencySnapshot;
+ latencySnapshot.nodeToStats[0] = { makeOpStats(ms(10000), 3) };
+ latencySnapshot.nodeToStats[5] = { makeOpStats(ms(25000), 7) };
+ latencyStatsProvider.returnedSnapshot = latencySnapshot;
+
+ std::unordered_map<uint16_t, uint32_t> minReplica;
+ minReplica[0] = 2;
+ minReplica[5] = 9;
+ minReplicaProvider.minReplica = minReplica;
+
+ vespalib::Slime root;
+ util::reporterToSlime(reporter, root);
+ verifyReportedNodeLatencies(root, 0, 10000, 3);
+ verifyReportedNodeLatencies(root, 5, 25000, 7);
+
+ CPPUNIT_ASSERT_EQUAL(2, getMinReplica(root, 0));
+ CPPUNIT_ASSERT_EQUAL(9, getMinReplica(root, 5));
+}
+
+void
+DistributorHostInfoReporterTest::generateExampleJson()
+{
+ MockedLatencyStatisticsProvider latencyStatsProvider;
+ MockedMinReplicaProvider minReplicaProvider;
+ DistributorHostInfoReporter reporter(latencyStatsProvider,
+ minReplicaProvider);
+
+ NodeStatsSnapshot snapshot;
+ snapshot.nodeToStats[0] = { makeOpStats(ms(10000), 3) };
+ snapshot.nodeToStats[5] = { makeOpStats(ms(25000), 7) };
+ latencyStatsProvider.returnedSnapshot = snapshot;
+
+ std::unordered_map<uint16_t, uint32_t> minReplica;
+ minReplica[0] = 2;
+ minReplica[5] = 9;
+ minReplicaProvider.minReplica = minReplica;
+
+ vespalib::asciistream json;
+ vespalib::JsonStream stream(json, true);
+
+ stream << Object();
+ reporter.report(stream);
+ stream << End();
+ stream.finalize();
+
+ std::string jsonString = json.str();
+
+ std::string path = "../../../protocols/getnodestate/distributor.json";
+ std::string goldenString = File::readAll(path);
+
+ vespalib::slime::Memory goldenMemory(goldenString);
+ vespalib::Slime goldenSlime;
+ vespalib::slime::JsonFormat::decode(goldenMemory, goldenSlime);
+
+ vespalib::slime::Memory jsonMemory(jsonString);
+ vespalib::Slime jsonSlime;
+ vespalib::slime::JsonFormat::decode(jsonMemory, jsonSlime);
+
+ CPPUNIT_ASSERT_EQUAL(goldenSlime, jsonSlime);
+}
+
+void
+DistributorHostInfoReporterTest::noReportGeneratedIfDisabled()
+{
+ MockedLatencyStatisticsProvider latencyStatsProvider;
+ MockedMinReplicaProvider minReplicaProvider;
+ DistributorHostInfoReporter reporter(latencyStatsProvider,
+ minReplicaProvider);
+ reporter.enableReporting(false);
+
+ NodeStatsSnapshot snapshot;
+ snapshot.nodeToStats[0] = { makeOpStats(ms(10000), 3) };
+ snapshot.nodeToStats[5] = { makeOpStats(ms(25000), 7) };
+
+ latencyStatsProvider.returnedSnapshot = snapshot;
+
+ vespalib::Slime root;
+ util::reporterToSlime(reporter, root);
+ CPPUNIT_ASSERT_EQUAL(size_t(0), root.get().children());
+}
+
+} // distributor
+} // storage
+
diff --git a/storage/src/tests/distributor/distributortest.cpp b/storage/src/tests/distributor/distributortest.cpp
new file mode 100644
index 00000000000..b51c8dd3873
--- /dev/null
+++ b/storage/src/tests/distributor/distributortest.cpp
@@ -0,0 +1,691 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <iomanip>
+#include <iostream>
+#include <memory>
+#include <boost/assign/std/vector.hpp> // for 'operator+=()'
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/storage/bucketdb/distrbucketdb.h>
+#include <vespa/storage/distributor/idealstatemetricsset.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storageapi/message/bucketsplitting.h>
+#include <vespa/storageapi/message/visitor.h>
+#include <vespa/storageapi/message/removelocation.h>
+#include <vespa/storageframework/defaultimplementation/thread/threadpoolimpl.h>
+#include <tests/distributor/distributortestutil.h>
+#include <vespa/storage/config/config-stor-distributormanager.h>
+#include <tests/common/dummystoragelink.h>
+
+namespace storage {
+
+namespace distributor {
+
+/**
+ * CppUnit suite covering top-level Distributor behavior: operation
+ * generation and throttling, recovery-mode transitions, bucket database
+ * maintenance, metric update hooks and config propagation. Uses the
+ * link/harness setup provided by DistributorTestUtil.
+ */
+class Distributor_Test : public CppUnit::TestFixture,
+                         public DistributorTestUtil
+{
+    CPPUNIT_TEST_SUITE(Distributor_Test);
+    CPPUNIT_TEST(testOperationGeneration);
+    CPPUNIT_TEST(testOperationsGeneratedAndStartedWithoutDuplicates);
+    CPPUNIT_TEST(testRecoveryModeOnClusterStateChange);
+    CPPUNIT_TEST(testOperationsAreThrottled);
+    CPPUNIT_TEST_IGNORED(testRecoveryModeEntryResetsScanner);
+    CPPUNIT_TEST_IGNORED(testReprioritizeBucketOnMaintenanceReply);
+    CPPUNIT_TEST(testHandleUnknownMaintenanceReply);
+    CPPUNIT_TEST(testContainsTimeStatement);
+    CPPUNIT_TEST(testUpdateBucketDatabase);
+    CPPUNIT_TEST(testTickProcessesStatusRequests);
+    CPPUNIT_TEST(testMetricUpdateHookUpdatesPendingMaintenanceMetrics);
+    CPPUNIT_TEST(testPriorityConfigIsPropagatedToDistributorConfiguration);
+    CPPUNIT_TEST(testNoDbResurrectionForBucketNotOwnedInPendingState);
+    CPPUNIT_TEST(testAddedDbBucketsWithoutGcTimestampImplicitlyGetCurrentTime);
+    CPPUNIT_TEST(mergeStatsAreAccumulatedDuringDatabaseIteration);
+    CPPUNIT_TEST(statsGeneratedForPreemptedOperations);
+    CPPUNIT_TEST(hostInfoReporterConfigIsPropagatedToReporter);
+    CPPUNIT_TEST(replicaCountingModeIsConfiguredToTrustedByDefault);
+    CPPUNIT_TEST(replicaCountingModeConfigIsPropagatedToMetricUpdater);
+    CPPUNIT_TEST(bucketActivationIsEnabledByDefault);
+    CPPUNIT_TEST(bucketActivationConfigIsPropagatedToDistributorConfiguration);
+    CPPUNIT_TEST_SUITE_END();
+
+protected:
+    void testOperationGeneration();
+    void testOperationsGeneratedAndStartedWithoutDuplicates();
+    void testRecoveryModeOnClusterStateChange();
+    void testOperationsAreThrottled();
+    void testRecoveryModeEntryResetsScanner();
+    void testReprioritizeBucketOnMaintenanceReply();
+    void testHandleUnknownMaintenanceReply();
+    void testContainsTimeStatement();
+    void testUpdateBucketDatabase();
+    void testTickProcessesStatusRequests();
+    void testMetricUpdateHookUpdatesPendingMaintenanceMetrics();
+    void testPriorityConfigIsPropagatedToDistributorConfiguration();
+    void testNoDbResurrectionForBucketNotOwnedInPendingState();
+    void testAddedDbBucketsWithoutGcTimestampImplicitlyGetCurrentTime();
+    void mergeStatsAreAccumulatedDuringDatabaseIteration();
+    void statsGeneratedForPreemptedOperations();
+    void hostInfoReporterConfigIsPropagatedToReporter();
+    void replicaCountingModeIsConfiguredToTrustedByDefault();
+    void replicaCountingModeConfigIsPropagatedToMetricUpdater();
+    void bucketActivationIsEnabledByDefault();
+    void bucketActivationConfigIsPropagatedToDistributorConfiguration();
+
+public:
+    // Build the storage link chain / distributor harness before each test.
+    void setUp() {
+        createLinks();
+    };
+
+    // Tear the harness down again so tests stay independent.
+    void tearDown() {
+        close();
+    }
+
+private:
+    // Simple type aliases to make interfacing with certain utility functions
+    // easier. Note that this is only for readability and does not provide any
+    // added type safety.
+    using NodeCount = int;
+    using Redundancy = int;
+
+    using ConfigBuilder = vespa::config::content::core::StorDistributormanagerConfigBuilder;
+
+    // Applies the given config and makes the distributor adopt it at once.
+    void configureDistributor(const ConfigBuilder& config) {
+        getConfig().configure(config);
+        _distributor->enableNextConfig();
+    }
+
+    auto currentReplicaCountingMode() const noexcept {
+        return _distributor->_bucketDBMetricUpdater
+                .getMinimumReplicaCountingMode();
+    }
+
+    // Feeds msg (ownership is taken) to the distributor and returns the
+    // names of the commands sent downstream, clearing the sender afterwards.
+    std::string testOp(api::StorageMessage* msg)
+    {
+        api::StorageMessage::SP msgPtr(msg);
+        _distributor->handleMessage(msgPtr);
+
+        std::string tmp = _sender.getCommands();
+        _sender.clear();
+        return tmp;
+    }
+
+    void tickDistributorNTimes(uint32_t n) {
+        for (uint32_t i = 0; i < n; ++i) {
+            tick();
+        }
+    }
+
+    typedef bool ResetTrusted;
+
+    // Applies two comma-separated replica-change specs in sequence to bucket
+    // (16, 1) and returns the resulting DB entry dump; the DB is cleared
+    // afterwards. Spec entry format: "node:checksum[:t]" (":t" => trusted)
+    // or "node:r" (remove node).
+    std::string updateBucketDB(const std::string& firstState,
+                               const std::string& secondState,
+                               bool resetTrusted = false)
+    {
+        std::vector<std::string> states(toVector<std::string>(firstState, secondState));
+
+        for (uint32_t i = 0; i < states.size(); ++i) {
+            std::vector<uint16_t> removedNodes;
+            std::vector<BucketCopy> changedNodes;
+
+            vespalib::StringTokenizer tokenizer(states[i], ",");
+            for (uint32_t j = 0; j < tokenizer.size(); ++j) {
+                vespalib::StringTokenizer tokenizer2(tokenizer[j], ":");
+
+                // NOTE(review): the presence of any third token marks the
+                // copy trusted; its literal value is never checked.
+                bool trusted = false;
+                if (tokenizer2.size() > 2) {
+                    trusted = true;
+                }
+
+                uint16_t node = atoi(tokenizer2[0].c_str());
+                if (tokenizer2[1] == "r") {
+                    removedNodes.push_back(node);
+                } else {
+                    // Docs/bytes are derived from the checksum for brevity.
+                    uint32_t checksum = atoi(tokenizer2[1].c_str());
+                    changedNodes.push_back(
+                            BucketCopy(
+                                    i + 1,
+                                    node,
+                                    api::BucketInfo(
+                                            checksum,
+                                            checksum / 2,
+                                            checksum / 4)).setTrusted(trusted));
+                }
+            }
+
+            getExternalOperationHandler().removeNodesFromDB(document::BucketId(16, 1), removedNodes);
+
+            uint32_t flags(DatabaseUpdate::CREATE_IF_NONEXISTING
+                           | (resetTrusted ? DatabaseUpdate::RESET_TRUSTED : 0));
+
+            getExternalOperationHandler().updateBucketDatabase(document::BucketId(16, 1),
+                                                               changedNodes,
+                                                               flags);
+        }
+
+        std::string retVal = dumpBucket(document::BucketId(16, 1));
+        getBucketDatabase().clear();
+        return retVal;
+    }
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(Distributor_Test);
+
+void
+Distributor_Test::testOperationGeneration()
+{
+    // Externally fed messages (remove, visitor create) must each result in
+    // exactly one matching command being sent downstream.
+    setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
+
+    document::BucketId bid;
+    addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t");
+
+    CPPUNIT_ASSERT_EQUAL(std::string("Remove"),
+                         testOp(new api::RemoveCommand(
+                                        bid,
+                                        document::DocumentId("userdoc:m:1:foo"),
+                                        api::Timestamp(1234))));
+
+    api::CreateVisitorCommand* cmd = new api::CreateVisitorCommand("foo", "bar", "");
+    cmd->addBucketToBeVisited(document::BucketId(16, 1));
+    cmd->addBucketToBeVisited(document::BucketId());
+
+    CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create"), testOp(cmd));
+}
+
+void
+Distributor_Test::testOperationsGeneratedAndStartedWithoutDuplicates()
+{
+    // Six under-replicated buckets should trigger exactly six maintenance
+    // commands — repeated ticks must not re-generate duplicates.
+    setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
+
+    for (uint32_t i = 0; i < 6; ++i) {
+        addNodesToBucketDB(document::BucketId(16, i), "0=1");
+    }
+
+    tickDistributorNTimes(20);
+
+    // No further work should be pending once everything is started.
+    CPPUNIT_ASSERT(!tick());
+
+    CPPUNIT_ASSERT_EQUAL(6, (int)_sender.commands.size());
+}
+
+void
+Distributor_Test::testRecoveryModeOnClusterStateChange()
+{
+    // A cluster state change puts the distributor in recovery mode; it stays
+    // there until a full database scan has completed (one tick per bucket
+    // plus a final tick), and re-enters on the next state change.
+    setupDistributor(Redundancy(1), NodeCount(2),
+                     "storage:1 .0.s:d distributor:1");
+    _distributor->enableClusterState(
+            lib::ClusterState("storage:1 distributor:1"));
+
+    CPPUNIT_ASSERT(_distributor->isInRecoveryMode());
+    for (uint32_t i = 0; i < 3; ++i) {
+        addNodesToBucketDB(document::BucketId(16, i), "0=1");
+    }
+    for (int i = 0; i < 3; ++i) {
+        tick();
+        CPPUNIT_ASSERT(_distributor->isInRecoveryMode());
+    }
+    tick();
+    CPPUNIT_ASSERT(!_distributor->isInRecoveryMode());
+
+    _distributor->enableClusterState(lib::ClusterState("storage:2 distributor:1"));
+    CPPUNIT_ASSERT(_distributor->isInRecoveryMode());
+}
+
+void
+Distributor_Test::testOperationsAreThrottled()
+{
+    // With a pending-operation window of exactly 1, only a single
+    // maintenance command may be in flight regardless of how much work
+    // is queued up in the database.
+    setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
+    getConfig().setMinPendingMaintenanceOps(1);
+    getConfig().setMaxPendingMaintenanceOps(1);
+
+    for (uint32_t i = 0; i < 6; ++i) {
+        addNodesToBucketDB(document::BucketId(16, i), "0=1");
+    }
+    tickDistributorNTimes(20);
+    CPPUNIT_ASSERT_EQUAL(1, (int)_sender.commands.size());
+}
+
+void
+Distributor_Test::testRecoveryModeEntryResetsScanner()
+{
+    // Placeholder: registered with CPPUNIT_TEST_IGNORED in the suite and
+    // intentionally fails if run, pending refactoring for mockability.
+    CPPUNIT_FAIL("TODO: refactor so this can be mocked and tested easily");
+}
+
+void
+Distributor_Test::testReprioritizeBucketOnMaintenanceReply()
+{
+    // Placeholder: registered with CPPUNIT_TEST_IGNORED in the suite and
+    // intentionally fails if run, pending refactoring for mockability.
+    CPPUNIT_FAIL("TODO: refactor so this can be mocked and tested easily");
+}
+
+void
+Distributor_Test::testHandleUnknownMaintenanceReply()
+{
+    // Replies for maintenance commands the distributor did not itself track
+    // must still be accepted (handleReply returns true) rather than dropped.
+    setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
+
+    {
+        api::SplitBucketCommand::SP cmd(
+                new api::SplitBucketCommand(document::BucketId(16, 1234)));
+        api::SplitBucketReply::SP reply(new api::SplitBucketReply(*cmd));
+
+        CPPUNIT_ASSERT(_distributor->handleReply(reply));
+    }
+
+    {
+        // RemoveLocationReply must be treated as a maintenance reply since
+        // it's what GC is currently built around.
+        auto cmd = std::make_shared<api::RemoveLocationCommand>(
+                "false", document::BucketId(30, 1234));
+        auto reply = std::shared_ptr<api::StorageReply>(cmd->makeReply());
+        CPPUNIT_ASSERT(_distributor->handleReply(reply));
+    }
+}
+
+void
+Distributor_Test::testContainsTimeStatement()
+{
+    // Document selections referencing now() must be detected as
+    // time-dependent; plain field comparisons must not.
+    setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
+
+    CPPUNIT_ASSERT_EQUAL(false, getConfig().containsTimeStatement(""));
+    CPPUNIT_ASSERT_EQUAL(false, getConfig().containsTimeStatement("testdoctype1"));
+    CPPUNIT_ASSERT_EQUAL(false, getConfig().containsTimeStatement("testdoctype1.headerfield > 42"));
+    CPPUNIT_ASSERT_EQUAL(true, getConfig().containsTimeStatement("testdoctype1.headerfield > now()"));
+    CPPUNIT_ASSERT_EQUAL(true, getConfig().containsTimeStatement("testdoctype1.headerfield > now() - 3600"));
+    CPPUNIT_ASSERT_EQUAL(true, getConfig().containsTimeStatement("testdoctype1.headerfield == now() - 3600"));
+}
+
+void
+Distributor_Test::testUpdateBucketDatabase()
+{
+    // Exercises updateBucketDB() (see helper for the spec syntax): replica
+    // insertion/removal ordering, implicit and explicit trust handling, and
+    // the RESET_TRUSTED flag semantics.
+    _distributor->enableClusterState(lib::ClusterState("distributor:1 storage:3"));
+
+    // Node 2 removed in the second round; in-sync survivors stay trusted.
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("BucketId(0x4000000000000001) : "
+                        "node(idx=0,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false), "
+                        "node(idx=1,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false)"
+            ),
+            updateBucketDB("0:456,1:456,2:789", "2:r"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("BucketId(0x4000000000000001) : "
+                        "node(idx=0,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false), "
+                        "node(idx=2,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false), "
+                        "node(idx=1,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false)"
+            ),
+            updateBucketDB("0:456,1:456", "2:456"));
+
+    // Out-of-sync updates must clear trusted status on all copies.
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("BucketId(0x4000000000000001) : "
+                        "node(idx=0,crc=0x315,docs=394/394,bytes=197/197,trusted=false,active=false), "
+                        "node(idx=2,crc=0x14d,docs=166/166,bytes=83/83,trusted=false,active=false), "
+                        "node(idx=1,crc=0x34a,docs=421/421,bytes=210/210,trusted=false,active=false)"
+            ),
+            updateBucketDB("0:456:t,1:456:t,2:123", "0:789,1:842,2:333"));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("BucketId(0x4000000000000001) : "
+                        "node(idx=0,crc=0x315,docs=394/394,bytes=197/197,trusted=true,active=false), "
+                        "node(idx=2,crc=0x14d,docs=166/166,bytes=83/83,trusted=false,active=false), "
+                        "node(idx=1,crc=0x315,docs=394/394,bytes=197/197,trusted=true,active=false)"
+            ),
+            updateBucketDB("0:456:t,1:456:t,2:123", "0:789,1:789,2:333"));
+
+    // Last remaining copy becomes trusted once the others are removed.
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("BucketId(0x4000000000000001) : "
+                        "node(idx=2,crc=0x14d,docs=166/166,bytes=83/83,trusted=true,active=false)"),
+            updateBucketDB("0:456:t,1:456:t", "0:r,1:r,2:333"));
+
+    // Copies are in sync so should still be trusted even if explicitly reset.
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("BucketId(0x4000000000000001) : "
+                        "node(idx=0,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false), "
+                        "node(idx=2,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false), "
+                        "node(idx=1,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false)"
+            ),
+            updateBucketDB("0:456,1:456", "2:456", ResetTrusted(true)));
+
+    // When resetting, first inserted copy should not end up as implicitly trusted.
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("BucketId(0x4000000000000001) : "
+                        "node(idx=0,crc=0x1c8,docs=228/228,bytes=114/114,trusted=false,active=false), "
+                        "node(idx=2,crc=0x14d,docs=166/166,bytes=83/83,trusted=false,active=false)"
+            ),
+            updateBucketDB("0:456",
+                           "2:333",
+                           ResetTrusted(true)));
+}
+
+namespace {
+
+using namespace framework::defaultimplementation;
+
+// Helper runnable that requests the distributor's bucket status page via
+// the status reporter delegate from a separate thread, capturing the
+// rendered output for later inspection by the test.
+class StatusRequestThread : public framework::Runnable
+{
+    StatusReporterDelegate& _reporter;
+    std::string _result;
+public:
+    StatusRequestThread(StatusReporterDelegate& reporter)
+        : _reporter(reporter)
+    {}
+    void run(framework::ThreadHandle&) {
+        framework::HttpUrlPath path("/distributor?page=buckets");
+        std::ostringstream stream;
+        _reporter.reportStatus(stream, path);
+        _result = stream.str();
+    }
+
+    // Only valid once run() has completed (joined by the test).
+    std::string getResult() const {
+        return _result;
+    }
+};
+
+}
+
+void
+Distributor_Test::testTickProcessesStatusRequests()
+{
+    // Status requests are enqueued by the reporter thread and must be
+    // serviced by the distributor's tick, producing the bucket page content.
+    setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
+
+    addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t");
+
+    // Must go via delegate since reportStatus is now just a rendering
+    // function and not a request enqueuer (see Distributor::handleStatusRequest).
+    StatusRequestThread thread(_distributor->_distributorStatusDelegate);
+    FakeClock clock;
+    ThreadPoolImpl pool(clock);
+
+    uint64_t tickWaitMs = 5;
+    uint64_t tickMaxProcessTime = 5000;
+    int ticksBeforeWait = 1;
+    framework::Thread::UP tp(pool.startThread(
+        thread, "statustest", tickWaitMs, tickMaxProcessTime, ticksBeforeWait));
+
+    // Busy-wait until the status request has been enqueued on the
+    // distributor before ticking it.
+    while (true) {
+        FastOS_Thread::Sleep(1);
+        framework::TickingLockGuard guard(
+                _distributor->_threadPool.freezeCriticalTicks());
+        if (!_distributor->_statusToDo.empty()) break;
+
+    }
+    CPPUNIT_ASSERT(tick());
+
+    tp->interruptAndJoin(0);
+
+    CPPUNIT_ASSERT_CONTAIN("BucketId(0x4000000000000001)", thread.getResult());
+}
+
+void
+Distributor_Test::testMetricUpdateHookUpdatesPendingMaintenanceMetrics()
+{
+    // Pending-maintenance metrics must only be published when the metric
+    // update hook fires, and must then reflect the last complete DB scan
+    // (set, not accumulated across scans).
+    setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
+    // To ensure we count all operations, not just those fitting within the
+    // pending window.
+    getConfig().setMinPendingMaintenanceOps(1);
+    getConfig().setMaxPendingMaintenanceOps(1);
+
+    // 1 bucket must be merged, 1 must be split, 1 should be activated.
+    addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t/a,1=2/2/2");
+    addNodesToBucketDB(document::BucketId(16, 2),
+                       "0=100/10000000/200000/t/a,1=100/10000000/200000/t");
+    addNodesToBucketDB(document::BucketId(16, 3),
+                       "0=200/300/400/t,1=200/300/400/t");
+
+    // Go many full scanner rounds to check that metrics are set, not
+    // added to existing.
+    tickDistributorNTimes(50);
+
+    // By this point, no hook has been called so the metrics have not been
+    // set.
+    typedef MaintenanceOperation MO;
+    {
+        const IdealStateMetricSet& metrics(getIdealStateManager().getMetrics());
+        CPPUNIT_ASSERT_EQUAL(int64_t(0),
+                             metrics.operations[MO::MERGE_BUCKET]
+                                ->pending.getLast());
+        CPPUNIT_ASSERT_EQUAL(int64_t(0), metrics.operations[MO::SPLIT_BUCKET]
+                                ->pending.getLast());
+        CPPUNIT_ASSERT_EQUAL(int64_t(0),
+                             metrics.operations[MO::SET_BUCKET_STATE]
+                                ->pending.getLast());
+        CPPUNIT_ASSERT_EQUAL(int64_t(0), metrics.operations[MO::DELETE_BUCKET]
+                                ->pending.getLast());
+        CPPUNIT_ASSERT_EQUAL(int64_t(0), metrics.operations[MO::JOIN_BUCKET]
+                                ->pending.getLast());
+        CPPUNIT_ASSERT_EQUAL(int64_t(0),
+                             metrics.operations[MO::GARBAGE_COLLECTION]
+                                ->pending.getLast());
+    }
+
+    // Force trigger update hook
+    vespalib::Monitor l;
+    _distributor->_metricUpdateHook.updateMetrics(metrics::MetricLockGuard(l));
+    // Metrics should now be updated to the last complete working state
+    {
+        const IdealStateMetricSet& metrics(getIdealStateManager().getMetrics());
+        CPPUNIT_ASSERT_EQUAL(int64_t(1),
+                             metrics.operations[MO::MERGE_BUCKET]
+                                ->pending.getLast());
+        CPPUNIT_ASSERT_EQUAL(int64_t(1), metrics.operations[MO::SPLIT_BUCKET]
+                                ->pending.getLast());
+        CPPUNIT_ASSERT_EQUAL(int64_t(1),
+                             metrics.operations[MO::SET_BUCKET_STATE]
+                                ->pending.getLast());
+        CPPUNIT_ASSERT_EQUAL(int64_t(0), metrics.operations[MO::DELETE_BUCKET]
+                                ->pending.getLast());
+        CPPUNIT_ASSERT_EQUAL(int64_t(0), metrics.operations[MO::JOIN_BUCKET]
+                                ->pending.getLast());
+        CPPUNIT_ASSERT_EQUAL(int64_t(0),
+                             metrics.operations[MO::GARBAGE_COLLECTION]
+                                ->pending.getLast());
+    }
+}
+
+void
+Distributor_Test::testPriorityConfigIsPropagatedToDistributorConfiguration()
+{
+    // Each maintenance priority set in the config builder must surface
+    // unchanged in DistributorConfiguration::getMaintenancePriorities().
+    using namespace vespa::config::content::core;
+    using ConfigBuilder = StorDistributormanagerConfigBuilder;
+
+    setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
+
+    ConfigBuilder builder;
+    builder.priorityMergeMoveToIdealNode = 1;
+    builder.priorityMergeOutOfSyncCopies = 2;
+    builder.priorityMergeTooFewCopies = 3;
+    builder.priorityActivateNoExistingActive = 4;
+    builder.priorityActivateWithExistingActive = 5;
+    builder.priorityDeleteBucketCopy = 6;
+    builder.priorityJoinBuckets = 7;
+    builder.prioritySplitDistributionBits = 8;
+    builder.prioritySplitLargeBucket = 9;
+    builder.prioritySplitInconsistentBucket = 10;
+    builder.priorityGarbageCollection = 11;
+
+    getConfig().configure(builder);
+
+    const DistributorConfiguration::MaintenancePriorities& mp(
+            getConfig().getMaintenancePriorities());
+    CPPUNIT_ASSERT_EQUAL(1, static_cast<int>(mp.mergeMoveToIdealNode));
+    CPPUNIT_ASSERT_EQUAL(2, static_cast<int>(mp.mergeOutOfSyncCopies));
+    CPPUNIT_ASSERT_EQUAL(3, static_cast<int>(mp.mergeTooFewCopies));
+    CPPUNIT_ASSERT_EQUAL(4, static_cast<int>(mp.activateNoExistingActive));
+    CPPUNIT_ASSERT_EQUAL(5, static_cast<int>(mp.activateWithExistingActive));
+    CPPUNIT_ASSERT_EQUAL(6, static_cast<int>(mp.deleteBucketCopy));
+    CPPUNIT_ASSERT_EQUAL(7, static_cast<int>(mp.joinBuckets));
+    CPPUNIT_ASSERT_EQUAL(8, static_cast<int>(mp.splitDistributionBits));
+    CPPUNIT_ASSERT_EQUAL(9, static_cast<int>(mp.splitLargeBucket));
+    CPPUNIT_ASSERT_EQUAL(10, static_cast<int>(mp.splitInconsistentBucket));
+    CPPUNIT_ASSERT_EQUAL(11, static_cast<int>(mp.garbageCollection));
+}
+
+void
+Distributor_Test::testNoDbResurrectionForBucketNotOwnedInPendingState()
+{
+    // A bucket not owned according to the *pending* cluster state must not
+    // be (re)inserted into the database even with CREATE_IF_NONEXISTING.
+    setupDistributor(Redundancy(1), NodeCount(10), "storage:2 distributor:2");
+    lib::ClusterState newState("storage:10 distributor:10");
+    auto stateCmd = std::make_shared<api::SetSystemStateCommand>(newState);
+    // Force newState into being the pending state. According to the initial
+    // state we own the bucket, but according to the pending state, we do
+    // not. This must be handled correctly by the database update code.
+    getBucketDBUpdater().onSetSystemState(stateCmd);
+
+    document::BucketId nonOwnedBucket(16, 3);
+    CPPUNIT_ASSERT(!getBucketDBUpdater()
+                   .checkOwnershipInPendingState(nonOwnedBucket).isOwned());
+    CPPUNIT_ASSERT(!getBucketDBUpdater().getDistributorComponent()
+                   .checkOwnershipInPendingAndCurrentState(nonOwnedBucket)
+                   .isOwned());
+
+    std::vector<BucketCopy> copies;
+    copies.emplace_back(1234, 0, api::BucketInfo(0x567, 1, 2));
+    getExternalOperationHandler().updateBucketDatabase(nonOwnedBucket, copies,
+                                      DatabaseUpdate::CREATE_IF_NONEXISTING);
+
+    CPPUNIT_ASSERT_EQUAL(std::string("NONEXISTING"),
+                         dumpBucket(nonOwnedBucket));
+}
+
+void
+Distributor_Test::testAddedDbBucketsWithoutGcTimestampImplicitlyGetCurrentTime()
+{
+    // Newly created DB entries must have their GC timestamp initialized to
+    // the current wall clock time rather than zero (which would make them
+    // immediately eligible for GC).
+    setupDistributor(Redundancy(1), NodeCount(10), "storage:2 distributor:2");
+    getClock().setAbsoluteTimeInSeconds(101234);
+    document::BucketId bucket(16, 7654);
+
+    std::vector<BucketCopy> copies;
+    copies.emplace_back(1234, 0, api::BucketInfo(0x567, 1, 2));
+    getExternalOperationHandler().updateBucketDatabase(bucket, copies,
+                                      DatabaseUpdate::CREATE_IF_NONEXISTING);
+    BucketDatabase::Entry e(getBucket(bucket));
+    CPPUNIT_ASSERT_EQUAL(uint32_t(101234), e->getLastGarbageCollectionTime());
+}
+
+
+void
+Distributor_Test::mergeStatsAreAccumulatedDuringDatabaseIteration()
+{
+    // Per-node merge statistics (syncing/copying/moving) must reflect the
+    // last complete scan, not accumulate across multiple scanner rounds.
+    setupDistributor(Redundancy(2), NodeCount(3), "storage:3 distributor:1");
+    // Copies out of sync. Not possible for distributor to _reliably_ tell
+    // which direction(s) data will flow, so for simplicity assume that we
+    // must sync both copies.
+    // Note that we mark certain copies as active to prevent the bucketstate
+    // checker from pre-empting the merges.
+    // -> syncing[0] += 1, syncing[2] += 1
+    addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t/a,2=2/2/2");
+    // Must add missing node 2 for bucket
+    // -> copyingOut[0] += 1, copyingIn[2] += 1
+    addNodesToBucketDB(document::BucketId(16, 2), "0=1/1/1/t/a");
+    // Moving from non-ideal node 1 to ideal node 2. Both nodes 0 and 1 will
+    // be involved in this merge, but only node 1 will be tagged as source only
+    // (i.e. to be deleted after the merge is completed).
+    // -> copyingOut[0] += 1, movingOut[1] += 1, copyingIn[2] += 1
+    addNodesToBucketDB(document::BucketId(16, 3), "0=2/2/2/t/a,1=2/2/2/t");
+
+    // Go many full scanner rounds to check that stats are set, not
+    // added to existing.
+    tickDistributorNTimes(50);
+
+    const auto& stats(_distributor->_maintenanceStats);
+    {
+        NodeMaintenanceStats wanted;
+        wanted.syncing = 1;
+        wanted.copyingOut = 2;
+        CPPUNIT_ASSERT_EQUAL(wanted, stats.perNodeStats.forNode(0));
+    }
+    {
+        NodeMaintenanceStats wanted;
+        wanted.movingOut = 1;
+        CPPUNIT_ASSERT_EQUAL(wanted, stats.perNodeStats.forNode(1));
+    }
+    {
+        NodeMaintenanceStats wanted;
+        wanted.syncing = 1;
+        wanted.copyingIn = 2;
+        CPPUNIT_ASSERT_EQUAL(wanted, stats.perNodeStats.forNode(2));
+    }
+}
+
+/**
+ * Since maintenance operations are prioritized differently, activation
+ * pre-empts merging and other ops. If this also implies pre-empting running
+ * their state checkers at all, we won't get any statistics from any other
+ * operations for the bucket.
+ */
+void
+Distributor_Test::statsGeneratedForPreemptedOperations()
+{
+    setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
+    // For this test it suffices to have a single bucket with multiple aspects
+    // wrong about it. In this case, let a bucket be both out of sync _and_
+    // missing an active copy. This _should_ give a statistic with both nodes 0
+    // and 1 requiring a sync. If instead merge stats generation is preempted
+    // by activation, we'll see no merge stats at all.
+    addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1,1=2/2/2");
+    tickDistributorNTimes(50);
+    const auto& stats(_distributor->_maintenanceStats);
+    {
+        NodeMaintenanceStats wanted;
+        wanted.syncing = 1;
+        CPPUNIT_ASSERT_EQUAL(wanted, stats.perNodeStats.forNode(0));
+    }
+    {
+        NodeMaintenanceStats wanted;
+        wanted.syncing = 1;
+        CPPUNIT_ASSERT_EQUAL(wanted, stats.perNodeStats.forNode(1));
+    }
+}
+
+void
+Distributor_Test::hostInfoReporterConfigIsPropagatedToReporter()
+{
+    // enableHostInfoReporting config toggles the live reporter instance.
+    setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
+
+    // Default is enabled=true.
+    CPPUNIT_ASSERT(_distributor->_hostInfoReporter.isReportingEnabled());
+
+    ConfigBuilder builder;
+    builder.enableHostInfoReporting = false;
+    configureDistributor(builder);
+
+    CPPUNIT_ASSERT(!_distributor->_hostInfoReporter.isReportingEnabled());
+}
+
+void
+Distributor_Test::replicaCountingModeIsConfiguredToTrustedByDefault()
+{
+    // Without explicit config, the metric updater counts TRUSTED replicas.
+    setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
+    CPPUNIT_ASSERT_EQUAL(ConfigBuilder::TRUSTED, currentReplicaCountingMode());
+}
+
+void
+Distributor_Test::replicaCountingModeConfigIsPropagatedToMetricUpdater()
+{
+    // Setting the counting mode to ANY via config must reach the updater.
+    setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
+    ConfigBuilder builder;
+    builder.minimumReplicaCountingMode = ConfigBuilder::ANY;
+    configureDistributor(builder);
+    CPPUNIT_ASSERT_EQUAL(ConfigBuilder::ANY, currentReplicaCountingMode());
+}
+
+void
+Distributor_Test::bucketActivationIsEnabledByDefault()
+{
+    // Activation must not be disabled unless explicitly configured off.
+    setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
+    CPPUNIT_ASSERT(getConfig().isBucketActivationDisabled() == false);
+}
+
+void
+Distributor_Test::bucketActivationConfigIsPropagatedToDistributorConfiguration()
+{
+    // disableBucketActivation=true must surface through getConfig().
+    using namespace vespa::config::content::core;
+    using ConfigBuilder = StorDistributormanagerConfigBuilder;
+
+    setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
+
+    ConfigBuilder builder;
+    builder.disableBucketActivation = true;
+    getConfig().configure(builder);
+
+    CPPUNIT_ASSERT(getConfig().isBucketActivationDisabled());
+}
+
+}
+
+}
diff --git a/storage/src/tests/distributor/distributortestutil.cpp b/storage/src/tests/distributor/distributortestutil.cpp
new file mode 100644
index 00000000000..c2d878a253d
--- /dev/null
+++ b/storage/src/tests/distributor/distributortestutil.cpp
@@ -0,0 +1,298 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <ctype.h>
+#include <vespa/document/base/testdocman.h>
+#include <vespa/storageframework/defaultimplementation/memory/nomemorymanager.h>
+#include <tests/distributor/distributortestutil.h>
+#include <vespa/vespalib/text/stringtokenizer.h>
+
+namespace storage {
+
+namespace distributor {
+
+void
+DistributorTestUtil::createLinks()
+{
+    // Builds the full test harness: dummy storage app, ticking thread pool
+    // and the Distributor under test wired to the stub message sender.
+    _node.reset(new TestDistributorApp(_config.getConfigId()));
+    _threadPool = framework::TickingThreadPool::createDefault("distributor");
+    _distributor.reset(new Distributor(
+            _node->getComponentRegister(),
+            *_threadPool,
+            *this,
+            true,
+            _hostInfo,
+            &_messageSender));
+    _component.reset(new storage::DistributorComponent(_node->getComponentRegister(), "distrtestutil"));
+};
+
+void
+DistributorTestUtil::setupDistributor(int redundancy,
+                                      int nodeCount,
+                                      const std::string& systemState,
+                                      uint32_t earlyReturn,
+                                      bool requirePrimaryToBeWritten)
+{
+    // Installs a default distribution config tweaked with the given
+    // redundancy/early-return settings, then enables the provided cluster
+    // state on the distributor under test.
+    lib::Distribution::DistributionConfig config(
+            lib::Distribution::getDefaultDistributionConfig(
+                redundancy, nodeCount));
+    config.redundancy = redundancy;
+    config.initialRedundancy = earlyReturn;
+    config.ensurePrimaryPersisted = requirePrimaryToBeWritten;
+    lib::Distribution* distribution = new lib::Distribution(config);
+    _node->getComponentRegister().setDistribution(
+            lib::Distribution::SP(distribution));
+    _distributor->enableClusterState(lib::ClusterState(systemState));
+}
+
+void
+DistributorTestUtil::setRedundancy(uint32_t redundancy)
+{
+    // Replaces the distribution config (100 nodes) and notifies the
+    // distributor so it re-evaluates bucket ownership.
+    _node->getComponentRegister().setDistribution(lib::Distribution::SP(
+            new lib::Distribution(
+                lib::Distribution::getDefaultDistributionConfig(
+                    redundancy, 100))));
+    _distributor->storageDistributionChanged();
+}
+
+// Installs the document type repo used by the test node's components.
+void
+DistributorTestUtil::setTypeRepo(const document::DocumentTypeRepo::SP &repo)
+{
+    _node->getComponentRegister().setDocumentTypeRepo(repo);
+}
+
+void
+DistributorTestUtil::close()
+{
+    // Tears the harness down in reverse dependency order; resetting
+    // _config restores the standard config for the next test.
+    _component.reset(0);
+    if (_distributor.get()) {
+        _distributor->onClose();
+    }
+    _sender.clear();
+    _node.reset(0);
+    _config = getStandardConfig(false);
+}
+
+namespace {
+    // Renders a vector of node indexes as a comma-separated list.
+    std::string dumpVector(const std::vector<uint16_t>& vec) {
+        std::ostringstream ost;
+        for (uint32_t i = 0; i < vec.size(); ++i) {
+            if (i != 0) {
+                ost << ",";
+            }
+            ost << vec[i];
+        }
+        return ost.str();
+    }
+}
+
+std::string
+DistributorTestUtil::getNodes(document::BucketId id)
+{
+    // Returns "<bucket>: n1,n2,..." with the bucket's stored node indexes
+    // sorted ascending; falls back to just the bucket id string when the
+    // bucket is not in the database.
+    BucketDatabase::Entry entry = getBucket(id);
+
+    if (!entry.valid()) {
+        return id.toString();
+    } else {
+        std::vector<uint16_t> nodes = entry->getNodes();
+        std::sort(nodes.begin(), nodes.end());
+
+        std::ostringstream ost;
+        ost << id << ": " << dumpVector(nodes);
+        return ost.str();
+    }
+}
+
+std::string
+DistributorTestUtil::getIdealStr(document::BucketId id, const lib::ClusterState& state)
+{
+    // Returns "<bucket>: n1,n2,..." with the ideal storage nodes for this
+    // bucket under the given state, or just the bucket id string when the
+    // bucket is not owned by this distributor in that state.
+    if (!getExternalOperationHandler().ownsBucketInState(state, id)) {
+        return id.toString();
+    }
+
+    std::vector<uint16_t> nodes;
+    _component->getDistribution()->getIdealNodes(lib::NodeType::STORAGE,
+                                                 state,
+                                                 id,
+                                                 nodes);
+    std::sort(nodes.begin(), nodes.end());
+    std::ostringstream ost;
+    ost << id << ": " << dumpVector(nodes);
+    return ost.str();
+}
+
+void
+DistributorTestUtil::addIdealNodes(const lib::ClusterState& state,
+                                   const document::BucketId& id)
+{
+    // Inserts a copy (dummy info 1/1/1) on every ideal node for the bucket
+    // under the given state, skipping nodes in maintenance mode.
+    BucketDatabase::Entry entry = getBucket(id);
+
+    if (!entry.valid()) {
+        entry = BucketDatabase::Entry(id);
+    }
+
+    std::vector<uint16_t> res;
+    assert(_component.get());
+    _component->getDistribution()->getIdealNodes(lib::NodeType::STORAGE,
+                                                 state,
+                                                 id,
+                                                 res);
+
+    for (uint32_t i = 0; i < res.size(); ++i) {
+        if (state.getNodeState(lib::Node(lib::NodeType::STORAGE, res[i])).getState() !=
+            lib::State::MAINTENANCE)
+        {
+            entry->addNode(BucketCopy(0, res[i], api::BucketInfo(1,1,1)),
+                           toVector<uint16_t>(0));
+        }
+    }
+
+    getBucketDatabase().update(entry);
+}
+
+void
+DistributorTestUtil::addNodesToBucketDB(const document::BucketId& id,
+                                        const std::string& nodeStr)
+{
+    // Parses entries of the form
+    //   "node=checksum[/docs[/size[/metacount/filesize]]][/t][/a][/r]"
+    // (comma-separated) and replaces the bucket's replica set with the
+    // parsed copies. Flags: t=trusted, a=active, r=ready.
+    BucketDatabase::Entry entry = getBucket(id);
+
+    if (!entry.valid()) {
+        entry = BucketDatabase::Entry(id);
+    }
+
+    entry->clear();
+
+    vespalib::StringTokenizer tokenizer(nodeStr, ",");
+    for (uint32_t i = 0; i < tokenizer.size(); ++i) {
+        vespalib::StringTokenizer tok2(tokenizer[i], "=");
+        vespalib::StringTokenizer tok3(tok2[1], "/");
+
+        // Docs and size default to the checksum value when omitted.
+        api::BucketInfo info(atoi(tok3[0].c_str()),
+                             atoi(tok3.size() > 1 ? tok3[1].c_str() : tok3[0].c_str()),
+                             atoi(tok3.size() > 2 ? tok3[2].c_str() : tok3[0].c_str()));
+
+        size_t flagsIdx = 3;
+
+        // Meta info override? For simplicity, require both meta count and size
+        if (tok3.size() > 4 && (!tok3[3].empty() && isdigit(tok3[3][0]))) {
+            info.setMetaCount(atoi(tok3[3].c_str()));
+            info.setUsedFileSize(atoi(tok3[4].c_str()));
+            flagsIdx = 5;
+        }
+
+        if ((tok3.size() > flagsIdx + 1) && tok3[flagsIdx + 1] == "a") {
+            info.setActive();
+        } else {
+            info.setActive(false);
+        }
+        if ((tok3.size() > flagsIdx + 2) && tok3[flagsIdx + 2] == "r") {
+            info.setReady();
+        } else {
+            info.setReady(false);
+        }
+
+        uint16_t idx = atoi(tok2[0].c_str());
+        BucketCopy node(
+                0,
+                idx,
+                info);
+
+        // Allow user to manually override trusted and active.
+        if (tok3.size() > flagsIdx && tok3[flagsIdx] == "t") {
+            node.setTrusted();
+        }
+
+        entry->addNodeManual(node);
+    }
+
+    getBucketDatabase().update(entry);
+}
+
+// Removes the bucket entry entirely from the distributor's bucket database.
+void
+DistributorTestUtil::removeFromBucketDB(const document::BucketId& id)
+{
+    getBucketDatabase().remove(id);
+}
+
+// Convenience overload: uses the distributor's current cluster state.
+void
+DistributorTestUtil::addIdealNodes(const document::BucketId& id)
+{
+    addIdealNodes(getExternalOperationHandler().getClusterState(), id);
+}
+
+// Convenience overload building the BucketInfo from raw checksum/count/size.
+void
+DistributorTestUtil::insertBucketInfo(document::BucketId id,
+                                      uint16_t node,
+                                      uint32_t checksum,
+                                      uint32_t count,
+                                      uint32_t size,
+                                      bool trusted,
+                                      bool active)
+{
+    api::BucketInfo info(checksum, count, size);
+    insertBucketInfo(id, node, info, trusted, active);
+}
+
+void
+DistributorTestUtil::insertBucketInfo(document::BucketId id,
+                                      uint16_t node,
+                                      const api::BucketInfo& info,
+                                      bool trusted,
+                                      bool active)
+{
+    // Adds (or updates) a single replica for the bucket with the given
+    // info, creating the DB entry on demand and stamping the copy with a
+    // fresh unique timestamp.
+    BucketDatabase::Entry entry = getBucketDatabase().get(id);
+    if (!entry.valid()) {
+        entry = BucketDatabase::Entry(id, BucketInfo());
+    }
+
+    api::BucketInfo info2(info);
+    if (active) {
+        info2.setActive();
+    }
+    BucketCopy copy(getExternalOperationHandler().getUniqueTimestamp(), node, info2);
+
+    entry->addNode(copy.setTrusted(trusted), toVector<uint16_t>(0));
+
+    getBucketDatabase().update(entry);
+}
+
+// Returns the database entry for the bucket rendered as a string
+// (e.g. "NONEXISTING" when absent).
+std::string
+DistributorTestUtil::dumpBucket(const document::BucketId& bid)
+{
+    return getBucketDatabase().get(bid).toString();
+}
+
+void
+DistributorTestUtil::sendReply(Operation& op,
+                               int idx,
+                               api::ReturnCode::Result result)
+{
+    // Fabricates a reply for the idx'th previously sent command (idx == -1
+    // means "most recent") with the given result code and feeds it back to
+    // the operation under test.
+    if (idx == -1) {
+        idx = _sender.commands.size() - 1;
+    }
+    assert(idx >= 0 && idx < static_cast<int>(_sender.commands.size()));
+
+    std::shared_ptr<api::StorageCommand> cmd = _sender.commands[idx];
+    api::StorageReply::SP reply(cmd->makeReply().release());
+    reply->setResult(result);
+    op.receive(_sender, reply);
+}
+
+// Fetches the distributor's database entry for the bucket (may be invalid).
+BucketDatabase::Entry
+DistributorTestUtil::getBucket(const document::BucketId& bId) const
+{
+    return _distributor->getBucketDatabase().get(bId);
+}
+
+// Applies a config where bucket activation is toggled per `disable`.
+void
+DistributorTestUtil::disableBucketActivationInConfig(bool disable)
+{
+    vespa::config::content::core::StorDistributormanagerConfigBuilder config;
+    config.disableBucketActivation = disable;
+    getConfig().configure(config);
+}
+
+}
+
+}
+
+
diff --git a/storage/src/tests/distributor/distributortestutil.h b/storage/src/tests/distributor/distributortestutil.h
new file mode 100644
index 00000000000..43b56859d0d
--- /dev/null
+++ b/storage/src/tests/distributor/distributortestutil.h
@@ -0,0 +1,200 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <tests/common/dummystoragelink.h>
+#include <vespa/storageframework/defaultimplementation/clock/fakeclock.h>
+#include <vespa/storage/common/hostreporter/hostinfo.h>
+#include <vespa/storage/distributor/distributor.h>
+#include <vespa/storage/frameworkimpl/component/distributorcomponentregisterimpl.h>
+#include <vespa/storage/storageutil/utils.h>
+#include <tests/common/teststorageapp.h>
+#include <tests/distributor/messagesenderstub.h>
+#include <vespa/storageapi/message/state.h>
+#include <tests/common/testhelper.h>
+
+namespace storage {
+
+namespace distributor {
+
+/**
+ * Shared test fixture mixin for distributor unit tests. Owns the fake node
+ * (TestDistributorApp), the Distributor under test, stub message senders for
+ * traffic going up and down, and convenience helpers for populating and
+ * inspecting the bucket database.
+ */
+class DistributorTestUtil : private DoneInitializeHandler
+{
+public:
+ DistributorTestUtil()
+ : _messageSender(_sender, _senderDown)
+ {
+ _config = getStandardConfig(false);
+ }
+ virtual ~DistributorTestUtil() {};
+
+ /**
+ * Sets up the storage link chain.
+ */
+ void createLinks();
+ void setTypeRepo(const document::DocumentTypeRepo::SP &repo);
+
+ void close();
+
+ /**
+ * Returns a string with the nodes currently stored in the bucket
+ * database for the given bucket.
+ */
+ std::string getNodes(document::BucketId id);
+
+ /**
+ * Returns a string with the ideal state nodes for the given bucket.
+ */
+ std::string getIdealStr(document::BucketId id, const lib::ClusterState& state);
+
+ /**
+ * Adds the ideal nodes for the given bucket and the given cluster state
+ * to the bucket database.
+ */
+ void addIdealNodes(const lib::ClusterState& state, const document::BucketId& id);
+
+ /**
+ * Adds all the ideal nodes for the given bucket to the bucket database.
+ */
+ void addIdealNodes(const document::BucketId& id);
+
+ /**
+ * Parses the given string to a set of node => bucket info data,
+ * and inserts them as nodes in the given bucket.
+ * Format:
+ * "node1=checksum/docs/size,node2=checksum/docs/size"
+ */
+ void addNodesToBucketDB(const document::BucketId& id, const std::string& nodeStr);
+
+ /**
+ * Removes the given bucket from the bucket database.
+ */
+ void removeFromBucketDB(const document::BucketId& id);
+
+ /**
+ * Inserts the given bucket information for the given bucket and node in
+ * the bucket database.
+ */
+ void insertBucketInfo(document::BucketId id,
+ uint16_t node,
+ uint32_t checksum,
+ uint32_t count,
+ uint32_t size,
+ bool trusted = false,
+ bool active = false);
+
+ /**
+ * Inserts the given bucket information for the given bucket and node in
+ * the bucket database.
+ */
+ void insertBucketInfo(document::BucketId id,
+ uint16_t node,
+ const api::BucketInfo& info,
+ bool trusted = false,
+ bool active = false);
+
+ std::string dumpBucket(const document::BucketId& bucket);
+
+ /**
+ * Replies to message idx sent upwards with the given result code.
+ * If idx = -1, replies to the last command received upwards.
+ */
+ void sendReply(Operation& op,
+ int idx = -1,
+ api::ReturnCode::Result result = api::ReturnCode::OK);
+
+ // The accessors below reach into Distributor's private members;
+ // presumably this class is declared a friend of Distributor — verify.
+ BucketDBUpdater& getBucketDBUpdater() {
+ return _distributor->_bucketDBUpdater;
+ }
+ IdealStateManager& getIdealStateManager() {
+ return _distributor->_idealStateManager;
+ }
+ ExternalOperationHandler& getExternalOperationHandler() {
+ return _distributor->_externalOperationHandler;
+ }
+
+ Distributor& getDistributor() {
+ return *_distributor;
+ }
+
+ // Runs one critical tick (under the ticking thread pool's freeze lock)
+ // followed by one non-critical tick. Returns true if the distributor
+ // still has work it wants to do (i.e. it did not request a wait).
+ bool tick() {
+ framework::ThreadWaitInfo res(
+ framework::ThreadWaitInfo::NO_MORE_CRITICAL_WORK_KNOWN);
+ {
+ framework::TickingLockGuard lock(
+ _distributor->_threadPool.freezeCriticalTicks());
+ res.merge(_distributor->doCriticalTick(0));
+ }
+ res.merge(_distributor->doNonCriticalTick(0));
+ return !res.waitWanted();
+ }
+
+ DistributorConfiguration& getConfig() {
+ // const_cast lets tests mutate live config without a setter on
+ // Distributor itself.
+ return const_cast<DistributorConfiguration&>(_distributor->getConfig());
+ }
+
+ vdstestlib::DirConfig& getDirConfig() {
+ return _config;
+ }
+
+ BucketDatabase& getBucketDatabase() { return _distributor->getBucketDatabase(); }
+
+ framework::defaultimplementation::FakeClock& getClock() { return _node->getClock(); }
+ DistributorComponentRegister& getComponentRegister() { return _node->getComponentRegister(); }
+ DistributorComponentRegisterImpl& getComponentRegisterImpl() { return _node->getComponentRegister(); }
+
+ // Lazily constructs the DistributorComponent on first use.
+ StorageComponent& getComponent() {
+ if (_component.get() == 0) {
+ _component.reset(new storage::DistributorComponent(
+ _node->getComponentRegister(), "distributor_test_utils"));
+ }
+ return *_component;
+ }
+
+ // NOTE(review): earlyReturn is declared uint32_t but defaulted with a
+ // bool literal; presumably it should be bool — confirm against the
+ // definition in the .cpp file.
+ void setupDistributor(int redundancy,
+ int nodeCount,
+ const std::string& systemState,
+ uint32_t earlyReturn = false,
+ bool requirePrimaryToBeWritten = true);
+
+ void setRedundancy(uint32_t redundancy);
+
+ virtual void notifyDoneInitializing() {}
+
+ // Must implement this for storage server interface for now
+ virtual api::Timestamp getUniqueTimestamp() {
+ return _component->getUniqueTimestamp();
+ }
+
+ void disableBucketActivationInConfig(bool disable);
+
+ BucketDatabase::Entry getBucket(const document::BucketId& bId) const;
+
+protected:
+ vdstestlib::DirConfig _config;
+ std::unique_ptr<TestDistributorApp> _node;
+ framework::TickingThreadPool::UP _threadPool;
+ std::unique_ptr<Distributor> _distributor;
+ std::unique_ptr<storage::DistributorComponent> _component;
+ MessageSenderStub _sender;
+ MessageSenderStub _senderDown;
+ HostInfo _hostInfo;
+
+ // Routes sendUp traffic to _sender and sendDown traffic to _senderDown
+ // so tests can inspect both directions independently.
+ struct MessageSenderImpl : public ChainedMessageSender {
+ MessageSenderStub& _sender;
+ MessageSenderStub& _senderDown;
+ MessageSenderImpl(MessageSenderStub& up, MessageSenderStub& down)
+ : _sender(up), _senderDown(down) {}
+
+ void sendUp(const std::shared_ptr<api::StorageMessage>& msg) {
+ _sender.send(msg);
+ }
+ void sendDown(const std::shared_ptr<api::StorageMessage>& msg) {
+ _senderDown.send(msg);
+ }
+ };
+ MessageSenderImpl _messageSender;
+};
+
+}
+
+}
+
diff --git a/storage/src/tests/distributor/externaloperationhandlertest.cpp b/storage/src/tests/distributor/externaloperationhandlertest.cpp
new file mode 100644
index 00000000000..ce8149b4bac
--- /dev/null
+++ b/storage/src/tests/distributor/externaloperationhandlertest.cpp
@@ -0,0 +1,176 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+
+#include <tests/distributor/distributortestutil.h>
+#include <vespa/storage/distributor/externaloperationhandler.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storageapi/message/state.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+
+namespace storage {
+namespace distributor {
+
+// CppUnit fixture exercising ExternalOperationHandler: bucket split-mask
+// resolution and rejection of operations for buckets this distributor does
+// not own in the current or pending cluster state.
+class ExternalOperationHandlerTest : public CppUnit::TestFixture,
+ public DistributorTestUtil
+{
+ CPPUNIT_TEST_SUITE(ExternalOperationHandlerTest);
+ CPPUNIT_TEST(testBucketSplitMask);
+ CPPUNIT_TEST(testOperationRejectedOnWrongDistribution);
+ CPPUNIT_TEST(testOperationRejectedOnPendingWrongDistribution);
+ CPPUNIT_TEST_SUITE_END();
+
+ // Scans user buckets 1..999 for one NOT owned in `state`.
+ document::BucketId findNonOwnedUserBucketInState(vespalib::stringref state);
+ // Scans user buckets 1..999 for one owned in state1 but not in state2.
+ document::BucketId findOwned1stNotOwned2ndInStates(
+ vespalib::stringref state1,
+ vespalib::stringref state2);
+
+ std::shared_ptr<api::StorageMessage> makeGetCommandForUser(uint64_t id);
+
+protected:
+ void testBucketSplitMask();
+ void testOperationRejectedOnWrongDistribution();
+ void testOperationRejectedOnPendingWrongDistribution();
+
+public:
+ // Tests call createLinks() themselves (setup differs per test), so only
+ // teardown is centralized here.
+ void tearDown() {
+ close();
+ }
+
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(ExternalOperationHandlerTest);
+
+// Verifies that getBucketId() strips user-doc ids down to the configured
+// minimum split bit count: with minsplitcount=16 the 16 low location bits
+// survive, with minsplitcount=20 the 20 low bits survive.
+void
+ExternalOperationHandlerTest::testBucketSplitMask()
+{
+ {
+ createLinks();
+ getDirConfig().getConfig("stor-distributormanager").set("minsplitcount", "16");
+
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 0xffff),
+ getExternalOperationHandler().getBucketId(document::DocumentId(
+ vespalib::make_string("userdoc:ns:%d::", 0xffff))
+ ).stripUnused());
+ // 0x10000 exceeds 16 bits, so the used-bits mask reduces it to 0.
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 0),
+ getExternalOperationHandler().getBucketId(document::DocumentId(
+ vespalib::make_string("userdoc:ns:%d::", 0x10000))
+ ).stripUnused());
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 0xffff),
+ getExternalOperationHandler().getBucketId(document::DocumentId(
+ vespalib::make_string("userdoc:ns:%d::", 0xffff))
+ ).stripUnused());
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 0x100),
+ getExternalOperationHandler().getBucketId(document::DocumentId(
+ vespalib::make_string("userdoc:ns:%d::", 0x100))
+ ).stripUnused());
+ close();
+ }
+ {
+ // Second round with a 20-bit split count; links are re-created after
+ // the config change.
+ getDirConfig().getConfig("stor-distributormanager").set("minsplitcount", "20");
+ createLinks();
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(20, 0x11111),
+ getExternalOperationHandler().getBucketId(document::DocumentId(
+ vespalib::make_string("userdoc:ns:%d::", 0x111111))
+ ).stripUnused());
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(20, 0x22222),
+ getExternalOperationHandler().getBucketId(document::DocumentId(
+ vespalib::make_string("userdoc:ns:%d::", 0x222222))
+ ).stripUnused());
+ }
+}
+
+// Linearly scans 32-bit user buckets 1..999 and returns the first one this
+// distributor does NOT own under `statestr`; throws if none is found.
+document::BucketId
+ExternalOperationHandlerTest::findNonOwnedUserBucketInState(
+ vespalib::stringref statestr)
+{
+ lib::ClusterState state(statestr);
+ for (uint64_t i = 1; i < 1000; ++i) {
+ document::BucketId bucket(32, i);
+ if (!getExternalOperationHandler().ownsBucketInState(state, bucket)) {
+ return bucket;
+ }
+ }
+ throw std::runtime_error("no appropriate bucket found");
+}
+
+// Returns the first 32-bit user bucket in 1..999 that is owned under state1
+// but not under state2 (used to simulate ownership transfer during a pending
+// cluster state change); throws if no such bucket exists in the range.
+document::BucketId
+ExternalOperationHandlerTest::findOwned1stNotOwned2ndInStates(
+ vespalib::stringref statestr1,
+ vespalib::stringref statestr2)
+{
+ lib::ClusterState state1(statestr1);
+ lib::ClusterState state2(statestr2);
+ for (uint64_t i = 1; i < 1000; ++i) {
+ document::BucketId bucket(32, i);
+ if (getExternalOperationHandler().ownsBucketInState(state1, bucket)
+ && !getExternalOperationHandler().ownsBucketInState(state2, bucket))
+ {
+ return bucket;
+ }
+ }
+ throw std::runtime_error("no appropriate bucket found");
+}
+
+// Builds a Get command (all fields) for the user document "userdoc:foo:<id>:bar",
+// leaving bucket resolution to the operation handler (BucketId(0)).
+std::shared_ptr<api::StorageMessage>
+ExternalOperationHandlerTest::makeGetCommandForUser(uint64_t id)
+{
+ document::DocumentId docId(document::UserDocIdString("userdoc:foo:" + vespalib::make_string("%lu", id) + ":bar"));
+ std::shared_ptr<api::StorageMessage> cmd(
+ new api::GetCommand(document::BucketId(0), docId, "[all]"));
+ return cmd;
+}
+
+// A Get for a bucket this distributor does not own must be bounced
+// immediately with WRONG_DISTRIBUTION carrying the current cluster state,
+// without generating any operation.
+void
+ExternalOperationHandlerTest::testOperationRejectedOnWrongDistribution()
+{
+ createLinks();
+ std::string state("distributor:2 storage:2");
+ setupDistributor(1, 2, state);
+
+ document::BucketId bucket(findNonOwnedUserBucketInState(state));
+ auto cmd = makeGetCommandForUser(bucket.withoutCountBits());
+
+ Operation::SP genOp;
+ CPPUNIT_ASSERT(getExternalOperationHandler().handleMessage(cmd, genOp));
+ CPPUNIT_ASSERT(!genOp.get());
+ CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.replies.size());
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("ReturnCode(WRONG_DISTRIBUTION, "
+ "distributor:2 storage:2)"),
+ _sender.replies[0]->getResult().toString());
+}
+
+// When a pending cluster state transfers ownership away from this
+// distributor, a Get for such a bucket is rejected with WRONG_DISTRIBUTION
+// carrying the PENDING state, not the current one.
+void
+ExternalOperationHandlerTest::testOperationRejectedOnPendingWrongDistribution()
+{
+ createLinks();
+ std::string current("distributor:2 storage:2");
+ std::string pending("distributor:3 storage:3");
+ setupDistributor(1, 3, current);
+
+ document::BucketId b(findOwned1stNotOwned2ndInStates(current, pending));
+
+ // Trigger pending cluster state
+ auto stateCmd = std::make_shared<api::SetSystemStateCommand>(
+ lib::ClusterState(pending));
+ getBucketDBUpdater().onSetSystemState(stateCmd);
+
+ auto cmd = makeGetCommandForUser(b.withoutCountBits());
+
+ Operation::SP genOp;
+ CPPUNIT_ASSERT(getExternalOperationHandler().handleMessage(cmd, genOp));
+ CPPUNIT_ASSERT(!genOp.get());
+ CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.replies.size());
+ // Fail back with _pending_ cluster state so client can start trying
+ // correct distributor immediately. If that distributor has not yet
+ // completed processing its pending cluster state, it'll return the
+ // old (current) cluster state, causing the client to bounce between
+ // the two until the pending states have been resolved. This is pretty
+ // much inevitable with the current design.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("ReturnCode(WRONG_DISTRIBUTION, "
+ "distributor:3 storage:3)"),
+ _sender.replies[0]->getResult().toString());
+}
+} // distributor
+} // storage
diff --git a/storage/src/tests/distributor/garbagecollectiontest.cpp b/storage/src/tests/distributor/garbagecollectiontest.cpp
new file mode 100644
index 00000000000..399222f0e34
--- /dev/null
+++ b/storage/src/tests/distributor/garbagecollectiontest.cpp
@@ -0,0 +1,77 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <cppunit/extensions/HelperMacros.h>
+#include <vespa/storageapi/message/removelocation.h>
+#include <vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.h>
+#include <vespa/storage/distributor/idealstatemanager.h>
+#include <tests/distributor/distributortestutil.h>
+
+namespace storage {
+namespace distributor {
+
+// CppUnit fixture for GarbageCollectionOperation: verifies RemoveLocation
+// commands are issued per replica node and that replies update the bucket DB.
+class GarbageCollectionOperationTest : public CppUnit::TestFixture, public DistributorTestUtil
+{
+ CPPUNIT_TEST_SUITE(GarbageCollectionOperationTest);
+ CPPUNIT_TEST(testSimple);
+ CPPUNIT_TEST_SUITE_END();
+
+protected:
+ void testSimple();
+
+public:
+ void setUp() {
+ createLinks();
+ };
+
+ void tearDown() {
+ close();
+ }
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(GarbageCollectionOperationTest);
+
+// GC over a bucket with replicas on nodes 0 and 1: the operation must send
+// one RemoveLocation per node carrying the configured GC selection, and the
+// replies' bucket info plus the GC timestamp must land in the bucket DB.
+void
+GarbageCollectionOperationTest::testSimple()
+{
+ _distributor->enableClusterState(lib::ClusterState("distributor:1 storage:2"));
+ addNodesToBucketDB(document::BucketId(16, 1), "0=250/50/300,1=250/50/300");
+ getConfig().setGarbageCollection("music.date < 34", 3600);
+
+ GarbageCollectionOperation op("storage",
+ BucketAndNodes(document::BucketId(16, 1),
+ toVector<uint16_t>(0, 1)));
+
+ op.setIdealStateManager(&getIdealStateManager());
+ op.start(_sender, framework::MilliSecTime(0));
+
+ // One RemoveLocation per replica node.
+ CPPUNIT_ASSERT_EQUAL((size_t)2, _sender.commands.size());
+
+ // Advance the fake clock so the recorded GC time becomes 34.
+ getClock().setAbsoluteTimeInSeconds(34);
+
+ for (uint32_t i = 0; i < 2; ++i) {
+ std::shared_ptr<api::StorageCommand> msg = _sender.commands[i];
+ CPPUNIT_ASSERT(msg->getType() == api::MessageType::REMOVELOCATION);
+
+ api::RemoveLocationCommand* tmp = (api::RemoveLocationCommand*)msg.get();
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("music.date < 34"),
+ tmp->getDocumentSelection());
+
+ std::shared_ptr<api::StorageReply> reply(tmp->makeReply().release());
+ api::RemoveLocationReply* sreply = (api::RemoveLocationReply*)reply.get();
+ sreply->setBucketInfo(api::BucketInfo(666, 90, 500));
+
+ op.receive(_sender, reply);
+ }
+
+ BucketDatabase::Entry entry = getBucket(document::BucketId(16, 1));
+ CPPUNIT_ASSERT(entry.valid());
+ CPPUNIT_ASSERT_EQUAL(2, (int)entry->getNodeCount());
+ CPPUNIT_ASSERT_EQUAL(34, (int)entry->getLastGarbageCollectionTime());
+ CPPUNIT_ASSERT_EQUAL(api::BucketInfo(666, 90, 500),
+ entry->getNodeRef(0).getBucketInfo());
+ CPPUNIT_ASSERT_EQUAL(api::BucketInfo(666, 90, 500),
+ entry->getNodeRef(1).getBucketInfo());
+}
+
+} // distributor
+} // storage
diff --git a/storage/src/tests/distributor/getoperationtest.cpp b/storage/src/tests/distributor/getoperationtest.cpp
new file mode 100644
index 00000000000..12853be2e42
--- /dev/null
+++ b/storage/src/tests/distributor/getoperationtest.cpp
@@ -0,0 +1,567 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+
+#include <vespa/config/helper/configgetter.h>
+#include <vespa/document/config/config-documenttypes.h>
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/storage/distributor/externaloperationhandler.h>
+#include <vespa/storage/distributor/distributormetricsset.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <tests/distributor/distributortestutil.h>
+#include <tests/common/dummystoragelink.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <iomanip>
+#include <iostream>
+#include <memory>
+#include <vespa/storage/distributor/operations/external/getoperation.h>
+
+using std::shared_ptr;
+using config::ConfigGetter;
+using document::DocumenttypesConfig;
+using config::FileSpec;
+
+namespace storage {
+namespace distributor {
+
+// CppUnit fixture for GetOperation. Covers node selection (ideal copy when
+// in sync, all/trusted nodes when inconsistent), retry on storage failure,
+// and newest-timestamp-wins resolution across inconsistent replicas.
+class GetOperationTest : public CppUnit::TestFixture, public DistributorTestUtil {
+ CPPUNIT_TEST_SUITE(GetOperationTest);
+ CPPUNIT_TEST(testSimple);
+ CPPUNIT_TEST(testNotFound);
+ CPPUNIT_TEST(testResendOnStorageFailure);
+ CPPUNIT_TEST(testResendOnStorageFailureAllFail);
+ CPPUNIT_TEST(testSendToIdealCopyIfBucketInSync);
+ CPPUNIT_TEST(testReturnNotFoundWhenBucketNotInDb);
+ CPPUNIT_TEST(testAskAllNodesIfBucketIsInconsistent);
+ CPPUNIT_TEST(testSendToAllInvalidNodesWhenInconsistent);
+ CPPUNIT_TEST(testAskTrustedNodeIfBucketIsInconsistent);
+ CPPUNIT_TEST(testInconsistentSplit); // Test that we ask all nodes if a bucket is inconsistent.
+ CPPUNIT_TEST(testSendToAllInvalidCopies);
+ CPPUNIT_TEST(testMultiInconsistentBucket);
+ CPPUNIT_TEST(testMultiInconsistentBucketFail);
+ CPPUNIT_TEST(testMultiInconsistentBucketNotFound);
+ CPPUNIT_TEST(testMultiInconsistentBucketNotFoundDeleted);
+ CPPUNIT_TEST(testMultipleCopiesWithFailureOnLocalNode);
+ CPPUNIT_TEST(canGetDocumentsWhenAllReplicaNodesRetired);
+ CPPUNIT_TEST_SUITE_END();
+
+ document::DocumentTypeRepo::SP _repo;
+
+public:
+ document::DocumentId docId;
+ document::BucketId bucketId;
+ std::unique_ptr<Operation> op;
+
+ // Loads the document type repo from config-doctypes.cfg and resolves the
+ // bucket for the fixed test document "doc:test:uri".
+ void setUp() {
+ _repo.reset(
+ new document::DocumentTypeRepo(*ConfigGetter<DocumenttypesConfig>::
+ getConfig("config-doctypes", FileSpec("config-doctypes.cfg"))));
+ createLinks();
+
+ docId = document::DocumentId(document::DocIdString("test", "uri"));
+ bucketId = getExternalOperationHandler().getBucketId(docId);
+ };
+
+ void tearDown() {
+ close();
+ op.reset();
+ }
+
+ // Starts a GetOperation for docId; sent commands accumulate in _sender.
+ void sendGet() {
+ std::shared_ptr<api::GetCommand> msg(
+ new api::GetCommand(document::BucketId(0), docId, "[all]"));
+
+ op.reset(new GetOperation(getExternalOperationHandler(),
+ msg,
+ getDistributor().getMetrics().
+ gets[msg->getLoadType()]));
+ op->start(_sender, framework::MilliSecTime(0));
+ }
+
+ // Replies to Get command #idx (-1 = last) with the given result. A
+ // non-empty authorVal produces a "text/html" document whose "author"
+ // field is set; an empty authorVal yields a document-less reply
+ // (not-found, or a tombstone if timestamp != 0).
+ void sendReply(uint32_t idx,
+ api::ReturnCode::Result result,
+ std::string authorVal, uint32_t timestamp)
+ {
+ if (idx == (uint32_t)-1) {
+ idx = _sender.commands.size() - 1;
+ }
+
+ std::shared_ptr<api::StorageCommand> msg2 = _sender.commands[idx];
+ CPPUNIT_ASSERT_EQUAL(api::MessageType::GET, msg2->getType());
+
+ api::GetCommand* tmp = static_cast<api::GetCommand*>(msg2.get());
+ document::Document::SP doc;
+
+ if (authorVal.length()) {
+ const document::DocumentType* type(_repo->getDocumentType("text/html"));
+ doc = document::Document::SP(
+ new document::Document(*type, docId));
+
+ doc->setValue(doc->getField("author"),
+ document::StringFieldValue(authorVal));
+ }
+
+ api::GetReply* reply = new api::GetReply(*tmp, doc, timestamp);
+ reply->setResult(result);
+
+ op->receive(_sender, std::shared_ptr<api::StorageReply>(reply));
+ }
+
+ void replyWithFailure() {
+ sendReply(-1, api::ReturnCode::IO_FAILURE, "", 0);
+ }
+
+ void replyWithNotFound() {
+ sendReply(-1, api::ReturnCode::OK, "", 0);
+ }
+
+ void replyWithDocument() {
+ sendReply(-1, api::ReturnCode::OK, "foo", 100);
+ }
+
+ // Extracts the "author" field from the document in the last GET reply;
+ // returns a diagnostic string if the last reply was not a GET reply.
+ std::string getLastReplyAuthor() {
+ api::StorageMessage& msg = *_sender.replies[_sender.replies.size() - 1];
+
+ if (msg.getType() == api::MessageType::GET_REPLY) {
+ document::Document::SP doc(
+ dynamic_cast<api::GetReply&>(msg).getDocument());
+
+ return doc->getValue(doc->getField("author"))->toString();
+ } else {
+ std::ostringstream ost;
+ ost << "Last reply was not a GET reply, but " << msg;
+ return ost.str();
+ }
+ }
+
+ void setClusterState(const std::string& clusterState) {
+ _distributor->enableClusterState(lib::ClusterState(clusterState));
+ }
+
+ void testSimple();
+ void testReturnNotFoundWhenBucketNotInDb();
+ void testNotFound();
+ void testResendOnStorageFailure();
+ void testResendOnStorageFailureAllFail();
+ void testSendToIdealCopyIfBucketInSync();
+ void testAskAllNodesIfBucketIsInconsistent();
+ void testSendToAllInvalidNodesWhenInconsistent();
+ void testAskTrustedNodeIfBucketIsInconsistent();
+ void testInconsistentSplit();
+ void testMultiInconsistentBucket();
+ void testMultiInconsistentBucketFail();
+ void testMultiInconsistentBucketNotFound();
+ void testMultiInconsistentBucketNotFoundDeleted();
+ void testSendToAllInvalidCopies();
+ void testMultipleCopiesWithFailureOnLocalNode();
+ void canGetDocumentsWhenAllReplicaNodesRetired();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(GetOperationTest);
+
+// Two consistent replicas: the Get goes to a single node (0) and the
+// document reply is forwarded to the client with its timestamp.
+void
+GetOperationTest::testSimple()
+{
+ setClusterState("distributor:1 storage:2");
+
+ addNodesToBucketDB(bucketId, "0=4,1=4");
+
+ sendGet();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Get => 0"),
+ _sender.getCommands(true));
+
+ replyWithDocument();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 100) ReturnCode(NONE)"),
+ _sender.getLastReply());
+}
+
+// Inconsistent replicas but node 1's copy is marked trusted ("/t"): only
+// the trusted node is asked.
+void
+GetOperationTest::testAskTrustedNodeIfBucketIsInconsistent()
+{
+ setClusterState("distributor:1 storage:4");
+
+ addNodesToBucketDB(bucketId, "0=100/3/10,1=200/4/12/t");
+
+ sendGet();
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Get => 1"),
+ _sender.getCommands(true));
+
+ replyWithDocument();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 100) ReturnCode(NONE)"),
+ _sender.getLastReply());
+}
+
+// Inconsistent replicas with no trusted copy: both nodes are asked and the
+// reply with the newest timestamp (node 0, ts=2) wins.
+void
+GetOperationTest::testAskAllNodesIfBucketIsInconsistent()
+{
+ setClusterState("distributor:1 storage:4");
+
+ addNodesToBucketDB(bucketId, "0=100/3/10,1=200/4/12");
+
+ sendGet();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Get => 0,Get => 1"),
+ _sender.getCommands(true));
+
+ sendReply(0, api::ReturnCode::OK, "newauthor", 2);
+ sendReply(1, api::ReturnCode::OK, "oldauthor", 1);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 2) ReturnCode(NONE)"),
+ _sender.getLastReply());
+
+ CPPUNIT_ASSERT_EQUAL(std::string("newauthor"), getLastReplyAuthor());
+}
+
+
+// All copies have invalid bucket info (0/0/1): every copy must be queried,
+// and the newest timestamp still wins.
+void
+GetOperationTest::testSendToAllInvalidCopies()
+{
+ setClusterState("distributor:1 storage:4");
+
+ addNodesToBucketDB(bucketId, "2=0/0/1,3=0/0/1");
+
+ sendGet();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Get => 2,Get => 3"),
+ _sender.getCommands(true));
+
+ sendReply(0, api::ReturnCode::OK, "newauthor", 2);
+ sendReply(1, api::ReturnCode::OK, "oldauthor", 1);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 2) ReturnCode(NONE)"),
+ _sender.getLastReply());
+
+ CPPUNIT_ASSERT_EQUAL(std::string("newauthor"), getLastReplyAuthor());
+}
+
+// Mix of inconsistent valid copies (0, 1) and invalid copies (2, 3): all
+// four nodes are queried, invalid copies first; newest timestamp wins.
+void
+GetOperationTest::testSendToAllInvalidNodesWhenInconsistent()
+{
+ setClusterState("distributor:1 storage:4");
+
+ addNodesToBucketDB(bucketId, "0=100,1=200,2=0/0/1,3=0/0/1");
+
+ sendGet();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Get => 2,Get => 3,Get => 0,Get => 1"),
+ _sender.getCommands(true));
+
+ sendReply(0, api::ReturnCode::OK, "newauthor", 2);
+ sendReply(1, api::ReturnCode::OK, "oldauthor", 1);
+ sendReply(2, api::ReturnCode::OK, "oldauthor", 1);
+ sendReply(3, api::ReturnCode::OK, "oldauthor", 1);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 2) ReturnCode(NONE)"),
+ _sender.getLastReply());
+
+ CPPUNIT_ASSERT_EQUAL(std::string("newauthor"), getLastReplyAuthor());
+}
+
+// The document's location exists under two inconsistently split buckets
+// (16 and 17 used bits) on different nodes: both are queried and the newest
+// timestamp wins.
+void
+GetOperationTest::testInconsistentSplit()
+{
+ setClusterState("distributor:1 storage:4");
+
+ addNodesToBucketDB(document::BucketId(16, 0x2a52), "0=100");
+ addNodesToBucketDB(document::BucketId(17, 0x2a52), "1=200");
+
+ sendGet();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Get => 0,Get => 1"),
+ _sender.getCommands(true));
+
+ sendReply(0, api::ReturnCode::OK, "newauthor", 2);
+ sendReply(1, api::ReturnCode::OK, "oldauthor", 1);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 2) ReturnCode(NONE)"),
+ _sender.getLastReply());
+
+ CPPUNIT_ASSERT_EQUAL(std::string("newauthor"), getLastReplyAuthor());
+}
+
+
+// Two checksum groups ({0,2}=100, {1,3}=200), one queried node per group.
+// One group returns the document, the other an empty reply with ts=0
+// (not found): the found document is returned.
+void
+GetOperationTest::testMultiInconsistentBucketNotFound()
+{
+ setClusterState("distributor:1 storage:4");
+
+ addNodesToBucketDB(bucketId, "0=100,2=100,1=200,3=200");
+
+ sendGet();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Get => 0,Get => 1"),
+ _sender.getCommands(true));
+
+ sendReply(0, api::ReturnCode::OK, "newauthor", 2);
+ sendReply(1, api::ReturnCode::OK, "", 0);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 2) ReturnCode(NONE)"),
+ _sender.getLastReply());
+}
+
+// As above, but the empty reply carries timestamp 3 — a tombstone newer
+// than the found document (ts=2) — so the final reply is "not found" at
+// the tombstone's timestamp.
+void
+GetOperationTest::testMultiInconsistentBucketNotFoundDeleted()
+{
+ setClusterState("distributor:1 storage:4");
+
+ addNodesToBucketDB(bucketId, "0=100,2=100,1=200,3=200");
+
+ sendGet();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Get => 0,Get => 1"),
+ _sender.getCommands(true));
+
+ sendReply(0, api::ReturnCode::OK, "newauthor", 2);
+ // This signifies that the latest change was that the document was deleted
+ // at timestamp 3.
+ sendReply(1, api::ReturnCode::OK, "", 3);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 3) ReturnCode(NONE)"),
+ _sender.getLastReply());
+}
+
+// Two checksum groups, one node asked per group; both return documents and
+// the newer one (ts=2) is forwarded.
+void
+GetOperationTest::testMultiInconsistentBucket()
+{
+ setClusterState("distributor:1 storage:4");
+
+ addNodesToBucketDB(bucketId, "0=100,2=100,1=200,3=200");
+
+ sendGet();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Get => 0,Get => 1"),
+ _sender.getCommands(true));
+
+ sendReply(0, api::ReturnCode::OK, "newauthor", 2);
+ sendReply(1, api::ReturnCode::OK, "oldauthor", 1);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 2) ReturnCode(NONE)"),
+ _sender.getLastReply());
+
+ CPPUNIT_ASSERT_EQUAL(std::string("newauthor"), getLastReplyAuthor());
+}
+
+// One group's queried node fails with DISK_FAILURE: the operation retries
+// the other replica of that group (node 3) before answering, and the
+// retried node's document is forwarded.
+void
+GetOperationTest::testMultiInconsistentBucketFail()
+{
+ setClusterState("distributor:1 storage:4");
+
+ addNodesToBucketDB(bucketId, "0=100,2=100,1=200,3=200");
+
+ sendGet();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Get => 0,Get => 1"),
+ _sender.getCommands(true));
+
+ sendReply(0, api::ReturnCode::OK, "newauthor", 1);
+ sendReply(1, api::ReturnCode::DISK_FAILURE, "", 0);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Get(BucketId(0x4000000000002a52), doc:test:uri) => 3"),
+ _sender.getLastCommand());
+
+ replyWithDocument();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 100) ReturnCode(NONE)"),
+ _sender.getLastReply());
+}
+
+
+// No DB entry for the bucket: no storage node is contacted and an empty
+// (timestamp 0) reply goes straight back to the client.
+void
+GetOperationTest::testReturnNotFoundWhenBucketNotInDb()
+{
+ setClusterState("distributor:1 storage:1");
+
+ sendGet();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 0) ReturnCode(NONE)"),
+ _sender.getLastReply());
+}
+
+// Storage replies OK with no document: client gets a timestamp-0 reply and
+// the "notfound" failure metric for the default load type is incremented.
+void
+GetOperationTest::testNotFound()
+{
+ setClusterState("distributor:1 storage:1");
+
+ addNodesToBucketDB(bucketId, "0=100");
+
+ sendGet();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Get(BucketId(0x4000000000002a52), doc:test:uri) => 0"),
+ _sender.getLastCommand());
+
+ replyWithNotFound();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 0) ReturnCode(NONE)"),
+ _sender.getLastReply());
+
+ CPPUNIT_ASSERT_EQUAL(1, (int)(getDistributor().
+ getMetrics().gets[documentapi::LoadType::DEFAULT].
+ failures.notfound.getValue()));
+}
+
+// First untrusted replica fails with IO_FAILURE: the operation retries the
+// second replica and forwards its document.
+void
+GetOperationTest::testResendOnStorageFailure()
+{
+ setClusterState("distributor:1 storage:3");
+
+ // Add two nodes that are not trusted. GET should retry each one of them
+ // if one fails.
+ addNodesToBucketDB(bucketId, "1=100,2=100");
+
+ sendGet();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Get(BucketId(0x4000000000002a52), doc:test:uri) => 1"),
+ _sender.getLastCommand());
+
+ replyWithFailure();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Get(BucketId(0x4000000000002a52), doc:test:uri) => 2"),
+ _sender.getLastCommand());
+
+ replyWithDocument();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 100) ReturnCode(NONE)"),
+ _sender.getLastReply());
+}
+
+// Both replicas fail: after exhausting all copies the client reply carries
+// IO_FAILURE with timestamp 0.
+void
+GetOperationTest::testResendOnStorageFailureAllFail()
+{
+ setClusterState("distributor:1 storage:3");
+
+ // Add two nodes that are not trusted. GET should retry each one of them
+ // if one fails.
+ addNodesToBucketDB(bucketId, "1=100,2=100");
+
+ sendGet();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Get(BucketId(0x4000000000002a52), doc:test:uri) => 1"),
+ _sender.getLastCommand());
+
+ replyWithFailure();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Get(BucketId(0x4000000000002a52), doc:test:uri) => 2"),
+ _sender.getLastCommand());
+
+ replyWithFailure();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 0) ReturnCode(IO_FAILURE)"),
+ _sender.getLastReply());
+}
+
+// Three consistent replicas: only one node is asked, following the bucket
+// database order (node 1).
+void
+GetOperationTest::testSendToIdealCopyIfBucketInSync()
+{
+ setClusterState("distributor:1 storage:4");
+
+ addNodesToBucketDB(bucketId, "1=100,2=100,3=100");
+
+ sendGet();
+
+ // Should always send to node 1 (follow bucket db order)
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Get(BucketId(0x4000000000002a52), doc:test:uri) => 1"),
+ _sender.getLastCommand());
+
+ replyWithDocument();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 100) ReturnCode(NONE)"),
+ _sender.getLastReply());
+}
+
+// The local replica (node 0) is preferred initially; when it times out the
+// operation falls back to the remaining copy on node 2 and forwards its
+// document — no client reply is sent while retries remain.
+void
+GetOperationTest::testMultipleCopiesWithFailureOnLocalNode()
+{
+ setClusterState("distributor:1 storage:4");
+
+ // Node 0 is local copy to distributor 0 and will be preferred when
+ // sending initially.
+ addNodesToBucketDB(document::BucketId(16, 0x2a52), "2=100,0=100");
+
+ sendGet();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Get => 0"),
+ _sender.getCommands(true));
+
+ // Fail local node; no reply must be sent yet since we've got more nodes
+ // to try.
+ sendReply(0, api::ReturnCode::TIMEOUT, "", 0);
+
+ // Retry with remaining copy on node 2.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Get => 0,Get => 2"),
+ _sender.getCommands(true));
+
+ sendReply(1, api::ReturnCode::OK, "newestauthor", 3);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 3) ReturnCode(NONE)"),
+ _sender.getLastReply());
+
+ CPPUNIT_ASSERT_EQUAL(std::string("newestauthor"), getLastReplyAuthor());
+}
+
+// All replica nodes retired (.s:r): a Get must still be routed to a replica
+// (node 0) rather than failing outright.
+void
+GetOperationTest::canGetDocumentsWhenAllReplicaNodesRetired()
+{
+ setClusterState("distributor:1 storage:2 .0.s:r .1.s:r");
+ addNodesToBucketDB(bucketId, "0=4,1=4");
+ sendGet();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Get => 0"),
+ _sender.getCommands(true));
+}
+
+} // distributor
+} // storage
diff --git a/storage/src/tests/distributor/idealstatemanagertest.cpp b/storage/src/tests/distributor/idealstatemanagertest.cpp
new file mode 100644
index 00000000000..9c97a2ba967
--- /dev/null
+++ b/storage/src/tests/distributor/idealstatemanagertest.cpp
@@ -0,0 +1,268 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <tests/common/dummystoragelink.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storage/distributor/bucketdbupdater.h>
+#include <vespa/document/fieldvalue/document.h>
+#include <vespa/storage/common/bucketmessages.h>
+#include <vespa/storage/distributor/idealstatemanager.h>
+#include <vespa/storage/distributor/operations/idealstate/mergeoperation.h>
+#include <vespa/storage/distributor/operations/idealstate/removebucketoperation.h>
+#include <vespa/storage/distributor/operations/idealstate/setbucketstateoperation.h>
+#include <vespa/storage/distributor/operations/idealstate/splitoperation.h>
+#include <vespa/storageapi/message/stat.h>
+#include <vespa/storageapi/message/visitor.h>
+#include <vespa/storageapi/message/bucketsplitting.h>
+#include <vespa/storage/storageutil/utils.h>
+#include <tests/distributor/distributortestutil.h>
+#include <vespa/storage/distributor/statecheckers.h>
+#include <vespa/storageapi/message/state.h>
+
+namespace storage {
+namespace distributor {
+
+class IdealStateManagerTest : public CppUnit::TestFixture,
+ public DistributorTestUtil
+{
+public:
+ IdealStateManagerTest() {}
+ void setUp() {
+ createLinks();
+ };
+
+ void tearDown() {
+ close();
+ }
+
+ void testSibling();
+ void testClearActiveOnNodeDown();
+ void testRecheckWhenActive();
+ void testRecheckWhenPending();
+ void testOpsGenerationBusy();
+ void testStatusPage();
+ void testDisabledStateChecker();
+ void testBlockIdealStateOpsOnFullRequestBucketInfo();
+ void testBlockCheckForAllOperationsToSpecificBucket();
+
+ void setSystemState(const lib::ClusterState& systemState) {
+ _distributor->enableClusterState(systemState);
+ }
+
+ CPPUNIT_TEST_SUITE(IdealStateManagerTest);
+ CPPUNIT_TEST(testSibling);
+ CPPUNIT_TEST(testClearActiveOnNodeDown);
+ CPPUNIT_TEST(testRecheckWhenActive);
+ CPPUNIT_TEST(testStatusPage);
+ CPPUNIT_TEST(testDisabledStateChecker);
+ CPPUNIT_TEST(testBlockIdealStateOpsOnFullRequestBucketInfo);
+ CPPUNIT_TEST(testBlockCheckForAllOperationsToSpecificBucket);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(IdealStateManagerTest);
+
+void
+IdealStateManagerTest::testSibling()
+{
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(1,1),
+ getIdealStateManager().getDistributorComponent()
+ .getSibling(document::BucketId(1, 0)));
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(1,0),
+ getIdealStateManager().getDistributorComponent()
+ .getSibling(document::BucketId(1, 1)));
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(2,3),
+ getIdealStateManager().getDistributorComponent()
+ .getSibling(document::BucketId(2, 1)));
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(2,1),
+ getIdealStateManager().getDistributorComponent()
+ .getSibling(document::BucketId(2, 3)));
+}
+
+void
+IdealStateManagerTest::testStatusPage() {
+ close();
+ getDirConfig().getConfig("stor-distributormanager").set("splitsize", "100");
+ getDirConfig().getConfig("stor-distributormanager").set("splitcount", "1000000");
+ getDirConfig().getConfig("stor-distributormanager").set("joinsize", "0");
+ getDirConfig().getConfig("stor-distributormanager").set("joincount", "0");
+ createLinks();
+ setupDistributor(1, 1, "distributor:1 storage:1");
+
+ insertBucketInfo(document::BucketId(16, 5), 0, 0xff, 100, 200, true, true);
+ insertBucketInfo(document::BucketId(16, 2), 0, 0xff, 10, 10, true, true);
+
+ std::ostringstream ost;
+ getIdealStateManager().getBucketStatus(ost);
+
+ CPPUNIT_ASSERT_EQUAL(std::string("BucketId(0x4000000000000002) : [node(idx=0,crc=0xff,docs=10/10,bytes=10/10,trusted=true,active=true)]<br>\n"
+ "<b>BucketId(0x4000000000000005):</b> <i> : split: [Splitting bucket because its maximum size (200 b, 100 docs, 100 meta, 200 b total) is "
+ "higher than the configured limit of (100, 1000000)]</i> [node(idx=0,crc=0xff,docs=100/100,bytes=200/200,trusted=true,"
+ "active=true)]<br>\n"),
+ ost.str());
+}
+
+void
+IdealStateManagerTest::testDisabledStateChecker() {
+ setupDistributor(1, 1, "distributor:1 storage:1");
+
+ getConfig().setSplitSize(100);
+ getConfig().setSplitCount(1000000);
+ getConfig().disableStateChecker("SplitBucket");
+
+ insertBucketInfo(document::BucketId(16, 5), 0, 0xff, 100, 200, true, true);
+ insertBucketInfo(document::BucketId(16, 2), 0, 0xff, 10, 10, true, true);
+
+ std::ostringstream ost;
+ getIdealStateManager().getBucketStatus(ost);
+
+ CPPUNIT_ASSERT_EQUAL(std::string(
+ "BucketId(0x4000000000000002) : [node(idx=0,crc=0xff,docs=10/10,bytes=10/10,trusted=true,active=true)]<br>\n"
+ "<b>BucketId(0x4000000000000005):</b> <i> : split: [Splitting bucket because its maximum size (200 b, 100 docs, 100 meta, 200 b total) is "
+ "higher than the configured limit of (100, 1000000)]</i> [node(idx=0,crc=0xff,docs=100/100,bytes=200/200,trusted=true,"
+ "active=true)]<br>\n"),
+ ost.str());
+
+ tick();
+ CPPUNIT_ASSERT_EQUAL(std::string(""),
+ _distributor->getActiveIdealStateOperations());
+
+}
+
+void
+IdealStateManagerTest::testClearActiveOnNodeDown()
+{
+ setSystemState(lib::ClusterState("distributor:1 storage:3"));
+ for (int i = 1; i < 4; i++) {
+ insertBucketInfo(document::BucketId(16, i), 0, 0xff, 100, 200);
+ insertBucketInfo(document::BucketId(16, i), 1, 0xffe, 1020, 2300);
+ insertBucketInfo(document::BucketId(16, i), 2, 0xfff, 1030, 2400);
+ }
+
+ tick();
+
+ // Start all three operations.
+ for (uint32_t i = 0; i < 3; ++i) {
+ tick();
+ }
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("setbucketstate to [0] BucketId(0x4000000000000001) (pri 100)\n"
+ "setbucketstate to [0] BucketId(0x4000000000000002) (pri 100)\n"
+ "setbucketstate to [0] BucketId(0x4000000000000003) (pri 100)\n"),
+ _distributor->getActiveIdealStateOperations());
+
+ setSystemState(lib::ClusterState("distributor:1 storage:3 .0.s:d"));
+
+ CPPUNIT_ASSERT_EQUAL(std::string(""),
+ _distributor->getActiveIdealStateOperations());
+ CPPUNIT_ASSERT_EQUAL(uint32_t(0),
+ _distributor->getPendingMessageTracker()
+ .getNodeInfo().getPendingCount(0));
+}
+
+void
+IdealStateManagerTest::testRecheckWhenActive()
+{
+ for (uint32_t j = 0; j < 3; j++) {
+ insertBucketInfo(document::BucketId(16, 1), j, 0xff - j, 100, 200);
+ }
+
+ setSystemState(lib::ClusterState("distributor:1 storage:3"));
+
+ tick();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("setbucketstate to [0] BucketId(0x4000000000000001) (pri 100)\n"),
+ _distributor->getActiveIdealStateOperations());
+
+ tick();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("setbucketstate to [0] BucketId(0x4000000000000001) (pri 100)\n"),
+ _distributor->getActiveIdealStateOperations());
+
+ tick();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("setbucketstate to [0] BucketId(0x4000000000000001) (pri 100)\n"),
+ _distributor->getActiveIdealStateOperations());
+}
+
+void
+IdealStateManagerTest::testBlockIdealStateOpsOnFullRequestBucketInfo()
+{
+ setupDistributor(2, 10, "distributor:1 storage:2");
+
+ framework::defaultimplementation::FakeClock clock;
+ PendingMessageTracker tracker(_node->getComponentRegister());
+
+ document::BucketId bid(16, 1234);
+ std::vector<document::BucketId> buckets;
+
+ // RequestBucketInfoCommand does not have a specific bucketid since it's
+ // sent to the entire node. It will then use a null bucketid.
+ {
+ std::shared_ptr<api::RequestBucketInfoCommand> msg(
+ new api::RequestBucketInfoCommand(buckets));
+ msg->setAddress(
+ api::StorageMessageAddress("storage", lib::NodeType::STORAGE, 4));
+ tracker.insert(msg);
+ }
+
+ {
+ RemoveBucketOperation op("storage",
+ BucketAndNodes(bid, toVector<uint16_t>(3, 4)));
+ CPPUNIT_ASSERT(op.isBlocked(tracker));
+ }
+
+ {
+ // Don't trigger on requests to other nodes.
+ RemoveBucketOperation op("storage",
+ BucketAndNodes(bid, toVector<uint16_t>(3, 5)));
+ CPPUNIT_ASSERT(!op.isBlocked(tracker));
+ }
+
+ // Don't block on null-bucket messages that aren't RequestBucketInfo.
+ {
+ std::shared_ptr<api::CreateVisitorCommand> msg(
+ new api::CreateVisitorCommand("foo", "bar", "baz"));
+ msg->setAddress(
+ api::StorageMessageAddress("storage", lib::NodeType::STORAGE, 7));
+ tracker.insert(msg);
+ }
+
+ {
+ RemoveBucketOperation op("storage",
+ BucketAndNodes(bid, toVector<uint16_t>(7)));
+ CPPUNIT_ASSERT(!op.isBlocked(tracker));
+ }
+}
+
+void
+IdealStateManagerTest::testBlockCheckForAllOperationsToSpecificBucket()
+{
+ setupDistributor(2, 10, "distributor:1 storage:2");
+ framework::defaultimplementation::FakeClock clock;
+ PendingMessageTracker tracker(_node->getComponentRegister());
+ document::BucketId bid(16, 1234);
+
+ {
+ auto msg = std::make_shared<api::JoinBucketsCommand>(bid);
+ msg->setAddress(
+ api::StorageMessageAddress("storage", lib::NodeType::STORAGE, 4));
+ tracker.insert(msg);
+ }
+ {
+ RemoveBucketOperation op("storage",
+ BucketAndNodes(bid, toVector<uint16_t>(7)));
+ // Not blocked for exact node match.
+ CPPUNIT_ASSERT(!op.checkBlock(bid, tracker));
+ // But blocked for bucket match!
+ CPPUNIT_ASSERT(op.checkBlockForAllNodes(bid, tracker));
+ }
+}
+
+} // distributor
+} // storage
+
diff --git a/storage/src/tests/distributor/joinbuckettest.cpp b/storage/src/tests/distributor/joinbuckettest.cpp
new file mode 100644
index 00000000000..ec7e3aaac32
--- /dev/null
+++ b/storage/src/tests/distributor/joinbuckettest.cpp
@@ -0,0 +1,127 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <cppunit/extensions/HelperMacros.h>
+#include <vespa/storageapi/message/bucketsplitting.h>
+#include <vespa/storage/distributor/operations/idealstate/joinoperation.h>
+#include <vespa/storage/distributor/idealstatemanager.h>
+#include <tests/distributor/distributortestutil.h>
+
+namespace storage {
+namespace distributor {
+
+class JoinOperationTest : public CppUnit::TestFixture, public DistributorTestUtil
+{
+ CPPUNIT_TEST_SUITE(JoinOperationTest);
+ CPPUNIT_TEST(testSimple);
+ CPPUNIT_TEST(sendSparseJoinsToNodesWithoutBothSourceBuckets);
+ CPPUNIT_TEST_SUITE_END();
+
+ void checkSourceBucketsAndSendReply(
+ JoinOperation& op,
+ size_t msgIndex,
+ const std::vector<document::BucketId>& wantedIds);
+
+protected:
+ void testSimple();
+ void sendSparseJoinsToNodesWithoutBothSourceBuckets();
+
+public:
+ void setUp() {
+ createLinks();
+ };
+
+ void tearDown() {
+ close();
+ }
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(JoinOperationTest);
+
+void
+JoinOperationTest::testSimple()
+{
+ getConfig().setJoinCount(100);
+ getConfig().setJoinSize(1000);
+
+ addNodesToBucketDB(document::BucketId(33, 1), "0=250/50/300");
+ addNodesToBucketDB(document::BucketId(33, 0x100000001), "0=300/40/200");
+
+ _distributor->enableClusterState(lib::ClusterState("distributor:1 storage:1"));
+
+ JoinOperation op("storage",
+ BucketAndNodes(document::BucketId(32, 0),
+ toVector<uint16_t>(0)),
+ toVector(document::BucketId(33, 1),
+ document::BucketId(33, 0x100000001)));
+
+ op.setIdealStateManager(&getIdealStateManager());
+ op.start(_sender, framework::MilliSecTime(0));
+
+ checkSourceBucketsAndSendReply(op, 0, {{33, 1}, {33, 0x100000001}});
+
+ CPPUNIT_ASSERT(!getBucket(document::BucketId(33, 0x100000001)).valid());
+ CPPUNIT_ASSERT(!getBucket(document::BucketId(33, 1)).valid());
+
+ BucketDatabase::Entry entry = getBucket(document::BucketId(32, 0));
+ CPPUNIT_ASSERT(entry.valid());
+ CPPUNIT_ASSERT_EQUAL((uint16_t)0, entry->getNodeRef(0).getNode());
+ CPPUNIT_ASSERT_EQUAL(api::BucketInfo(666, 90, 500),
+ entry->getNodeRef(0).getBucketInfo());
+}
+
+void
+JoinOperationTest::checkSourceBucketsAndSendReply(
+ JoinOperation& op,
+ size_t msgIndex,
+ const std::vector<document::BucketId>& wantedIds)
+{
+ CPPUNIT_ASSERT(_sender.commands.size() > msgIndex);
+
+ std::shared_ptr<api::StorageCommand> msg(_sender.commands[msgIndex]);
+ CPPUNIT_ASSERT_EQUAL(api::MessageType::JOINBUCKETS, msg->getType());
+
+ api::JoinBucketsCommand& joinCmd(
+ dynamic_cast<api::JoinBucketsCommand&>(*msg));
+ CPPUNIT_ASSERT_EQUAL(wantedIds, joinCmd.getSourceBuckets());
+
+ std::shared_ptr<api::StorageReply> reply(joinCmd.makeReply());
+ api::JoinBucketsReply& sreply(
+ dynamic_cast<api::JoinBucketsReply&>(*reply));
+ sreply.setBucketInfo(api::BucketInfo(666, 90, 500));
+
+ op.receive(_sender, reply);
+}
+
+/**
+ * If the set of buckets kept on nodes is disjoint, send sparse joins (same
+ * bucket id used as both source buckets) for those nodes having only one of
+ * the buckets.
+ */
+void
+JoinOperationTest::sendSparseJoinsToNodesWithoutBothSourceBuckets()
+{
+ getConfig().setJoinCount(100);
+ getConfig().setJoinSize(1000);
+
+ addNodesToBucketDB(document::BucketId(33, 1), "0=250/50/300,1=250/50/300");
+ addNodesToBucketDB(document::BucketId(33, 0x100000001), "0=300/40/200");
+
+ _distributor->enableClusterState(
+ lib::ClusterState("distributor:1 storage:2"));
+
+ JoinOperation op("storage",
+ BucketAndNodes(document::BucketId(32, 0),
+ toVector<uint16_t>(0, 1)),
+ toVector(document::BucketId(33, 1),
+ document::BucketId(33, 0x100000001)));
+
+ op.setIdealStateManager(&getIdealStateManager());
+ op.start(_sender, framework::MilliSecTime(0));
+
+ checkSourceBucketsAndSendReply(op, 0, {{33, 1}, {33, 0x100000001}});
+ checkSourceBucketsAndSendReply(op, 1, {{33, 1}, {33, 1}});
+}
+
+}
+
+}
diff --git a/storage/src/tests/distributor/maintenancemocks.h b/storage/src/tests/distributor/maintenancemocks.h
new file mode 100644
index 00000000000..923f7edec2b
--- /dev/null
+++ b/storage/src/tests/distributor/maintenancemocks.h
@@ -0,0 +1,123 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <sstream>
+#include <vespa/storage/distributor/maintenance/maintenanceprioritygenerator.h>
+#include <vespa/storage/distributor/maintenance/maintenanceoperationgenerator.h>
+#include <vespa/storage/distributor/operationstarter.h>
+#include <vespa/storage/distributor/operations/operation.h>
+#include <vespa/storageframework/defaultimplementation/clock/fakeclock.h>
+
+namespace storage {
+namespace distributor {
+
+class MockMaintenancePriorityGenerator
+ : public MaintenancePriorityGenerator
+{
+ MaintenancePriorityAndType prioritize(
+ const document::BucketId&,
+ NodeMaintenanceStatsTracker& stats) const
+ {
+ stats.incMovingOut(1);
+ stats.incCopyingIn(2);
+ return MaintenancePriorityAndType(
+ MaintenancePriority(MaintenancePriority::VERY_HIGH),
+ MaintenanceOperation::MERGE_BUCKET);
+ }
+};
+
+
+class MockOperation : public MaintenanceOperation
+{
+ document::BucketId _bucketId;
+ std::string _reason;
+ bool _shouldBlock;
+public:
+ MockOperation(const document::BucketId& bucketId)
+ : _bucketId(bucketId),
+ _shouldBlock(false)
+ {}
+
+ std::string toString() const {
+ return _bucketId.toString();
+ }
+
+ void onClose(DistributorMessageSender&) {
+ }
+ const char* getName() const {
+ return "MockOperation";
+ }
+ virtual const std::string& getDetailedReason() const {
+ return _reason;
+ }
+ void onStart(DistributorMessageSender&) {
+ }
+ void onReceive(DistributorMessageSender&, const std::shared_ptr<api::StorageReply>&) {
+ }
+ bool isBlocked(const PendingMessageTracker&) const {
+ return _shouldBlock;
+ }
+ void setShouldBlock(bool shouldBlock) {
+ _shouldBlock = shouldBlock;
+ }
+};
+
+class MockMaintenanceOperationGenerator
+ : public MaintenanceOperationGenerator
+{
+public:
+ MaintenanceOperation::SP generate(const document::BucketId& id) const {
+ return MaintenanceOperation::SP(new MockOperation(id));
+ }
+
+ std::vector<MaintenanceOperation::SP> generateAll(
+ const document::BucketId& id,
+ NodeMaintenanceStatsTracker& tracker) const
+ {
+ (void) tracker;
+ std::vector<MaintenanceOperation::SP> ret;
+ ret.push_back(MaintenanceOperation::SP(new MockOperation(id)));
+ return ret;
+ }
+
+};
+
+class MockOperationStarter
+ : public OperationStarter
+{
+ std::ostringstream _started;
+ std::vector<Operation::SP> _operations;
+ bool _shouldStart;
+public:
+ MockOperationStarter()
+ : _shouldStart(true)
+ {}
+
+ bool start(const std::shared_ptr<Operation>& operation,
+ Priority priority)
+ {
+ if (_shouldStart) {
+ _started << operation->toString()
+ << ", pri " << static_cast<int>(priority)
+ << "\n";
+ _operations.push_back(operation);
+ }
+ return _shouldStart;
+ }
+
+ void setShouldStartOperations(bool shouldStart) {
+ _shouldStart = shouldStart;
+ }
+
+ std::vector<Operation::SP>& getOperations() {
+ return _operations;
+ }
+
+ std::string toString() const {
+ return _started.str();
+ }
+};
+
+}
+}
+
diff --git a/storage/src/tests/distributor/maintenanceschedulertest.cpp b/storage/src/tests/distributor/maintenanceschedulertest.cpp
new file mode 100644
index 00000000000..4316bfd137c
--- /dev/null
+++ b/storage/src/tests/distributor/maintenanceschedulertest.cpp
@@ -0,0 +1,108 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <string>
+#include <sstream>
+#include <memory>
+#include <vespa/storage/distributor/maintenance/simplebucketprioritydatabase.h>
+#include <vespa/storage/distributor/maintenance/maintenancescheduler.h>
+#include <vespa/storage/distributor/bucketdb/mapbucketdatabase.h>
+#include <tests/distributor/maintenancemocks.h>
+
+namespace storage {
+
+namespace distributor {
+
+using document::BucketId;
+typedef MaintenancePriority Priority;
+typedef MaintenanceScheduler::WaitTimeMs WaitTimeMs;
+
+class MaintenanceSchedulerTest : public CppUnit::TestFixture {
+ CPPUNIT_TEST_SUITE(MaintenanceSchedulerTest);
+ CPPUNIT_TEST(testPriorityClearedAfterScheduled);
+ CPPUNIT_TEST(testOperationIsScheduled);
+ CPPUNIT_TEST(testNoOperationsToSchedule);
+ CPPUNIT_TEST(testSuppressLowPrioritiesInEmergencyMode);
+ CPPUNIT_TEST(testPriorityNotClearedIfOperationNotStarted);
+ CPPUNIT_TEST_SUITE_END();
+
+ std::unique_ptr<SimpleBucketPriorityDatabase> _priorityDb;
+ std::unique_ptr<MockMaintenanceOperationGenerator> _operationGenerator;
+ std::unique_ptr<MockOperationStarter> _operationStarter;
+ std::unique_ptr<MaintenanceScheduler> _scheduler;
+
+ void addBucketToDb(int bucketNum);
+public:
+ void testPriorityClearedAfterScheduled();
+ void testOperationIsScheduled();
+ void testNoOperationsToSchedule();
+ void testSuppressLowPrioritiesInEmergencyMode();
+ void testPriorityNotClearedIfOperationNotStarted();
+
+ void setUp();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(MaintenanceSchedulerTest);
+
+void
+MaintenanceSchedulerTest::setUp()
+{
+ _priorityDb.reset(new SimpleBucketPriorityDatabase());
+ _operationGenerator.reset(new MockMaintenanceOperationGenerator());
+ _operationStarter.reset(new MockOperationStarter());
+ _scheduler.reset(new MaintenanceScheduler(*_operationGenerator,
+ *_priorityDb,
+ *_operationStarter));
+}
+
+void
+MaintenanceSchedulerTest::testPriorityClearedAfterScheduled()
+{
+ _priorityDb->setPriority(PrioritizedBucket(BucketId(16, 1), Priority::VERY_HIGH));
+ _scheduler->tick(MaintenanceScheduler::NORMAL_SCHEDULING_MODE);
+ CPPUNIT_ASSERT_EQUAL(std::string(), _priorityDb->toString());
+}
+
+void
+MaintenanceSchedulerTest::testOperationIsScheduled()
+{
+ _priorityDb->setPriority(PrioritizedBucket(BucketId(16, 1), Priority::MEDIUM));
+ _scheduler->tick(MaintenanceScheduler::NORMAL_SCHEDULING_MODE);
+ CPPUNIT_ASSERT_EQUAL(std::string("BucketId(0x4000000000000001), pri 100\n"),
+ _operationStarter->toString());
+}
+
+void
+MaintenanceSchedulerTest::testNoOperationsToSchedule()
+{
+ WaitTimeMs waitMs(_scheduler->tick(MaintenanceScheduler::NORMAL_SCHEDULING_MODE));
+ CPPUNIT_ASSERT_EQUAL(WaitTimeMs(1), waitMs);
+ CPPUNIT_ASSERT_EQUAL(std::string(), _operationStarter->toString());
+}
+
+void
+MaintenanceSchedulerTest::testSuppressLowPrioritiesInEmergencyMode()
+{
+ _priorityDb->setPriority(PrioritizedBucket(BucketId(16, 1), Priority::HIGH));
+ _priorityDb->setPriority(PrioritizedBucket(BucketId(16, 2), Priority::VERY_HIGH));
+ CPPUNIT_ASSERT_EQUAL(WaitTimeMs(0), _scheduler->tick(MaintenanceScheduler::RECOVERY_SCHEDULING_MODE));
+ CPPUNIT_ASSERT_EQUAL(WaitTimeMs(1), _scheduler->tick(MaintenanceScheduler::RECOVERY_SCHEDULING_MODE));
+ CPPUNIT_ASSERT_EQUAL(std::string("BucketId(0x4000000000000002), pri 0\n"),
+ _operationStarter->toString());
+ CPPUNIT_ASSERT_EQUAL(std::string("PrioritizedBucket(BucketId(0x4000000000000001), pri HIGH)\n"),
+ _priorityDb->toString());
+}
+
+void
+MaintenanceSchedulerTest::testPriorityNotClearedIfOperationNotStarted()
+{
+ _priorityDb->setPriority(PrioritizedBucket(BucketId(16, 1), Priority::HIGH));
+ _operationStarter->setShouldStartOperations(false);
+ WaitTimeMs waitMs(_scheduler->tick(MaintenanceScheduler::NORMAL_SCHEDULING_MODE));
+ CPPUNIT_ASSERT_EQUAL(WaitTimeMs(1), waitMs);
+ CPPUNIT_ASSERT_EQUAL(std::string("PrioritizedBucket(BucketId(0x4000000000000001), pri HIGH)\n"),
+ _priorityDb->toString());
+}
+
+}
+}
diff --git a/storage/src/tests/distributor/mapbucketdatabasetest.cpp b/storage/src/tests/distributor/mapbucketdatabasetest.cpp
new file mode 100644
index 00000000000..ab8e5add65f
--- /dev/null
+++ b/storage/src/tests/distributor/mapbucketdatabasetest.cpp
@@ -0,0 +1,26 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/vespalib/util/document_runnable.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <cppunit/extensions/HelperMacros.h>
+#include <vespa/storage/distributor/bucketdb/mapbucketdatabase.h>
+#include <vespa/storage/storageutil/utils.h>
+#include <tests/distributor/bucketdatabasetest.h>
+
+namespace storage {
+namespace distributor {
+
+struct MapBucketDatabaseTest : public BucketDatabaseTest {
+ MapBucketDatabase _db;
+
+ virtual BucketDatabase& db() { return _db; };
+
+ CPPUNIT_TEST_SUITE(MapBucketDatabaseTest);
+ SETUP_DATABASE_TESTS();
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(MapBucketDatabaseTest);
+
+}
+}
diff --git a/storage/src/tests/distributor/mergelimitertest.cpp b/storage/src/tests/distributor/mergelimitertest.cpp
new file mode 100644
index 00000000000..fd86e071579
--- /dev/null
+++ b/storage/src/tests/distributor/mergelimitertest.cpp
@@ -0,0 +1,161 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/util/linkedptr.h>
+#include <vespa/storage/distributor/operations/idealstate/mergelimiter.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+
+namespace storage {
+namespace distributor {
+
+struct MergeLimiterTest : public CppUnit::TestFixture
+{
+ void testKeepsAllBelowLimit();
+ void testLessThanMaxUntrusted();
+ void testMoreThanMaxUntrusted();
+ void testAllUntrustedLessThanMaxVariants();
+ void testAllUntrustedMoreThanMaxVariants();
+ void testSourceOnlyLast();
+
+ CPPUNIT_TEST_SUITE(MergeLimiterTest);
+ CPPUNIT_TEST(testKeepsAllBelowLimit);
+ CPPUNIT_TEST(testLessThanMaxUntrusted);
+ CPPUNIT_TEST(testMoreThanMaxUntrusted);
+ CPPUNIT_TEST(testAllUntrustedLessThanMaxVariants);
+ CPPUNIT_TEST(testAllUntrustedMoreThanMaxVariants);
+ CPPUNIT_TEST(testSourceOnlyLast);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(MergeLimiterTest);
+
+namespace {
+ typedef vespalib::LinkedPtr<BucketCopy> BucketCopyPtr;
+ std::vector<BucketCopyPtr> _bucketDatabase;
+
+ struct NodeFactory {
+ std::vector<MergeMetaData> _nodes;
+
+ NodeFactory& add(int index, int crc) {
+ _bucketDatabase.push_back(BucketCopyPtr(
+ new BucketCopy(0, index, api::BucketInfo(crc, 5, 10))));
+ _nodes.push_back(MergeMetaData(index, *_bucketDatabase.back()));
+ return *this;
+ }
+ NodeFactory& addTrusted(int index, int crc) {
+ add(index, crc);
+ _bucketDatabase.back()->setTrusted(true);
+ return *this;
+ }
+ NodeFactory& setSourceOnly() {
+ _nodes.back()._sourceOnly = true;
+ return *this;
+ }
+
+ operator const MergeLimiter::NodeArray&() const { return _nodes; }
+ };
+
+ #define ASSERT_LIMIT(maxNodes, nodes, result) \
+ { \
+ MergeLimiter limiter(maxNodes); \
+ limiter.limitMergeToMaxNodes(nodes); \
+ std::ostringstream actual; \
+ for (uint32_t i=0; i<nodes.size(); ++i) { \
+ if (i != 0) actual << ","; \
+ actual << nodes[i]._nodeIndex; \
+ if (nodes[i]._sourceOnly) actual << 's'; \
+ } \
+ CPPUNIT_ASSERT_EQUAL(std::string(result), actual.str()); \
+ }
+}
+
+// If there are <= max nodes, then none should be removed.
+void
+MergeLimiterTest::testKeepsAllBelowLimit()
+{
+ MergeLimiter::NodeArray nodes(NodeFactory()
+ .addTrusted(3, 0x4)
+ .addTrusted(5, 0x4)
+ .add(9, 0x6)
+ .add(2, 0x6)
+ .add(4, 0x5));
+
+ ASSERT_LIMIT(8, nodes, "3,5,9,2,4");
+}
+// If fewer than max nodes are untrusted, merge all untrusted copies with a
+// trusted one. (Optionally with extra trusted copies if there is space)
+void
+MergeLimiterTest::testLessThanMaxUntrusted()
+{
+ MergeLimiter::NodeArray nodes(NodeFactory()
+ .addTrusted(3, 0x4)
+ .addTrusted(5, 0x4)
+ .add(9, 0x6)
+ .add(2, 0x6)
+ .add(4, 0x5));
+ ASSERT_LIMIT(4, nodes, "2,4,9,5");
+}
+// With more than max untrusted copies, just merge one trusted copy with as
+// many untrusted copies as will fit.
+void
+MergeLimiterTest::testMoreThanMaxUntrusted()
+{
+ MergeLimiter::NodeArray nodes(NodeFactory()
+ .addTrusted(3, 0x4)
+ .addTrusted(5, 0x4)
+ .add(9, 0x6)
+ .add(2, 0x6)
+ .add(13, 0x9)
+ .add(1, 0x7)
+ .add(4, 0x5));
+ ASSERT_LIMIT(4, nodes, "2,13,1,5");
+}
+// With nothing trusted: if there are <= max different variants (checksums),
+// merge one of each variant. After this merge, all these nodes can be set
+// trusted. (Except for any source only ones)
+void
+MergeLimiterTest::testAllUntrustedLessThanMaxVariants()
+{
+ MergeLimiter::NodeArray nodes(NodeFactory()
+ .add(3, 0x4)
+ .add(5, 0x4)
+ .add(9, 0x6)
+ .add(2, 0x6)
+ .add(13, 0x3)
+ .add(1, 0x3)
+ .add(4, 0x3));
+ ASSERT_LIMIT(4, nodes, "5,2,4,3");
+}
+// With nothing trusted and more than max variants, we just have to merge one
+// of each variant until we end up with less than max variants.
+void
+MergeLimiterTest::testAllUntrustedMoreThanMaxVariants()
+{
+ MergeLimiter::NodeArray nodes(NodeFactory()
+ .add(3, 0x4)
+ .add(5, 0x5)
+ .add(9, 0x6)
+ .add(2, 0x6)
+ .add(13, 0x3)
+ .add(1, 0x9)
+ .add(4, 0x8));
+ ASSERT_LIMIT(4, nodes, "3,5,2,13");
+}
+
+// Source-only copies must always be ordered last in the limited node set,
+// even when they are trusted copies.
+void
+MergeLimiterTest::testSourceOnlyLast()
+{
+ MergeLimiter::NodeArray nodes(NodeFactory()
+ .addTrusted(3, 0x4)
+ .addTrusted(5, 0x4).setSourceOnly()
+ .add(9, 0x6)
+ .add(2, 0x6).setSourceOnly()
+ .add(13, 0x9)
+ .add(1, 0x7)
+ .add(4, 0x5));
+ ASSERT_LIMIT(4, nodes, "13,1,2s,5s");
+}
+
+} // distributor
+} // storage
diff --git a/storage/src/tests/distributor/mergeoperationtest.cpp b/storage/src/tests/distributor/mergeoperationtest.cpp
new file mode 100644
index 00000000000..a2373731bc3
--- /dev/null
+++ b/storage/src/tests/distributor/mergeoperationtest.cpp
@@ -0,0 +1,430 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <boost/lexical_cast.hpp>
+#include <cppunit/extensions/HelperMacros.h>
+#include <iomanip>
+#include <iostream>
+#include <memory>
+#include <tests/common/dummystoragelink.h>
+#include <vespa/storage/distributor/idealstatemanager.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storage/distributor/operations/idealstate/mergeoperation.h>
+#include <vespa/storage/distributor/pendingmessagetracker.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <vespa/storage/distributor/bucketdbupdater.h>
+#include <tests/distributor/distributortestutil.h>
+#include <vespa/vespalib/text/stringtokenizer.h>
+
+using std::shared_ptr;
+
+namespace storage {
+namespace distributor {
+
+class MergeOperationTest : public CppUnit::TestFixture,
+ public DistributorTestUtil
+{
+ CPPUNIT_TEST_SUITE(MergeOperationTest);
+ CPPUNIT_TEST(testSimple);
+ CPPUNIT_TEST(testFailIfSourceOnlyCopiesChanged);
+ CPPUNIT_TEST(testGenerateNodeList);
+ CPPUNIT_TEST(doNotRemoveCopiesWithPendingMessages);
+ CPPUNIT_TEST(testDoNotRemoveActiveSourceOnlyCopies);
+ CPPUNIT_TEST(testMarkRedundantTrustedCopiesAsSourceOnly);
+ CPPUNIT_TEST(onlyMarkRedundantRetiredReplicasAsSourceOnly);
+ CPPUNIT_TEST_SUITE_END();
+
+ std::unique_ptr<PendingMessageTracker> _pendingTracker;
+
+protected:
+ void testSimple();
+ void testFailIfSourceOnlyCopiesChanged();
+ void testGenerateNodeList();
+ void doNotRemoveCopiesWithPendingMessages();
+ void testDoNotRemoveActiveSourceOnlyCopies();
+ void testMarkRedundantTrustedCopiesAsSourceOnly();
+ void onlyMarkRedundantRetiredReplicasAsSourceOnly();
+
+public:
+ void setUp() {
+ createLinks();
+ _pendingTracker.reset(new PendingMessageTracker(getComponentRegister()));
+ _sender.setPendingMessageTracker(*_pendingTracker);
+ }
+
+ void tearDown() {
+ close();
+ }
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(MergeOperationTest);
+
+void
+MergeOperationTest::testSimple()
+{
+ getClock().setAbsoluteTimeInSeconds(10);
+
+ addNodesToBucketDB(document::BucketId(16, 1),
+ "0=10/1/1/t,"
+ "1=20/1/1,"
+ "2=10/1/1/t");
+
+ _distributor->enableClusterState(lib::ClusterState("distributor:1 storage:3"));
+
+ MergeOperation op(BucketAndNodes(document::BucketId(16, 1),
+ toVector<uint16_t>(0, 1, 2)));
+ op.setIdealStateManager(&getIdealStateManager());
+ op.start(_sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string(
+ "MergeBucketCommand(BucketId(0x4000000000000001), to time 10000000, "
+ "cluster state version: 0, nodes: [0, 2, 1 (source only)], chain: [], "
+ "reasons to start: ) => 0"),
+ _sender.getLastCommand(true));
+
+ sendReply(op);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("DeleteBucketCommand(BucketId(0x4000000000000001)) "
+ "Reasons to start: => 1"),
+ _sender.getLastCommand(true));
+
+}
+
+void
+MergeOperationTest::testFailIfSourceOnlyCopiesChanged()
+{
+ getClock().setAbsoluteTimeInSeconds(10);
+
+ addNodesToBucketDB(document::BucketId(16, 1),
+ "0=10/1/1/t,"
+ "1=20/1/1,"
+ "2=10/1/1/t");
+
+ _distributor->enableClusterState(lib::ClusterState("distributor:1 storage:3"));
+
+ MergeOperation op(BucketAndNodes(document::BucketId(16, 1),
+ toVector<uint16_t>(0, 1, 2)));
+ op.setIdealStateManager(&getIdealStateManager());
+ op.start(_sender, framework::MilliSecTime(0));
+
+ std::string merge("MergeBucketCommand(BucketId(0x4000000000000001), to time 10000000, "
+ "cluster state version: 0, nodes: [0, 2, 1 (source only)], chain: [], "
+ "reasons to start: ) => 0");
+
+ CPPUNIT_ASSERT_EQUAL(merge, _sender.getLastCommand(true));
+ {
+ const api::MergeBucketCommand& cmd(
+ dynamic_cast<api::MergeBucketCommand&>(*_sender.commands[0]));
+ CPPUNIT_ASSERT_EQUAL(uint16_t(0), cmd.getSourceIndex());
+ }
+
+ // Source-only copy changed during merge
+ addNodesToBucketDB(document::BucketId(16, 1),
+ "0=10/1/1/t,"
+ "1=40/1/1,"
+ "2=10/1/1/t");
+ sendReply(op);
+ // Should not be a remove here!
+ CPPUNIT_ASSERT_EQUAL(merge, _sender.getLastCommand(true));
+ CPPUNIT_ASSERT(!op.ok());
+}
+
+namespace {
+std::string getNodeList(std::string state, uint32_t redundancy, std::string existing) {
+ lib::Distribution distribution(
+ lib::Distribution::getDefaultDistributionConfig(redundancy));
+ lib::ClusterState clusterState(state);
+ vespalib::StringTokenizer st(existing, ",");
+ std::vector<BucketCopy> bucketDB(st.size());
+ for (uint32_t i = 0; i < st.size(); i++) {
+ std::string num = st[i];
+ size_t pos = num.find('t');
+ bool trusted = false;
+
+ if (pos != std::string::npos) {
+ num.erase(pos);
+ trusted = true;
+ }
+ bucketDB[i] = BucketCopy(0, atoi(num.c_str()),
+ api::BucketInfo(1, 2, 3));
+ bucketDB[i].setTrusted(trusted);
+ }
+ std::vector<MergeMetaData> nodes(st.size());
+ for (uint32_t i = 0; i < st.size(); i++) {
+ nodes[i] = MergeMetaData(bucketDB[i].getNode(), bucketDB[i]);
+ }
+ MergeLimiter limiter(16);
+ MergeOperation::generateSortedNodeList(distribution, clusterState,
+ document::BucketId(32, 1),
+ limiter, nodes);
+ std::ostringstream actual;
+ for (uint32_t i = 0; i < nodes.size(); i++) {
+ if (i != 0) {
+ actual << ",";
+ }
+ actual << nodes[i]._nodeIndex;
+ if (nodes[i]._sourceOnly) {
+ actual << "s";
+ }
+ }
+ return actual.str();
+}
+}
+
+void
+MergeOperationTest::testGenerateNodeList()
+{
+ // If this fails, the distribution has changed and the rest of the test will
+ // likely fail
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,6,8,0,9,2,1,4"),
+ getNodeList("storage:10", 10, "0,1,2,3,4,5,6,7,8,9"));
+
+ // Nodes that are initializing should be treated as up
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7s,6s"),
+ getNodeList("storage:10 .3.s:i .5.s:i", 2, "7,6,3,5")); // Ideal: 3,5
+
+ // Order is given by ideal state algorithm, not order of storagenodes in bucket db
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7"),
+ getNodeList("storage:10", 3, "3,7,5"));
+
+ // Node not in ideal state will be used if not enough nodes in ideal state
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,7,6"),
+ getNodeList("storage:10", 3, "3,7,6"));
+
+ // Nodes not in ideal state will be included as source only after redundancy
+ // is reached
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,8s"),
+ getNodeList("storage:10", 3, "3,5,7,8"));
+
+ // Need at least redundancy copies that are not source only
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,8,9s"),
+ getNodeList("storage:10", 3, "3,5,8,9"));
+
+ // Order is given by storagenodes in bucket db
+ // when no nodes are in ideal state
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("4,1,2"),
+ getNodeList("storage:10", 3, "4,1,2"));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,0s,1s,2s,4s,5s,6s,7s,8s,9s"),
+ getNodeList("storage:10", 1, "0,1,2,3,4,5,6,7,8,9"));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,0s,1s,2s,4s,6s,7s,8s,9s"),
+ getNodeList("storage:10", 2, "0,1,2,3,4,5,6,7,8,9"));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,0s,1s,2s,4s,6s,8s,9s"),
+ getNodeList("storage:10", 3, "0,1,2,3,4,5,6,7,8,9"));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,6,0s,1s,2s,4s,8s,9s"),
+ getNodeList("storage:10", 4, "0,1,2,3,4,5,6,7,8,9"));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,6,8,0s,1s,2s,4s,9s"),
+ getNodeList("storage:10", 5, "0,1,2,3,4,5,6,7,8,9"));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,6,8,0,1s,2s,4s,9s"),
+ getNodeList("storage:10", 6, "0,1,2,3,4,5,6,7,8,9"));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,6,8,0,9,1s,2s,4s"),
+ getNodeList("storage:10", 7, "0,1,2,3,4,5,6,7,8,9"));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,6,8,0,9,2,1s,4s"),
+ getNodeList("storage:10", 8, "0,1,2,3,4,5,6,7,8,9"));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,6,8,0,9,2,1,4s"),
+ getNodeList("storage:10", 9, "0,1,2,3,4,5,6,7,8,9"));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,6,8,0,9,2,1,4"),
+ getNodeList("storage:10", 10, "0,1,2,3,4,5,6,7,8,9"));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,9s,8s,7s,6s,5s,4s,2s,1s,0s"),
+ getNodeList("storage:10", 1, "9,8,7,6,5,4,3,2,1,0"));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,9s,8s,7s,6s,4s,2s,1s,0s"),
+ getNodeList("storage:10", 2, "9,8,7,6,5,4,3,2,1,0"));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,9s,8s,6s,4s,2s,1s,0s"),
+ getNodeList("storage:10", 3, "9,8,7,6,5,4,3,2,1,0"));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,6,9s,8s,4s,2s,1s,0s"),
+ getNodeList("storage:10", 4, "9,8,7,6,5,4,3,2,1,0"));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,6,8,9s,4s,2s,1s,0s"),
+ getNodeList("storage:10", 5, "9,8,7,6,5,4,3,2,1,0"));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,6,8,0,9s,4s,2s,1s"),
+ getNodeList("storage:10", 6, "9,8,7,6,5,4,3,2,1,0"));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,6,8,0,9,4s,2s,1s"),
+ getNodeList("storage:10", 7, "9,8,7,6,5,4,3,2,1,0"));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,6,8,0,9,2,4s,1s"),
+ getNodeList("storage:10", 8, "9,8,7,6,5,4,3,2,1,0"));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,6,8,0,9,2,1,4s"),
+ getNodeList("storage:10", 9, "9,8,7,6,5,4,3,2,1,0"));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,6,8,0,9,2,1,4"),
+ getNodeList("storage:10", 10, "9,8,7,6,5,4,3,2,1,0"));
+
+ // Trusted copies should not be source only.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,6,8,0,9,1,2,4s"),
+ getNodeList("storage:10", 7, "0,1t,2t,3,4,5,6,7,8,9"));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,6,8,0,9,2,1s,4s"),
+ getNodeList("storage:10", 7, "0,1,2t,3,4,5,6,7,8,9"));
+
+ // Retired nodes are not in ideal state
+ // Ideal: 5,7
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("0,2,3s"),
+ getNodeList("storage:10 .3.s:r", 2, "0,2,3"));
+ // Ideal: 5,7,6
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("0,2,3"),
+ getNodeList("storage:10 .3.s:r", 3, "0,2,3"));
+}
+
+void
+MergeOperationTest::doNotRemoveCopiesWithPendingMessages()
+{
+ document::BucketId bucket(16, 1);
+
+ getClock().setAbsoluteTimeInSeconds(10);
+ _distributor->enableClusterState(lib::ClusterState("distributor:1 storage:3"));
+ addNodesToBucketDB(bucket,
+ "0=10/1/1/t,"
+ "1=20/1/1,"
+ "2=10/1/1/t");
+
+ MergeOperation op(BucketAndNodes(bucket,
+ toVector<uint16_t>(0, 1, 2)));
+ op.setIdealStateManager(&getIdealStateManager());
+ op.start(_sender, framework::MilliSecTime(0));
+
+ std::string merge("MergeBucketCommand(BucketId(0x4000000000000001), to time 10000000, "
+ "cluster state version: 0, nodes: [0, 2, 1 (source only)], chain: [], "
+ "reasons to start: ) => 0");
+
+ CPPUNIT_ASSERT_EQUAL(merge, _sender.getLastCommand(true));
+
+ // Suddenly a wild operation appears to the source only copy!
+ // Removes are blocked by all and any operation types, so can just choose
+ // at will.
+ api::StorageMessage::SP msg(
+ new api::SetBucketStateCommand(bucket, api::SetBucketStateCommand::ACTIVE));
+ msg->setAddress(api::StorageMessageAddress("storage", lib::NodeType::STORAGE, 1));
+ _pendingTracker->insert(msg);
+
+
+ sendReply(op);
+ // Should not be a remove here!
+ CPPUNIT_ASSERT_EQUAL(merge, _sender.getLastCommand(true));
+ CPPUNIT_ASSERT(!op.ok());
+}
+
+void
+MergeOperationTest::testDoNotRemoveActiveSourceOnlyCopies()
+{
+ getClock().setAbsoluteTimeInSeconds(10);
+
+ addNodesToBucketDB(document::BucketId(16, 1),
+ "0=10/1/1/t,"
+ "1=20/1/1/u/a,"
+ "2=10/1/1/t");
+
+ _distributor->enableClusterState(
+ lib::ClusterState("distributor:1 storage:3"));
+ MergeOperation op(BucketAndNodes(document::BucketId(16, 1),
+ toVector<uint16_t>(0, 1, 2)));
+ op.setIdealStateManager(&getIdealStateManager());
+ op.start(_sender, framework::MilliSecTime(0));
+
+ std::string merge(
+ "MergeBucketCommand(BucketId(0x4000000000000001), to time "
+ "10000000, cluster state version: 0, nodes: [0, 2, 1 "
+ "(source only)], chain: [], reasons to start: ) => 0");
+ CPPUNIT_ASSERT_EQUAL(merge, _sender.getLastCommand(true));
+
+ sendReply(op);
+ // No DeleteBucket shall have been sent
+ CPPUNIT_ASSERT_EQUAL(merge, _sender.getLastCommand(true));
+}
+
+void
+MergeOperationTest::testMarkRedundantTrustedCopiesAsSourceOnly()
+{
+ // This test uses the same distribution as testGenerateNodeList(), i.e.
+ // an ideal state sequence of [3, 5, 7, 6, 8, 0, 9, 2, 1, 4]
+
+ // 3 redundancy, 5 trusted -> 2 trusted source only.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,6s,8s"),
+ getNodeList("storage:10", 3, "3t,5t,7t,6t,8t"));
+
+ // 3 redundancy, 4 trusted -> 1 trusted source only.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,6s,8s"),
+ getNodeList("storage:10", 3, "3t,5t,7t,6t,8"));
+
+ // 3 redundancy, 3 trusted -> 0 trusted source only, 2 non-trusted sources.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,6s,8s"),
+ getNodeList("storage:10", 3, "3t,5t,7t,6,8"));
+
+ // 3 redundancy, 4 trusted -> 1 source only trusted.
+ // We allow marking a trusted, non-ideal copy as source even when we don't
+ // have #redundancy trusted _ideal_ copies, as long as we're left with >=
+ // #redundancy trusted copies in total.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,6,8s"),
+ getNodeList("storage:10", 3, "3t,5t,7,6t,8t"));
+
+ // Not sufficient number of trusted copies to mark any as source only.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,6,8"),
+ getNodeList("storage:10", 3, "3t,5,7,6t,8t"));
+
+ // Same as above, with all trusted copies being non-ideal.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,6,8"),
+ getNodeList("storage:10", 3, "3,5,7,6t,8t"));
+
+ // #redundancy of trusted, but none are ideal. Non-ideal trusted should
+ // not be marked as source only (though we can mark non-trusted non-ideal
+ // node as source only).
+ // Note the node reordering since trusted are added before the rest.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,8,0,9,6s"),
+ getNodeList("storage:10", 3, "3,5,7,6,8t,0t,9t"));
+
+ // But allow for removing excess trusted, non-ideal copies.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("3,5,7,6,8,0,9s"),
+ getNodeList("storage:10", 3, "3,5,7,6t,8t,0t,9t"));
+}
+
+void
+MergeOperationTest::onlyMarkRedundantRetiredReplicasAsSourceOnly()
+{
+ // No nodes in ideal state and all nodes are retired. With redundancy of 2
+ // we can only mark the last replica in the DB as source-only. Retired
+ // nodes are meant as source-only due to being migrated away from, but
+ // source-only nodes will have their replica removed after a successful
+ // merge, which we cannot allow to happen here.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("0,1,2s"),
+ getNodeList("storage:3 .0.s.:r .1.s:r .2.s:r", 2, "1,0,2"));
+}
+
+} // distributor
+} // storage
diff --git a/storage/src/tests/distributor/messagesenderstub.cpp b/storage/src/tests/distributor/messagesenderstub.cpp
new file mode 100644
index 00000000000..88210a94848
--- /dev/null
+++ b/storage/src/tests/distributor/messagesenderstub.cpp
@@ -0,0 +1,88 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <tests/distributor/messagesenderstub.h>
+#include <tests/distributor/distributortestutil.h>
+
+namespace storage {
+
+std::string
+MessageSenderStub::getLastCommand(bool verbose) const
+{
+ if (commands.empty()) {
+ throw std::logic_error("Expected command where there was none");
+ }
+ return dumpMessage(*commands[commands.size() - 1],
+ true,
+ verbose);
+
+}
+
+std::string
+MessageSenderStub::dumpMessage(const api::StorageMessage& msg,
+ bool includeAddress,
+ bool verbose) const
+{
+ std::ostringstream ost;
+
+ if (verbose) {
+ ost << msg;
+ } else {
+ ost << msg.getType().getName();
+ }
+
+ if (includeAddress && msg.getAddress()) {
+ ost << " => " << msg.getAddress()->getIndex();
+ }
+ if (verbose && msg.getType().isReply()) {
+ ost << " " << dynamic_cast<const api::StorageReply&>(msg).getResult();
+ }
+
+ return ost.str();
+}
+
+std::string
+MessageSenderStub::getCommands(bool includeAddress, bool verbose, uint32_t fromIdx) const
+{
+ std::ostringstream ost;
+
+ for (uint32_t i = fromIdx; i < commands.size(); i++) {
+ if (i != fromIdx) {
+ ost << ",";
+ }
+
+ ost << dumpMessage(*commands[i], includeAddress, verbose);
+ }
+
+ return ost.str();
+}
+
+std::string
+MessageSenderStub::getLastReply(bool verbose) const
+{
+ if (replies.empty()) {
+ throw std::logic_error("Expected reply where there was none");
+ }
+
+ return dumpMessage(*replies.back(),
+ true,
+ verbose);
+
+}
+
+std::string
+MessageSenderStub::getReplies(bool includeAddress, bool verbose) const
+{
+ std::ostringstream ost;
+ for (uint32_t i = 0; i < replies.size(); i++) {
+ if (i != 0) {
+ ost << ",";
+ }
+
+ ost << dumpMessage(*replies[i], includeAddress, verbose);
+ }
+
+ return ost.str();
+}
+
+}
+
diff --git a/storage/src/tests/distributor/messagesenderstub.h b/storage/src/tests/distributor/messagesenderstub.h
new file mode 100644
index 00000000000..d70c5355868
--- /dev/null
+++ b/storage/src/tests/distributor/messagesenderstub.h
@@ -0,0 +1,71 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <vespa/storage/distributor/distributor.h>
+#include <vespa/storage/distributor/distributormessagesender.h>
+
+namespace storage {
+
+struct MessageSenderStub : distributor::DistributorMessageSender
+{
+ std::vector<std::shared_ptr<api::StorageCommand> > commands;
+ std::vector<std::shared_ptr<api::StorageReply> > replies;
+
+ MessageSenderStub()
+ : _clusterName("storage"),
+ _pendingMessageTracker(0)
+ {}
+
+ void clear() {
+ commands.clear();
+ replies.clear();
+ }
+
+ virtual void sendCommand(const std::shared_ptr<api::StorageCommand>& cmd)
+ {
+ commands.push_back(cmd);
+ }
+
+ virtual void sendReply(const std::shared_ptr<api::StorageReply>& reply)
+ {
+ replies.push_back(reply);
+ }
+
+ std::string getLastCommand(bool verbose = true) const;
+
+ std::string getCommands(bool includeAddress = false,
+ bool verbose = false,
+ uint32_t fromIndex = 0) const;
+
+ std::string getLastReply(bool verbose = true) const;
+
+ std::string getReplies(bool includeAddress = false,
+ bool verbose = false) const;
+
+ std::string dumpMessage(const api::StorageMessage& msg,
+ bool includeAddress,
+ bool verbose) const;
+
+ virtual int getDistributorIndex() const {
+ return 0;
+ }
+
+ virtual const std::string& getClusterName() const {
+ return _clusterName;
+ }
+
+ virtual const distributor::PendingMessageTracker& getPendingMessageTracker() const {
+ assert(_pendingMessageTracker);
+ return *_pendingMessageTracker;
+ }
+
+ void setPendingMessageTracker(distributor::PendingMessageTracker& tracker) {
+ _pendingMessageTracker = &tracker;
+ }
+private:
+ std::string _clusterName;
+ distributor::PendingMessageTracker* _pendingMessageTracker;
+};
+
+}
+
diff --git a/storage/src/tests/distributor/nodeinfotest.cpp b/storage/src/tests/distributor/nodeinfotest.cpp
new file mode 100644
index 00000000000..883e6ba7668
--- /dev/null
+++ b/storage/src/tests/distributor/nodeinfotest.cpp
@@ -0,0 +1,83 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <iomanip>
+#include <iostream>
+#include <memory>
+#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storageframework/defaultimplementation/clock/fakeclock.h>
+#include <vespa/storage/distributor/bucketdbupdater.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <vespa/storageapi/message/state.h>
+#include <vespa/document/fieldvalue/document.h>
+#include <vespa/vdslib/state/random.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <vespa/storage/distributor/pendingclusterstate.h>
+#include <vespa/vespalib/text/stringtokenizer.h>
+#include <vespa/storage/distributor/nodeinfo.h>
+
+#include <iostream>
+#include <fstream>
+#include <string>
+
+namespace storage {
+namespace distributor {
+
+class NodeInfoTest : public CppUnit::TestFixture {
+ CPPUNIT_TEST_SUITE(NodeInfoTest);
+ CPPUNIT_TEST(testSimple);
+ CPPUNIT_TEST_SUITE_END();
+public:
+ void testSimple();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(NodeInfoTest);
+
+void
+NodeInfoTest::testSimple()
+{
+ framework::defaultimplementation::FakeClock clock;
+ NodeInfo info(clock);
+
+ CPPUNIT_ASSERT_EQUAL(0, (int)info.getPendingCount(3));
+ CPPUNIT_ASSERT_EQUAL(0, (int)info.getPendingCount(9));
+
+ info.incPending(3);
+ info.incPending(3);
+ info.incPending(3);
+ info.incPending(3);
+ info.decPending(3);
+ info.decPending(4);
+ info.incPending(7);
+ info.incPending(4);
+ info.decPending(3);
+
+ CPPUNIT_ASSERT_EQUAL(2, (int)info.getPendingCount(3));
+ CPPUNIT_ASSERT_EQUAL(1, (int)info.getPendingCount(4));
+ CPPUNIT_ASSERT_EQUAL(1, (int)info.getPendingCount(7));
+ CPPUNIT_ASSERT_EQUAL(0, (int)info.getPendingCount(5));
+
+ info.setBusy(5);
+ clock.addSecondsToTime(10);
+ info.setBusy(1);
+ clock.addSecondsToTime(20);
+ info.setBusy(42);
+
+ CPPUNIT_ASSERT_EQUAL(true, info.isBusy(5));
+ CPPUNIT_ASSERT_EQUAL(true, info.isBusy(1));
+ CPPUNIT_ASSERT_EQUAL(true, info.isBusy(42));
+ CPPUNIT_ASSERT_EQUAL(false, info.isBusy(7));
+
+ clock.addSecondsToTime(42);
+
+ CPPUNIT_ASSERT_EQUAL(false, info.isBusy(5));
+ CPPUNIT_ASSERT_EQUAL(false, info.isBusy(1));
+ CPPUNIT_ASSERT_EQUAL(true, info.isBusy(42));
+ CPPUNIT_ASSERT_EQUAL(false, info.isBusy(7));
+
+}
+
+}
+
+}
diff --git a/storage/src/tests/distributor/nodemaintenancestatstrackertest.cpp b/storage/src/tests/distributor/nodemaintenancestatstrackertest.cpp
new file mode 100644
index 00000000000..f1c177e7939
--- /dev/null
+++ b/storage/src/tests/distributor/nodemaintenancestatstrackertest.cpp
@@ -0,0 +1,102 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+
+#include <vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h>
+
+namespace storage {
+namespace distributor {
+
+class NodeMaintenanceStatsTrackerTest : public CppUnit::TestFixture
+{
+ CPPUNIT_TEST_SUITE(NodeMaintenanceStatsTrackerTest);
+ CPPUNIT_TEST(emptyStatsInstancesAreEqual);
+ CPPUNIT_TEST(statsFieldsAffectEqualityComparison);
+ CPPUNIT_TEST(requestingNonExistingNodeGivesEmptyStats);
+ CPPUNIT_TEST(statsAreTrackedPerNode);
+ CPPUNIT_TEST_SUITE_END();
+
+ void emptyStatsInstancesAreEqual();
+ void statsFieldsAffectEqualityComparison();
+ void requestingNonExistingNodeGivesEmptyStats();
+ void statsAreTrackedPerNode();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(NodeMaintenanceStatsTrackerTest);
+
+void
+NodeMaintenanceStatsTrackerTest::emptyStatsInstancesAreEqual()
+{
+ NodeMaintenanceStats a;
+ NodeMaintenanceStats b;
+ CPPUNIT_ASSERT_EQUAL(a, b);
+}
+
+void
+NodeMaintenanceStatsTrackerTest::statsFieldsAffectEqualityComparison()
+{
+ NodeMaintenanceStats a;
+ NodeMaintenanceStats b;
+
+ a.movingOut = 1;
+ CPPUNIT_ASSERT(!(a == b));
+ b.movingOut = 1;
+ CPPUNIT_ASSERT(a == b);
+
+ a.syncing = 1;
+ CPPUNIT_ASSERT(!(a == b));
+ b.syncing = 1;
+ CPPUNIT_ASSERT(a == b);
+
+ a.copyingIn = 1;
+ CPPUNIT_ASSERT(!(a == b));
+ b.copyingIn = 1;
+ CPPUNIT_ASSERT(a == b);
+
+ a.copyingOut = 1;
+ CPPUNIT_ASSERT(!(a == b));
+ b.copyingOut = 1;
+ CPPUNIT_ASSERT(a == b);
+}
+
+void
+NodeMaintenanceStatsTrackerTest::requestingNonExistingNodeGivesEmptyStats()
+{
+ NodeMaintenanceStatsTracker tracker;
+ NodeMaintenanceStats wanted;
+ CPPUNIT_ASSERT_EQUAL(wanted, tracker.forNode(0));
+}
+
+void
+NodeMaintenanceStatsTrackerTest::statsAreTrackedPerNode()
+{
+ NodeMaintenanceStatsTracker tracker;
+ NodeMaintenanceStats wanted;
+
+ tracker.incMovingOut(0);
+ wanted.movingOut = 1;
+ CPPUNIT_ASSERT_EQUAL(wanted, tracker.forNode(0));
+ wanted.movingOut = 0;
+ CPPUNIT_ASSERT_EQUAL(wanted, tracker.forNode(1));
+
+ tracker.incMovingOut(0);
+ wanted.movingOut = 2;
+ CPPUNIT_ASSERT_EQUAL(wanted, tracker.forNode(0));
+
+ tracker.incMovingOut(1);
+ wanted.movingOut = 1;
+ CPPUNIT_ASSERT_EQUAL(wanted, tracker.forNode(1));
+
+ tracker.incSyncing(1);
+ tracker.incCopyingIn(1);
+ tracker.incCopyingOut(1);
+ wanted.syncing = 1;
+ wanted.copyingIn = 1;
+ wanted.copyingOut = 1;
+ CPPUNIT_ASSERT_EQUAL(wanted, tracker.forNode(1));
+}
+
+} // distributor
+} // storage
+
diff --git a/storage/src/tests/distributor/operationtargetresolvertest.cpp b/storage/src/tests/distributor/operationtargetresolvertest.cpp
new file mode 100644
index 00000000000..5b23d3a7a9e
--- /dev/null
+++ b/storage/src/tests/distributor/operationtargetresolvertest.cpp
@@ -0,0 +1,316 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+
+#include <iomanip>
+#include <iostream>
+#include <memory>
+#include <vespa/config/helper/configgetter.h>
+#include <vespa/document/config/config-documenttypes.h>
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storageapi/message/state.h>
+#include <tests/distributor/distributortestutil.h>
+#include <tests/common/dummystoragelink.h>
+#include <vespa/vdslib/distribution/idealnodecalculatorimpl.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/storage/distributor/operationtargetresolverimpl.h>
+
+using document::BucketId;
+
+namespace storage {
+namespace distributor {
+
+struct OperationTargetResolverTest : public CppUnit::TestFixture,
+ public DistributorTestUtil
+{
+
+ document::DocumentTypeRepo::SP _repo;
+ const document::DocumentType* _html_type;
+ std::unique_ptr<Operation> op;
+
+ void testSimple();
+ void testMultipleNodes();
+ void testChooseIdealStateWhenManyCopies();
+ void testChooseHighestSplitBucket();
+ void testChooseHighestSplitBucketPerNode();
+ void testChooseHighestSplitBucketWithTrusted();
+ void testInconsistentBucketsAreNotExplicitlyCreated();
+ void testNoTrustedOrIdealStateCopyAvailable();
+ void testCreateMissingCopies();
+ void testNoExistingCopies();
+ void testCountMaintenanceNodesAsDown();
+ void testResolvingDoesNotMutateDatabase();
+ void testTrustedOverIdealState();
+
+ BucketInstanceList getInstances(const BucketId& bid,
+ bool stripToRedundancy);
+
+ void setUp() {
+ _repo.reset(new document::DocumentTypeRepo(
+ *config::ConfigGetter<document::DocumenttypesConfig>::getConfig(
+ "config-doctypes", config::FileSpec("config-doctypes.cfg"))));
+ _html_type = _repo->getDocumentType("text/html");
+ createLinks();
+ };
+
+ void tearDown() {
+ close();
+ }
+
+ CPPUNIT_TEST_SUITE(OperationTargetResolverTest);
+ CPPUNIT_TEST(testSimple);
+ CPPUNIT_TEST(testMultipleNodes);
+ CPPUNIT_TEST(testChooseIdealStateWhenManyCopies);
+ CPPUNIT_TEST(testChooseHighestSplitBucket);
+ CPPUNIT_TEST(testChooseHighestSplitBucketPerNode);
+ CPPUNIT_TEST(testChooseHighestSplitBucketWithTrusted);
+ CPPUNIT_TEST(testNoTrustedOrIdealStateCopyAvailable);
+ CPPUNIT_TEST(testInconsistentBucketsAreNotExplicitlyCreated);
+ CPPUNIT_TEST(testCreateMissingCopies);
+ CPPUNIT_TEST(testNoExistingCopies);
+ CPPUNIT_TEST(testCountMaintenanceNodesAsDown);
+ CPPUNIT_TEST(testResolvingDoesNotMutateDatabase);
+ CPPUNIT_TEST(testTrustedOverIdealState);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(OperationTargetResolverTest);
+
+namespace {
+
+ // Create assertion that makes it easy to write tests, and report correct
+ // line for problem at command line
+#define ASSERT_THAT(id) \
+ { \
+ struct MyAsserter : public Asserter { \
+ void assertEqualMsg(std::string t1, OperationTargetList t2, \
+ OperationTargetList t3) { \
+ CPPUNIT_ASSERT_EQUAL_MSG(t1, t2, t3); \
+ } \
+ }; \
+ _asserters.push_back(new MyAsserter); \
+ } \
+ TestTargets::createTest(id, *this, *_asserters.back())
+
+ struct Asserter {
+ virtual ~Asserter() {}
+ virtual void assertEqualMsg(std::string t1,
+ OperationTargetList t2,
+ OperationTargetList t3) = 0;
+ };
+ std::vector<Asserter*> _asserters;
+ struct TestTargets {
+ const BucketId& _id;
+ OperationTargetList _expected;
+ OperationTargetResolverTest& _test;
+ Asserter& _asserter;
+
+ TestTargets(const BucketId& id,
+ OperationTargetResolverTest& test,
+ Asserter& asserter)
+ : _id(id), _test(test), _asserter(asserter) {}
+
+ ~TestTargets() {
+ BucketInstanceList result(_test.getInstances(_id, true));
+ BucketInstanceList all(_test.getInstances(_id, false));
+ _asserter.assertEqualMsg(
+ all.toString(), _expected, result.createTargets());
+ delete _asserters.back();
+ _asserters.pop_back();
+ }
+
+ TestTargets& sendsTo(const BucketId& id, uint16_t node) {
+ _expected.push_back(OperationTarget(
+ id, lib::Node(lib::NodeType::STORAGE, node), false));
+ return *this;
+ }
+ TestTargets& createsAt(const BucketId& id, uint16_t node) {
+ _expected.push_back(OperationTarget(
+ id, lib::Node(lib::NodeType::STORAGE, node), true));
+ return *this;
+ }
+
+ static TestTargets createTest(const BucketId& id,
+ OperationTargetResolverTest& test,
+ Asserter& asserter)
+ {
+ return TestTargets(id, test, asserter);
+ }
+ };
+
+
+} // anonymous
+
+BucketInstanceList
+OperationTargetResolverTest::getInstances(const BucketId& id,
+ bool stripToRedundancy)
+{
+ lib::IdealNodeCalculatorImpl idealNodeCalc;
+ idealNodeCalc.setDistribution(getExternalOperationHandler().getDistribution());
+ idealNodeCalc.setClusterState(getExternalOperationHandler().getClusterState());
+ OperationTargetResolverImpl resolver(
+ getExternalOperationHandler().getBucketDatabase(), idealNodeCalc, 16,
+ getExternalOperationHandler().getDistribution().getRedundancy());
+ if (stripToRedundancy) {
+ return resolver.getInstances(OperationTargetResolver::PUT, id);
+ } else {
+ return resolver.getAllInstances(OperationTargetResolver::PUT, id);
+ }
+}
+
+/*
+ * Test basic case with no inconsistencies
+ */
+void
+OperationTargetResolverTest::testSimple()
+{
+ setupDistributor(2, 2, "storage:2 distributor:1");
+ addNodesToBucketDB(BucketId(16, 0), "0=0,1=0");
+
+ ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(16, 0), 1)
+ .sendsTo(BucketId(16, 0), 0);
+}
+
+void
+OperationTargetResolverTest::testMultipleNodes()
+{
+ setupDistributor(1, 2, "storage:2 distributor:1");
+
+ for (int i = 0; i < 100; ++i) {
+ addNodesToBucketDB(BucketId(16, i), "0=0,1=0");
+
+ lib::IdealNodeCalculatorImpl idealNodeCalc;
+ idealNodeCalc.setDistribution(getExternalOperationHandler().getDistribution());
+ idealNodeCalc.setClusterState(getExternalOperationHandler().getClusterState());
+ lib::IdealNodeList idealNodes(
+ idealNodeCalc.getIdealStorageNodes(BucketId(16, i)));
+ uint16_t expectedNode = idealNodes[0].getIndex();
+ ASSERT_THAT(BucketId(32, i)).sendsTo(BucketId(16, i), expectedNode);
+ }
+}
+
+void
+OperationTargetResolverTest::testChooseIdealStateWhenManyCopies()
+{
+ setupDistributor(2, 4, "storage:4 distributor:1");
+ addNodesToBucketDB(BucketId(16, 0), "0=0,1=0,2=0,3=0"); // ideal nodes: 1, 3
+ ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(16, 0), 1)
+ .sendsTo(BucketId(16, 0), 3);
+}
+
+void
+OperationTargetResolverTest::testTrustedOverIdealState()
+{
+ setupDistributor(2, 4, "storage:4 distributor:1");
+ addNodesToBucketDB(BucketId(16, 0), "0=0/0/0/t,1=0,2=0/0/0/t,3=0");
+ // ideal nodes: 1, 3
+ ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(16, 0), 0)
+ .sendsTo(BucketId(16, 0), 2);
+}
+
+void
+OperationTargetResolverTest::testChooseHighestSplitBucket()
+{
+ setupDistributor(2, 2, "storage:2 distributor:1");
+ // 0, 1 are both in ideal state for both buckets.
+ addNodesToBucketDB(BucketId(16, 0), "0=0,1=0");
+ addNodesToBucketDB(BucketId(17, 0), "0=0,1=0");
+ ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(17, 0), 1)
+ .sendsTo(BucketId(17, 0), 0);
+}
+
+void
+OperationTargetResolverTest::testChooseHighestSplitBucketPerNode()
+{
+ setupDistributor(2, 2, "storage:2 distributor:1");
+ addNodesToBucketDB(BucketId(16, 0), "1=0");
+ addNodesToBucketDB(BucketId(17, 0), "0=0");
+ ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(17, 0), 0)
+ .sendsTo(BucketId(16, 0), 1);
+}
+
+void
+OperationTargetResolverTest::testChooseHighestSplitBucketWithTrusted()
+{
+ setupDistributor(2, 2, "storage:2 distributor:1");
+ // Unfinished split scenario: split done on 0, not on 1.
+ // Copy on 1 is only remaining for (16, 0), so always trusted.
+ addNodesToBucketDB(BucketId(16, 0), "1=1/2/3/t");
+ addNodesToBucketDB(BucketId(17, 0), "0=2/3/4/t");
+ addNodesToBucketDB(BucketId(17, 1ULL << 16), "0=3/4/5/t");
+ ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(17, 0), 0)
+ .sendsTo(BucketId(16, 0), 1);
+}
+
+void
+OperationTargetResolverTest::testInconsistentBucketsAreNotExplicitlyCreated()
+{
+ setupDistributor(2, 2, "bits:8 storage:2 distributor:1");
+ addNodesToBucketDB(BucketId(15, 0), "1=9/9/9/t");
+ addNodesToBucketDB(BucketId(16, 1 << 15), "0=9/9/9/t");
+ // (32, 0) belongs in (16, 0) subtree, but it does not exist. We cannot
+ // create a bucket on (15, 0) node 0 since that will explicitly introduce
+ // an inconsistent bucket in its local state. Note that we still _send_ to
+ // the inconsistent (15, 0) bucket since it already exists and will be
+ // split out very soon anyway. This is predominantly to avoid making things
+ // even worse than they are and to avoid the edge case in bug 7296087.
+ ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(15, 0), 1)
+ .createsAt(BucketId(16, 0), 0);
+}
+
+void
+OperationTargetResolverTest::testNoTrustedOrIdealStateCopyAvailable()
+{
+ setupDistributor(2, 4, "storage:4 distributor:1");
+ addNodesToBucketDB(BucketId(16, 0), "0=0,2=0");
+ addNodesToBucketDB(BucketId(18, 0), "0=0"); // ideal nodes: 1, 3
+ ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(18, 0), 0)
+ .sendsTo(BucketId(16, 0), 2);
+}
+
+void
+OperationTargetResolverTest::testCreateMissingCopies()
+{
+ setupDistributor(4, 10, "storage:10 distributor:1");
+ addNodesToBucketDB(BucketId(16, 0), "6=0");
+ addNodesToBucketDB(BucketId(18, 0), "4=0"); // ideal nodes: 6, 8, 7, 1
+
+ ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(18, 0), 4)
+ .sendsTo(BucketId(16, 0), 6)
+ .createsAt(BucketId(18, 0), 8)
+ .createsAt(BucketId(18, 0), 7);
+}
+
+void
+OperationTargetResolverTest::testNoExistingCopies()
+{
+ setupDistributor(2, 5, "storage:5 distributor:1");
+
+ ASSERT_THAT(BucketId(32, 0)).createsAt(BucketId(16, 0), 1)
+ .createsAt(BucketId(16, 0), 3);
+}
+
+void
+OperationTargetResolverTest::testCountMaintenanceNodesAsDown()
+{
+ setupDistributor(2, 5, "storage:5 .1.s:m distributor:1");
+
+ ASSERT_THAT(BucketId(32, 0)).createsAt(BucketId(16, 0), 3)
+ .createsAt(BucketId(16, 0), 2);
+}
+
+void
+OperationTargetResolverTest::testResolvingDoesNotMutateDatabase()
+{
+ setupDistributor(2, 5, "storage:5 distributor:1");
+
+ ASSERT_THAT(BucketId(32, 0)).createsAt(BucketId(16, 0), 1)
+ .createsAt(BucketId(16, 0), 3);
+
+ CPPUNIT_ASSERT_EQUAL(std::string("NONEXISTING"),
+ dumpBucket(BucketId(0x4000000000000000)));
+}
+
+} // distributor
+} // storage
diff --git a/storage/src/tests/distributor/pendingmessagetrackertest.cpp b/storage/src/tests/distributor/pendingmessagetrackertest.cpp
new file mode 100644
index 00000000000..f69525836be
--- /dev/null
+++ b/storage/src/tests/distributor/pendingmessagetrackertest.cpp
@@ -0,0 +1,674 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+
+#include <vespa/document/base/testdocman.h>
+#include <vespa/storage/distributor/pendingmessagetracker.h>
+#include <vespa/storage/frameworkimpl/component/storagecomponentregisterimpl.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storageframework/defaultimplementation/clock/fakeclock.h>
+#include <tests/common/dummystoragelink.h>
+#include <vespa/vdslib/state/random.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <fstream>
+#include <sstream>
+#include <iomanip>
+#include <iostream>
+#include <memory>
+#include <string>
+#include <iterator>
+
+namespace storage {
+namespace distributor {
+
+// Workaround typedef for not (yet) running with --std=c++14 which supports
+// user defined literals. Once we do, replace ms(123) with 123ms.
+using ms = std::chrono::milliseconds;
+
+// Test suite for PendingMessageTracker. Covers status-page rendering,
+// per-node/per-bucket pending-message bookkeeping, and per-node put
+// latency statistics.
+class PendingMessageTrackerCallback_Test : public CppUnit::TestFixture {
+    CPPUNIT_TEST_SUITE(PendingMessageTrackerCallback_Test);
+    CPPUNIT_TEST(testSimple);
+    CPPUNIT_TEST(testMultipleMessages);
+    CPPUNIT_TEST(testStartPage);
+    CPPUNIT_TEST(testGetPendingMessageTypes);
+    CPPUNIT_TEST(testHasPendingMessage);
+    CPPUNIT_TEST(testGetAllMessagesForSingleBucket);
+    CPPUNIT_TEST(nodeStatsCanBeOutputStreamed);
+    CPPUNIT_TEST(totalPutLatencyIsInitiallyZero);
+    CPPUNIT_TEST(statsNotAlteredBeforeReplyReceived);
+    CPPUNIT_TEST(totalPutLatencyIsTrackedForSingleRequest);
+    CPPUNIT_TEST(statsAreTrackedSeparatelyPerNode);
+    CPPUNIT_TEST(onlyPutMessagesAreTracked);
+    CPPUNIT_TEST(totalPutLatencyIsAggregatedAcrossRequests);
+    CPPUNIT_TEST(clearingMessagesDoesNotAffectStats);
+    CPPUNIT_TEST(timeTravellingClockLatenciesNotRegistered);
+    CPPUNIT_TEST(statsSnapshotIncludesAllNodes);
+    CPPUNIT_TEST(latencyProviderForwardsToImplementation);
+    CPPUNIT_TEST_SUITE_END();
+
+public:
+    void testSimple();
+    void testMultipleMessages();
+    void testStartPage();
+    void testGetPendingMessageTypes();
+    void testHasPendingMessage();
+    void testGetAllMessagesForSingleBucket();
+    void nodeStatsCanBeOutputStreamed();
+    void totalPutLatencyIsInitiallyZero();
+    void statsNotAlteredBeforeReplyReceived();
+    void totalPutLatencyIsTrackedForSingleRequest();
+    void statsAreTrackedSeparatelyPerNode();
+    void onlyPutMessagesAreTracked();
+    void totalPutLatencyIsAggregatedAcrossRequests();
+    void clearingMessagesDoesNotAffectStats();
+    void timeTravellingClockLatenciesNotRegistered();
+    void statsSnapshotIncludesAllNodes();
+    void latencyProviderForwardsToImplementation();
+
+private:
+    // Fills a tracker with a fixed set of remove commands; see definition.
+    void insertMessages(PendingMessageTracker& tracker);
+
+    // Convenience for building the expected OperationStats value in
+    // CPPUNIT_ASSERT_EQUAL comparisons.
+    OperationStats makeOpStats(std::chrono::milliseconds totalLatency,
+                               uint64_t numRequests) const
+    {
+        OperationStats stats;
+        stats.totalLatency = totalLatency;
+        stats.numRequests = numRequests;
+        return stats;
+    }
+};
+
+// Equality for OperationStats; required by CPPUNIT_ASSERT_EQUAL on stats.
+bool
+operator==(const OperationStats& a, const OperationStats& b)
+{
+    if (a.numRequests != b.numRequests) {
+        return false;
+    }
+    return a.totalLatency == b.totalLatency;
+}
+
+namespace {
+
+// Fluent description of a request: the node it targets and the mocked
+// wall-clock time at which it is sent (or answered).
+class RequestBuilder {
+    uint16_t _toNode;
+    std::chrono::milliseconds _atTime;
+public:
+    RequestBuilder()
+        : _toNode(0),
+          _atTime()
+    {
+    }
+
+    RequestBuilder& toNode(uint16_t node) {
+        _toNode = node;
+        return *this;
+    }
+
+    RequestBuilder& atTime(std::chrono::milliseconds t) {
+        _atTime = t;
+        return *this;
+    }
+
+    std::chrono::milliseconds atTime() const { return _atTime; }
+    uint16_t toNode() const { return _toNode; }
+};
+
+// Shared test fixture wiring a PendingMessageTracker to a fake clock, with
+// helpers for sending puts/removes and their replies at mocked times.
+class Fixture
+{
+    StorageComponentRegisterImpl _compReg;
+    framework::defaultimplementation::FakeClock _clock;
+    std::unique_ptr<PendingMessageTracker> _tracker;
+    document::TestDocMan _testDocMan;
+public:
+
+    Fixture()
+        : _compReg(),
+          _clock(),
+          _tracker(),
+          _testDocMan()
+    {
+        _compReg.setClock(_clock);
+        _clock.setAbsoluteTimeInSeconds(1);
+        // Have to set clock in compReg before constructing tracker, or it'll
+        // flip out and die on an explicit nullptr check.
+        _tracker = std::unique_ptr<PendingMessageTracker>(
+                new PendingMessageTracker(_compReg));
+    }
+
+    // Inserts a put to builder.toNode() at builder.atTime(); returns the
+    // command so the caller can later feed its reply back to the tracker.
+    std::shared_ptr<api::PutCommand> sendPut(const RequestBuilder& builder) {
+        assignMockedTime(builder.atTime());
+        auto put = createPutToNode(builder.toNode());
+        _tracker->insert(put);
+        return put;
+    }
+
+    // Replies to a previously sent put at builder.atTime(). The target node
+    // is derived from the command, so builder.toNode() is not consulted.
+    void sendPutReply(api::PutCommand& putCmd,
+                      const RequestBuilder& builder)
+    {
+        assignMockedTime(builder.atTime());
+        auto putReply = putCmd.makeReply();
+        _tracker->reply(*putReply);
+    }
+
+    std::shared_ptr<api::RemoveCommand> sendRemove(
+            const RequestBuilder& builder)
+    {
+        assignMockedTime(builder.atTime());
+        auto remove = createRemoveToNode(builder.toNode());
+        _tracker->insert(remove);
+        return remove;
+    }
+
+    void sendRemoveReply(api::RemoveCommand& removeCmd,
+                         const RequestBuilder& builder)
+    {
+        assignMockedTime(builder.atTime());
+        auto removeReply = removeCmd.makeReply();
+        _tracker->reply(*removeReply);
+    }
+
+    // Sends a put at t=1000ms and replies at t=1000ms+latency, so the
+    // tracker observes exactly `latency` for the request.
+    void sendPutAndReplyWithLatency(uint16_t node,
+                                    std::chrono::milliseconds latency)
+    {
+        auto put = sendPut(RequestBuilder().atTime(ms(1000)).toNode(node));
+        sendPutReply(*put, RequestBuilder().atTime(ms(1000) + latency));
+    }
+
+    OperationStats getNodePutOperationStats(uint16_t node) {
+        return _tracker->getNodeStats(node).puts;
+    }
+
+    PendingMessageTracker& tracker() { return *_tracker; }
+
+private:
+    // Builds a document id whose user location matches the given bucket.
+    std::string createDummyIdString(const document::BucketId& bucket) const {
+        std::ostringstream id;
+        id << "id:foo:testdoctype1:n=" << bucket.getId() << ":foo";
+        return id.str();
+    }
+
+    document::Document::SP createDummyDocumentForBucket(
+            const document::BucketId& bucket) const
+    {
+        return _testDocMan.createDocument("foobar",
+                                          createDummyIdString(bucket));
+    }
+
+    api::StorageMessageAddress makeStorageAddress(uint16_t node) const {
+        return {"storage", lib::NodeType::STORAGE, node};
+    }
+
+    std::shared_ptr<api::PutCommand> createPutToNode(uint16_t node) const {
+        document::BucketId bucket(16, 1234);
+        std::shared_ptr<api::PutCommand> cmd(
+                new api::PutCommand(bucket,
+                                    createDummyDocumentForBucket(bucket),
+                                    api::Timestamp(123456)));
+        cmd->setAddress(makeStorageAddress(node));
+        return cmd;
+    }
+
+    std::shared_ptr<api::RemoveCommand> createRemoveToNode(
+            uint16_t node) const
+    {
+        document::BucketId bucket(16, 1234);
+        std::shared_ptr<api::RemoveCommand> cmd(
+                new api::RemoveCommand(bucket,
+                                       document::DocumentId(
+                                            createDummyIdString(bucket)),
+                                       api::Timestamp(123456)));
+        cmd->setAddress(makeStorageAddress(node));
+        return cmd;
+    }
+
+    // Converts the millisecond test time to the clock's microsecond unit.
+    void assignMockedTime(std::chrono::milliseconds time) {
+        _clock.setAbsoluteTimeInMicroSeconds(time.count() * 1000);
+    }
+};
+
+
+}
+
+CPPUNIT_TEST_SUITE_REGISTRATION(PendingMessageTrackerCallback_Test);
+
+// A pending remove must show up on the bucket-ordered status page, and must
+// disappear from it once its reply has been processed.
+void
+PendingMessageTrackerCallback_Test::testSimple()
+{
+    StorageComponentRegisterImpl compReg;
+    framework::defaultimplementation::FakeClock clock;
+    compReg.setClock(clock);
+    clock.setAbsoluteTimeInSeconds(1);
+    PendingMessageTracker tracker(compReg);
+
+    std::shared_ptr<api::RemoveCommand> remove(
+            new api::RemoveCommand(
+                document::BucketId(16, 1234),
+                document::DocumentId("userdoc:footype:1234:foo"), 1001));
+    remove->setAddress(
+            api::StorageMessageAddress("storage", lib::NodeType::STORAGE, 0));
+    tracker.insert(remove);
+
+    {
+        std::ostringstream ost;
+        tracker.reportStatus(ost, framework::HttpUrlPath("/pendingmessages?order=bucket"));
+
+        CPPUNIT_ASSERT_CONTAIN(
+                std::string(
+                        "<b>BucketId(0x40000000000004d2)</b>\n"
+                        "<ul>\n"
+                        "<li><i>Node 0</i>: <b>1970-01-01 00:00:01</b> "
+                        "Remove(BucketId(0x40000000000004d2), "
+                        "userdoc:footype:1234:foo, timestamp 1001)</li>\n"
+                        "</ul>\n"),
+                ost.str());
+    }
+
+    api::RemoveReply reply(*remove);
+    tracker.reply(reply);
+
+    {
+        std::ostringstream ost;
+        tracker.reportStatus(ost, framework::HttpUrlPath("/pendingmessages?order=bucket"));
+
+        // No document id may remain on the page after the reply.
+        CPPUNIT_ASSERT_MSG(ost.str(),
+                           ost.str().find("doc:") == std::string::npos);
+    }
+}
+
+// Populates the tracker with four removes for each of two buckets
+// (user locations 1234 and 4567), alternating target nodes 0 and 1.
+void
+PendingMessageTrackerCallback_Test::insertMessages(PendingMessageTracker& tracker)
+{
+    for (uint32_t docNum = 0; docNum < 4; ++docNum) {
+        std::ostringstream docId;
+        docId << "userdoc:footype:1234:" << docNum;
+        std::shared_ptr<api::RemoveCommand> remove(
+                new api::RemoveCommand(document::BucketId(16, 1234),
+                                       document::DocumentId(docId.str()),
+                                       1000 + docNum));
+        remove->setAddress(
+                api::StorageMessageAddress("storage",
+                                           lib::NodeType::STORAGE,
+                                           docNum % 2));
+        tracker.insert(remove);
+    }
+
+    for (uint32_t docNum = 0; docNum < 4; ++docNum) {
+        std::ostringstream docId;
+        docId << "userdoc:footype:4567:" << docNum;
+        std::shared_ptr<api::RemoveCommand> remove(
+                new api::RemoveCommand(document::BucketId(16, 4567),
+                                       document::DocumentId(docId.str()),
+                                       2000 + docNum));
+        remove->setAddress(
+                api::StorageMessageAddress("storage",
+                                           lib::NodeType::STORAGE,
+                                           docNum % 2));
+        tracker.insert(remove);
+    }
+}
+
+// The landing status page must offer both grouping views (by bucket and
+// by node).
+void
+PendingMessageTrackerCallback_Test::testStartPage()
+{
+    StorageComponentRegisterImpl compReg;
+    framework::defaultimplementation::FakeClock clock;
+    compReg.setClock(clock);
+    PendingMessageTracker tracker(compReg);
+
+    {
+        std::ostringstream ost;
+        tracker.reportStatus(ost, framework::HttpUrlPath("/pendingmessages"));
+
+        CPPUNIT_ASSERT_CONTAIN(
+                std::string(
+                        "<h1>Pending messages to storage nodes</h1>\n"
+                        "View:\n"
+                        "<ul>\n"
+                        "<li><a href=\"?order=bucket\">Group by bucket</a></li>"
+                        "<li><a href=\"?order=node\">Group by node</a></li>"),
+                ost.str());
+
+    }
+}
+
+// With messages spread over two buckets and two nodes, both the
+// bucket-ordered and the node-ordered status views must list every pending
+// message under the correct grouping.
+void
+PendingMessageTrackerCallback_Test::testMultipleMessages()
+{
+    StorageComponentRegisterImpl compReg;
+    framework::defaultimplementation::FakeClock clock;
+    compReg.setClock(clock);
+    clock.setAbsoluteTimeInSeconds(1);
+    PendingMessageTracker tracker(compReg);
+
+    insertMessages(tracker);
+
+    {
+        std::ostringstream ost;
+        tracker.reportStatus(ost, framework::HttpUrlPath("/pendingmessages?order=bucket"));
+
+        CPPUNIT_ASSERT_CONTAIN(
+                std::string(
+                        "<b>BucketId(0x40000000000004d2)</b>\n"
+                        "<ul>\n"
+                        "<li><i>Node 0</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000004d2), userdoc:footype:1234:0, timestamp 1000)</li>\n"
+                        "<li><i>Node 0</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000004d2), userdoc:footype:1234:2, timestamp 1002)</li>\n"
+                        "<li><i>Node 1</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000004d2), userdoc:footype:1234:1, timestamp 1001)</li>\n"
+                        "<li><i>Node 1</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000004d2), userdoc:footype:1234:3, timestamp 1003)</li>\n"
+                        "</ul>\n"
+                        "<b>BucketId(0x40000000000011d7)</b>\n"
+                        "<ul>\n"
+                        "<li><i>Node 0</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000011d7), userdoc:footype:4567:0, timestamp 2000)</li>\n"
+                        "<li><i>Node 0</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000011d7), userdoc:footype:4567:2, timestamp 2002)</li>\n"
+                        "<li><i>Node 1</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000011d7), userdoc:footype:4567:1, timestamp 2001)</li>\n"
+                        "<li><i>Node 1</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000011d7), userdoc:footype:4567:3, timestamp 2003)</li>\n"
+                        "</ul>\n"),
+                ost.str());
+    }
+
+    {
+        std::ostringstream ost;
+        tracker.reportStatus(ost, framework::HttpUrlPath("/pendingmessages?order=node"));
+
+        CPPUNIT_ASSERT_CONTAIN(std::string(
+                        "<b>Node 0 (pending count: 4)</b>\n"
+                        "<ul>\n"
+                        "<li><b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000004d2), userdoc:footype:1234:0, timestamp 1000)</li>\n"
+                        "<li><b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000004d2), userdoc:footype:1234:2, timestamp 1002)</li>\n"
+                        "<li><b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000011d7), userdoc:footype:4567:0, timestamp 2000)</li>\n"
+                        "<li><b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000011d7), userdoc:footype:4567:2, timestamp 2002)</li>\n"
+                        "</ul>\n"
+                        "<b>Node 1 (pending count: 4)</b>\n"
+                        "<ul>\n"
+                        "<li><b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000004d2), userdoc:footype:1234:1, timestamp 1001)</li>\n"
+                        "<li><b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000004d2), userdoc:footype:1234:3, timestamp 1003)</li>\n"
+                        "<li><b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000011d7), userdoc:footype:4567:1, timestamp 2001)</li>\n"
+                        "<li><b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000011d7), userdoc:footype:4567:3, timestamp 2003)</li>\n"
+                        "</ul>\n"
+                ), ost.str());
+    }
+}
+
+namespace {
+
+// Renders a set as "{a,b,c}" (comma-separated, in set order) for readable
+// assertion failure messages.
+template <typename T>
+std::string setToString(const std::set<T>& s)
+{
+    std::ostringstream out;
+    out << '{';
+    const char* separator = "";
+    for (const T& element : s) {
+        out << separator << element;
+        separator = ",";
+    }
+    out << '}';
+    return out.str();
+}
+
+}
+
+namespace {
+
+// Checker that records the priority of a pending REMOVE message and stops
+// iteration when one is seen; all other message types are passed over.
+class TestChecker : public PendingMessageTracker::Checker
+{
+public:
+    // Priority of the matched REMOVE, or UINT8_MAX if none was seen.
+    uint8_t pri;
+
+    TestChecker() : pri(UINT8_MAX) {}
+
+    // `override` added for consistency with OperationEnumerator::check and so
+    // the compiler verifies the signature against the Checker interface.
+    bool check(uint32_t msgType, uint16_t node, uint8_t p) override {
+        (void) node;
+        if (msgType == api::MessageType::REMOVE_ID) {
+            pri = p;
+            return false;
+        }
+
+        return true;
+    }
+};
+
+
+}
+
+// checkPendingMessages must invoke the checker only for the (node, bucket)
+// pair the message was sent to; other nodes/buckets see nothing.
+void
+PendingMessageTrackerCallback_Test::testGetPendingMessageTypes()
+{
+    StorageComponentRegisterImpl compReg;
+    framework::defaultimplementation::FakeClock clock;
+    compReg.setClock(clock);
+    clock.setAbsoluteTimeInSeconds(1);
+    PendingMessageTracker tracker(compReg);
+    document::BucketId bid(16, 1234);
+
+    std::shared_ptr<api::RemoveCommand> remove(
+            new api::RemoveCommand(
+                bid,
+                document::DocumentId("userdoc:footype:1234:foo"), 1001));
+    remove->setAddress(
+            api::StorageMessageAddress("storage", lib::NodeType::STORAGE, 0));
+    tracker.insert(remove);
+
+    {
+        // 127 is the default priority carried by the remove command.
+        TestChecker checker;
+        tracker.checkPendingMessages(0, bid, checker);
+        CPPUNIT_ASSERT_EQUAL(127, (int)checker.pri);
+    }
+
+    {
+        // Different bucket: checker untouched (255 == UINT8_MAX sentinel).
+        TestChecker checker;
+        tracker.checkPendingMessages(0, document::BucketId(16, 1235), checker);
+        CPPUNIT_ASSERT_EQUAL(255, (int)checker.pri);
+    }
+
+    {
+        // Different node: checker untouched.
+        TestChecker checker;
+        tracker.checkPendingMessages(1, bid, checker);
+        CPPUNIT_ASSERT_EQUAL(255, (int)checker.pri);
+    }
+}
+
+// hasPendingMessage must match only on the exact (node, bucket, message
+// type) triplet of an inserted message.
+void
+PendingMessageTrackerCallback_Test::testHasPendingMessage()
+{
+    StorageComponentRegisterImpl compReg;
+    framework::defaultimplementation::FakeClock clock;
+    compReg.setClock(clock);
+    clock.setAbsoluteTimeInSeconds(1);
+    PendingMessageTracker tracker(compReg);
+    document::BucketId bid(16, 1234);
+
+    CPPUNIT_ASSERT(!tracker.hasPendingMessage(1, bid, api::MessageType::REMOVE_ID));
+
+    {
+        std::shared_ptr<api::RemoveCommand> remove(
+                new api::RemoveCommand(
+                    bid,
+                    document::DocumentId("userdoc:footype:1234:foo"), 1001));
+        remove->setAddress(
+                api::StorageMessageAddress("storage", lib::NodeType::STORAGE, 1));
+        tracker.insert(remove);
+    }
+
+    // Only the exact triplet matches; wrong node, bucket or type must miss.
+    CPPUNIT_ASSERT(tracker.hasPendingMessage(1, bid, api::MessageType::REMOVE_ID));
+    CPPUNIT_ASSERT(!tracker.hasPendingMessage(0, bid, api::MessageType::REMOVE_ID));
+    CPPUNIT_ASSERT(!tracker.hasPendingMessage(2, bid, api::MessageType::REMOVE_ID));
+    CPPUNIT_ASSERT(!tracker.hasPendingMessage(1,
+                                              document::BucketId(16, 1233),
+                                              api::MessageType::REMOVE_ID));
+    CPPUNIT_ASSERT(!tracker.hasPendingMessage(1, bid, api::MessageType::DELETEBUCKET_ID));
+}
+
+namespace {
+
+// Collects one "<MessageType> -> <node>" line per visited pending operation,
+// for comparison against an expected enumeration.
+class OperationEnumerator : public PendingMessageTracker::Checker
+{
+    std::ostringstream ss;
+public:
+    bool check(uint32_t msgType, uint16_t node, uint8_t p) override {
+        (void) p;
+        const api::MessageType& type(
+                api::MessageType::get(
+                        static_cast<api::MessageType::Id>(msgType)));
+        ss << type.getName() << " -> " << node << "\n";
+        return true; // never abort enumeration
+    }
+
+    std::string str() const { return ss.str(); }
+};
+
+} // anon ns
+
+// The single-bucket overload of checkPendingMessages must enumerate every
+// pending message for that bucket across all nodes, and nothing for a
+// bucket without messages.
+void
+PendingMessageTrackerCallback_Test::testGetAllMessagesForSingleBucket()
+{
+    StorageComponentRegisterImpl compReg;
+    framework::defaultimplementation::FakeClock clock;
+    compReg.setClock(clock);
+    clock.setAbsoluteTimeInSeconds(1);
+    PendingMessageTracker tracker(compReg);
+
+    insertMessages(tracker);
+
+    {
+        OperationEnumerator enumerator;
+        tracker.checkPendingMessages(document::BucketId(16, 1234), enumerator);
+        CPPUNIT_ASSERT_EQUAL(std::string("Remove -> 0\n"
+                                         "Remove -> 0\n"
+                                         "Remove -> 1\n"
+                                         "Remove -> 1\n"),
+                             enumerator.str());
+    }
+    {
+        OperationEnumerator enumerator;
+        tracker.checkPendingMessages(document::BucketId(16, 9876), enumerator);
+        CPPUNIT_ASSERT_EQUAL(std::string(""), enumerator.str());
+    }
+}
+
+// NodeStats must stream in the documented human-readable form.
+void
+PendingMessageTrackerCallback_Test::nodeStatsCanBeOutputStreamed()
+{
+    NodeStats stats;
+    stats.puts = makeOpStats(ms(56789), 10);
+
+    std::ostringstream os;
+    os << stats;
+    std::string expected(
+            "NodeStats(puts=OperationStats("
+            "totalLatency=56789ms, "
+            "numRequests=10))");
+    CPPUNIT_ASSERT_EQUAL(expected, os.str());
+}
+
+// A fresh tracker reports zeroed put stats for any node.
+void
+PendingMessageTrackerCallback_Test::totalPutLatencyIsInitiallyZero()
+{
+    Fixture fixture;
+    CPPUNIT_ASSERT_EQUAL(makeOpStats(ms(0), 0),
+                         fixture.getNodePutOperationStats(0));
+}
+
+// Sending a put alone must not affect stats; only the reply registers.
+void
+PendingMessageTrackerCallback_Test::statsNotAlteredBeforeReplyReceived()
+{
+    Fixture fixture;
+    fixture.sendPut(RequestBuilder().atTime(ms(1000)).toNode(0));
+    CPPUNIT_ASSERT_EQUAL(makeOpStats(ms(0), 0),
+                         fixture.getNodePutOperationStats(0));
+}
+
+// One put answered 500ms later yields totalLatency=500ms, numRequests=1.
+void
+PendingMessageTrackerCallback_Test::totalPutLatencyIsTrackedForSingleRequest()
+{
+    Fixture fixture;
+    fixture.sendPutAndReplyWithLatency(0, ms(500));
+
+    CPPUNIT_ASSERT_EQUAL(makeOpStats(ms(500), 1),
+                         fixture.getNodePutOperationStats(0));
+}
+
+// Latencies must be attributed to the node the put was sent to, not pooled.
+void
+PendingMessageTrackerCallback_Test::statsAreTrackedSeparatelyPerNode()
+{
+    Fixture fixture;
+    fixture.sendPutAndReplyWithLatency(0, ms(500));
+    fixture.sendPutAndReplyWithLatency(1, ms(600));
+
+    CPPUNIT_ASSERT_EQUAL(makeOpStats(ms(500), 1),
+                         fixture.getNodePutOperationStats(0));
+    CPPUNIT_ASSERT_EQUAL(makeOpStats(ms(600), 1),
+                         fixture.getNodePutOperationStats(1));
+}
+
+// Necessarily, this test will have to be altered when we add tracking of
+// other message types as well.
+// A remove round-trip must leave the put stats untouched.
+void
+PendingMessageTrackerCallback_Test::onlyPutMessagesAreTracked()
+{
+    Fixture fixture;
+    auto remove = fixture.sendRemove(
+            RequestBuilder().atTime(ms(1000)).toNode(0));
+    fixture.sendRemoveReply(*remove, RequestBuilder().atTime(ms(2000)));
+    CPPUNIT_ASSERT_EQUAL(makeOpStats(ms(0), 0),
+                         fixture.getNodePutOperationStats(0));
+}
+
+// Latencies sum and request counts accumulate across multiple puts to the
+// same node.
+void
+PendingMessageTrackerCallback_Test::totalPutLatencyIsAggregatedAcrossRequests()
+{
+    Fixture fixture;
+    // Model 2 concurrent puts to node 0.
+    fixture.sendPutAndReplyWithLatency(0, ms(500));
+    fixture.sendPutAndReplyWithLatency(0, ms(600));
+    CPPUNIT_ASSERT_EQUAL(makeOpStats(ms(1100), 2),
+                         fixture.getNodePutOperationStats(0));
+}
+
+// clearMessagesForNode drops pending messages but must keep accumulated
+// latency statistics intact.
+void
+PendingMessageTrackerCallback_Test::clearingMessagesDoesNotAffectStats()
+{
+    Fixture fixture;
+    fixture.sendPutAndReplyWithLatency(2, ms(2000));
+    fixture.tracker().clearMessagesForNode(2);
+    CPPUNIT_ASSERT_EQUAL(makeOpStats(ms(2000), 1),
+                         fixture.getNodePutOperationStats(2));
+}
+
+// If the clock moves backwards between send and reply, the (negative)
+// latency is discarded, but the request is still counted.
+void
+PendingMessageTrackerCallback_Test::timeTravellingClockLatenciesNotRegistered()
+{
+    Fixture fixture;
+    auto put = fixture.sendPut(RequestBuilder().atTime(ms(1000)).toNode(0));
+    fixture.sendPutReply(*put, RequestBuilder().atTime(ms(999)));
+    // Latency increase of zero, but we do count the request itself.
+    CPPUNIT_ASSERT_EQUAL(makeOpStats(ms(0), 1),
+                         fixture.getNodePutOperationStats(0));
+}
+
+// getLatencyStatistics must return a snapshot covering every node that has
+// registered stats.
+void
+PendingMessageTrackerCallback_Test::statsSnapshotIncludesAllNodes()
+{
+    Fixture fixture;
+    fixture.sendPutAndReplyWithLatency(0, ms(500));
+    fixture.sendPutAndReplyWithLatency(1, ms(600));
+
+    NodeStatsSnapshot snapshot = fixture.tracker().getLatencyStatistics();
+
+    CPPUNIT_ASSERT_EQUAL(size_t(2), snapshot.nodeToStats.size());
+    CPPUNIT_ASSERT_EQUAL(makeOpStats(ms(500), 1),
+                         snapshot.nodeToStats[0].puts);
+    CPPUNIT_ASSERT_EQUAL(makeOpStats(ms(600), 1),
+                         snapshot.nodeToStats[1].puts);
+}
+
+// The LatencyStatisticsProvider facade must return the same snapshot data
+// as querying the tracker directly.
+void
+PendingMessageTrackerCallback_Test::latencyProviderForwardsToImplementation()
+{
+    Fixture fixture;
+    fixture.sendPutAndReplyWithLatency(0, ms(500));
+
+    LatencyStatisticsProvider& provider(
+            fixture.tracker().getLatencyStatisticsProvider());
+    NodeStatsSnapshot snapshot = provider.getLatencyStatistics();
+
+    CPPUNIT_ASSERT_EQUAL(size_t(1), snapshot.nodeToStats.size());
+    CPPUNIT_ASSERT_EQUAL(makeOpStats(ms(500), 1),
+                         snapshot.nodeToStats[0].puts);
+}
+
+} // distributor
+} // storage
diff --git a/storage/src/tests/distributor/putoperationtest.cpp b/storage/src/tests/distributor/putoperationtest.cpp
new file mode 100644
index 00000000000..011b34cd1e3
--- /dev/null
+++ b/storage/src/tests/distributor/putoperationtest.cpp
@@ -0,0 +1,704 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+
+#include <vespa/config/helper/configgetter.h>
+#include <vespa/document/config/config-documenttypes.h>
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/storage/distributor/operations/external/putoperation.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storageapi/message/state.h>
+#include <tests/distributor/distributortestutil.h>
+#include <tests/common/dummystoragelink.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/vespalib/text/stringtokenizer.h>
+#include <iomanip>
+#include <iostream>
+#include <memory>
+
+using std::shared_ptr;
+using config::ConfigGetter;
+using document::DocumenttypesConfig;
+using config::FileSpec;
+using vespalib::string;
+using namespace document;
+using namespace storage;
+using namespace storage::api;
+using namespace storage::lib;
+using namespace std::literals::string_literals;
+
+namespace storage {
+
+namespace distributor {
+
+// Test suite for the distributor PutOperation: replica targeting, create-
+// bucket handling, inline splitting, revert behavior and early returns.
+class PutOperationTest : public CppUnit::TestFixture,
+                         public DistributorTestUtil {
+    CPPUNIT_TEST_SUITE(PutOperationTest);
+    CPPUNIT_TEST(testSimple);
+    CPPUNIT_TEST(testBucketDatabaseGetsSpecialEntryWhenCreateBucketSent);
+    CPPUNIT_TEST(testSendInlineSplitBeforePutIfBucketTooLarge);
+    CPPUNIT_TEST(testDoNotSendInlineSplitIfNotConfigured);
+    CPPUNIT_TEST(testNodeRemovedOnReply);
+    CPPUNIT_TEST(testDoNotSendCreateBucketIfAlreadyPending);
+    CPPUNIT_TEST(testMultipleCopies);
+    CPPUNIT_TEST(testMultipleCopiesEarlyReturnPrimaryNotRequired);
+    CPPUNIT_TEST(testMultipleCopiesEarlyReturnPrimaryRequired);
+    CPPUNIT_TEST(testMultipleCopiesEarlyReturnPrimaryRequiredNotDone);
+    CPPUNIT_TEST_IGNORED(testDoNotRevertOnFailureAfterEarlyReturn);
+    CPPUNIT_TEST(testStorageFailed);
+    CPPUNIT_TEST(testRevertSuccessfulCopiesWhenOneFails);
+    CPPUNIT_TEST(testNoRevertIfRevertDisabled);
+    CPPUNIT_TEST(testNoStorageNodes);
+    CPPUNIT_TEST(testUpdateCorrectBucketOnRemappedPut);
+    CPPUNIT_TEST(testTargetNodes);
+    CPPUNIT_TEST(testDoNotResurrectDownedNodesInBucketDB);
+    CPPUNIT_TEST(sendToRetiredNodesIfNoUpNodesAvailable);
+    CPPUNIT_TEST(replicaImplicitlyActivatedWhenActivationIsNotDisabled);
+    CPPUNIT_TEST(replicaNotImplicitlyActivatedWhenActivationIsDisabled);
+    CPPUNIT_TEST_SUITE_END();
+
+    DocumentTypeRepo::SP _repo;
+    const DocumentType* _html_type;
+    std::unique_ptr<Operation> op;
+
+protected:
+    void testSimple();
+    void testBucketDatabaseGetsSpecialEntryWhenCreateBucketSent();
+    void testSendInlineSplitBeforePutIfBucketTooLarge();
+    void testDoNotSendInlineSplitIfNotConfigured();
+    void testNodeRemovedOnReply();
+    void testDoNotSendCreateBucketIfAlreadyPending();
+    void testStorageFailed();
+    // NOTE(review): testNoReply, testInconsistentChecksum and
+    // testBucketNotFound are declared but not registered in the suite above
+    // — confirm whether they should be added or removed.
+    void testNoReply();
+    void testMultipleCopies();
+    void testRevertSuccessfulCopiesWhenOneFails();
+    void testNoRevertIfRevertDisabled();
+    void testInconsistentChecksum();
+    void testNoStorageNodes();
+    void testMultipleCopiesEarlyReturnPrimaryNotRequired();
+    void testMultipleCopiesEarlyReturnPrimaryRequired();
+    void testMultipleCopiesEarlyReturnPrimaryRequiredNotDone();
+    void testDoNotRevertOnFailureAfterEarlyReturn();
+    void testUpdateCorrectBucketOnRemappedPut();
+    void testBucketNotFound();
+    void testTargetNodes();
+    void testDoNotResurrectDownedNodesInBucketDB();
+    void sendToRetiredNodesIfNoUpNodesAvailable();
+    void replicaImplicitlyActivatedWhenActivationIsNotDisabled();
+    void replicaNotImplicitlyActivatedWhenActivationIsDisabled();
+
+    void doTestCreationWithBucketActivationDisabled(bool disabled);
+
+public:
+    // Loads the document type repo and wires up the distributor test links.
+    void setUp() {
+        _repo.reset(
+                new DocumentTypeRepo(*ConfigGetter<DocumenttypesConfig>
+                        ::getConfig("config-doctypes", FileSpec("config-doctypes.cfg"))));
+        _html_type = _repo->getDocumentType("text/html");
+        createLinks();
+    } // stray ';' after the body removed (was an empty member declaration)
+
+    void tearDown() {
+        close();
+    }
+
+    document::BucketId createAndSendSampleDocument(uint32_t timeout);
+    std::string getNodes(const std::string& infoString);
+
+    // Replies to the idx'th sent command (last one if idx == -1) with the
+    // given result code and bucket info.
+    void sendReply(int idx = -1,
+                   api::ReturnCode::Result result
+                   = api::ReturnCode::OK,
+                   api::BucketInfo info = api::BucketInfo(1,2,3,4,5))
+    {
+        CPPUNIT_ASSERT(!_sender.commands.empty());
+        if (idx == -1) {
+            idx = _sender.commands.size() - 1;
+        } else if (static_cast<size_t>(idx) >= _sender.commands.size()) {
+            throw std::logic_error("Specified message index is greater "
+                                   "than number of received messages");
+        }
+
+        std::shared_ptr<api::StorageCommand> msg = _sender.commands[idx];
+        api::StorageReply::SP reply(msg->makeReply().release());
+        dynamic_cast<api::BucketInfoReply*>(reply.get())->setBucketInfo(info);
+        reply->setResult(result);
+
+        op->receive(_sender, reply);
+    }
+
+    // Wraps the put command in a PutOperation and starts it.
+    void sendPut(std::shared_ptr<api::PutCommand> msg) {
+        op.reset(new PutOperation(getExternalOperationHandler(),
+                                  msg,
+                                  getDistributor().getMetrics().
+                                  puts[msg->getLoadType()]));
+        op->start(_sender, framework::MilliSecTime(0));
+    }
+
+    Document::SP createDummyDocument(const char* ns,
+                                     const char* id) const
+    {
+        return Document::SP(
+                new Document(*_html_type,
+                             DocumentId(DocIdString(ns, id))));
+
+    }
+
+    std::shared_ptr<api::PutCommand> createPut(
+            const Document::SP doc) const
+    {
+        return std::shared_ptr<api::PutCommand>(
+                new api::PutCommand(document::BucketId(0), doc, 100));
+    }
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(PutOperationTest);
+
+// Builds a fixed sample document, registers its ideal nodes in the bucket
+// DB, and sends a put for it with the given timeout. Returns the bucket id
+// the document maps to.
+document::BucketId
+PutOperationTest::createAndSendSampleDocument(uint32_t timeout) {
+    Document::SP
+        doc(new Document(*_html_type,
+                         DocumentId(DocIdString("test", "test"))));
+
+    document::BucketId id = getExternalOperationHandler().getBucketId(doc->getId());
+    addIdealNodes(id);
+
+    std::shared_ptr<api::PutCommand> msg(
+            new api::PutCommand(document::BucketId(0),
+                                doc,
+                                0));
+    msg->setTimestamp(100);
+    msg->setPriority(128);
+    msg->setTimeout(timeout);
+    sendPut(msg);
+    return id;
+}
+
+namespace {
+
+// Named aliases documenting the meaning of setupDistributor's positional
+// arguments in the tests below.
+typedef int Redundancy;
+typedef int NodeCount;
+typedef uint32_t ReturnAfter;
+typedef bool RequirePrimaryWritten;
+
+}
+
+// Single node, single replica: the put goes to node 0 and its reply is
+// forwarded to the client with ReturnCode NONE.
+void
+PutOperationTest::testSimple()
+{
+    setupDistributor(1, 1, "storage:1 distributor:1");
+    createAndSendSampleDocument(180);
+
+    CPPUNIT_ASSERT_EQUAL(std::string("Put(BucketId(0x4000000000008b13), "
+                                     "doc:test:test, timestamp 100, size 33) => 0"),
+                         _sender.getCommands(true, true));
+
+    sendReply();
+
+    CPPUNIT_ASSERT_EQUAL(std::string("PutReply(doc:test:test, BucketId(0x0000000000000000), "
+                                     "timestamp 100) ReturnCode(NONE)"),
+                         _sender.getLastReply());
+}
+
+// When a CreateBucket must be sent, the bucket database is updated with a
+// placeholder (trusted, active) entry before the command goes out.
+void
+PutOperationTest::testBucketDatabaseGetsSpecialEntryWhenCreateBucketSent()
+{
+    setupDistributor(2, 1, "storage:1 distributor:1");
+
+    Document::SP doc(createDummyDocument("test", "test"));
+    document::BucketId bucketId(getExternalOperationHandler().getBucketId(doc->getId()));
+    sendPut(createPut(doc));
+
+    // Database updated before CreateBucket is sent
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("BucketId(0x4000000000008b13) : "
+                        "node(idx=0,crc=0x1,docs=0/0,bytes=0/0,trusted=true,active=true)"),
+            dumpBucket(getExternalOperationHandler().getBucketId(doc->getId())));
+
+    CPPUNIT_ASSERT_EQUAL(std::string("Create bucket => 0,Put => 0"),
+                         _sender.getCommands(true));
+}
+
+// A put targeting a bucket that exceeds the configured split limits must be
+// preceded by an inline SplitBucket command.
+void
+PutOperationTest::testSendInlineSplitBeforePutIfBucketTooLarge()
+{
+    setupDistributor(1, 1, "storage:1 distributor:1");
+    getConfig().setSplitCount(1024);
+    getConfig().setSplitSize(1000000);
+
+    // Existing copy is far above both limits.
+    addNodesToBucketDB(document::BucketId(0x4000000000002a52), "0=10000/10000/10000/t");
+
+    sendPut(createPut(createDummyDocument("test", "uri")));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("SplitBucketCommand(BucketId(0x4000000000002a52)Max doc count: "
+                        "1024, Max total doc size: 1000000) Reasons to start: "
+                        "[Splitting bucket because its maximum size (10000 b, 10000 docs, 10000 meta, 10000 b total) is "
+                        "higher than the configured limit of (1000000, 1024)] => 0,"
+                        "Put(BucketId(0x4000000000002a52), doc:test:uri, timestamp 100, "
+                        "size 32) => 0"),
+            _sender.getCommands(true, true));
+}
+
+// With inline splitting disabled in config, only the put is sent even
+// though the bucket exceeds the split count.
+void
+PutOperationTest::testDoNotSendInlineSplitIfNotConfigured()
+{
+    setupDistributor(1, 1, "storage:1 distributor:1");
+    getConfig().setSplitCount(1024);
+    getConfig().setDoInlineSplit(false);
+
+    addNodesToBucketDB(document::BucketId(0x4000000000002a52), "0=10000/10000/10000/t");
+
+    sendPut(createPut(createDummyDocument("test", "uri")));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string(
+                    "Put(BucketId(0x4000000000002a52), doc:test:uri, timestamp 100, "
+                    "size 32) => 0"),
+            _sender.getCommands(true, true));
+}
+
+// If a replica node is removed from the bucket DB while the put is in
+// flight, the client reply must report BUCKET_DELETED for that node.
+void
+PutOperationTest::testNodeRemovedOnReply()
+{
+    setupDistributor(2, 2, "storage:2 distributor:1");
+    createAndSendSampleDocument(180);
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("Put(BucketId(0x4000000000008b13), "
+                        "doc:test:test, timestamp 100, size 33) => 1,"
+                        "Put(BucketId(0x4000000000008b13), "
+                        "doc:test:test, timestamp 100, size 33) => 0"),
+            _sender.getCommands(true, true));
+
+    // Simulate node 0 losing the bucket mid-operation.
+    getExternalOperationHandler().removeNodeFromDB(document::BucketId(16, 0x8b13), 0);
+
+    sendReply(0);
+    sendReply(1);
+
+    CPPUNIT_ASSERT_EQUAL(std::string(
+                                 "PutReply(doc:test:test, BucketId(0x0000000000000000), "
+                                 "timestamp 100) ReturnCode(BUCKET_DELETED, "
+                                 "BucketId(0x4000000000008b13) was deleted from nodes [0] "
+                                 "after message was sent but before it was done. "
+                                 "Sent to [1,0])"),
+                         _sender.getLastReply());
+}
+
+// A storage-side failure must be propagated to the client reply unchanged.
+void
+PutOperationTest::testStorageFailed()
+{
+    setupDistributor(2, 1, "storage:1 distributor:1");
+
+    createAndSendSampleDocument(180);
+
+    sendReply(-1, api::ReturnCode::INTERNAL_FAILURE);
+
+    CPPUNIT_ASSERT_EQUAL(std::string("PutReply(doc:test:test, BucketId(0x0000000000000000), "
+                                     "timestamp 100) ReturnCode(INTERNAL_FAILURE)"),
+                         _sender.getLastReply(true));
+}
+
+// Redundancy 3 over an empty bucket: a CreateBucket + Put pair goes to each
+// of three nodes, and all copies end up trusted in the bucket DB.
+void
+PutOperationTest::testMultipleCopies()
+{
+    setupDistributor(3, 4, "storage:4 distributor:1");
+
+    Document::SP doc(createDummyDocument("test", "test"));
+    sendPut(createPut(doc));
+
+    CPPUNIT_ASSERT_EQUAL(std::string("Create bucket => 3,Create bucket => 1,"
+                                     "Create bucket => 0,Put => 3,Put => 1,Put => 0"),
+                         _sender.getCommands(true));
+
+    for (uint32_t i = 0; i < 6; i++) {
+        sendReply(i);
+    }
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("PutReply(doc:test:test, BucketId(0x0000000000000000), "
+                        "timestamp 100) ReturnCode(NONE)"),
+            _sender.getLastReply(true));
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("BucketId(0x4000000000008b13) : "
+                        "node(idx=3,crc=0x1,docs=2/4,bytes=3/5,trusted=true,active=false), "
+                        "node(idx=1,crc=0x1,docs=2/4,bytes=3/5,trusted=true,active=false), "
+                        "node(idx=0,crc=0x1,docs=2/4,bytes=3/5,trusted=true,active=false)"),
+            dumpBucket(getExternalOperationHandler().getBucketId(doc->getId())));
+}
+
+
+void
+PutOperationTest::testMultipleCopiesEarlyReturnPrimaryRequired()
+{
+ setupDistributor(3, 4, "storage:4 distributor:1", 2, true);
+
+ sendPut(createPut(createDummyDocument("test", "test")));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Create bucket => 3,Create bucket => 1,"
+ "Create bucket => 0,Put => 3,Put => 1,Put => 0"),
+ _sender.getCommands(true));
+
+ // Reply to 2 CreateBucket, including primary
+ for (uint32_t i = 0; i < 2; i++) {
+ sendReply(i);
+ }
+ // Reply to 2 puts, including primary
+ for (uint32_t i = 0; i < 2; i++) {
+ sendReply(3 + i);
+ }
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string(
+ "PutReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 100) ReturnCode(NONE)"),
+ _sender.getLastReply());
+}
+
+// Early return after 2 copies without requiring the primary: two acks from
+// non-primary nodes are enough to answer the client.
+void
+PutOperationTest::testMultipleCopiesEarlyReturnPrimaryNotRequired()
+{
+    setupDistributor(3, 4, "storage:4 distributor:1", 2, false);
+
+    sendPut(createPut(createDummyDocument("test", "test")));
+
+    CPPUNIT_ASSERT_EQUAL(std::string("Create bucket => 3,Create bucket => 1,"
+                                     "Create bucket => 0,Put => 3,Put => 1,Put => 0"),
+                         _sender.getCommands(true));
+
+    // Reply only to 2 nodes (but not the primary)
+    for (uint32_t i = 1; i < 3; i++) {
+        sendReply(i); // CreateBucket
+    }
+    for (uint32_t i = 1; i < 3; i++) {
+        sendReply(3 + i); // Put
+    }
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("PutReply(doc:test:test, BucketId(0x0000000000000000), "
+                        "timestamp 100) ReturnCode(NONE)"),
+            _sender.getLastReply());
+}
+
+void
+PutOperationTest::testMultipleCopiesEarlyReturnPrimaryRequiredNotDone()
+{
+ setupDistributor(3, 4, "storage:4 distributor:1", 2, true);
+
+ sendPut(createPut(createDummyDocument("test", "test")));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Create bucket => 3,Create bucket => 1,"
+ "Create bucket => 0,Put => 3,Put => 1,Put => 0"),
+ _sender.getCommands(true));
+
+ // Reply only to 2 nodes (but not the primary)
+ sendReply(1);
+ sendReply(2);
+ sendReply(4);
+ sendReply(5);
+
+ CPPUNIT_ASSERT_EQUAL(0, (int)_sender.replies.size());
+}
+
+void
+PutOperationTest::testDoNotRevertOnFailureAfterEarlyReturn()
+{
+ setupDistributor(Redundancy(3),NodeCount(4), "storage:4 distributor:1",
+ ReturnAfter(2), RequirePrimaryWritten(false));
+
+ sendPut(createPut(createDummyDocument("test", "test")));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Create bucket => 3,Create bucket => 1,"
+ "Create bucket => 0,Put => 3,Put => 1,Put => 0"),
+ _sender.getCommands(true));
+
+ for (uint32_t i = 0; i < 3; i++) {
+ sendReply(i); // CreateBucket
+ }
+ for (uint32_t i = 0; i < 2; i++) {
+ sendReply(3 + i); // Put
+ }
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string(
+ "PutReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 100) ReturnCode(NONE)"),
+ _sender.getLastReply());
+
+ sendReply(5, api::ReturnCode::INTERNAL_FAILURE);
+ // Should not be any revert commands sent
+ CPPUNIT_ASSERT_EQUAL(std::string("Create bucket => 3,Create bucket => 1,"
+ "Create bucket => 0,Put => 3,Put => 1,Put => 0"),
+ _sender.getCommands(true));
+}
+
+void
+PutOperationTest::testRevertSuccessfulCopiesWhenOneFails()
+{
+ setupDistributor(3, 4, "storage:4 distributor:1");
+
+ createAndSendSampleDocument(180);
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Put => 3,Put => 1,Put => 0"),
+ _sender.getCommands(true));
+
+ for (uint32_t i = 0; i < 2; i++) {
+ sendReply(i);
+ }
+
+ sendReply(2, api::ReturnCode::INTERNAL_FAILURE);
+
+ CPPUNIT_ASSERT_EQUAL(std::string("PutReply(doc:test:test, "
+ "BucketId(0x0000000000000000), timestamp 100) "
+ "ReturnCode(INTERNAL_FAILURE)"),
+ _sender.getLastReply(true));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Revert => 3,Revert => 1"),
+ _sender.getCommands(true, false, 3));
+}
+
+void
+PutOperationTest::testNoRevertIfRevertDisabled()
+{
+ close();
+ getDirConfig().getConfig("stor-distributormanager")
+ .set("enable_revert", "false");
+ setUp();
+ setupDistributor(3, 4, "storage:4 distributor:1");
+
+ createAndSendSampleDocument(180);
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Put => 3,Put => 1,Put => 0"),
+ _sender.getCommands(true));
+
+ for (uint32_t i = 0; i < 2; i++) {
+ sendReply(i);
+ }
+
+ sendReply(2, api::ReturnCode::INTERNAL_FAILURE);
+
+ CPPUNIT_ASSERT_EQUAL(std::string("PutReply(doc:test:test, "
+ "BucketId(0x0000000000000000), timestamp 100) "
+ "ReturnCode(INTERNAL_FAILURE)"),
+ _sender.getLastReply(true));
+
+ CPPUNIT_ASSERT_EQUAL(std::string(""),
+ _sender.getCommands(true, false, 3));
+}
+
+void
+PutOperationTest::testDoNotSendCreateBucketIfAlreadyPending()
+{
+ setupDistributor(2, 2, "storage:2 distributor:1");
+
+ Document::SP doc(createDummyDocument("test", "uri"));
+ sendPut(createPut(doc));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Create bucket => 1,Create bucket => 0,"
+ "Put => 1,Put => 0"),
+ _sender.getCommands(true));
+
+ // Manually shove sent messages into pending message tracker, since
+ // this isn't done automatically.
+ for (size_t i = 0; i < _sender.commands.size(); ++i) {
+ getExternalOperationHandler().getDistributor().getPendingMessageTracker()
+ .insert(_sender.commands[i]);
+ }
+
+ sendPut(createPut(doc));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Create bucket => 1,Create bucket => 0,"
+ "Put => 1,Put => 0,"
+ "Put => 1,Put => 0"),
+ _sender.getCommands(true));
+}
+
+void
+PutOperationTest::testNoStorageNodes()
+{
+ setupDistributor(2, 1, "storage:0 distributor:1");
+ createAndSendSampleDocument(180);
+ CPPUNIT_ASSERT_EQUAL(std::string("PutReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 100) ReturnCode(NOT_CONNECTED, "
+ "Can't store document: No storage nodes available)"),
+ _sender.getLastReply(true));
+}
+
+void
+PutOperationTest::testUpdateCorrectBucketOnRemappedPut()
+{
+ setupDistributor(2, 2, "storage:2 distributor:1");
+
+ Document::SP doc(new Document(*_html_type, DocumentId(
+ UserDocIdString("userdoc:test:13:uri"))));
+
+ addNodesToBucketDB(document::BucketId(16,13), "0=0,1=0");
+
+ sendPut(createPut(doc));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Put => 0,Put => 1"),
+ _sender.getCommands(true));
+
+ {
+ std::shared_ptr<api::StorageCommand> msg2 = _sender.commands[0];
+ std::shared_ptr<api::StorageReply> reply(msg2->makeReply().release());
+ PutReply* sreply = (PutReply*)reply.get();
+ sreply->remapBucketId(document::BucketId(17, 13));
+ sreply->setBucketInfo(api::BucketInfo(1,2,3,4,5));
+ op->receive(_sender, reply);
+ }
+
+ sendReply(1);
+
+ CPPUNIT_ASSERT_EQUAL(std::string("PutReply(userdoc:test:13:uri, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 100) ReturnCode(NONE)"),
+ _sender.getLastReply());
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("BucketId(0x440000000000000d) : "
+ "node(idx=0,crc=0x1,docs=2/4,bytes=3/5,trusted=true,active=false)"),
+ dumpBucket(document::BucketId(17, 13)));
+}
+
+BucketInfo
+parseBucketInfoString(const std::string& nodeList) {
+ vespalib::StringTokenizer tokenizer(nodeList, ",");
+
+ BucketInfo entry;
+ for (uint32_t i = 0; i < tokenizer.size(); i++) {
+ vespalib::StringTokenizer tokenizer2(tokenizer[i], "-");
+ int node = atoi(tokenizer2[0].c_str());
+ int size = atoi(tokenizer2[1].c_str());
+ bool trusted = (tokenizer2[2] == "true");
+
+ entry.addNode(BucketCopy(0,
+ node,
+ api::BucketInfo(size, size * 1000, size * 2000))
+ .setTrusted(trusted),
+ toVector<uint16_t>(0));
+ }
+
+ return entry;
+}
+
+std::string
+PutOperationTest::getNodes(const std::string& infoString) {
+ Document::SP doc(createDummyDocument("test", "uri"));
+ document::BucketId bid(getExternalOperationHandler().getBucketId(doc->getId()));
+
+ BucketInfo entry = parseBucketInfoString(infoString);
+
+ std::ostringstream ost;
+
+ std::vector<uint16_t> targetNodes;
+ std::vector<uint16_t> createNodes;
+ PutOperation::getTargetNodes(getExternalOperationHandler().getIdealNodes(bid),
+ targetNodes, createNodes, entry, 2);
+
+ ost << "target( ";
+ for (uint32_t i = 0; i < targetNodes.size(); i++) {
+ ost << targetNodes[i] << " ";
+ }
+ ost << ") create( ";
+ for (uint32_t i = 0; i < createNodes.size(); i++) {
+ ost << createNodes[i] << " ";
+ }
+ ost << ")";
+
+ return ost.str();
+}
+
+void
+PutOperationTest::testTargetNodes()
+{
+ setupDistributor(2, 6, "storage:6 distributor:1");
+
+ // Ideal state of bucket is 1,3.
+ CPPUNIT_ASSERT_EQUAL(std::string("target( 1 3 ) create( 1 3 )"), getNodes(""));
+ CPPUNIT_ASSERT_EQUAL(std::string("target( 1 3 ) create( 3 )"), getNodes("1-1-true"));
+ CPPUNIT_ASSERT_EQUAL(std::string("target( 1 3 ) create( 3 )"), getNodes("1-1-false"));
+ CPPUNIT_ASSERT_EQUAL(std::string("target( 3 4 5 ) create( )"), getNodes("3-1-true,4-1-true,5-1-true"));
+ CPPUNIT_ASSERT_EQUAL(std::string("target( 3 4 ) create( )"), getNodes("3-2-true,4-2-true,5-1-false"));
+ CPPUNIT_ASSERT_EQUAL(std::string("target( 1 3 4 ) create( )"), getNodes("3-2-true,4-2-true,1-1-false"));
+ CPPUNIT_ASSERT_EQUAL(std::string("target( 4 5 ) create( )"), getNodes("4-2-false,5-1-false"));
+ CPPUNIT_ASSERT_EQUAL(std::string("target( 1 4 ) create( 1 )"), getNodes("4-1-true"));
+}
+
+void
+PutOperationTest::testDoNotResurrectDownedNodesInBucketDB()
+{
+ setupDistributor(2, 2, "storage:2 distributor:1");
+
+ Document::SP doc(createDummyDocument("test", "uri"));
+ document::BucketId bId = getExternalOperationHandler().getBucketId(doc->getId());
+
+ addNodesToBucketDB(bId, "0=1/2/3/t,1=1/2/3/t");
+
+ sendPut(createPut(doc));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Put => 1,Put => 0"),
+ _sender.getCommands(true));
+
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:2 .1.s:d"));
+ addNodesToBucketDB(bId, "0=1/2/3/t"); // This will actually remove node #1.
+
+ sendReply(0, api::ReturnCode::OK, api::BucketInfo(9,9,9));
+ sendReply(1, api::ReturnCode::OK, api::BucketInfo(5,6,7));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("BucketId(0x4000000000002a52) : "
+ "node(idx=0,crc=0x5,docs=6/6,bytes=7/7,trusted=true,active=false)"),
+ dumpBucket(getExternalOperationHandler().getBucketId(doc->getId())));
+}
+
+void
+PutOperationTest::sendToRetiredNodesIfNoUpNodesAvailable()
+{
+ setupDistributor(Redundancy(2), NodeCount(2),
+ "distributor:1 storage:2 .0.s:r .1.s:r");
+ Document::SP doc(createDummyDocument("test", "uri"));
+ document::BucketId bucket(
+ getExternalOperationHandler().getBucketId(doc->getId()));
+ addNodesToBucketDB(bucket, "0=1/2/3/t,1=1/2/3/t");
+
+ sendPut(createPut(doc));
+
+ CPPUNIT_ASSERT_EQUAL("Put => 0,Put => 1"s,
+ _sender.getCommands(true));
+}
+
+void
+PutOperationTest::doTestCreationWithBucketActivationDisabled(bool disabled)
+{
+ setupDistributor(Redundancy(2), NodeCount(2), "distributor:1 storage:1");
+ disableBucketActivationInConfig(disabled);
+
+ Document::SP doc(createDummyDocument("test", "uri"));
+ sendPut(createPut(doc));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Create bucket => 0,Put => 0"),
+ _sender.getCommands(true));
+ auto cmd = _sender.commands[0];
+ auto createCmd = std::dynamic_pointer_cast<api::CreateBucketCommand>(cmd);
+ CPPUNIT_ASSERT(createCmd.get() != nullptr);
+ // There's only 1 content node, so if activation were not disabled, it
+ // should always be activated.
+ CPPUNIT_ASSERT_EQUAL(!disabled, createCmd->getActive());
+}
+
+void
+PutOperationTest::replicaImplicitlyActivatedWhenActivationIsNotDisabled()
+{
+ doTestCreationWithBucketActivationDisabled(false);
+}
+
+void
+PutOperationTest::replicaNotImplicitlyActivatedWhenActivationIsDisabled()
+{
+ doTestCreationWithBucketActivationDisabled(true);
+}
+
+}
+
+}
diff --git a/storage/src/tests/distributor/removebucketoperationtest.cpp b/storage/src/tests/distributor/removebucketoperationtest.cpp
new file mode 100644
index 00000000000..aeceefa15a0
--- /dev/null
+++ b/storage/src/tests/distributor/removebucketoperationtest.cpp
@@ -0,0 +1,150 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <cppunit/extensions/HelperMacros.h>
+#include <tests/common/dummystoragelink.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <vespa/storage/distributor/operations/idealstate/removebucketoperation.h>
+#include <vespa/storage/distributor/idealstatemanager.h>
+#include <vespa/storage/distributor/pendingmessagetracker.h>
+#include <vespa/storage/storageutil/utils.h>
+#include <tests/distributor/distributortestutil.h>
+
+namespace storage {
+namespace distributor {
+
+class RemoveBucketOperationTest : public CppUnit::TestFixture,
+ public DistributorTestUtil
+{
+ CPPUNIT_TEST_SUITE(RemoveBucketOperationTest);
+ CPPUNIT_TEST(testSimple);
+ CPPUNIT_TEST(testBucketInfoMismatchFailure);
+ CPPUNIT_TEST(testFailWithInvalidBucketInfo);
+ CPPUNIT_TEST_SUITE_END();
+
+protected:
+ void testSimple();
+ void testBucketInfoMismatchFailure();
+ void testFailWithInvalidBucketInfo();
+
+public:
+ void setUp() {
+ createLinks();
+ };
+
+ void tearDown() {
+ close();
+ }
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(RemoveBucketOperationTest);
+
+void
+RemoveBucketOperationTest::testSimple()
+{
+ addNodesToBucketDB(document::BucketId(16, 1),
+ "0=10/100/1/t,"
+ "1=10/100/1/t,"
+ "2=10/100/1/t");
+ setRedundancy(1);
+ _distributor->enableClusterState(lib::ClusterState("distributor:1 storage:3"));
+
+ RemoveBucketOperation op("storage",
+ BucketAndNodes(document::BucketId(16, 1),
+ toVector<uint16_t>(1,2)));
+ op.setIdealStateManager(&getIdealStateManager());
+ op.start(_sender, framework::MilliSecTime(0));
+
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Delete bucket => 1,"
+ "Delete bucket => 2"),
+ _sender.getCommands(true));
+
+ sendReply(op, 0);
+ sendReply(op, 1);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string(
+ "BucketId(0x4000000000000001) : "
+ "node(idx=0,crc=0xa,docs=100/100,bytes=1/1,trusted=true,active=false)"),
+ dumpBucket(document::BucketId(16, 1)));
+}
+
+/**
+ * Test that receiving a DeleteBucket failure from a storage node that sends
+ * back actual bucket info reinserts that bucket info into the distributor
+ * bucket database.
+ */
+void
+RemoveBucketOperationTest::testBucketInfoMismatchFailure()
+{
+ addNodesToBucketDB(document::BucketId(16, 1), "1=0/0/0/t");
+
+ getComponentRegisterImpl().setDistribution(std::shared_ptr<lib::Distribution>(
+ new lib::Distribution(
+ lib::Distribution::getDefaultDistributionConfig(1, 10))));
+
+ _distributor->enableClusterState(lib::ClusterState("distributor:1 storage:2"));
+
+ RemoveBucketOperation op("storage",
+ BucketAndNodes(document::BucketId(16, 1),
+ toVector<uint16_t>(1)));
+ op.setIdealStateManager(&getIdealStateManager());
+ op.start(_sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Delete bucket => 1"),
+ _sender.getCommands(true));
+
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, _sender.commands.size());
+ std::shared_ptr<api::StorageCommand> msg2 = _sender.commands[0];
+ std::shared_ptr<api::StorageReply> reply(msg2->makeReply().release());
+ dynamic_cast<api::DeleteBucketReply&>(*reply).setBucketInfo(
+ api::BucketInfo(10, 100, 1));
+ reply->setResult(api::ReturnCode::REJECTED);
+ op.receive(_sender, reply);
+
+ // RemoveBucketOperation should reinsert bucketinfo into database
+ CPPUNIT_ASSERT_EQUAL(
+ std::string(
+ "BucketId(0x4000000000000001) : "
+ "node(idx=1,crc=0xa,docs=100/100,bytes=1/1,trusted=true,active=false)"),
+ dumpBucket(document::BucketId(16, 1)));
+}
+
+/**
+ * Test that receiving a DeleteBucket failure from a storage node that does
+ * not include valid BucketInfo in its reply does not reinsert the bucket
+ * into the distributor.
+ */
+void
+RemoveBucketOperationTest::testFailWithInvalidBucketInfo()
+{
+ addNodesToBucketDB(document::BucketId(16, 1), "1=0/0/0/t");
+
+ getComponentRegisterImpl().setDistribution(std::shared_ptr<lib::Distribution>(
+ new lib::Distribution(
+ lib::Distribution::getDefaultDistributionConfig(1, 10))));
+
+ _distributor->enableClusterState(lib::ClusterState("distributor:1 storage:2"));
+
+ RemoveBucketOperation op("storage",
+ BucketAndNodes(document::BucketId(16, 1),
+ toVector<uint16_t>(1)));
+ op.setIdealStateManager(&getIdealStateManager());
+ op.start(_sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Delete bucket => 1"),
+ _sender.getCommands(true));
+
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, _sender.commands.size());
+ std::shared_ptr<api::StorageCommand> msg2 = _sender.commands[0];
+ std::shared_ptr<api::StorageReply> reply(msg2->makeReply().release());
+ reply->setResult(api::ReturnCode::ABORTED);
+ op.receive(_sender, reply);
+
+ CPPUNIT_ASSERT_EQUAL(std::string("NONEXISTING"),
+ dumpBucket(document::BucketId(16, 1)));
+}
+
+} // distributor
+} // storage
diff --git a/storage/src/tests/distributor/removelocationtest.cpp b/storage/src/tests/distributor/removelocationtest.cpp
new file mode 100644
index 00000000000..7a1bba86303
--- /dev/null
+++ b/storage/src/tests/distributor/removelocationtest.cpp
@@ -0,0 +1,84 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <cppunit/extensions/HelperMacros.h>
+#include <iomanip>
+#include <iostream>
+#include <memory>
+#include <tests/common/dummystoragelink.h>
+#include <vespa/storageapi/message/removelocation.h>
+#include <vespa/storage/distributor/operations/external/removelocationoperation.h>
+#include <tests/distributor/distributortestutil.h>
+
+namespace storage {
+namespace distributor {
+
+class RemoveLocationOperationTest : public CppUnit::TestFixture,
+ public DistributorTestUtil
+{
+ CPPUNIT_TEST_SUITE(RemoveLocationOperationTest);
+ CPPUNIT_TEST(testSimple);
+ CPPUNIT_TEST_SUITE_END();
+
+protected:
+ void testSimple();
+
+public:
+ std::unique_ptr<RemoveLocationOperation> op;
+
+ void setUp() {
+ createLinks();
+ };
+
+ void tearDown() {
+ close();
+ }
+
+ void sendRemoveLocation(const std::string& selection) {
+ std::shared_ptr<api::RemoveLocationCommand> msg(
+ new api::RemoveLocationCommand(selection, document::BucketId(0)));
+
+ op.reset(new RemoveLocationOperation(getExternalOperationHandler(),
+ msg,
+ getDistributor().getMetrics().
+ removelocations[msg->getLoadType()]));
+
+ op->start(_sender, framework::MilliSecTime(0));
+ }
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(RemoveLocationOperationTest);
+
+void
+RemoveLocationOperationTest::testSimple()
+{
+ _distributor->enableClusterState(lib::ClusterState("distributor:1 storage:3"));
+
+ addNodesToBucketDB(document::BucketId(34, 0x000001234), "0=1,1=1");
+ addNodesToBucketDB(document::BucketId(34, 0x100001234), "0=1,2=1");
+ addNodesToBucketDB(document::BucketId(34, 0x200001234), "0=1,2=1");
+ addNodesToBucketDB(document::BucketId(34, 0x300001234), "1=1,2=1");
+
+ sendRemoveLocation("id.user=4660");
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Remove selection(id.user=4660): BucketInfoCommand() => 0,"
+ "Remove selection(id.user=4660): BucketInfoCommand() => 1,"
+ "Remove selection(id.user=4660): BucketInfoCommand() => 0,"
+ "Remove selection(id.user=4660): BucketInfoCommand() => 2,"
+ "Remove selection(id.user=4660): BucketInfoCommand() => 0,"
+ "Remove selection(id.user=4660): BucketInfoCommand() => 2,"
+ "Remove selection(id.user=4660): BucketInfoCommand() => 1,"
+ "Remove selection(id.user=4660): BucketInfoCommand() => 2"),
+ _sender.getCommands(true, true));
+
+ for (uint32_t i = 0; i < 8; ++i) {
+ sendReply(*op, i);
+ }
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("BucketInfoReply(BucketInfo(invalid)) ReturnCode(NONE)"),
+ _sender.getLastReply());
+}
+
+} // distributor
+} // storage
diff --git a/storage/src/tests/distributor/removeoperationtest.cpp b/storage/src/tests/distributor/removeoperationtest.cpp
new file mode 100644
index 00000000000..7907541a7c7
--- /dev/null
+++ b/storage/src/tests/distributor/removeoperationtest.cpp
@@ -0,0 +1,203 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <cppunit/extensions/HelperMacros.h>
+#include <iomanip>
+#include <iostream>
+#include <memory>
+#include <tests/common/dummystoragelink.h>
+#include <vespa/storage/distributor/externaloperationhandler.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <tests/distributor/distributortestutil.h>
+#include <vespa/storage/distributor/operations/external/removeoperation.h>
+
+namespace storage {
+namespace distributor {
+
+class RemoveOperationTest : public CppUnit::TestFixture,
+ public DistributorTestUtil
+{
+ CPPUNIT_TEST_SUITE(RemoveOperationTest);
+ CPPUNIT_TEST(testSimple);
+ CPPUNIT_TEST(testNotFound);
+ CPPUNIT_TEST(testStorageFailure);
+ CPPUNIT_TEST(testNotInDB);
+ CPPUNIT_TEST(testMultipleCopies);
+ CPPUNIT_TEST(canSendRemoveWhenAllReplicaNodesRetired);
+ CPPUNIT_TEST_SUITE_END();
+
+protected:
+ void testSimple();
+ void testNotFound();
+ void testStorageFailure();
+ void testNoReply();
+ void testNotInDB();
+ void testMultipleCopies();
+ void testRevert();
+ void canSendRemoveWhenAllReplicaNodesRetired();
+
+public:
+ document::DocumentId docId;
+ document::BucketId bucketId;
+ std::unique_ptr<RemoveOperation> op;
+
+ void setUp() {
+ createLinks();
+
+ docId = document::DocumentId(document::DocIdString("test", "uri"));
+ bucketId = getExternalOperationHandler().getBucketId(docId);
+ _distributor->enableClusterState(lib::ClusterState("distributor:1 storage:4"));
+ };
+
+ void tearDown() {
+ close();
+ }
+
+ void sendRemove(document::DocumentId dId) {
+ std::shared_ptr<api::RemoveCommand> msg(
+ new api::RemoveCommand(document::BucketId(0), dId, 100));
+
+ op.reset(new RemoveOperation(getExternalOperationHandler(),
+ msg,
+ getDistributor().getMetrics().
+ removes[msg->getLoadType()]));
+
+ op->start(_sender, framework::MilliSecTime(0));
+ }
+
+ void replyToMessage(RemoveOperation& callback,
+ uint32_t index,
+ uint64_t oldTimestamp)
+ {
+ if (index == (uint32_t)-1) {
+ index = _sender.commands.size() - 1;
+ }
+
+ std::shared_ptr<api::StorageMessage> msg2 = _sender.commands[index];
+ api::RemoveCommand* removec = dynamic_cast<api::RemoveCommand*>(msg2.get());
+ std::unique_ptr<api::StorageReply> reply(removec->makeReply());
+ api::RemoveReply* removeR = static_cast<api::RemoveReply*>(reply.get());
+ removeR->setOldTimestamp(oldTimestamp);
+ callback.onReceive(_sender,
+ std::shared_ptr<api::StorageReply>(reply.release()));
+ }
+
+ void sendRemove() {
+ sendRemove(docId);
+ }
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(RemoveOperationTest);
+
+void
+RemoveOperationTest::testSimple()
+{
+ addNodesToBucketDB(bucketId, "1=0");
+
+ sendRemove();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ "timestamp 100) => 1"),
+ _sender.getLastCommand());
+
+ replyToMessage(*op, -1, 34);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("RemoveReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 100, removed doc from 34) ReturnCode(NONE)"),
+ _sender.getLastReply());
+}
+
+void
+RemoveOperationTest::testNotFound()
+{
+ addNodesToBucketDB(bucketId, "1=0");
+
+ sendRemove();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ "timestamp 100) => 1"),
+ _sender.getLastCommand());
+
+ replyToMessage(*op, -1, 0);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("RemoveReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 100, not found) ReturnCode(NONE)"),
+ _sender.getLastReply());
+}
+
+void
+RemoveOperationTest::testStorageFailure()
+{
+ addNodesToBucketDB(bucketId, "1=0");
+
+ sendRemove();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ "timestamp 100) => 1"),
+ _sender.getLastCommand());
+
+ sendReply(*op, -1, api::ReturnCode::INTERNAL_FAILURE);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("RemoveReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 100, not found) ReturnCode(INTERNAL_FAILURE)"),
+ _sender.getLastReply());
+}
+
+void
+RemoveOperationTest::testNotInDB()
+{
+ sendRemove();
+
+ CPPUNIT_ASSERT_EQUAL(std::string("RemoveReply(BucketId(0x0000000000000000), "
+ "doc:test:uri, timestamp 100, not found) ReturnCode(NONE)"),
+ _sender.getLastReply());
+}
+
+void
+RemoveOperationTest::testMultipleCopies()
+{
+ addNodesToBucketDB(bucketId, "1=0, 2=0, 3=0");
+
+ sendRemove();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ "timestamp 100) => 1,"
+ "Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ "timestamp 100) => 2,"
+ "Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ "timestamp 100) => 3"),
+ _sender.getCommands(true, true));
+
+ replyToMessage(*op, 0, 34);
+ replyToMessage(*op, 1, 34);
+ replyToMessage(*op, 2, 75);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("RemoveReply(BucketId(0x0000000000000000), "
+ "doc:test:uri, timestamp 100, removed doc from 75) ReturnCode(NONE)"),
+ _sender.getLastReply());
+}
+
+void
+RemoveOperationTest::canSendRemoveWhenAllReplicaNodesRetired()
+{
+ _distributor->enableClusterState(
+ lib::ClusterState("distributor:1 storage:1 .0.s:r"));
+ addNodesToBucketDB(bucketId, "0=123");
+ sendRemove();
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ "timestamp 100) => 0"),
+ _sender.getLastCommand());
+}
+
+} // distributor
+} // storage
diff --git a/storage/src/tests/distributor/simplebucketprioritydatabasetest.cpp b/storage/src/tests/distributor/simplebucketprioritydatabasetest.cpp
new file mode 100644
index 00000000000..a066649477c
--- /dev/null
+++ b/storage/src/tests/distributor/simplebucketprioritydatabasetest.cpp
@@ -0,0 +1,143 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <string>
+#include <vespa/storage/distributor/maintenance/simplebucketprioritydatabase.h>
+
+namespace storage {
+
+namespace distributor {
+
+using document::BucketId;
+typedef MaintenancePriority Priority;
+
+class SimpleBucketPriorityDatabaseTest : public CppUnit::TestFixture {
+ CPPUNIT_TEST_SUITE(SimpleBucketPriorityDatabaseTest);
+ CPPUNIT_TEST(testIteratorRangeIsEqualOnEmptyDatabase);
+ CPPUNIT_TEST(testCanGetPrioritizedBucket);
+ CPPUNIT_TEST(testIterateOverMultiplePriorities);
+ CPPUNIT_TEST(testMultipleSetPriorityForOneBucket);
+ CPPUNIT_TEST(testIterateOverMultipleBucketsWithMultiplePriorities);
+ CPPUNIT_TEST(testNoMaintenanceNeededClearsBucketFromDatabase);
+ CPPUNIT_TEST_SUITE_END();
+
+ typedef SimpleBucketPriorityDatabase::const_iterator const_iterator;
+
+public:
+ void testIteratorRangeIsEqualOnEmptyDatabase();
+ void testCanGetPrioritizedBucket();
+ void testIterateOverMultiplePriorities();
+ void testMultipleSetPriorityForOneBucket();
+ void testIterateOverMultipleBucketsWithMultiplePriorities();
+ void testNoMaintenanceNeededClearsBucketFromDatabase();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(SimpleBucketPriorityDatabaseTest);
+
+void
+SimpleBucketPriorityDatabaseTest::testIteratorRangeIsEqualOnEmptyDatabase()
+{
+ SimpleBucketPriorityDatabase queue;
+ const_iterator begin(queue.begin());
+ const_iterator end(queue.end());
+
+ CPPUNIT_ASSERT(begin == end);
+ CPPUNIT_ASSERT(begin == begin);
+ CPPUNIT_ASSERT(end == end);
+}
+
+void
+SimpleBucketPriorityDatabaseTest::testCanGetPrioritizedBucket()
+{
+ SimpleBucketPriorityDatabase queue;
+
+ PrioritizedBucket lowPriBucket(BucketId(16, 1234), Priority::VERY_LOW);
+ queue.setPriority(lowPriBucket);
+
+ PrioritizedBucket highest(*queue.begin());
+ CPPUNIT_ASSERT_EQUAL(lowPriBucket, highest);
+}
+
+void
+SimpleBucketPriorityDatabaseTest::testIterateOverMultiplePriorities()
+{
+ SimpleBucketPriorityDatabase queue;
+
+ PrioritizedBucket lowPriBucket(BucketId(16, 1234), Priority::LOW);
+ PrioritizedBucket highPriBucket(BucketId(16, 4321), Priority::HIGH);
+ queue.setPriority(lowPriBucket);
+ queue.setPriority(highPriBucket);
+
+ const_iterator iter(queue.begin());
+ CPPUNIT_ASSERT_EQUAL(highPriBucket, *iter);
+ ++iter;
+ CPPUNIT_ASSERT(iter != queue.end());
+ CPPUNIT_ASSERT_EQUAL(lowPriBucket, *iter);
+ ++iter;
+ CPPUNIT_ASSERT(iter == queue.end());
+}
+
+void
+SimpleBucketPriorityDatabaseTest::testMultipleSetPriorityForOneBucket()
+{
+ SimpleBucketPriorityDatabase queue;
+
+ PrioritizedBucket lowPriBucket(BucketId(16, 1234), Priority::LOW);
+ PrioritizedBucket highPriBucket(BucketId(16, 1234), Priority::HIGH);
+
+ queue.setPriority(lowPriBucket);
+ queue.setPriority(highPriBucket);
+
+ const_iterator iter(queue.begin());
+ CPPUNIT_ASSERT_EQUAL(highPriBucket, *iter);
+ ++iter;
+ CPPUNIT_ASSERT(iter == queue.end());
+}
+
+void
+SimpleBucketPriorityDatabaseTest::testNoMaintenanceNeededClearsBucketFromDatabase()
+{
+ SimpleBucketPriorityDatabase queue;
+
+ PrioritizedBucket highPriBucket(BucketId(16, 1234), Priority::HIGH);
+ PrioritizedBucket noPriBucket(BucketId(16, 1234),
+ Priority::NO_MAINTENANCE_NEEDED);
+ queue.setPriority(highPriBucket);
+ queue.setPriority(noPriBucket);
+
+ const_iterator iter(queue.begin());
+ CPPUNIT_ASSERT(iter == queue.end());
+}
+
+void
+SimpleBucketPriorityDatabaseTest::testIterateOverMultipleBucketsWithMultiplePriorities()
+{
+ SimpleBucketPriorityDatabase queue;
+
+ PrioritizedBucket lowPriBucket1(BucketId(16, 1), Priority::LOW);
+ PrioritizedBucket lowPriBucket2(BucketId(16, 2), Priority::LOW);
+ PrioritizedBucket mediumPriBucket(BucketId(16, 3), Priority::MEDIUM);
+ PrioritizedBucket highPriBucket1(BucketId(16, 4), Priority::HIGH);
+ PrioritizedBucket highPriBucket2(BucketId(16, 5), Priority::HIGH);
+
+ queue.setPriority(highPriBucket1);
+ queue.setPriority(lowPriBucket2);
+ queue.setPriority(mediumPriBucket);
+ queue.setPriority(highPriBucket2);
+ queue.setPriority(lowPriBucket1);
+
+ const_iterator iter(queue.begin());
+ PrioritizedBucket lastBucket(BucketId(), Priority::PRIORITY_LIMIT);
+ for (int i = 0; i < 5; ++i) {
+ CPPUNIT_ASSERT(iter != queue.end());
+ CPPUNIT_ASSERT(!iter->moreImportantThan(lastBucket));
+ lastBucket = *iter;
+ ++iter;
+ }
+ CPPUNIT_ASSERT(iter == queue.end());
+}
+
+}
+}
+
diff --git a/storage/src/tests/distributor/simplemaintenancescannertest.cpp b/storage/src/tests/distributor/simplemaintenancescannertest.cpp
new file mode 100644
index 00000000000..512a10bbd9a
--- /dev/null
+++ b/storage/src/tests/distributor/simplemaintenancescannertest.cpp
@@ -0,0 +1,220 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/storage/distributor/maintenance/simplemaintenancescanner.h>
+#include <vespa/storage/distributor/maintenance/simplebucketprioritydatabase.h>
+#include <vespa/storage/distributor/bucketdb/mapbucketdatabase.h>
+#include <tests/distributor/maintenancemocks.h>
+
+#include <string>
+#include <sstream>
+#include <memory>
+#include <algorithm>
+#include <iterator>
+
+namespace storage {
+
+namespace distributor {
+
+using document::BucketId;
+typedef MaintenancePriority Priority;
+
+class SimpleMaintenanceScannerTest : public CppUnit::TestFixture {
+ CPPUNIT_TEST_SUITE(SimpleMaintenanceScannerTest);
+ CPPUNIT_TEST(testPrioritizeSingleBucket);
+ CPPUNIT_TEST(testPrioritizeMultipleBuckets);
+ CPPUNIT_TEST(testPendingMaintenanceOperationStatistics);
+ CPPUNIT_TEST(perNodeMaintenanceStatsAreTracked);
+ CPPUNIT_TEST(testReset);
+ CPPUNIT_TEST_SUITE_END();
+
+ using PendingStats = SimpleMaintenanceScanner::PendingMaintenanceStats;
+
+ std::string dumpPriorityDbToString(const BucketPriorityDatabase&) const;
+
+ std::unique_ptr<MockMaintenancePriorityGenerator> _priorityGenerator;
+ std::unique_ptr<MapBucketDatabase> _bucketDb;
+ std::unique_ptr<SimpleBucketPriorityDatabase> _priorityDb;
+ std::unique_ptr<SimpleMaintenanceScanner> _scanner;
+
+ void addBucketToDb(int bucketNum);
+
+ bool scanEntireDatabase(int expected);
+
+ std::string stringifyGlobalPendingStats(const PendingStats&) const;
+
+public:
+ void testPrioritizeSingleBucket();
+ void testPrioritizeMultipleBuckets();
+ void testPendingMaintenanceOperationStatistics();
+ void perNodeMaintenanceStatsAreTracked();
+ void testReset();
+
+ void setUp();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(SimpleMaintenanceScannerTest);
+
+void
+SimpleMaintenanceScannerTest::setUp()
+{
+ _priorityGenerator.reset(new MockMaintenancePriorityGenerator());
+ _bucketDb.reset(new MapBucketDatabase());
+ _priorityDb.reset(new SimpleBucketPriorityDatabase());
+ _scanner.reset(new SimpleMaintenanceScanner(*_priorityDb, *_priorityGenerator, *_bucketDb));
+}
+
+void
+SimpleMaintenanceScannerTest::addBucketToDb(int bucketNum)
+{
+ BucketDatabase::Entry entry(BucketId(16, bucketNum), BucketInfo());
+ _bucketDb->update(entry);
+}
+
+std::string
+SimpleMaintenanceScannerTest::stringifyGlobalPendingStats(
+ const PendingStats& stats) const
+{
+ std::ostringstream ss;
+ ss << stats.global;
+ return ss.str();
+}
+
+void
+SimpleMaintenanceScannerTest::testPrioritizeSingleBucket()
+{
+ addBucketToDb(1);
+ std::string expected("PrioritizedBucket(BucketId(0x4000000000000001), pri VERY_HIGH)\n");
+
+ CPPUNIT_ASSERT(!_scanner->scanNext().isDone());
+ CPPUNIT_ASSERT_EQUAL(expected, _priorityDb->toString());
+
+ CPPUNIT_ASSERT(_scanner->scanNext().isDone());
+ CPPUNIT_ASSERT_EQUAL(expected, _priorityDb->toString());
+}
+
+namespace {
+ std::string sortLines(const std::string& source) {
+ vespalib::StringTokenizer st(source,"\n","");
+ std::vector<std::string> lines;
+ std::copy(st.begin(), st.end(), std::back_inserter(lines));
+ std::sort(lines.begin(), lines.end());
+ std::ostringstream ost;
+ for (auto& line : lines) {
+ ost << line << "\n";
+ }
+ return ost.str();
+ }
+}
+
+void
+SimpleMaintenanceScannerTest::testPrioritizeMultipleBuckets()
+{
+ addBucketToDb(1);
+ addBucketToDb(2);
+ addBucketToDb(3);
+ std::string expected("PrioritizedBucket(BucketId(0x4000000000000001), pri VERY_HIGH)\n"
+ "PrioritizedBucket(BucketId(0x4000000000000002), pri VERY_HIGH)\n"
+ "PrioritizedBucket(BucketId(0x4000000000000003), pri VERY_HIGH)\n");
+
+ CPPUNIT_ASSERT(scanEntireDatabase(3));
+ CPPUNIT_ASSERT_EQUAL(sortLines(expected),
+ sortLines(_priorityDb->toString()));
+}
+
+bool
+SimpleMaintenanceScannerTest::scanEntireDatabase(int expected)
+{
+ for (int i = 0; i < expected; ++i) {
+ if (_scanner->scanNext().isDone()) {
+ return false;
+ }
+ }
+ return _scanner->scanNext().isDone();
+}
+
+void
+SimpleMaintenanceScannerTest::testReset()
+{
+ addBucketToDb(1);
+ addBucketToDb(3);
+
+ CPPUNIT_ASSERT(scanEntireDatabase(2));
+ std::string expected("PrioritizedBucket(BucketId(0x4000000000000001), pri VERY_HIGH)\n"
+ "PrioritizedBucket(BucketId(0x4000000000000003), pri VERY_HIGH)\n");
+ CPPUNIT_ASSERT_EQUAL(expected, _priorityDb->toString());
+
+ addBucketToDb(2);
+ CPPUNIT_ASSERT(scanEntireDatabase(0));
+ CPPUNIT_ASSERT_EQUAL(expected, _priorityDb->toString());
+
+ _scanner->reset();
+ CPPUNIT_ASSERT(scanEntireDatabase(3));
+
+ expected = "PrioritizedBucket(BucketId(0x4000000000000001), pri VERY_HIGH)\n"
+ "PrioritizedBucket(BucketId(0x4000000000000002), pri VERY_HIGH)\n"
+ "PrioritizedBucket(BucketId(0x4000000000000003), pri VERY_HIGH)\n";
+ CPPUNIT_ASSERT_EQUAL(sortLines(expected), sortLines(_priorityDb->toString()));
+}
+
+void
+SimpleMaintenanceScannerTest::testPendingMaintenanceOperationStatistics()
+{
+ addBucketToDb(1);
+ addBucketToDb(3);
+
+ std::string expectedEmpty("delete bucket: 0, merge bucket: 0, "
+ "split bucket: 0, join bucket: 0, "
+ "set bucket state: 0, garbage collection: 0");
+ {
+ auto stats(_scanner->getPendingMaintenanceStats());
+ CPPUNIT_ASSERT_EQUAL(expectedEmpty, stringifyGlobalPendingStats(stats));
+ }
+
+ CPPUNIT_ASSERT(scanEntireDatabase(2));
+
+ // All mock operations generated have the merge type.
+ {
+ auto stats(_scanner->getPendingMaintenanceStats());
+ std::string expected("delete bucket: 0, merge bucket: 2, "
+ "split bucket: 0, join bucket: 0, "
+ "set bucket state: 0, garbage collection: 0");
+ CPPUNIT_ASSERT_EQUAL(expected, stringifyGlobalPendingStats(stats));
+ }
+
+ _scanner->reset();
+ {
+ auto stats(_scanner->getPendingMaintenanceStats());
+ CPPUNIT_ASSERT_EQUAL(expectedEmpty, stringifyGlobalPendingStats(stats));
+ }
+}
+
+void
+SimpleMaintenanceScannerTest::perNodeMaintenanceStatsAreTracked()
+{
+ addBucketToDb(1);
+ addBucketToDb(3);
+ {
+ auto stats(_scanner->getPendingMaintenanceStats());
+ NodeMaintenanceStats emptyStats;
+ CPPUNIT_ASSERT_EQUAL(emptyStats, stats.perNodeStats.forNode(0));
+ }
+ CPPUNIT_ASSERT(scanEntireDatabase(2));
+ // Mock is currently hardwired to increment movingOut for node 1 and
+ // copyingIn for node 2 per bucket iterated (we've got 2).
+ auto stats(_scanner->getPendingMaintenanceStats());
+ {
+ NodeMaintenanceStats wantedNode1Stats;
+ wantedNode1Stats.movingOut = 2;
+ CPPUNIT_ASSERT_EQUAL(wantedNode1Stats, stats.perNodeStats.forNode(1));
+ }
+ {
+ NodeMaintenanceStats wantedNode2Stats;
+ wantedNode2Stats.copyingIn = 2;
+ CPPUNIT_ASSERT_EQUAL(wantedNode2Stats, stats.perNodeStats.forNode(2));
+ }
+}
+
+}
+}
diff --git a/storage/src/tests/distributor/splitbuckettest.cpp b/storage/src/tests/distributor/splitbuckettest.cpp
new file mode 100644
index 00000000000..d0fa69d600e
--- /dev/null
+++ b/storage/src/tests/distributor/splitbuckettest.cpp
@@ -0,0 +1,353 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <cppunit/extensions/HelperMacros.h>
+#include <iomanip>
+#include <iostream>
+#include <memory>
+#include <tests/common/dummystoragelink.h>
+#include <vespa/storageapi/message/bucketsplitting.h>
+#include <vespa/storage/distributor/operations/idealstate/splitoperation.h>
+#include <vespa/storage/common/bucketmessages.h>
+#include <vespa/document/base/documentid.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storage/distributor/idealstatemanager.h>
+#include <vespa/storageapi/message/multioperation.h>
+#include <tests/distributor/distributortestutil.h>
+
+using std::shared_ptr;
+using namespace document;
+
+namespace storage {
+
+namespace distributor {
+
+class SplitOperationTest : public CppUnit::TestFixture,
+ public DistributorTestUtil
+{
+ CPPUNIT_TEST_SUITE(SplitOperationTest);
+ CPPUNIT_TEST(testSimple);
+ CPPUNIT_TEST(testMultiNodeFailure);
+ CPPUNIT_TEST(testCopyTrustedStatusNotCarriedOverAfterSplit);
+ CPPUNIT_TEST(testOperationBlockedByPendingJoin);
+ CPPUNIT_TEST_SUITE_END();
+
+ uint32_t splitByteSize;
+ uint32_t tooLargeBucketSize;
+ uint32_t splitCount;
+ uint32_t maxSplitBits;
+
+protected:
+ void testSimple();
+ void testMultiNodeFailure();
+ void testCopyTrustedStatusNotCarriedOverAfterSplit();
+ void testOperationBlockedByPendingJoin();
+
+public:
+ SplitOperationTest();
+
+ void setUp() {
+ createLinks();
+ getConfig().setSplitCount(splitCount);
+ getConfig().setSplitSize(splitByteSize);
+
+ }
+
+ void tearDown() {
+ close();
+ }
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(SplitOperationTest);
+
+SplitOperationTest::SplitOperationTest()
+ : splitByteSize(10*1024*1024),
+ tooLargeBucketSize(splitByteSize * 1.1),
+ splitCount(UINT32_MAX),
+ maxSplitBits(58)
+{
+}
+
+void
+SplitOperationTest::testSimple()
+{
+ _distributor->enableClusterState(
+ lib::ClusterState("distributor:1 storage:1"));
+
+ insertBucketInfo(document::BucketId(16, 1), 0, 0xabc, 1000,
+ tooLargeBucketSize, 250);
+
+ SplitOperation op("storage",
+ BucketAndNodes(document::BucketId(16, 1),
+ toVector<uint16_t>(0)),
+ maxSplitBits,
+ splitCount,
+ splitByteSize);
+
+ op.setIdealStateManager(&getIdealStateManager());
+ op.start(_sender, framework::MilliSecTime(0));
+
+ {
+ CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.commands.size());
+
+ std::shared_ptr<api::StorageCommand> msg = _sender.commands[0];
+ CPPUNIT_ASSERT(msg->getType() == api::MessageType::SPLITBUCKET);
+ CPPUNIT_ASSERT_EQUAL(
+ api::StorageMessageAddress("storage", lib::NodeType::STORAGE, 0)
+ .toString(),
+ msg->getAddress()->toString());
+
+ std::shared_ptr<api::StorageReply> reply(msg->makeReply().release());
+ api::SplitBucketReply* sreply(
+ static_cast<api::SplitBucketReply*>(reply.get()));
+
+ sreply->getSplitInfo().push_back(api::SplitBucketReply::Entry(
+ document::BucketId(17, 1),
+ api::BucketInfo(100, 600, 5000000)));
+
+ sreply->getSplitInfo().push_back(api::SplitBucketReply::Entry(
+ document::BucketId(17, 0x10001),
+ api::BucketInfo(110, 400, 6000000)));
+
+ op.receive(_sender, reply);
+ }
+
+ CPPUNIT_ASSERT(!getBucket(document::BucketId(16, 1)).valid());
+
+ {
+ BucketDatabase::Entry entry = getBucket(document::BucketId(17, 1));
+
+ CPPUNIT_ASSERT(entry.valid());
+ CPPUNIT_ASSERT_EQUAL((uint16_t)0, entry->getNodeRef(0).getNode());
+ CPPUNIT_ASSERT_EQUAL((uint32_t)100, entry->getNodeRef(0).getChecksum());
+ CPPUNIT_ASSERT_EQUAL((uint32_t)5000000,
+ entry->getNodeRef(0).getTotalDocumentSize());
+ CPPUNIT_ASSERT_EQUAL((uint32_t)600,
+ entry->getNodeRef(0).getDocumentCount());
+ }
+
+ {
+ BucketDatabase::Entry entry(getBucket(document::BucketId(17, 0x10001)));
+
+ CPPUNIT_ASSERT(entry.valid());
+ CPPUNIT_ASSERT_EQUAL((uint16_t)0, entry->getNodeRef(0).getNode());
+ CPPUNIT_ASSERT_EQUAL((uint32_t)110, entry->getNodeRef(0).getChecksum());
+ CPPUNIT_ASSERT_EQUAL((uint32_t)6000000,
+ entry->getNodeRef(0).getTotalDocumentSize());
+ CPPUNIT_ASSERT_EQUAL((uint32_t)400,
+ entry->getNodeRef(0).getDocumentCount());
+ }
+}
+
+void
+SplitOperationTest::testMultiNodeFailure()
+{
+ {
+ BucketDatabase::Entry entry(document::BucketId(16, 1));
+
+ BucketCopy copy(0, 0, api::BucketInfo(250, 1000, tooLargeBucketSize));
+ entry->addNode(copy, toVector<uint16_t>(0));
+
+ entry->addNode(BucketCopy(0, 1, copy.getBucketInfo()),
+ toVector<uint16_t>(0));
+ getBucketDatabase().update(entry);
+ }
+
+ _distributor->enableClusterState(
+ lib::ClusterState("distributor:1 storage:2"));
+
+
+ SplitOperation op("storage",
+ BucketAndNodes(document::BucketId(16, 1),
+ toVector<uint16_t>(0,1)),
+ maxSplitBits,
+ splitCount,
+ splitByteSize);
+
+ op.setIdealStateManager(&getIdealStateManager());
+ op.start(_sender, framework::MilliSecTime(0));
+
+ {
+ CPPUNIT_ASSERT_EQUAL((size_t)2, _sender.commands.size());
+
+ {
+ std::shared_ptr<api::StorageCommand> msg = _sender.commands[0];
+ CPPUNIT_ASSERT(msg->getType() == api::MessageType::SPLITBUCKET);
+ CPPUNIT_ASSERT_EQUAL(
+ api::StorageMessageAddress("storage",
+ lib::NodeType::STORAGE, 0).toString(),
+ msg->getAddress()->toString());
+
+ api::SplitBucketReply* sreply(
+ static_cast<api::SplitBucketReply*>(
+ msg->makeReply().release()));
+ sreply->setResult(api::ReturnCode::OK);
+
+ sreply->getSplitInfo().push_back(api::SplitBucketReply::Entry(
+ document::BucketId(17, 1),
+ api::BucketInfo(100, 600, 5000000)));
+
+ sreply->getSplitInfo().push_back(api::SplitBucketReply::Entry(
+ document::BucketId(17, 0x10001),
+ api::BucketInfo(110, 400, 6000000)));
+
+ op.receive(_sender, std::shared_ptr<api::StorageReply>(sreply));
+ }
+
+ sendReply(op, 1, api::ReturnCode::NOT_CONNECTED);
+ }
+
+ {
+ BucketDatabase::Entry entry = getBucket(document::BucketId(16, 1));
+
+ CPPUNIT_ASSERT(entry.valid());
+ CPPUNIT_ASSERT_EQUAL((uint32_t)1, entry->getNodeCount());
+
+ CPPUNIT_ASSERT_EQUAL((uint16_t)1, entry->getNodeRef(0).getNode());
+ CPPUNIT_ASSERT_EQUAL((uint32_t)250, entry->getNodeRef(0).getChecksum());
+ CPPUNIT_ASSERT_EQUAL(tooLargeBucketSize,
+ entry->getNodeRef(0).getTotalDocumentSize());
+ CPPUNIT_ASSERT_EQUAL((uint32_t)1000,
+ entry->getNodeRef(0).getDocumentCount());
+ }
+
+ {
+ BucketDatabase::Entry entry = getBucket(document::BucketId(17, 1));
+
+ CPPUNIT_ASSERT(entry.valid());
+ CPPUNIT_ASSERT_EQUAL((uint32_t)1, entry->getNodeCount());
+
+ CPPUNIT_ASSERT_EQUAL((uint16_t)0, entry->getNodeRef(0).getNode());
+ CPPUNIT_ASSERT_EQUAL((uint32_t)100, entry->getNodeRef(0).getChecksum());
+ CPPUNIT_ASSERT_EQUAL((uint32_t)5000000,
+ entry->getNodeRef(0).getTotalDocumentSize());
+ CPPUNIT_ASSERT_EQUAL((uint32_t)600,
+ entry->getNodeRef(0).getDocumentCount());
+ }
+
+ {
+ BucketDatabase::Entry entry(getBucket(document::BucketId(17, 0x10001)));
+
+ CPPUNIT_ASSERT(entry.valid());
+ CPPUNIT_ASSERT_EQUAL((uint32_t)1, entry->getNodeCount());
+
+ CPPUNIT_ASSERT_EQUAL((uint16_t)0, entry->getNodeRef(0).getNode());
+ CPPUNIT_ASSERT_EQUAL((uint32_t)110, entry->getNodeRef(0).getChecksum());
+ CPPUNIT_ASSERT_EQUAL((uint32_t)6000000,
+ entry->getNodeRef(0).getTotalDocumentSize());
+ CPPUNIT_ASSERT_EQUAL((uint32_t)400,
+ entry->getNodeRef(0).getDocumentCount());
+ }
+}
+
+void
+SplitOperationTest::testCopyTrustedStatusNotCarriedOverAfterSplit()
+{
+ _distributor->enableClusterState(
+ lib::ClusterState("distributor:1 storage:2"));
+
+ document::BucketId sourceBucket(16, 1);
+ /*
+ * Need 3 nodes to reproduce bug 6418516. Otherwise, the source bucket is
+ * left with only 1 copy which implicitly becomes trusted. When this copy
+ * is then split, the distributor db will automatically un-trust all buckets
+ * since it sees that multiple copies are trusted that are not consistent
+ * with each other. This prevents the bug from being visible.
+ */
+ addNodesToBucketDB(sourceBucket, "0=150/20/30000000/t,1=450/50/60000/u,"
+ "2=550/60/70000");
+
+ SplitOperation op("storage",
+ BucketAndNodes(sourceBucket, toVector<uint16_t>(0, 1)),
+ maxSplitBits,
+ splitCount,
+ splitByteSize);
+
+ op.setIdealStateManager(&getIdealStateManager());
+ op.start(_sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(size_t(3), _sender.commands.size());
+
+ std::vector<document::BucketId> childBuckets;
+ childBuckets.push_back(document::BucketId(17, 1));
+ childBuckets.push_back(document::BucketId(17, 0x10001));
+
+ // Note: only 2 out of 3 requests replied to!
+ for (int i = 0; i < 2; ++i) {
+ std::shared_ptr<api::StorageCommand> msg = _sender.commands[i];
+ CPPUNIT_ASSERT(msg->getType() == api::MessageType::SPLITBUCKET);
+ std::shared_ptr<api::StorageReply> reply(msg->makeReply().release());
+ api::SplitBucketReply* sreply(
+ static_cast<api::SplitBucketReply*>(reply.get()));
+
+ // Make sure copies differ so they cannot become implicitly trusted.
+ sreply->getSplitInfo().push_back(api::SplitBucketReply::Entry(
+ childBuckets[0],
+ api::BucketInfo(100 + i, 600, 5000000)));
+ sreply->getSplitInfo().push_back(api::SplitBucketReply::Entry(
+ childBuckets[1],
+ api::BucketInfo(110 + i, 400, 6000000)));
+
+ op.receive(_sender, reply);
+ }
+
+ CPPUNIT_ASSERT(getBucket(sourceBucket).valid()); // Still alive
+
+ for (uint32_t i = 0; i < 2; ++i) {
+ BucketDatabase::Entry entry(getBucket(childBuckets[i]));
+
+ CPPUNIT_ASSERT(entry.valid());
+ CPPUNIT_ASSERT_EQUAL(size_t(2), entry->getNodes().size());
+
+ for (uint16_t j = 0; j < 2; ++j) {
+ CPPUNIT_ASSERT(!entry->getNodeRef(i).trusted());
+ }
+ }
+}
+
+void
+SplitOperationTest::testOperationBlockedByPendingJoin()
+{
+ StorageComponentRegisterImpl compReg;
+ framework::defaultimplementation::FakeClock clock;
+ compReg.setClock(clock);
+ clock.setAbsoluteTimeInSeconds(1);
+ PendingMessageTracker tracker(compReg);
+
+ _distributor->enableClusterState(
+ lib::ClusterState("distributor:1 storage:2"));
+
+ document::BucketId joinTarget(2, 1);
+ std::vector<document::BucketId> joinSources = {
+ document::BucketId(3, 1), document::BucketId(3, 5)
+ };
+ auto joinCmd = std::make_shared<api::JoinBucketsCommand>(joinTarget);
+ joinCmd->getSourceBuckets() = joinSources;
+ joinCmd->setAddress(
+ api::StorageMessageAddress("storage", lib::NodeType::STORAGE, 0));
+
+ tracker.insert(joinCmd);
+
+ insertBucketInfo(joinTarget, 0, 0xabc, 1000, 1234, 250);
+
+ SplitOperation op("storage",
+ BucketAndNodes(joinTarget, toVector<uint16_t>(0)),
+ maxSplitBits,
+ splitCount,
+ splitByteSize);
+
+ CPPUNIT_ASSERT(op.isBlocked(tracker));
+
+ // Now, pretend there's a join for another node in the same bucket. This
+ // will happen when a join is partially completed.
+ tracker.clearMessagesForNode(0);
+ CPPUNIT_ASSERT(!op.isBlocked(tracker));
+
+ joinCmd->setAddress(
+ api::StorageMessageAddress("storage", lib::NodeType::STORAGE, 1));
+ tracker.insert(joinCmd);
+
+ CPPUNIT_ASSERT(op.isBlocked(tracker));
+}
+
+} // distributor
+} // storage
diff --git a/storage/src/tests/distributor/statecheckerstest.cpp b/storage/src/tests/distributor/statecheckerstest.cpp
new file mode 100644
index 00000000000..da444b9d22a
--- /dev/null
+++ b/storage/src/tests/distributor/statecheckerstest.cpp
@@ -0,0 +1,1838 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <tests/common/dummystoragelink.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storage/distributor/bucketdbupdater.h>
+#include <vespa/document/fieldvalue/document.h>
+#include <vespa/storage/common/bucketmessages.h>
+#include <vespa/storage/config/config-stor-distributormanager.h>
+#include <vespa/storage/distributor/idealstatemanager.h>
+#include <vespa/storage/distributor/operations/idealstate/mergeoperation.h>
+#include <vespa/storage/distributor/operations/idealstate/removebucketoperation.h>
+#include <vespa/storage/distributor/operations/idealstate/setbucketstateoperation.h>
+#include <vespa/storage/distributor/operations/idealstate/splitoperation.h>
+#include <vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h>
+#include <vespa/storageapi/message/stat.h>
+#include <vespa/storage/storageutil/utils.h>
+#include <tests/distributor/distributortestutil.h>
+#include <vespa/storage/distributor/statecheckers.h>
+#include <vespa/storageapi/message/state.h>
+
+using namespace std::literals::string_literals;
+
+namespace storage {
+namespace distributor {
+
+struct StateCheckersTest : public CppUnit::TestFixture,
+ public DistributorTestUtil
+{
+ StateCheckersTest() {}
+
+ void setUp() {
+ createLinks();
+ }
+
+ void tearDown() {
+ close();
+ }
+
+ struct PendingMessage
+ {
+ uint32_t _msgType;
+ uint8_t _pri;
+
+ PendingMessage() : _msgType(UINT32_MAX), _pri(0) {}
+
+ PendingMessage(uint32_t msgType, uint8_t pri)
+ : _msgType(msgType), _pri(pri) {}
+
+ bool shouldCheck() const { return _msgType != UINT32_MAX; }
+ };
+
+ void testSplit();
+ void testInconsistentSplit();
+ void splitCanBeScheduledWhenReplicasOnRetiredNodes();
+ void testSynchronizeAndMove();
+ void testDoNotMergeInconsistentlySplitBuckets();
+ void doNotMoveReplicasWithinRetiredNodes();
+ void testDeleteExtraCopies();
+ void testDoNotDeleteActiveExtraCopies();
+ void testConsistentCopiesOnRetiredNodesMayBeDeleted();
+ void redundantCopyDeletedEvenWhenAllNodesRetired();
+ void testJoin();
+ void testDoNotJoinBelowClusterStateBitCount();
+ void testAllowInconsistentJoinInDifferingSiblingIdealState();
+ void testDoNotAllowInconsistentJoinWhenNotInIdealState();
+ void testDoNotAllowInconsistentJoinWhenConfigDisabled();
+ void testNoJoinWhenInvalidCopyExists();
+ void testNoJoinOnDifferentNodes();
+ void testNoJoinWhenCopyCountAboveRedundancyLevelsForLeftSibling();
+ void testNoJoinWhenCopyCountAboveRedundancyLevelsForRightSibling();
+ void testNoJoinWhenCopyCountAboveRedundancyLevelsForBothSiblings();
+ void joinCanBeScheduledWhenReplicasOnRetiredNodes();
+ void testBucketState();
+ void testDoNotActivateNonReadyCopiesWhenIdealNodeInMaintenance();
+ void testDoNotChangeActiveStateForInconsistentlySplitBuckets();
+ void testNoActiveChangeForNonIdealCopiesWhenOtherwiseIdentical();
+ void testBucketStatePerGroup();
+ void allowActivationOfRetiredNodes();
+ void inhibitBucketActivationIfDisabledInConfig();
+ void inhibitBucketDeactivationIfDisabledInConfig();
+ void retiredNodesOutOfSyncAreMerged();
+ void testGarbageCollection();
+ void gcInhibitedWhenIdealNodeInMaintenance();
+ void testNoRemoveWhenIdealNodeInMaintenance();
+ void testStepwiseJoinForSmallBucketsWithoutSiblings();
+ void testNoStepwiseJoinWhenDisabledThroughConfig();
+ void testNoStepwiseJoinWhenSingleSiblingTooLarge();
+ void testStepwiseJoinMaySkipMultipleBitsWhenConsistent();
+ void testStepwiseJoinDoesNotSkipBeyondLevelWithSibling();
+ void contextPopulatesIdealStateContainers();
+ void statsUpdatedWhenMergingDueToMove();
+ void statsUpdatedWhenMergingDueToMissingCopy();
+ void statsUpdatedWhenMergingDueToOutOfSyncCopies();
+
+ void enableClusterState(const lib::ClusterState& systemState) {
+ _distributor->enableClusterState(systemState);
+ }
+
+ void insertJoinableBuckets();
+
+ void assertCurrentIdealState(const document::BucketId& bucket,
+ const std::vector<uint16_t> expected)
+ {
+ std::vector<uint16_t> idealNodes(
+ getIdealStateManager().getDistributorComponent()
+ .getDistribution().getIdealStorageNodes(
+ getIdealStateManager().getDistributorComponent()
+ .getClusterState(),
+ bucket,
+ "ui"));
+ CPPUNIT_ASSERT_EQUAL(expected, idealNodes);
+ }
+
+ void enableInconsistentJoinInConfig(bool enabled);
+
+ std::string testStateChecker(
+ StateChecker& checker,
+ StateChecker::Context& c,
+ bool includeBucketId = false,
+ const PendingMessage& blocker = PendingMessage(),
+ bool includeMessagePriority = false,
+ bool includeSchedulingPriority = false)
+ {
+ std::ostringstream ost;
+
+ c.siblingBucket = getIdealStateManager().getDistributorComponent()
+ .getSibling(c.bucketId);
+
+ std::vector<BucketDatabase::Entry> entries;
+ getBucketDatabase().getAll(c.bucketId, entries);
+ c.siblingEntry = getBucketDatabase().get(c.siblingBucket);
+
+ c.entries = entries;
+ for (uint32_t j = 0; j < entries.size(); ++j) {
+ // Run checking only on this bucketid, but include all buckets
+ // owned by it or owners of it, so we can detect inconsistent split.
+ if (entries[j].getBucketId() == c.bucketId) {
+ c.entry = entries[j];
+
+ StateChecker::Result result(checker.check(c));
+ IdealStateOperation::UP op(result.createOperation());
+ if (op.get()) {
+ if (blocker.shouldCheck()
+ && op->shouldBlockThisOperation(blocker._msgType,
+ blocker._pri))
+ {
+ return "BLOCKED";
+ }
+
+ if (!ost.str().empty()) {
+ ost << ",";
+ }
+ if (includeBucketId) {
+ ost << op->getBucketId() << ": ";
+ }
+
+ ost << op->getDetailedReason();
+ if (includeMessagePriority) {
+ ost << " (pri "
+ << static_cast<int>(op->getPriority())
+ << ')';
+ }
+ if (includeSchedulingPriority) {
+ ost << " (scheduling pri "
+ << MaintenancePriority::toString(
+ result.getPriority().getPriority())
+ << ")";
+ }
+ }
+ }
+ }
+
+ if (ost.str().empty()) {
+ ost << "NO OPERATIONS GENERATED";
+ }
+
+ getBucketDatabase().clear();
+
+ return ost.str();
+ }
+
+ std::string testGarbageCollection(uint32_t prevTimestamp,
+ uint32_t nowTimestamp,
+ uint32_t checkInterval,
+ uint32_t lastChangeTime = 0,
+ bool includePriority = false);
+
+ std::string testSplit(uint32_t splitCount,
+ uint32_t splitSize,
+ uint32_t minSplitBits,
+ const std::string& bucketInfo,
+ const PendingMessage& blocker = PendingMessage(),
+ bool includePriority = false);
+
+ std::string testInconsistentSplit(const document::BucketId& bid,
+ bool includePriority = false);
+
+ std::string testJoin(uint32_t joinCount,
+ uint32_t joinSize,
+ uint32_t minSplitBits,
+ const document::BucketId& bid,
+ const PendingMessage& blocker = PendingMessage(),
+ bool includePriority = false);
+
+ struct CheckerParams {
+ std::string _bucketInfo;
+ std::string _clusterState {"distributor:1 storage:2"};
+ std::string _expect;
+ static const PendingMessage NO_OP_BLOCKER;
+ const PendingMessage* _blockerMessage {&NO_OP_BLOCKER};
+ uint32_t _redundancy {2};
+ uint32_t _splitCount {0};
+ uint32_t _splitSize {0};
+ uint32_t _minSplitBits {0};
+ bool _includeMessagePriority {false};
+ bool _includeSchedulingPriority {false};
+
+ CheckerParams& expect(const std::string& e) {
+ _expect = e;
+ return *this;
+ }
+ CheckerParams& bucketInfo(const std::string& info) {
+ _bucketInfo = info;
+ return *this;
+ }
+ CheckerParams& clusterState(const std::string& state) {
+ _clusterState = state;
+ return *this;
+ }
+ CheckerParams& blockerMessage(const PendingMessage& blocker) {
+ _blockerMessage = &blocker;
+ return *this;
+ }
+ CheckerParams& redundancy(uint32_t r) {
+ _redundancy = r;
+ return *this;
+ }
+ CheckerParams& includeMessagePriority(bool includePri) {
+ _includeMessagePriority = includePri;
+ return *this;
+ }
+ CheckerParams& includeSchedulingPriority(bool includePri) {
+ _includeSchedulingPriority = includePri;
+ return *this;
+ }
+ };
+
+ template <typename CheckerImpl>
+ void runAndVerify(const CheckerParams& params) {
+ CheckerImpl checker;
+
+ document::BucketId bid(17, 0);
+ addNodesToBucketDB(bid, params._bucketInfo);
+ setRedundancy(params._redundancy);
+ _distributor->enableClusterState(
+ lib::ClusterState(params._clusterState));
+ NodeMaintenanceStatsTracker statsTracker;
+ StateChecker::Context c(
+ getExternalOperationHandler(), statsTracker, bid);
+ std::string result = testStateChecker(
+ checker, c, false, *params._blockerMessage,
+ params._includeMessagePriority,
+ params._includeSchedulingPriority);
+ CPPUNIT_ASSERT_EQUAL(params._expect, result);
+ }
+
+ std::string testSynchronizeAndMove(
+ const std::string& bucketInfo,
+ const std::string& clusterState = "distributor:1 storage:2",
+ uint32_t redundancy = 2,
+ const PendingMessage& blocker = PendingMessage(),
+ bool includePriority = false);
+
+ std::string testDeleteExtraCopies(
+ const std::string& bucketInfo,
+ uint32_t redundancy = 2,
+ const PendingMessage& blocker = PendingMessage(),
+ const std::string& clusterState = "",
+ bool includePriority = false);
+
+ std::string testBucketState(const std::string& bucketInfo,
+ uint32_t redundancy = 2,
+ bool includePriority = false);
+ std::string testBucketStatePerGroup(const std::string& bucketInfo,
+ bool includePriority = false);
+
+ CPPUNIT_TEST_SUITE(StateCheckersTest);
+ CPPUNIT_TEST(testSplit);
+ CPPUNIT_TEST(testInconsistentSplit);
+ CPPUNIT_TEST(splitCanBeScheduledWhenReplicasOnRetiredNodes);
+ CPPUNIT_TEST(testSynchronizeAndMove);
+ CPPUNIT_TEST(testDoNotMergeInconsistentlySplitBuckets);
+ CPPUNIT_TEST(doNotMoveReplicasWithinRetiredNodes);
+ CPPUNIT_TEST(retiredNodesOutOfSyncAreMerged);
+ CPPUNIT_TEST(testDoNotChangeActiveStateForInconsistentlySplitBuckets);
+ CPPUNIT_TEST(testDeleteExtraCopies);
+ CPPUNIT_TEST(testDoNotDeleteActiveExtraCopies);
+ CPPUNIT_TEST(testConsistentCopiesOnRetiredNodesMayBeDeleted);
+ CPPUNIT_TEST(redundantCopyDeletedEvenWhenAllNodesRetired);
+ CPPUNIT_TEST(testJoin);
+ CPPUNIT_TEST(testDoNotJoinBelowClusterStateBitCount);
+ CPPUNIT_TEST(testAllowInconsistentJoinInDifferingSiblingIdealState);
+ CPPUNIT_TEST(testDoNotAllowInconsistentJoinWhenNotInIdealState);
+ CPPUNIT_TEST(testDoNotAllowInconsistentJoinWhenConfigDisabled);
+ CPPUNIT_TEST(testNoJoinWhenInvalidCopyExists);
+ CPPUNIT_TEST(testNoJoinOnDifferentNodes);
+ CPPUNIT_TEST(testNoJoinWhenCopyCountAboveRedundancyLevelsForLeftSibling);
+ CPPUNIT_TEST(testNoJoinWhenCopyCountAboveRedundancyLevelsForRightSibling);
+ CPPUNIT_TEST(testNoJoinWhenCopyCountAboveRedundancyLevelsForBothSiblings);
+ CPPUNIT_TEST(joinCanBeScheduledWhenReplicasOnRetiredNodes);
+ CPPUNIT_TEST(testBucketState);
+ CPPUNIT_TEST(testDoNotActivateNonReadyCopiesWhenIdealNodeInMaintenance);
+ CPPUNIT_TEST(testNoActiveChangeForNonIdealCopiesWhenOtherwiseIdentical);
+ CPPUNIT_TEST(testBucketStatePerGroup);
+ CPPUNIT_TEST(allowActivationOfRetiredNodes);
+ CPPUNIT_TEST(inhibitBucketActivationIfDisabledInConfig);
+ CPPUNIT_TEST(inhibitBucketDeactivationIfDisabledInConfig);
+ CPPUNIT_TEST(testGarbageCollection);
+ CPPUNIT_TEST(gcInhibitedWhenIdealNodeInMaintenance);
+ CPPUNIT_TEST(testNoRemoveWhenIdealNodeInMaintenance);
+ CPPUNIT_TEST(testStepwiseJoinForSmallBucketsWithoutSiblings);
+ CPPUNIT_TEST(testNoStepwiseJoinWhenDisabledThroughConfig);
+ CPPUNIT_TEST(testNoStepwiseJoinWhenSingleSiblingTooLarge);
+ CPPUNIT_TEST(testStepwiseJoinMaySkipMultipleBitsWhenConsistent);
+ CPPUNIT_TEST(testStepwiseJoinDoesNotSkipBeyondLevelWithSibling);
+ CPPUNIT_TEST(contextPopulatesIdealStateContainers);
+ CPPUNIT_TEST(statsUpdatedWhenMergingDueToMove);
+ CPPUNIT_TEST(statsUpdatedWhenMergingDueToMissingCopy);
+ CPPUNIT_TEST(statsUpdatedWhenMergingDueToOutOfSyncCopies);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(StateCheckersTest);
+
+const StateCheckersTest::PendingMessage
+StateCheckersTest::CheckerParams::NO_OP_BLOCKER;
+
+std::string StateCheckersTest::testSplit(uint32_t splitCount,
+ uint32_t splitSize,
+ uint32_t minSplitBits,
+ const std::string& bucketInfo,
+ const PendingMessage& blocker,
+ bool includePriority)
+{
+ document::BucketId bid(17, 0);
+
+ addNodesToBucketDB(bid, bucketInfo);
+
+ SplitBucketStateChecker checker;
+ NodeMaintenanceStatsTracker statsTracker;
+ StateChecker::Context c(getExternalOperationHandler(), statsTracker, bid);
+ getConfig().setSplitSize(splitSize);
+ getConfig().setSplitCount(splitCount);
+ getConfig().setMinimalBucketSplit(minSplitBits);
+ return testStateChecker(checker, c, false, blocker, includePriority);
+}
+
+
+
+// Exercises SplitBucketStateChecker via the testSplit(...) helper: a split
+// operation should be generated when the largest replica exceeds the
+// configured doc-count or byte-size limit, or when the cluster state demands
+// a higher minimum split bit count. Also verifies that pending SPLITBUCKET /
+// JOINBUCKETS messages block splits of equal or lower priority
+// (NOTE: assumes a configured split priority of 175).
+void
+StateCheckersTest::testSplit()
+{
+    setupDistributor(3, 10, "distributor:1 storage:2");
+
+    // Doc count (10) above splitCount limit (splitSize is unlimited).
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("[Splitting bucket because its maximum size (2000 b, 10 docs, 10 meta, 2000 b total) "
+                    "is higher than the configured limit of (1000, 4294967295)]"),
+        testSplit((uint32_t)-1, 1000, 16, "0=100/10/2000"));
+
+    // Byte size (1000) above splitSize limit; priority included in output.
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("[Splitting bucket because its maximum size (1000 b, "
+                    "200 docs, 200 meta, 1000 b total) "
+                    "is higher than the configured limit of (10000, 100)] "
+                    "(pri 175)"),
+        testSplit(100, 10000, 16, "0=100/200/1000", PendingMessage(), true));
+
+    // Under both limits: nothing to do.
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("NO OPERATIONS GENERATED"),
+        testSplit(1000, 1000, 16, "0=100/200/200"));
+
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("NO OPERATIONS GENERATED"),
+        testSplit(1000, 1000, 16, "0=100/200/200/2000/2000"));
+
+    // Bucket at 17 bits but minimum split bits configured to 24.
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("[Splitting bucket because the current system size requires "
+                    "a higher minimum split bit]"),
+        testSplit((uint32_t)-1, (uint32_t)-1, 24, "0=100/200/1000"));
+
+    // The *maximum* replica size across nodes decides, not the first copy.
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("[Splitting bucket because its maximum size (1000 b, 1000 docs, 1000 meta, 1000 b total) "
+                    "is higher than the configured limit of (10000, 100)]"),
+        testSplit(100, 10000, 16, "0=100/10/10,1=100/1000/1000"));
+
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("[Splitting bucket because its maximum size (1000 b, 1000 docs, 1000 meta, 1000 b total) "
+                    "is higher than the configured limit of (10000, 100)]"),
+        testSplit(100, 10000, 16, "0=1/0/0,1=100/1000/1000"));
+
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("[Splitting bucket because its maximum size (1000 b, 1000 docs, 1000 meta, 1000 b total) "
+                    "is higher than the configured limit of (10000, 100)]"),
+        testSplit(100, 10000, 16, "0=0/0/1,1=100/1000/1000"));
+
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("NO OPERATIONS GENERATED"),
+        testSplit(1000, 1000, 16, "0=100/1/200000"));
+
+    // A pending split (priority 0, i.e. higher than ours) blocks the checker.
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("BLOCKED"),
+        testSplit(100, 10000, 16, "0=0/0/1,1=100/1000/1000",
+                  PendingMessage(api::MessageType::SPLITBUCKET_ID, 0)));
+
+    // Split on too high meta
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("[Splitting bucket because its maximum size (1000 b, 100 docs, 2100 meta, 15000000 b total) "
+                    "is higher than the configured limit of (10000000, 1000)]"),
+        testSplit(1000, 10000000, 16, "0=14/100/1000/2100/15000000"));
+    // Split on too high file size
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("[Splitting bucket because its maximum size (1000 b, 100 docs, 1500 meta, 21000000 b total) "
+                    "is higher than the configured limit of (10000000, 1000)]"),
+        testSplit(1000, 10000000, 16, "0=14/100/1000/1500/21000000"));
+
+    // Don't block higher priority splits than what's already pending.
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("[Splitting bucket because its maximum size (1000 b, 1000 docs, 1000 meta, 1000 b total) "
+                    "is higher than the configured limit of (10000, 100)]"),
+        testSplit(100, 10000, 16, "0=100/10/10,1=100/1000/1000",
+                  PendingMessage(api::MessageType::SPLITBUCKET_ID, 255)));
+
+    // But must block equal priority splits that are already pending, or
+    // we'll end up spamming the nodes with splits!
+    // NOTE: assuming split priority of 175.
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("BLOCKED"),
+        testSplit(100, 10000, 16, "0=0/0/1,1=100/1000/1000",
+                  PendingMessage(api::MessageType::SPLITBUCKET_ID, 175)));
+
+    // Don't split if we're already joining, since there's a window of time
+    // where the bucket will appear to be inconsistently split when the join
+    // is not finished on all the nodes.
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("BLOCKED"),
+        testSplit(100, 10000, 16, "0=0/0/1,1=100/1000/1000",
+                  PendingMessage(api::MessageType::JOINBUCKETS_ID, 175)));
+}
+
+// Helper: runs SplitInconsistentStateChecker on the given bucket and returns
+// the checker's result rendered as a string (includes "(pri N)" suffix when
+// includePriority is set). Failure conditions are checked against sibling
+// buckets already present in the bucket DB (third arg 'true' to
+// testStateChecker includes the bucket id in the output).
+std::string
+StateCheckersTest::testInconsistentSplit(const document::BucketId& bid,
+                                         bool includePriority)
+{
+    SplitInconsistentStateChecker checker;
+    NodeMaintenanceStatsTracker statsTracker;
+    StateChecker::Context c(getExternalOperationHandler(), statsTracker, bid);
+    return testStateChecker(checker, c, true,
+                            PendingMessage(), includePriority);
+}
+
+// A split-to-fix-inconsistency operation should only be generated for the
+// *least split* bucket (16 bits here) when both a 16-bit and a 17-bit
+// version coexist in the DB — never for the more-split 17-bit bucket.
+void
+StateCheckersTest::testInconsistentSplit()
+{
+    setupDistributor(3, 10, "distributor:1 storage:2");
+
+    // Only one bucket present: nothing is inconsistent.
+    insertBucketInfo(document::BucketId(16, 1), 1, 0x1, 1, 1);
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("NO OPERATIONS GENERATED"),
+        testInconsistentSplit(document::BucketId(16, 1)));
+
+    insertBucketInfo(document::BucketId(17, 1), 1, 0x1, 1, 1);
+    insertBucketInfo(document::BucketId(16, 1), 1, 0x1, 1, 1);
+
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("BucketId(0x4000000000000001): [Bucket is inconsistently "
+                    "split (list includes 0x4000000000000001, 0x4400000000000001) "
+                    "Splitting it to improve the problem (max used bits 17)]"),
+        testInconsistentSplit(document::BucketId(16, 1)));
+
+    // The 17-bit sibling itself must not trigger an operation.
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("NO OPERATIONS GENERATED"),
+        testInconsistentSplit(document::BucketId(17, 1)));
+
+    // Same situation with priority included in the rendered result.
+    insertBucketInfo(document::BucketId(17, 1), 0, 0x0, 0, 0);
+    insertBucketInfo(document::BucketId(16, 1), 1, 0x1, 1, 1);
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("BucketId(0x4000000000000001): [Bucket is inconsistently "
+                    "split (list includes 0x4000000000000001, 0x4400000000000001) "
+                    "Splitting it to improve the problem (max used bits "
+                    "17)] (pri 110)"),
+        testInconsistentSplit(document::BucketId(16, 1), true));
+
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("NO OPERATIONS GENERATED"),
+        testInconsistentSplit(document::BucketId(17, 1)));
+}
+
+// Retired nodes still hold and serve data, so an oversized bucket whose
+// replicas all live on retired nodes must still be eligible for splitting.
+void
+StateCheckersTest::splitCanBeScheduledWhenReplicasOnRetiredNodes()
+{
+    setupDistributor(Redundancy(2), NodeCount(2),
+                     "distributor:1 storage:2, .0.s:r .1.s:r");
+    CPPUNIT_ASSERT_EQUAL(
+            "[Splitting bucket because its maximum size (2000 b, 10 docs, "
+            "10 meta, 2000 b total) is higher than the configured limit of "
+            "(1000, 4294967295)]"s,
+            testSplit(UINT32_MAX, 1000, 16, "0=100/10/2000"));
+}
+
+// Helper: configures join limits (joinCount docs / joinSize bytes /
+// minSplitBits) and runs JoinBucketsStateChecker on the given bucket,
+// returning the rendered checker result. Sibling candidates are taken from
+// whatever is currently in the bucket DB.
+std::string
+StateCheckersTest::testJoin(uint32_t joinCount,
+                            uint32_t joinSize,
+                            uint32_t minSplitBits,
+                            const document::BucketId& bid,
+                            const PendingMessage& blocker,
+                            bool includePriority)
+{
+    JoinBucketsStateChecker checker;
+    getConfig().setJoinSize(joinSize);
+    getConfig().setJoinCount(joinCount);
+    getConfig().setMinimalBucketSplit(minSplitBits);
+
+    NodeMaintenanceStatsTracker statsTracker;
+    StateChecker::Context c(getExternalOperationHandler(), statsTracker, bid);
+    return testStateChecker(checker, c, true, blocker, includePriority);
+}
+
+// Inserts a pair of small 33-bit sibling buckets (1 doc / 1 byte each) on
+// node 1, i.e. the canonical join candidates used by the join tests below.
+void
+StateCheckersTest::insertJoinableBuckets()
+{
+    insertBucketInfo(document::BucketId(33, 1), 1, 0x1, 1, 1);
+    insertBucketInfo(document::BucketId(33, 0x100000001), 1, 0x1, 1, 1);
+}
+
+// Exercises JoinBucketsStateChecker: siblings below the configured
+// (size, doc-count) limits are joined; joins are suppressed for the
+// non-primary sibling, when min split bits forbid it, when metadata is too
+// large, or when a bucket was only recently created.
+void
+StateCheckersTest::testJoin()
+{
+    setupDistributor(3, 10, "distributor:1 storage:2");
+
+    insertJoinableBuckets();
+    CPPUNIT_ASSERT_EQUAL(std::string(
+            "BucketId(0x8000000000000001): "
+            "[Joining buckets BucketId(0x8400000000000001) and "
+            "BucketId(0x8400000100000001) because their size "
+            "(2 bytes, 2 docs) is less than the configured limit "
+            "of (100, 10)"),
+            testJoin(10, 100, 16, document::BucketId(33, 1)));
+
+    insertJoinableBuckets();
+    // Join size is 0, so only look at document count
+    CPPUNIT_ASSERT_EQUAL(std::string(
+            "BucketId(0x8000000000000001): "
+            "[Joining buckets BucketId(0x8400000000000001) and "
+            "BucketId(0x8400000100000001) because their size "
+            "(2 bytes, 2 docs) is less than the configured limit "
+            "of (0, 3) (pri 155)"),
+            testJoin(3, 0, 16, document::BucketId(33, 1), PendingMessage(), true));
+
+    insertJoinableBuckets();
+    // Should not generate joins for both pairs, just the primary
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("NO OPERATIONS GENERATED"),
+            testJoin(10, 100, 16, document::BucketId(33, 0x100000001)));
+
+    insertJoinableBuckets();
+    // Should not generate join if min split bits is higher
+    CPPUNIT_ASSERT_EQUAL(std::string("NO OPERATIONS GENERATED"),
+                         testJoin(10, 100, 33, document::BucketId(33, 1)));
+
+    insertJoinableBuckets();
+    // Meta data too big, no join
+    insertBucketInfo(document::BucketId(33, 1), 1,
+                     api::BucketInfo(0x1, 1, 1, 1000, 1000));
+
+    CPPUNIT_ASSERT_EQUAL(std::string("NO OPERATIONS GENERATED"),
+                         testJoin(10, 100, 16, document::BucketId(33, 1)));
+
+    insertJoinableBuckets();
+    // Bucket recently created
+    insertBucketInfo(document::BucketId(33, 1), 1,
+                     api::BucketInfo(0x1, 0, 0, 0, 0));
+    CPPUNIT_ASSERT_EQUAL(std::string("NO OPERATIONS GENERATED"),
+                         testJoin(10, 100, 16, document::BucketId(33, 1)));
+
+}
+
+/**
+ * If distributor config says minsplitcount is 8, but cluster state says that
+ * distribution bit count is 16, we should not allow the join to take place.
+ * We don't properly handle the "reduce distribution bits" case in general, so
+ * the safest is to never violate this and to effectively make distribution
+ * bit increases a one-way street.
+ */
+void
+StateCheckersTest::testDoNotJoinBelowClusterStateBitCount()
+{
+    // "bits:16" in the state string sets the cluster distribution bit count.
+    setupDistributor(2, 2, "bits:16 distributor:1 storage:2");
+    // Insert sibling buckets at 16 bits that are small enough to be joined
+    // unless there is special logic for dealing with distribution bits.
+    insertBucketInfo(document::BucketId(16, 1), 1, 0x1, 1, 1);
+    insertBucketInfo(document::BucketId(16, (1 << 15) | 1), 1, 0x1, 1, 1);
+    using ConfiguredMinSplitBits = uint32_t;
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("NO OPERATIONS GENERATED"),
+            testJoin(100, 100, ConfiguredMinSplitBits(8),
+                     document::BucketId(16, 1)));
+}
+
+// Toggles the enableInconsistentJoin distributor config flag by applying a
+// fresh config snapshot with only that field explicitly set.
+void
+StateCheckersTest::enableInconsistentJoinInConfig(bool enabled)
+{
+    vespa::config::content::core::StorDistributormanagerConfigBuilder config;
+    config.enableInconsistentJoin = enabled;
+    getConfig().configure(config);
+}
+
+// With enableInconsistentJoin on, siblings whose ideal nodes differ (because
+// one sibling's ideal disk is down) may still be force-joined; a subsequent
+// merge will reconcile replica placement.
+void
+StateCheckersTest::testAllowInconsistentJoinInDifferingSiblingIdealState()
+{
+    // Normally, bucket siblings have an ideal state on the same node in order
+    // to enable joining these back together. However, the ideal disks assigned
+    // may differ and it's sufficient for a sibling bucket's ideal disk to be
+    // down on the node of its other sibling for it to be assigned a different
+    // node. In this case, there's no other way to get buckets joined back
+    // together than if we allow bucket replicas to get temporarily out of sync
+    // by _forcing_ a join across all replicas no matter their placement.
+    // This will trigger a merge to reconcile and move the new bucket copies to
+    // their ideal location.
+    setupDistributor(2, 3, "distributor:1 storage:3 .0.d:20 .0.d.14.s:d .2.d:20");
+    document::BucketId sibling1(33, 0x000000001); // ideal disk 14 on node 0
+    document::BucketId sibling2(33, 0x100000001); // ideal disk 1 on node 0
+
+    // Full node sequence sorted by score for sibling(1|2) is [0, 2, 1].
+    // Node 0 cannot be used, so use 1 instead.
+    assertCurrentIdealState(sibling1, {2, 1});
+    assertCurrentIdealState(sibling2, {0, 2});
+
+    // Place each sibling's replicas on its (differing) ideal nodes.
+    insertBucketInfo(sibling1, 2, 0x1, 2, 3);
+    insertBucketInfo(sibling1, 1, 0x1, 2, 3);
+    insertBucketInfo(sibling2, 0, 0x1, 2, 3);
+    insertBucketInfo(sibling2, 2, 0x1, 2, 3);
+
+    enableInconsistentJoinInConfig(true);
+
+    CPPUNIT_ASSERT_EQUAL(std::string(
+            "BucketId(0x8000000000000001): "
+            "[Joining buckets BucketId(0x8400000000000001) and "
+            "BucketId(0x8400000100000001) because their size "
+            "(6 bytes, 4 docs) is less than the configured limit "
+            "of (100, 10)"),
+            testJoin(10, 100, 16, sibling1));
+}
+
+// Inconsistent joins are only allowed when all sibling replicas sit on their
+// ideal nodes; here sibling2 has a replica on a non-ideal node (1), so no
+// join may be generated even with the config flag enabled.
+void
+StateCheckersTest::testDoNotAllowInconsistentJoinWhenNotInIdealState()
+{
+    setupDistributor(2, 4, "distributor:1 storage:4 .0.d:20 .0.d.14.s:d .2.d:20 .3.d:20");
+    document::BucketId sibling1(33, 0x000000001);
+    document::BucketId sibling2(33, 0x100000001);
+
+    assertCurrentIdealState(sibling1, {3, 2});
+    assertCurrentIdealState(sibling2, {3, 0});
+
+    insertBucketInfo(sibling1, 3, 0x1, 2, 3);
+    insertBucketInfo(sibling1, 2, 0x1, 2, 3);
+    insertBucketInfo(sibling2, 3, 0x1, 2, 3);
+    insertBucketInfo(sibling2, 1, 0x1, 2, 3); // not in ideal state
+
+    enableInconsistentJoinInConfig(true);
+
+    CPPUNIT_ASSERT_EQUAL(std::string("NO OPERATIONS GENERATED"),
+                         testJoin(10, 100, 16, sibling1));
+}
+
+// Same scenario as testAllowInconsistentJoinInDifferingSiblingIdealState,
+// but with enableInconsistentJoin off — no join may be generated.
+void
+StateCheckersTest::testDoNotAllowInconsistentJoinWhenConfigDisabled()
+{
+    setupDistributor(2, 3, "distributor:1 storage:3 .0.d:20 .0.d.14.s:d .2.d:20");
+    document::BucketId sibling1(33, 0x000000001); // ideal disk 14 on node 0
+    document::BucketId sibling2(33, 0x100000001); // ideal disk 1 on node 0
+
+    // Full node sequence sorted by score for sibling(1|2) is [0, 2, 1].
+    // Node 0 cannot be used, so use 1 instead.
+    assertCurrentIdealState(sibling1, {2, 1});
+    assertCurrentIdealState(sibling2, {0, 2});
+
+    insertBucketInfo(sibling1, 2, 0x1, 2, 3);
+    insertBucketInfo(sibling1, 1, 0x1, 2, 3);
+    insertBucketInfo(sibling2, 0, 0x1, 2, 3);
+    insertBucketInfo(sibling2, 2, 0x1, 2, 3);
+
+    enableInconsistentJoinInConfig(false);
+
+    CPPUNIT_ASSERT_EQUAL(std::string("NO OPERATIONS GENERATED"),
+                         testJoin(10, 100, 16, sibling1));
+}
+
+// A sibling with an invalid (default-constructed) BucketInfo replica must
+// suppress the join — we cannot know its real size/doc count.
+void
+StateCheckersTest::testNoJoinWhenInvalidCopyExists()
+{
+    setupDistributor(3, 10, "distributor:1 storage:3");
+
+    insertBucketInfo(document::BucketId(33, 0x100000001), 1, 0x1, 1, 1);
+    // No join when there exists an invalid copy
+    insertBucketInfo(document::BucketId(33, 1), 1, api::BucketInfo());
+
+    CPPUNIT_ASSERT_EQUAL(std::string("NO OPERATIONS GENERATED"),
+                         testJoin(10, 100, 16, document::BucketId(33, 1)));
+}
+
+// Siblings stored on different nodes (0 vs 1) cannot be joined in place.
+void
+StateCheckersTest::testNoJoinOnDifferentNodes()
+{
+    setupDistributor(3, 10, "distributor:1 storage:2");
+
+    insertBucketInfo(document::BucketId(33, 0x000000001), 0, 0x1, 1, 1);
+    insertBucketInfo(document::BucketId(33, 0x100000001), 1, 0x1, 1, 1);
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("NO OPERATIONS GENERATED"),
+            testJoin(10, 100, 16, document::BucketId(33, 0x1)));
+}
+
+// With redundancy 1, the left sibling having 2 replicas (more than
+// redundancy) must suppress the join.
+void
+StateCheckersTest::testNoJoinWhenCopyCountAboveRedundancyLevelsForLeftSibling()
+{
+    setupDistributor(3, 10, "distributor:1 storage:2");
+    setRedundancy(1);
+    insertBucketInfo(document::BucketId(33, 0x000000001), 0, 0x1, 1, 1);
+    insertBucketInfo(document::BucketId(33, 0x000000001), 1, 0x1, 1, 1);
+    insertBucketInfo(document::BucketId(33, 0x100000001), 0, 0x1, 1, 1);
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("NO OPERATIONS GENERATED"),
+            testJoin(10, 100, 16, document::BucketId(33, 0x1)));
+}
+
+// Mirror of the left-sibling case: the right sibling having more replicas
+// than the redundancy level (1) must suppress the join.
+void
+StateCheckersTest::testNoJoinWhenCopyCountAboveRedundancyLevelsForRightSibling()
+{
+    setupDistributor(3, 10, "distributor:1 storage:2");
+    setRedundancy(1);
+    insertBucketInfo(document::BucketId(33, 0x000000001), 1, 0x1, 1, 1);
+    insertBucketInfo(document::BucketId(33, 0x100000001), 0, 0x1, 1, 1);
+    insertBucketInfo(document::BucketId(33, 0x100000001), 1, 0x1, 1, 1);
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("NO OPERATIONS GENERATED"),
+            testJoin(10, 100, 16, document::BucketId(33, 0x1)));
+}
+
+// Both siblings exceeding the redundancy level (1) must likewise suppress
+// the join.
+void
+StateCheckersTest::testNoJoinWhenCopyCountAboveRedundancyLevelsForBothSiblings()
+{
+    setupDistributor(3, 10, "distributor:1 storage:2");
+    setRedundancy(1);
+    insertBucketInfo(document::BucketId(33, 0x000000001), 0, 0x1, 1, 1);
+    insertBucketInfo(document::BucketId(33, 0x000000001), 1, 0x1, 1, 1);
+    insertBucketInfo(document::BucketId(33, 0x100000001), 0, 0x1, 1, 1);
+    insertBucketInfo(document::BucketId(33, 0x100000001), 1, 0x1, 1, 1);
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("NO OPERATIONS GENERATED"),
+            testJoin(10, 100, 16, document::BucketId(33, 0x1)));
+}
+
+// Helper: populates a fixed 17-bit bucket with the given replica layout,
+// applies the cluster state and redundancy, then runs
+// SynchronizeAndMoveStateChecker and returns its rendered result.
+std::string
+StateCheckersTest::testSynchronizeAndMove(const std::string& bucketInfo,
+                                          const std::string& clusterState,
+                                          uint32_t redundancy,
+                                          const PendingMessage& blocker,
+                                          bool includePriority)
+{
+    document::BucketId bid(17, 0);
+
+    addNodesToBucketDB(bid, bucketInfo);
+
+    SynchronizeAndMoveStateChecker checker;
+    setRedundancy(redundancy);
+
+    _distributor->enableClusterState(lib::ClusterState(clusterState));
+    NodeMaintenanceStatsTracker statsTracker;
+    StateChecker::Context c(getExternalOperationHandler(), statsTracker, bid);
+    return testStateChecker(checker, c, false, blocker, includePriority);
+}
+
+// Exercises SynchronizeAndMoveStateChecker: out-of-sync checksums trigger
+// merges, out-of-ideal-state replicas trigger moves, and missing redundancy
+// triggers "adding missing node". Invalid replicas suppress operations.
+void
+StateCheckersTest::testSynchronizeAndMove()
+{
+    // Plus if it was more obvious which nodes were in ideal state for various
+    // cluster states. (One possibility to override ideal state function for
+    // test)
+    runAndVerify<SynchronizeAndMoveStateChecker>(
+        CheckerParams().expect(
+            "[Synchronizing buckets with different checksums "
+            "node(idx=0,crc=0x1,docs=1/1,bytes=1/1,trusted=false,"
+            "active=false), "
+            "node(idx=1,crc=0x2,docs=2/2,bytes=2/2,trusted=false,"
+            "active=false)] "
+            "(scheduling pri MEDIUM)")
+        .bucketInfo("0=1,1=2")
+        .includeSchedulingPriority(true));
+
+    // If 1+ nodes in ideal state is in maintenance, do nothing
+    runAndVerify<SynchronizeAndMoveStateChecker>(
+        CheckerParams()
+            .expect("NO OPERATIONS GENERATED")
+            .bucketInfo("0=1,2=2")
+            .clusterState("distributor:1 storage:3 .1.s:m"));
+
+    // One replica outside ideal state: generate a low-priority move.
+    runAndVerify<SynchronizeAndMoveStateChecker>(
+        CheckerParams()
+            .expect("[Moving bucket to ideal node 3] "
+                    "(scheduling pri VERY_LOW)")
+            .bucketInfo("0=1,1=1,2=1")
+            .clusterState("distributor:1 storage:4")
+            .includeSchedulingPriority(true));
+
+    // Not doing anything in ideal state
+    runAndVerify<SynchronizeAndMoveStateChecker>(
+        CheckerParams()
+            .expect("NO OPERATIONS GENERATED")
+            .bucketInfo("0=1,1=1,3=1")
+            .clusterState("distributor:1 storage:4"));
+
+    // Both copies out of ideal state
+    runAndVerify<SynchronizeAndMoveStateChecker>(
+        CheckerParams()
+            .expect("[Moving bucket to ideal node 1]"
+                    "[Moving bucket to ideal node 3] (pri 165) "
+                    "(scheduling pri VERY_LOW)")
+            .clusterState("distributor:1 storage:5")
+            .bucketInfo("0=1,4=1,5=1")
+            .includeMessagePriority(true)
+            .includeSchedulingPriority(true));
+
+    // Too little redundancy and out of ideal state. Note that in this case,
+    // the non-ideal node is reported as a missing node and not with a "Moving
+    // bucket to ideal node" reason.
+    runAndVerify<SynchronizeAndMoveStateChecker>(
+        CheckerParams()
+            .expect("[Adding missing node 1]"
+                    "[Adding missing node 3] (pri 120) "
+                    "(scheduling pri MEDIUM)")
+            .bucketInfo("0=1")
+            .clusterState("distributor:1 storage:5")
+            .includeMessagePriority(true)
+            .includeSchedulingPriority(true));
+
+    // Synchronizing even when ideal state is in sync
+    runAndVerify<SynchronizeAndMoveStateChecker>(
+        CheckerParams()
+            .expect("[Synchronizing buckets with different checksums "
+                    "node(idx=0,crc=0x3,docs=3/3,bytes=3/3,trusted=false,"
+                    "active=false), "
+                    "node(idx=1,crc=0x3,docs=3/3,bytes=3/3,trusted=false,"
+                    "active=false), "
+                    "node(idx=2,crc=0x0,docs=0/0,bytes=0/0,trusted=false,"
+                    "active=false)]")
+            .bucketInfo("0=3,1=3,2=0")
+            .clusterState("distributor:1 storage:3"));
+
+    // Synchronize even when we have >= redundancy trusted copies and ideal
+    // nodes are in sync.
+    runAndVerify<SynchronizeAndMoveStateChecker>(
+        CheckerParams()
+            .expect("[Synchronizing buckets with different checksums "
+                    "node(idx=0,crc=0x2,docs=3/3,bytes=4/4,trusted=false,"
+                    "active=false), "
+                    "node(idx=1,crc=0x1,docs=2/2,bytes=3/3,trusted=true,"
+                    "active=false), "
+                    "node(idx=2,crc=0x1,docs=2/2,bytes=3/3,trusted=true,"
+                    "active=false), "
+                    "node(idx=3,crc=0x1,docs=2/2,bytes=3/3,trusted=true,"
+                    "active=false)] "
+                    "(pri 120) (scheduling pri MEDIUM)")
+            .bucketInfo("0=2/3/4,1=1/2/3/t,2=1/2/3/t,3=1/2/3/t")
+            .clusterState("distributor:1 storage:5")
+            .includeMessagePriority(true)
+            .includeSchedulingPriority(true));
+
+    // Not doing anything if one of the buckets in ideal state is invalid
+    // but we have redundancy coverage otherwise
+    runAndVerify<SynchronizeAndMoveStateChecker>(
+        CheckerParams()
+            .expect("NO OPERATIONS GENERATED")
+            .bucketInfo("1=0/0/1,3=1")
+            .clusterState("distributor:1 storage:4"));
+
+    // Not doing anything if all copies we have are invalid
+    runAndVerify<SynchronizeAndMoveStateChecker>(
+        CheckerParams()
+            .expect("NO OPERATIONS GENERATED")
+            .bucketInfo("1=0/0/1,3=0/0/1")
+            .clusterState("distributor:1 storage:4"));
+
+    // Not doing anything if we have < redundancy copies but all existing
+    // copies are invalid.
+    runAndVerify<SynchronizeAndMoveStateChecker>(
+        CheckerParams()
+            .expect("NO OPERATIONS GENERATED")
+            .bucketInfo("1=0/0/1")
+            .clusterState("distributor:1 storage:4"));
+}
+
+// Inconsistently split buckets must never be merged (see body comment); the
+// checker is run on both a leaf and an inner-node bucket of the split tree.
+void
+StateCheckersTest::testDoNotMergeInconsistentlySplitBuckets()
+{
+    // No merge generated if buckets are inconsistently split.
+    // This matches the case where a bucket has been split into 2 on one
+    // node and is not yet split on another; we should never try to merge
+    // either two of the split leaf buckets back onto the first node!
+    // Running state checker on a leaf:
+    addNodesToBucketDB(document::BucketId(16, 0), "0=2");
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("NO OPERATIONS GENERATED"),
+            testSynchronizeAndMove("1=1", // 17 bits
+                                   "distributor:1 storage:4"));
+    // Running state checker on an inner node bucket:
+    addNodesToBucketDB(document::BucketId(18, 0), "0=2");
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("NO OPERATIONS GENERATED"),
+            testSynchronizeAndMove("0=1", // 17 bits
+                                   "distributor:1 storage:4"));
+}
+
+// When every node in the cluster is retired there is no better placement
+// available, so no move operations should be generated.
+void
+StateCheckersTest::doNotMoveReplicasWithinRetiredNodes()
+{
+    // Nodes 1 and 3 would be in ideal state if the nodes were not retired.
+    // Here, all nodes are retired and we should thus not do any sort of
+    // moving.
+    runAndVerify<SynchronizeAndMoveStateChecker>(
+        CheckerParams()
+            .expect("NO OPERATIONS GENERATED")
+            .bucketInfo("0=2,1=2")
+            .clusterState("distributor:1 storage:4 "
+                          ".0.s:r .1.s:r .2.s:r .3.s:r"));
+}
+
+// Even with all nodes retired, out-of-sync replicas should still be merged
+// with each other (no move target exists, but syncing is still useful).
+void
+StateCheckersTest::retiredNodesOutOfSyncAreMerged()
+{
+    // Normally, we'd do a merge that'd move the bucket to new nodes, leaving
+    // the out of sync retired nodes as source-only replicas. But here we
+    // don't have that choice and thus try to do the most useful thing we can
+    // with what we have available to us (which is to try to get things in
+    // sync).
+    runAndVerify<SynchronizeAndMoveStateChecker>(
+        CheckerParams()
+            .expect("[Synchronizing buckets with different checksums "
+                    "node(idx=0,crc=0x1,docs=1/1,bytes=1/1,trusted=false,"
+                    "active=false), "
+                    "node(idx=1,crc=0x2,docs=2/2,bytes=2/2,trusted=false,"
+                    "active=false)]")
+            .bucketInfo("0=1,1=2")
+            .clusterState("distributor:1 storage:4 "
+                          ".0.s:r .1.s:r .2.s:r .3.s:r"));
+}
+
+// Helper: populates a fixed 17-bit bucket, optionally applies a cluster
+// state, then runs DeleteExtraCopiesStateChecker and returns its rendered
+// result.
+std::string
+StateCheckersTest::testDeleteExtraCopies(
+        const std::string& bucketInfo, uint32_t redundancy,
+        const PendingMessage& blocker,
+        const std::string& clusterState,
+        bool includePriority)
+{
+    document::BucketId bid(17, 0);
+
+    addNodesToBucketDB(bid, bucketInfo);
+    setRedundancy(redundancy);
+
+    // Empty state string means: keep whatever state is already enabled.
+    if (!clusterState.empty()) {
+        _distributor->enableClusterState(lib::ClusterState(clusterState));
+    }
+    DeleteExtraCopiesStateChecker checker;
+    NodeMaintenanceStatsTracker statsTracker;
+    StateChecker::Context c(getExternalOperationHandler(), statsTracker, bid);
+    return testStateChecker(checker, c, false, blocker, includePriority);
+}
+
+
+// Exercises DeleteExtraCopiesStateChecker: redundant in-sync and empty
+// replicas are removed, empty buckets are fully cleaned up, while
+// out-of-sync, recently-created, or meta-carrying copies are kept. A pending
+// persistence operation blocks deletion.
+void
+StateCheckersTest::testDeleteExtraCopies()
+{
+    setupDistributor(2, 100, "distributor:1 storage:4");
+
+    {
+        // Sanity check the ideal-node computation the assertions below rely
+        // on: for bucket (17, 0) the ideal nodes are expected to be {1, 3}.
+        std::vector<uint16_t> idealNodes(
+                getIdealStateManager().getDistributorComponent()
+                .getDistribution().getIdealStorageNodes(
+                        getIdealStateManager().getDistributorComponent().getClusterState(),
+                        document::BucketId(17, 0),
+                        "ui"));
+        std::vector<uint16_t> wanted;
+        wanted.push_back(1);
+        wanted.push_back(3);
+        CPPUNIT_ASSERT_EQUAL(wanted, idealNodes);
+    }
+
+    CPPUNIT_ASSERT_EQUAL_MSG(
+            "Remove empty buckets",
+            std::string("[Removing all copies since bucket is empty:node(idx=0,crc=0x0,"
+                        "docs=0/0,bytes=0/0,trusted=false,active=false)]"
+                        " (pri 100)"),
+            testDeleteExtraCopies("0=0", 2, PendingMessage(), "", true));
+
+    CPPUNIT_ASSERT_EQUAL_MSG(
+            "Remove extra trusted copy",
+            std::string("[Removing redundant in-sync copy from node 2]"),
+            testDeleteExtraCopies("3=3/3/3/t,1=3/3/3/t,2=3/3/3/t"));
+
+    CPPUNIT_ASSERT_EQUAL_MSG(
+            "Redundant copies in sync can be removed without trusted being a "
+            "factor of consideration. Ideal state copy not removed.",
+            std::string("[Removing redundant in-sync copy from node 2]"),
+            testDeleteExtraCopies("3=3/3/3,1=3/3/3/t,2=3/3/3/t"));
+
+    CPPUNIT_ASSERT_EQUAL_MSG(
+            "Need redundancy number of copies",
+            std::string("NO OPERATIONS GENERATED"),
+            testDeleteExtraCopies("0=3,1=3"));
+
+    CPPUNIT_ASSERT_EQUAL_MSG(
+            "Do not remove extra copies without enough trusted copies",
+            std::string("NO OPERATIONS GENERATED"),
+            testDeleteExtraCopies("0=0/0/1,1=3,2=3"));
+
+    CPPUNIT_ASSERT_EQUAL_MSG(
+            "Do not remove buckets that have meta entries",
+            std::string("NO OPERATIONS GENERATED"),
+            testDeleteExtraCopies("0=0/0/1,1=0/0/1"));
+
+    CPPUNIT_ASSERT_EQUAL_MSG(
+            "Do not remove any recently created copies",
+            std::string("NO OPERATIONS GENERATED"),
+            testDeleteExtraCopies("0=1/0/0/t,1=1/0/0/t,2=1/0/0/t"));
+
+    CPPUNIT_ASSERT_EQUAL_MSG(
+            "Do not remove untrusted copy that is out of sync",
+            std::string("NO OPERATIONS GENERATED"),
+            testDeleteExtraCopies("0=2/3/4,1=1/2/3/t,2=1/2/3/t"));
+
+    CPPUNIT_ASSERT_EQUAL_MSG(
+            "Do not remove out of sync copies, even if we have more than #"
+            "redundancy trusted copies",
+            std::string("NO OPERATIONS GENERATED"),
+            testDeleteExtraCopies("0=2/3/4,1=1/2/3/t,2=1/2/3/t,3=1/2/3/t"));
+
+    CPPUNIT_ASSERT_EQUAL_MSG(
+            "Don't remove unless we have enough trusted "
+            "copies to satisfy redundancy",
+            std::string("NO OPERATIONS GENERATED"),
+            testDeleteExtraCopies("0=2/3/4,1=1/2/3,2=2/3/4,3=1/2/3"));
+
+    CPPUNIT_ASSERT_EQUAL_MSG(
+            "Only remove empty copies unless all other copies are in sync",
+            std::string("[Removing empty copy from node 4]"),
+            testDeleteExtraCopies("0=2/3/4,1=1/2/3,2=2/3/4,3=1/2/3,4=0/0/0"));
+
+    CPPUNIT_ASSERT_EQUAL_MSG(
+            "Remove redundant empty copy",
+            std::string("[Removing empty copy from node 0]"),
+            testDeleteExtraCopies("1=2/3,3=1/2/3,0=0/0/0"));
+
+    CPPUNIT_ASSERT_EQUAL_MSG(
+            "Remove empty bucket with multiple copies",
+            std::string(
+                "[Removing all copies since bucket is empty:"
+                "node(idx=0,crc=0x0,docs=0/0,bytes=0/0,trusted=false,active=false), "
+                "node(idx=1,crc=0x0,docs=0/0,bytes=0/0,trusted=false,active=false), "
+                "node(idx=2,crc=0x0,docs=0/0,bytes=0/0,trusted=false,active=false)]"),
+            testDeleteExtraCopies("0=0/0/0,1=0/0/0,2=0/0/0"));
+
+    CPPUNIT_ASSERT_EQUAL_MSG(
+            "Pending persistence operation blocks delete",
+            std::string("BLOCKED"),
+            testDeleteExtraCopies("0=0/0/0,1=1/2/3/t,2=1/2/3/t",
+                                  2,
+                                  PendingMessage(api::MessageType::PUT_ID, 255)));
+}
+
+// An active replica ("/a" flag) must never be deleted, even if it is an
+// otherwise redundant in-sync copy.
+void
+StateCheckersTest::testDoNotDeleteActiveExtraCopies()
+{
+    setupDistributor(2, 100, "distributor:1 storage:4");
+
+    CPPUNIT_ASSERT_EQUAL_MSG(
+            "Do not delete redundant copy if it is marked active",
+            std::string("NO OPERATIONS GENERATED"),
+            testDeleteExtraCopies("3=3/3/3/t,1=3/3/3/t,2=3/3/3/t/a"));
+}
+
+// A redundant in-sync copy may be removed even when it lives on a retired
+// node (node 1 is retired here and is the one chosen for removal).
+void
+StateCheckersTest::testConsistentCopiesOnRetiredNodesMayBeDeleted()
+{
+    setupDistributor(2, 100, "distributor:1 storage:4 .1.s:r");
+
+    CPPUNIT_ASSERT_EQUAL_MSG(
+            "Remove in-sync copy on node that is retired",
+            std::string("[Removing redundant in-sync copy from node 1]"),
+            testDeleteExtraCopies("3=3/3/3/t,1=3/3/3/t,2=3/3/3/t"));
+}
+
+// Redundant-copy deletion must also work when every node in the cluster is
+// retired — retirement does not exempt a node from cleanup.
+void
+StateCheckersTest::redundantCopyDeletedEvenWhenAllNodesRetired()
+{
+    setupDistributor(2, 100, "distributor:1 storage:4 "
+                     ".0.s:r .1.s:r .2.s:r .3.s:r");
+
+    CPPUNIT_ASSERT_EQUAL_MSG(
+            "Remove in-sync copy on node that is retired",
+            "[Removing redundant in-sync copy from node 2]"s,
+            testDeleteExtraCopies("3=3/3/3/t,1=3/3/3/t,2=3/3/3/t"));
+}
+
+// Helper: populates a fixed 17-bit bucket with the given replica layout and
+// runs BucketStateStateChecker (activation/deactivation), returning its
+// rendered result.
+std::string StateCheckersTest::testBucketState(
+        const std::string& bucketInfo, uint32_t redundancy,
+        bool includePriority)
+{
+    document::BucketId bid(17, 0);
+    setRedundancy(redundancy);
+    addNodesToBucketDB(bid, bucketInfo);
+
+    BucketStateStateChecker checker;
+    NodeMaintenanceStatsTracker statsTracker;
+    StateChecker::Context c(getExternalOperationHandler(), statsTracker, bid);
+    return testStateChecker(checker, c, false, PendingMessage(),
+                            includePriority);
+}
+
+// Exercises BucketStateStateChecker's activation rules, in order of
+// preference visible from the expected strings: ready > trusted > ideal
+// state position > first available copy; surplus active replicas are
+// deactivated.
+void
+StateCheckersTest::testBucketState()
+{
+    setupDistributor(2, 100, "distributor:1 storage:4");
+
+    {
+        // Set config explicitly so we can compare priorities for differing
+        // cases.
+        DistributorConfiguration::MaintenancePriorities mp;
+        mp.activateNoExistingActive = 90;
+        mp.activateWithExistingActive = 120;
+        getConfig().setMaintenancePriorities(mp);
+    }
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("NO OPERATIONS GENERATED"),
+            testBucketState(""));
+
+    // Node 1 is in ideal state
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("[Setting node 1 as active:"
+                        " copy is ideal state priority 0] (pri 90)"),
+            testBucketState("1=2/3/4", 2, true));
+
+    // Node 3 is in ideal state
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("[Setting node 3 as active:"
+                        " copy is ideal state priority 1]"),
+            testBucketState("3=2/3/4"));
+
+    // No trusted nodes, but node 1 is first in ideal state.
+    // Also check bad case where more than 1 node is set as active just
+    // to ensure we can get out of that situation if it should ever happen.
+    // Nothing done with node 3 since isn't active and shouldn't be.
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("[Setting node 1 as active:"
+                        " copy is ideal state priority 0]"
+                        "[Setting node 0 as inactive]"
+                        "[Setting node 2 as inactive] (pri 120)"),
+            testBucketState("0=3/4/5/u/a,1=3,2=4/5/6/u/a,3=3", 2, true));
+
+    // Test setting active when only node available is not contained
+    // within the resolved ideal state.
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("[Setting node 0 as active: first available copy]"),
+            testBucketState("0=2/3/4"));
+
+    // A trusted ideal state copy should be set active rather than a non-trusted
+    // ideal state copy
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("[Setting node 3 as active:"
+                        " copy is trusted and ideal state priority 1]"
+                        "[Setting node 1 as inactive]"),
+            testBucketState("1=2/3/4/u/a,3=5/6/7/t"));
+
+    // None of the ideal state copies are trusted but a non-ideal copy is.
+    // The trusted copy should be active.
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("[Setting node 2 as active: copy is trusted]"),
+            testBucketState("1=2/3/4,3=5/6/7/,2=8/9/10/t"));
+
+    // Make sure bucket db ordering does not matter
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("[Setting node 2 as active: copy is trusted]"),
+            testBucketState("2=8/9/10/t,1=2/3/4,3=5/6/7"));
+
+    // If copy is already active, we shouldn't generate operations
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("NO OPERATIONS GENERATED"),
+            testBucketState("1=2/3/4/t/a"));
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("NO OPERATIONS GENERATED"),
+            testBucketState("1=2/3/4,3=5/6/7/t/a"));
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("NO OPERATIONS GENERATED"),
+            testBucketState("2=8/9/10/t/a,1=2/3/4,3=5/6/7"));
+
+    // If multiple buckets are active, deactive all but one
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("[Setting node 2 as inactive]"
+                        "[Setting node 3 as inactive]"),
+            testBucketState("1=1/2/3/t/a,2=1/2/3/t/a,3=1/2/3/t/a"));
+
+    // Invalid buckets should not be included
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("NO OPERATIONS GENERATED"),
+            testBucketState("1=0/0/1,3=0/0/1"));
+
+    // Ready preferred over trusted & ideal state
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("NO OPERATIONS GENERATED"),
+            testBucketState("2=8/9/10/t/i/u,1=2/3/4/u/a/r,3=5/6/7"));
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("[Setting node 2 as active: copy is ready]"
+                        "[Setting node 1 as inactive]"),
+            testBucketState("2=8/9/10/u/i/r,1=2/3/4/u/a/u,3=5/6/7/u/i/u"));
+
+    // Prefer in ideal state if multiple copies ready
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("[Setting node 3 as active: copy is ready]"
+                        "[Setting node 1 as inactive]"),
+            testBucketState("2=8/9/10/u/i/r,1=2/3/4/u/a/u,3=5/6/7/u/i/r"));
+
+    // Prefer ideal state if all ready but no trusted
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("[Setting node 1 as active: copy is ready]"),
+            testBucketState("2=8/9/10/u/i/r,1=2/3/4/u/i/r,3=5/6/7/u/i/r"));
+
+    // Prefer trusted over ideal state
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("[Setting node 2 as active: copy is ready and trusted]"
+                        "[Setting node 1 as inactive]"),
+            testBucketState("2=8/9/10/t/i/r,1=2/3/4/u/a/r,3=5/6/7"));
+}
+
+/**
+ * Users assume that setting nodes into maintenance will not cause extra load
+ * on the cluster, but activating non-ready copies because the active copy went
+ * into maintenance violates that assumption. See bug 6833209 for context and
+ * details.
+ */
+void
+StateCheckersTest::testDoNotActivateNonReadyCopiesWhenIdealNodeInMaintenance()
+{
+    setupDistributor(2, 100, "distributor:1 storage:4 .1.s:m");
+    // Ideal node 1 is in maintenance and no ready copy available.
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("NO OPERATIONS GENERATED"),
+            testBucketState("2=8/9/10/t/i/u,3=5/6/7"));
+    // But we should activate another copy iff there's another ready copy.
+    // ("/r" flag on node 2 below marks the copy as ready.)
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("[Setting node 2 as active: copy is ready]"),
+            testBucketState("2=8/9/10/u/i/r,3=5/6/7/u/i/u"));
+}
+
+/**
+ * We really do not want to activate buckets when they are inconsistent.
+ * See bug 6395693 for a set of reasons why.
+ */
+void
+StateCheckersTest::testDoNotChangeActiveStateForInconsistentlySplitBuckets()
+{
+    setupDistributor(2, 100, "distributor:1 storage:4");
+    // Running state checker on a leaf:
+    // (a 16-bit parent of the checked 17-bit bucket makes the split tree
+    // inconsistent)
+    addNodesToBucketDB(document::BucketId(16, 0), "0=2");
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("NO OPERATIONS GENERATED"),
+            testBucketState("1=1")); // 17 bits
+    // Running state checker on an inner node bucket:
+    addNodesToBucketDB(document::BucketId(18, 0), "0=2");
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("NO OPERATIONS GENERATED"),
+            testSynchronizeAndMove("0=1")); // 17 bits
+}
+
+/**
+ * If all existing copies are outside the ideal state, e.g. if the set of nodes
+ * in the cluster has changed significantly, we do not want to change the active
+ * state of copies needlessly iff the copies are otherwise equally scored in
+ * terms of activation eligibility. If we do not prioritize existing active
+ * copies higher in this case, it's possible that their ideal order has been
+ * permuted, causing another copy to rank higher in the ideal state node
+ * sequence. This would in turn activate the newly higher ranked copy and
+ * deactivate the previously active copy, causing transient search duplicates
+ * and unneeded work in the cluster; new copies will be created and indexed
+ * soon anyway.
+ *
+ * See bug 7278932.
+ */
+void
+StateCheckersTest::testNoActiveChangeForNonIdealCopiesWhenOtherwiseIdentical()
+{
+    setupDistributor(2, 100, "distributor:1 storage:50");
+    // 1 is more ideal than 3 in this state, but since they're both not part
+    // of the #redundancy ideal set, activation should not change hands.
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("NO OPERATIONS GENERATED"),
+            testBucketState("1=2/3/4/t/i/r,3=2/3/4/t/a/r"));
+    // Same applies if the copies aren't ready, since if a copy has been marked
+    // as active it will already have started background indexing. No need in
+    // undoing that if we don't have any better candidates going anyway.
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("NO OPERATIONS GENERATED"),
+            testBucketState("1=2/3/4/t,3=2/3/4/t/a"));
+}
+
+// Helper: populates bucket (17, 0) with the given replica spec and runs the
+// BucketStateStateChecker on it, returning the textual description of any
+// generated operation (optionally including its priority).
+std::string StateCheckersTest::testBucketStatePerGroup(
+ const std::string& bucketInfo, bool includePriority)
+{
+ document::BucketId bid(17, 0);
+ addNodesToBucketDB(bid, bucketInfo);
+
+ BucketStateStateChecker checker;
+ NodeMaintenanceStatsTracker statsTracker;
+ StateChecker::Context c(getExternalOperationHandler(), statsTracker, bid);
+ return testStateChecker(checker, c, false, PendingMessage(),
+ includePriority);
+}
+
+// Verifies that with activePerLeafGroup enabled, one replica is activated in
+// each leaf group ("left", "right", "middle") rather than once per cluster.
+void
+StateCheckersTest::testBucketStatePerGroup()
+{
+ setupDistributor(6, 20, "distributor:1 storage:12 .2.s:d .4.s:d .7.s:d");
+ // Hand-build a 3-leaf-group hierarchical distribution config; group[0] is
+ // the (invalid-indexed) top-level group partitioning data across the leaves.
+ vespa::config::content::StorDistributionConfigBuilder config;
+ config.activePerLeafGroup = true;
+ config.redundancy = 6;
+ config.group.resize(4);
+ config.group[0].index = "invalid";
+ config.group[0].name = "invalid";
+ config.group[0].partitions = "2|2|*";
+ config.group[1].index = "0";
+ config.group[1].name = "left";
+ config.group[1].nodes.resize(3);
+ config.group[1].nodes[0].index = 0;
+ config.group[1].nodes[1].index = 1;
+ config.group[1].nodes[2].index = 3;
+ config.group[2].index = "1";
+ config.group[2].name = "right";
+ config.group[2].nodes.resize(3);
+ config.group[2].nodes[0].index = 5;
+ config.group[2].nodes[1].index = 6;
+ config.group[2].nodes[2].index = 8;
+ config.group[3].index = "2";
+ config.group[3].name = "middle";
+ config.group[3].nodes.resize(3);
+ config.group[3].nodes[0].index = 9;
+ config.group[3].nodes[1].index = 10;
+ config.group[3].nodes[2].index = 11;
+ lib::Distribution::SP distr(new lib::Distribution(config));
+ _node->getComponentRegister().setDistribution(distr);
+
+ {
+ // Distinct priorities let the assertions verify which code path fired.
+ DistributorConfiguration::MaintenancePriorities mp;
+ mp.activateNoExistingActive = 90;
+ mp.activateWithExistingActive = 120;
+ getConfig().setMaintenancePriorities(mp);
+ }
+
+ // Nodes 1 and 8 are in ideal state
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("[Setting node 1 as active: "
+ "copy is trusted and ideal state priority 4]"
+ "[Setting node 6 as active: "
+ "copy is trusted and ideal state priority 0] (pri 90)"),
+ testBucketStatePerGroup("0=2/3/4/t, 1=2/3/4/t, 3=2/3/4/t, "
+ "5=2/3/4/t, 6=2/3/4/t, 8=2/3/4/t", true));
+
+ // Data differ between groups
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("[Setting node 1 as active: "
+ "copy is trusted and ideal state priority 4]"
+ "[Setting node 6 as active: "
+ "copy is ideal state priority 0] (pri 90)"),
+ testBucketStatePerGroup("0=2/3/4/t, 1=2/3/4/t, 3=2/3/4/t, "
+ "5=5/6/7, 6=5/6/7, 8=5/6/7", true));
+
+ // Disable too
+ // All copies active: the surplus activations per group must be retired.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("[Setting node 0 as inactive]"
+ "[Setting node 3 as inactive]"
+ "[Setting node 5 as inactive]"
+ "[Setting node 8 as inactive] (pri 90)"),
+ testBucketStatePerGroup("0=2/3/4/t/a, 1=2/3/4/t/a, 3=2/3/4/t/a, "
+ "5=2/3/4/t/a, 6=2/3/4/t/a, 8=2/3/4/t/a",
+ true));
+
+ // Nodes 1 and 8 are in ideal state
+ // With replicas in all three groups, one activation per group is expected.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("[Setting node 1 as active: "
+ "copy is trusted and ideal state priority 4]"
+ "[Setting node 6 as active: "
+ "copy is trusted and ideal state priority 0]"
+ "[Setting node 9 as active: "
+ "copy is trusted and ideal state priority 2] (pri 90)"),
+ testBucketStatePerGroup("0=2/3/4/t, 1=2/3/4/t, 3=2/3/4/t, "
+ "5=2/3/4/t, 6=2/3/4/t, 8=2/3/4/t, "
+ "9=2/3/4/t, 10=2/3/4/t, 11=2/3/4/t",
+ true));
+}
+
+void
+StateCheckersTest::allowActivationOfRetiredNodes()
+{
+ // All nodes in retired state implies that the ideal state is empty. But
+ // we still want to be able to shuffle bucket activations around in order
+ // to preserve coverage.
+ setupDistributor(2, 2, "distributor:1 storage:2 .0.s:r .1.s:r");
+ // Trusted copy on node 1 wins over the currently active (untrusted) node 0.
+ CPPUNIT_ASSERT_EQUAL(
+ "[Setting node 1 as active: copy is trusted]"
+ "[Setting node 0 as inactive]"s,
+ testBucketState("0=2/3/4/u/a,1=5/6/7/t"));
+}
+
+// When activation is config-disabled, no activation ops may be generated even
+// for an otherwise perfect activation candidate.
+void
+StateCheckersTest::inhibitBucketActivationIfDisabledInConfig()
+{
+ setupDistributor(2, 4, "distributor:1 storage:4");
+ disableBucketActivationInConfig(true);
+
+ // Node 1 is in ideal state and only replica and should be activated in
+ // an indexed cluster context (but not here).
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("NO OPERATIONS GENERATED"),
+ testBucketState("1=2/3/4", 2, true));
+}
+
+// Counterpart to the activation-inhibit test: with activation disabled we must
+// also not generate deactivation ops for already-active replicas.
+void
+StateCheckersTest::inhibitBucketDeactivationIfDisabledInConfig()
+{
+ setupDistributor(2, 4, "distributor:1 storage:4");
+ disableBucketActivationInConfig(true);
+
+ // Multiple replicas which would have been deactivated. This test is mostly
+ // for the sake of completion; a scenario where buckets are active while
+ // having no indexed documents configured should not happen.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("NO OPERATIONS GENERATED"),
+ testBucketState("1=1/2/3/t/a,2=1/2/3/t/a,3=1/2/3/t/a"));
+}
+
+// Helper: creates bucket (17, 0) whose last GC time is prevTimestamp, sets the
+// clock to nowTimestamp and the configured GC interval/change-time, then runs
+// the GarbageCollectionStateChecker and returns its operation description.
+std::string StateCheckersTest::testGarbageCollection(
+ uint32_t prevTimestamp, uint32_t nowTimestamp,
+ uint32_t checkInterval, uint32_t lastChangeTime,
+ bool includePriority)
+{
+ BucketDatabase::Entry e(document::BucketId(17, 0));
+ e.getBucketInfo().addNode(BucketCopy(prevTimestamp, 0,
+ api::BucketInfo(3,3,3)),
+ toVector((uint16_t)0));
+ e.getBucketInfo().setLastGarbageCollectionTime(prevTimestamp);
+ getBucketDatabase().update(e);
+
+ GarbageCollectionStateChecker checker;
+ getConfig().setGarbageCollection("music", checkInterval);
+ getConfig().setLastGarbageCollectionChangeTime(lastChangeTime);
+ NodeMaintenanceStatsTracker statsTracker;
+ StateChecker::Context c(getExternalOperationHandler(), statsTracker,
+ e.getBucketId());
+ getClock().setAbsoluteTimeInSeconds(nowTimestamp);
+ return testStateChecker(checker, c, false, PendingMessage(),
+ includePriority);
+}
+
+void
+StateCheckersTest::testGarbageCollection()
+{
+ // BucketId(17, 0) has id (and thus 'hash') 0x4400000000000000. With a
+ // check interval modulo of 3600, this implies a start point of 848.
+
+ // One second before the per-bucket start point: no GC yet.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("NO OPERATIONS GENERATED"),
+ testGarbageCollection(900, 3600 + 847, 3600));
+
+ // Exactly at the start point: GC must trigger.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("[Needs garbage collection: Last check at 900, current time 4448, "
+ "configured interval 3600]"),
+ testGarbageCollection(900, 3600 + 848, 3600));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("[Needs garbage collection: Last check at 3, current time 4000, "
+ "configured interval 3600]"),
+ testGarbageCollection(3, 4000, 3600));
+
+ // GC start point 3648.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("NO OPERATIONS GENERATED"),
+ testGarbageCollection(3, 3647, 8000));
+
+ // NOTE(review): duplicate of the (3, 4000, 3600) assertion above — one of
+ // the two can likely be removed or was meant to use different arguments.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("[Needs garbage collection: Last check at 3, current time 4000, "
+ "configured interval 3600]"),
+ testGarbageCollection(3, 4000, 3600));
+
+ // GC explicitly disabled.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("NO OPERATIONS GENERATED"),
+ testGarbageCollection(3, 4000, 0));
+
+ // No time has elapsed since the last check.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("NO OPERATIONS GENERATED"),
+ testGarbageCollection(3, 3, 1));
+
+ // Priority of the generated GC operation is included when requested.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("[Needs garbage collection: Last check at 3, current time 4000, "
+ "configured interval 300] (pri 200)"),
+ testGarbageCollection(3, 4000, 300, 1, true));
+
+ // Last GC happened after the config change time; interval not yet expired.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("NO OPERATIONS GENERATED"),
+ testGarbageCollection(3850, 4000, 300, 1));
+}
+
+/**
+ * When a node is in maintenance, we want to do our best to avoid any unneeded
+ * changes to the bucket replicas' states, as this will require re-syncing of
+ * the replicas when the node comes out of maintenance. Consequently we should
+ * not trigger GC for buckets when this is the case.
+ */
+void
+StateCheckersTest::gcInhibitedWhenIdealNodeInMaintenance()
+{
+ // Redundancy is 3, so with only 3 nodes, node 1 is guaranteed to be part of
+ // the ideal state of any bucket in the system.
+ setupDistributor(3, 3, "distributor:1 storage:3 .1.s:m");
+ document::BucketId bucket(17, 0);
+ addNodesToBucketDB(bucket, "0=10/100/1/true,"
+ "1=10/100/1/true,"
+ "2=10/100/1/true");
+ // Backdate the last GC time so the GC interval is clearly overdue.
+ BucketDatabase::Entry e(getBucketDatabase().get(bucket));
+ e.getBucketInfo().setLastGarbageCollectionTime(3);
+ getBucketDatabase().update(e);
+
+ GarbageCollectionStateChecker checker;
+ getConfig().setGarbageCollection("music", 3600);
+ getConfig().setLastGarbageCollectionChangeTime(0);
+ NodeMaintenanceStatsTracker statsTracker;
+ StateChecker::Context c(getExternalOperationHandler(), statsTracker,
+ bucket);
+ getClock().setAbsoluteTimeInSeconds(4000);
+ // Would normally (in a non-maintenance case) trigger GC due to having
+ // overshot the GC check cycle.
+ auto result = testStateChecker(checker, c, false, PendingMessage(), false);
+
+ CPPUNIT_ASSERT_EQUAL(std::string("NO OPERATIONS GENERATED"), result);
+}
+
+/*
+ * Bug 6656726, comment #25. Merge state checker does not execute if an ideal
+ * node is in maintenance, so for symmetry we need to do the same for deletes
+ * (it's bad mojo to potentially delete something that would've been merged
+ * had it not been for a node being in maintenance).
+ */
+void
+StateCheckersTest::testNoRemoveWhenIdealNodeInMaintenance()
+{
+ // Node 1 (in maintenance) holds a copy; the extra copy on node 2 must not
+ // be deleted while that is the case.
+ CPPUNIT_ASSERT_EQUAL_MSG(
+ "Do not remove when ideal node is in maintenance mode",
+ std::string("NO OPERATIONS GENERATED"),
+ testDeleteExtraCopies("0=10/100/1/true,"
+ "1=10/100/1/true,"
+ "2=10/100/1/true",
+ 2, PendingMessage(),
+ "distributor:1 storage:3 .1.s:m"));
+}
+
+/*
+ * Just joining buckets where both children are present is not enough to
+ * ensure any system can compact its bucket tree. We must therefore
+ * gradually hoist buckets higher into the tree when possible in order
+ * to converge in a state where as many buckets as possible have siblings
+ * on the same level.
+ *
+ * See bug 6768991 for context.
+ */
+void
+StateCheckersTest::testStepwiseJoinForSmallBucketsWithoutSiblings()
+{
+ setupDistributor(3, 10, "distributor:1 storage:2 bits:1");
+ vespa::config::content::core::StorDistributormanagerConfigBuilder config;
+ config.enableJoinForSiblingLessBuckets = true;
+ getConfig().configure(config);
+ // Buckets without siblings but that should be step-wise joined back
+ // into bucket (2, 1).
+ insertBucketInfo(document::BucketId(3, 1), 1, 0x1, 1, 1);
+ insertBucketInfo(document::BucketId(3, 0x3), 1, 0x1, 1, 1);
+ // Both join sources in the message are the same bucket since it has no
+ // sibling; the join merely lifts it one level up the tree.
+ CPPUNIT_ASSERT_EQUAL(std::string(
+ "BucketId(0x0800000000000001): "
+ "[Joining buckets BucketId(0x0c00000000000001) and "
+ "BucketId(0x0c00000000000001) because their size "
+ "(1 bytes, 1 docs) is less than the configured limit "
+ "of (100, 10)"),
+ testJoin(10, 100, 2, document::BucketId(3, 1)));
+
+ // Other bucket should be joined as well. Together the two join targets
+ // will transform into a mighty sibling pair that can rule the galaxy
+ // (and also be joined together afterwards)!
+ insertBucketInfo(document::BucketId(3, 1), 1, 0x1, 1, 1);
+ insertBucketInfo(document::BucketId(3, 0x3), 1, 0x1, 1, 1);
+ CPPUNIT_ASSERT_EQUAL(std::string(
+ "BucketId(0x0800000000000003): "
+ "[Joining buckets BucketId(0x0c00000000000003) and "
+ "BucketId(0x0c00000000000003) because their size "
+ "(1 bytes, 1 docs) is less than the configured limit "
+ "of (100, 10)"),
+ testJoin(10, 100, 2, document::BucketId(3, 0x3)));
+}
+
+// Sibling-less (step-wise) joins must be strictly opt-in through config.
+void
+StateCheckersTest::testNoStepwiseJoinWhenDisabledThroughConfig()
+{
+ setupDistributor(3, 10, "distributor:1 storage:2 bits:1");
+ vespa::config::content::core::StorDistributormanagerConfigBuilder config;
+ config.enableJoinForSiblingLessBuckets = false;
+ getConfig().configure(config);
+
+ // Buckets without siblings but that would have been step-wise joined back
+ // into bucket 1 if it had been config-enabled.
+ insertBucketInfo(document::BucketId(3, 1), 1, 0x1, 1, 1);
+ insertBucketInfo(document::BucketId(3, 0x3), 1, 0x1, 1, 1);
+ CPPUNIT_ASSERT_EQUAL(std::string("NO OPERATIONS GENERATED"),
+ testJoin(10, 100, 1, document::BucketId(3, 1)));
+}
+
+// A sibling-less bucket at exactly the configured doc/byte limit must not be
+// step-wise joined; the limit is exclusive.
+void
+StateCheckersTest::testNoStepwiseJoinWhenSingleSiblingTooLarge()
+{
+ setupDistributor(3, 10, "distributor:1 storage:2 bits:1");
+ vespa::config::content::core::StorDistributormanagerConfigBuilder config;
+ config.enableJoinForSiblingLessBuckets = true;
+ getConfig().configure(config);
+
+ // Bucket is exactly at the boundary where it's too big.
+ insertBucketInfo(document::BucketId(3, 1), 1, 0x1, 10, 100);
+ insertBucketInfo(document::BucketId(3, 0x3), 1, 0x1, 1, 1);
+ CPPUNIT_ASSERT_EQUAL(std::string("NO OPERATIONS GENERATED"),
+ testJoin(10, 100, 1, document::BucketId(3, 1)));
+}
+
+// A lone bucket with no ancestors or siblings may be hoisted several levels in
+// a single join, down to the cluster's distribution bit count (8 here).
+void
+StateCheckersTest::testStepwiseJoinMaySkipMultipleBitsWhenConsistent()
+{
+ setupDistributor(2, 10, "distributor:1 storage:2 bits:8");
+ vespa::config::content::core::StorDistributormanagerConfigBuilder config;
+ config.enableJoinForSiblingLessBuckets = true;
+ getConfig().configure(config);
+
+ insertBucketInfo(document::BucketId(16, 1), 1, 0x1, 1, 1);
+ // No buckets further up in the tree, can join up to the distribution bit
+ // limit at 8.
+ CPPUNIT_ASSERT_EQUAL(std::string(
+ "BucketId(0x2000000000000001): "
+ "[Joining buckets BucketId(0x4000000000000001) and "
+ "BucketId(0x4000000000000001) because their size "
+ "(1 bytes, 1 docs) is less than the configured limit "
+ "of (100, 10)"),
+ testJoin(10, 100, 8, document::BucketId(16, 1)));
+}
+
+// The multi-bit hoist must stop at the first tree level where a sibling
+// bucket exists, even if further levels would otherwise be joinable.
+void
+StateCheckersTest::testStepwiseJoinDoesNotSkipBeyondLevelWithSibling()
+{
+ setupDistributor(2, 10, "distributor:1 storage:2 bits:8");
+ vespa::config::content::core::StorDistributormanagerConfigBuilder config;
+ config.enableJoinForSiblingLessBuckets = true;
+ getConfig().configure(config);
+
+ // All 0-branch children
+ insertBucketInfo(document::BucketId(16, 0), 1, 0x1, 1, 1);
+ // 0-branches down to level 10, then 1-branch down to level 11. This means
+ // the (16, 0) bucket cannot be moved further up than level 11 as it has a
+ // sibling there (0x2c00000000000400 sibling of 0x2c00000000000000).
+ insertBucketInfo(document::BucketId(11, 1 << 10), 1, 0x1, 1, 1);
+ CPPUNIT_ASSERT_EQUAL(std::string(
+ "BucketId(0x2c00000000000000): "
+ "[Joining buckets BucketId(0x4000000000000000) and "
+ "BucketId(0x4000000000000000) because their size "
+ "(1 bytes, 1 docs) is less than the configured limit "
+ "of (100, 10)"),
+ testJoin(10, 100, 8, document::BucketId(16, 0)));
+}
+
+// Replicas residing only on retired nodes must still be eligible for joins.
+void
+StateCheckersTest::joinCanBeScheduledWhenReplicasOnRetiredNodes()
+{
+ // NOTE(review): ".0.s.:r" looks like a typo for ".0.s:r" — verify the
+ // cluster state parser actually puts node 0 into retired state here.
+ setupDistributor(1, 1, "distributor:1 storage:1 .0.s.:r");
+ insertJoinableBuckets();
+ CPPUNIT_ASSERT_EQUAL(
+ "BucketId(0x8000000000000001): "
+ "[Joining buckets BucketId(0x8400000000000001) and "
+ "BucketId(0x8400000100000001) because their size "
+ "(2 bytes, 2 docs) is less than the configured limit "
+ "of (100, 10)"s,
+ testJoin(10, 100, 16, document::BucketId(33, 1)));
+}
+
+// Sanity-checks that constructing a StateChecker::Context fills both the
+// ordered ideal-state vector and the unordered ideal-state lookup set.
+void
+StateCheckersTest::contextPopulatesIdealStateContainers()
+{
+ // 1 and 3 are ideal nodes for bucket {17, 0}
+ setupDistributor(2, 100, "distributor:1 storage:4");
+
+ NodeMaintenanceStatsTracker statsTracker;
+ StateChecker::Context c(getExternalOperationHandler(), statsTracker, {17, 0});
+
+ CPPUNIT_ASSERT_EQUAL((std::vector<uint16_t>{1, 3}), c.idealState);
+ CPPUNIT_ASSERT_EQUAL(size_t(2), c.unorderedIdealState.size());
+ CPPUNIT_ASSERT(c.unorderedIdealState.find(1)
+ != c.unorderedIdealState.end());
+ CPPUNIT_ASSERT(c.unorderedIdealState.find(3)
+ != c.unorderedIdealState.end());
+}
+
+namespace {
+
+/**
+ * Fluent test helper: configures the fixture (bucket DB contents, redundancy,
+ * cluster state), runs the templated state checker on a bucket and exposes the
+ * resulting operation string and per-node maintenance statistics.
+ */
+template <typename Checker>
+class StateCheckerRunner
+{
+ StateCheckersTest& _fixture;
+ NodeMaintenanceStatsTracker _statsTracker;
+ std::string _result;
+public:
+ StateCheckerRunner(StateCheckersTest& fixture)
+ : _fixture(fixture)
+ {
+ }
+
+ // Populate the fixture's bucket DB for the given bucket.
+ StateCheckerRunner& addToDb(const document::BucketId& bid,
+ const std::string& bucketInfo)
+ {
+ _fixture.addNodesToBucketDB(bid, bucketInfo);
+ return *this;
+ }
+
+ StateCheckerRunner& redundancy(uint32_t red) {
+ _fixture.setRedundancy(red);
+ return *this;
+ }
+
+ StateCheckerRunner& clusterState(const std::string& state) {
+ _fixture.enableClusterState(lib::ClusterState(state));
+ return *this;
+ }
+
+ // Run the templated state checker with the provided parameters, updating
+ // _result with the ideal state operations triggered.
+ // NOTE: resets the bucket database!
+ void runFor(const document::BucketId& bid) {
+ Checker checker;
+ StateChecker::Context c(_fixture.getExternalOperationHandler(), _statsTracker, bid);
+ _result = _fixture.testStateChecker(
+ checker, c, false, StateCheckersTest::PendingMessage(), false);
+ }
+
+ const std::string& result() const { return _result; }
+ const NodeMaintenanceStatsTracker& stats() const {
+ return _statsTracker;
+ }
+};
+
+} // anon ns
+
+// Verifies that the per-node maintenance stats distinguish copy sources,
+// copy targets and move sources when a merge is triggered by a replica move.
+void
+StateCheckersTest::statsUpdatedWhenMergingDueToMove()
+{
+ StateCheckerRunner<SynchronizeAndMoveStateChecker> runner(*this);
+ // Ideal state for bucket {17,0} in given cluster state is [1, 3]
+ runner.addToDb({17, 0}, "0=1,1=1,2=1")
+ .clusterState("distributor:1 storage:4")
+ .runFor({17, 0});
+ // Node 1 treated as copy source, but not as move source.
+ {
+ NodeMaintenanceStats wanted;
+ wanted.copyingOut = 1;
+ CPPUNIT_ASSERT_EQUAL(wanted, runner.stats().forNode(1));
+ }
+ // Moving 1 bucket from nodes {0, 2} into 3.
+ // Note that we do not at this point in time distinguish _which_ of these
+ // will do the actual data movement to node 3.
+ {
+ NodeMaintenanceStats wanted;
+ wanted.copyingIn = 1;
+ CPPUNIT_ASSERT_EQUAL(wanted, runner.stats().forNode(3));
+ }
+ {
+ NodeMaintenanceStats wanted;
+ wanted.movingOut = 1;
+ CPPUNIT_ASSERT_EQUAL(wanted, runner.stats().forNode(0));
+ CPPUNIT_ASSERT_EQUAL(wanted, runner.stats().forNode(2));
+ }
+}
+
+// A missing replica on an ideal node must be counted as copyingIn on the
+// target (node 3) and copyingOut on the existing source (node 1).
+void
+StateCheckersTest::statsUpdatedWhenMergingDueToMissingCopy()
+{
+ StateCheckerRunner<SynchronizeAndMoveStateChecker> runner(*this);
+ // Ideal state for bucket {17,0} in given cluster state is [1, 3]
+ runner.addToDb({17, 0}, "1=1")
+ .clusterState("distributor:1 storage:4")
+ .runFor({17, 0});
+
+ {
+ NodeMaintenanceStats wanted;
+ wanted.copyingIn = 1;
+ CPPUNIT_ASSERT_EQUAL(wanted, runner.stats().forNode(3));
+ }
+ {
+ NodeMaintenanceStats wanted;
+ wanted.copyingOut = 1;
+ CPPUNIT_ASSERT_EQUAL(wanted, runner.stats().forNode(1));
+ }
+}
+
+// Two replicas with differing checksums (1 vs 2) must both be counted as
+// "syncing" in the maintenance stats when a merge is generated.
+void
+StateCheckersTest::statsUpdatedWhenMergingDueToOutOfSyncCopies()
+{
+ StateCheckerRunner<SynchronizeAndMoveStateChecker> runner(*this);
+ runner.addToDb({17, 0}, "1=1,3=2")
+ .clusterState("distributor:1 storage:4")
+ .runFor({17, 0});
+ {
+ NodeMaintenanceStats wanted;
+ wanted.syncing = 1;
+ CPPUNIT_ASSERT_EQUAL(wanted, runner.stats().forNode(1));
+ CPPUNIT_ASSERT_EQUAL(wanted, runner.stats().forNode(3));
+ }
+}
+
+} // distributor
+} // storage
diff --git a/storage/src/tests/distributor/statoperationtest.cpp b/storage/src/tests/distributor/statoperationtest.cpp
new file mode 100644
index 00000000000..22fee6e44d7
--- /dev/null
+++ b/storage/src/tests/distributor/statoperationtest.cpp
@@ -0,0 +1,115 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <tests/common/dummystoragelink.h>
+#include <vespa/storageapi/message/stat.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <tests/distributor/distributortestutil.h>
+#include <vespa/storage/distributor/operations/external/statbucketoperation.h>
+#include <vespa/storage/distributor/operations/external/statbucketlistoperation.h>
+
+namespace storage {
+namespace distributor {
+
+// Fixture exercising StatBucketOperation and StatBucketListOperation.
+struct StatOperationTest : public CppUnit::TestFixture,
+ public DistributorTestUtil
+{
+ void setUp() {
+ createLinks();
+ }; // NOTE(review): stray ';' after the function body — harmless, should be dropped.
+
+ void tearDown() {
+ close();
+ }
+
+ void testBucketInfo();
+ void testBucketList();
+
+ CPPUNIT_TEST_SUITE(StatOperationTest);
+ CPPUNIT_TEST(testBucketInfo);
+ CPPUNIT_TEST(testBucketList);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(StatOperationTest);
+
+// A StatBucketCommand fans out one Statbucket request per replica node and
+// the final reply aggregates the per-node result strings.
+void
+StatOperationTest::testBucketInfo()
+{
+ _distributor->enableClusterState(lib::ClusterState("distributor:1 storage:2"));
+
+ addNodesToBucketDB(document::BucketId(16, 5),
+ "0=4/2/100,1=4/2/100");
+
+ StatBucketOperation op(
+ getExternalOperationHandler(),
+ std::shared_ptr<api::StatBucketCommand>(
+ new api::StatBucketCommand(document::BucketId(16, 5), "")));
+
+ op.start(_sender, framework::MilliSecTime(0));
+
+ // One command per replica node (0 and 1).
+ CPPUNIT_ASSERT_EQUAL(std::string("Statbucket => 0,Statbucket => 1"),
+ _sender.getCommands(true));
+
+ // Feed back a reply for each sent command; ownership of the raw reply
+ // pointers is transferred to the shared_ptr passed to receive().
+ {
+ api::StatBucketCommand* tmp(
+ static_cast<api::StatBucketCommand*>(_sender.commands[0].get()));
+ api::StatBucketReply* reply = new api::StatBucketReply(*tmp, "foo");
+ op.receive(_sender, std::shared_ptr<api::StorageReply>(reply));
+ }
+
+ {
+ api::StatBucketCommand* tmp(
+ static_cast<api::StatBucketCommand*>(_sender.commands[1].get()));
+ api::StatBucketReply* reply = new api::StatBucketReply(*tmp, "bar");
+ op.receive(_sender, std::shared_ptr<api::StorageReply>(reply));
+ }
+
+ // Aggregated reply must contain both per-node result strings.
+ api::StatBucketReply* replyback(
+ static_cast<api::StatBucketReply*>(_sender.replies.back().get()));
+ CPPUNIT_ASSERT_CONTAIN("foo", replyback->getResults());
+ CPPUNIT_ASSERT_CONTAIN("bar", replyback->getResults());
+}
+
+// GetBucketListCommand must be answered with per-bucket maintenance status
+// (here: a pending split) plus a description of each replica.
+void
+StatOperationTest::testBucketList() {
+ setupDistributor(2, 2, "distributor:1 storage:2");
+
+ // Low split limits so the inserted bucket (100 docs / 200 bytes) is
+ // guaranteed to be flagged for splitting.
+ getConfig().setSplitCount(10);
+ getConfig().setSplitSize(100);
+
+ // Two replicas; only the one on node 1 is marked active.
+ for (uint32_t i = 0; i < 2; ++i) {
+ insertBucketInfo(document::BucketId(16, 5), i,
+ 0xff, 100, 200, true, (i == 1));
+ }
+
+ std::shared_ptr<api::GetBucketListCommand> msg(
+ new api::GetBucketListCommand(document::BucketId(16, 5)));
+
+ StatBucketListOperation op(
+ getExternalOperationHandler().getBucketDatabase(),
+ getIdealStateManager(),
+ getExternalOperationHandler().getIndex(),
+ msg);
+ op.start(_sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(1, (int)_sender.replies.size());
+
+ api::GetBucketListReply* repl(
+ dynamic_cast<api::GetBucketListReply*>(_sender.replies[0].get()));
+
+ CPPUNIT_ASSERT_EQUAL(1, (int)repl->getBuckets().size());
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 5),
+ repl->getBuckets()[0]._bucket);
+ CPPUNIT_ASSERT_EQUAL(
+ vespalib::string(
+ "[distributor:0] split: "
+ "[Splitting bucket because its maximum size (200 b, 100 docs, 100 meta, 200 b total) "
+ "is higher than the configured limit of (100, 10)] "
+ "[node(idx=0,crc=0xff,docs=100/100,bytes=200/200,trusted=true,active=false), "
+ "node(idx=1,crc=0xff,docs=100/100,bytes=200/200,trusted=true,active=true)]"),
+ repl->getBuckets()[0]._bucketInformation);
+}
+
+} // distributor
+} // storage
diff --git a/storage/src/tests/distributor/statusreporterdelegatetest.cpp b/storage/src/tests/distributor/statusreporterdelegatetest.cpp
new file mode 100644
index 00000000000..f05eebed0ce
--- /dev/null
+++ b/storage/src/tests/distributor/statusreporterdelegatetest.cpp
@@ -0,0 +1,87 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <tests/common/testhelper.h>
+#include <tests/distributor/distributortestutil.h>
+
+#include <vespa/storage/distributor/statusreporterdelegate.h>
+
+namespace storage {
+namespace distributor {
+
+// Fixture verifying that StatusReporterDelegate forwards status requests to
+// its StatusDelegator and relays the delegated reporter's output.
+class StatusReporterDelegateTest : public CppUnit::TestFixture
+{
+ CPPUNIT_TEST_SUITE(StatusReporterDelegateTest);
+ CPPUNIT_TEST(testDelegateInvokesDelegatorOnStatusRequest);
+ CPPUNIT_TEST_SUITE_END();
+
+ void testDelegateInvokesDelegatorOnStatusRequest();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(StatusReporterDelegateTest);
+
+namespace {
+
+// We really ought to get GoogleMock as part of our testing suite...
+// Records each delegated request path and forwards it to the wrapped reporter.
+class MockDelegator : public StatusDelegator
+{
+ mutable std::ostringstream _calls;
+ bool handleStatusRequest(const DelegatedStatusRequest& request) const {
+ _calls << "Request(" << request.path << ")";
+ return request.reporter.reportStatus(request.outputStream, request.path);
+ }
+public:
+ std::string getCalls() const {
+ return _calls.str();
+ }
+};
+
+// Canned reporter with fixed id/name ("foo"/"Bar"), fixed content type and a
+// deterministic status body, so the delegate's pass-through can be asserted.
+class MockStatusReporter : public framework::StatusReporter
+{
+public:
+ MockStatusReporter()
+ : framework::StatusReporter("foo", "Bar")
+ {}
+ vespalib::string getReportContentType(
+ const framework::HttpUrlPath&) const
+ {
+ return "foo/bar";
+ }
+
+ bool reportStatus(std::ostream& os,
+ const framework::HttpUrlPath& path) const
+ {
+ os << "reportStatus with " << path;
+ return true;
+ }
+};
+
+}
+
+// End-to-end check: content type is asked of the wrapped reporter, and a
+// reportStatus() call is routed through the delegator exactly once.
+void
+StatusReporterDelegateTest::testDelegateInvokesDelegatorOnStatusRequest()
+{
+ vdstestlib::DirConfig config(getStandardConfig(false));
+ TestDistributorApp app(config.getConfigId());
+
+ MockDelegator mockDelegator;
+ MockStatusReporter reporter;
+
+ StatusReporterDelegate delegate(app.getComponentRegister(),
+ mockDelegator,
+ reporter);
+ framework::HttpUrlPath path("dummy");
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("foo/bar"),
+ delegate.getReportContentType(path));
+
+ std::ostringstream ss;
+ CPPUNIT_ASSERT(delegate.reportStatus(ss, path));
+
+ // Exactly one delegated request, and the reporter's output reached `ss`.
+ CPPUNIT_ASSERT_EQUAL(std::string("Request(dummy)"),
+ mockDelegator.getCalls());
+ CPPUNIT_ASSERT_EQUAL(std::string("reportStatus with dummy"),
+ ss.str());
+}
+
+} // distributor
+} // storage
diff --git a/storage/src/tests/distributor/throttlingoperationstartertest.cpp b/storage/src/tests/distributor/throttlingoperationstartertest.cpp
new file mode 100644
index 00000000000..5c4ba99563c
--- /dev/null
+++ b/storage/src/tests/distributor/throttlingoperationstartertest.cpp
@@ -0,0 +1,142 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <string>
+#include <sstream>
+#include <memory>
+#include <vespa/storage/distributor/throttlingoperationstarter.h>
+#include <tests/distributor/maintenancemocks.h>
+
+namespace storage {
+
+namespace distributor {
+
+using document::BucketId;
+
+// Fixture for ThrottlingOperationStarter: a decorator that limits the number
+// of concurrently pending operations, with a priority-dependent window.
+class ThrottlingOperationStarterTest : public CppUnit::TestFixture {
+ CPPUNIT_TEST_SUITE(ThrottlingOperationStarterTest);
+ CPPUNIT_TEST(testOperationNotThrottledWhenSlotAvailable);
+ CPPUNIT_TEST(testOperationStartingIsForwardedToImplementation);
+ CPPUNIT_TEST(testOperationThrottledWhenNoAvailableSlots);
+ CPPUNIT_TEST(testThrottlingWithMaxPendingRange);
+ CPPUNIT_TEST(testStartingOperationsFillsUpPendingWindow);
+ CPPUNIT_TEST(testFinishingOperationsAllowsMoreToStart);
+ CPPUNIT_TEST_SUITE_END();
+
+ // Fresh mock operation targeting a fixed bucket; identity is irrelevant
+ // to the throttling logic under test.
+ std::shared_ptr<Operation> createMockOperation() {
+ return std::shared_ptr<Operation>(new MockOperation(BucketId(16, 1)));
+ }
+
+ // _operationStarter wraps and forwards to _starterImpl; see setUp().
+ std::unique_ptr<MockOperationStarter> _starterImpl;
+ std::unique_ptr<ThrottlingOperationStarter> _operationStarter;
+
+public:
+ void testOperationNotThrottledWhenSlotAvailable();
+ void testOperationStartingIsForwardedToImplementation();
+ void testOperationThrottledWhenNoAvailableSlots();
+ void testThrottlingWithMaxPendingRange();
+ void testStartingOperationsFillsUpPendingWindow();
+ void testFinishingOperationsAllowsMoreToStart();
+
+ void setUp();
+ void tearDown();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(ThrottlingOperationStarterTest);
+
+// Builds a fresh mock starter and wraps it in the throttler under test.
+void
+ThrottlingOperationStarterTest::setUp()
+{
+ _starterImpl.reset(new MockOperationStarter());
+ _operationStarter.reset(new ThrottlingOperationStarter(*_starterImpl));
+}
+
+void
+ThrottlingOperationStarterTest::tearDown()
+{
+ // Must clear before _operationStarter goes out of scope, or operation
+ // destructors will try to call method on destroyed object.
+ _starterImpl->getOperations().clear();
+}
+
+// With the default pending window, a single start() must be admitted.
+void
+ThrottlingOperationStarterTest::testOperationNotThrottledWhenSlotAvailable()
+{
+ CPPUNIT_ASSERT(_operationStarter->start(createMockOperation(),
+ OperationStarter::Priority(0)));
+}
+
+// An admitted operation must be handed to the wrapped starter with its
+// bucket and priority intact.
+void
+ThrottlingOperationStarterTest::testOperationStartingIsForwardedToImplementation()
+{
+ CPPUNIT_ASSERT(_operationStarter->start(createMockOperation(),
+ OperationStarter::Priority(0)));
+ CPPUNIT_ASSERT_EQUAL(std::string("BucketId(0x4000000000000001), pri 0\n"),
+ _starterImpl->toString());
+}
+
+// A zero-sized pending window must reject every start attempt.
+void
+ThrottlingOperationStarterTest::testOperationThrottledWhenNoAvailableSlots()
+{
+ _operationStarter->setMaxPendingRange(0, 0);
+ CPPUNIT_ASSERT(!_operationStarter->start(createMockOperation(),
+ OperationStarter::Priority(0)));
+}
+
+// canStart(pending, pri): the effective window lies between the configured
+// min and max pending counts — judging by the assertions, lower-valued
+// (better) priorities are granted a larger share of the range.
+void
+ThrottlingOperationStarterTest::testThrottlingWithMaxPendingRange()
+{
+ // Window [0, 1]: only the best priority gets the single slot.
+ _operationStarter->setMaxPendingRange(0, 1);
+ CPPUNIT_ASSERT(!_operationStarter->canStart(0, OperationStarter::Priority(255)));
+ CPPUNIT_ASSERT(_operationStarter->canStart(0, OperationStarter::Priority(0)));
+
+ // Degenerate window [1, 1]: one slot regardless of priority.
+ _operationStarter->setMaxPendingRange(1, 1);
+ CPPUNIT_ASSERT(_operationStarter->canStart(0, OperationStarter::Priority(255)));
+ CPPUNIT_ASSERT(_operationStarter->canStart(0, OperationStarter::Priority(0)));
+
+ // Window [1, 3]: worst priority capped at 1 pending, best at 3.
+ _operationStarter->setMaxPendingRange(1, 3);
+ CPPUNIT_ASSERT(!_operationStarter->canStart(1, OperationStarter::Priority(255)));
+ CPPUNIT_ASSERT(_operationStarter->canStart(1, OperationStarter::Priority(100)));
+ CPPUNIT_ASSERT(_operationStarter->canStart(1, OperationStarter::Priority(0)));
+ CPPUNIT_ASSERT(_operationStarter->canStart(2, OperationStarter::Priority(0)));
+ CPPUNIT_ASSERT(!_operationStarter->canStart(3, OperationStarter::Priority(0)));
+ CPPUNIT_ASSERT(!_operationStarter->canStart(4, OperationStarter::Priority(0)));
+}
+
+// Successively started operations consume the pending window: at each
+// priority level the first start succeeds and the immediate retry fails.
+void
+ThrottlingOperationStarterTest::testStartingOperationsFillsUpPendingWindow()
+{
+ _operationStarter->setMaxPendingRange(1, 3);
+ CPPUNIT_ASSERT(_operationStarter->start(createMockOperation(),
+ OperationStarter::Priority(255)));
+ CPPUNIT_ASSERT(!_operationStarter->start(createMockOperation(),
+ OperationStarter::Priority(255)));
+ CPPUNIT_ASSERT(_operationStarter->start(createMockOperation(),
+ OperationStarter::Priority(100)));
+ CPPUNIT_ASSERT(!_operationStarter->start(createMockOperation(),
+ OperationStarter::Priority(100)));
+ CPPUNIT_ASSERT(_operationStarter->start(createMockOperation(),
+ OperationStarter::Priority(0)));
+ CPPUNIT_ASSERT(!_operationStarter->start(createMockOperation(),
+ OperationStarter::Priority(0)));
+}
+
+// Completing (here: popping) a pending operation frees its slot so a
+// previously rejected start succeeds afterwards.
+void
+ThrottlingOperationStarterTest::testFinishingOperationsAllowsMoreToStart()
+{
+ _operationStarter->setMaxPendingRange(1, 1);
+ CPPUNIT_ASSERT(_operationStarter->start(createMockOperation(),
+ OperationStarter::Priority(255)));
+ CPPUNIT_ASSERT(!_operationStarter->start(createMockOperation(),
+ OperationStarter::Priority(255)));
+ CPPUNIT_ASSERT(!_starterImpl->getOperations().empty());
+
+ // Simulate operation completion by removing it from the mock starter.
+ _starterImpl->getOperations().pop_back();
+
+ CPPUNIT_ASSERT(_operationStarter->start(createMockOperation(),
+ OperationStarter::Priority(255)));
+ CPPUNIT_ASSERT(!_starterImpl->getOperations().empty());
+}
+
+}
+}
diff --git a/storage/src/tests/distributor/twophaseupdateoperationtest.cpp b/storage/src/tests/distributor/twophaseupdateoperationtest.cpp
new file mode 100644
index 00000000000..f6346c9755f
--- /dev/null
+++ b/storage/src/tests/distributor/twophaseupdateoperationtest.cpp
@@ -0,0 +1,1194 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/config/helper/configgetter.h>
+#include <cppunit/extensions/HelperMacros.h>
+#include <vespa/document/config/config-documenttypes.h>
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/document/base/testdocrepo.h>
+#include <vespa/document/update/arithmeticvalueupdate.h>
+#include <iomanip>
+#include <iostream>
+#include <memory>
+#include <tests/common/dummystoragelink.h>
+#include <vespa/storage/distributor/externaloperationhandler.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storage/distributor/operations/external/twophaseupdateoperation.h>
+#include <vespa/storageapi/message/state.h>
+#include <vespa/storageapi/message/batch.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <tests/distributor/distributortestutil.h>
+#include <tests/distributor/messagesenderstub.h>
+
+namespace storage {
+namespace distributor {
+
+using std::shared_ptr;
+using config::ConfigGetter;
+using document::DocumenttypesConfig;
+using config::FileSpec;
+using namespace document;
+using namespace storage;
+using namespace storage::distributor;
+using namespace storage::api;
+using namespace storage::lib;
+
+using namespace std::literals::string_literals;
+
+// Test suite for TwoPhaseUpdateOperation. Covers the distributor's
+// "fast path" (Update sent directly to replicas believed to be in sync,
+// with a Get+Put repair round if replies disagree on timestamps) and the
+// "safe path" (Get newest replica, apply the update locally, Put the
+// result everywhere), plus error propagation, message-setting/trace
+// propagation and ownership-change handling between the two phases.
+class TwoPhaseUpdateOperationTest : public CppUnit::TestFixture,
+ public DistributorTestUtil
+{
+ CPPUNIT_TEST_SUITE(TwoPhaseUpdateOperationTest);
+ CPPUNIT_TEST(testSimple);
+ CPPUNIT_TEST(testNonExisting);
+ CPPUNIT_TEST(testUpdateFailed);
+ CPPUNIT_TEST(testFastPathInconsistentTimestamps);
+ CPPUNIT_TEST(testFastPathInconsistentTimestampsNotFound);
+ CPPUNIT_TEST(testFastPathInconsistentTimestampsUpdateError);
+ CPPUNIT_TEST(testFastPathInconsistentTimestampsGetError);
+ CPPUNIT_TEST(testFastPathInconsistentTimestampsPutError);
+ CPPUNIT_TEST(testFastPathInconsistentTimestampsPutNotStarted);
+ CPPUNIT_TEST(testFastPathInconsistentTimestampsInconsistentSplit);
+ CPPUNIT_TEST(testFastPathPropagatesMessageSettingsToUpdate);
+ CPPUNIT_TEST(testNofM);
+ CPPUNIT_TEST(testSafePathUpdatesNewestReceivedDocument);
+ CPPUNIT_TEST(testCreateIfNonExistentCreatesDocumentIfAllEmptyGets);
+ CPPUNIT_TEST(testUpdateFailsIfSafePathHasFailedPut);
+ CPPUNIT_TEST(testUpdateFailsIfSafePathGetsFail);
+ CPPUNIT_TEST(testUpdateFailsIfApplyThrowsException);
+ CPPUNIT_TEST(testNonExistingWithAutoCreate);
+ CPPUNIT_TEST(testSafePathFailsUpdateWhenMismatchingTimestampConstraint);
+ CPPUNIT_TEST(testSafePathUpdatePropagatesMessageSettingsToGetsAndPuts);
+ CPPUNIT_TEST(testSafePathPropagatesMbusTracesFromReplies);
+ CPPUNIT_TEST(testUpdateFailsIfOwnershipChangesBetweenGetAndPut);
+ CPPUNIT_TEST(testSafePathConditionMismatchFailsWithTasError);
+ CPPUNIT_TEST(testSafePathConditionMatchSendsPutsWithUpdatedDoc);
+ CPPUNIT_TEST(testSafePathConditionParseFailureFailsWithIllegalParamsError);
+ // NOTE(review): "Conditon" below is a typo kept as-is — the registered
+ // name must match the method declaration and its definition.
+ CPPUNIT_TEST(testSafePathConditonUnknownDocTypeFailsWithIllegalParamsError);
+ CPPUNIT_TEST(testSafePathConditionWithMissingDocFailsWithTasError);
+ CPPUNIT_TEST(testFastPathCloseEdgeSendsCorrectReply);
+ CPPUNIT_TEST(testSafePathCloseEdgeSendsCorrectReply);
+ CPPUNIT_TEST_SUITE_END();
+
+ // Shared document type repo fixture; _doc_type is "testdoctype1",
+ // looked up in setUp().
+ document::TestDocRepo _testRepo;
+ DocumentTypeRepo::SP _repo;
+ const DocumentType* _doc_type;
+
+protected:
+ void testSimple();
+ void testNonExisting();
+ void testUpdateFailed();
+ void testFastPathInconsistentTimestamps();
+ void testFastPathInconsistentTimestampsNotFound();
+ void testFastPathInconsistentTimestampsUpdateError();
+ void testFastPathInconsistentTimestampsGetError();
+ void testFastPathInconsistentTimestampsPutError();
+ void testFastPathInconsistentTimestampsPutNotStarted();
+ void testFastPathInconsistentTimestampsInconsistentSplit();
+ void testFastPathPropagatesMessageSettingsToUpdate();
+ void testNofM();
+ void testSafePathUpdatesNewestReceivedDocument();
+ void testCreateIfNonExistentCreatesDocumentIfAllEmptyGets();
+ void testUpdateFailsIfSafePathHasFailedPut();
+ void testUpdateFailsIfSafePathGetsFail();
+ void testUpdateFailsIfApplyThrowsException();
+ void testNonExistingWithAutoCreate();
+ void testSafePathFailsUpdateWhenMismatchingTimestampConstraint();
+ void testSafePathUpdatePropagatesMessageSettingsToGetsAndPuts();
+ void testSafePathPropagatesMbusTracesFromReplies();
+ void testUpdateFailsIfOwnershipChangesBetweenGetAndPut();
+ void testSafePathConditionMismatchFailsWithTasError();
+ void testSafePathConditionMatchSendsPutsWithUpdatedDoc();
+ void testSafePathConditionParseFailureFailsWithIllegalParamsError();
+ void testSafePathConditonUnknownDocTypeFailsWithIllegalParamsError();
+ void testSafePathConditionWithMissingDocFailsWithTasError();
+ void testFastPathCloseEdgeSendsCorrectReply();
+ void testSafePathCloseEdgeSendsCorrectReply();
+
+ // Asserts that trace level/timeout/priority set in sendUpdate() were
+ // copied onto the given sub-command.
+ void checkMessageSettingsPropagatedTo(
+ const api::StorageCommand::SP& msg) const;
+
+ // Returns the string value of field "headerval" in the document carried
+ // by the most recently sent Put command.
+ std::string getUpdatedValueFromLastPut(MessageSenderStub&);
+public:
+ void setUp() {
+ _repo = _testRepo.getTypeRepoSp();
+ _doc_type = _repo->getDocumentType("testdoctype1");
+ createLinks();
+ setTypeRepo(_repo);
+ // Fixed clock: makes generated Put timestamps deterministic
+ // (200 s -> timestamp 200000000 in the expected strings below).
+ getClock().setAbsoluteTimeInSeconds(200);
+ }
+
+ void tearDown() {
+ close();
+ }
+
+ // Feed a reply to the Update command recorded at `index` in `sender`.
+ void replyToMessage(Operation& callback,
+ MessageSenderStub& sender,
+ uint32_t index,
+ uint64_t oldTimestamp,
+ api::ReturnCode::Result result = api::ReturnCode::OK);
+
+ // Feed a reply to the Put command recorded at `index`; optionally with
+ // an mbus trace message attached.
+ void replyToPut(
+ Operation& callback,
+ MessageSenderStub& sender,
+ uint32_t index,
+ api::ReturnCode::Result result = api::ReturnCode::OK,
+ const std::string& traceMsg = "");
+
+ // Feed a reply to the CreateBucket command recorded at `index`.
+ void replyToCreateBucket(
+ Operation& callback,
+ MessageSenderStub& sender,
+ uint32_t index,
+ api::ReturnCode::Result result = api::ReturnCode::OK);
+
+ // Feed a reply to the Get command recorded at `index`; when
+ // haveDocument is true the reply carries a document whose "headerval"
+ // field equals oldTimestamp.
+ void replyToGet(
+ Operation& callback,
+ MessageSenderStub& sender,
+ uint32_t index,
+ uint64_t oldTimestamp,
+ bool haveDocument = true,
+ api::ReturnCode::Result result = api::ReturnCode::OK,
+ const std::string& traceMsg = "");
+
+ // Builder-style options consumed by sendUpdate(); the defaults produce
+ // a plain, unconditional update.
+ struct UpdateOptions {
+ bool _makeInconsistentSplit;
+ bool _createIfNonExistent;
+ bool _withError;
+ api::Timestamp _timestampToUpdate;
+ documentapi::TestAndSetCondition _condition;
+
+ UpdateOptions()
+ : _makeInconsistentSplit(false),
+ _createIfNonExistent(false),
+ _withError(false),
+ _timestampToUpdate(0),
+ _condition()
+ {
+ }
+
+ UpdateOptions& makeInconsistentSplit(bool mis) {
+ _makeInconsistentSplit = mis;
+ return *this;
+ }
+ UpdateOptions& createIfNonExistent(bool cine) {
+ _createIfNonExistent = cine;
+ return *this;
+ }
+ UpdateOptions& withError(bool error = true) {
+ _withError = error;
+ return *this;
+ }
+ UpdateOptions& timestampToUpdate(api::Timestamp ts) {
+ _timestampToUpdate = ts;
+ return *this;
+ }
+ UpdateOptions& condition(vespalib::stringref cond) {
+ _condition = documentapi::TestAndSetCondition(cond);
+ return *this;
+ }
+ };
+
+ // Build an UpdateCommand (see sendUpdate() definition), register
+ // `bucketState` in the bucket DB and wrap it all in a
+ // TwoPhaseUpdateOperation ready to be start()ed.
+ std::shared_ptr<TwoPhaseUpdateOperation>
+ sendUpdate(const std::string& bucketState,
+ const UpdateOptions& options = UpdateOptions());
+
+ void assertAbortedUpdateReplyWithContextPresent(
+ const MessageSenderStub& closeSender) const;
+
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(TwoPhaseUpdateOperationTest);
+
+void
+TwoPhaseUpdateOperationTest::replyToMessage(
+ Operation& callback,
+ MessageSenderStub& sender,
+ uint32_t index,
+ uint64_t oldTimestamp,
+ api::ReturnCode::Result result)
+{
+ // Answer the Update command recorded at `index` with a reply carrying
+ // `oldTimestamp` (the timestamp of the document the update hit) and
+ // the given result code.
+ std::shared_ptr<api::StorageMessage> msg2 = sender.commands.at(index);
+ UpdateCommand& updatec = dynamic_cast<UpdateCommand&>(*msg2);
+ std::unique_ptr<api::StorageReply> reply(updatec.makeReply());
+ static_cast<api::UpdateReply*>(reply.get())->setOldTimestamp(oldTimestamp);
+ reply->setResult(api::ReturnCode(result, ""));
+
+ callback.receive(sender,
+ std::shared_ptr<StorageReply>(reply.release()));
+}
+
+void
+TwoPhaseUpdateOperationTest::replyToPut(
+ Operation& callback,
+ MessageSenderStub& sender,
+ uint32_t index,
+ api::ReturnCode::Result result,
+ const std::string& traceMsg)
+{
+ // Answer the Put command recorded at `index` with the given result
+ // code; a non-empty traceMsg is attached as an mbus trace entry so
+ // trace-propagation tests can look for it in the final reply.
+ std::shared_ptr<api::StorageMessage> msg2 = sender.commands.at(index);
+ PutCommand& putc = dynamic_cast<PutCommand&>(*msg2);
+ std::unique_ptr<api::StorageReply> reply(putc.makeReply());
+ reply->setResult(api::ReturnCode(result, ""));
+ if (!traceMsg.empty()) {
+ MBUS_TRACE(reply->getTrace(), 1, traceMsg);
+ }
+ callback.receive(sender,
+ std::shared_ptr<StorageReply>(reply.release()));
+}
+
+void
+TwoPhaseUpdateOperationTest::replyToCreateBucket(
+ Operation& callback,
+ MessageSenderStub& sender,
+ uint32_t index,
+ api::ReturnCode::Result result)
+{
+ // Answer the CreateBucket command recorded at `index` with the given
+ // result code.
+ std::shared_ptr<api::StorageMessage> msg2 = sender.commands.at(index);
+ // Renamed from `putc`: this helper was copy-pasted from replyToPut()
+ // and the old name misleadingly suggested a PutCommand.
+ CreateBucketCommand& createc = dynamic_cast<CreateBucketCommand&>(*msg2);
+ std::unique_ptr<api::StorageReply> reply(createc.makeReply());
+ reply->setResult(api::ReturnCode(result, ""));
+ callback.receive(sender,
+ std::shared_ptr<StorageReply>(reply.release()));
+}
+
+void
+TwoPhaseUpdateOperationTest::replyToGet(
+ Operation& callback,
+ MessageSenderStub& sender,
+ uint32_t index,
+ uint64_t oldTimestamp,
+ bool haveDocument,
+ api::ReturnCode::Result result,
+ const std::string& traceMsg)
+{
+ // Answer the Get command recorded at `index`. With haveDocument the
+ // reply carries a "testdoctype1" document whose "headerval" field is
+ // set to oldTimestamp — tests use this to verify the +10 arithmetic
+ // update was applied to the newest replica. Without it, an empty
+ // GetReply (timestamp 0) signals "document not found".
+ const api::GetCommand& get(
+ static_cast<const api::GetCommand&>(*sender.commands.at(index)));
+ std::shared_ptr<api::StorageReply> reply;
+
+ if (haveDocument) {
+ auto doc(std::make_shared<Document>(
+ *_doc_type, DocumentId(DocIdString("test", "test"))));
+ // NOTE(review): uint64_t -> IntFieldValue narrows; fine for the
+ // small timestamps used in these tests, but worth confirming.
+ doc->setValue("headerval", IntFieldValue(oldTimestamp));
+
+ reply = std::make_shared<api::GetReply>(get, doc, oldTimestamp);
+ } else {
+ reply = std::make_shared<api::GetReply>(get, Document::SP(), 0);
+ }
+ reply->setResult(api::ReturnCode(result, ""));
+ if (!traceMsg.empty()) {
+ MBUS_TRACE(reply->getTrace(), 1, traceMsg);
+ }
+
+ callback.receive(sender, reply);
+}
+
+namespace {
+
+// Minimal TransportContext so sendUpdate() can attach one to the
+// UpdateCommand; no behavior is required by these tests.
+struct DummyTransportContext : api::TransportContext {
+ // No methods to implement.
+};
+
+}
+
+std::shared_ptr<TwoPhaseUpdateOperation>
+TwoPhaseUpdateOperationTest::sendUpdate(const std::string& bucketState,
+ const UpdateOptions& options)
+{
+ // Build a +10 arithmetic update on "headerval" for doc:test:test (or a
+ // deliberately mismatched-doctype update when options._withError),
+ // register `bucketState` replicas in the bucket DB, and wrap it all in
+ // a TwoPhaseUpdateOperation ready to be start()ed.
+ document::DocumentUpdate::SP update;
+ if (!options._withError) {
+ update = std::make_shared<document::DocumentUpdate>(
+ *_doc_type,
+ document::DocumentId(document::DocIdString("test", "test")));
+ document::FieldUpdate fup(_doc_type->getField("headerval"));
+ fup.addUpdate(ArithmeticValueUpdate(ArithmeticValueUpdate::Add, 10));
+ update->addUpdate(fup);
+ } else {
+ // Create an update to a different doctype than the one returned as
+ // part of the Get. Just a sneaky way to force an eval error.
+ auto* badDocType = _repo->getDocumentType("testdoctype2");
+ update = std::make_shared<document::DocumentUpdate>(
+ *badDocType,
+ document::DocumentId(document::DocIdString("test", "test")));
+ document::FieldUpdate fup(badDocType->getField("onlyinchild"));
+ fup.addUpdate(ArithmeticValueUpdate(ArithmeticValueUpdate::Add, 10));
+ update->addUpdate(fup);
+ }
+ update->setCreateIfNonExistent(options._createIfNonExistent);
+
+ document::BucketId id = getExternalOperationHandler().getBucketId(update->getId());
+ // id2: same raw id, one more used bit — a sibling bucket that makes
+ // the split state inconsistent when also registered below.
+ document::BucketId id2 = document::BucketId(id.getUsedBits() + 1, id.getRawId());
+
+ if (bucketState.length()) {
+ addNodesToBucketDB(id, bucketState);
+ }
+
+ if (options._makeInconsistentSplit) {
+ addNodesToBucketDB(id2, bucketState);
+ }
+
+ auto msg(std::make_shared<api::UpdateCommand>(
+ document::BucketId(0), update, api::Timestamp(0)));
+ // Misc settings for checking that propagation works.
+ msg->getTrace().setLevel(6);
+ msg->setTimeout(6789);
+ msg->setPriority(99);
+ if (options._timestampToUpdate) {
+ msg->setOldTimestamp(options._timestampToUpdate);
+ }
+ msg->setCondition(options._condition);
+ msg->setTransportContext(std::make_unique<DummyTransportContext>());
+
+ ExternalOperationHandler& handler = getExternalOperationHandler();
+ return std::make_shared<TwoPhaseUpdateOperation>(
+ handler, msg, getDistributor().getMetrics());
+}
+
+
+void
+TwoPhaseUpdateOperationTest::testSimple()
+{
+ // Single in-sync replica: fast path sends one Update and its reply is
+ // forwarded to the client unchanged.
+ setupDistributor(1, 1, "storage:1 distributor:1");
+
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3"));
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Update => 0"),
+ sender.getCommands(true));
+
+ replyToMessage(*cb, sender, 0, 90);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 90) ReturnCode(NONE)"),
+ sender.getLastReply(true));
+}
+
+void
+TwoPhaseUpdateOperationTest::testNonExisting()
+{
+ // No replicas registered for the bucket: the operation replies OK
+ // immediately with "timestamp of updated doc: 0" (nothing to update),
+ // without sending any commands.
+ setupDistributor(1, 1, "storage:1 distributor:1");
+
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate(""));
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 0) ReturnCode(NONE)"),
+ sender.getLastReply(true));
+}
+
+void
+TwoPhaseUpdateOperationTest::testUpdateFailed()
+{
+ // Fast path: a failing Update reply propagates its error code to the
+ // client reply, and the updated-doc timestamp is reported as 0.
+ setupDistributor(1, 1, "storage:1 distributor:1");
+
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3"));
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Update => 0"),
+ sender.getCommands(true));
+
+ replyToMessage(*cb, sender, 0, 90, api::ReturnCode::INTERNAL_FAILURE);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 0) "
+ "ReturnCode(INTERNAL_FAILURE)"),
+ sender.getLastReply(true));
+}
+
+void
+TwoPhaseUpdateOperationTest::testFastPathInconsistentTimestamps()
+{
+ // Fast path repair: the two Update replies disagree on the old
+ // timestamp (90 vs 110), so the operation Gets the doc from the node
+ // with the newest timestamp (node 1) and Puts it to both replicas
+ // before replying with "Was inconsistent (best node 1)".
+ setupDistributor(2, 2, "storage:2 distributor:1");
+
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3,1=1/2/3"));
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Update => 0,Update => 1"),
+ sender.getCommands(true));
+
+ replyToMessage(*cb, sender, 0, 90);
+ replyToMessage(*cb, sender, 1, 110);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Get(BucketId(0x4000000000008b13), doc:test:test) => 1"),
+ sender.getLastCommand(true));
+
+ replyToGet(*cb, sender, 2, 110);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Update => 0,Update => 1,Get => 1,Put => 1,Put => 0"),
+ sender.getCommands(true));
+
+ CPPUNIT_ASSERT(sender.replies.empty());
+
+ replyToPut(*cb, sender, 3);
+ replyToPut(*cb, sender, 4);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 110 Was inconsistent "
+ "(best node 1)) ReturnCode(NONE)"),
+ sender.getLastReply(true));
+}
+
+void
+TwoPhaseUpdateOperationTest::testFastPathInconsistentTimestampsNotFound()
+{
+ // Fast path repair where the follow-up Get on the "best" node finds no
+ // document: the operation fails with INTERNAL_FAILURE rather than
+ // Putting a non-existent doc.
+ setupDistributor(2, 2, "storage:2 distributor:1");
+
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3,1=1/2/3"));
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Update => 0,Update => 1"),
+ sender.getCommands(true));
+
+ replyToMessage(*cb, sender, 0, 90);
+ replyToMessage(*cb, sender, 1, 110);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Get(BucketId(0x4000000000008b13), doc:test:test) => 1"),
+ sender.getLastCommand(true));
+ CPPUNIT_ASSERT(sender.replies.empty());
+
+ replyToGet(*cb, sender, 2, 110, false);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 110 Was inconsistent "
+ "(best node 1)) ReturnCode(INTERNAL_FAILURE)"),
+ sender.getLastReply(true));
+}
+
+void
+TwoPhaseUpdateOperationTest::testFastPathInconsistentTimestampsUpdateError()
+{
+ // Fast path: if one of the Update replies fails outright, the error is
+ // returned to the client and no repair round is attempted.
+ setupDistributor(2, 2, "storage:2 distributor:1");
+
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3,1=1/2/3"));
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Update => 0,Update => 1"),
+ sender.getCommands(true));
+
+ replyToMessage(*cb, sender, 0, 90);
+ CPPUNIT_ASSERT(sender.replies.empty());
+ replyToMessage(*cb, sender, 1, 110, api::ReturnCode::IO_FAILURE);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 90) "
+ "ReturnCode(IO_FAILURE)"),
+ sender.getLastReply(true));
+}
+
+void
+TwoPhaseUpdateOperationTest::testFastPathInconsistentTimestampsGetError()
+{
+ // Fast path repair: a failing Get on the best node aborts the repair
+ // and propagates the Get's error code to the client.
+ setupDistributor(2, 2, "storage:2 distributor:1");
+
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3,1=1/2/3"));
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Update => 0,Update => 1"),
+ sender.getCommands(true));
+
+ replyToMessage(*cb, sender, 0, 90);
+ replyToMessage(*cb, sender, 1, 110);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Get(BucketId(0x4000000000008b13), doc:test:test) => 1"),
+ sender.getLastCommand(true));
+
+ CPPUNIT_ASSERT(sender.replies.empty());
+ replyToGet(*cb, sender, 2, 110, false, api::ReturnCode::IO_FAILURE);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 110 Was inconsistent "
+ "(best node 1)) ReturnCode(IO_FAILURE)"),
+ sender.getLastReply(true));
+}
+
+void
+TwoPhaseUpdateOperationTest::testFastPathInconsistentTimestampsPutError()
+{
+ // Fast path repair: a failing Put during the repair round makes the
+ // final client reply carry that error, even though the other Put
+ // succeeded.
+ setupDistributor(2, 2, "storage:2 distributor:1");
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3,1=1/2/3"));
+
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Update => 0,Update => 1"),
+ sender.getCommands(true));
+
+ replyToMessage(*cb, sender, 0, 90);
+ replyToMessage(*cb, sender, 1, 110);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Get(BucketId(0x4000000000008b13), doc:test:test) => 1"),
+ sender.getLastCommand(true));
+
+ replyToGet(*cb, sender, 2, 110);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Update => 0,Update => 1,Get => 1,Put => 1,Put => 0"),
+ sender.getCommands(true));
+
+ replyToPut(*cb, sender, 3, api::ReturnCode::IO_FAILURE);
+ CPPUNIT_ASSERT(sender.replies.empty());
+ replyToPut(*cb, sender, 4);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 110 Was inconsistent "
+ "(best node 1)) ReturnCode(IO_FAILURE)"),
+ sender.getLastReply(true));
+}
+
+void
+TwoPhaseUpdateOperationTest::testFastPathInconsistentTimestampsPutNotStarted()
+{
+ // Fast path repair: if the cluster loses all storage nodes between the
+ // Get and the Put phase, the Put cannot be started and the client gets
+ // NOT_CONNECTED.
+ setupDistributor(2, 2, "storage:2 distributor:1");
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3,1=1/2/3"));
+
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Update => 0,Update => 1"),
+ sender.getCommands(true));
+
+ replyToMessage(*cb, sender, 0, 90);
+ replyToMessage(*cb, sender, 1, 110);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Get(BucketId(0x4000000000008b13), doc:test:test) => 1"),
+ sender.getLastCommand(true));
+ checkMessageSettingsPropagatedTo(sender.commands.back());
+
+ // Remove all storage nodes before the Get reply arrives.
+ _distributor->enableClusterState(lib::ClusterState("storage:0 distributor:1"));
+ CPPUNIT_ASSERT(sender.replies.empty());
+ replyToGet(*cb, sender, 2, 110);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 110 Was inconsistent "
+ "(best node 1)) ReturnCode(NOT_CONNECTED, "
+ "Can't store document: No storage nodes available)"),
+ sender.getLastReply(true));
+}
+
+
+void
+TwoPhaseUpdateOperationTest::testFastPathInconsistentTimestampsInconsistentSplit()
+{
+ // An inconsistently split bucket (replicas registered for both the
+ // bucket and its one-bit-deeper sibling) forces the safe path even
+ // though the replica states match: Get from both buckets, then Put the
+ // newest version (timestamp 120) to the deeper bucket's replicas.
+ setupDistributor(2, 2, "storage:2 distributor:1");
+
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(
+ sendUpdate("0=1/2/3,1=1/2/3",
+ UpdateOptions().makeInconsistentSplit(true)));
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ std::string wanted("Get(BucketId(0x4000000000008b13), doc:test:test) => 0,"
+ "Get(BucketId(0x4400000000008b13), doc:test:test) => 0");
+
+ std::string text = sender.getCommands(true, true);
+ CPPUNIT_ASSERT_EQUAL(wanted, text);
+
+ replyToGet(*cb, sender, 0, 90);
+ replyToGet(*cb, sender, 1, 120);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string(
+ "Put(BucketId(0x4400000000008b13), doc:test:test, "
+ "timestamp 200000000, size 52) => 1,"
+ "Put(BucketId(0x4400000000008b13), doc:test:test, "
+ "timestamp 200000000, size 52) => 0"),
+ sender.getCommands(true, true, 2));
+
+ replyToPut(*cb, sender, 2);
+ CPPUNIT_ASSERT(sender.replies.empty());
+ replyToPut(*cb, sender, 3);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 120) "
+ "ReturnCode(NONE)"),
+ sender.getLastReply(true));
+}
+
+void
+TwoPhaseUpdateOperationTest::checkMessageSettingsPropagatedTo(
+ const api::StorageCommand::SP& msg) const
+{
+ // Settings set in sendUpdate(): trace level 6, timeout 6789 ms,
+ // priority 99 must have been copied onto the sub-command.
+ CPPUNIT_ASSERT_EQUAL(uint32_t(6), msg->getTrace().getLevel());
+ CPPUNIT_ASSERT_EQUAL(uint32_t(6789), msg->getTimeout());
+ CPPUNIT_ASSERT_EQUAL(uint8_t(99), msg->getPriority());
+}
+
+void
+TwoPhaseUpdateOperationTest::testFastPathPropagatesMessageSettingsToUpdate()
+{
+ // Fast path: trace level, timeout and priority from the client Update
+ // must be copied onto the forwarded Update command.
+ setupDistributor(1, 1, "storage:1 distributor:1");
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3"));
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Update => 0"), sender.getCommands(true));
+
+ StorageCommand::SP msg(sender.commands.back());
+ checkMessageSettingsPropagatedTo(msg);
+}
+
+void
+TwoPhaseUpdateOperationTest::testNofM()
+{
+ // With the extra setupDistributor argument set to 1 (NOTE(review):
+ // presumably "reply after N of M nodes have answered" — confirm the
+ // parameter's meaning), the client reply is sent after the first
+ // Update reply; the straggler reply must then be absorbed safely.
+ setupDistributor(2, 2, "storage:2 distributor:1", 1);
+
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3,1=1/2/3"));
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Update => 0,Update => 1"),
+ sender.getCommands(true));
+
+ CPPUNIT_ASSERT(sender.replies.empty());
+ replyToMessage(*cb, sender, 0, 90);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 90) ReturnCode(NONE)"),
+ sender.getLastReply(true));
+
+ // Late reply from the second node must not produce another client reply.
+ replyToMessage(*cb, sender, 1, 123);
+}
+
+std::string
+TwoPhaseUpdateOperationTest::getUpdatedValueFromLastPut(
+ MessageSenderStub& sender)
+{
+ // Extract the "headerval" field from the document carried by the most
+ // recently sent Put, as a string (used to verify the +10 update).
+ Document::SP doc(dynamic_cast<api::PutCommand&>(*sender.commands.back())
+ .getDocument());
+ FieldValue::UP value(doc->getValue("headerval"));
+ return value->toString();
+}
+
+void
+TwoPhaseUpdateOperationTest::testSafePathUpdatesNewestReceivedDocument()
+{
+ // Safe path: with replica 2 out of sync, Gets go to one node from each
+ // divergent group (0 and 2); the update is applied to the newest
+ // returned doc (timestamp 70) and the result Put to all three nodes.
+ setupDistributor(3, 3, "storage:3 distributor:1");
+ // 0,1 in sync. 2 out of sync.
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(
+ sendUpdate("0=1/2/3,1=1/2/3,2=2/3/4"));
+
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Get(BucketId(0x4000000000008b13), doc:test:test) => 0,"
+ "Get(BucketId(0x4000000000008b13), doc:test:test) => 2"),
+ sender.getCommands(true, true));
+ replyToGet(*cb, sender, 0, 50);
+ replyToGet(*cb, sender, 1, 70);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string(
+ "Put(BucketId(0x4000000000008b13), doc:test:test, "
+ "timestamp 200000000, size 52) => 1,"
+ "Put(BucketId(0x4000000000008b13), doc:test:test, "
+ "timestamp 200000000, size 52) => 0,"
+ "Put(BucketId(0x4000000000008b13), doc:test:test, "
+ "timestamp 200000000, size 52) => 2"),
+ sender.getCommands(true, true, 2));
+ // Make sure Put contains an updated document (+10 arith. update on field
+ // whose value equals gotten timestamp). In this case we want 70 -> 80.
+ CPPUNIT_ASSERT_EQUAL(std::string("80"), getUpdatedValueFromLastPut(sender));
+
+ replyToPut(*cb, sender, 2);
+ replyToPut(*cb, sender, 3);
+ CPPUNIT_ASSERT(sender.replies.empty());
+ replyToPut(*cb, sender, 4);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 70) "
+ "ReturnCode(NONE)"),
+ sender.getLastReply(true));
+}
+
+void
+TwoPhaseUpdateOperationTest::testCreateIfNonExistentCreatesDocumentIfAllEmptyGets()
+{
+ // Safe path + create-if-non-existent: when every Get comes back empty,
+ // the distributor synthesizes a fresh document (update applied to
+ // defaults: 0 + 10 = 10) and Puts it to all replicas; the reported
+ // updated-doc timestamp is the newly assigned Put timestamp.
+ setupDistributor(3, 3, "storage:3 distributor:1");
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(
+ sendUpdate("0=1/2/3,1=1/2/3,2=2/3/4",
+ UpdateOptions().createIfNonExistent(true)));
+
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Get => 0,Get => 2"),
+ sender.getCommands(true));
+ replyToGet(*cb, sender, 0, 0, false);
+ replyToGet(*cb, sender, 1, 0, false);
+ // Since create-if-non-existent is set, distributor should create doc from
+ // scratch.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string(
+ "Put(BucketId(0x4000000000008b13), doc:test:test, "
+ "timestamp 200000000, size 52) => 1,"
+ "Put(BucketId(0x4000000000008b13), doc:test:test, "
+ "timestamp 200000000, size 52) => 0,"
+ "Put(BucketId(0x4000000000008b13), doc:test:test, "
+ "timestamp 200000000, size 52) => 2"),
+ sender.getCommands(true, true, 2));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("10"), getUpdatedValueFromLastPut(sender));
+
+ replyToPut(*cb, sender, 2);
+ replyToPut(*cb, sender, 3);
+ CPPUNIT_ASSERT(sender.replies.empty());
+ replyToPut(*cb, sender, 4);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 200000000) "
+ "ReturnCode(NONE)"),
+ sender.getLastReply(true));
+}
+
+void
+TwoPhaseUpdateOperationTest::testUpdateFailsIfSafePathHasFailedPut()
+{
+ // Safe path: if any of the write-phase Puts fails, the client reply
+ // carries that error even though the other Puts succeeded.
+ setupDistributor(3, 3, "storage:3 distributor:1");
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(
+ sendUpdate("0=1/2/3,1=1/2/3,2=2/3/4",
+ UpdateOptions().createIfNonExistent(true)));
+
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Get => 0,Get => 2"),
+ sender.getCommands(true));
+ replyToGet(*cb, sender, 0, 0, false);
+ replyToGet(*cb, sender, 1, 0, false);
+ // Since create-if-non-existent is set, distributor should create doc from
+ // scratch.
+ CPPUNIT_ASSERT_EQUAL(std::string("Put => 1,Put => 0,Put => 2"),
+ sender.getCommands(true, false, 2));
+
+ replyToPut(*cb, sender, 2);
+ replyToPut(*cb, sender, 3);
+ CPPUNIT_ASSERT(sender.replies.empty());
+ replyToPut(*cb, sender, 4, api::ReturnCode::IO_FAILURE);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 200000000) "
+ "ReturnCode(IO_FAILURE)"),
+ sender.getLastReply(true));
+}
+
+void
+TwoPhaseUpdateOperationTest::testUpdateFailsIfSafePathGetsFail()
+{
+ // Safe path: when all read-phase Gets fail, the operation gives up and
+ // propagates the error without entering the write phase.
+ setupDistributor(2, 2, "storage:2 distributor:1");
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(
+ sendUpdate("0=1/2/3,1=2/3/4",
+ UpdateOptions().createIfNonExistent(true)));
+
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Get => 0,Get => 1"),
+ sender.getCommands(true));
+ replyToGet(*cb, sender, 0, 0, false, api::ReturnCode::IO_FAILURE);
+ CPPUNIT_ASSERT(sender.replies.empty());
+ replyToGet(*cb, sender, 1, 0, false, api::ReturnCode::IO_FAILURE);
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 0) "
+ "ReturnCode(IO_FAILURE)"),
+ sender.getLastReply(true));
+}
+
+void
+TwoPhaseUpdateOperationTest::testUpdateFailsIfApplyThrowsException()
+{
+ // Safe path: applying an update whose doctype does not match the
+ // fetched document throws; the exception is turned into an
+ // INTERNAL_FAILURE reply with the underlying message.
+ setupDistributor(2, 2, "storage:2 distributor:1");
+ // Create update for wrong doctype which will fail the update.
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(
+ sendUpdate("0=1/2/3,1=2/3/4", UpdateOptions().withError()));
+
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Get => 0,Get => 1"),
+ sender.getCommands(true));
+ replyToGet(*cb, sender, 0, 50);
+ CPPUNIT_ASSERT(sender.replies.empty());
+ replyToGet(*cb, sender, 1, 70);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 70) "
+ "ReturnCode(INTERNAL_FAILURE, Can not apply a "
+ "\"testdoctype2\" document update to a "
+ "\"testdoctype1\" document.)"),
+ sender.getLastReply(true));
+}
+
+void
+TwoPhaseUpdateOperationTest::testNonExistingWithAutoCreate()
+{
+ // No bucket exists yet + create-if-non-existent: the distributor must
+ // first CreateBucket (marked active) and then Put a freshly created
+ // document (defaults + 10 = 10) to it.
+ setupDistributor(1, 1, "storage:1 distributor:1");
+
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(
+ sendUpdate("", UpdateOptions().createIfNonExistent(true)));
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string(
+ "CreateBucketCommand(BucketId(0x4000000000008b13), active) "
+ "Reasons to start: => 0,"
+ "Put(BucketId(0x4000000000008b13), doc:test:test, "
+ "timestamp 200000000, size 52) => 0"),
+ sender.getCommands(true, true));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("10"), getUpdatedValueFromLastPut(sender));
+
+ replyToCreateBucket(*cb, sender, 0);
+ CPPUNIT_ASSERT(sender.replies.empty());
+ replyToPut(*cb, sender, 1);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 200000000) "
+ "ReturnCode(NONE)"),
+ sender.getLastReply(true));
+}
+
+void
+TwoPhaseUpdateOperationTest::testSafePathFailsUpdateWhenMismatchingTimestampConstraint()
+{
+ // Safe path with an explicit old-timestamp constraint (1234): neither
+ // fetched doc (100, 110) matches, so no Put is sent and the reply says
+ // "No document with requested timestamp found" — with ReturnCode NONE,
+ // i.e. this is reported as a no-op, not an error.
+ setupDistributor(2, 2, "storage:2 distributor:1");
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(
+ sendUpdate("0=1/2/3,1=2/3/4",
+ UpdateOptions().timestampToUpdate(1234)));
+
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Get => 0,Get => 1"),
+ sender.getCommands(true));
+ replyToGet(*cb, sender, 0, 100);
+ CPPUNIT_ASSERT(sender.replies.empty());
+ replyToGet(*cb, sender, 1, 110);
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 0) "
+ "ReturnCode(NONE, No document with requested "
+ "timestamp found)"),
+ sender.getLastReply(true));
+}
+
+void
+TwoPhaseUpdateOperationTest::testSafePathUpdatePropagatesMessageSettingsToGetsAndPuts()
+{
+ // Safe path: trace level, timeout and priority from the client Update
+ // must be copied onto every Get and every Put the operation sends.
+ setupDistributor(3, 3, "storage:3 distributor:1");
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(
+ sendUpdate("0=1/2/3,1=1/2/3,2=2/3/4"));
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Get => 0,Get => 2"),
+ sender.getCommands(true));
+ checkMessageSettingsPropagatedTo(sender.commands.at(0));
+ checkMessageSettingsPropagatedTo(sender.commands.at(1));
+ replyToGet(*cb, sender, 0, 50);
+ replyToGet(*cb, sender, 1, 70);
+ CPPUNIT_ASSERT_EQUAL(std::string("Put => 1,Put => 0,Put => 2"),
+ sender.getCommands(true, false, 2));
+ checkMessageSettingsPropagatedTo(sender.commands.at(2));
+ checkMessageSettingsPropagatedTo(sender.commands.at(3));
+ checkMessageSettingsPropagatedTo(sender.commands.at(4));
+ replyToPut(*cb, sender, 2);
+ replyToPut(*cb, sender, 3);
+ replyToPut(*cb, sender, 4);
+}
+
+void
+TwoPhaseUpdateOperationTest::testSafePathPropagatesMbusTracesFromReplies()
+{
+ // Safe path: mbus trace entries attached to Get and Put replies must
+ // all be aggregated into the trace of the final client UpdateReply.
+ setupDistributor(3, 3, "storage:3 distributor:1");
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(
+ sendUpdate("0=1/2/3,1=1/2/3,2=2/3/4"));
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Get => 0,Get => 2"),
+ sender.getCommands(true));
+ replyToGet(*cb, sender, 0, 50, true,
+ api::ReturnCode::OK, "hello earthlings");
+ replyToGet(*cb, sender, 1, 70);
+ CPPUNIT_ASSERT_EQUAL(std::string("Put => 1,Put => 0,Put => 2"),
+ sender.getCommands(true, false, 2));
+ replyToPut(*cb, sender, 2, api::ReturnCode::OK, "fooo");
+ replyToPut(*cb, sender, 3, api::ReturnCode::OK, "baaa");
+ CPPUNIT_ASSERT(sender.replies.empty());
+ replyToPut(*cb, sender, 4);
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Update Reply"),
+ sender.getLastReply(false));
+
+ std::string trace(sender.replies.back()->getTrace().toString());
+ //std::cout << "\n\n" << trace << "\n\n";
+ CPPUNIT_ASSERT(trace.find("hello earthlings") != std::string::npos);
+ CPPUNIT_ASSERT(trace.find("fooo") != std::string::npos);
+ CPPUNIT_ASSERT(trace.find("baaa") != std::string::npos);
+}
+
+// Verifies that if bucket ownership is lost between the read (Get) and write
+// (Put) phases of a safe-path update, the operation fails with the transient
+// BUCKET_NOT_FOUND error instead of writing to a bucket it no longer owns.
+void
+TwoPhaseUpdateOperationTest::testUpdateFailsIfOwnershipChangesBetweenGetAndPut()
+{
+ setupDistributor(2, 2, "storage:2 distributor:1");
+
+ // Update towards inconsistent bucket invokes safe path.
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(
+ sendUpdate("0=1/2/3,1=2/3/4"));
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Get => 0,Get => 1"),
+ sender.getCommands(true));
+
+ // Alter cluster state so that distributor is now down (technically the
+ // entire cluster is down in this state, but this should not matter). In
+ // this new state, the distributor no longer owns the bucket in question
+ // and the operation should thus be failed. We must not try to send Puts
+ // to a bucket we no longer own.
+ _distributor->enableClusterState(
+ lib::ClusterState("storage:2 distributor:1 .0.s:d"));
+ getBucketDatabase().clear();
+ replyToGet(*cb, sender, 0, 70);
+ replyToGet(*cb, sender, 1, 70);
+
+ // BUCKET_NOT_FOUND is a transient error code which should cause the client
+ // to re-send the operation, presumably to the correct distributor the next
+ // time.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 70) "
+ "ReturnCode(BUCKET_NOT_FOUND, Distributor lost "
+ "ownership of bucket between executing the read "
+ "and write phases of a two-phase update operation)"),
+ sender.getLastReply(true));
+}
+
+// Verifies that a test-and-set condition which does not match the newest
+// document version fails the update with TEST_AND_SET_CONDITION_FAILED and
+// sends no Puts.
+void
+TwoPhaseUpdateOperationTest::testSafePathConditionMismatchFailsWithTasError()
+{
+ setupDistributor(2, 2, "storage:2 distributor:1");
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(
+ sendUpdate("0=1/2/3,1=2/3/4", UpdateOptions().condition(
+ "testdoctype1.headerval==120")));
+
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+ // Newest doc has headerval==110, not 120.
+ replyToGet(*cb, sender, 0, 100);
+ replyToGet(*cb, sender, 1, 110);
+ CPPUNIT_ASSERT_EQUAL(
+ "UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 0) "
+ "ReturnCode(TEST_AND_SET_CONDITION_FAILED, "
+ "Condition did not match document)"s,
+ sender.getLastReply(true));
+}
+
+// Verifies that when the test-and-set condition matches the newest document,
+// the write phase proceeds and Puts are sent to all replica nodes.
+void
+TwoPhaseUpdateOperationTest::testSafePathConditionMatchSendsPutsWithUpdatedDoc()
+{
+ setupDistributor(2, 2, "storage:2 distributor:1");
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(
+ sendUpdate("0=1/2/3,1=2/3/4", UpdateOptions().condition(
+ "testdoctype1.headerval==110")));
+
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+ // Newest doc (from node 1) has headerval==110, so the condition matches.
+ replyToGet(*cb, sender, 0, 100);
+ replyToGet(*cb, sender, 1, 110);
+ CPPUNIT_ASSERT_EQUAL("Put => 1,Put => 0"s,
+ sender.getCommands(true, false, 2));
+}
+
+// Verifies that a syntactically invalid test-and-set condition fails the
+// update with ILLEGAL_PARAMETERS once the condition is evaluated.
+void
+TwoPhaseUpdateOperationTest::testSafePathConditionParseFailureFailsWithIllegalParamsError()
+{
+ setupDistributor(2, 2, "storage:2 distributor:1");
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(
+ sendUpdate("0=1/2/3,1=2/3/4", UpdateOptions().condition(
+ "testdoctype1.san==fran...cisco")));
+
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+ replyToGet(*cb, sender, 0, 100);
+ replyToGet(*cb, sender, 1, 110);
+ // NOTE: condition is currently not attempted parsed until Gets have been
+ // replied to. This may change in the future.
+ // XXX reliance on parser/exception error message is very fragile.
+ CPPUNIT_ASSERT_EQUAL(
+ "UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 0) "
+ "ReturnCode(ILLEGAL_PARAMETERS, "
+ "Failed to parse test and set condition: "
+ "Unexpected token at position 16 "
+ "('==fran...c') in query 'testdoctype1."
+ "san==fran...cisco',)"s,
+ sender.getLastReply(true));
+}
+
+// Verifies that a test-and-set condition referencing an unknown document type
+// fails the update with ILLEGAL_PARAMETERS.
+// NOTE(review): "Conditon" in the method name is a typo ("Condition"); it must
+// match the declaration in the header, so it cannot be fixed here alone.
+void
+TwoPhaseUpdateOperationTest::testSafePathConditonUnknownDocTypeFailsWithIllegalParamsError()
+{
+ setupDistributor(2, 2, "storage:2 distributor:1");
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(
+ sendUpdate("0=1/2/3,1=2/3/4", UpdateOptions().condition(
+ "langbein.headerval=1234")));
+
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+ replyToGet(*cb, sender, 0, 100);
+ replyToGet(*cb, sender, 1, 110);
+ // NOTE: condition is currently not attempted parsed until Gets have been
+ // replied to. This may change in the future.
+ CPPUNIT_ASSERT_EQUAL(
+ "UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 0) "
+ "ReturnCode(ILLEGAL_PARAMETERS, "
+ "Failed to parse test and set condition: "
+ "Document type langbein not found)"s,
+ sender.getLastReply(true));
+}
+
+// Verifies that a conditional update against a document that does not exist on
+// any replica fails with TEST_AND_SET_CONDITION_FAILED ("Document did not
+// exist") rather than creating the document.
+void
+TwoPhaseUpdateOperationTest::testSafePathConditionWithMissingDocFailsWithTasError()
+{
+ setupDistributor(2, 2, "storage:2 distributor:1");
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(
+ sendUpdate("0=1/2/3,1=2/3/4", UpdateOptions().condition(
+ "testdoctype1.headerval==120")));
+
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+ // Both Gets return nothing at all, nothing at all.
+ replyToGet(*cb, sender, 0, 100, false);
+ replyToGet(*cb, sender, 1, 110, false);
+ CPPUNIT_ASSERT_EQUAL(
+ "UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 0) "
+ "ReturnCode(TEST_AND_SET_CONDITION_FAILED, "
+ "Document did not exist)"s,
+ sender.getLastReply(true));
+}
+
+// Helper: asserts that closeSender holds exactly one reply, that it is an
+// ABORTED UpdateReply, and that it still owns the original command's transport
+// context. A present context proves the reply is bound to the real client
+// command and is not a rogue/duplicate reply (the context is a unique_ptr
+// moved into the first reply constructed from the command).
+void
+TwoPhaseUpdateOperationTest::assertAbortedUpdateReplyWithContextPresent(
+ const MessageSenderStub& closeSender) const
+{
+ CPPUNIT_ASSERT_EQUAL(size_t(1), closeSender.replies.size());
+ StorageReply::SP reply(closeSender.replies.back());
+ CPPUNIT_ASSERT_EQUAL(api::MessageType::UPDATE_REPLY, reply->getType());
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode::ABORTED,
+ reply->getResult().getResult());
+ auto context = reply->getTransportContext(); // Transfers ownership
+ CPPUNIT_ASSERT(context.get());
+}
+
+// Verifies that closing the operation while a fast-path Update is pending
+// produces exactly one ABORTED reply bound to the original client command.
+void
+TwoPhaseUpdateOperationTest::testFastPathCloseEdgeSendsCorrectReply()
+{
+ setupDistributor(1, 1, "storage:1 distributor:1");
+ // Only 1 replica; consistent with itself by definition.
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3"));
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL("Update => 0"s, sender.getCommands(true));
+ // Close the operation. This should generate a single reply that is
+ // bound to the original command. We can identify rogue replies by these
+ // not having a transport context, as these are unique_ptrs that are
+ // moved to the reply upon the first reply construction. Any subsequent or
+ // erroneous replies will not have this context attached to themselves.
+ MessageSenderStub closeSender;
+ cb->onClose(closeSender);
+
+ assertAbortedUpdateReplyWithContextPresent(closeSender);
+}
+
+// Verifies that closing the operation while safe-path Gets are pending
+// produces exactly one ABORTED reply for the client's UpdateCommand, and no
+// reply leaks out from the nested Get operation.
+void
+TwoPhaseUpdateOperationTest::testSafePathCloseEdgeSendsCorrectReply()
+{
+ setupDistributor(2, 2, "storage:2 distributor:1");
+
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(
+ sendUpdate("0=1/2/3,1=2/3/4")); // Inconsistent replicas.
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Get => 0,Get => 1"),
+ sender.getCommands(true));
+ // Closing the operation should now only return an ABORTED reply for
+ // the UpdateCommand, _not_ from the nested, pending Get operation (which
+ // will implicitly generate an ABORTED reply for the synthesized Get
+ // command passed to it).
+ MessageSenderStub closeSender;
+ cb->onClose(closeSender);
+
+ assertAbortedUpdateReplyWithContextPresent(closeSender);
+}
+
+// XXX currently differs in behavior from content nodes in that updates for
+// document IDs without explicit doctypes will _not_ be auto-failed on the
+// distributor.
+
+// XXX shouldn't be necessary to have any special handling of create-if... and
+// test-and-set right? They appear fully mutually exclusive.
+
+// XXX: test case where update reply has been sent but callback still
+// has pending messages (e.g. n-of-m case).
+
+} // distributor
+} // storage
diff --git a/storage/src/tests/distributor/updateoperationtest.cpp b/storage/src/tests/distributor/updateoperationtest.cpp
new file mode 100644
index 00000000000..912d0235e42
--- /dev/null
+++ b/storage/src/tests/distributor/updateoperationtest.cpp
@@ -0,0 +1,210 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <cppunit/extensions/HelperMacros.h>
+#include <iomanip>
+#include <iostream>
+#include <memory>
+#include <tests/common/dummystoragelink.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storageapi/message/state.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <tests/distributor/distributortestutil.h>
+#include <vespa/config/helper/configgetter.h>
+#include <vespa/document/config/config-documenttypes.h>
+#include <tests/distributor/messagesenderstub.h>
+#include <vespa/storage/distributor/operations/external/updateoperation.h>
+
+using std::shared_ptr;
+using namespace document;
+using namespace storage;
+using namespace storage::distributor;
+using namespace storage::api;
+using namespace std;
+using namespace storage::lib;
+using config::ConfigGetter;
+using config::FileSpec;
+using vespalib::string;
+
+// CppUnit fixture exercising the distributor's UpdateOperation in isolation:
+// sends an UpdateCommand, simulates storage-node UpdateReplies, and checks
+// the reply forwarded to the client and the resulting bucket DB state.
+class UpdateOperation_Test : public CppUnit::TestFixture,
+ public DistributorTestUtil
+{
+ CPPUNIT_TEST_SUITE(UpdateOperation_Test);
+ CPPUNIT_TEST(testSimple);
+ CPPUNIT_TEST(testNotFound);
+ CPPUNIT_TEST(testMultiNode);
+ CPPUNIT_TEST(testMultiNodeInconsistentTimestamp);
+ CPPUNIT_TEST_SUITE_END();
+
+ // Document type repository loaded from config-doctypes.cfg in setUp().
+ DocumentTypeRepo::SP _repo;
+ // "text/html" document type used to build the test DocumentUpdate.
+ const DocumentType *_html_type;
+
+protected:
+ void testSimple();
+ void testNotFound();
+ void testMultiNode();
+ void testMultiNodeInconsistentTimestamp();
+
+public:
+ void setUp() {
+ _repo.reset(
+ new DocumentTypeRepo(*ConfigGetter<DocumenttypesConfig>::
+ getConfig("config-doctypes", FileSpec("config-doctypes.cfg"))));
+ _html_type = _repo->getDocumentType("text/html");
+ createLinks();
+ }
+
+ void tearDown() {
+ close();
+ }
+
+ // Crafts an UpdateReply for the command at sender.commands[index], stamps
+ // it with oldTimestamp and info, and feeds it back into the callback.
+ void replyToMessage(
+ UpdateOperation& callback,
+ MessageSenderStub& sender,
+ uint32_t index,
+ uint64_t oldTimestamp,
+ api::BucketInfo info = api::BucketInfo(2,4,6));
+
+ // Builds a DocumentUpdate for doc "test:test", seeds the bucket DB from
+ // bucketState, and returns a ready-to-start UpdateOperation.
+ std::shared_ptr<UpdateOperation>
+ sendUpdate(const std::string& bucketState);
+
+ // Bucket id of the test document; recorded by sendUpdate().
+ document::BucketId _bId;
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(UpdateOperation_Test);
+
+// Builds an empty DocumentUpdate for document "test:test" (text/html type),
+// records its bucket id in _bId, populates the bucket DB from the
+// "node=checksum/docs/bytes" spec in bucketState, and wraps an UpdateCommand
+// with client timestamp 100 in a new UpdateOperation.
+std::shared_ptr<UpdateOperation>
+UpdateOperation_Test::sendUpdate(const std::string& bucketState)
+{
+ document::DocumentUpdate::SP update(
+ new document::DocumentUpdate(
+ *_html_type,
+ document::DocumentId(document::DocIdString("test", "test"))));
+
+ _bId = getExternalOperationHandler().getBucketId(update->getId());
+
+ addNodesToBucketDB(_bId, bucketState);
+
+ // BucketId(0) in the command: the distributor resolves the real bucket.
+ std::shared_ptr<api::UpdateCommand> msg(
+ new api::UpdateCommand(document::BucketId(0),
+ update,
+ 100));
+
+ ExternalOperationHandler& handler = getExternalOperationHandler();
+ return std::shared_ptr<UpdateOperation>(
+ new UpdateOperation(handler,
+ msg,
+ getDistributor().getMetrics().updates[msg->getLoadType()]));
+}
+
+
+// Simulates a storage node answering the Update command at
+// sender.commands[index]: makes the matching reply, sets the old document
+// timestamp and bucket info on it, and hands it to the operation callback.
+void
+UpdateOperation_Test::replyToMessage(
+ UpdateOperation& callback,
+ MessageSenderStub& sender,
+ uint32_t index,
+ uint64_t oldTimestamp,
+ api::BucketInfo info)
+{
+ std::shared_ptr<api::StorageMessage> msg2 = sender.commands[index];
+ // NOTE(review): dynamic_cast result is dereferenced without a null check;
+ // a non-Update command at this index would crash the test rather than
+ // fail an assertion.
+ UpdateCommand* updatec = dynamic_cast<UpdateCommand*>(msg2.get());
+ std::unique_ptr<api::StorageReply> reply(updatec->makeReply());
+ UpdateReply* updateR = static_cast<api::UpdateReply*>(reply.get());
+ updateR->setOldTimestamp(oldTimestamp);
+ updateR->setBucketInfo(info);
+
+ callback.onReceive(sender,
+ std::shared_ptr<StorageReply>(reply.release()));
+}
+
+// Single-node happy path: one Update is sent, and the reply carries the
+// storage node's old document timestamp (90) with ReturnCode NONE.
+void
+UpdateOperation_Test::testSimple()
+{
+ setupDistributor(1, 1, "storage:1 distributor:1");
+
+ std::shared_ptr<UpdateOperation> cb(sendUpdate("0=1/2/3"));
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Update => 0"),
+ sender.getCommands(true));
+
+ replyToMessage(*cb, sender, 0, 90);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 100, timestamp of updated doc: 90) ReturnCode(NONE)"),
+ sender.getLastReply(true));
+}
+
+// Document not found on the node: old timestamp 0 is propagated in the reply,
+// and the operation still succeeds with ReturnCode NONE.
+void
+UpdateOperation_Test::testNotFound()
+{
+ setupDistributor(1, 1, "storage:1 distributor:1");
+
+ std::shared_ptr<UpdateOperation> cb(sendUpdate("0=1/2/3"));
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Update => 0"),
+ sender.getCommands(true));
+
+ replyToMessage(*cb, sender, 0, 0);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 100, timestamp of updated doc: 0) ReturnCode(NONE)"),
+ sender.getLastReply(true));
+}
+
+// Two replicas agreeing on the old timestamp: Updates go to both nodes, the
+// client reply reports that timestamp, and the bucket DB reflects the bucket
+// info (checksum 0x2, 4 docs, 6 bytes) returned by the replies.
+void
+UpdateOperation_Test::testMultiNode()
+{
+ setupDistributor(2, 2, "distributor:1 storage:2");
+ std::shared_ptr<UpdateOperation> cb(sendUpdate("0=1/2/3,1=1/2/3"));
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Update => 0,Update => 1"),
+ sender.getCommands(true));
+
+ replyToMessage(*cb, sender, 0, 120);
+ replyToMessage(*cb, sender, 1, 120);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 100, timestamp of updated doc: 120) ReturnCode(NONE)"),
+ sender.getLastReply(true));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string(
+ _bId.toString() + " : "
+ "node(idx=1,crc=0x2,docs=4/4,bytes=6/6,trusted=true,active=false), "
+ "node(idx=0,crc=0x2,docs=4/4,bytes=6/6,trusted=true,active=false)"),
+ dumpBucket(_bId));
+}
+
+// Two replicas reporting different old timestamps (119 vs 120): the reply
+// flags the inconsistency and names the node with the newest document.
+void
+UpdateOperation_Test::testMultiNodeInconsistentTimestamp()
+{
+ setupDistributor(2, 2, "distributor:1 storage:2");
+ std::shared_ptr<UpdateOperation> cb(sendUpdate("0=1/2/3,1=1/2/3"));
+ MessageSenderStub sender;
+ cb->start(sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("Update => 0,Update => 1"),
+ sender.getCommands(true));
+
+ replyToMessage(*cb, sender, 0, 119);
+ replyToMessage(*cb, sender, 1, 120);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 100, timestamp of updated doc: 120 Was inconsistent "
+ "(best node 1)) ReturnCode(NONE)"),
+ sender.getLastReply(true));
+}
diff --git a/storage/src/tests/distributor/visitoroperationtest.cpp b/storage/src/tests/distributor/visitoroperationtest.cpp
new file mode 100644
index 00000000000..a8f28a73fb6
--- /dev/null
+++ b/storage/src/tests/distributor/visitoroperationtest.cpp
@@ -0,0 +1,1646 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <iomanip>
+#include <iostream>
+#include <memory>
+#include <math.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <vespa/storageapi/message/bucketsplitting.h>
+#include <vespa/storageapi/message/datagram.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storageapi/message/state.h>
+#include <vespa/storage/distributor/operations/external/visitoroperation.h>
+#include <vespa/storage/distributor/operations/external/visitororder.h>
+#include <tests/distributor/distributortestutil.h>
+#include <tests/common/dummystoragelink.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+
+using namespace document;
+using namespace storage::api;
+using namespace storage::lib;
+using namespace std::string_literals;
+
+namespace storage {
+namespace distributor {
+
+// CppUnit fixture for the distributor's VisitorOperation: builds
+// CreateVisitorCommands, drives them against a seeded bucket DB, and checks
+// which storage-node visitors are spawned and what reply the client gets.
+// NOTE(review): testAbortNonExisting, testAbort and
+// testUserVisitorOrderInconsistentlySplit are declared below but never
+// registered in the CPPUNIT_TEST list, so they will not run — confirm whether
+// that is intentional.
+class VisitorOperationTest : public CppUnit::TestFixture,
+ public DistributorTestUtil {
+ CPPUNIT_TEST_SUITE(VisitorOperationTest);
+ CPPUNIT_TEST(testParameterForwarding);
+ CPPUNIT_TEST(testShutdown);
+ CPPUNIT_TEST(testNoBucket);
+ CPPUNIT_TEST(testOnlySuperBucketAndProgressAllowed);
+ CPPUNIT_TEST(testRetiredStorageNode);
+ CPPUNIT_TEST(testNoResendAfterTimeoutPassed);
+ CPPUNIT_TEST(testDistributorNotReady);
+ CPPUNIT_TEST(testInvalidOrderDocSelection);
+ CPPUNIT_TEST(testNonExistingBucket);
+ CPPUNIT_TEST(testUserSingleBucket);
+ CPPUNIT_TEST(testUserInconsistentlySplitBucket);
+ CPPUNIT_TEST(testBucketRemovedWhileVisitorPending);
+ CPPUNIT_TEST(testEmptyBucketsVisitedWhenVisitingRemoves);
+ CPPUNIT_TEST(testResendToOtherStorageNodeOnFailure);
+ CPPUNIT_TEST(testTimeoutOnlyAfterReplyFromAllStorageNodes);
+ CPPUNIT_TEST(testTimeoutDoesNotOverrideCriticalError);
+ CPPUNIT_TEST(testWrongDistribution);
+ CPPUNIT_TEST(testWrongDistributionInPendingState);
+ CPPUNIT_TEST(testVisitorAbortedIfNodeIsMarkedAsDown);
+ CPPUNIT_TEST(testBucketHighBitCount);
+ CPPUNIT_TEST(testBucketLowBitCount);
+ CPPUNIT_TEST(testParallelVisitorsToOneStorageNode);
+ CPPUNIT_TEST(testParallelVisitorsResendOnlyFailing);
+ CPPUNIT_TEST(testParallelVisitorsToOneStorageNodeOneSuperBucket);
+ CPPUNIT_TEST(testVisitWhenOneBucketCopyIsInvalid);
+ CPPUNIT_TEST(testVisitingWhenAllBucketsAreInvalid);
+ CPPUNIT_TEST(testInconsistencyHandling);
+ CPPUNIT_TEST(testVisitIdealNode);
+ CPPUNIT_TEST(testNoResendingOnCriticalFailure);
+ CPPUNIT_TEST(testFailureOnAllNodes);
+ CPPUNIT_TEST(testVisitOrder);
+ CPPUNIT_TEST(testVisitInChunks);
+ CPPUNIT_TEST(testVisitOrderSplitPastOrderBits);
+ CPPUNIT_TEST(testVisitOrderInconsistentlySplit);
+ CPPUNIT_TEST(testUserVisitorOrder);
+ CPPUNIT_TEST(testUserVisitorOrderSplitPastOrderBits);
+ CPPUNIT_TEST(testNoClientReplyBeforeAllStorageRepliesReceived);
+ CPPUNIT_TEST(testSkipFailedSubBucketsWhenVisitingInconsistent);
+ CPPUNIT_TEST(testQueueTimeoutIsFactorOfTotalTimeout);
+ CPPUNIT_TEST_SUITE_END();
+
+protected:
+ void testParameterForwarding();
+ void testShutdown();
+ void testNoBucket();
+ void testOnlySuperBucketAndProgressAllowed();
+ void testRetiredStorageNode();
+ void testNoResendAfterTimeoutPassed();
+ void testDistributorNotReady();
+ void testInvalidOrderDocSelection();
+ void testNonExistingBucket();
+ void testUserSingleBucket();
+ void testUserInconsistentlySplitBucket();
+ void testBucketRemovedWhileVisitorPending();
+ void testEmptyBucketsVisitedWhenVisitingRemoves();
+ void testResendToOtherStorageNodeOnFailure();
+ void testTimeoutOnlyAfterReplyFromAllStorageNodes();
+ void testTimeoutDoesNotOverrideCriticalError();
+ void testAbortNonExisting();
+ void testAbort();
+ void testWrongDistribution();
+ void testWrongDistributionInPendingState();
+ void testVisitorAbortedIfNodeIsMarkedAsDown();
+ void testBucketHighBitCount();
+ void testBucketLowBitCount();
+ void testParallelVisitorsToOneStorageNode();
+ void testParallelVisitorsResendOnlyFailing();
+ void testParallelVisitorsToOneStorageNodeOneSuperBucket();
+ void testVisitWhenOneBucketCopyIsInvalid();
+ void testVisitingWhenAllBucketsAreInvalid();
+ void testInconsistencyHandling();
+ void testVisitIdealNode();
+ void testNoResendingOnCriticalFailure();
+ void testFailureOnAllNodes();
+ void testVisitOrder();
+ void testVisitInChunks();
+ void testVisitOrderSplitPastOrderBits();
+ void testVisitOrderInconsistentlySplit();
+ void testUserVisitorOrder();
+ void testUserVisitorOrderSplitPastOrderBits();
+ void testUserVisitorOrderInconsistentlySplit();
+ void testNoClientReplyBeforeAllStorageRepliesReceived();
+ void testSkipFailedSubBucketsWhenVisitingInconsistent();
+ void testQueueTimeoutIsFactorOfTotalTimeout();
+public:
+ VisitorOperationTest()
+ : defaultConfig(framework::MilliSecTime(0),
+ 100,
+ 100)
+ {}
+
+ // NOTE(review): stray ';' after the setUp body below — harmless inside a
+ // class definition, but inconsistent with tearDown.
+ void setUp() {
+ createLinks();
+ nullId = document::BucketId(0, 0);
+ doneId = document::BucketId(INT_MAX);
+ };
+
+ void tearDown() {
+ close();
+ }
+
+ // Max pending replies allowed per visitor in these tests.
+ enum {MAX_PENDING = 2};
+private:
+ document::BucketId nullId;
+ document::BucketId doneId;
+ VisitorOperation::Config defaultConfig;
+
+ // Builds a CreateVisitorCommand targeting superBucket with progress
+ // lastBucket, with the test defaults below; all knobs are overridable.
+ api::CreateVisitorCommand::SP
+ createVisitorCommand(std::string instanceId,
+ document::BucketId superBucket,
+ document::BucketId lastBucket,
+ uint32_t maxBuckets = 8,
+ uint32_t timeoutMS = 500,
+ bool visitInconsistentBuckets = false,
+ bool visitRemoves = false,
+ std::string libraryName = "dumpvisitor",
+ document::OrderingSpecification::Order visitorOrdering =
+ document::OrderingSpecification::ASCENDING,
+ const std::string& docSelection = "")
+ {
+ api::CreateVisitorCommand::SP cmd(
+ new api::CreateVisitorCommand(libraryName, instanceId, docSelection));
+ cmd->setControlDestination("controldestination");
+ cmd->setDataDestination("datadestination");
+ cmd->setFieldSet("[header]");
+ if (visitRemoves) {
+ cmd->setVisitRemoves();
+ }
+ cmd->setFromTime(10);
+ cmd->setToTime(100);
+
+ cmd->addBucketToBeVisited(superBucket);
+ cmd->addBucketToBeVisited(lastBucket);
+
+ cmd->setMaximumPendingReplyCount(VisitorOperationTest::MAX_PENDING);
+ cmd->setMaxBucketsPerVisitor(maxBuckets);
+ cmd->setTimeout(timeoutMS);
+ if (visitInconsistentBuckets) {
+ cmd->setVisitInconsistentBuckets();
+ }
+ cmd->setVisitorOrdering(visitorOrdering);
+ return cmd;
+ }
+
+ // Renders the CreateVisitorCommand at _sender.commands[idx] (default:
+ // the most recent one) together with its bucket list, for comparison
+ // against expected strings.
+ std::string
+ serializeVisitorCommand(int idx = -1) {
+ if (idx == -1) {
+ idx = _sender.commands.size() - 1;
+ }
+
+ std::ostringstream ost;
+
+ CreateVisitorCommand* cvc = dynamic_cast<CreateVisitorCommand*>(
+ _sender.commands[idx].get());
+
+ ost << *cvc << " Buckets: [ ";
+ for (uint32_t i = 0; i < cvc->getBuckets().size(); ++i) {
+ ost << cvc->getBuckets()[i] << " ";
+ }
+ ost << "]";
+ return ost.str();
+ }
+
+ /**
+ Starts a visitor where we expect no createVisitorCommands to be sent
+ to storage, either due to error or due to no data actually stored.
+ */
+ std::string runEmptyVisitor(api::CreateVisitorCommand::SP msg) {
+ VisitorOperation op(getExternalOperationHandler(),
+ msg,
+ defaultConfig);
+ op.start(_sender, framework::MilliSecTime(0));
+ return _sender.getLastReply();
+ }
+
+ // Returns the bucket list of the most recently sent CreateVisitorCommand.
+ const std::vector<BucketId>& getBucketsFromLastCommand() {
+ const CreateVisitorCommand& cvc(
+ dynamic_cast<const CreateVisitorCommand&>(
+ *_sender.commands[_sender.commands.size() - 1]));
+ return cvc.getBuckets();
+ }
+
+ std::pair<std::string, std::string>
+ runVisitor(document::BucketId id,
+ document::BucketId lastId,
+ uint32_t maxBuckets);
+
+ std::string doOrderedVisitor(document::BucketId startBucket);
+
+ void doStandardVisitTest(const std::string& clusterState);
+
+ std::unique_ptr<VisitorOperation> startOperationWith2StorageNodeVisitors(
+ bool inconsistent);
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(VisitorOperationTest);
+
+// Runs the standard parameter-forwarding check against a healthy
+// one-node cluster.
+void
+VisitorOperationTest::testParameterForwarding()
+{
+ doStandardVisitTest("distributor:1 storage:1");
+}
+
+// Shared scenario: under the given cluster state, starts a fully-parameterized
+// visitor over one known bucket and asserts that every client-supplied
+// parameter (library, instance id, selection, destinations, pending count,
+// max buckets, time range, field set, visit-removes, timeout, trace level) is
+// forwarded onto the CreateVisitorCommand sent to the storage node, then that
+// a successful reply completes the visit.
+void
+VisitorOperationTest::doStandardVisitTest(const std::string& clusterState)
+{
+ _distributor->enableClusterState(ClusterState(clusterState));
+
+ // Create bucket in bucketdb
+ document::BucketId id(uint64_t(0x400000000000007b));
+ addNodesToBucketDB(id, "0=1/1/1/t");
+
+ // Send create visitor
+ vespalib::string instanceId("testParameterForwarding");
+ vespalib::string libraryName("dumpvisitor");
+ vespalib::string docSelection("");
+ api::CreateVisitorCommand::SP msg(
+ new api::CreateVisitorCommand(libraryName,
+ instanceId,
+ docSelection));
+ vespalib::string controlDestination("controldestination");
+ msg->setControlDestination(controlDestination);
+ vespalib::string dataDestination("datadestination");
+ msg->setDataDestination(dataDestination);
+ msg->setMaximumPendingReplyCount(VisitorOperationTest::MAX_PENDING);
+ msg->setMaxBucketsPerVisitor(8);
+ msg->setFromTime(10);
+ msg->setToTime(0);
+ msg->addBucketToBeVisited(id);
+ msg->addBucketToBeVisited(nullId);
+ msg->setFieldSet("[header]");
+ msg->setVisitRemoves();
+ msg->setTimeout(1234);
+ msg->getTrace().setLevel(7);
+
+ VisitorOperation op(getExternalOperationHandler(),
+ msg,
+ defaultConfig);
+
+ op.start(_sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
+ _sender.getCommands(true));
+
+ // Receive create visitor command for storage and simulate reply
+ api::StorageMessage::SP rep0 = _sender.commands[0];
+ CreateVisitorCommand* cvc = dynamic_cast<CreateVisitorCommand*>(rep0.get());
+ CPPUNIT_ASSERT(cvc);
+ CPPUNIT_ASSERT_EQUAL(libraryName, cvc->getLibraryName());
+ CPPUNIT_ASSERT_EQUAL(instanceId, cvc->getInstanceId().substr(0, instanceId.length()));
+ CPPUNIT_ASSERT_EQUAL(docSelection, cvc->getDocumentSelection());
+ CPPUNIT_ASSERT_EQUAL(controlDestination, cvc->getControlDestination());
+ CPPUNIT_ASSERT_EQUAL(dataDestination, cvc->getDataDestination());
+ CPPUNIT_ASSERT_EQUAL((unsigned int) VisitorOperationTest::MAX_PENDING, cvc->getMaximumPendingReplyCount());
+ CPPUNIT_ASSERT_EQUAL((unsigned int) 8, cvc->getMaxBucketsPerVisitor());
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, cvc->getBuckets().size());
+ CPPUNIT_ASSERT_EQUAL((api::Timestamp) 10, cvc->getFromTime());
+ // toTime was set to 0 by the client; a positive value here shows the
+ // distributor substituted a real end time.
+ CPPUNIT_ASSERT(cvc->getToTime() > 0);
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("[header]"), cvc->getFieldSet());
+ CPPUNIT_ASSERT_EQUAL((bool) 1, cvc->visitRemoves());
+ CPPUNIT_ASSERT_EQUAL(uint32_t(1234), cvc->getTimeout());
+ CPPUNIT_ASSERT_EQUAL(uint32_t(7), cvc->getTrace().getLevel());
+
+ sendReply(op);
+
+ CPPUNIT_ASSERT_EQUAL(std::string("CreateVisitorReply("
+ "last=BucketId(0x000000007fffffff)) "
+ "ReturnCode(NONE)"),
+ _sender.getLastReply());
+}
+
+// Verifies that closing (shutting down) a visitor operation with a pending
+// storage-node visitor fails the client visitor with ABORTED and zero
+// progress.
+void
+VisitorOperationTest::testShutdown()
+{
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:1"));
+
+ // Create bucket in bucketdb
+ document::BucketId id(uint64_t(0x400000000000007b));
+ addNodesToBucketDB(id, "0=1/1/1/t");
+
+ // Send create visitor
+ vespalib::string instanceId("testShutdown");
+ vespalib::string libraryName("dumpvisitor");
+ vespalib::string docSelection("");
+ api::CreateVisitorCommand::SP msg(
+ new api::CreateVisitorCommand(libraryName,
+ instanceId,
+ docSelection));
+ msg->addBucketToBeVisited(id);
+ msg->addBucketToBeVisited(nullId);
+
+ VisitorOperation op(getExternalOperationHandler(), msg, defaultConfig);
+
+ op.start(_sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
+ _sender.getCommands(true));
+
+ op.onClose(_sender); // This will fail the visitor
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(ABORTED, Process is shutting down)"),
+ _sender.getLastReply());
+}
+
+// A CreateVisitorCommand without any buckets must be rejected with
+// ILLEGAL_PARAMETERS before anything is sent to storage.
+void
+VisitorOperationTest::testNoBucket()
+{
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:1"));
+
+ // Send create visitor
+ api::CreateVisitorCommand::SP msg(new api::CreateVisitorCommand(
+ "dumpvisitor", "instance", ""));
+
+ CPPUNIT_ASSERT_EQUAL(std::string(
+ "CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(ILLEGAL_PARAMETERS, No buckets in "
+ "CreateVisitorCommand for visitor 'instance')"),
+ runEmptyVisitor(msg));
+}
+
+// The command must contain exactly two buckets (super bucket + progress);
+// three buckets is rejected with ILLEGAL_PARAMETERS.
+void
+VisitorOperationTest::testOnlySuperBucketAndProgressAllowed()
+{
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:1"));
+
+ // Send create visitor
+ api::CreateVisitorCommand::SP msg(new api::CreateVisitorCommand(
+ "dumpvisitor", "instance", ""));
+ msg->addBucketToBeVisited(nullId);
+ msg->addBucketToBeVisited(nullId);
+ msg->addBucketToBeVisited(nullId);
+
+ CPPUNIT_ASSERT_EQUAL(std::string(
+ "CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(ILLEGAL_PARAMETERS, CreateVisitorCommand "
+ "does not contain 2 buckets for visitor "
+ "'instance')"),
+ runEmptyVisitor(msg));
+}
+
+// A retired storage node (.0.s:r) must still serve visitors; the standard
+// scenario is expected to pass unchanged.
+void
+VisitorOperationTest::testRetiredStorageNode()
+{
+ doStandardVisitTest("distributor:1 storage:1 .0.s:r");
+}
+
+// Verifies that a retryable failure (BUSY) arriving after the visitor's
+// timeout has already elapsed is not resent to another node, but aborts
+// the visitor with a timeout-flavored ABORTED error.
+void
+VisitorOperationTest::testNoResendAfterTimeoutPassed()
+{
+ document::BucketId id(uint64_t(0x400000000000007b));
+
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:2"));
+ addNodesToBucketDB(id, "0=1/1/1/t,1=1/1/1/t");
+
+ // 20 ms visitor timeout; see createVisitorCommand defaults for the rest.
+ VisitorOperation op(
+ getExternalOperationHandler(),
+ createVisitorCommand("lowtimeoutbusy", id, nullId, 8, 20),
+ defaultConfig);
+
+ op.start(_sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
+ _sender.getCommands(true));
+
+ // Advance the fake clock past the 20 ms timeout before the BUSY reply.
+ getClock().addMilliSecondsToTime(22);
+
+ sendReply(op, -1, api::ReturnCode::BUSY);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string(
+ "CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(ABORTED, Timeout of 20 ms is running out)"),
+ _sender.getLastReply());
+}
+
+// With no distributors available in the cluster state, the visitor is
+// rejected up-front with NODE_NOT_READY.
+void
+VisitorOperationTest::testDistributorNotReady()
+{
+ _distributor->enableClusterState(ClusterState("distributor:0 storage:0"));
+ document::BucketId id(uint64_t(0x400000000000007b));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string(
+ "CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(NODE_NOT_READY, No distributors available when "
+ "processing visitor 'notready')"),
+ runEmptyVisitor(createVisitorCommand("notready", id, nullId)));
+}
+
+// The distributor only parses the document selection in the ordered-document
+// case. That case is detected by first checking whether the selection string
+// contains the substring "order" — which any reference to "id.order"
+// necessarily does.
+// An ordered-doc selection referencing an unknown document type must fail
+// the visitor with ILLEGAL_PARAMETERS and the parser's error message.
+void
+VisitorOperationTest::testInvalidOrderDocSelection()
+{
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:1"));
+ document::BucketId id(0x400000000000007b);
+ addNodesToBucketDB(id, "0=1/1/1/t");
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(ILLEGAL_PARAMETERS, Failed to parse document select "
+ "string 'id.order(10,3)=1 and dummy': Document type dummy not found)"),
+ runEmptyVisitor(
+ createVisitorCommand("invalidOrderDoc",
+ id,
+ nullId,
+ 8,
+ 500,
+ false,
+ false,
+ "dumpvisitor",
+ document::OrderingSpecification::ASCENDING,
+ "id.order(10,3)=1 and dummy")));
+}
+
+// Visiting a bucket with no entries in the bucket DB completes immediately
+// with success and the "done" progress marker (0x7fffffff).
+void
+VisitorOperationTest::testNonExistingBucket()
+{
+ document::BucketId id(uint64_t(0x400000000000007b));
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:1"));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorReply(last=BucketId(0x000000007fffffff)) "
+ "ReturnCode(NONE)"),
+ runEmptyVisitor(
+ createVisitorCommand("nonExistingBucket",
+ id,
+ nullId)));
+}
+
+// A user-bucket visit (super bucket given as a user bucket id) that maps to a
+// single stored bucket spawns one storage visitor and completes successfully.
+void
+VisitorOperationTest::testUserSingleBucket()
+{
+ document::BucketId id(uint64_t(0x400000000000007b));
+ document::BucketId userid(uint64_t(0x800000000000007b));
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:1"));
+
+ addNodesToBucketDB(id, "0=1/1/1/t");
+
+ VisitorOperation op(getExternalOperationHandler(),
+ createVisitorCommand("userSingleBucket",
+ userid,
+ nullId,
+ 8,
+ 500,
+ false,
+ false,
+ "dumpvisitor",
+ document::OrderingSpecification::ASCENDING,
+ "true"),
+ defaultConfig);
+
+ op.start(_sender, framework::MilliSecTime(0));
+
+ // On mismatch, include the (unexpected) last reply in the failure message.
+ CPPUNIT_ASSERT_EQUAL_MSG(_sender.getLastReply(),
+ std::string("Visitor Create => 0"),
+ _sender.getCommands(true));
+ sendReply(op);
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorReply(last=BucketId(0x000000007fffffff)) "
+ "ReturnCode(NONE)"),
+ _sender.getLastReply());
+}
+
+// Helper: runs a single visitor pass over (id, lastId) with the given bucket
+// cap, replies success to the spawned storage visitor, and returns the pair
+// (serialized CreateVisitorCommand with its bucket list, last client reply).
+// Clears _sender afterwards so passes can be chained.
+std::pair<std::string, std::string>
+VisitorOperationTest::runVisitor(document::BucketId id,
+ document::BucketId lastId,
+ uint32_t maxBuckets)
+{
+ VisitorOperation op(getExternalOperationHandler(),
+ createVisitorCommand("inconsistentSplit",
+ id,
+ lastId,
+ maxBuckets,
+ 500,
+ false,
+ false,
+ "dumpvisitor",
+ document::OrderingSpecification::ASCENDING,
+ "true"),
+ defaultConfig);
+
+ op.start(_sender, framework::MilliSecTime(0));
+
+ sendReply(op);
+
+ std::pair<std::string, std::string> retVal =
+ std::make_pair(serializeVisitorCommand(), _sender.getLastReply());
+
+ _sender.clear();
+
+ return retVal;
+}
+
+// Seeds the bucket DB with buckets at several split levels around
+// (19, 0x40001) and verifies that a user visit of that bucket selects exactly
+// the ancestors, the bucket itself, and its descendants — not the unrelated
+// siblings — and then completes successfully.
+void
+VisitorOperationTest::testUserInconsistentlySplitBucket()
+{
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:1"));
+
+ // Not containing (19, 0x40001)
+ addNodesToBucketDB(document::BucketId(17, 0x0), "0=1/1/1/t");
+ addNodesToBucketDB(document::BucketId(18, 0x20001), "0=1/1/1/t");
+ addNodesToBucketDB(document::BucketId(19, 0x1), "0=1/1/1/t");
+
+ // Containing (19, 0x40001)
+ addNodesToBucketDB(document::BucketId(17, 0x1), "0=1/1/1/t");
+ addNodesToBucketDB(document::BucketId(18, 0x1), "0=1/1/1/t");
+
+ // Equal to (19, 0x40001)
+ addNodesToBucketDB(document::BucketId(19, 0x40001), "0=1/1/1/t");
+
+ // Contained in (19, 0x40001)
+ addNodesToBucketDB(document::BucketId(20, 0x40001), "0=1/1/1/t");
+ addNodesToBucketDB(document::BucketId(20, 0xc0001), "0=1/1/1/t");
+ addNodesToBucketDB(document::BucketId(21, 0x40001), "0=1/1/1/t");
+ addNodesToBucketDB(document::BucketId(21, 0x140001), "0=1/1/1/t");
+
+ document::BucketId id(19, 0x40001);
+
+ {
+ std::pair<std::string, std::string> val(
+ runVisitor(id, nullId, 100));
+
+ CPPUNIT_ASSERT_EQUAL(std::string(
+ "CreateVisitorCommand(dumpvisitor, true, 7 buckets) "
+ "Buckets: [ BucketId(0x4400000000000001) "
+ "BucketId(0x4800000000000001) "
+ "BucketId(0x4c00000000040001) "
+ "BucketId(0x5000000000040001) "
+ "BucketId(0x5400000000040001) "
+ "BucketId(0x5400000000140001) "
+ "BucketId(0x50000000000c0001) ]"),
+ val.first);
+
+ CPPUNIT_ASSERT_EQUAL(std::string(
+ "CreateVisitorReply(last=BucketId(0x000000007fffffff)) "
+ "ReturnCode(NONE)"),
+ val.second);
+ }
+}
+
+// If the bucket is removed from the bucket DB while its storage visitor is
+// in flight and that visitor then fails (NOT_CONNECTED), there is nowhere
+// left to retry, so the client gets BUCKET_NOT_FOUND with a null last-bucket.
+void
+VisitorOperationTest::testBucketRemovedWhileVisitorPending()
+{
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:1"));
+
+ // Create bucket in bucketdb
+ document::BucketId id(uint64_t(0x400000000000007b));
+
+ addNodesToBucketDB(id, "0=1/1/1/t");
+
+ VisitorOperation op(getExternalOperationHandler(),
+ createVisitorCommand("removefrombucketdb",
+ id,
+ nullId),
+ defaultConfig);
+
+ op.start(_sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
+ _sender.getCommands(true));
+
+ // Simulate concurrent removal before the failure reply arrives.
+ removeFromBucketDB(id);
+
+ sendReply(op, -1, api::ReturnCode::NOT_CONNECTED);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(BUCKET_NOT_FOUND)"),
+ _sender.getLastReply());
+}
+
+// A bucket with zero documents must still be visited when the command sets
+// visitRemoves=true (remove-entries may exist even in an "empty" bucket).
+void
+VisitorOperationTest::testEmptyBucketsVisitedWhenVisitingRemoves()
+{
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:1"));
+ document::BucketId id(uint64_t(0x400000000000007b));
+ // Bucket registered with 0 docs/0 bytes.
+ addNodesToBucketDB(id, "0=0/0/0/1/2/t");
+
+ VisitorOperation op(getExternalOperationHandler(),
+ createVisitorCommand("emptybucket",
+ id,
+ nullId,
+ 8,
+ 500,
+ false,
+ true),
+ defaultConfig);
+
+ op.start(_sender, framework::MilliSecTime(0));
+
+ // Since visitRemoves is true, the empty bucket will be visited
+ CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
+ _sender.getCommands(true));
+}
+
+// With replicas on two nodes, a transient failure (NOT_CONNECTED) from the
+// first node must not fail the client operation; instead the visitor is
+// resent to the other replica node.
+void
+VisitorOperationTest::testResendToOtherStorageNodeOnFailure()
+{
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:2"));
+ document::BucketId id(uint64_t(0x400000000000007b));
+
+ addNodesToBucketDB(id, "0=1/1/1/t,1=1/1/1/t");
+
+ VisitorOperation op(getExternalOperationHandler(),
+ createVisitorCommand("emptyinconsistent",
+ id,
+ nullId),
+ defaultConfig);
+
+ op.start(_sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
+ _sender.getCommands(true));
+
+ sendReply(op, -1, api::ReturnCode::NOT_CONNECTED);
+ // No client reply yet; a retry towards node 1 has been issued instead.
+ CPPUNIT_ASSERT_EQUAL(""s, _sender.getReplies());
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0,Visitor Create => 1"),
+ _sender.getCommands(true));
+}
+
+// Since MessageBus handles timeouts for us implicitly, we make the assumption
+// that we can safely wait for all replies to be received before sending a
+// client reply and that this won't cause things to hang for indeterminate
+// amounts of time.
+// Two sub-buckets on two different nodes; the 500 ms timeout elapses while
+// node 1's reply is still pending. The timeout (ABORTED) reply to the client
+// must be deferred until every outstanding storage reply has arrived.
+void
+VisitorOperationTest::testTimeoutOnlyAfterReplyFromAllStorageNodes()
+{
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:2"));
+
+ // Contained in (16, 0x1)
+ addNodesToBucketDB(document::BucketId(17, 0x00001), "0=1/1/1/t");
+ addNodesToBucketDB(document::BucketId(17, 0x10001), "1=1/1/1/t");
+
+ VisitorOperation op(getExternalOperationHandler(),
+ createVisitorCommand("timeout2bucketson2nodes",
+ document::BucketId(16, 1),
+ nullId,
+ 8),
+ defaultConfig);
+
+ op.start(_sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL("Visitor Create => 0,Visitor Create => 1"s,
+ _sender.getCommands(true));
+
+ // Push the clock past the 500 ms visitor timeout.
+ getClock().addMilliSecondsToTime(501);
+
+ sendReply(op, 0);
+ CPPUNIT_ASSERT_EQUAL(""s, _sender.getReplies()); // No reply yet.
+
+ sendReply(op, 1, api::ReturnCode::BUSY);
+
+ // Last processed bucket is node 0's (17, 0x00001) sub-bucket.
+ CPPUNIT_ASSERT_EQUAL(
+ "CreateVisitorReply(last=BucketId(0x4400000000000001)) "
+ "ReturnCode(ABORTED, Timeout of 500 ms is running out)"s,
+ _sender.getLastReply());
+
+ // XXX This is sub-optimal in the case that we time out but all storage
+ // visitors return OK, as we'll then be failing an operation that
+ // technically went fine. However, this is assumed to happen sufficiently
+ // rarely (requires timing to be so that mbus timeouts don't happen for
+ // neither client -> distributor nor distributor -> storage for the
+ // operation to possibly could have been considered successful) that we
+ // don't bother to add complexity for handling it as a special case.
+}
+
+// Even when the operation has already timed out, a critical storage error
+// (INTERNAL_FAILURE) must be reported to the client rather than being masked
+// by the timeout ABORTED code.
+void
+VisitorOperationTest::testTimeoutDoesNotOverrideCriticalError()
+{
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:2"));
+ addNodesToBucketDB(document::BucketId(17, 0x00001), "0=1/1/1/t");
+ addNodesToBucketDB(document::BucketId(17, 0x10001), "1=1/1/1/t");
+
+ VisitorOperation op(getExternalOperationHandler(),
+ createVisitorCommand("timeout2bucketson2nodes",
+ document::BucketId(16, 1),
+ nullId,
+ 8,
+ 500), // ms timeout
+ defaultConfig);
+
+ op.start(_sender, framework::MilliSecTime(0));
+ CPPUNIT_ASSERT_EQUAL("Visitor Create => 0,Visitor Create => 1"s,
+ _sender.getCommands(true));
+
+ getClock().addMilliSecondsToTime(501);
+ // Technically has timed out at this point, but should still report the
+ // critical failure.
+ sendReply(op, 0, api::ReturnCode::INTERNAL_FAILURE);
+ // Still waits for node 1 before replying to the client.
+ CPPUNIT_ASSERT_EQUAL(""s, _sender.getReplies());
+ sendReply(op, 1, api::ReturnCode::BUSY);
+
+ CPPUNIT_ASSERT_EQUAL(
+ "CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(INTERNAL_FAILURE, [from content node 0] )"s,
+ _sender.getLastReply());
+}
+
+// A visitor for a bucket this distributor does not own must be rejected with
+// WRONG_DISTRIBUTION carrying the current cluster state string.
+void
+VisitorOperationTest::testWrongDistribution()
+{
+ setupDistributor(1, 100, "distributor:100 storage:2");
+
+ document::BucketId id(uint64_t(0x400000000000127b));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(WRONG_DISTRIBUTION, distributor:100 storage:2)"),
+ runEmptyVisitor(createVisitorCommand("wrongdist", id, nullId)));
+}
+
+// If ownership is lost in a *pending* (not yet enabled) cluster state, the
+// WRONG_DISTRIBUTION reply must carry the pending state's string so the
+// client re-routes against the newer state version.
+void
+VisitorOperationTest::testWrongDistributionInPendingState()
+{
+ // Force bucket to belong to this distributor in currently enabled state.
+ setupDistributor(1, 100, "distributor:1 storage:2");
+ // Trigger pending cluster state. Note: increase in storage node count
+ // to force resending of bucket info requests.
+ auto stateCmd = std::make_shared<api::SetSystemStateCommand>(
+ lib::ClusterState("distributor:100 storage:3"));
+ getBucketDBUpdater().onSetSystemState(stateCmd);
+
+ document::BucketId id(uint64_t(0x400000000000127b));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(WRONG_DISTRIBUTION, distributor:100 storage:3)"),
+ runEmptyVisitor(createVisitorCommand("wrongdistpending", id, nullId)));
+}
+
+// If the current node state changes, this alters the node's cluster state
+// internally without this change being part of a new version. As a result,
+// we cannot answer with WRONG_DISTRIBUTION as the client expects to see a
+// higher version number.
+// See ticket 6353382 for details.
+// Own distributor node is marked down (.0.s:s) in the cluster state: reply
+// with ABORTED ("shutting down") instead of WRONG_DISTRIBUTION, since no
+// newer state version exists for the client to act on (see comment above).
+void
+VisitorOperationTest::testVisitorAbortedIfNodeIsMarkedAsDown()
+{
+ setupDistributor(1, 10, "distributor:10 .0.s:s storage:10");
+
+ document::BucketId id(uint64_t(0x400000000000127b));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(ABORTED, Distributor is shutting down)"),
+ runEmptyVisitor(createVisitorCommand("wrongdist", id, nullId)));
+}
+
+// Visiting a bucket with more used bits (18) than the cluster's distribution
+// bit count (bits:16). The plain empty-visitor run yields WRONG_DISTRIBUTION,
+// while a visitor with an explicit document selection still dispatches to
+// node 0.
+// NOTE(review): visitor id strings "buckethigbit"/"buckethighbitcount" look
+// like copy-paste typos of "buckethighbit" — harmless, but worth confirming.
+void
+VisitorOperationTest::testBucketHighBitCount()
+{
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:1 bits:16"));
+
+ document::BucketId id(18, 0x0);
+ addNodesToBucketDB(id, "0=1/1/1/t");
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(WRONG_DISTRIBUTION, distributor:1 storage:1)"),
+ runEmptyVisitor(createVisitorCommand("buckethigbit", id, nullId)));
+
+ VisitorOperation op(getExternalOperationHandler(),
+ createVisitorCommand("buckethighbitcount",
+ id,
+ nullId,
+ 8,
+ 500,
+ false,
+ false,
+ "dumpvisitor",
+ document::OrderingSpecification::ASCENDING,
+ "true"),
+ defaultConfig);
+
+ op.start(_sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
+ _sender.getCommands(true));
+}
+
+// Visiting a bucket with fewer used bits (1) than the cluster's distribution
+// bit count (bits:16): both the empty visitor and the selection visitor are
+// rejected with WRONG_DISTRIBUTION.
+void
+VisitorOperationTest::testBucketLowBitCount()
+{
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:1 bits:16"));
+
+ document::BucketId id(1, 0x0);
+ addNodesToBucketDB(id, "0=1/1/1/t");
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(WRONG_DISTRIBUTION, distributor:1 storage:1)"),
+ runEmptyVisitor(createVisitorCommand("bucketlowbit", id, nullId)));
+
+ VisitorOperation op(getExternalOperationHandler(),
+ createVisitorCommand("buckethighbitcount",
+ id,
+ nullId,
+ 8,
+ 500,
+ false,
+ false,
+ "dumpvisitor",
+ document::OrderingSpecification::ASCENDING,
+ "true"),
+ defaultConfig);
+
+ op.start(_sender, framework::MilliSecTime(0));
+ // Unlike the high-bit-count case, this variant is also rejected.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(WRONG_DISTRIBUTION, distributor:1 storage:1)"),
+ _sender.getLastReply());
+}
+
+// 32 sub-buckets on one node with maxVisitorsPerNode=4: the 31-bucket chunk
+// is split into 4 parallel visitors (8+8+8+7 buckets). After all replies,
+// the client reply's last-bucket marks the resume point; a follow-up visitor
+// continuing from that point covers the remainder and finishes with the
+// done-sentinel.
+void
+VisitorOperationTest::testParallelVisitorsToOneStorageNode()
+{
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:1"));
+
+ // Create buckets in bucketdb
+ for (int i=0; i<32; i++) {
+ document::BucketId id(21, i*0x10000 + 0x0001);
+ addNodesToBucketDB(id, "0=1/1/1/t");
+ }
+
+ document::BucketId id(16, 1);
+
+ VisitorOperation op(getExternalOperationHandler(),
+ createVisitorCommand("multiplebuckets",
+ id,
+ nullId,
+ 31),
+ VisitorOperation::Config(
+ framework::MilliSecTime(0),
+ 1,
+ 4));
+
+ op.start(_sender, framework::MilliSecTime(0));
+
+ // Four parallel visitors towards the same node.
+ CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0,Visitor Create => 0,"
+ "Visitor Create => 0,Visitor Create => 0"),
+ _sender.getCommands(true));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorCommand(dumpvisitor, , 8 buckets) Buckets: [ "
+ "BucketId(0x5400000000000001) BucketId(0x5400000000040001) "
+ "BucketId(0x5400000000020001) BucketId(0x5400000000060001) "
+ "BucketId(0x5400000000010001) BucketId(0x5400000000050001) "
+ "BucketId(0x5400000000030001) BucketId(0x5400000000070001) ]"),
+ serializeVisitorCommand(0));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorCommand(dumpvisitor, , 8 buckets) Buckets: [ "
+ "BucketId(0x5400000000100001) BucketId(0x5400000000140001) "
+ "BucketId(0x5400000000120001) BucketId(0x5400000000160001) "
+ "BucketId(0x5400000000110001) BucketId(0x5400000000150001) "
+ "BucketId(0x5400000000130001) BucketId(0x5400000000170001) ]"),
+ serializeVisitorCommand(1));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorCommand(dumpvisitor, , 8 buckets) Buckets: [ "
+ "BucketId(0x5400000000080001) BucketId(0x54000000000c0001) "
+ "BucketId(0x54000000000a0001) BucketId(0x54000000000e0001) "
+ "BucketId(0x5400000000090001) BucketId(0x54000000000d0001) "
+ "BucketId(0x54000000000b0001) BucketId(0x54000000000f0001) ]"),
+ serializeVisitorCommand(2));
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorCommand(dumpvisitor, , 7 buckets) Buckets: [ "
+ "BucketId(0x5400000000180001) BucketId(0x54000000001c0001) "
+ "BucketId(0x54000000001a0001) BucketId(0x54000000001e0001) "
+ "BucketId(0x5400000000190001) BucketId(0x54000000001d0001) "
+ "BucketId(0x54000000001b0001) ]"),
+ serializeVisitorCommand(3));
+
+ for (uint32_t i = 0; i < 4; ++i) {
+ sendReply(op, i);
+ }
+
+ // Partial completion: client gets the resume bucket, not the sentinel.
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorReply(last=BucketId(0x54000000000f0001)) "
+ "ReturnCode(NONE)"),
+ _sender.getLastReply());
+
+ _sender.clear();
+
+ // Second pass continues from the returned last-bucket.
+ uint32_t minBucketsPerVisitor = 1;
+ uint32_t maxVisitorsPerNode = 4;
+ VisitorOperation op2(getExternalOperationHandler(),
+ createVisitorCommand("multiplebuckets",
+ id,
+ document::BucketId(0x54000000000f0001),
+ 31),
+ VisitorOperation::Config(
+ framework::MilliSecTime(0),
+ minBucketsPerVisitor,
+ maxVisitorsPerNode));
+
+ op2.start(_sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
+ _sender.getCommands(true));
+
+ sendReply(op2);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorReply(last=BucketId(0x000000007fffffff)) "
+ "ReturnCode(NONE)"),
+ _sender.getLastReply());
+}
+
+// With four parallel visitors to node 0, only the two that fail with
+// NOT_CONNECTED are resent (to replica node 1); the two still-pending
+// visitors are left alone, and once all six complete the client gets the
+// partial-completion reply.
+void
+VisitorOperationTest::testParallelVisitorsResendOnlyFailing()
+{
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:2"));
+
+ // Create buckets in bucketdb
+ for (int i=0; i<32; i++) {
+ document::BucketId id(21, i*0x10000 + 0x0001);
+ addNodesToBucketDB(id, "0=1/1/1/t,1=1/1/1/t");
+ }
+
+ document::BucketId id(16, 1);
+
+ uint32_t minBucketsPerVisitor = 5;
+ uint32_t maxVisitorsPerNode = 4;
+ VisitorOperation op(getExternalOperationHandler(),
+ createVisitorCommand("multiplebuckets",
+ id,
+ nullId,
+ 31),
+ VisitorOperation::Config(
+ framework::MilliSecTime(0),
+ minBucketsPerVisitor,
+ maxVisitorsPerNode));
+
+ op.start(_sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0,Visitor Create => 0,"
+ "Visitor Create => 0,Visitor Create => 0"),
+ _sender.getCommands(true));
+
+ // Fail the first two visitors only.
+ for (uint32_t i = 0; i < 2; ++i) {
+ sendReply(op, i, api::ReturnCode::NOT_CONNECTED);
+ }
+
+ // Two extra visitors resent towards node 1 for the failed subsets.
+ CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0,Visitor Create => 0,"
+ "Visitor Create => 0,Visitor Create => 0,"
+ "Visitor Create => 1,Visitor Create => 1"),
+ _sender.getCommands(true));
+
+ for (uint32_t i = 2; i < 6; ++i) {
+ sendReply(op, i);
+ }
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorReply(last=BucketId(0x54000000000f0001)) "
+ "ReturnCode(NONE)"),
+ _sender.getLastReply());
+}
+
+// Eight sub-buckets of one super-bucket on a single node, with
+// minBucketsPerVisitor=5: they must be batched into a single visitor rather
+// than parallelized, and completion yields the done-sentinel.
+void
+VisitorOperationTest::testParallelVisitorsToOneStorageNodeOneSuperBucket()
+{
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:1"));
+
+ // Create buckets in bucketdb
+ for (int i=0; i<8; i++) {
+ document::BucketId id(0x8c000000e3362b6aULL+i*0x100000000ull);
+ addNodesToBucketDB(id, "0=1/1/1/t");
+ }
+
+ document::BucketId id(16, 0x2b6a);
+
+ VisitorOperation op(getExternalOperationHandler(),
+ createVisitorCommand("multiplebucketsonesuper",
+ id,
+ nullId),
+ VisitorOperation::Config(
+ framework::MilliSecTime(0),
+ 5,
+ 4));
+
+ op.start(_sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
+ _sender.getCommands(true));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorCommand(dumpvisitor, , 8 buckets) Buckets: [ "
+ "BucketId(0x8c000000e3362b6a) BucketId(0x8c000004e3362b6a) "
+ "BucketId(0x8c000002e3362b6a) BucketId(0x8c000006e3362b6a) "
+ "BucketId(0x8c000001e3362b6a) BucketId(0x8c000005e3362b6a) "
+ "BucketId(0x8c000003e3362b6a) BucketId(0x8c000007e3362b6a) ]"),
+ serializeVisitorCommand(0));
+
+ sendReply(op);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorReply(last=BucketId(0x000000007fffffff)) "
+ "ReturnCode(NONE)"),
+ _sender.getLastReply());
+}
+
+// One replica has valid info, the other is invalid (0/0/1): without the
+// visit-inconsistent flag the operation fails fast with BUCKET_NOT_FOUND.
+void
+VisitorOperationTest::testVisitWhenOneBucketCopyIsInvalid()
+{
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:2"));
+
+ document::BucketId id(16, 0);
+
+ addNodesToBucketDB(id, "0=100,1=0/0/1");
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(BUCKET_NOT_FOUND)"),
+ runEmptyVisitor(createVisitorCommand("incompletehandling",
+ id,
+ nullId)));
+}
+
+// Both replicas are invalid (0/0/1): the visitor fails with BUCKET_NOT_FOUND
+// since no trusted copy can be visited.
+void
+VisitorOperationTest::testVisitingWhenAllBucketsAreInvalid()
+{
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:2"));
+
+ document::BucketId id(16, 0);
+
+ addNodesToBucketDB(id, "0=0/0/1,1=0/0/1");
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(BUCKET_NOT_FOUND)"),
+ runEmptyVisitor(createVisitorCommand("allincompletehandling",
+ id,
+ nullId)));
+}
+
+// Replicas disagree (1/1/1 vs 2/2/2). A normal visitor fails with
+// BUCKET_NOT_FOUND; with visitInconsistentBuckets=true the operation
+// proceeds (here towards node 1, which holds the larger copy) and completes
+// successfully.
+void
+VisitorOperationTest::testInconsistencyHandling()
+{
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:2"));
+
+ document::BucketId id(16, 0);
+
+ addNodesToBucketDB(id, "0=1/1/1,1=2/2/2");
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(BUCKET_NOT_FOUND)"),
+ runEmptyVisitor(createVisitorCommand("testinconsistencyhandling",
+ id,
+ nullId)));
+ _sender.clear();
+
+ // Retry with the visit-inconsistent flag set (6th argument = true).
+ VisitorOperation op(getExternalOperationHandler(),
+ createVisitorCommand("multiplebucketsonesuper",
+ id,
+ nullId,
+ 8,
+ 500,
+ true),
+ VisitorOperation::Config(
+ framework::MilliSecTime(0),
+ 5,
+ 4));
+
+ op.start(_sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 1"),
+ _sender.getCommands(true));
+
+ sendReply(op);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorReply(last=BucketId(0x000000007fffffff)) "
+ "ReturnCode(NONE)"),
+ _sender.getLastReply());
+}
+
+// Buckets placed on their ideal nodes per the distribution function: a
+// max-8-bucket visitor picks the first 8 buckets (in visit order) that land
+// on node 0 and reports that subset's last bucket back to the client.
+void
+VisitorOperationTest::testVisitIdealNode()
+{
+ ClusterState state("distributor:1 storage:3");
+ _distributor->enableClusterState(state);
+
+ // Create buckets in bucketdb
+ for (int i=0; i<32; i++ ) {
+ document::BucketId id(21, i*0x10000 + 0x0001);
+ addIdealNodes(state, id);
+ }
+
+ document::BucketId id(16, 1);
+ VisitorOperation op(getExternalOperationHandler(),
+ createVisitorCommand("multinode",
+ id,
+ nullId,
+ 8),
+ defaultConfig);
+
+ op.start(_sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
+ _sender.getCommands(true));
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorCommand(dumpvisitor, , 8 buckets) Buckets: [ "
+ "BucketId(0x5400000000000001) BucketId(0x5400000000100001) "
+ "BucketId(0x5400000000080001) BucketId(0x5400000000180001) "
+ "BucketId(0x5400000000040001) BucketId(0x5400000000140001) "
+ "BucketId(0x54000000000c0001) BucketId(0x54000000001c0001) ]"),
+ serializeVisitorCommand(0));
+
+ sendReply(op);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorReply(last=BucketId(0x54000000001c0001)) "
+ "ReturnCode(NONE)"),
+ _sender.getLastReply());
+}
+
+// A critical error (ILLEGAL_PARAMETERS) from storage must fail the client
+// operation immediately — no retry towards the replica on node 1, unlike
+// the transient-failure case.
+void
+VisitorOperationTest::testNoResendingOnCriticalFailure()
+{
+ ClusterState state("distributor:1 storage:3");
+ _distributor->enableClusterState(state);
+
+ // Create buckets in bucketdb
+ for (int i=0; i<32; i++ ) {
+ document::BucketId id(21, i*0x10000 + 0x0001);
+ addNodesToBucketDB(id, "0=1/1/1/t,1=1/1/1/t");
+ }
+
+ document::BucketId id(16, 1);
+ VisitorOperation op(getExternalOperationHandler(),
+ createVisitorCommand("multinodefailurecritical",
+ id,
+ nullId,
+ 8),
+ defaultConfig);
+
+ op.start(_sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
+ _sender.getCommands(true));
+
+ sendReply(op, -1, api::ReturnCode::ILLEGAL_PARAMETERS);
+
+ CPPUNIT_ASSERT_EQUAL(
+ "CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(ILLEGAL_PARAMETERS, [from content node 0] )"s,
+ _sender.getLastReply());
+}
+
+// Transient failures from both replica nodes: one resend to node 1 is
+// attempted, and when that also fails the operation gives up with
+// BUCKET_NOT_FOUND (no further nodes to try).
+void
+VisitorOperationTest::testFailureOnAllNodes()
+{
+ ClusterState state("distributor:1 storage:3");
+ _distributor->enableClusterState(state);
+
+ // Create buckets in bucketdb
+ for (int i=0; i<32; i++ ) {
+ document::BucketId id(21, i*0x10000 + 0x0001);
+ addNodesToBucketDB(id, "0=1/1/1/t,1=1/1/1/t");
+ }
+
+ document::BucketId id(16, 1);
+ VisitorOperation op(getExternalOperationHandler(),
+ createVisitorCommand("multinodefailurecritical",
+ id,
+ nullId,
+ 8),
+ defaultConfig);
+
+ op.start(_sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
+ _sender.getCommands(true));
+
+ sendReply(op, -1, api::ReturnCode::NOT_CONNECTED);
+
+ // First failure triggers a resend towards node 1.
+ CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0,Visitor Create => 1"),
+ _sender.getCommands(true));
+
+ sendReply(op, -1, api::ReturnCode::NOT_CONNECTED);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(BUCKET_NOT_FOUND)"),
+ _sender.getLastReply());
+}
+
+
+// Unit test for the VisitorOrder comparator: sorting the same four buckets
+// under ASCENDING/DESCENDING ordering specs with different start values
+// (0x0, 0xFF, 0x14) must produce the expected document-order permutations.
+void
+VisitorOperationTest::testVisitOrder()
+{
+ std::vector<document::BucketId> buckets;
+
+ document::BucketId id000(35, 0x0000004d2);
+ buckets.push_back(id000);
+ document::BucketId id001(35, 0x4000004d2);
+ buckets.push_back(id001);
+ document::BucketId id01(34, 0x2000004d2);
+ buckets.push_back(id01);
+ document::BucketId id1(33, 0x1000004d2);
+ buckets.push_back(id1);
+
+ // Ascending from 0: plain order-bit order.
+ std::sort(buckets.begin(),
+ buckets.end(),
+ VisitorOrder(document::OrderingSpecification(
+ document::OrderingSpecification::ASCENDING, 0x0, 6, 2)));
+
+ CPPUNIT_ASSERT_EQUAL(buckets[0], id000);
+ CPPUNIT_ASSERT_EQUAL(buckets[1], id001);
+ CPPUNIT_ASSERT_EQUAL(buckets[2], id01);
+ CPPUNIT_ASSERT_EQUAL(buckets[3], id1);
+
+ // Descending from 0xFF: exact reverse of the above.
+ std::sort(buckets.begin(),
+ buckets.end(),
+ VisitorOrder(document::OrderingSpecification(
+ document::OrderingSpecification::DESCENDING, 0xFF, 6, 2)));
+ CPPUNIT_ASSERT_EQUAL(buckets[0], id1);
+ CPPUNIT_ASSERT_EQUAL(buckets[1], id01);
+ CPPUNIT_ASSERT_EQUAL(buckets[2], id001);
+ CPPUNIT_ASSERT_EQUAL(buckets[3], id000);
+
+ // Ascending from a mid-range start value (0x14).
+ std::sort(buckets.begin(),
+ buckets.end(),
+ VisitorOrder(document::OrderingSpecification(
+ document::OrderingSpecification::ASCENDING, 0x14, 6, 2)));
+ CPPUNIT_ASSERT_EQUAL(buckets[0], id01);
+ CPPUNIT_ASSERT_EQUAL(buckets[1], id1);
+ CPPUNIT_ASSERT_EQUAL(buckets[2], id000);
+ CPPUNIT_ASSERT_EQUAL(buckets[3], id001);
+
+ // Descending from the same mid-range start value.
+ std::sort(buckets.begin(),
+ buckets.end(),
+ VisitorOrder(document::OrderingSpecification(
+ document::OrderingSpecification::DESCENDING, 0x14, 6, 2)));
+ CPPUNIT_ASSERT_EQUAL(buckets[0], id01);
+ CPPUNIT_ASSERT_EQUAL(buckets[1], id001);
+ CPPUNIT_ASSERT_EQUAL(buckets[2], id000);
+ CPPUNIT_ASSERT_EQUAL(buckets[3], id1);
+}
+
+// Nine sub-buckets visited three at a time: each chunk's reply returns the
+// last bucket as a continuation token, each subsequent call resumes from it,
+// and the final chunk ends with the done-sentinel.
+void
+VisitorOperationTest::testVisitInChunks()
+{
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:1"));
+
+ for (int i = 0; i < 9; ++i) {
+ addNodesToBucketDB(document::BucketId(30, i << 16), "0=1/1/1/t");
+ }
+
+ document::BucketId id(16, 0);
+
+ // Chunk 1: buckets 0-2 in visit order.
+ std::pair<std::string, std::string> val(runVisitor(id, nullId, 3));
+ CPPUNIT_ASSERT_EQUAL(std::string(
+ "CreateVisitorCommand(dumpvisitor, true, 3 buckets) "
+ "Buckets: [ BucketId(0x7800000000000000) "
+ "BucketId(0x7800000000080000) "
+ "BucketId(0x7800000000040000) ]"),
+ val.first);
+
+ CPPUNIT_ASSERT_EQUAL(std::string(
+ "CreateVisitorReply(last=BucketId(0x7800000000040000)) "
+ "ReturnCode(NONE)"),
+ val.second);
+
+ // Chunk 2: resume from the previous chunk's last bucket.
+ val = runVisitor(id, document::BucketId(0x7800000000040000), 3);
+ CPPUNIT_ASSERT_EQUAL(std::string(
+ "CreateVisitorCommand(dumpvisitor, true, 3 buckets) "
+ "Buckets: [ BucketId(0x7800000000020000) "
+ "BucketId(0x7800000000060000) "
+ "BucketId(0x7800000000010000) ]"),
+ val.first);
+
+ CPPUNIT_ASSERT_EQUAL(std::string(
+ "CreateVisitorReply(last=BucketId(0x7800000000010000)) "
+ "ReturnCode(NONE)"),
+ val.second);
+
+ // Chunk 3: final buckets; reply carries the done-sentinel.
+ val = runVisitor(id, document::BucketId(0x7800000000010000), 3);
+ CPPUNIT_ASSERT_EQUAL(std::string(
+ "CreateVisitorCommand(dumpvisitor, true, 3 buckets) "
+ "Buckets: [ BucketId(0x7800000000050000) "
+ "BucketId(0x7800000000030000) "
+ "BucketId(0x7800000000070000) ]"),
+ val.first);
+
+ CPPUNIT_ASSERT_EQUAL(std::string(
+ "CreateVisitorReply(last=BucketId(0x000000007fffffff)) "
+ "ReturnCode(NONE)"),
+ val.second);
+}
+
+// VisitorOrder comparator with buckets split deeper (36/37 bits) than the
+// ordering spec's order-bit width: the null bucket must always sort first,
+// the INT_MAX sentinel always last, and buckets split past the order bits
+// keep a stable relative order under every spec variant.
+void
+VisitorOperationTest::testVisitOrderSplitPastOrderBits()
+{
+ std::vector<document::BucketId> buckets;
+
+ document::BucketId max(INT_MAX);
+ buckets.push_back(max);
+ document::BucketId id1(33, 0x1000004d2);
+ buckets.push_back(id1);
+ document::BucketId id01(34, 0x2000004d2);
+ buckets.push_back(id01);
+ document::BucketId id00001(37, 0x10000004d2);
+ buckets.push_back(id00001);
+ document::BucketId id00000(37, 0x00000004d2);
+ buckets.push_back(id00000);
+ document::BucketId id0000(36, 0x0000004d2);
+ buckets.push_back(id0000);
+ document::BucketId null(0, 0);
+ buckets.push_back(null);
+
+ std::sort(buckets.begin(), buckets.end(), VisitorOrder(document::OrderingSpecification(document::OrderingSpecification::ASCENDING, 0x0, 6, 2)));
+ CPPUNIT_ASSERT_EQUAL(buckets[0], null);
+ CPPUNIT_ASSERT_EQUAL(buckets[1], id0000);
+ CPPUNIT_ASSERT_EQUAL(buckets[2], id00000);
+ CPPUNIT_ASSERT_EQUAL(buckets[3], id00001);
+ CPPUNIT_ASSERT_EQUAL(buckets[4], id01);
+ CPPUNIT_ASSERT_EQUAL(buckets[5], id1);
+ CPPUNIT_ASSERT_EQUAL(buckets[6], max);
+
+ std::sort(buckets.begin(), buckets.end(), VisitorOrder(document::OrderingSpecification(document::OrderingSpecification::DESCENDING, 0xFF, 6, 2)));
+ CPPUNIT_ASSERT_EQUAL(buckets[0], null);
+ CPPUNIT_ASSERT_EQUAL(buckets[1], id1);
+ CPPUNIT_ASSERT_EQUAL(buckets[2], id01);
+ CPPUNIT_ASSERT_EQUAL(buckets[3], id0000);
+ CPPUNIT_ASSERT_EQUAL(buckets[4], id00000);
+ CPPUNIT_ASSERT_EQUAL(buckets[5], id00001);
+ CPPUNIT_ASSERT_EQUAL(buckets[6], max);
+
+ std::sort(buckets.begin(), buckets.end(), VisitorOrder(document::OrderingSpecification(document::OrderingSpecification::ASCENDING, 0x14, 6, 2)));
+ CPPUNIT_ASSERT_EQUAL(buckets[0], null);
+ CPPUNIT_ASSERT_EQUAL(buckets[1], id01);
+ CPPUNIT_ASSERT_EQUAL(buckets[2], id1);
+ CPPUNIT_ASSERT_EQUAL(buckets[3], id0000);
+ CPPUNIT_ASSERT_EQUAL(buckets[4], id00000);
+ CPPUNIT_ASSERT_EQUAL(buckets[5], id00001);
+ CPPUNIT_ASSERT_EQUAL(buckets[6], max);
+
+ std::sort(buckets.begin(), buckets.end(), VisitorOrder(document::OrderingSpecification(document::OrderingSpecification::DESCENDING, 0x14, 6, 2)));
+ CPPUNIT_ASSERT_EQUAL(buckets[0], null);
+ CPPUNIT_ASSERT_EQUAL(buckets[1], id01);
+ CPPUNIT_ASSERT_EQUAL(buckets[2], id0000);
+ CPPUNIT_ASSERT_EQUAL(buckets[3], id00000);
+ CPPUNIT_ASSERT_EQUAL(buckets[4], id00001);
+ CPPUNIT_ASSERT_EQUAL(buckets[5], id1);
+ CPPUNIT_ASSERT_EQUAL(buckets[6], max);
+}
+
+// VisitorOrder comparator with an inconsistently split set: a 16-bit
+// super-bucket coexisting with its 33-35-bit children. The super-bucket
+// must always sort directly after the null bucket, before any of its
+// children, regardless of ordering direction or start value.
+void
+VisitorOperationTest::testVisitOrderInconsistentlySplit()
+{
+ std::vector<document::BucketId> buckets;
+
+ document::BucketId max(INT_MAX);
+ buckets.push_back(max);
+ document::BucketId id000(35, 0x0000004d2);
+ buckets.push_back(id000);
+ document::BucketId id001(35, 0x4000004d2);
+ buckets.push_back(id001);
+ document::BucketId id01(34, 0x2000004d2);
+ buckets.push_back(id01);
+ document::BucketId id1(33, 0x1000004d2);
+ buckets.push_back(id1);
+ document::BucketId idsuper(16, 0x04d2);
+ buckets.push_back(idsuper);
+ document::BucketId null(0, 0);
+ buckets.push_back(null);
+
+ std::sort(buckets.begin(), buckets.end(), VisitorOrder(document::OrderingSpecification(document::OrderingSpecification::ASCENDING, 0x0, 6, 2)));
+ CPPUNIT_ASSERT_EQUAL(buckets[0], null);
+ CPPUNIT_ASSERT_EQUAL(buckets[1], idsuper);
+ CPPUNIT_ASSERT_EQUAL(buckets[2], id000);
+ CPPUNIT_ASSERT_EQUAL(buckets[3], id001);
+ CPPUNIT_ASSERT_EQUAL(buckets[4], id01);
+ CPPUNIT_ASSERT_EQUAL(buckets[5], id1);
+ CPPUNIT_ASSERT_EQUAL(buckets[6], max);
+
+ std::sort(buckets.begin(), buckets.end(), VisitorOrder(document::OrderingSpecification(document::OrderingSpecification::DESCENDING, 0xFF, 6, 2)));
+ CPPUNIT_ASSERT_EQUAL(buckets[0], null);
+ CPPUNIT_ASSERT_EQUAL(buckets[1], idsuper);
+ CPPUNIT_ASSERT_EQUAL(buckets[2], id1);
+ CPPUNIT_ASSERT_EQUAL(buckets[3], id01);
+ CPPUNIT_ASSERT_EQUAL(buckets[4], id001);
+ CPPUNIT_ASSERT_EQUAL(buckets[5], id000);
+ CPPUNIT_ASSERT_EQUAL(buckets[6], max);
+
+ std::sort(buckets.begin(), buckets.end(), VisitorOrder(document::OrderingSpecification(document::OrderingSpecification::ASCENDING, 0x14, 6, 2)));
+ CPPUNIT_ASSERT_EQUAL(buckets[0], null);
+ CPPUNIT_ASSERT_EQUAL(buckets[1], idsuper);
+ CPPUNIT_ASSERT_EQUAL(buckets[2], id01);
+ CPPUNIT_ASSERT_EQUAL(buckets[3], id1);
+ CPPUNIT_ASSERT_EQUAL(buckets[4], id000);
+ CPPUNIT_ASSERT_EQUAL(buckets[5], id001);
+ CPPUNIT_ASSERT_EQUAL(buckets[6], max);
+
+ std::sort(buckets.begin(), buckets.end(), VisitorOrder(document::OrderingSpecification(document::OrderingSpecification::DESCENDING, 0x14, 6, 2)));
+ CPPUNIT_ASSERT_EQUAL(buckets[0], null);
+ CPPUNIT_ASSERT_EQUAL(buckets[1], idsuper);
+ CPPUNIT_ASSERT_EQUAL(buckets[2], id01);
+ CPPUNIT_ASSERT_EQUAL(buckets[3], id001);
+ CPPUNIT_ASSERT_EQUAL(buckets[4], id000);
+ CPPUNIT_ASSERT_EQUAL(buckets[5], id1);
+ CPPUNIT_ASSERT_EQUAL(buckets[6], max);
+}
+
+// Helper: repeatedly runs single-bucket ordered visitor chunks (DESCENDING,
+// selection "id.order(6,2)<= 20") starting from `startBucket`, resuming each
+// round from the previously returned last-bucket, until the done-sentinel
+// reply (0x000000007fffffff) is seen. Returns the newline-separated list of
+// buckets in the order they were handed to storage visitors.
+std::string
+VisitorOperationTest::doOrderedVisitor(document::BucketId startBucket)
+{
+ std::vector<document::BucketId> buckets;
+
+ while (true) {
+ _sender.clear();
+
+ VisitorOperation op(getExternalOperationHandler(),
+ createVisitorCommand(
+ "uservisitororder",
+ startBucket,
+ buckets.size() ? buckets[buckets.size() - 1] :
+ nullId,
+ 1,
+ 500,
+ false,
+ false,
+ "dumpvisitor",
+ document::OrderingSpecification::DESCENDING,
+ "id.order(6,2)<= 20"),
+ defaultConfig);
+
+ op.start(_sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
+ _sender.getCommands(true));
+
+ // Record every bucket targeted by the commands sent this round.
+ for (uint32_t i = 0; i < _sender.commands.size(); ++i) {
+ const api::CreateVisitorCommand cmd(
+ static_cast<const api::CreateVisitorCommand&>(
+ *_sender.commands[i]));
+
+ for (uint32_t j = 0; j < cmd.getBuckets().size(); ++j) {
+ buckets.push_back(cmd.getBuckets()[j]);
+ }
+ }
+
+ sendReply(op);
+
+ CPPUNIT_ASSERT_EQUAL(1, (int)_sender.replies.size());
+
+ const api::CreateVisitorReply& reply(
+ static_cast<const api::CreateVisitorReply&>(*_sender.replies[0]));
+
+ // Sentinel last-bucket means the whole range has been covered.
+ if (reply.getLastBucket() == document::BucketId(0x000000007fffffff)) {
+ break;
+ }
+ }
+
+ std::ostringstream ost;
+ for (uint32_t i = 0; i < buckets.size(); ++i) {
+ ost << buckets[i] << "\n";
+ }
+
+ return ost.str();
+}
+
+// Chunked ordered (DESCENDING) visiting of four split buckets must hand them
+// to storage in document order, one bucket per chunk.
+void
+VisitorOperationTest::testUserVisitorOrder()
+{
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:1"));
+
+ // Create buckets in bucketdb
+ std::vector<document::BucketId> buckets;
+ document::BucketId id000(35, 0x0000004d2);
+ buckets.push_back(id000);
+ document::BucketId id001(35, 0x4000004d2);
+ buckets.push_back(id001);
+ document::BucketId id01(34, 0x2000004d2);
+ buckets.push_back(id01);
+ document::BucketId id1(33, 0x1000004d2);
+ buckets.push_back(id1);
+
+ for (uint32_t i=0; i<buckets.size(); i++) {
+ addNodesToBucketDB(buckets[i], "0=1/1/1/t");
+ }
+
+ document::BucketId id(16, 0x04d2);
+
+ // Expected visit order: id01, id001, id000, id1.
+ CPPUNIT_ASSERT_EQUAL(std::string("BucketId(0x88000002000004d2)\n"
+ "BucketId(0x8c000004000004d2)\n"
+ "BucketId(0x8c000000000004d2)\n"
+ "BucketId(0x84000001000004d2)\n"),
+ doOrderedVisitor(id));
+}
+
+// Same as testUserVisitorOrder, but with buckets split past the ordering
+// spec's order-bit width (36/37 bits): ordered visiting must still cover
+// all buckets in a deterministic document order.
+void
+VisitorOperationTest::testUserVisitorOrderSplitPastOrderBits()
+{
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:1"));
+
+ // Create buckets in bucketdb
+ std::vector<document::BucketId> buckets;
+ document::BucketId id1(33, 0x1000004d2);
+ buckets.push_back(id1);
+ document::BucketId id01(34, 0x2000004d2);
+ buckets.push_back(id01);
+ document::BucketId id00001(37, 0x10000004d2);
+ buckets.push_back(id00001);
+ document::BucketId id00000(37, 0x00000004d2);
+ buckets.push_back(id00000);
+ document::BucketId id0000(36, 0x0000004d2);
+ buckets.push_back(id0000);
+ for (uint32_t i=0; i<buckets.size(); i++) {
+ addNodesToBucketDB(buckets[i], "0=1/1/1/t");
+ }
+
+ document::BucketId id(16, 0x04d2);
+
+ // Expected visit order: id01, id0000, id00000, id00001, id1.
+ CPPUNIT_ASSERT_EQUAL(std::string("BucketId(0x88000002000004d2)\n"
+ "BucketId(0x90000000000004d2)\n"
+ "BucketId(0x94000000000004d2)\n"
+ "BucketId(0x94000010000004d2)\n"
+ "BucketId(0x84000001000004d2)\n"),
+ doOrderedVisitor(id));
+}
+
+// Helper: sets up two sub-buckets on two different nodes and starts a
+// visitor over their (16, 1) super-bucket, optionally with the
+// visit-inconsistent flag. Asserts both storage visitors were dispatched
+// and returns the still-running operation for the caller to drive.
+std::unique_ptr<VisitorOperation>
+VisitorOperationTest::startOperationWith2StorageNodeVisitors(bool inconsistent)
+{
+ ClusterState state("distributor:1 storage:3");
+ _distributor->enableClusterState(state);
+
+ addNodesToBucketDB(document::BucketId(17, 1), "0=1/1/1/t");
+ addNodesToBucketDB(document::BucketId(17, 1 << 16 | 1),
+ "1=1/1/1/t");
+
+ document::BucketId id(16, 1);
+ auto op = std::make_unique<VisitorOperation>(
+ getExternalOperationHandler(),
+ createVisitorCommand(
+ "multinodefailurecritical",
+ id,
+ nullId,
+ 8,
+ 500,
+ inconsistent),
+ defaultConfig);
+
+ op->start(_sender, framework::MilliSecTime(0));
+
+ CPPUNIT_ASSERT_EQUAL("Visitor Create => 0,Visitor Create => 1"s,
+ _sender.getCommands(true));
+ return op;
+}
+
+// A non-critical failure (BUSY) from one of two storage visitors must not
+// produce a client reply until the second visitor has also replied; the
+// final outcome is BUCKET_NOT_FOUND since inconsistent visiting is off.
+void
+VisitorOperationTest::testNoClientReplyBeforeAllStorageRepliesReceived()
+{
+ auto op = startOperationWith2StorageNodeVisitors(false);
+
+ sendReply(*op, 0, api::ReturnCode::BUSY);
+ // We don't want to see a reply here until the other node has replied.
+ CPPUNIT_ASSERT_EQUAL(""s, _sender.getReplies(true));
+ // OK reply from 1, but have to retry from client anyhow since one of
+ // the sub buckets failed to be processed and we don't have inconsistent
+ // visiting set in the client visitor command.
+ sendReply(*op, 1);
+ CPPUNIT_ASSERT_EQUAL(
+ "CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(BUCKET_NOT_FOUND)"s,
+ _sender.getLastReply());
+ // XXX we should consider whether we want BUSY to be returned instead.
+ // Non-critical error codes are currently converted to a generic "not found"
+ // code to let the client silently retry until the bucket has hopefully
+ // become consistent/available.
+}
+
+// With the visit-inconsistent flag on, a BUSY sub-visitor is skipped rather
+// than failing the operation: the other visitor's success completes the
+// client reply with the done-sentinel.
+void
+VisitorOperationTest::testSkipFailedSubBucketsWhenVisitingInconsistent()
+{
+ auto op = startOperationWith2StorageNodeVisitors(true);
+
+ sendReply(*op, 0, api::ReturnCode::BUSY);
+ // Still no client reply until the second visitor is done.
+ CPPUNIT_ASSERT_EQUAL(""s, _sender.getReplies(true));
+ // Subset of buckets could not be visited, but visit inconsistent flag is
+ // set in the client visitor so we treat it as a success anyway. In this
+ // case we've expanded the entire superbucket sub-tree so return with magic
+ // number to signify this.
+ sendReply(*op, 1);
+ CPPUNIT_ASSERT_EQUAL(
+ "CreateVisitorReply(last=BucketId(0x000000007fffffff)) "
+ "ReturnCode(NONE)"s,
+ _sender.getLastReply());
+}
+
+// By default, queue timeout should be half of remaining visitor time. This
+// is a highly un-scientific heuristic, but seems rather more reasonable than
+// having it hard-coded to 2000 ms as was the case earlier.
+// A 10000 ms client visitor timeout must produce a 5000 ms storage-side
+// queue timeout (half of the remaining time; see rationale comment above).
+void
+VisitorOperationTest::testQueueTimeoutIsFactorOfTotalTimeout()
+{
+ document::BucketId id(uint64_t(0x400000000000007b));
+ _distributor->enableClusterState(ClusterState("distributor:1 storage:2"));
+ addNodesToBucketDB(id, "0=1/1/1/t,1=1/1/1/t");
+
+ VisitorOperation op(
+ getExternalOperationHandler(),
+ createVisitorCommand("foo", id, nullId, 8, 10000),
+ defaultConfig);
+
+ op.start(_sender, framework::MilliSecTime(0));
+ CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
+ _sender.getCommands(true));
+
+ auto& cmd(dynamic_cast<CreateVisitorCommand&>(*_sender.commands[0]));
+ CPPUNIT_ASSERT_EQUAL(uint32_t(5000), cmd.getQueueTimeout());
+}
+
+} // distributor
+} // storage
diff --git a/storage/src/tests/fastos.project.newcore b/storage/src/tests/fastos.project.newcore
new file mode 100644
index 00000000000..7b5cad846b1
--- /dev/null
+++ b/storage/src/tests/fastos.project.newcore
@@ -0,0 +1,80 @@
+APPLICATION testrunner
+OBJS storageserver/dummystoragelink
+OBJS testrunner testhelper
+LIBS tests/persistence/memfile/testmemfiletop
+LIBS tests/serverapp/testserverapp
+LIBS tests/storageserver/teststorageserver
+LIBS tests/bucketmover/testbucketmover
+LIBS tests/storageutil/teststorageutil
+LIBS tests/visiting/testvisiting
+LIBS tests/bucketdb/testbucketdb
+LIBS tests/common/testcommon
+LIBS tests/common/hostreporter/testhostreporter
+LIBS tests/distributor/testdistributor
+LIBS tests/persistence/testpersistence
+LIBS tests/persistence/device/testdevice
+LIBS tests/persistence/filestorage/testfilestorage
+LIBS tests/persistence/filestorage/slotfile/testslotfile
+LIBS tests/splitting/testsplitting
+LIBS tests/memorymanager/testmemorymanager
+
+LIBS storage/storageserver/storageserver
+LIBS storage/bucketmover/bucketmover
+LIBS storage/visiting/visitor
+LIBS storage/memorymanager/memorymanager
+LIBS storage/persistence/persistence
+LIBS storage/persistence/filestorage/filestorpersistence
+LIBS storage/persistence/memfile/memfiletop
+LIBS storage/persistence/memfile/common/memfilecommon
+LIBS storage/persistence/memfile/mapper/memfilemapper
+LIBS storage/persistence/memfile/handler/memfilehandler
+LIBS storage/persistence/memfile/memfile/memfile
+LIBS storage/persistence/memfile/common/memfilecommon
+LIBS storage/persistence/memfile/memfiletop
+LIBS storage/storageutil/storageutil
+LIBS storage/persistence/device/device
+LIBS storage/persistence/filestorage/slotfile/slotfile
+LIBS storage/bucketdb/bucketdb
+LIBS storage/distributor/distributor
+LIBS storage/common/common
+LIBS storage/config/storageconfig
+EXTERNALLIBS cppunit vdslib storageapi
+EXTERNALLIBS document metrics boost_regex-mt-d
+EXTERNALLIBS fast iconv
+EXTERNALLIBS vespa
+EXTERNALLIBS config vespalog Judy vdslib documentapi vespalib
+EXTERNALLIBS messagebus-test slobrokserver
+
+CUSTOMMAKE
+
+LIBDIR_TESTS=persistence/memfile:bucketdb:common:distributor:persistence:persistence/device:persistence/filestorage:persistence/filestorage/slotfile:serverapp:storageserver:storageutil:visiting:splitting:memorymanager:bucketmover
+
+test: all
+ rm -f test.vlog
+ VESPA_LOG_TARGET=file:test.vlog LD_LIBRARY_PATH=$(LIBDIR_BOOST_REGEX-MT-D):$(LIBDIR_DSTORE):$(LIBDIR_ICONV):$(LIBDIR_CPPUNIT):$(LIBDIR_TESTS):$(LD_LIBRARY_PATH) $(VALGRIND) ./testrunner --verbose $(TESTRUNARGS)
+
+vtest: all
+ rm -f test.vlog
+ VESPA_LOG_TARGET=file:test.vlog LD_LIBRARY_PATH=$(LIBDIR_BOOST_REGEX-MT-D):$(LIBDIR_DSTORE):$(LIBDIR_ICONV):$(LIBDIR_CPPUNIT):$(LIBDIR_TESTS):$(LD_LIBRARY_PATH) valgrind --leak-check=no ./testrunner --verbose
+
+testdebug: all
+ rm -f test.vlog
+ VESPA_LOG_TARGET=file:test.vlog LD_LIBRARY_PATH=$(LIBDIR_BOOST_REGEX-MT-D):$(LIBDIR_DSTORE):$(LIBDIR_ICONV):$(LIBDIR_CPPUNIT):$(LIBDIR_TESTS):$(LD_LIBRARY_PATH) gdb53 ./testrunner --verbose
+
+testwithlog: all
+ LD_LIBRARY_PATH=$(LIBDIR_BOOST_REGEX-MT-D):$(LIBDIR_DSTORE):$(LIBDIR_ICONV):$(LIBDIR_CPPUNIT):$(LIBDIR_TESTS):$(LD_LIBRARY_PATH) ./testrunner --verbose
+
+vtestwithlog: all
+ LD_LIBRARY_PATH=$(LIBDIR_BOOST_REGEX-MT-D):$(LIBDIR_DSTORE):$(LIBDIR_ICONV):$(LIBDIR_CPPUNIT):$(LIBDIR_TESTS):$(LD_LIBRARY_PATH) valgrind ./testrunner --verbose
+
+stresstest: all
+ rm -f test.vlog
+ VESPA_LOG_TARGET=file:test.vlog LD_LIBRARY_PATH=$(LIBDIR_BOOST_REGEX-MT-D):$(LIBDIR_DSTORE):$(LIBDIR_ICONV):$(LIBDIR_CPPUNIT):$(LIBDIR_TESTS):$(LD_LIBRARY_PATH) ./testrunner --verbose --includestress stress Stress
+
+testverbose: all
+ rm -f test.vlog
+ VESPA_LOG_TARGET=file:test.vlog LD_LIBRARY_PATH=$(LIBDIR_BOOST_REGEX-MT-D):$(LIBDIR_DSTORE):$(LIBDIR_ICONV):$(LIBDIR_CPPUNIT):$(LIBDIR_TESTS):$(LD_LIBRARY_PATH) ./testrunner --verbose
+
+testall: all
+ rm -f test.vlog
+ VESPA_LOG_TARGET=file:test.vlog LD_LIBRARY_PATH=$(LIBDIR_BOOST_REGEX-MT-D):$(LIBDIR_DSTORE):$(LIBDIR_ICONV):$(LIBDIR_CPPUNIT):$(LIBDIR_TESTS):$(LD_LIBRARY_PATH) $(VALGRIND) ./testrunner --verbose --includestress
diff --git a/storage/src/tests/frameworkimpl/memory/CMakeLists.txt b/storage/src/tests/frameworkimpl/memory/CMakeLists.txt
new file mode 100644
index 00000000000..da78716459f
--- /dev/null
+++ b/storage/src/tests/frameworkimpl/memory/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_library(storage_testmemory
+ SOURCES
+ memorystatusviewertest.cpp
+ DEPENDS
+ AFTER
+ storage_storageconfig
+)
diff --git a/storage/src/tests/frameworkimpl/memory/memorystatusviewertest.cpp b/storage/src/tests/frameworkimpl/memory/memorystatusviewertest.cpp
new file mode 100644
index 00000000000..cc7e98d8718
--- /dev/null
+++ b/storage/src/tests/frameworkimpl/memory/memorystatusviewertest.cpp
@@ -0,0 +1,168 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/metrics/metrics.h>
+#include <vespa/storage/frameworkimpl/memory/memorystatusviewer.h>
+#include <vespa/storageframework/defaultimplementation/memory/prioritymemorylogic.h>
+#include <tests/common/teststorageapp.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+
+namespace storage {
+
+struct MemoryStatusViewerTest : public CppUnit::TestFixture
+{
+ static const int maxMemory = 1000;
+ std::unique_ptr<TestServiceLayerApp> _node;
+ std::unique_ptr<framework::defaultimplementation::MemoryManager> _memMan;
+
+ void setUp();
+
+ void testEmptyState();
+ void testSnapshots();
+
+ CPPUNIT_TEST_SUITE(MemoryStatusViewerTest);
+ CPPUNIT_TEST(testEmptyState);
+ CPPUNIT_TEST(testSnapshots);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(MemoryStatusViewerTest);
+
+void
+MemoryStatusViewerTest::setUp()
+{
+ _node.reset(new TestServiceLayerApp(DiskCount(2)));
+ framework::defaultimplementation::PriorityMemoryLogic* logic(
+ new framework::defaultimplementation::PriorityMemoryLogic(
+ _node->getClock(), maxMemory));
+ logic->setMinJumpToUpdateMax(1);
+ _memMan.reset(new framework::defaultimplementation::MemoryManager(
+ framework::defaultimplementation::AllocationLogic::UP(logic)));
+}
+
+void
+MemoryStatusViewerTest::testEmptyState()
+{
+ // Add a memory manager, and add a bit of load to it, so it's not
+ // totally empty.
+ StorageComponent component(_node->getComponentRegister(), "test");
+
+ metrics::MetricManager mm;
+ MemoryStatusViewer viewer(
+ *_memMan, mm, _node->getComponentRegister());
+ std::ostringstream actual;
+ viewer.reportStatus(actual, framework::HttpUrlPath("/"));
+ CPPUNIT_ASSERT_MATCH_REGEX(".*Plotr.LineChart.*", actual.str());
+ CPPUNIT_ASSERT_MATCH_REGEX(
+ ".*Current: 1970-01-01 00:00:00 Max memory 1000 SnapShot\\(Used 0, w/o cache 0\\).*",
+ actual.str());
+ CPPUNIT_ASSERT_MATCH_REGEX(
+ ".*Last hour: na.*", actual.str());
+}
+
+namespace {
+ void waitForProcessedTime(
+ const MemoryStatusViewer& viewer, framework::SecondTime time,
+ framework::SecondTime timeout = framework::SecondTime(30))
+ {
+ framework::defaultimplementation::RealClock clock;
+ framework::MilliSecTime endTime(
+ clock.getTimeInMillis() + timeout.getMillis());
+ framework::SecondTime processedTime(0);
+ while (clock.getTimeInMillis() < endTime) {
+ processedTime = viewer.getProcessedTime();
+ if (processedTime >= time) return;
+ FastOS_Thread::Sleep(1);
+ }
+ std::ostringstream ost;
+ ost << "Timed out waiting " << timeout << " ms for time " << time
+ << " to be processed. Currently time is only processed up to "
+ << processedTime;
+ throw new vespalib::IllegalStateException(ost.str(), VESPA_STRLOC);
+ }
+}
+
+#define ASSERT_MEMORY(output, period, maxmem, used, usedwocache) \
+{ \
+ std::string::size_type _pos1_(output.find(period)); \
+ std::string::size_type _pos2_(output.find("Max memory", _pos1_)); \
+ std::string::size_type _pos3_(output.find("SnapShot", _pos2_)); \
+ std::string _maxMemory_(output.substr(_pos2_ + 11, _pos3_ - _pos2_ - 12)); \
+ std::string::size_type _pos4_(output.find(",", _pos3_)); \
+ std::string _used_(output.substr(_pos3_ + 14, _pos4_ - _pos3_ - 14)); \
+ std::string::size_type _pos5_(output.find(")", _pos4_)); \
+ std::string _usedwo_(output.substr(_pos4_ + 12, _pos5_ - _pos4_ - 12)); \
+ std::ostringstream _failure_; \
+ _failure_ << "Wrong match in period " << period << " in output:\n" \
+ << output << "\nFor value: "; \
+ \
+ CPPUNIT_ASSERT_EQUAL_MSG(_failure_.str() + "Max memory", \
+ uint64_t(maxmem), boost::lexical_cast<uint64_t>(_maxMemory_)); \
+ CPPUNIT_ASSERT_EQUAL_MSG(_failure_.str() + "Used memory", \
+ uint64_t(used), boost::lexical_cast<uint64_t>(_used_)); \
+ CPPUNIT_ASSERT_EQUAL_MSG(_failure_.str() + "Used memory w/o cache", \
+ uint64_t(usedwocache), boost::lexical_cast<uint64_t>(_usedwo_)); \
+}
+
+void
+MemoryStatusViewerTest::testSnapshots()
+{
+ // Add a memory manager, and add a bit of load to it, so it's not
+ // totally empty.
+ StorageComponent component(_node->getComponentRegister(), "test");
+ const framework::MemoryAllocationType putAlloc(
+ component.getMemoryManager().registerAllocationType(
+ framework::MemoryAllocationType("PUT")));
+ const framework::MemoryAllocationType getAlloc(
+ component.getMemoryManager().registerAllocationType(
+ framework::MemoryAllocationType("GET")));
+
+ framework::MemoryToken::UP put = _memMan->allocate(putAlloc, 0, 100, 80);
+ framework::MemoryToken::UP get = _memMan->allocate(getAlloc, 30, 200, 50);
+ framework::MemoryToken::UP get2 = _memMan->allocate(getAlloc, 70, 150, 60);
+
+ metrics::MetricManager mm;
+ MemoryStatusViewer viewer(*_memMan, mm, _node->getComponentRegister());
+
+ _node->getClock().addSecondsToTime(1000);
+ viewer.notifyThread();
+ waitForProcessedTime(viewer, framework::SecondTime(1000));
+
+ std::ostringstream actual;
+ viewer.printDebugOutput(actual);
+ //std::cerr << actual.str() << "\n";
+ ASSERT_MEMORY(actual.str(), "Current", 1000, 450, 450);
+ ASSERT_MEMORY(actual.str(), "Last hour", 1000, 450, 450);
+ ASSERT_MEMORY(actual.str(), "Last ever", 1000, 450, 450);
+
+ put = _memMan->allocate(putAlloc, 0, 50, 80);
+ get = _memMan->allocate(getAlloc, 100, 140, 50);
+ get2 = _memMan->allocate(getAlloc, 20, 100, 70);
+
+ _node->getClock().addSecondsToTime(3600);
+ viewer.notifyThread();
+ waitForProcessedTime(viewer, framework::SecondTime(4600));
+
+ actual.str("");
+ viewer.printDebugOutput(actual);
+ //std::cerr << actual.str() << "\n";
+ ASSERT_MEMORY(actual.str(), "Current", 1000, 290, 290);
+ ASSERT_MEMORY(actual.str(), "Last hour", 1000, 540, 540);
+ ASSERT_MEMORY(actual.str(), "Last ever", 1000, 540, 540);
+
+ get.reset();
+
+ _node->getClock().addSecondsToTime(3600);
+ viewer.notifyThread();
+ waitForProcessedTime(viewer, framework::SecondTime(4600 + 3600));
+
+ actual.str("");
+ viewer.printDebugOutput(actual);
+ //std::cerr << actual.str() << "\n";
+ ASSERT_MEMORY(actual.str(), "Current", 1000, 150, 150);
+ ASSERT_MEMORY(actual.str(), "Last hour", 1000, 290, 290);
+ ASSERT_MEMORY(actual.str(), "Last ever", 1000, 540, 540);
+
+}
+
+} // storage
diff --git a/storage/src/tests/frameworkimpl/status/CMakeLists.txt b/storage/src/tests/frameworkimpl/status/CMakeLists.txt
new file mode 100644
index 00000000000..734be8e9998
--- /dev/null
+++ b/storage/src/tests/frameworkimpl/status/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_library(storage_teststatus
+ SOURCES
+ statustest.cpp
+ DEPENDS
+ AFTER
+ storage_storageconfig
+)
diff --git a/storage/src/tests/frameworkimpl/status/statustest.cpp b/storage/src/tests/frameworkimpl/status/statustest.cpp
new file mode 100644
index 00000000000..0fc10e411cb
--- /dev/null
+++ b/storage/src/tests/frameworkimpl/status/statustest.cpp
@@ -0,0 +1,222 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/document/util/stringutil.h>
+#include <vespa/log/log.h>
+#include <sstream>
+#include <vespa/storageframework/defaultimplementation/component/componentregisterimpl.h>
+#include <vespa/storage/frameworkimpl/status/statuswebserver.h>
+#include <vespa/storageframework/defaultimplementation/thread/threadpoolimpl.h>
+#include <tests/common/teststorageapp.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+
+LOG_SETUP(".test.status");
+
+namespace storage {
+
+struct StatusTest : public CppUnit::TestFixture {
+ std::unique_ptr<TestServiceLayerApp> _node;
+
+ void setUp();
+
+ void testIndexStatusPage();
+ void testHtmlStatus();
+ void testXmlStatus();
+ void test404();
+ void requireThatServerSpecIsConstructedCorrectly();
+
+ CPPUNIT_TEST_SUITE(StatusTest);
+ CPPUNIT_TEST(testIndexStatusPage);
+ CPPUNIT_TEST(testHtmlStatus);
+ CPPUNIT_TEST(testXmlStatus);
+ CPPUNIT_TEST(test404);
+ CPPUNIT_TEST(requireThatServerSpecIsConstructedCorrectly);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(StatusTest);
+
+namespace {
+ struct HtmlStatusReporter : public framework::HtmlStatusReporter {
+ std::string _headerAddition;
+ std::string _content;
+
+ HtmlStatusReporter(const std::string& id, const std::string& name,
+ const std::string& content,
+ const std::string& headerAddition = "")
+ : framework::HtmlStatusReporter(id, name),
+ _headerAddition(headerAddition),
+ _content(content)
+ {
+ }
+
+ virtual void reportHtmlHeaderAdditions(
+ std::ostream& out, const framework::HttpUrlPath&) const
+ {
+ out << _headerAddition;
+ }
+
+ virtual void reportHtmlStatus(
+ std::ostream& out, const framework::HttpUrlPath&) const
+ {
+ out << _content;
+ }
+ };
+
+ struct XmlStatusReporter : public framework::XmlStatusReporter {
+ XmlStatusReporter(const std::string& id, const std::string& name)
+ : framework::XmlStatusReporter(id, name) {}
+ virtual vespalib::string reportXmlStatus(
+ vespalib::xml::XmlOutputStream& xos,
+ const framework::HttpUrlPath&) const
+ {
+ xos << vespalib::xml::XmlTag("mytag")
+ << vespalib::xml::XmlAttribute("foo", "bar")
+ << vespalib::xml::XmlContent("content")
+ << vespalib::xml::XmlEndTag();
+ return "";
+ }
+ };
+
+ struct StatusComponent : public framework::Component {
+ framework::StatusReporter* _reporter;
+
+ StatusComponent(framework::ComponentRegister& reg, const char* name,
+ framework::StatusReporter* reporter)
+ : framework::Component(reg, name),
+ _reporter(reporter)
+ {
+ registerStatusPage(*_reporter);
+ }
+ ~StatusComponent() { delete _reporter; }
+ };
+
+}
+
+void
+StatusTest::setUp()
+{
+ _node.reset(new TestServiceLayerApp);
+}
+
+void
+StatusTest::testIndexStatusPage()
+{
+ StatusComponent rep1(_node->getComponentRegister(), "foo",
+ new HtmlStatusReporter(
+ "fooid", "Foo impl", "<p>info</p>"));
+ StatusComponent rep2(_node->getComponentRegister(), "bar",
+ new HtmlStatusReporter(
+ "barid", "Bar impl", "<p>info</p>"));
+ StatusWebServer webServer(_node->getComponentRegister(),
+ _node->getComponentRegister(),
+ "raw:httpport -1");
+ std::ostringstream ss;
+ framework::HttpUrlPath path("");
+ webServer.handlePage(path, ss);
+ std::string expected(
+ "HTTP\\/1.1 200 OK\r\n"
+ "Connection: Close\r\n"
+ "Content-type: text\\/html\r\n"
+ "\r\n"
+ "<html>\n"
+ "<head>\n"
+ " <title>Index page</title>\n"
+ "<\\/head>\n"
+ "<body>\n"
+ " <h1>Index page</h1>\n"
+ "<p><b>Binary version of Vespa:<\\/b> [0-9.]+<\\/p>\n"
+ "<a href=\"fooid\">Foo impl<\\/a><br>\n"
+ "<a href=\"barid\">Bar impl<\\/a><br>\n"
+ "<\\/body>\n"
+ "<\\/html>\n"
+ );
+ CPPUNIT_ASSERT_MATCH_REGEX(expected, ss.str());
+}
+
+void
+StatusTest::testHtmlStatus()
+{
+ StatusComponent rep1(_node->getComponentRegister(), "foo",
+ new HtmlStatusReporter(
+ "fooid", "Foo impl", "<p>info</p>", "<!-- script -->"));
+ StatusWebServer webServer(_node->getComponentRegister(),
+ _node->getComponentRegister(),
+ "raw:httpport -1");
+ std::ostringstream ost;
+ framework::HttpUrlPath path("/fooid?unusedParam");
+ webServer.handlePage(path, ost);
+ std::string expected(
+ "HTTP/1.1 200 OK\r\n"
+ "Connection: Close\r\n"
+ "Content-type: text/html\r\n"
+ "\r\n"
+ "<html>\n"
+ "<head>\n"
+ " <title>Foo impl</title>\n"
+ "<!-- script --></head>\n"
+ "<body>\n"
+ " <h1>Foo impl</h1>\n"
+ "<p>info</p></body>\n"
+ "</html>\n"
+ );
+ CPPUNIT_ASSERT_EQUAL(expected, ost.str());
+}
+
+void
+StatusTest::testXmlStatus()
+{
+ StatusComponent rep1(_node->getComponentRegister(), "foo",
+ new XmlStatusReporter(
+ "fooid", "Foo impl"));
+ StatusWebServer webServer(_node->getComponentRegister(),
+ _node->getComponentRegister(),
+ "raw:httpport -1");
+ std::ostringstream ost;
+ framework::HttpUrlPath path("/fooid?unusedParam");
+ webServer.handlePage(path, ost);
+ std::string expected(
+ "HTTP/1.1 200 OK\r\n"
+ "Connection: Close\r\n"
+ "Content-type: application/xml\r\n"
+ "\r\n"
+ "<?xml version=\"1.0\"?>\n"
+ "<status id=\"fooid\" name=\"Foo impl\">\n"
+ "<mytag foo=\"bar\">content</mytag>\n"
+ "</status>"
+ );
+ CPPUNIT_ASSERT_EQUAL(expected, ost.str());
+}
+
+void
+StatusTest::test404()
+{
+ StatusWebServer webServer(_node->getComponentRegister(),
+ _node->getComponentRegister(),
+ "raw:httpport -1");
+ std::ostringstream ost;
+ framework::HttpUrlPath path("/fooid?unusedParam");
+ webServer.handlePage(path, ost);
+ std::string expected(
+ "HTTP/1.1 404 Not found\r\n"
+ "Connection: Close\r\n"
+ "Content-type: text/html\r\n"
+ "\r\n"
+ "<html><head><title>404 Not found</title></head>\r\n"
+ "<body><h1>404 Not found</h1>\r\n"
+ "<p></p></body>\r\n"
+ "</html>\r\n"
+ );
+ CPPUNIT_ASSERT_EQUAL_ESCAPED(expected, ost.str());
+}
+
+void
+StatusTest::requireThatServerSpecIsConstructedCorrectly()
+{
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("requesthost:10"),
+ StatusWebServer::getServerSpec("requesthost:10", "serverhost:20"));
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("serverhost:20"),
+ StatusWebServer::getServerSpec("", "serverhost:20"));
+}
+
+} // storage
diff --git a/storage/src/tests/persistence/.gitignore b/storage/src/tests/persistence/.gitignore
new file mode 100644
index 00000000000..184e5d1c936
--- /dev/null
+++ b/storage/src/tests/persistence/.gitignore
@@ -0,0 +1,12 @@
+*.So
+*.lo
+*.o
+.*.swp
+.config.log
+.depend
+.depend.NEW
+.deps
+.libs
+Makefile
+testrunner
+testrunner.core
diff --git a/storage/src/tests/persistence/CMakeLists.txt b/storage/src/tests/persistence/CMakeLists.txt
new file mode 100644
index 00000000000..c065c3eef5b
--- /dev/null
+++ b/storage/src/tests/persistence/CMakeLists.txt
@@ -0,0 +1,19 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_library(storage_testpersistence
+ SOURCES
+ processalltest.cpp
+ persistencetestutils.cpp
+ splitbitdetectortest.cpp
+ legacyoperationhandlertest.cpp
+ persistenceproviderwrapper.cpp
+ diskmoveoperationhandlertest.cpp
+ providershutdownwrappertest.cpp
+ mergehandlertest.cpp
+ persistencethread_splittest.cpp
+ bucketownershipnotifiertest.cpp
+ persistencequeuetest.cpp
+ testandsettest.cpp
+ DEPENDS
+ AFTER
+ storage_storageconfig
+)
diff --git a/storage/src/tests/persistence/bucketownershipnotifiertest.cpp b/storage/src/tests/persistence/bucketownershipnotifiertest.cpp
new file mode 100644
index 00000000000..ae54e629473
--- /dev/null
+++ b/storage/src/tests/persistence/bucketownershipnotifiertest.cpp
@@ -0,0 +1,162 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <tests/distributor/messagesenderstub.h>
+#include <tests/common/teststorageapp.h>
+#include <vespa/storage/persistence/bucketownershipnotifier.h>
+
+namespace storage {
+
+class BucketOwnershipNotifierTest : public CppUnit::TestFixture
+{
+ std::unique_ptr<TestServiceLayerApp> _app;
+ lib::ClusterState _clusterState;
+public:
+
+ BucketOwnershipNotifierTest()
+ : _app(),
+ _clusterState("distributor:2 storage:1")
+ {}
+
+ void setUp();
+
+ CPPUNIT_TEST_SUITE(BucketOwnershipNotifierTest);
+ CPPUNIT_TEST(testSendNotifyBucketChangeIfOwningDistributorChanged);
+ CPPUNIT_TEST(testDoNotSendNotifyBucketChangeIfBucketOwnedByInitialSender);
+ CPPUNIT_TEST(testIgnoreIdealStateCalculationExceptions);
+ CPPUNIT_TEST(testGuardNotifyAlways);
+ CPPUNIT_TEST_SUITE_END();
+
+ bool ownsBucket(uint16_t distributorIndex,
+ const document::BucketId& bucket) const
+ {
+ uint16_t distributor = _app->getDistribution()->getIdealDistributorNode(
+ _clusterState, bucket);
+ return distributor == distributorIndex;
+ }
+
+ document::BucketId getFirstNonOwnedBucket() {
+ for (int i = 0; i < 1000; ++i) {
+ if (!ownsBucket(0, document::BucketId(16, i))) {
+ return document::BucketId(16, i);
+ }
+ }
+ return document::BucketId(0);
+ }
+
+ document::BucketId getFirstOwnedBucket() {
+ for (int i = 0; i < 1000; ++i) {
+ if (ownsBucket(0, document::BucketId(16, i))) {
+ return document::BucketId(16, i);
+ }
+ }
+ return document::BucketId(0);
+ }
+
+
+ void testSendNotifyBucketChangeIfOwningDistributorChanged();
+ void testDoNotSendNotifyBucketChangeIfBucketOwnedByInitialSender();
+ void testIgnoreIdealStateCalculationExceptions();
+ void testGuardNotifyAlways();
+
+ void doTestNotification(const document::BucketId& bucket,
+ const api::BucketInfo& info,
+ const std::string& wantedSend);
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(BucketOwnershipNotifierTest);
+
+void
+BucketOwnershipNotifierTest::setUp()
+{
+ _app.reset(new TestServiceLayerApp);
+ _app->setDistribution(Redundancy(1), NodeCount(2));
+ _app->setClusterState(_clusterState);
+}
+
+void
+BucketOwnershipNotifierTest::doTestNotification(const document::BucketId& bucket,
+ const api::BucketInfo& info,
+ const std::string& wantedSend)
+{
+ ServiceLayerComponent component(_app->getComponentRegister(), "dummy");
+ MessageSenderStub sender;
+
+ BucketOwnershipNotifier notifier(component, sender);
+
+ notifier.notifyIfOwnershipChanged(bucket, 0, info);
+
+ CPPUNIT_ASSERT_EQUAL(wantedSend, sender.getCommands(true, true));
+}
+
+void
+BucketOwnershipNotifierTest::testSendNotifyBucketChangeIfOwningDistributorChanged()
+{
+ api::BucketInfo info(0x1, 2, 3);
+ document::BucketId bucket(getFirstNonOwnedBucket());
+ CPPUNIT_ASSERT(bucket.getRawId() != 0);
+
+ std::ostringstream wanted;
+ wanted << "NotifyBucketChangeCommand("
+ << bucket
+ << ", " << info
+ << ") => 1";
+
+ doTestNotification(bucket, info, wanted.str());
+}
+
+void
+BucketOwnershipNotifierTest::testDoNotSendNotifyBucketChangeIfBucketOwnedByInitialSender()
+{
+ api::BucketInfo info(0x1, 2, 3);
+ document::BucketId bucket(getFirstOwnedBucket());
+ CPPUNIT_ASSERT(bucket.getRawId() != 0);
+
+ doTestNotification(bucket, info, "");
+}
+
+void
+BucketOwnershipNotifierTest::testIgnoreIdealStateCalculationExceptions()
+{
+ api::BucketInfo info(0x1, 2, 3);
+ document::BucketId bucket(getFirstNonOwnedBucket());
+ CPPUNIT_ASSERT(bucket.getRawId() != 0);
+
+ _app->setClusterState(lib::ClusterState("distributor:0 storage:1"));
+
+ doTestNotification(bucket, info, "");
+}
+
+void
+BucketOwnershipNotifierTest::testGuardNotifyAlways()
+{
+ ServiceLayerComponent component(_app->getComponentRegister(), "dummy");
+ MessageSenderStub sender;
+ BucketOwnershipNotifier notifier(component, sender);
+ std::ostringstream wanted;
+ {
+ NotificationGuard guard(notifier);
+
+ api::BucketInfo info(0x1, 2, 3);
+ document::BucketId bucket1(getFirstOwnedBucket());
+ guard.notifyAlways(bucket1, info);
+
+ document::BucketId bucket2(getFirstNonOwnedBucket());
+ guard.notifyAlways(bucket2, info);
+
+ wanted << "NotifyBucketChangeCommand("
+ << bucket1
+ << ", " << info
+ << ") => 0,"
+ << "NotifyBucketChangeCommand("
+ << bucket2
+ << ", " << info
+ << ") => 1";
+ }
+
+ CPPUNIT_ASSERT_EQUAL(wanted.str(), sender.getCommands(true, true));
+}
+
+} // storage
+
diff --git a/storage/src/tests/persistence/diskmoveoperationhandlertest.cpp b/storage/src/tests/persistence/diskmoveoperationhandlertest.cpp
new file mode 100644
index 00000000000..f47cc334e30
--- /dev/null
+++ b/storage/src/tests/persistence/diskmoveoperationhandlertest.cpp
@@ -0,0 +1,57 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/storage/persistence/diskmoveoperationhandler.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/storage/persistence/messages.h>
+#include <tests/persistence/persistencetestutils.h>
+
+namespace storage {
+
+class DiskMoveOperationHandlerTest : public PersistenceTestUtils
+{
+ CPPUNIT_TEST_SUITE(DiskMoveOperationHandlerTest);
+ CPPUNIT_TEST(testSimple);
+ CPPUNIT_TEST_SUITE_END();
+
+public:
+ void testSimple();
+ void testTargetExists();
+ void testTargetWithOverlap();
+
+ void insertDocumentInBucket(uint64_t location, uint64_t timestamp, document::BucketId bucket);
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(DiskMoveOperationHandlerTest);
+
+void
+DiskMoveOperationHandlerTest::testSimple()
+{
+ setupDisks(10);
+
+ // Create bucket 16, 4 on disk 3.
+ {
+ StorBucketDatabase::WrappedEntry entry(
+ createBucket(document::BucketId(16, 4)));
+ entry->disk = 3;
+ entry.write();
+ }
+
+ for (uint32_t i = 0; i < 10; i++) {
+ doPutOnDisk(3, 4, spi::Timestamp(1000 + i));
+ }
+
+ DiskMoveOperationHandler diskMoveHandler(
+ getEnv(3),
+ getPersistenceProvider());
+ BucketDiskMoveCommand move(document::BucketId(16, 4), 3, 4);
+
+ spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
+ diskMoveHandler.handleBucketDiskMove(move, context);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("BucketId(0x4000000000000004): 10,4"),
+ getBucketStatus(document::BucketId(16,4)));
+}
+
+}
diff --git a/storage/src/tests/persistence/filestorage/.gitignore b/storage/src/tests/persistence/filestorage/.gitignore
new file mode 100644
index 00000000000..cfeb99e9e3f
--- /dev/null
+++ b/storage/src/tests/persistence/filestorage/.gitignore
@@ -0,0 +1,13 @@
+*.So
+*.lo
+*.o
+.*.swp
+.config.log
+.depend
+.depend.NEW
+.deps
+.libs
+Makefile
+persistence
+testrunner
+testrunner.core
diff --git a/storage/src/tests/persistence/filestorage/CMakeLists.txt b/storage/src/tests/persistence/filestorage/CMakeLists.txt
new file mode 100644
index 00000000000..b1314ca0537
--- /dev/null
+++ b/storage/src/tests/persistence/filestorage/CMakeLists.txt
@@ -0,0 +1,17 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_library(storage_testfilestorage
+ SOURCES
+ filestormanagertest.cpp
+ operationabortingtest.cpp
+ filestortestfixture.cpp
+ mergeblockingtest.cpp
+ sanitycheckeddeletetest.cpp
+ deactivatebucketstest.cpp
+ modifiedbucketcheckertest.cpp
+ filestormodifiedbucketstest.cpp
+ deletebuckettest.cpp
+ singlebucketjointest.cpp
+ DEPENDS
+ AFTER
+ storage_storageconfig
+)
diff --git a/storage/src/tests/persistence/filestorage/deactivatebucketstest.cpp b/storage/src/tests/persistence/filestorage/deactivatebucketstest.cpp
new file mode 100644
index 00000000000..6de67a3fec0
--- /dev/null
+++ b/storage/src/tests/persistence/filestorage/deactivatebucketstest.cpp
@@ -0,0 +1,66 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <vespa/storageapi/message/state.h>
+#include <tests/persistence/persistenceproviderwrapper.h>
+#include <vespa/persistence/dummyimpl/dummypersistence.h>
+#include <tests/persistence/filestorage/filestortestfixture.h>
+
+namespace storage {
+
+class DeactivateBucketsTest : public FileStorTestFixture
+{
+ bool isActive(const document::BucketId&) const;
+public:
+ void bucketsInDatabaseDeactivatedWhenNodeDownInClusterState();
+
+ CPPUNIT_TEST_SUITE(DeactivateBucketsTest);
+ CPPUNIT_TEST(bucketsInDatabaseDeactivatedWhenNodeDownInClusterState);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(DeactivateBucketsTest);
+
+bool
+DeactivateBucketsTest::isActive(const document::BucketId& bucket) const
+{
+ StorBucketDatabase::WrappedEntry entry(
+ _node->getStorageBucketDatabase().get(bucket, "foo"));
+ CPPUNIT_ASSERT(entry.exist());
+ return entry->info.isActive();
+}
+
+void
+DeactivateBucketsTest::bucketsInDatabaseDeactivatedWhenNodeDownInClusterState()
+{
+ TestFileStorComponents c(*this, "bucketsInDatabaseDeactivatedWhenNodeDownInClusterState");
+ // Must set state to up first, or down-edge case won't trigger.
+ std::string upState("storage:2 distributor:2");
+ _node->getStateUpdater().setClusterState(
+ lib::ClusterState::CSP(new lib::ClusterState(upState)));
+
+ document::BucketId bucket(8, 123);
+ spi::Bucket spiBucket(bucket, spi::PartitionId(0));
+
+ createBucket(bucket);
+ api::BucketInfo serviceLayerInfo(1, 2, 3, 4, 5, true, true);
+ {
+ StorBucketDatabase::WrappedEntry entry(
+ _node->getStorageBucketDatabase().get(bucket, "foo",
+ StorBucketDatabase::CREATE_IF_NONEXISTING));
+ entry->disk = 0;
+ entry->info = serviceLayerInfo;
+ entry.write();
+ }
+ CPPUNIT_ASSERT(isActive(bucket));
+ std::string downState("storage:2 .1.s:d distributor:2");
+ _node->getStateUpdater().setClusterState(
+ lib::ClusterState::CSP(new lib::ClusterState(downState)));
+
+ // Buckets should have been deactivated in content layer
+ CPPUNIT_ASSERT(!isActive(bucket));
+}
+
+} // namespace storage
diff --git a/storage/src/tests/persistence/filestorage/deletebuckettest.cpp b/storage/src/tests/persistence/filestorage/deletebuckettest.cpp
new file mode 100644
index 00000000000..08ca9bc68fa
--- /dev/null
+++ b/storage/src/tests/persistence/filestorage/deletebuckettest.cpp
@@ -0,0 +1,63 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <tests/persistence/persistenceproviderwrapper.h>
+#include <vespa/persistence/dummyimpl/dummypersistence.h>
+#include <tests/persistence/filestorage/filestortestfixture.h>
+
+LOG_SETUP(".deletebuckettest");
+
+namespace storage {
+
+class DeleteBucketTest : public FileStorTestFixture
+{
+public:
+ void testDeleteAbortsOperationsForBucket();
+
+ CPPUNIT_TEST_SUITE(DeleteBucketTest);
+ CPPUNIT_TEST(testDeleteAbortsOperationsForBucket);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(DeleteBucketTest);
+
+void
+DeleteBucketTest::testDeleteAbortsOperationsForBucket()
+{
+ TestFileStorComponents c(*this, "testDeleteAbortsOperationsForBucket");
+ document::BucketId bucket(16, 1);
+
+ createBucket(bucket);
+ LOG(info, "TEST STAGE: taking resume guard");
+ ResumeGuard rg(c.manager->getFileStorHandler().pause());
+ // First put may or may not be queued, since pausing might race with
+ // an existing getNextMessage iteration (ugh...).
+ c.sendPut(bucket, DocumentIndex(0), PutTimestamp(1000));
+ // Put will be queued since thread now must know it's paused.
+ c.sendPut(bucket, DocumentIndex(1), PutTimestamp(1000));
+
+ auto deleteMsg = std::make_shared<api::DeleteBucketCommand>(bucket);
+ c.top.sendDown(deleteMsg);
+ // We should now have two put replies. The first one will either be OK
+ // or BUCKET_DELETED depending on whether it raced. The second (which is
+ // the one we care about since it's deterministic) must be BUCKET_DELETED.
+ // Problem is, their returned ordering is not deterministic so we're left
+ // with having to check that _at least_ 1 reply had BUCKET_DELETED. Joy!
+ c.top.waitForMessages(2, 60*2);
+ std::vector<api::StorageMessage::SP> msgs(c.top.getRepliesOnce());
+ CPPUNIT_ASSERT_EQUAL(size_t(2), msgs.size());
+ int numDeleted = 0;
+ for (uint32_t i = 0; i < 2; ++i) {
+ api::StorageReply& reply(dynamic_cast<api::StorageReply&>(*msgs[i]));
+ if (reply.getResult().getResult() == api::ReturnCode::BUCKET_DELETED) {
+ ++numDeleted;
+ }
+ }
+ CPPUNIT_ASSERT(numDeleted >= 1);
+ LOG(info, "TEST STAGE: done, releasing resume guard");
+}
+
+} // namespace storage
diff --git a/storage/src/tests/persistence/filestorage/filestormanagertest.cpp b/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
new file mode 100644
index 00000000000..0ffbe9fa440
--- /dev/null
+++ b/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
@@ -0,0 +1,3150 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/document/update/assignvalueupdate.h>
+#include <vespa/document/datatype/datatype.h>
+#include <vespa/document/fieldvalue/document.h>
+#include <vespa/document/datatype/documenttype.h>
+#include <vespa/document/update/documentupdate.h>
+#include <vespa/document/fieldvalue/rawfieldvalue.h>
+#include <vespa/document/fieldvalue/stringfieldvalue.h>
+#include <vespa/document/select/parser.h>
+#include <fstream>
+#include <memory>
+#include <atomic>
+#include <vespa/vdslib/state/random.h>
+#include <vespa/vdslib/container/mutabledocumentlist.h>
+#include <vespa/vdslib/container/operationlist.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/vespalib/io/fileutil.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <vespa/storageapi/message/bucketsplitting.h>
+#include <vespa/storageapi/message/multioperation.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storageapi/message/removelocation.h>
+#include <vespa/storage/bucketdb/bucketmanager.h>
+#include <vespa/storage/bucketdb/storbucketdb.h>
+#include <vespa/storage/common/bucketmessages.h>
+#include <vespa/storageframework/storageframework.h>
+#include <vespa/storage/persistence/persistencethread.h>
+#include <vespa/storage/persistence/messages.h>
+#include <vespa/storage/persistence/filestorage/filestormanager.h>
+#include <vespa/storage/persistence/filestorage/modifiedbucketchecker.h>
+#include <tests/common/testhelper.h>
+#include <tests/common/storagelinktest.h>
+#include <tests/common/teststorageapp.h>
+#include <tests/common/dummystoragelink.h>
+#include <tests/persistence/filestorage/forwardingmessagesender.h>
+#include <vespa/persistence/dummyimpl/dummypersistence.h>
+#include <vespa/log/log.h>
+#include <vespa/storageapi/message/batch.h>
+#include <vespa/storage/storageserver/statemanager.h>
+
+LOG_SETUP(".filestormanagertest");
+
+using std::unique_ptr;
+using document::Document;
+using namespace storage::api;
+
+// Waits up to `time` for exactly one reply on `link`, downcasts it to
+// `replytype` and stores the raw pointer in `reply`. Fails the test if
+// no reply arrives in time, if more than one reply is present, or if the
+// reply is of a different type. On any failure `reply` is left as 0.
+// NOTE: expands to multiple statements; callers must not use it as the
+// single statement of an unbraced if/else.
+#define ASSERT_SINGLE_REPLY(replytype, reply, link, time) \
+reply = 0; \
+try{ \
+ link.waitForMessages(1, time); \
+ CPPUNIT_ASSERT_EQUAL((size_t)1, link.getNumReplies()); \
+ reply = dynamic_cast<replytype*>(link.getReply(0).get()); \
+ if (reply == 0) { \
+ CPPUNIT_FAIL("Got reply of unexpected type: " \
+ + link.getReply(0)->getType().toString()); \
+ } \
+} catch (vespalib::Exception& e) { \
+ reply = 0; \
+ CPPUNIT_FAIL("Failed to find single reply in time"); \
+}
+
+namespace storage {
+
+namespace {
+ // Shared default load type used when constructing spi::Context objects
+ // in the tests below (id 0, name "default").
+ spi::LoadType defaultLoadType(0, "default");
+}
+
+// CppUnit fixture for FileStorManager integration tests. Owns a
+// TestServiceLayerApp node with dummy persistence plus the config
+// instances the individual tests construct FileStorManager from.
+struct FileStorManagerTest : public CppUnit::TestFixture {
+ enum {LONG_WAITTIME=60};
+ unique_ptr<TestServiceLayerApp> _node;
+ std::unique_ptr<vdstestlib::DirConfig> config;
+ std::unique_ptr<vdstestlib::DirConfig> config2;
+ std::unique_ptr<vdstestlib::DirConfig> smallConfig;
+ const uint32_t _waitTime;
+ const document::DocumentType* _testdoctype1;
+
+ FileStorManagerTest() : _node(), _waitTime(LONG_WAITTIME) {}
+
+ void setUp();
+ void tearDown();
+
+ void testPut();
+ void testHeaderOnlyPut();
+ void testFlush();
+ void testRemapSplit();
+ void testHandlerPriority();
+ void testHandlerPriorityBlocking();
+ void testHandlerPriorityPreempt();
+ void testHandlerMulti();
+ void testHandlerTimeout();
+ void testHandlerPause();
+ void testHandlerPausedMultiThread();
+ void testPriority();
+ void testSplit1();
+ void testSplitSingleGroup();
+ void testSplitEmptyTargetWithRemappedOps();
+ void testNotifyOnSplitSourceOwnershipChanged();
+ void testJoin();
+ void testVisiting();
+ void testRemoveLocation();
+ void testDeleteBucket();
+ void testDeleteBucketRejectOutdatedBucketInfo();
+ void testDeleteBucketWithInvalidBucketInfo();
+ void testNoTimestamps();
+ void testEqualTimestamps();
+ void testMultiOp();
+ void testGetIter();
+ void testSetBucketActiveState();
+ void testNotifyOwnerDistributorOnOutdatedSetBucketState();
+ void testGetBucketDiffImplicitCreateBucket();
+ void testMergeBucketImplicitCreateBucket();
+ void testNewlyCreatedBucketIsReady();
+ void testCreateBucketSetsActiveFlagInDatabaseAndReply();
+ void testFileStorThreadLockingStressTest();
+ void testStateChange();
+ void testRepairNotifiesDistributorOnChange();
+ void testDiskMove();
+
+ // NOTE: testFileStorThreadLockingStressTest is declared above but not
+ // registered in this suite, so it does not run.
+ CPPUNIT_TEST_SUITE(FileStorManagerTest);
+ CPPUNIT_TEST(testPut);
+ CPPUNIT_TEST(testHeaderOnlyPut);
+ CPPUNIT_TEST(testFlush);
+ CPPUNIT_TEST(testRemapSplit);
+ CPPUNIT_TEST(testHandlerPriority);
+ CPPUNIT_TEST(testHandlerPriorityBlocking);
+ CPPUNIT_TEST(testHandlerPriorityPreempt);
+ CPPUNIT_TEST(testHandlerMulti);
+ CPPUNIT_TEST(testHandlerTimeout);
+ CPPUNIT_TEST(testHandlerPause);
+ CPPUNIT_TEST(testHandlerPausedMultiThread);
+ CPPUNIT_TEST(testPriority);
+ CPPUNIT_TEST(testSplit1);
+ CPPUNIT_TEST(testSplitSingleGroup);
+ CPPUNIT_TEST(testSplitEmptyTargetWithRemappedOps);
+ CPPUNIT_TEST(testNotifyOnSplitSourceOwnershipChanged);
+ CPPUNIT_TEST(testJoin);
+ CPPUNIT_TEST(testVisiting);
+ CPPUNIT_TEST(testRemoveLocation);
+ CPPUNIT_TEST(testDeleteBucket);
+ CPPUNIT_TEST(testDeleteBucketRejectOutdatedBucketInfo);
+ CPPUNIT_TEST(testDeleteBucketWithInvalidBucketInfo);
+ CPPUNIT_TEST(testNoTimestamps);
+ CPPUNIT_TEST(testEqualTimestamps);
+ CPPUNIT_TEST(testMultiOp);
+ CPPUNIT_TEST(testGetIter);
+ CPPUNIT_TEST(testSetBucketActiveState);
+ CPPUNIT_TEST(testNotifyOwnerDistributorOnOutdatedSetBucketState);
+ CPPUNIT_TEST(testGetBucketDiffImplicitCreateBucket);
+ CPPUNIT_TEST(testMergeBucketImplicitCreateBucket);
+ CPPUNIT_TEST(testNewlyCreatedBucketIsReady);
+ CPPUNIT_TEST(testCreateBucketSetsActiveFlagInDatabaseAndReply);
+ CPPUNIT_TEST(testStateChange);
+ CPPUNIT_TEST(testRepairNotifiesDistributorOnChange);
+ CPPUNIT_TEST(testDiskMove);
+ CPPUNIT_TEST_SUITE_END();
+
+ // Creates the bucket both in the persistence provider and in the node's
+ // bucket database, placing it on the given disk with an empty-but-ready
+ // BucketInfo.
+ void createBucket(document::BucketId bid, uint16_t disk)
+ {
+ spi::Context context(defaultLoadType, spi::Priority(0),
+ spi::Trace::TraceLevel(0));
+ _node->getPersistenceProvider().createBucket(
+ spi::Bucket(bid, spi::PartitionId(disk)), context);
+
+ StorBucketDatabase::WrappedEntry entry(
+ _node->getStorageBucketDatabase().get(bid, "foo",
+ StorBucketDatabase::CREATE_IF_NONEXISTING));
+ entry->disk = disk;
+ entry->info = api::BucketInfo(0, 0, 0, 0, 0, true, false);
+ entry.write();
+ }
+
+ document::Document::UP createDocument(
+ const std::string& content, const std::string& id)
+ {
+ return _node->getTestDocMan().createDocument(content, id);
+ }
+
+ // True iff the ideal distributor for `bucket` in the current cluster
+ // state is the one with the given index.
+ bool ownsBucket(uint16_t distributorIndex,
+ const document::BucketId& bucket) const
+ {
+ uint16_t distributor(
+ _node->getDistribution()->getIdealDistributorNode(
+ *_node->getStateUpdater().getSystemState(), bucket));
+ return distributor == distributorIndex;
+ }
+
+ // Scans bucket ids (16, 0..999) and returns the first one not owned by
+ // the given distributor; BucketId(0) if all 1000 are owned (unlikely).
+ document::BucketId getFirstBucketNotOwnedByDistributor(uint16_t distributor) {
+ for (int i = 0; i < 1000; ++i) {
+ if (!ownsBucket(distributor, document::BucketId(16, i))) {
+ return document::BucketId(16, i);
+ }
+ }
+ return document::BucketId(0);
+ }
+
+ // Safe only because setupDisks() installs a DummyPersistence provider.
+ spi::dummy::DummyPersistence& getDummyPersistence() {
+ return static_cast<spi::dummy::DummyPersistence&>
+ (_node->getPersistenceProvider());
+ }
+
+ void setClusterState(const std::string& state) {
+ _node->getStateUpdater().setClusterState(
+ lib::ClusterState::CSP(
+ new lib::ClusterState(state)));
+ }
+
+ // (Re)creates on-disk directories and the test node with `diskCount`
+ // disks, plus the three config variants used by the tests. Called from
+ // setUp() with one disk; tests needing more call it again themselves.
+ void setupDisks(uint32_t diskCount) {
+ config.reset(new vdstestlib::DirConfig(getStandardConfig(true)));
+
+ config2.reset(new vdstestlib::DirConfig(*config));
+ config2->getConfig("stor-server").set("root_folder", "vdsroot.2");
+ config2->getConfig("stor-devices").set("root_folder", "vdsroot.2");
+ config2->getConfig("stor-server").set("node_index", "1");
+
+ smallConfig.reset(new vdstestlib::DirConfig(*config));
+ vdstestlib::DirConfig::Config& c(
+ smallConfig->getConfig("stor-filestor", true));
+ c.set("initial_index_read", "128");
+ c.set("use_direct_io", "false");
+ c.set("maximum_gap_to_read_through", "64");
+
+ assert(system("rm -rf vdsroot") == 0);
+ assert(system("rm -rf vdsroot.2") == 0);
+ assert(system("mkdir -p vdsroot/disks/d0") == 0);
+ assert(system("mkdir -p vdsroot.2/disks/d0") == 0);
+ try {
+ _node.reset(new TestServiceLayerApp(DiskCount(diskCount), NodeIndex(0),
+ config->getConfigId()));
+ _node->setupDummyPersistence();
+ } catch (config::InvalidConfigException& e) {
+ fprintf(stderr, "%s\n", e.what());
+ }
+ _testdoctype1 = _node->getTypeRepo()->getDocumentType("testdoctype1");
+ _node->getMemoryManager().registerAllocationType(
+ framework::MemoryAllocationType("VISITOR_BUFFER"));
+ }
+
+ void putDoc(DummyStorageLink& top,
+ FileStorHandler& filestorHandler,
+ const document::BucketId& bucket,
+ uint32_t docNum);
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(FileStorManagerTest);
+
+// Recursively searches `path` for an entry named `file` and returns its
+// full path, or "" if not found. Entries starting with '.' are skipped
+// (also skips hidden files, which is acceptable here).
+// NOTE(review): a directory whose name equals `file` also matches, since
+// the name check runs after the recursion — presumably fine for these
+// tests, but confirm if reused elsewhere.
+std::string findFile(const std::string& path, const std::string& file) {
+ FastOS_DirectoryScan dirScan(path.c_str());
+ while (dirScan.ReadNext()) {
+ if (dirScan.GetName()[0] == '.') {
+ // Ignore current and parent dir.. Ignores hidden files too, but
+ // that doesn't matter as we're not trying to find them.
+ continue;
+ }
+ std::string filename(dirScan.GetName());
+ if (dirScan.IsDirectory()) {
+ std::string result = findFile(path + "/" + filename, file);
+ if (result != "") {
+ return result;
+ }
+ }
+ if (filename == file) {
+ return path + "/" + filename;
+ }
+ }
+ return "";
+}
+
+// True iff an entry named `file` exists anywhere under `path`.
+bool fileExistsWithin(const std::string& path, const std::string& file) {
+ return !(findFile(path, file) == "");
+}
+
+// Factory for a PersistenceThread bound to the given disk/handler/metrics,
+// returned through the DiskThread interface.
+// NOTE(review): `config` is marked unused via (void) but its getConfigId()
+// is still passed to PersistenceThread below — the cast is stale.
+std::unique_ptr<DiskThread> createThread(vdstestlib::DirConfig& config,
+ TestServiceLayerApp& node,
+ spi::PersistenceProvider& provider,
+ FileStorHandler& filestorHandler,
+ FileStorThreadMetrics& metrics,
+ uint16_t deviceIndex,
+ uint8_t lowestPriority)
+{
+ (void) config;
+ std::unique_ptr<DiskThread> disk;
+ disk.reset(new PersistenceThread(
+ node.getComponentRegister(), config.getConfigId(), provider,
+ filestorHandler, metrics,
+ deviceIndex, lowestPriority));
+ return disk;
+}
+
+namespace {
+
+// Bundles a DummyStorageLink chain with a FileStorManager built from the
+// fixture's config and node, and opens the chain. `manager` is a
+// non-owning observer: ownership of the FileStorManager is transferred
+// to `top` via push_back, so it lives exactly as long as the link chain.
+struct TestFileStorComponents
+{
+private:
+ TestName _testName;
+public:
+ DummyStorageLink top;
+ FileStorManager* manager;
+
+ TestFileStorComponents(FileStorManagerTest& test, const char* testName)
+ : _testName(testName),
+ manager(new FileStorManager(test.config->getConfigId(),
+ test._node->getPartitions(),
+ test._node->getPersistenceProvider(),
+ test._node->getComponentRegister()))
+ {
+ top.push_back(unique_ptr<StorageLink>(manager));
+ top.open();
+ }
+};
+
+}
+
+// Per-test setup: a single-disk node with dummy persistence.
+void
+FileStorManagerTest::setUp()
+{
+ setupDisks(1);
+}
+
+// Per-test teardown: destroys the node (and everything it owns).
+void
+FileStorManagerTest::tearDown()
+{
+ _node.reset(0);
+}
+
+// Puts a document, then re-puts it with a later timestamp and
+// setUpdateTimestamp pointing at the original (header-only update of the
+// "headerval" field), and finally gets it back to verify the header field
+// was updated while the rest of the document is unchanged.
+void
+FileStorManagerTest::testHeaderOnlyPut()
+{
+ TestName testName("testHeaderOnlyPut");
+ // Setting up manager
+ DummyStorageLink top;
+ FileStorManager *manager;
+ top.push_back(unique_ptr<StorageLink>(manager =
+ new FileStorManager(config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
+ top.open();
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
+ // Creating a document to test with
+ Document::SP doc(createDocument(
+ "some content", "userdoc:crawler:4000:foo").release());
+
+ document::BucketId bid(16, 4000);
+
+ createBucket(bid, 0);
+
+ // Putting it
+ {
+ std::shared_ptr<api::PutCommand> cmd(
+ new api::PutCommand(bid, doc, 105));
+ cmd->setAddress(address);
+ top.sendDown(cmd);
+ top.waitForMessages(1, _waitTime);
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ std::shared_ptr<api::PutReply> reply(
+ std::dynamic_pointer_cast<api::PutReply>(
+ top.getReply(0)));
+ top.reset();
+ CPPUNIT_ASSERT(reply.get());
+ CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ CPPUNIT_ASSERT_EQUAL(1, (int)reply->getBucketInfo().getDocumentCount());
+ }
+ doc->setValue(doc->getField("headerval"), document::IntFieldValue(42));
+ // Putting it again, this time with header only
+ {
+ std::shared_ptr<api::PutCommand> cmd(
+ new api::PutCommand(bid, doc, 124));
+ cmd->setUpdateTimestamp(105);
+ cmd->setAddress(address);
+ top.sendDown(cmd);
+ top.waitForMessages(1, _waitTime);
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ std::shared_ptr<api::PutReply> reply(
+ std::dynamic_pointer_cast<api::PutReply>(
+ top.getReply(0)));
+ top.reset();
+ CPPUNIT_ASSERT(reply.get());
+ CPPUNIT_ASSERT_EQUAL(ReturnCode::OK, reply->getResult().getResult());
+ }
+ // Getting it
+ {
+ std::shared_ptr<api::GetCommand> cmd(new api::GetCommand(
+ bid, doc->getId(), "[all]"));
+ cmd->setAddress(address);
+ top.sendDown(cmd);
+ top.waitForMessages(1, _waitTime);
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ std::shared_ptr<api::GetReply> reply2(
+ std::dynamic_pointer_cast<api::GetReply>(
+ top.getReply(0)));
+ top.reset();
+ CPPUNIT_ASSERT(reply2.get());
+ CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply2->getResult());
+ CPPUNIT_ASSERT_EQUAL(doc->getId().toString(),
+ reply2->getDocumentId().toString());
+ // Ensure partial update was done, but other things are equal
+ document::FieldValue::UP value(
+ reply2->getDocument()->getValue(doc->getField("headerval")));
+ CPPUNIT_ASSERT(value.get());
+ CPPUNIT_ASSERT_EQUAL(42, dynamic_cast<document::IntFieldValue&>(
+ *value).getAsInt());
+ reply2->getDocument()->remove("headerval");
+ doc->remove("headerval");
+ CPPUNIT_ASSERT_EQUAL(*doc, *reply2->getDocument());
+ }
+}
+
+// Basic put path: a single put into a freshly created bucket must return
+// OK and report a document count of 1 in the bucket info.
+void
+FileStorManagerTest::testPut()
+{
+ TestName testName("testPut");
+ // Setting up manager
+ DummyStorageLink top;
+ FileStorManager *manager;
+ top.push_back(unique_ptr<StorageLink>(manager =
+ new FileStorManager(config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
+ top.open();
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
+ // Creating a document to test with
+ Document::SP doc(createDocument(
+ "some content", "userdoc:crawler:4000:foo").release());
+
+ document::BucketId bid(16, 4000);
+
+ createBucket(bid, 0);
+
+ // Putting it
+ {
+ std::shared_ptr<api::PutCommand> cmd(
+ new api::PutCommand(bid, doc, 105));
+ cmd->setAddress(address);
+ top.sendDown(cmd);
+ top.waitForMessages(1, _waitTime);
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ std::shared_ptr<api::PutReply> reply(
+ std::dynamic_pointer_cast<api::PutReply>(
+ top.getReply(0)));
+ top.reset();
+ CPPUNIT_ASSERT(reply.get());
+ CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ CPPUNIT_ASSERT_EQUAL(1, (int)reply->getBucketInfo().getDocumentCount());
+ }
+}
+
+// Verifies BucketDiskMoveCommand: after putting a document on disk 0,
+// moving the bucket to disk 1 must succeed and the bucket database entry
+// must show the new disk with unchanged bucket info.
+void
+FileStorManagerTest::testDiskMove()
+{
+ setupDisks(2);
+
+ // Setting up manager
+ DummyStorageLink top;
+ FileStorManager *manager;
+ top.push_back(unique_ptr<StorageLink>(manager =
+ new FileStorManager(config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
+ top.open();
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
+ // Creating a document to test with
+ Document::SP doc(createDocument(
+ "some content", "userdoc:crawler:4000:foo").release());
+
+ document::BucketId bid(16, 4000);
+
+ createBucket(bid, 0);
+
+ // Putting it
+ {
+ std::shared_ptr<api::PutCommand> cmd(
+ new api::PutCommand(bid, doc, 105));
+ cmd->setAddress(address);
+ top.sendDown(cmd);
+ top.waitForMessages(1, _waitTime);
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ std::shared_ptr<api::PutReply> reply(
+ std::dynamic_pointer_cast<api::PutReply>(
+ top.getReply(0)));
+ top.reset();
+ CPPUNIT_ASSERT(reply.get());
+ CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ CPPUNIT_ASSERT_EQUAL(1, (int)reply->getBucketInfo().getDocumentCount());
+ }
+
+ // Bucket must initially live on disk 0.
+ {
+ StorBucketDatabase::WrappedEntry entry(
+ _node->getStorageBucketDatabase().get(bid, "foo"));
+
+ CPPUNIT_ASSERT_EQUAL(0, (int)entry->disk);
+ CPPUNIT_ASSERT_EQUAL(
+ vespalib::string(
+ "BucketInfo(crc 0x28cc441f, docCount 1, totDocSize 122, "
+ "ready true, active false)"),
+ entry->getBucketInfo().toString());
+ }
+
+ // Move the bucket from disk 0 to disk 1.
+ {
+ std::shared_ptr<BucketDiskMoveCommand> cmd(
+ new BucketDiskMoveCommand(bid, 0, 1));
+
+ top.sendDown(cmd);
+ top.waitForMessages(1, _waitTime);
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ std::shared_ptr<BucketDiskMoveReply> reply(
+ std::dynamic_pointer_cast<BucketDiskMoveReply>(top.getReply(0)));
+ top.reset();
+ CPPUNIT_ASSERT(reply.get());
+ CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ CPPUNIT_ASSERT_EQUAL(1, (int)reply->getBucketInfo().getDocumentCount());
+ }
+
+ // Same bucket info, now on disk 1.
+ {
+ StorBucketDatabase::WrappedEntry entry(
+ _node->getStorageBucketDatabase().get(bid, "foo"));
+
+ CPPUNIT_ASSERT_EQUAL(1, (int)entry->disk);
+ CPPUNIT_ASSERT_EQUAL(
+ vespalib::string(
+ "BucketInfo(crc 0x28cc441f, docCount 1, totDocSize 122, "
+ "ready true, active false)"),
+ entry->getBucketInfo().toString());
+ }
+}
+
+
+// Cluster state changes must be propagated down to the persistence
+// provider: node up when state says up, down when marked ".0.s:d".
+void
+FileStorManagerTest::testStateChange()
+{
+ TestName testName("testStateChange");
+ // Setting up manager
+ DummyStorageLink top;
+ FileStorManager *manager;
+ top.push_back(unique_ptr<StorageLink>(manager =
+ new FileStorManager(config->getConfigId(), _node->getPartitions(),
+ _node->getPersistenceProvider(),
+ _node->getComponentRegister())));
+ top.open();
+
+ setClusterState("storage:3 distributor:3");
+
+ CPPUNIT_ASSERT_EQUAL(true, getDummyPersistence().getClusterState().nodeUp());
+
+ setClusterState("storage:3 .0.s:d distributor:3");
+
+ CPPUNIT_ASSERT_EQUAL(false, getDummyPersistence().getClusterState().nodeUp());
+}
+
+// After a simulated maintenance failure, a RepairBucketCommand that
+// changes the bucket must cause a NotifyBucketChangeCommand to be sent
+// up towards the distributor (checked by string representation).
+void
+FileStorManagerTest::testRepairNotifiesDistributorOnChange()
+{
+ // Setting up manager
+ DummyStorageLink top;
+ FileStorManager *manager;
+ top.push_back(unique_ptr<StorageLink>(manager =
+ new FileStorManager(config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
+ setClusterState("storage:1 distributor:1");
+ top.open();
+
+ createBucket(document::BucketId(16, 1), 0);
+
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
+
+ // Creating a document to test with
+
+ for (uint32_t i = 0; i < 3; ++i) {
+ document::DocumentId docId(vespalib::make_string("userdoc:ns:1:%d", i));
+ Document::SP doc(new Document(*_testdoctype1, docId));
+ std::shared_ptr<api::PutCommand> cmd(
+ new api::PutCommand(document::BucketId(16, 1), doc, i + 1));
+ cmd->setAddress(address);
+ top.sendDown(cmd);
+ }
+
+ top.waitForMessages(3, _waitTime);
+ top.reset();
+
+ // Makes the next maintenance operation on the bucket "lose" a document.
+ getDummyPersistence().simulateMaintenanceFailure();
+
+ std::shared_ptr<RepairBucketCommand> cmd(
+ new RepairBucketCommand(document::BucketId(16, 1), 0));
+ top.sendDown(cmd);
+
+ // Expect both the repair reply and the change notification.
+ top.waitForMessages(2, _waitTime);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("NotifyBucketChangeCommand(BucketId(0x4000000000000001), "
+ "BucketInfo(crc 0x2625a314, docCount 2, totDocSize 170, "
+ "ready true, active false))"), top.getReply(0)->toString());
+
+ top.close();
+}
+
+
+// Closing the link chain while puts are queued must still produce a reply
+// for every command (flush on close, no messages silently dropped).
+void
+FileStorManagerTest::testFlush()
+{
+ TestName testName("testFlush");
+ // Setting up manager
+ DummyStorageLink top;
+ FileStorManager *manager;
+ top.push_back(unique_ptr<StorageLink>(manager = new FileStorManager(
+ config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
+ top.open();
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
+ // Creating a document to test with
+
+ document::DocumentId docId("doc:crawler:http://www.ntnu.no/");
+ Document::SP doc(new Document(*_testdoctype1, docId));
+ document::BucketId bid(4000);
+
+ static const uint32_t msgCount = 10;
+
+ // Generating many put commands
+ std::vector<std::shared_ptr<api::StorageCommand> > _commands;
+ for (uint32_t i=0; i<msgCount; ++i) {
+ std::shared_ptr<api::PutCommand> cmd(
+ new api::PutCommand(bid, doc, i+1));
+ cmd->setAddress(address);
+ _commands.push_back(cmd);
+ }
+ for (uint32_t i=0; i<msgCount; ++i) {
+ top.sendDown(_commands[i]);
+ }
+ top.close();
+ top.flush();
+ CPPUNIT_ASSERT_EQUAL((size_t) msgCount, top.getNumReplies());
+}
+
+// Schedules five puts with priorities 15..75 and verifies that
+// getNextMessage(disk, maxPriority) only hands out messages whose
+// priority is within the given bound, in priority order.
+void
+FileStorManagerTest::testHandlerPriority()
+{
+ TestName testName("testHandlerPriority");
+ // Setup a filestorthread to test
+ DummyStorageLink top;
+ DummyStorageLink *dummyManager;
+ top.push_back(std::unique_ptr<StorageLink>(
+ dummyManager = new DummyStorageLink));
+ top.open();
+ ForwardingMessageSender messageSender(*dummyManager);
+ // Since we fake time with small numbers, we need to make sure we dont
+ // compact them away, as they will seem to be from 1970
+
+ documentapi::LoadTypeSet loadTypes("raw:");
+ FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
+ metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
+
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
+ _node->getComponentRegister(), 255, 0);
+ filestorHandler.setGetNextMessageTimeout(50);
+
+ std::string content("Here is some content which is in all documents");
+ std::ostringstream uri;
+
+ Document::SP doc(createDocument(
+ content, "userdoc:footype:1234:bar").release());
+
+ document::BucketIdFactory factory;
+ document::BucketId bucket(16, factory.getBucketId(
+ doc->getId()).getRawId());
+
+ // Populate bucket with the given data
+ for (uint32_t i = 1; i < 6; i++) {
+ std::shared_ptr<api::PutCommand> cmd(
+ new api::PutCommand(bucket, doc, 100));
+ std::unique_ptr<api::StorageMessageAddress> address(
+ new api::StorageMessageAddress(
+ "storage", lib::NodeType::STORAGE, 3));
+ cmd->setAddress(*address);
+ cmd->setPriority(i * 15);
+ filestorHandler.schedule(cmd, 0);
+ }
+
+ // Bound 20 yields only the prio-15 message; bound 50 the next two, etc.
+ CPPUNIT_ASSERT_EQUAL(15, (int)filestorHandler.getNextMessage(0, 20).second->getPriority());
+ CPPUNIT_ASSERT(filestorHandler.getNextMessage(0, 20).second.get() == NULL);
+ CPPUNIT_ASSERT_EQUAL(30, (int)filestorHandler.getNextMessage(0, 50).second->getPriority());
+ CPPUNIT_ASSERT_EQUAL(45, (int)filestorHandler.getNextMessage(0, 50).second->getPriority());
+ CPPUNIT_ASSERT(filestorHandler.getNextMessage(0, 50).second.get() == NULL);
+ CPPUNIT_ASSERT_EQUAL(60, (int)filestorHandler.getNextMessage(0, 255).second->getPriority());
+ CPPUNIT_ASSERT_EQUAL(75, (int)filestorHandler.getNextMessage(0, 255).second->getPriority());
+}
+
+// Background thread that continuously schedules put commands for the
+// bucket of `_doc` until `_done` is set; sets `_threadDone` on exit.
+// NOTE(review): `_done`/`_threadDone` are plain bools written and read
+// from different threads without synchronization — presumably tolerated
+// in this test, but technically a data race.
+class MessagePusherThread : public document::Runnable
+{
+public:
+ FileStorHandler& _handler;
+ Document::SP _doc;
+ bool _done;
+ bool _threadDone;
+
+ MessagePusherThread(FileStorHandler& handler, Document::SP doc)
+ : _handler(handler), _doc(doc), _done(false), _threadDone(false) {}
+
+ void run() {
+ while (!_done) {
+ document::BucketIdFactory factory;
+ document::BucketId bucket(16, factory.getBucketId(
+ _doc->getId()).getRawId());
+
+ std::shared_ptr<api::PutCommand> cmd(
+ new api::PutCommand(bucket, _doc, 100));
+ _handler.schedule(cmd, 0);
+ FastOS_Thread::Sleep(1);
+ }
+
+ _threadDone = true;
+ }
+};
+
+// Background thread that continuously fetches messages from the handler.
+// `_config` is a generation counter bumped by the test while the handler
+// is paused; if it changes in the middle of processing a fetched message
+// the pause guarantee was violated and `_failed` is set.
+class MessageFetchingThread : public document::Runnable {
+public:
+ FileStorHandler& _handler;
+ std::atomic<uint32_t> _config;
+ uint32_t _fetchedCount;
+ bool _done;
+ bool _failed;
+ bool _threadDone;
+
+ MessageFetchingThread(FileStorHandler& handler)
+ : _handler(handler), _config(0), _fetchedCount(0), _done(false),
+ _failed(false), _threadDone(false) {}
+
+ void run() {
+ while (!_done) {
+ FileStorHandler::LockedMessage msg = _handler.getNextMessage(0, 255);
+ if (msg.second.get()) {
+ uint32_t originalConfig = _config.load();
+ _fetchedCount++;
+ FastOS_Thread::Sleep(5);
+
+ // Pause must block config bumps while a message is in flight.
+ if (_config.load() != originalConfig) {
+ _failed = true;
+ }
+ } else {
+ FastOS_Thread::Sleep(1);
+ }
+ }
+
+ _threadDone = true;
+ };
+};
+
+// Stress test of pause(): with one thread pushing puts and one fetching,
+// repeatedly pause the handler and bump the fetcher's config counter.
+// The fetcher flags failure if the counter changes while it is mid-message,
+// which would mean pause() did not wait for in-flight work.
+void
+FileStorManagerTest::testHandlerPausedMultiThread()
+{
+ TestName testName("testHandlerPausedMultiThread");
+ // Setup a filestorthread to test
+ DummyStorageLink top;
+ DummyStorageLink *dummyManager;
+ top.push_back(std::unique_ptr<StorageLink>(
+ dummyManager = new DummyStorageLink));
+ top.open();
+ ForwardingMessageSender messageSender(*dummyManager);
+ // Since we fake time with small numbers, we need to make sure we dont
+ // compact them away, as they will seem to be from 1970
+
+ documentapi::LoadTypeSet loadTypes("raw:");
+ FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
+ metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
+
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
+ _node->getComponentRegister(), 255, 0);
+ filestorHandler.setGetNextMessageTimeout(50);
+
+ std::string content("Here is some content which is in all documents");
+ std::ostringstream uri;
+
+ Document::SP doc(createDocument(content, "userdoc:footype:1234:bar").release());
+
+ FastOS_ThreadPool pool(512 * 1024);
+ MessagePusherThread pushthread(filestorHandler, doc);
+ pushthread.start(pool);
+
+ MessageFetchingThread thread(filestorHandler);
+ thread.start(pool);
+
+ for (uint32_t i = 0; i < 50; ++i) {
+ FastOS_Thread::Sleep(2);
+ // While the guard is held no fetcher may be mid-message.
+ ResumeGuard guard = filestorHandler.pause();
+ thread._config.fetch_add(1);
+ uint32_t count = thread._fetchedCount;
+ CPPUNIT_ASSERT_EQUAL(count, thread._fetchedCount);
+ }
+
+ pushthread._done = true;
+ thread._done = true;
+ CPPUNIT_ASSERT(!thread._failed);
+
+ while (!pushthread._threadDone || !thread._threadDone) {
+ FastOS_Thread::Sleep(1);
+ }
+}
+
+
+// While a ResumeGuard from pause() is alive, getNextMessage must return
+// nothing; once the guard is released, dispatch resumes in priority order.
+void
+FileStorManagerTest::testHandlerPause()
+{
+ TestName testName("testHandlerPriority");
+ // Setup a filestorthread to test
+ DummyStorageLink top;
+ DummyStorageLink *dummyManager;
+ top.push_back(std::unique_ptr<StorageLink>(
+ dummyManager = new DummyStorageLink));
+ top.open();
+ ForwardingMessageSender messageSender(*dummyManager);
+ // Since we fake time with small numbers, we need to make sure we dont
+ // compact them away, as they will seem to be from 1970
+
+ documentapi::LoadTypeSet loadTypes("raw:");
+ FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
+ metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
+
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
+ _node->getComponentRegister(), 255, 0);
+ filestorHandler.setGetNextMessageTimeout(50);
+
+ std::string content("Here is some content which is in all documents");
+ std::ostringstream uri;
+
+ Document::SP doc(createDocument(content, "userdoc:footype:1234:bar").release());
+
+ document::BucketIdFactory factory;
+ document::BucketId bucket(16, factory.getBucketId(
+ doc->getId()).getRawId());
+
+ // Populate bucket with the given data
+ for (uint32_t i = 1; i < 6; i++) {
+ std::shared_ptr<api::PutCommand> cmd(
+ new api::PutCommand(bucket, doc, 100));
+ std::unique_ptr<api::StorageMessageAddress> address(
+ new api::StorageMessageAddress(
+ "storage", lib::NodeType::STORAGE, 3));
+ cmd->setAddress(*address);
+ cmd->setPriority(i * 15);
+ filestorHandler.schedule(cmd, 0);
+ }
+
+ CPPUNIT_ASSERT_EQUAL(15, (int)filestorHandler.getNextMessage(0, 255).second->getPriority());
+
+ {
+ ResumeGuard guard = filestorHandler.pause();
+ (void)guard;
+ CPPUNIT_ASSERT(filestorHandler.getNextMessage(0, 255).second.get() == NULL);
+ }
+
+ CPPUNIT_ASSERT_EQUAL(30, (int)filestorHandler.getNextMessage(0, 255).second->getPriority());
+}
+
+namespace {
+
+// Test helper: extracts the put timestamp from a message assumed to be a
+// PutCommand; returns (uint64_t)-1 for a null message (no message fetched).
+// NOTE(review): uses an unchecked static_cast, so callers must only pass
+// put commands.
+uint64_t getPutTime(api::StorageMessage::SP& msg)
+{
+ if (!msg.get()) {
+ return (uint64_t)-1;
+ }
+
+ return static_cast<api::PutCommand*>(msg.get())->getTimestamp();
+};
+
+}
+
+// Queues puts for two buckets, then remaps the queue as if bucket1 was
+// split into two level-17 targets. Verifies via dumpQueue that bucket1's
+// entries move to the first split target (all its docs land there) while
+// bucket2's entries are untouched.
+void
+FileStorManagerTest::testRemapSplit()
+{
+ TestName testName("testRemapSplit");
+ // Setup a filestorthread to test
+ DummyStorageLink top;
+ DummyStorageLink *dummyManager;
+ top.push_back(std::unique_ptr<StorageLink>(
+ dummyManager = new DummyStorageLink));
+ top.open();
+ ForwardingMessageSender messageSender(*dummyManager);
+ // Since we fake time with small numbers, we need to make sure we dont
+ // compact them away, as they will seem to be from 1970
+
+ documentapi::LoadTypeSet loadTypes("raw:");
+ FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
+ metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
+
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
+ _node->getComponentRegister(), 255, 0);
+ filestorHandler.setGetNextMessageTimeout(50);
+
+ std::string content("Here is some content which is in all documents");
+
+ Document::SP doc1(createDocument(content, "userdoc:footype:1234:bar").release());
+
+ Document::SP doc2(createDocument(content, "userdoc:footype:4567:bar").release());
+
+ document::BucketIdFactory factory;
+ document::BucketId bucket1(16, 1234);
+ document::BucketId bucket2(16, 4567);
+
+ // Populate bucket with the given data
+ for (uint32_t i = 1; i < 4; i++) {
+ filestorHandler.schedule(
+ api::StorageMessage::SP(new api::PutCommand(bucket1, doc1, i)), 0);
+ filestorHandler.schedule(
+ api::StorageMessage::SP(new api::PutCommand(bucket2, doc2, i + 10)), 0);
+ }
+
+ CPPUNIT_ASSERT_EQUAL(std::string("BucketId(0x40000000000004d2): Put(BucketId(0x40000000000004d2), userdoc:footype:1234:bar, timestamp 1, size 108) (priority: 127)\n"
+ "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 11, size 108) (priority: 127)\n"
+ "BucketId(0x40000000000004d2): Put(BucketId(0x40000000000004d2), userdoc:footype:1234:bar, timestamp 2, size 108) (priority: 127)\n"
+ "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 12, size 108) (priority: 127)\n"
+ "BucketId(0x40000000000004d2): Put(BucketId(0x40000000000004d2), userdoc:footype:1234:bar, timestamp 3, size 108) (priority: 127)\n"
+ "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 13, size 108) (priority: 127)\n"),
+ filestorHandler.dumpQueue(0));
+
+ // Split targets: (17, 1234) and (17, 1234 | 1<<16); all queued doc1 puts
+ // belong in the first target.
+ FileStorHandler::RemapInfo a(document::BucketId(17, 1234), 0);
+ FileStorHandler::RemapInfo b(document::BucketId(17, 1234 | 1 << 16), 0);
+ filestorHandler.remapQueueAfterSplit(FileStorHandler::RemapInfo(bucket1, 0), a, b);
+
+ CPPUNIT_ASSERT(a.foundInQueue);
+ CPPUNIT_ASSERT(!b.foundInQueue);
+
+ CPPUNIT_ASSERT_EQUAL(std::string(
+ "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 11, size 108) (priority: 127)\n"
+ "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 12, size 108) (priority: 127)\n"
+ "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 13, size 108) (priority: 127)\n"
+ "BucketId(0x44000000000004d2): Put(BucketId(0x44000000000004d2), userdoc:footype:1234:bar, timestamp 1, size 108) (priority: 127)\n"
+ "BucketId(0x44000000000004d2): Put(BucketId(0x44000000000004d2), userdoc:footype:1234:bar, timestamp 2, size 108) (priority: 127)\n"
+ "BucketId(0x44000000000004d2): Put(BucketId(0x44000000000004d2), userdoc:footype:1234:bar, timestamp 3, size 108) (priority: 127)\n"),
+ filestorHandler.dumpQueue(0));
+
+}
+
+// Verifies the chained getNextMessage(disk, lock, prio) overload: while a
+// bucket lock is held, successive calls keep returning messages for the
+// same bucket (timestamps 1,2,3 for bucket1, then 11,12 for bucket2).
+void
+FileStorManagerTest::testHandlerMulti()
+{
+ TestName testName("testHandlerMulti");
+ // Setup a filestorthread to test
+ DummyStorageLink top;
+ DummyStorageLink *dummyManager;
+ top.push_back(std::unique_ptr<StorageLink>(
+ dummyManager = new DummyStorageLink));
+ top.open();
+ ForwardingMessageSender messageSender(*dummyManager);
+ // Since we fake time with small numbers, we need to make sure we dont
+ // compact them away, as they will seem to be from 1970
+
+ documentapi::LoadTypeSet loadTypes("raw:");
+ FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
+ metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
+
+ FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
+ _node->getComponentRegister(), 255, 0);
+ filestorHandler.setGetNextMessageTimeout(50);
+
+ std::string content("Here is some content which is in all documents");
+
+ Document::SP doc1(createDocument(content, "userdoc:footype:1234:bar").release());
+
+ Document::SP doc2(createDocument(content, "userdoc:footype:4567:bar").release());
+
+ document::BucketIdFactory factory;
+ document::BucketId bucket1(16, factory.getBucketId(
+ doc1->getId()).getRawId());
+ document::BucketId bucket2(16, factory.getBucketId(
+ doc2->getId()).getRawId());
+
+ // Populate bucket with the given data
+ for (uint32_t i = 1; i < 10; i++) {
+ filestorHandler.schedule(
+ api::StorageMessage::SP(new api::PutCommand(bucket1, doc1, i)), 0);
+ filestorHandler.schedule(
+ api::StorageMessage::SP(new api::PutCommand(bucket2, doc2, i + 10)), 0);
+ }
+
+ {
+ FileStorHandler::LockedMessage lock = filestorHandler.getNextMessage(0, 255);
+ CPPUNIT_ASSERT_EQUAL((uint64_t)1, getPutTime(lock.second));
+
+ lock = filestorHandler.getNextMessage(0, lock, 255);
+ CPPUNIT_ASSERT_EQUAL((uint64_t)2, getPutTime(lock.second));
+
+ lock = filestorHandler.getNextMessage(0, lock, 255);
+ CPPUNIT_ASSERT_EQUAL((uint64_t)3, getPutTime(lock.second));
+ }
+
+ {
+ FileStorHandler::LockedMessage lock = filestorHandler.getNextMessage(0, 255);
+ CPPUNIT_ASSERT_EQUAL((uint64_t)11, getPutTime(lock.second));
+
+ lock = filestorHandler.getNextMessage(0, lock, 255);
+ CPPUNIT_ASSERT_EQUAL((uint64_t)12, getPutTime(lock.second));
+ }
+}
+
+
+// Schedules two puts on the same bucket: a top-priority one with a 50 ms
+// timeout and a priority-200 one with a 10 s timeout. After sleeping past
+// the short timeout, only the long-timeout message may be handed out, and
+// the expired one must have been answered with a TIMEOUT reply.
+// (Fix: removed the unused local std::ostringstream uri.)
+void
+FileStorManagerTest::testHandlerTimeout()
+{
+    TestName testName("testHandlerTimeout");
+    // Setup a filestorthread to test
+    DummyStorageLink top;
+    DummyStorageLink *dummyManager;
+    top.push_back(std::unique_ptr<StorageLink>(
+            dummyManager = new DummyStorageLink));
+    top.open();
+    ForwardingMessageSender messageSender(*dummyManager);
+
+    // Since we fake time with small numbers, we need to make sure we dont
+    // compact them away, as they will seem to be from 1970
+
+    documentapi::LoadTypeSet loadTypes("raw:");
+    FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
+    metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
+
+    FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
+                                    _node->getComponentRegister(), 255, 0);
+    filestorHandler.setGetNextMessageTimeout(50);
+
+    std::string content("Here is some content which is in all documents");
+
+    Document::SP doc(createDocument(content, "userdoc:footype:1234:bar").release());
+
+    document::BucketIdFactory factory;
+    document::BucketId bucket(16, factory.getBucketId(
+            doc->getId()).getRawId());
+
+    // Top-priority put that is allowed to expire after 50 ms.
+    {
+        std::shared_ptr<api::PutCommand> cmd(
+                new api::PutCommand(bucket, doc, 100));
+        std::unique_ptr<api::StorageMessageAddress> address(
+                new api::StorageMessageAddress(
+                        "storage", lib::NodeType::STORAGE, 3));
+        cmd->setAddress(*address);
+        cmd->setPriority(0);
+        cmd->setTimeout(50);
+        filestorHandler.schedule(cmd, 0);
+    }
+
+    // Low-priority put with a generous timeout; should survive the sleep.
+    {
+        std::shared_ptr<api::PutCommand> cmd(
+                new api::PutCommand(bucket, doc, 100));
+        std::unique_ptr<api::StorageMessageAddress> address(
+                new api::StorageMessageAddress(
+                        "storage", lib::NodeType::STORAGE, 3));
+        cmd->setAddress(*address);
+        cmd->setPriority(200);
+        cmd->setTimeout(10000);
+        filestorHandler.schedule(cmd, 0);
+    }
+
+    FastOS_Thread::Sleep(51); // Let the 50 ms timeout expire.
+    for (;;) {
+        auto lock = filestorHandler.getNextMessage(0, 255);
+        if (lock.first.get()) {
+            // Only the non-expired (priority 200) message may be returned.
+            CPPUNIT_ASSERT_EQUAL(uint8_t(200), lock.second->getPriority());
+            break;
+        }
+    }
+
+    // The expired command must have been bounced with TIMEOUT.
+    CPPUNIT_ASSERT_EQUAL(size_t(1), top.getNumReplies());
+    CPPUNIT_ASSERT_EQUAL(api::ReturnCode::TIMEOUT,
+                         static_cast<api::StorageReply&>(*top.getReply(0))
+                                 .getResult().getResult());
+}
+
+// Exercises the handler's priority-blocking behaviour (handler constructed
+// with thresholds 21, 21): schedules puts with priorities 15,30,45,60,75
+// and checks that while a message at or below the threshold is locked,
+// requests limited to low max-priority time out (return a NULL message),
+// and that a newly arrived high-priority message is still served first.
+// (Fix: removed the unused local std::ostringstream uri.)
+void
+FileStorManagerTest::testHandlerPriorityBlocking()
+{
+    TestName testName("testHandlerPriorityBlocking");
+    // Setup a filestorthread to test
+    DummyStorageLink top;
+    DummyStorageLink *dummyManager;
+    top.push_back(std::unique_ptr<StorageLink>(
+            dummyManager = new DummyStorageLink));
+    top.open();
+    ForwardingMessageSender messageSender(*dummyManager);
+    // Since we fake time with small numbers, we need to make sure we dont
+    // compact them away, as they will seem to be from 1970
+
+    documentapi::LoadTypeSet loadTypes("raw:");
+    FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
+    metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
+
+    FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
+                                    _node->getComponentRegister(), 21, 21);
+    filestorHandler.setGetNextMessageTimeout(50);
+
+    std::string content("Here is some content which is in all documents");
+
+    document::BucketIdFactory factory;
+
+    // Populate bucket with the given data
+    for (uint32_t i = 1; i < 6; i++) {
+        Document::SP doc(createDocument(content, vespalib::make_string("doc:foo:%d",i)).release());
+        document::BucketId bucket(16, factory.getBucketId(
+                doc->getId()).getRawId());
+        std::shared_ptr<api::PutCommand> cmd(
+                new api::PutCommand(bucket, doc, 100));
+        std::unique_ptr<api::StorageMessageAddress> address(
+                new api::StorageMessageAddress(
+                        "storage", lib::NodeType::STORAGE, 3));
+        cmd->setAddress(*address);
+        cmd->setPriority(i * 15);
+        filestorHandler.schedule(cmd, 0);
+    }
+
+    {
+        FileStorHandler::LockedMessage lock1 = filestorHandler.getNextMessage(0, 20);
+        CPPUNIT_ASSERT_EQUAL(15, (int)lock1.second->getPriority());
+
+        // With the priority-15 operation locked, a request capped at
+        // priority 30 must time out and return no message.
+        LOG(debug, "Waiting for request that should time out");
+        FileStorHandler::LockedMessage lock2 = filestorHandler.getNextMessage(0, 30);
+        LOG(debug, "Got request that should time out");
+        CPPUNIT_ASSERT(lock2.second.get() == NULL);
+    }
+
+    {
+        FileStorHandler::LockedMessage lock1 = filestorHandler.getNextMessage(0, 40);
+        CPPUNIT_ASSERT_EQUAL(30, (int)lock1.second->getPriority());
+
+        // New high-pri message comes in
+        Document::SP doc(createDocument(content, vespalib::make_string("doc:foo:%d", 100)).release());
+        document::BucketId bucket(16, factory.getBucketId(
+                doc->getId()).getRawId());
+        std::shared_ptr<api::PutCommand> cmd(
+                new api::PutCommand(bucket, doc, 100));
+        std::unique_ptr<api::StorageMessageAddress> address(
+                new api::StorageMessageAddress(
+                        "storage", lib::NodeType::STORAGE, 3));
+        cmd->setAddress(*address);
+        cmd->setPriority(15);
+        filestorHandler.schedule(cmd, 0);
+
+        // The fresh priority-15 message is served despite the held lock...
+        FileStorHandler::LockedMessage lock2 = filestorHandler.getNextMessage(0, 20);
+        CPPUNIT_ASSERT_EQUAL(15, (int)lock2.second->getPriority());
+
+        // ...but an unrestricted request is still blocked and times out.
+        LOG(debug, "Waiting for request that should time out");
+        FileStorHandler::LockedMessage lock3 = filestorHandler.getNextMessage(0, 255);
+        LOG(debug, "Got request that should time out");
+        CPPUNIT_ASSERT(lock3.second.get() == NULL);
+    }
+
+    {
+        // All locks released: the remaining queue drains in priority order.
+        FileStorHandler::LockedMessage lock1 = filestorHandler.getNextMessage(0, 255);
+        CPPUNIT_ASSERT_EQUAL(45, (int)lock1.second->getPriority());
+
+        FileStorHandler::LockedMessage lock = filestorHandler.getNextMessage(0, 255);
+        CPPUNIT_ASSERT_EQUAL(60, (int)lock.second->getPriority());
+    }
+    LOG(debug, "Test done");
+}
+
+// Helper thread for testHandlerPriorityPreempt: grabs one message from the
+// handler, reports via 'gotoperation', then spins until told to 'pause'
+// (calling FileStorHandler::pause with its message's priority, which may
+// block) and finally until told it is 'done'.
+// NOTE(review): pause/done/gotoperation are plain bools read and written
+// from two threads with no synchronization — the test relies on sleeps for
+// visibility. Consider std::atomic<bool> (needs <atomic> in the include
+// block, which is outside this view).
+class PausedThread : public document::Runnable {
+private:
+    FileStorHandler& _handler;
+
+public:
+    bool pause;        // set by test; cleared by run() after pause() returns
+    bool done;         // set by test; cleared by run() on exit (handshake)
+    bool gotoperation; // set by run() once a message lock has been acquired
+
+    PausedThread(FileStorHandler& handler)
+        : _handler(handler), pause(false), done(false), gotoperation(false) {}
+
+    void run() {
+        FileStorHandler::LockedMessage msg = _handler.getNextMessage(0, 255);
+        gotoperation = true;
+
+        while (!done) {
+            if (pause) {
+                // May block until higher-priority work has been processed.
+                _handler.pause(0, msg.second->getPriority());
+                pause = false;
+            }
+            FastOS_Thread::Sleep(10);
+        }
+
+        done = false;
+    };
+};
+
+// Verifies pause/preemption: a helper thread locks a priority-60 put, the
+// test then locks a newly scheduled priority-20 put, and the helper's
+// pause() call must not return while the higher-priority operation is
+// still locked (checked by asserting the 'pause' flag stays set).
+// (Fix: removed the unused local std::ostringstream uri.)
+void
+FileStorManagerTest::testHandlerPriorityPreempt()
+{
+    TestName testName("testHandlerPriorityPreempt");
+    // Setup a filestorthread to test
+    DummyStorageLink top;
+    DummyStorageLink *dummyManager;
+    top.push_back(std::unique_ptr<StorageLink>(
+            dummyManager = new DummyStorageLink));
+    top.open();
+    ForwardingMessageSender messageSender(*dummyManager);
+    // Since we fake time with small numbers, we need to make sure we dont
+    // compact them away, as they will seem to be from 1970
+
+    documentapi::LoadTypeSet loadTypes("raw:");
+    FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
+    metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
+
+    FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
+                                    _node->getComponentRegister(), 21, 21);
+    filestorHandler.setGetNextMessageTimeout(50);
+
+    std::string content("Here is some content which is in all documents");
+
+    document::BucketIdFactory factory;
+
+    // Priority-60 put that the helper thread will pick up and hold.
+    {
+        Document::SP doc(createDocument(content, "doc:foo:1").release());
+        document::BucketId bucket(16, factory.getBucketId(
+                doc->getId()).getRawId());
+        std::shared_ptr<api::PutCommand> cmd(
+                new api::PutCommand(bucket, doc, 100));
+        std::unique_ptr<api::StorageMessageAddress> address(
+                new api::StorageMessageAddress(
+                        "storage", lib::NodeType::STORAGE, 3));
+        cmd->setAddress(*address);
+        cmd->setPriority(60);
+        filestorHandler.schedule(cmd, 0);
+    }
+
+    PausedThread thread(filestorHandler);
+    FastOS_ThreadPool pool(512 * 1024);
+    thread.start(pool);
+
+    // Wait until the helper holds the priority-60 message lock.
+    while (!thread.gotoperation) {
+        FastOS_Thread::Sleep(10);
+    }
+
+    // Higher-priority (20) put arrives while the helper holds its lock.
+    {
+        Document::SP doc(createDocument(content, "doc:foo:2").release());
+        document::BucketId bucket(16, factory.getBucketId(
+                doc->getId()).getRawId());
+        std::shared_ptr<api::PutCommand> cmd(
+                new api::PutCommand(bucket, doc, 100));
+        std::unique_ptr<api::StorageMessageAddress> address(
+                new api::StorageMessageAddress(
+                        "storage", lib::NodeType::STORAGE, 3));
+        cmd->setAddress(*address);
+        cmd->setPriority(20);
+        filestorHandler.schedule(cmd, 0);
+    }
+
+    {
+        FileStorHandler::LockedMessage lock1 = filestorHandler.getNextMessage(0, 20);
+        CPPUNIT_ASSERT_EQUAL(20, (int)lock1.second->getPriority());
+
+        thread.pause = true;
+
+        // While the priority-20 operation is locked, the helper's pause()
+        // must not return, so the flag must remain set for a full second.
+        for (uint32_t i = 0; i < 10; i++) {
+            CPPUNIT_ASSERT(thread.pause);
+            FastOS_Thread::Sleep(100);
+        }
+    }
+
+    // Lock released above; pause() may now return and clear the flag.
+    while (thread.pause) {
+        FastOS_Thread::Sleep(10);
+    }
+
+    // Handshake: helper clears 'done' on its way out.
+    thread.done = true;
+
+    while (thread.done) {
+        FastOS_Thread::Sleep(10);
+    }
+}
+
+// Runs two disk threads — thread 0 created with priority range up to 25,
+// thread 1 up to 255 — schedules 50 puts with priorities 0,2,...,98, and
+// verifies that all puts succeed and that thread 0 processed at most the
+// 13 documents whose priority is <= 25 (i*2 <= 25 for i <= 12).
+void
+FileStorManagerTest::testPriority()
+{
+    TestName testName("testPriority");
+    // Setup a filestorthread to test
+    DummyStorageLink top;
+    DummyStorageLink *dummyManager;
+    top.push_back(std::unique_ptr<StorageLink>(
+            dummyManager = new DummyStorageLink));
+    top.open();
+    ForwardingMessageSender messageSender(*dummyManager);
+    // Since we fake time with small numbers, we need to make sure we dont
+    // compact them away, as they will seem to be from 1970
+
+    documentapi::LoadTypeSet loadTypes("raw:");
+    FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
+    metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 2);
+
+    FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
+                                    _node->getComponentRegister(), 255, 0);
+    // Thread 0 handles only priorities up to 25; thread 1 up to 255.
+    std::unique_ptr<DiskThread> thread(createThread(
+            *config, *_node, _node->getPersistenceProvider(),
+            filestorHandler, *metrics.disks[0]->threads[0], 0, 25));
+    std::unique_ptr<DiskThread> thread2(createThread(
+            *config, *_node, _node->getPersistenceProvider(),
+            filestorHandler, *metrics.disks[0]->threads[1], 0, 255));
+
+    // Creating documents to test with. Different gids, 2 locations.
+    std::vector<document::Document::SP > documents;
+    for (uint32_t i=0; i<50; ++i) {
+        std::string content("Here is some content which is in all documents");
+        std::ostringstream uri;
+
+        uri << "userdoc:footype:" << (i % 3 == 0 ? 0x10001 : 0x0100001)
+            << ":mydoc-" << i;
+        Document::SP doc(createDocument(content, uri.str()).release());
+        documents.push_back(doc);
+    }
+
+    document::BucketIdFactory factory;
+
+    // Create buckets in separate, initial pass to avoid races with puts
+    for (uint32_t i=0; i<documents.size(); ++i) {
+        document::BucketId bucket(16, factory.getBucketId(
+                documents[i]->getId()).getRawId());
+
+        spi::Context context(defaultLoadType, spi::Priority(0),
+                             spi::Trace::TraceLevel(0));
+
+        _node->getPersistenceProvider().createBucket(
+                spi::Bucket(bucket, spi::PartitionId(0)), context);
+    }
+
+    // Populate bucket with the given data
+    for (uint32_t i=0; i<documents.size(); ++i) {
+        document::BucketId bucket(16, factory.getBucketId(
+                documents[i]->getId()).getRawId());
+
+        std::shared_ptr<api::PutCommand> cmd(
+                new api::PutCommand(bucket, documents[i], 100 + i));
+        std::unique_ptr<api::StorageMessageAddress> address(
+                new api::StorageMessageAddress(
+                        "storage", lib::NodeType::STORAGE, 3));
+        cmd->setAddress(*address);
+        cmd->setPriority(i * 2);
+        filestorHandler.schedule(cmd, 0);
+    }
+
+    filestorHandler.flush(true);
+
+    // Wait until everything is done (bounded at ~100 s).
+    int count = 0;
+    while (documents.size() != top.getNumReplies() && count < 1000) {
+        FastOS_Thread::Sleep(100);
+        count++;
+    }
+    CPPUNIT_ASSERT(count < 1000);
+
+    // Every put must have succeeded.
+    for (uint32_t i = 0; i < documents.size(); i++) {
+        std::shared_ptr<api::PutReply> reply(
+                std::dynamic_pointer_cast<api::PutReply>(
+                        top.getReply(i)));
+        CPPUNIT_ASSERT(reply.get());
+        CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
+                             reply->getResult());
+    }
+
+    // Verify that thread 0 (priority range 0-25) handled at most the 13
+    // documents with priority <= 25; thread 1 got the rest.
+    CPPUNIT_ASSERT_EQUAL(uint64_t(documents.size()),
+                         metrics.disks[0]->threads[0]->operations.getValue()
+                         + metrics.disks[0]->threads[1]->operations.getValue());
+    CPPUNIT_ASSERT(metrics.disks[0]->threads[0]->operations.getValue() <= 13);
+    // Closing file stor handler before threads are deleted, such that
+    // file stor threads getNextMessage calls returns.
+    filestorHandler.close();
+}
+
+// End-to-end split test: puts 20 documents spread over two user locations
+// (0x10001 for every third doc, 0x0100001 otherwise), removes every 5th,
+// splits bucket (16, 1) and checks documents land in the right level-17
+// buckets, then keeps splitting location 0x0100001 down to 33 bits (past
+// the 32-bit location, i.e. a gid split) and re-checks placement.
+void
+FileStorManagerTest::testSplit1()
+{
+    TestName testName("testSplit1");
+    // Setup a filestorthread to test
+    DummyStorageLink top;
+    DummyStorageLink *dummyManager;
+    top.push_back(std::unique_ptr<StorageLink>(
+            dummyManager = new DummyStorageLink));
+    setClusterState("storage:2 distributor:1");
+    top.open();
+    ForwardingMessageSender messageSender(*dummyManager);
+    documentapi::LoadTypeSet loadTypes("raw:");
+    FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
+    metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
+    FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
+                                    _node->getComponentRegister(), 255, 0);
+    std::unique_ptr<DiskThread> thread(createThread(
+            *config, *_node, _node->getPersistenceProvider(),
+            filestorHandler, *metrics.disks[0]->threads[0], 0, 255));
+    // Creating documents to test with. Different gids, 2 locations.
+    std::vector<document::Document::SP > documents;
+    for (uint32_t i=0; i<20; ++i) {
+        std::string content("Here is some content which is in all documents");
+        std::ostringstream uri;
+
+        uri << "userdoc:footype:" << (i % 3 == 0 ? 0x10001 : 0x0100001)
+            << ":mydoc-" << i;
+        Document::SP doc(createDocument(
+                content, uri.str()).release());
+        documents.push_back(doc);
+    }
+    document::BucketIdFactory factory;
+    spi::Context context(defaultLoadType, spi::Priority(0),
+                         spi::Trace::TraceLevel(0));
+    {
+        // Populate bucket with the given data
+        for (uint32_t i=0; i<documents.size(); ++i) {
+            document::BucketId bucket(16, factory.getBucketId(
+                    documents[i]->getId()).getRawId());
+
+            _node->getPersistenceProvider().createBucket(
+                    spi::Bucket(bucket, spi::PartitionId(0)), context);
+
+            std::shared_ptr<api::PutCommand> cmd(
+                    new api::PutCommand(bucket, documents[i], 100 + i));
+            std::unique_ptr<api::StorageMessageAddress> address(
+                    new api::StorageMessageAddress(
+                            "storage", lib::NodeType::STORAGE, 3));
+            cmd->setAddress(*address);
+            cmd->setSourceIndex(0);
+
+            filestorHandler.schedule(cmd, 0);
+            filestorHandler.flush(true);
+            LOG(debug, "Got %" PRIu64 " replies", top.getNumReplies());
+            CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+            std::shared_ptr<api::PutReply> reply(
+                    std::dynamic_pointer_cast<api::PutReply>(
+                            top.getReply(0)));
+            CPPUNIT_ASSERT(reply.get());
+            CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
+                                 reply->getResult());
+            top.reset();
+
+            // Delete every 5th document to have delete entries in file too
+            if (i % 5 == 0) {
+                std::shared_ptr<api::RemoveCommand> rcmd(
+                        new api::RemoveCommand(
+                                bucket, documents[i]->getId(), 1000000 + 100 + i));
+                rcmd->setAddress(*address);
+                filestorHandler.schedule(rcmd, 0);
+                filestorHandler.flush(true);
+                CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+                std::shared_ptr<api::RemoveReply> rreply(
+                        std::dynamic_pointer_cast<api::RemoveReply>(
+                                top.getReply(0)));
+                CPPUNIT_ASSERT_MSG(top.getReply(0)->getType().toString(),
+                                   rreply.get());
+                CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
+                                     rreply->getResult());
+                top.reset();
+            }
+        }
+
+        // Perform a split, check that locations are split
+        {
+            std::shared_ptr<api::SplitBucketCommand> cmd(
+                    new api::SplitBucketCommand(document::BucketId(16, 1)));
+            cmd->setSourceIndex(0);
+            filestorHandler.schedule(cmd, 0);
+            filestorHandler.flush(true);
+            CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+            std::shared_ptr<api::SplitBucketReply> reply(
+                    std::dynamic_pointer_cast<api::SplitBucketReply>(
+                            top.getReply(0)));
+            CPPUNIT_ASSERT(reply.get());
+            CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
+                                 reply->getResult());
+            top.reset();
+        }
+
+        // Test that the documents have gotten into correct parts.
+        // Removed docs (every 5th) must not be found.
+        for (uint32_t i=0; i<documents.size(); ++i) {
+            document::BucketId bucket(
+                    17, i % 3 == 0 ? 0x10001 : 0x0100001);
+            std::shared_ptr<api::GetCommand> cmd(
+                    new api::GetCommand(bucket, documents[i]->getId(), "[all]"));
+            api::StorageMessageAddress address(
+                    "storage", lib::NodeType::STORAGE, 3);
+            cmd->setAddress(address);
+            filestorHandler.schedule(cmd, 0);
+            filestorHandler.flush(true);
+            CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+            std::shared_ptr<api::GetReply> reply(
+                    std::dynamic_pointer_cast<api::GetReply>(
+                            top.getReply(0)));
+            CPPUNIT_ASSERT(reply.get());
+            CPPUNIT_ASSERT_EQUAL(i % 5 != 0 ? true : false, reply->wasFound());
+            top.reset();
+        }
+
+        // Keep splitting location 1 until we gidsplit
+        for (int i=17; i<=32; ++i) {
+            std::shared_ptr<api::SplitBucketCommand> cmd(
+                    new api::SplitBucketCommand(
+                            document::BucketId(i, 0x0100001)));
+            cmd->setSourceIndex(0);
+            filestorHandler.schedule(cmd, 0);
+            filestorHandler.flush(true);
+            CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+            std::shared_ptr<api::SplitBucketReply> reply(
+                    std::dynamic_pointer_cast<api::SplitBucketReply>(
+                            top.getReply(0)));
+            CPPUNIT_ASSERT(reply.get());
+            CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
+                                 reply->getResult());
+            top.reset();
+        }
+
+        // Test that the documents have gotten into correct parts.
+        // Location 0x0100001 docs are now in 33-bit (gid-split) buckets.
+        for (uint32_t i=0; i<documents.size(); ++i) {
+            document::BucketId bucket;
+            if (i % 3 == 0) {
+                bucket = document::BucketId(17, 0x10001);
+            } else {
+                bucket = document::BucketId(33, factory.getBucketId(
+                        documents[i]->getId()).getRawId());
+            }
+            std::shared_ptr<api::GetCommand> cmd(
+                    new api::GetCommand(bucket, documents[i]->getId(), "[all]"));
+            api::StorageMessageAddress address(
+                    "storage", lib::NodeType::STORAGE, 3);
+            cmd->setAddress(address);
+            filestorHandler.schedule(cmd, 0);
+            filestorHandler.flush(true);
+            CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+            std::shared_ptr<api::GetReply> reply(
+                    std::dynamic_pointer_cast<api::GetReply>(
+                            top.getReply(0)));
+            CPPUNIT_ASSERT(reply.get());
+            CPPUNIT_ASSERT_EQUAL(i % 5 != 0 ? true : false, reply->wasFound());
+            top.reset();
+        }
+    }
+    // Closing file stor handler before threads are deleted, such that
+    // file stor threads getNextMessage calls returns.
+    filestorHandler.close();
+}
+
+// Splits a bucket whose documents all share one location, so all data ends
+// up on a single side of the split, and verifies every document is still
+// retrievable from the resulting level-17 bucket.
+// NOTE(review): the comment below says "Test this twice" (splitbit set and
+// unset), but the loop bound is j<1, so only the j==0 (state==true,
+// location 0x10001) case actually runs — confirm whether this was
+// intentionally disabled.
+void
+FileStorManagerTest::testSplitSingleGroup()
+{
+    TestName testName("testSplitSingleGroup");
+    // Setup a filestorthread to test
+    DummyStorageLink top;
+    DummyStorageLink *dummyManager;
+    top.push_back(std::unique_ptr<StorageLink>(
+            dummyManager = new DummyStorageLink));
+    setClusterState("storage:2 distributor:1");
+    top.open();
+    ForwardingMessageSender messageSender(*dummyManager);
+    documentapi::LoadTypeSet loadTypes("raw:");
+    FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
+    metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
+    FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
+                                    _node->getComponentRegister(), 255, 0);
+    spi::Context context(defaultLoadType, spi::Priority(0),
+                         spi::Trace::TraceLevel(0));
+    for (uint32_t j=0; j<1; ++j) {
+        // Test this twice, once where all the data ends up in file with
+        // splitbit set, and once where all the data ends up in file with
+        // splitbit unset
+        bool state = (j == 0);
+
+        std::unique_ptr<DiskThread> thread(createThread(
+                *config, *_node, _node->getPersistenceProvider(),
+                filestorHandler, *metrics.disks[0]->threads[0], 0, 255));
+        // Creating documents to test with. Different gids, 2 locations.
+        std::vector<document::Document::SP > documents;
+        for (uint32_t i=0; i<20; ++i) {
+            std::string content("Here is some content for all documents");
+            std::ostringstream uri;
+
+            uri << "userdoc:footype:" << (state ? 0x10001 : 0x0100001)
+                << ":mydoc-" << i;
+            Document::SP doc(createDocument(
+                    content, uri.str()).release());
+            documents.push_back(doc);
+        }
+        document::BucketIdFactory factory;
+
+        // Populate bucket with the given data
+        for (uint32_t i=0; i<documents.size(); ++i) {
+            document::BucketId bucket(16, factory.getBucketId(
+                    documents[i]->getId()).getRawId());
+
+            _node->getPersistenceProvider().createBucket(
+                    spi::Bucket(bucket, spi::PartitionId(0)), context);
+
+            std::shared_ptr<api::PutCommand> cmd(
+                    new api::PutCommand(bucket, documents[i], 100 + i));
+            api::StorageMessageAddress address(
+                    "storage", lib::NodeType::STORAGE, 3);
+            cmd->setAddress(address);
+            filestorHandler.schedule(cmd, 0);
+            filestorHandler.flush(true);
+            CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+            std::shared_ptr<api::PutReply> reply(
+                    std::dynamic_pointer_cast<api::PutReply>(
+                            top.getReply(0)));
+            CPPUNIT_ASSERT(reply.get());
+            CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
+                                 reply->getResult());
+            top.reset();
+        }
+        // Perform a split, check that locations are split
+        {
+            std::shared_ptr<api::SplitBucketCommand> cmd(
+                    new api::SplitBucketCommand(document::BucketId(16, 1)));
+            cmd->setSourceIndex(0);
+            filestorHandler.schedule(cmd, 0);
+            filestorHandler.flush(true);
+            CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+            std::shared_ptr<api::SplitBucketReply> reply(
+                    std::dynamic_pointer_cast<api::SplitBucketReply>(
+                            top.getReply(0)));
+            CPPUNIT_ASSERT(reply.get());
+            CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
+                                 reply->getResult());
+            top.reset();
+        }
+
+
+        // Test that the documents are all still there
+        for (uint32_t i=0; i<documents.size(); ++i) {
+            document::BucketId bucket(17, state ? 0x10001 : 0x00001);
+            std::shared_ptr<api::GetCommand> cmd(
+                    new api::GetCommand(bucket, documents[i]->getId(), "[all]"));
+            api::StorageMessageAddress address(
+                    "storage", lib::NodeType::STORAGE, 3);
+            cmd->setAddress(address);
+            filestorHandler.schedule(cmd, 0);
+            filestorHandler.flush(true);
+            CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+            std::shared_ptr<api::GetReply> reply(
+                    std::dynamic_pointer_cast<api::GetReply>(
+                            top.getReply(0)));
+            CPPUNIT_ASSERT(reply.get());
+            CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
+                                 reply->getResult());
+            top.reset();
+        }
+        // Closing file stor handler before threads are deleted, such that
+        // file stor threads getNextMessage calls returns.
+        filestorHandler.close();
+    }
+}
+
+// Helper: creates 'target' in the persistence provider, schedules a put of
+// a synthetic document ("userdoc:ns:<location>:<docNum>", timestamp
+// docNum+1, priority 120) through the handler, flushes, asserts the put
+// reply is OK, and resets 'top' so no replies are left behind.
+// (Fix: removed the unused locals 'factory' and 'bucket', which were only
+// referenced from a commented-out debug line.)
+void
+FileStorManagerTest::putDoc(DummyStorageLink& top,
+                            FileStorHandler& filestorHandler,
+                            const document::BucketId& target,
+                            uint32_t docNum)
+{
+    api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
+    spi::Context context(defaultLoadType, spi::Priority(0),
+                         spi::Trace::TraceLevel(0));
+    document::DocumentId docId(vespalib::make_string("userdoc:ns:%zu:%d", target.getId(), docNum));
+    _node->getPersistenceProvider().createBucket(
+            spi::Bucket(target, spi::PartitionId(0)), context);
+    Document::SP doc(new Document(*_testdoctype1, docId));
+    std::shared_ptr<api::PutCommand> cmd(
+            new api::PutCommand(target, doc, docNum+1));
+    cmd->setAddress(address);
+    cmd->setPriority(120);
+    filestorHandler.schedule(cmd, 0);
+    filestorHandler.flush(true);
+    CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+    std::shared_ptr<api::PutReply> reply(
+            std::dynamic_pointer_cast<api::PutReply>(
+                    top.getReply(0)));
+    CPPUNIT_ASSERT(reply.get());
+    CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
+                         reply->getResult());
+    top.reset();
+}
+
+// Schedules a split immediately followed by a put bound for a target
+// bucket that the split leaves empty, while the handler is paused so both
+// operations queue up together. Verifies both the split and the remapped
+// put succeed (the split must recreate the empty target bucket that the
+// persistence provider deletes internally).
+// (Fix: resumeGuard.reset(0) -> reset() — no NULL-as-0 literal.)
+void
+FileStorManagerTest::testSplitEmptyTargetWithRemappedOps()
+{
+    TestName testName("testSplitEmptyTargetWithRemappedOps");
+
+    DummyStorageLink top;
+    DummyStorageLink *dummyManager;
+    top.push_back(std::unique_ptr<StorageLink>(
+            dummyManager = new DummyStorageLink));
+    setClusterState("storage:2 distributor:1");
+    top.open();
+    ForwardingMessageSender messageSender(*dummyManager);
+    documentapi::LoadTypeSet loadTypes("raw:");
+    FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
+    metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
+    FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
+                                    _node->getComponentRegister(), 255, 0);
+    std::unique_ptr<DiskThread> thread(createThread(
+            *config, *_node, _node->getPersistenceProvider(),
+            filestorHandler, *metrics.disks[0]->threads[0], 0, 255));
+
+    document::BucketId source(16, 0x10001);
+
+    api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
+
+    for (uint32_t i=0; i<10; ++i) {
+        putDoc(top, filestorHandler, source, i);
+    }
+
+    // Send split followed by a put that is bound for a target bucket that
+    // will end up empty in the split itself. The split should notice this
+    // and create the bucket explicitly afterwards in order to compensate for
+    // the persistence provider deleting it internally.
+    // Make sure we block the operation queue until we've scheduled all
+    // the operations.
+    std::unique_ptr<ResumeGuard> resumeGuard(
+            new ResumeGuard(filestorHandler.pause()));
+
+    std::shared_ptr<api::SplitBucketCommand> splitCmd(
+            new api::SplitBucketCommand(source));
+    splitCmd->setPriority(120);
+    splitCmd->setSourceIndex(0);
+
+    document::DocumentId docId(
+            vespalib::make_string("userdoc:ns:%d:1234", 0x100001));
+    Document::SP doc(new Document(*_testdoctype1, docId));
+    std::shared_ptr<api::PutCommand> putCmd(
+            new api::PutCommand(source, doc, 1001));
+    putCmd->setAddress(address);
+    putCmd->setPriority(120);
+
+    filestorHandler.schedule(splitCmd, 0);
+    filestorHandler.schedule(putCmd, 0);
+    resumeGuard.reset(); // Unpause
+    filestorHandler.flush(true);
+
+    top.waitForMessages(2, _waitTime);
+
+    CPPUNIT_ASSERT_EQUAL((size_t) 2, top.getNumReplies());
+    {
+        std::shared_ptr<api::SplitBucketReply> reply(
+                std::dynamic_pointer_cast<api::SplitBucketReply>(
+                        top.getReply(0)));
+        CPPUNIT_ASSERT(reply.get());
+        CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
+                             reply->getResult());
+    }
+    {
+        std::shared_ptr<api::PutReply> reply(
+                std::dynamic_pointer_cast<api::PutReply>(
+                        top.getReply(1)));
+        CPPUNIT_ASSERT(reply.get());
+        CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
+                             reply->getResult());
+    }
+
+    top.reset();
+}
+
+// Splits a bucket whose source index refers to a distributor that no
+// longer owns the bucket (cluster state "storage:2 distributor:2"), and
+// verifies the split produces NOTIFYBUCKETCHANGE commands for the
+// resulting buckets (3 of them) before the successful split reply.
+// (Fix: TestName said "testSplit1" — copy-paste from the split test.)
+void
+FileStorManagerTest::testNotifyOnSplitSourceOwnershipChanged()
+{
+    TestName testName("testNotifyOnSplitSourceOwnershipChanged");
+    // Setup a filestorthread to test
+    DummyStorageLink top;
+    DummyStorageLink *dummyManager;
+    top.push_back(std::unique_ptr<StorageLink>(dummyManager = new DummyStorageLink));
+    setClusterState("storage:2 distributor:2");
+    top.open();
+    ForwardingMessageSender messageSender(*dummyManager);
+    documentapi::LoadTypeSet loadTypes("raw:");
+    FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
+    metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
+    FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
+                                    _node->getComponentRegister(), 255, 0);
+    std::unique_ptr<DiskThread> thread(createThread(
+            *config, *_node, _node->getPersistenceProvider(),
+            filestorHandler, *metrics.disks[0]->threads[0], 0, 255));
+
+    document::BucketId source(getFirstBucketNotOwnedByDistributor(0));
+    createBucket(source, 0);
+    for (uint32_t i=0; i<10; ++i) {
+        putDoc(top, filestorHandler, source, i);
+    }
+
+    std::shared_ptr<api::SplitBucketCommand> splitCmd(
+            new api::SplitBucketCommand(source));
+    splitCmd->setPriority(120);
+    splitCmd->setSourceIndex(0); // Source not owned by this distributor.
+
+    filestorHandler.schedule(splitCmd, 0);
+    filestorHandler.flush(true);
+    top.waitForMessages(4, _waitTime); // 3 notify cmds + split reply
+
+    CPPUNIT_ASSERT_EQUAL(size_t(4), top.getNumReplies());
+    for (int i = 0; i < 3; ++i) {
+        CPPUNIT_ASSERT_EQUAL(api::MessageType::NOTIFYBUCKETCHANGE,
+                             top.getReply(i)->getType());
+    }
+
+    std::shared_ptr<api::SplitBucketReply> reply(
+            std::dynamic_pointer_cast<api::SplitBucketReply>(
+                    top.getReply(3)));
+    CPPUNIT_ASSERT(reply.get());
+    CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
+                         reply->getResult());
+}
+
+// Join test: puts 20 documents directly into the two level-17 buckets
+// (0x00001 and 0x10001), removes every 5th, joins them into bucket
+// (16, 1), and verifies all non-removed documents are retrievable from the
+// joined bucket while removed ones are not.
+void
+FileStorManagerTest::testJoin()
+{
+    TestName testName("testJoin");
+    // Setup a filestorthread to test
+    DummyStorageLink top;
+    DummyStorageLink *dummyManager;
+    top.push_back(std::unique_ptr<StorageLink>(
+            dummyManager = new DummyStorageLink));
+    top.open();
+    ForwardingMessageSender messageSender(*dummyManager);
+
+    documentapi::LoadTypeSet loadTypes("raw:");
+    FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
+    metrics.initDiskMetrics(_node->getPartitions().size(), loadTypes.getMetricLoadTypes(), 1);
+    FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(),
+                                    _node->getComponentRegister(), 255, 0);
+    std::unique_ptr<DiskThread> thread(createThread(
+            *config, *_node, _node->getPersistenceProvider(),
+            filestorHandler, *metrics.disks[0]->threads[0], 0, 255));
+    // Creating documents to test with. Different gids, 2 locations.
+    std::vector<document::Document::SP > documents;
+    for (uint32_t i=0; i<20; ++i) {
+        std::string content("Here is some content which is in all documents");
+        std::ostringstream uri;
+
+        uri << "userdoc:footype:" << (i % 3 == 0 ? 0x10001 : 0x0100001)
+            << ":mydoc-" << i;
+        Document::SP doc(createDocument(
+                content, uri.str()).release());
+        documents.push_back(doc);
+    }
+    document::BucketIdFactory factory;
+
+    // Pre-create both join-source buckets.
+    createBucket(document::BucketId(17, 0x00001), 0);
+    createBucket(document::BucketId(17, 0x10001), 0);
+
+    {
+        // Populate bucket with the given data
+        for (uint32_t i=0; i<documents.size(); ++i) {
+            document::BucketId bucket(17, factory.getBucketId(
+                    documents[i]->getId()).getRawId());
+            std::shared_ptr<api::PutCommand> cmd(
+                    new api::PutCommand(bucket, documents[i], 100 + i));
+            std::unique_ptr<api::StorageMessageAddress> address(
+                    new api::StorageMessageAddress(
+                            "storage", lib::NodeType::STORAGE, 3));
+            cmd->setAddress(*address);
+            filestorHandler.schedule(cmd, 0);
+            filestorHandler.flush(true);
+            CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+            std::shared_ptr<api::PutReply> reply(
+                    std::dynamic_pointer_cast<api::PutReply>(
+                            top.getReply(0)));
+            CPPUNIT_ASSERT(reply.get());
+            CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
+                                 reply->getResult());
+            top.reset();
+            // Delete every 5th document to have delete entries in file too
+            if (i % 5 == 0) {
+                std::shared_ptr<api::RemoveCommand> rcmd(
+                        new api::RemoveCommand(
+                                bucket, documents[i]->getId(), 1000000 + 100 + i));
+                rcmd->setAddress(*address);
+                filestorHandler.schedule(rcmd, 0);
+                filestorHandler.flush(true);
+                CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+                std::shared_ptr<api::RemoveReply> rreply(
+                        std::dynamic_pointer_cast<api::RemoveReply>(
+                                top.getReply(0)));
+                CPPUNIT_ASSERT_MSG(top.getReply(0)->getType().toString(),
+                                   rreply.get());
+                CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
+                                     rreply->getResult());
+                top.reset();
+            }
+        }
+        LOG(debug, "Starting the actual join after populating data");
+        // Perform a join, check that other files are gone
+        {
+            std::shared_ptr<api::JoinBucketsCommand> cmd(
+                    new api::JoinBucketsCommand(document::BucketId(16, 1)));
+            cmd->getSourceBuckets().push_back(document::BucketId(17, 0x00001));
+            cmd->getSourceBuckets().push_back(document::BucketId(17, 0x10001));
+            filestorHandler.schedule(cmd, 0);
+            filestorHandler.flush(true);
+            CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+            std::shared_ptr<api::JoinBucketsReply> reply(
+                    std::dynamic_pointer_cast<api::JoinBucketsReply>(
+                            top.getReply(0)));
+            CPPUNIT_ASSERT(reply.get());
+            CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
+                                 reply->getResult());
+            top.reset();
+        }
+        // Test that the documents have gotten into the file.
+        // Removed docs (every 5th) must not be found after the join.
+        for (uint32_t i=0; i<documents.size(); ++i) {
+            document::BucketId bucket(16, 1);
+            std::shared_ptr<api::GetCommand> cmd(
+                    new api::GetCommand(bucket, documents[i]->getId(), "[all]"));
+            api::StorageMessageAddress address(
+                    "storage", lib::NodeType::STORAGE, 3);
+            cmd->setAddress(address);
+            filestorHandler.schedule(cmd, 0);
+            filestorHandler.flush(true);
+            CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+            std::shared_ptr<api::GetReply> reply(
+                    std::dynamic_pointer_cast<api::GetReply>(
+                            top.getReply(0)));
+            CPPUNIT_ASSERT(reply.get());
+            CPPUNIT_ASSERT_EQUAL(i % 5 != 0 ? true : false, reply->wasFound());
+            top.reset();
+        }
+    }
+    // Closing file stor handler before threads are deleted, such that
+    // file stor threads getNextMessage calls returns.
+    filestorHandler.close();
+}
+
+namespace {
+
+spi::IteratorId
+createIterator(DummyStorageLink& link,
+ const document::BucketId& bucketId,
+ const std::string& docSel,
+ framework::MicroSecTime fromTime = framework::MicroSecTime(0),
+ framework::MicroSecTime toTime = framework::MicroSecTime::max(),
+ bool headerOnly = false)
+{
+ spi::Bucket bucket(bucketId, spi::PartitionId(0));
+
+ spi::Selection selection =
+ spi::Selection(spi::DocumentSelection(docSel));
+ selection.setFromTimestamp(spi::Timestamp(fromTime.getTime()));
+ selection.setToTimestamp(spi::Timestamp(toTime.getTime()));
+ CreateIteratorCommand::SP createIterCmd(
+ new CreateIteratorCommand(bucket,
+ selection,
+ headerOnly ? "[header]" : "[all]",
+ spi::NEWEST_DOCUMENT_ONLY));
+ link.sendDown(createIterCmd);
+ link.waitForMessages(1, FileStorManagerTest::LONG_WAITTIME);
+ CPPUNIT_ASSERT_EQUAL(size_t(1), link.getNumReplies());
+ std::shared_ptr<CreateIteratorReply> reply(
+ std::dynamic_pointer_cast<CreateIteratorReply>(
+ link.getReply(0)));
+ CPPUNIT_ASSERT(reply.get());
+ link.reset();
+ CPPUNIT_ASSERT(reply->getResult().success());
+ return reply->getIteratorId();
+}
+
+}
+
/**
 * Exercises the iterator/visiting path of the file stor manager: puts
 * documents into two buckets, then reads them back through
 * CreateIterator/GetIter — without a selection, with a field selection,
 * and with a timestamp window (header-only).
 */
void
FileStorManagerTest::testVisiting()
{
    TestName testName("testVisiting");
    // Setting up manager
    DummyStorageLink top;
    FileStorManager *manager;
    top.push_back(unique_ptr<StorageLink>(manager = new FileStorManager(
            smallConfig->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
    top.open();
    // Adding documents to two buckets which we are going to visit
    // We want one bucket in one slotfile, and one bucket with a file split
    uint32_t docCount = 50;
    std::vector<document::BucketId> ids(2);
    ids[0] = document::BucketId(16, 1);
    ids[1] = document::BucketId(16, 2);

    createBucket(ids[0], 0);
    createBucket(ids[1], 0);

    // Fixed seed keeps the generated document ids deterministic between
    // runs, so the exact counts asserted below are stable.
    lib::RandomGen randomizer(523);
    for (uint32_t i=0; i<docCount; ++i) {
        std::string content("Here is some content which is in all documents");
        std::ostringstream uri;

        // First 3 documents target location 1 (ids[0]); the remaining 47
        // go to location 2 (ids[1]).
        uri << "userdoc:crawler:" << (i < 3 ? 1 : 2) << ":"
            << randomizer.nextUint32() << ".html";
        Document::SP doc(createDocument(
                    content, uri.str()).release());
        const document::DocumentType& type(doc->getType());
        // 30 documents match the "John Doe" selection used further down,
        // 20 do not.
        if (i < 30) {
            doc->setValue(type.getField("hstringval"),
                          document::StringFieldValue("John Doe"));
        } else {
            doc->setValue(type.getField("hstringval"),
                          document::StringFieldValue("Jane Doe"));
        }
        // Timestamp of put i is i+1; relevant for the time-window visit.
        std::shared_ptr<api::PutCommand> cmd(new api::PutCommand(
                        ids[i < 3 ? 0 : 1], doc, i+1));
        top.sendDown(cmd);
    }
    top.waitForMessages(docCount, _waitTime);
    CPPUNIT_ASSERT_EQUAL((size_t) docCount, top.getNumReplies());
    // Check nodestate with splitting
    {
        api::BucketInfo info;
        // Replies 3..docCount-1 belong to ids[1]; the last reply's info
        // reflects the final document count of that bucket.
        for (uint32_t i=3; i<docCount; ++i) {
            std::shared_ptr<api::BucketInfoReply> reply(
                    std::dynamic_pointer_cast<api::BucketInfoReply>(
                        top.getReply(i)));
            CPPUNIT_ASSERT(reply.get());
            CPPUNIT_ASSERT_MESSAGE(reply->getResult().toString(),
                                   reply->getResult().success());

            info = reply->getBucketInfo();
        }
        CPPUNIT_ASSERT_EQUAL(docCount-3, info.getDocumentCount());
    }
    top.reset();
    // Visit bucket with no split, using no selection
    {
        framework::MemoryToken::UP token(
                _node->getMemoryManager().allocate(
                    _node->getMemoryManager().getAllocationType(
                        "VISITOR_BUFFER"),
                    16*1024,
                    16*1024,
                    127));
        // "true" selection matches everything; ids[0] holds 3 documents.
        spi::IteratorId iterId(createIterator(top, ids[0], "true"));
        std::shared_ptr<GetIterCommand> cmd(
                new GetIterCommand(std::move(token), ids[0], iterId, 16*1024));
        top.sendDown(cmd);
        top.waitForMessages(1, _waitTime);
        CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
        std::shared_ptr<GetIterReply> reply(
                std::dynamic_pointer_cast<GetIterReply>(top.getReply(0)));
        CPPUNIT_ASSERT(reply.get());
        CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
        CPPUNIT_ASSERT_EQUAL(ids[0], reply->getBucketId());
        CPPUNIT_ASSERT_EQUAL(size_t(3), reply->getEntries().size());
        top.reset();
    }
    // Visit bucket with split, using selection
    {
        uint32_t totalDocs = 0;
        spi::IteratorId iterId(
                createIterator(top,
                               ids[1],
                               "testdoctype1.hstringval = \"John Doe\""));
        // Page through the iterator until it reports completion.
        while (true) {
            framework::MemoryToken::UP token(
                    _node->getMemoryManager().allocate(
                        _node->getMemoryManager().getAllocationType(
                            "VISITOR_BUFFER"),
                        16*1024,
                        16*1024,
                        127));
            std::shared_ptr<GetIterCommand> cmd(
                    new GetIterCommand(std::move(token), ids[1], iterId, 16*1024));
            top.sendDown(cmd);
            top.waitForMessages(1, _waitTime);
            CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
            std::shared_ptr<GetIterReply> reply(
                    std::dynamic_pointer_cast<GetIterReply>(
                        top.getReply(0)));
            CPPUNIT_ASSERT(reply.get());
            CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
                                 reply->getResult());
            CPPUNIT_ASSERT_EQUAL(ids[1], reply->getBucketId());
            totalDocs += reply->getEntries().size();
            top.reset();
            if (reply->isCompleted()) {
                break;
            }
        }
        // 30 "John Doe" documents in total, 3 of which live in ids[0],
        // leaving 27 in ids[1].
        CPPUNIT_ASSERT_EQUAL(27u, totalDocs);
    }
    // Visit bucket with min and max timestamps set, headers only
    {
        document::BucketId bucket(16, 2);
        spi::IteratorId iterId(
                createIterator(top,
                               ids[1],
                               "",
                               framework::MicroSecTime(30),
                               framework::MicroSecTime(40),
                               true));
        uint32_t totalDocs = 0;
        while (true) {
            framework::MemoryToken::UP token(
                    _node->getMemoryManager().allocate(
                        _node->getMemoryManager().getAllocationType(
                            "VISITOR_BUFFER"),
                        16*1024,
                        16*1024,
                        127));
            std::shared_ptr<GetIterCommand> cmd(
                    new GetIterCommand(std::move(token), ids[1], iterId, 16*1024));
            top.sendDown(cmd);
            top.waitForMessages(1, _waitTime);
            CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
            std::shared_ptr<GetIterReply> reply(
                    std::dynamic_pointer_cast<GetIterReply>(
                        top.getReply(0)));
            CPPUNIT_ASSERT(reply.get());
            CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
                                 reply->getResult());
            CPPUNIT_ASSERT_EQUAL(bucket, reply->getBucketId());
/* Header only is a VDS-specific thing.

            for (size_t i = 0; i < reply->getEntries().size(); ++i) {
                CPPUNIT_ASSERT(reply->getEntries()[i]->getDocument()
                               ->getBody().empty());
            }
*/
            totalDocs += reply->getEntries().size();
            top.reset();
            if (reply->isCompleted()) {
                break;
            }
        }
        // Timestamps 30..40 inclusive cover 11 of the puts above
        // (put i used timestamp i+1).
        CPPUNIT_ASSERT_EQUAL(11u, totalDocs);
    }

}
+
+void
+FileStorManagerTest::testRemoveLocation()
+{
+ TestName testName("testRemoveLocation");
+ // Setting up manager
+ DummyStorageLink top;
+ FileStorManager *manager;
+ top.push_back(unique_ptr<StorageLink>(manager =
+ new FileStorManager(config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
+ top.open();
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
+ document::BucketId bid(8, 0);
+
+ createBucket(bid, 0);
+
+ // Adding some documents to be removed later
+ for (uint32_t i=0; i<=10; ++i) {
+ std::ostringstream docid;
+ docid << "userdoc:ns:" << (i << 8) << ":foo";
+ Document::SP doc(createDocument(
+ "some content", docid.str()).release());
+ std::shared_ptr<api::PutCommand> cmd(
+ new api::PutCommand(bid, doc, 1000 + i));
+ cmd->setAddress(address);
+ top.sendDown(cmd);
+ top.waitForMessages(1, _waitTime);
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ std::shared_ptr<api::PutReply> reply(
+ std::dynamic_pointer_cast<api::PutReply>(
+ top.getReply(0)));
+ top.reset();
+ CPPUNIT_ASSERT(reply.get());
+ CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ CPPUNIT_ASSERT_EQUAL(i + 1u, reply->getBucketInfo().getDocumentCount());
+ }
+ // Issuing remove location command
+ {
+ std::shared_ptr<api::RemoveLocationCommand> cmd(
+ new api::RemoveLocationCommand("id.user % 512 == 0", bid));
+ //new api::RemoveLocationCommand("id.user == 1", bid));
+ cmd->setAddress(address);
+ top.sendDown(cmd);
+ top.waitForMessages(1, _waitTime);
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ std::shared_ptr<api::RemoveLocationReply> reply(
+ std::dynamic_pointer_cast<api::RemoveLocationReply>(
+ top.getReply(0)));
+ top.reset();
+ CPPUNIT_ASSERT(reply.get());
+ CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ CPPUNIT_ASSERT_EQUAL(5u, reply->getBucketInfo().getDocumentCount());
+ }
+}
+
+void FileStorManagerTest::testDeleteBucket()
+{
+ TestName testName("testDeleteBucket");
+ // Setting up manager
+ DummyStorageLink top;
+ FileStorManager *manager;
+ top.push_back(unique_ptr<StorageLink>(manager = new FileStorManager(
+ config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
+ top.open();
+ api::StorageMessageAddress address(
+ "storage", lib::NodeType::STORAGE, 2);
+ // Creating a document to test with
+ document::DocumentId docId("userdoc:crawler:4000:http://www.ntnu.no/");
+ Document::SP doc(new Document(*_testdoctype1, docId));
+ document::BucketId bid(16, 4000);
+
+ createBucket(bid, 0);
+
+ api::BucketInfo bucketInfo;
+ // Putting it
+ {
+ std::shared_ptr<api::PutCommand> cmd(
+ new api::PutCommand(bid, doc, 105));
+ cmd->setAddress(address);
+ top.sendDown(cmd);
+ top.waitForMessages(1, _waitTime);
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ std::shared_ptr<api::PutReply> reply(
+ std::dynamic_pointer_cast<api::PutReply>(
+ top.getReply(0)));
+ CPPUNIT_ASSERT(reply.get());
+ CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+
+ CPPUNIT_ASSERT_EQUAL(1, (int)reply->getBucketInfo().getDocumentCount());
+ bucketInfo = reply->getBucketInfo();
+ top.reset();
+ }
+
+ // Delete bucket
+ {
+ std::shared_ptr<api::DeleteBucketCommand> cmd(
+ new api::DeleteBucketCommand(bid));
+ cmd->setAddress(address);
+ cmd->setBucketInfo(bucketInfo);
+ top.sendDown(cmd);
+ top.waitForMessages(1, _waitTime);
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ std::shared_ptr<api::DeleteBucketReply> reply(
+ std::dynamic_pointer_cast<api::DeleteBucketReply>(
+ top.getReply(0)));
+ CPPUNIT_ASSERT(reply.get());
+ CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ }
+}
+
/**
 * A DeleteBucketCommand carrying bucket info that does not match the
 * bucket's current state must be rejected, and the reply must report
 * the bucket's actual info so the sender can resynchronize.
 */
void
FileStorManagerTest::testDeleteBucketRejectOutdatedBucketInfo()
{
    TestName testName("testDeleteBucketRejectOutdatedBucketInfo");
    // Setting up manager
    DummyStorageLink top;
    FileStorManager *manager;
    top.push_back(unique_ptr<StorageLink>(manager = new FileStorManager(
            config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
    top.open();
    api::StorageMessageAddress address(
            "storage", lib::NodeType::STORAGE, 2);
    // Creating a document to test with
    document::DocumentId docId("userdoc:crawler:4000:http://www.ntnu.no/");
    Document::SP doc(new Document(*_testdoctype1, docId));
    document::BucketId bid(16, 4000);

    createBucket(bid, 0);

    api::BucketInfo bucketInfo;

    // Putting it
    {
        std::shared_ptr<api::PutCommand> cmd(
                new api::PutCommand(bid, doc, 105));
        cmd->setAddress(address);
        top.sendDown(cmd);
        top.waitForMessages(1, _waitTime);
        CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
        std::shared_ptr<api::PutReply> reply(
                std::dynamic_pointer_cast<api::PutReply>(
                    top.getReply(0)));
        CPPUNIT_ASSERT(reply.get());
        CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());

        CPPUNIT_ASSERT_EQUAL(1, (int)reply->getBucketInfo().getDocumentCount());
        // Keep the real info around to compare against the reject reply.
        bucketInfo = reply->getBucketInfo();
        top.reset();
    }

    // Attempt to delete bucket, but with non-matching bucketinfo
    {
        std::shared_ptr<api::DeleteBucketCommand> cmd(
                new api::DeleteBucketCommand(bid));
        // Deliberately bogus checksum/doc counts that cannot match the
        // bucket state produced by the single put above.
        cmd->setBucketInfo(BucketInfo(0xf000baaa, 1, 123, 1, 456));
        cmd->setAddress(address);
        top.sendDown(cmd);
        top.waitForMessages(1, _waitTime);
        CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
        std::shared_ptr<api::DeleteBucketReply> reply(
                std::dynamic_pointer_cast<api::DeleteBucketReply>(
                    top.getReply(0)));
        CPPUNIT_ASSERT(reply.get());
        CPPUNIT_ASSERT_EQUAL(
                ReturnCode::REJECTED,
                reply->getResult().getResult());
        // The reject reply carries the bucket's actual (current) info.
        CPPUNIT_ASSERT_EQUAL(bucketInfo, reply->getBucketInfo());
    }
}
+
+/**
+ * Test that receiving a DeleteBucketCommand with invalid
+ * BucketInfo deletes the bucket and does not fail the operation.
+ */
+void
+FileStorManagerTest::testDeleteBucketWithInvalidBucketInfo()
+{
+ TestName testName("testDeleteBucketWithInvalidBucketInfo");
+ // Setting up manager
+ DummyStorageLink top;
+ FileStorManager *manager;
+ top.push_back(unique_ptr<StorageLink>(manager = new FileStorManager(
+ config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
+ top.open();
+ api::StorageMessageAddress address(
+ "storage", lib::NodeType::STORAGE, 2);
+ // Creating a document to test with
+ document::DocumentId docId("userdoc:crawler:4000:http://www.ntnu.no/");
+ Document::SP doc(new Document(*_testdoctype1, docId));
+ document::BucketId bid(16, 4000);
+
+ createBucket(bid, 0);
+
+ // Putting it
+ {
+ std::shared_ptr<api::PutCommand> cmd(
+ new api::PutCommand(bid, doc, 105));
+ cmd->setAddress(address);
+ top.sendDown(cmd);
+ top.waitForMessages(1, _waitTime);
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ std::shared_ptr<api::PutReply> reply(
+ std::dynamic_pointer_cast<api::PutReply>(
+ top.getReply(0)));
+ CPPUNIT_ASSERT(reply.get());
+ CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ CPPUNIT_ASSERT_EQUAL(1, (int)reply->getBucketInfo().getDocumentCount());
+ top.reset();
+ }
+
+ // Attempt to delete bucket with invalid bucketinfo
+ {
+ std::shared_ptr<api::DeleteBucketCommand> cmd(
+ new api::DeleteBucketCommand(bid));
+ cmd->setAddress(address);
+ top.sendDown(cmd);
+ top.waitForMessages(1, _waitTime);
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ std::shared_ptr<api::DeleteBucketReply> reply(
+ std::dynamic_pointer_cast<api::DeleteBucketReply>(
+ top.getReply(0)));
+ CPPUNIT_ASSERT(reply.get());
+ CPPUNIT_ASSERT_EQUAL(
+ ReturnCode::OK,
+ reply->getResult().getResult());
+ CPPUNIT_ASSERT_EQUAL(api::BucketInfo(), reply->getBucketInfo());
+ }
+}
+
namespace {

    /**
     * Utility storage link, sending data to the given links instead of through
     * a regular chain.
     */
    struct MidLink : public StorageLink {
        StorageLink& _up; // link that receives everything travelling upwards

    public:
        MidLink(std::unique_ptr<StorageLink> down, StorageLink& up)
            : StorageLink("MidLink"), _up(up)
        {
            push_back(std::move(down));
        }
        ~MidLink() {
            closeNextLink();
        }

        virtual void print(std::ostream& out, bool, const std::string&) const
        { out << "MidLink"; }

        // Divert upward-bound messages into _up instead of letting them
        // continue through this link's own chain.
        virtual bool onUp(const std::shared_ptr<api::StorageMessage> & msg) {
            if (!StorageLinkTest::callOnUp(_up, msg)) _up.sendUp(msg);
            return true;
        }

    };

    /**
     * Utility class, connecting two storage links below it, sending
     * messages coming up from one down the other (providing address is set
     * correctly.)
     */
    class BinaryStorageLink : public DummyStorageLink {
        vespalib::Lock _lock; // guards _seen; onUp/onDown may run on different threads
        std::set<api::StorageMessage::Id> _seen; // ids of messages injected from above
        MidLink _left;
        MidLink _right;
        uint16_t _leftAddr;   // node index routed to _left
        uint16_t _rightAddr;  // node index routed to _right

    public:
        BinaryStorageLink(uint16_t leftAddr, std::unique_ptr<StorageLink> left,
                          uint16_t rightAddr, std::unique_ptr<StorageLink> right)
            : _left(std::move(left), *this),
              _right(std::move(right), *this),
              _leftAddr(leftAddr),
              _rightAddr(rightAddr) {}

        virtual void print(std::ostream& out, bool, const std::string&) const
        { out << "BinaryStorageLink"; }

        // Messages coming from above are remembered in _seen so their
        // replies can be recognized in onUp(), then routed below.
        virtual bool onDown(const std::shared_ptr<api::StorageMessage> & msg) {
// LOG(debug, "onDown Received msg: ->%s, %s %llu\n", msg->getAddress() ? msg->getAddress()->toString().c_str() : "(null)", msg->toString().c_str(), msg->getMsgId());

            vespalib::LockGuard lock(_lock);
            _seen.insert(msg->getMsgId());
            return sendOn(msg);
        }

        // Route a message to _left or _right based on its address and on
        // whether it is a command (towards the addressed node) or a reply
        // (away from it). Fails the test on any other address.
        bool sendOn(const std::shared_ptr<api::StorageMessage> & msg) {
            if (msg->getAddress()) {
                uint16_t address = msg->getAddress()->getIndex();
                if ((address == _leftAddr && !msg->getType().isReply()) ||
                    (address == _rightAddr && msg->getType().isReply()))
                {
                    if (!StorageLinkTest::callOnDown(_left, msg)) {
                        _left.sendDown(msg);
                    }
                } else if ((address == _rightAddr && !msg->getType().isReply()) ||
                           (address == _leftAddr && msg->getType().isReply()))
                {
                    if (!StorageLinkTest::callOnDown(_right, msg)) {
                        _right.sendDown(msg);
                    }
                } else {
                    std::ostringstream ost;
                    ost << "Address " << address << " is neither " << _leftAddr
                        << " or " << _rightAddr << " in message " << *msg
                        << ".\n";
                    CPPUNIT_FAIL(ost.str());
                }
            }
            return true;
        }

        virtual bool onUp(const std::shared_ptr<api::StorageMessage> & msg) {
// LOG(debug, "onUp Received msg: ->%s, %s %llu\n", msg->getAddress() ? msg->getAddress()->toString().c_str() : "(null)", msg->toString().c_str(), msg->getMsgId());

            vespalib::LockGuard lock(_lock);
            std::set<api::StorageMessage::Id>::iterator it
                    = _seen.find(msg->getMsgId());
            // If message originated from the outside
            if (it != _seen.end()) {
                LOG(debug, "Have seen this message before, storing");

                _seen.erase(it);
                return DummyStorageLink::onUp(msg);
            // If it originated from below, send it down again.
            } else if (msg->getType() == api::MessageType::NOTIFYBUCKETCHANGE) {
                // Just throw away notify bucket change
                return true;
            } else {
                LOG(debug, "Never seen %s, sending on!",
                    msg->toString().c_str());

                return sendOn(msg);
            }
        }

        // Propagate flush/open/close lifecycle events to both sub-chains.
        void onFlush(bool downwards) {
            if (downwards) {
                _left.flush();
                _right.flush();
            }
        }
        void onOpen() {
            _left.open();
            _right.open();
        }
        void onClose() {
            _left.close();
            _right.close();
        }
    };
}
+
+void
+FileStorManagerTest::testNoTimestamps()
+{
+ TestName testName("testNoTimestamps");
+ // Setting up manager
+ DummyStorageLink top;
+ FileStorManager *manager;
+ top.push_back(unique_ptr<StorageLink>(manager =
+ new FileStorManager(config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
+ top.open();
+ api::StorageMessageAddress address(
+ "storage", lib::NodeType::STORAGE, 3);
+ // Creating a document to test with
+ Document::SP doc(createDocument(
+ "some content", "doc:crawler:http://www.ntnu.no/").release());
+ document::BucketId bid(16, 4000);
+
+ createBucket(bid, 0);
+
+ // Putting it
+ {
+ std::shared_ptr<api::PutCommand> cmd(
+ new api::PutCommand(bid, doc, 0));
+ cmd->setAddress(address);
+ CPPUNIT_ASSERT_EQUAL((api::Timestamp)0, cmd->getTimestamp());
+ top.sendDown(cmd);
+ top.waitForMessages(1, _waitTime);
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ std::shared_ptr<api::PutReply> reply(
+ std::dynamic_pointer_cast<api::PutReply>(
+ top.getReply(0)));
+ top.reset();
+ CPPUNIT_ASSERT(reply.get());
+ CPPUNIT_ASSERT_EQUAL(ReturnCode::REJECTED,
+ reply->getResult().getResult());
+ }
+ // Removing it
+ {
+ std::shared_ptr<api::RemoveCommand> cmd(
+ new api::RemoveCommand(bid, doc->getId(), 0));
+ cmd->setAddress(address);
+ CPPUNIT_ASSERT_EQUAL((api::Timestamp)0, cmd->getTimestamp());
+ top.sendDown(cmd);
+ top.waitForMessages(1, _waitTime);
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ std::shared_ptr<api::RemoveReply> reply(
+ std::dynamic_pointer_cast<api::RemoveReply>(
+ top.getReply(0)));
+ top.reset();
+ CPPUNIT_ASSERT(reply.get());
+ CPPUNIT_ASSERT_EQUAL(ReturnCode::REJECTED,
+ reply->getResult().getResult());
+ }
+}
+
/**
 * Re-putting the same document at an already used timestamp is accepted
 * (merges may legitimately move an identical entry between copies), but
 * putting a *different* document at that timestamp must fail with
 * TIMESTAMP_EXIST.
 */
void
FileStorManagerTest::testEqualTimestamps()
{
    TestName testName("testEqualTimestamps");
    // Setting up manager
    DummyStorageLink top;
    FileStorManager *manager;
    top.push_back(unique_ptr<StorageLink>(manager =
            new FileStorManager(config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
    top.open();
    api::StorageMessageAddress address(
            "storage", lib::NodeType::STORAGE, 3);
    // Creating a document to test with
    document::BucketId bid(16, 4000);

    createBucket(bid, 0);

    // Putting it
    {
        Document::SP doc(createDocument(
                    "some content", "userdoc:crawler:4000:http://www.ntnu.no/")
                .release());
        std::shared_ptr<api::PutCommand> cmd(
                new api::PutCommand(bid, doc, 100));
        cmd->setAddress(address);
        top.sendDown(cmd);
        top.waitForMessages(1, _waitTime);
        CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
        std::shared_ptr<api::PutReply> reply(
                std::dynamic_pointer_cast<api::PutReply>(
                    top.getReply(0)));
        top.reset();
        CPPUNIT_ASSERT(reply.get());
        CPPUNIT_ASSERT_EQUAL(ReturnCode::OK, reply->getResult().getResult());
    }

    // Putting it on same timestamp again
    // (ok as doc is the same. Since merge can move doc to other copy we
    // have to accept this)
    {
        Document::SP doc(createDocument(
                    "some content", "userdoc:crawler:4000:http://www.ntnu.no/")
                .release());
        std::shared_ptr<api::PutCommand> cmd(
                new api::PutCommand(bid, doc, 100));
        cmd->setAddress(address);
        top.sendDown(cmd);
        top.waitForMessages(1, _waitTime);
        CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
        std::shared_ptr<api::PutReply> reply(
                std::dynamic_pointer_cast<api::PutReply>(
                    top.getReply(0)));
        top.reset();
        CPPUNIT_ASSERT(reply.get());
        CPPUNIT_ASSERT_EQUAL(ReturnCode::OK, reply->getResult().getResult());
    }

    // Putting the doc with other id. Now we should fail
    // (same timestamp 100, but document id ".nu/" instead of ".no/")
    {
        Document::SP doc(createDocument(
                    "some content", "userdoc:crawler:4000:http://www.ntnu.nu/")
                .release());
        std::shared_ptr<api::PutCommand> cmd(
                new api::PutCommand(bid, doc, 100));
        cmd->setAddress(address);
        top.sendDown(cmd);
        top.waitForMessages(1, _waitTime);
        CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
        std::shared_ptr<api::PutReply> reply(
                std::dynamic_pointer_cast<api::PutReply>(
                    top.getReply(0)));
        top.reset();
        CPPUNIT_ASSERT(reply.get());
        CPPUNIT_ASSERT_EQUAL(ReturnCode::TIMESTAMP_EXIST,
                             reply->getResult().getResult());
    }
}
+
/**
 * Exercises MultiOperationCommand: builds an operation list containing
 * puts (docs 10..14), removes (docs 4..7) and field updates (docs 1..2,
 * plus one update targeting a nonexisting document), applies it in one
 * command, and verifies the resulting state of documents 0..15 via Get.
 */
void
FileStorManagerTest::testMultiOp()
{
    TestName testName("testMultiOp");
    // Setting up manager
    DummyStorageLink top;
    FileStorManager *manager;
    top.push_back(unique_ptr<StorageLink>(manager =
            new FileStorManager(config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
    top.open();
    api::StorageMessageAddress address(
            "storage", lib::NodeType::STORAGE, 3);

    createBucket(document::BucketId(16, 0), 0);

    // Add some documents to remove/update later
    // (docs 0..9, each with both "headerval" = i and "content" fields set)
    for (uint32_t i=0; i<10; ++i) {
        std::ostringstream did;
        did << "userdoc:crawler:0:http://www.ntnu.no/" << i;
        Document::SP doc(createDocument(
                    "some content", did.str()).release());
        doc->set("headerval", (int) i);
        doc->set("content", "some content");
        std::shared_ptr<api::PutCommand> cmd(
                new api::PutCommand(document::BucketId(16, 0), doc, 100 + i));
        cmd->setAddress(address);
        top.sendDown(cmd);
        top.waitForMessages(1, _waitTime);
        CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
        std::shared_ptr<api::PutReply> reply(
                std::dynamic_pointer_cast<api::PutReply>(
                    top.getReply(0)));
        top.reset();
        CPPUNIT_ASSERT(reply.get());
        CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
    }
    document::DocumentTypeRepo::SP repo = _node->getTypeRepo();

    // Create operation list
    std::vector<char> buffer(1024 * 1024);
    vdslib::WritableDocumentList mdl(repo, &buffer[0], buffer.size());
    // Puts of new documents 10..14 (body content only, no header fields).
    for (uint32_t i=10; i<15; ++i) {
        std::ostringstream did;
        did << "userdoc:crawler:0:http://www.ntnu.no/" << i;
        mdl.addPut(*createDocument("foo bar", did.str()),
                   1000 + i);
    }
    // Removes of documents 4..7.
    for (uint32_t i=4; i<8; ++i) {
        std::ostringstream did;
        did << "userdoc:crawler:0:http://www.ntnu.no/" << i;
        mdl.addRemove(document::DocumentId(did.str()), 2000 + i);
    }
    // Updates of documents 1..2: even index gets content := "baah",
    // odd index gets headerval := i + 100.
    for (uint32_t i=1; i<3; ++i) {
        std::ostringstream did;
        did << "userdoc:crawler:0:http://www.ntnu.no/" << i;
        document::DocumentUpdate update(*_testdoctype1,
                                        document::DocumentId(did.str()));
        if (i % 2 == 0) {
            document::FieldUpdate fupd(_testdoctype1->getField("content"));
            fupd.addUpdate(document::AssignValueUpdate(
                        document::StringFieldValue("baah")));
            update.addUpdate(fupd);
        } else {
            document::FieldUpdate fupd(_testdoctype1->getField("headerval"));
            fupd.addUpdate(document::AssignValueUpdate(
                        document::IntFieldValue(i + 100)));
            update.addUpdate(fupd);
        }
        mdl.addUpdate(update, 3000 + i);
    }
    // Add a non-existing update
    {
        std::ostringstream did;
        did << "userdoc:crawler:0:http://www.ntnu.no/nonexisting1";
        document::DocumentUpdate update(*_testdoctype1,
                                        document::DocumentId(did.str()));
        document::FieldUpdate fupd(_testdoctype1->getField("content"));
        fupd.addUpdate(document::AssignValueUpdate(
                    document::StringFieldValue("baah")));
        update.addUpdate(fupd);
        mdl.addUpdate(update, 4000);
    }

    // Issue operation.
    {
        std::shared_ptr<api::MultiOperationCommand> cmd(
                new api::MultiOperationCommand(
                    repo, document::BucketId(16, 0), buffer));
        cmd->setAddress(address);
        top.sendDown(cmd);
        top.waitForMessages(1, _waitTime);
        CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
        std::shared_ptr<api::MultiOperationReply> reply(
                std::dynamic_pointer_cast<api::MultiOperationReply>(
                    top.getReply(0)));
        top.reset();
        CPPUNIT_ASSERT(reply.get());
        CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
                             reply->getResult());
    }
    // Verify that new documents exist and that removed are gone.
    // Expected state: 0..3 and 8..9 original (1..2 updated), 4..7 removed,
    // 10..14 added by the multi-op, 15 never existed.
    for (uint32_t i=0; i<16; ++i) {
        std::ostringstream did;
        did << "userdoc:crawler:0:http://www.ntnu.no/" << i;
        std::shared_ptr<api::GetCommand> cmd(new api::GetCommand(
                    document::BucketId(16, 0), document::DocumentId(did.str()),
                    "[all]"));
        cmd->setAddress(address);
        top.sendDown(cmd);
        top.waitForMessages(1, _waitTime);
        CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
        std::shared_ptr<api::GetReply> reply3(
                std::dynamic_pointer_cast<api::GetReply>(
                    top.getReply(0)));
        top.reset();
        CPPUNIT_ASSERT(reply3.get());
        if (i < 4 || (i >= 8 && i < 15)) {
            CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
                                 reply3->getResult());
            CPPUNIT_ASSERT_EQUAL(vespalib::string(did.str()),
                                 reply3->getDocumentId().toString());
            if (i >= 10) {
                // Added by the multi-op put: body only, no header field.
                CPPUNIT_ASSERT(!reply3->getDocument()->hasValue("headerval"));
                CPPUNIT_ASSERT(reply3->getDocument()->hasValue("content"));
            } else if (i >= 1 && i <3) {
                // Updated documents: i=1 got headerval 101, i=2 got
                // content "baah"; the other field keeps its original value.
                CPPUNIT_ASSERT(reply3->getDocument()->hasValue("headerval"));
                CPPUNIT_ASSERT(reply3->getDocument()->hasValue("content"));
                CPPUNIT_ASSERT_EQUAL(
                        static_cast<const document::FieldValue&>(
                            document::IntFieldValue(i % 2 == 0 ? i : i + 100)),
                        *reply3->getDocument()->getValue("headerval"));
                CPPUNIT_ASSERT_EQUAL(
                        static_cast<const document::FieldValue&>(
                            document::StringFieldValue(i % 2 == 0 ? "baah" : "some content")),
                        *reply3->getDocument()->getValue("content"));
            } else {
                // Untouched documents keep headerval == i.
                CPPUNIT_ASSERT(reply3->getDocument()->hasValue("headerval"));
                CPPUNIT_ASSERT(reply3->getDocument()->hasValue("content"));
                CPPUNIT_ASSERT_EQUAL(
                        static_cast<const document::FieldValue&>(
                            document::IntFieldValue(i)),
                        *reply3->getDocument()->getValue("headerval"));
            }
        } else {
            // Removed (4..7) or never-created (15) documents.
            CPPUNIT_ASSERT_EQUAL(false, reply3->wasFound());
            CPPUNIT_ASSERT_EQUAL(vespalib::string(did.str()),
                                 reply3->getDocumentId().toString());
        }
    }
}
+
+void
+FileStorManagerTest::testGetIter()
+{
+ TestName testName("testGetIter");
+ // Setting up manager
+ DummyStorageLink top;
+ FileStorManager *manager;
+ top.push_back(unique_ptr<StorageLink>(manager =
+ new FileStorManager(config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
+ top.open();
+ api::StorageMessageAddress address(
+ "storage", lib::NodeType::STORAGE, 3);
+ document::BucketId bid(16, 4000);
+
+ createBucket(bid, 0);
+
+ std::vector<Document::SP > docs;
+ // Creating some documents to test with
+ for (uint32_t i=0; i<10; ++i) {
+ std::ostringstream id;
+ id << "userdoc:crawler:4000:http://www.ntnu.no/" << i;
+ docs.push_back(
+ Document::SP(
+ _node->getTestDocMan().createRandomDocumentAtLocation(
+ 4000, i, 400, 400)));
+ }
+ BucketInfo bucketInfo;
+ // Putting all docs to have something to visit
+ for (uint32_t i=0; i<docs.size(); ++i) {
+ std::shared_ptr<api::PutCommand> cmd(
+ new api::PutCommand(bid, docs[i], 100 + i));
+ cmd->setAddress(address);
+ top.sendDown(cmd);
+ top.waitForMessages(1, _waitTime);
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ std::shared_ptr<api::PutReply> reply(
+ std::dynamic_pointer_cast<api::PutReply>(
+ top.getReply(0)));
+ top.reset();
+ CPPUNIT_ASSERT(reply.get());
+ CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ bucketInfo = reply->getBucketInfo();
+ }
+ // Sending a getiter request that will only visit some of the docs
+ spi::IteratorId iterId(createIterator(top, bid, ""));
+ {
+ framework::MemoryToken::UP token(
+ _node->getMemoryManager().allocate(
+ _node->getMemoryManager().getAllocationType(
+ "VISITOR_BUFFER"),
+ 2048,
+ 2048,
+ 127));
+ std::shared_ptr<GetIterCommand> cmd(
+ new GetIterCommand(std::move(token), bid, iterId, 2048));
+ top.sendDown(cmd);
+ top.waitForMessages(1, _waitTime);
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ std::shared_ptr<GetIterReply> reply(
+ std::dynamic_pointer_cast<GetIterReply>(
+ top.getReply(0)));
+ top.reset();
+ CPPUNIT_ASSERT(reply.get());
+ CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ CPPUNIT_ASSERT(reply->getEntries().size() > 0);
+ CPPUNIT_ASSERT(reply->getEntries().size() < docs.size());
+ }
+ // Normal case of get iter is testing through visitor tests.
+ // Testing specific situation where file is deleted while visiting here
+ {
+ std::shared_ptr<api::DeleteBucketCommand> cmd(
+ new api::DeleteBucketCommand(bid));
+ cmd->setBucketInfo(bucketInfo);
+ top.sendDown(cmd);
+ top.waitForMessages(1, _waitTime);
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ std::shared_ptr<api::DeleteBucketReply> reply(
+ std::dynamic_pointer_cast<api::DeleteBucketReply>(
+ top.getReply(0)));
+ top.reset();
+ CPPUNIT_ASSERT(reply.get());
+ CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ }
+ {
+ framework::MemoryToken::UP token(
+ _node->getMemoryManager().allocate(
+ _node->getMemoryManager().getAllocationType(
+ "VISITOR_BUFFER"),
+ 2048,
+ 2048,
+ 127));
+ std::shared_ptr<GetIterCommand> cmd(
+ new GetIterCommand(std::move(token), bid, iterId, 2048));
+ top.sendDown(cmd);
+ top.waitForMessages(1, _waitTime);
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ std::shared_ptr<GetIterReply> reply(
+ std::dynamic_pointer_cast<GetIterReply>(
+ top.getReply(0)));
+ top.reset();
+ CPPUNIT_ASSERT(reply.get());
+ CPPUNIT_ASSERT_EQUAL(ReturnCode::BUCKET_NOT_FOUND,
+ reply->getResult().getResult());
+ CPPUNIT_ASSERT(reply->getEntries().empty());
+ }
+}
+
+// Verifies that SetBucketStateCommand toggles a bucket's active state in both
+// the persistence provider and the storage bucket database, and that a
+// ReadBucketInfo round-trip does not clear the in-database active flag.
+void
+FileStorManagerTest::testSetBucketActiveState()
+{
+ TestName testName("testSetBucketActiveState");
+ DummyStorageLink top;
+ FileStorManager* manager(
+ new FileStorManager(config->getConfigId(),
+ _node->getPartitions(),
+ _node->getPersistenceProvider(),
+ _node->getComponentRegister()));
+ top.push_back(unique_ptr<StorageLink>(manager));
+ setClusterState("storage:4 distributor:1");
+ top.open();
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
+
+ document::BucketId bid(16, 4000);
+
+ const uint16_t disk = 0;
+ createBucket(bid, disk);
+ spi::dummy::DummyPersistence& provider(
+ dynamic_cast<spi::dummy::DummyPersistence&>(_node->getPersistenceProvider()));
+ // Freshly created buckets start out inactive in the provider.
+ CPPUNIT_ASSERT(!provider.isActive(spi::Bucket(bid, spi::PartitionId(disk))));
+
+ // Activate the bucket and expect an OK reply.
+ {
+ std::shared_ptr<api::SetBucketStateCommand> cmd(
+ new api::SetBucketStateCommand(
+ bid, api::SetBucketStateCommand::ACTIVE));
+ cmd->setAddress(address);
+ top.sendDown(cmd);
+ top.waitForMessages(1, _waitTime);
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ std::shared_ptr<api::SetBucketStateReply> reply(
+ std::dynamic_pointer_cast<api::SetBucketStateReply>(
+ top.getReply(0)));
+ top.reset();
+ CPPUNIT_ASSERT(reply.get());
+ CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ }
+
+ // Active state must be visible both in the provider and in the bucket db.
+ CPPUNIT_ASSERT(provider.isActive(spi::Bucket(bid, spi::PartitionId(disk))));
+ {
+ StorBucketDatabase::WrappedEntry entry(
+ _node->getStorageBucketDatabase().get(
+ bid, "foo"));
+ CPPUNIT_ASSERT(entry->info.isActive());
+ }
+ // Trigger bucket info to be read back into the database
+ {
+ std::shared_ptr<ReadBucketInfo> cmd(
+ new ReadBucketInfo(bid));
+ top.sendDown(cmd);
+ top.waitForMessages(1, _waitTime);
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ std::shared_ptr<ReadBucketInfoReply> reply(
+ std::dynamic_pointer_cast<ReadBucketInfoReply>(
+ top.getReply(0)));
+ top.reset();
+ CPPUNIT_ASSERT(reply.get());
+ }
+ // Should not have lost active flag
+ {
+ StorBucketDatabase::WrappedEntry entry(
+ _node->getStorageBucketDatabase().get(
+ bid, "foo"));
+ CPPUNIT_ASSERT(entry->info.isActive());
+ }
+
+ // Deactivate again and verify both provider and db reflect it.
+ {
+ std::shared_ptr<api::SetBucketStateCommand> cmd(
+ new api::SetBucketStateCommand(
+ bid, api::SetBucketStateCommand::INACTIVE));
+ cmd->setAddress(address);
+ top.sendDown(cmd);
+ top.waitForMessages(1, _waitTime);
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ std::shared_ptr<api::SetBucketStateReply> reply(
+ std::dynamic_pointer_cast<api::SetBucketStateReply>(
+ top.getReply(0)));
+ top.reset();
+ CPPUNIT_ASSERT(reply.get());
+ CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ }
+
+ CPPUNIT_ASSERT(!provider.isActive(spi::Bucket(bid, spi::PartitionId(disk))));
+ {
+ StorBucketDatabase::WrappedEntry entry(
+ _node->getStorageBucketDatabase().get(
+ bid, "foo"));
+ CPPUNIT_ASSERT(!entry->info.isActive());
+ }
+}
+
+// A SetBucketState from a distributor that no longer owns the bucket should
+// still be answered OK, but must additionally trigger a NotifyBucketChange
+// towards the current owner (index 1 here) carrying the new active state.
+void
+FileStorManagerTest::testNotifyOwnerDistributorOnOutdatedSetBucketState()
+{
+ TestName testName("testNotifyOwnerDistributorOnOutdatedSetBucketState");
+ DummyStorageLink top;
+ FileStorManager* manager(
+ new FileStorManager(config->getConfigId(),
+ _node->getPartitions(),
+ _node->getPersistenceProvider(),
+ _node->getComponentRegister()));
+ top.push_back(unique_ptr<StorageLink>(manager));
+
+ setClusterState("storage:2 distributor:2");
+ top.open();
+
+ // Pick a bucket that distributor 0 (the sender) does not own.
+ document::BucketId bid(getFirstBucketNotOwnedByDistributor(0));
+ CPPUNIT_ASSERT(bid.getRawId() != 0);
+ createBucket(bid, 0);
+
+ std::shared_ptr<api::SetBucketStateCommand> cmd(
+ new api::SetBucketStateCommand(
+ bid, api::SetBucketStateCommand::ACTIVE));
+ cmd->setAddress(api::StorageMessageAddress(
+ "cluster", lib::NodeType::STORAGE, 1));
+ cmd->setSourceIndex(0);
+
+ top.sendDown(cmd);
+ top.waitForMessages(2, _waitTime);
+
+ CPPUNIT_ASSERT_EQUAL(size_t(2), top.getNumReplies());
+ // Not necessarily deterministic order.
+ int idxOffset = 0;
+ if (top.getReply(0)->getType() != api::MessageType::NOTIFYBUCKETCHANGE) {
+ ++idxOffset;
+ }
+ std::shared_ptr<api::NotifyBucketChangeCommand> notifyCmd(
+ std::dynamic_pointer_cast<api::NotifyBucketChangeCommand>(
+ top.getReply(idxOffset)));
+ std::shared_ptr<api::SetBucketStateReply> stateReply(
+ std::dynamic_pointer_cast<api::SetBucketStateReply>(
+ top.getReply(1 - idxOffset)));
+
+ CPPUNIT_ASSERT(stateReply.get());
+ CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
+ stateReply->getResult());
+
+ CPPUNIT_ASSERT(notifyCmd.get());
+ CPPUNIT_ASSERT_EQUAL(uint16_t(1), notifyCmd->getAddress()->getIndex());
+ // Not necessary for this to be set since distributor does not insert this
+ // info into its db, but useful for debugging purposes.
+ CPPUNIT_ASSERT(notifyCmd->getBucketInfo().isActive());
+}
+
+// A GetBucketDiff for a bucket not yet present on this node should implicitly
+// create the bucket (in a "ready" state in the db) rather than fail.
+void
+FileStorManagerTest::testGetBucketDiffImplicitCreateBucket()
+{
+ TestName testName("testGetBucketDiffImplicitCreateBucket");
+ DummyStorageLink top;
+ FileStorManager* manager(
+ new FileStorManager(config->getConfigId(),
+ _node->getPartitions(),
+ _node->getPersistenceProvider(),
+ _node->getComponentRegister()));
+ top.push_back(unique_ptr<StorageLink>(manager));
+ setClusterState("storage:2 distributor:1");
+ top.open();
+
+ document::BucketId bid(16, 4000);
+
+ std::vector<api::MergeBucketCommand::Node> nodes;
+ nodes.push_back(1);
+ nodes.push_back(0);
+
+ std::shared_ptr<api::GetBucketDiffCommand> cmd(
+ new api::GetBucketDiffCommand(bid, nodes, Timestamp(1000)));
+ cmd->setAddress(api::StorageMessageAddress(
+ "cluster", lib::NodeType::STORAGE, 1));
+ cmd->setSourceIndex(0);
+ top.sendDown(cmd);
+
+ api::GetBucketDiffReply* reply;
+ ASSERT_SINGLE_REPLY(api::GetBucketDiffReply, reply, top, _waitTime);
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::OK),
+ reply->getResult());
+ {
+ StorBucketDatabase::WrappedEntry entry(
+ _node->getStorageBucketDatabase().get(
+ bid, "foo"));
+ // Bucket must now exist and be marked ready.
+ CPPUNIT_ASSERT(entry.exist());
+ CPPUNIT_ASSERT(entry->info.isReady());
+ }
+}
+
+// A MergeBucket targeting a bucket this node does not have should implicitly
+// create it; the merge then proceeds by emitting a GetBucketDiff.
+void
+FileStorManagerTest::testMergeBucketImplicitCreateBucket()
+{
+ TestName testName("testMergeBucketImplicitCreateBucket");
+ DummyStorageLink top;
+ FileStorManager* manager(
+ new FileStorManager(config->getConfigId(),
+ _node->getPartitions(),
+ _node->getPersistenceProvider(),
+ _node->getComponentRegister()));
+ top.push_back(unique_ptr<StorageLink>(manager));
+ setClusterState("storage:3 distributor:1");
+ top.open();
+
+ document::BucketId bid(16, 4000);
+
+ std::vector<api::MergeBucketCommand::Node> nodes;
+ nodes.push_back(1);
+ nodes.push_back(2);
+
+ std::shared_ptr<api::MergeBucketCommand> cmd(
+ new api::MergeBucketCommand(bid, nodes, Timestamp(1000)));
+ cmd->setAddress(api::StorageMessageAddress(
+ "cluster", lib::NodeType::STORAGE, 1));
+ cmd->setSourceIndex(0);
+ top.sendDown(cmd);
+
+ // The merge chain forwards a GetBucketDiff; we only need its arrival to
+ // know the bucket was created.
+ api::GetBucketDiffCommand* diffCmd;
+ ASSERT_SINGLE_REPLY(api::GetBucketDiffCommand, diffCmd, top, _waitTime);
+ {
+ StorBucketDatabase::WrappedEntry entry(
+ _node->getStorageBucketDatabase().get(
+ bid, "foo"));
+ CPPUNIT_ASSERT(entry.exist());
+ CPPUNIT_ASSERT(entry->info.isReady());
+ }
+}
+
+// A plain CreateBucket must leave the new bucket "ready" but NOT active in
+// the bucket database (activation is a separate, explicit step).
+void
+FileStorManagerTest::testNewlyCreatedBucketIsReady()
+{
+ TestName testName("testNewlyCreatedBucketIsReady");
+ DummyStorageLink top;
+ FileStorManager* manager(
+ new FileStorManager(config->getConfigId(),
+ _node->getPartitions(),
+ _node->getPersistenceProvider(),
+ _node->getComponentRegister()));
+ top.push_back(unique_ptr<StorageLink>(manager));
+ setClusterState("storage:2 distributor:1");
+ top.open();
+
+ document::BucketId bid(16, 4000);
+
+ std::shared_ptr<api::CreateBucketCommand> cmd(
+ new api::CreateBucketCommand(bid));
+ cmd->setAddress(api::StorageMessageAddress(
+ "cluster", lib::NodeType::STORAGE, 1));
+ cmd->setSourceIndex(0);
+ top.sendDown(cmd);
+
+ api::CreateBucketReply* reply;
+ ASSERT_SINGLE_REPLY(api::CreateBucketReply, reply, top, _waitTime);
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::OK),
+ reply->getResult());
+ {
+ StorBucketDatabase::WrappedEntry entry(
+ _node->getStorageBucketDatabase().get(
+ bid, "foo"));
+ CPPUNIT_ASSERT(entry.exist());
+ CPPUNIT_ASSERT(entry->info.isReady());
+ CPPUNIT_ASSERT(!entry->info.isActive());
+ }
+}
+
+// A CreateBucket with setActive(true) must leave the bucket both ready and
+// active in the bucket database (contrast testNewlyCreatedBucketIsReady).
+void
+FileStorManagerTest::testCreateBucketSetsActiveFlagInDatabaseAndReply()
+{
+ // Fixed copy-paste: the components were previously tagged with the
+ // unrelated name "testNotifyOnSplitSourceOwnershipChanged".
+ TestFileStorComponents c(*this, "testCreateBucketSetsActiveFlagInDatabaseAndReply");
+ setClusterState("storage:2 distributor:1");
+
+ document::BucketId bid(16, 4000);
+ std::shared_ptr<api::CreateBucketCommand> cmd(
+ new api::CreateBucketCommand(bid));
+ cmd->setAddress(api::StorageMessageAddress(
+ "cluster", lib::NodeType::STORAGE, 1));
+ cmd->setSourceIndex(0);
+ cmd->setActive(true);
+ c.top.sendDown(cmd);
+
+ api::CreateBucketReply* reply;
+ ASSERT_SINGLE_REPLY(api::CreateBucketReply, reply, c.top, _waitTime);
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::OK),
+ reply->getResult());
+ {
+ StorBucketDatabase::WrappedEntry entry(
+ _node->getStorageBucketDatabase().get(
+ bid, "foo"));
+ CPPUNIT_ASSERT(entry.exist());
+ CPPUNIT_ASSERT(entry->info.isReady());
+ CPPUNIT_ASSERT(entry->info.isActive());
+ }
+}
+
+} // storage
diff --git a/storage/src/tests/persistence/filestorage/filestormodifiedbucketstest.cpp b/storage/src/tests/persistence/filestorage/filestormodifiedbucketstest.cpp
new file mode 100644
index 00000000000..19b84ef475b
--- /dev/null
+++ b/storage/src/tests/persistence/filestorage/filestormodifiedbucketstest.cpp
@@ -0,0 +1,142 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <memory>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <vespa/storage/persistence/filestorage/modifiedbucketchecker.h>
+#include <tests/persistence/persistenceproviderwrapper.h>
+#include <vespa/persistence/dummyimpl/dummypersistence.h>
+#include <tests/persistence/filestorage/filestortestfixture.h>
+
+namespace storage {
+
+/**
+ * Effectively an integration test between the ModifiedBucketChecker storage
+ * link and the behavior of the filestor component.
+ */
+class FileStorModifiedBucketsTest : public FileStorTestFixture
+{
+public:
+ // Modified buckets reported by the provider should surface as
+ // NotifyBucketChange commands.
+ void modifiedBucketsSendNotifyBucketChange();
+ // Recheck commands must be replied to so new check rounds can start.
+ void fileStorRepliesToRecheckBucketCommands();
+
+ // Activates `count` buckets starting at `first` and reports them as
+ // modified through the dummy persistence provider.
+ void modifyBuckets(uint32_t first, uint32_t count);
+
+ spi::dummy::DummyPersistence& getDummyPersistence() {
+ return dynamic_cast<spi::dummy::DummyPersistence&>(_node->getPersistenceProvider());
+ }
+
+ CPPUNIT_TEST_SUITE(FileStorModifiedBucketsTest);
+ CPPUNIT_TEST(modifiedBucketsSendNotifyBucketChange);
+ CPPUNIT_TEST(fileStorRepliesToRecheckBucketCommands);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(FileStorModifiedBucketsTest);
+
+namespace {
+
+// Injects a ModifiedBucketChecker link above the FileStorManager so these
+// tests exercise the real checker/filestor interaction.
+struct BucketCheckerInjector : FileStorTestFixture::StorageLinkInjector
+{
+ TestServiceLayerApp& _node;
+ FileStorTestFixture& _fixture;
+ BucketCheckerInjector(TestServiceLayerApp& node,
+ FileStorTestFixture& fixture)
+ : _node(node),
+ _fixture(fixture)
+ {}
+ // `override` added: this implements the pure virtual declared in
+ // FileStorTestFixture::StorageLinkInjector.
+ void inject(DummyStorageLink& link) const override {
+ link.push_back(std::unique_ptr<ModifiedBucketChecker>(
+ new ModifiedBucketChecker(_node.getComponentRegister(),
+ _node.getPersistenceProvider(),
+ _fixture._config->getConfigId())));
+ }
+};
+
+// Asserts that `msg` is a NotifyBucketChangeCommand whose bucket info has the
+// active flag set and which is addressed to distributor 0. Throws
+// std::bad_cast (failing the test) if the message has another type.
+void
+assertIsNotifyCommandWithActiveBucket(api::StorageMessage& msg)
+{
+ api::NotifyBucketChangeCommand& cmd(
+ dynamic_cast<api::NotifyBucketChangeCommand&>(msg));
+ CPPUNIT_ASSERT(cmd.getBucketInfo().isActive());
+ CPPUNIT_ASSERT_EQUAL(
+ vespalib::string("StorageMessageAddress(Storage protocol, "
+ "cluster storage, nodetype distributor, index 0)"),
+ cmd.getAddress()->toString());
+}
+
+}
+
+// Marks `count` buckets (ids first..first+count-1 at used-bits 16) active via
+// the SPI, then hands the whole list to the dummy provider as its "modified
+// buckets" result so the checker will pick them up on its next pass.
+void
+FileStorModifiedBucketsTest::modifyBuckets(uint32_t first, uint32_t count)
+{
+ spi::BucketIdListResult::List buckets;
+ for (uint32_t i = 0; i < count; ++i) {
+ buckets.push_back(document::BucketId(16, first + i));
+ _node->getPersistenceProvider().setActiveState(
+ spi::Bucket(buckets[i], spi::PartitionId(0)),
+ spi::BucketInfo::ACTIVE);
+ }
+
+ getDummyPersistence().setModifiedBuckets(buckets);
+}
+
+// Activating buckets and reporting them as modified should make the checker
+// chain emit one NotifyBucketChange per bucket, and the bucket database must
+// reflect the active state afterwards.
+void
+FileStorModifiedBucketsTest::modifiedBucketsSendNotifyBucketChange()
+{
+ BucketCheckerInjector bcj(*_node, *this);
+ TestFileStorComponents c(*this, "modifiedBucketsSendNotifyBucketChange", bcj);
+ setClusterState("storage:1 distributor:1");
+
+ uint32_t numBuckets = 10;
+
+ for (uint32_t i = 0; i < numBuckets; ++i) {
+ document::BucketId bucket(16, i);
+ createBucket(spi::Bucket(bucket, spi::PartitionId(0)));
+ c.sendPut(bucket, DocumentIndex(0), PutTimestamp(1000));
+ }
+ c.top.waitForMessages(numBuckets, MSG_WAIT_TIME);
+ c.top.reset();
+
+ modifyBuckets(0, numBuckets);
+ c.top.waitForMessages(numBuckets, MSG_WAIT_TIME);
+
+ // Use numBuckets instead of the previous hard-coded 10 so the
+ // verification loop stays in sync with the setup above.
+ for (uint32_t i = 0; i < numBuckets; ++i) {
+ assertIsNotifyCommandWithActiveBucket(*c.top.getReply(i));
+
+ StorBucketDatabase::WrappedEntry entry(
+ _node->getStorageBucketDatabase().get(
+ document::BucketId(16, i), "foo"));
+
+ CPPUNIT_ASSERT(entry->info.isActive());
+ }
+}
+
+// The filestor must reply to the checker's internal recheck-bucket commands;
+// otherwise the checker would never start a new getModifiedBuckets round.
+// Verified by running two full modify/notify cycles back to back.
+void
+FileStorModifiedBucketsTest::fileStorRepliesToRecheckBucketCommands()
+{
+ BucketCheckerInjector bcj(*_node, *this);
+ TestFileStorComponents c(*this, "fileStorRepliesToRecheckBucketCommands", bcj);
+ setClusterState("storage:1 distributor:1");
+
+ document::BucketId bucket(16, 0);
+ createBucket(spi::Bucket(bucket, spi::PartitionId(0)));
+ c.sendPut(bucket, DocumentIndex(0), PutTimestamp(1000));
+ c.top.waitForMessages(1, MSG_WAIT_TIME);
+ c.top.reset();
+
+ modifyBuckets(0, 1);
+ c.top.waitForMessages(1, MSG_WAIT_TIME);
+ assertIsNotifyCommandWithActiveBucket(*c.top.getReply(0));
+
+ // If we don't reply to the recheck bucket commands, we won't trigger
+ // a new round of getModifiedBuckets and recheck commands.
+ c.top.reset();
+ createBucket(spi::Bucket(document::BucketId(16, 1), spi::PartitionId(0)));
+ modifyBuckets(1, 1);
+ c.top.waitForMessages(1, MSG_WAIT_TIME);
+ assertIsNotifyCommandWithActiveBucket(*c.top.getReply(0));
+}
+
+} // storage
+
diff --git a/storage/src/tests/persistence/filestorage/filestortestfixture.cpp b/storage/src/tests/persistence/filestorage/filestortestfixture.cpp
new file mode 100644
index 00000000000..69b109b5cfc
--- /dev/null
+++ b/storage/src/tests/persistence/filestorage/filestortestfixture.cpp
@@ -0,0 +1,143 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <sstream>
+#include <vespa/storage/persistence/messages.h>
+#include <vespa/storage/persistence/filestorage/filestormanager.h>
+#include <vespa/persistence/dummyimpl/dummypersistence.h>
+#include <tests/persistence/filestorage/filestortestfixture.h>
+
+namespace storage {
+
+spi::LoadType FileStorTestFixture::defaultLoadType = spi::LoadType(0, "default");
+const uint32_t FileStorTestFixture::MSG_WAIT_TIME;
+
+// Builds the base config plus a second-node variant (_config2, separate root
+// folder and node index 1) and a copy for small-file tweaks (_smallConfig),
+// then constructs the test node with `diskCount` disks as node index 1.
+void
+FileStorTestFixture::setupDisks(uint32_t diskCount)
+{
+ _config.reset(new vdstestlib::DirConfig(getStandardConfig(true)));
+
+ _config2.reset(new vdstestlib::DirConfig(*_config));
+ _config2->getConfig("stor-server").set("root_folder", "vdsroot.2");
+ _config2->getConfig("stor-devices").set("root_folder", "vdsroot.2");
+ _config2->getConfig("stor-server").set("node_index", "1");
+
+ _smallConfig.reset(new vdstestlib::DirConfig(*_config));
+ _node.reset(new TestServiceLayerApp(DiskCount(diskCount), NodeIndex(1),
+ _config->getConfigId()));
+ _testdoctype1 = _node->getTypeRepo()->getDocumentType("testdoctype1");
+}
+
+// Default provider setup which should work out of the box for most tests.
+// Single disk, dummy in-memory persistence provider.
+void
+FileStorTestFixture::setUp()
+{
+ setupDisks(1);
+ _node->setPersistenceProvider(
+ spi::PersistenceProvider::UP(
+ new spi::dummy::DummyPersistence(_node->getTypeRepo(), 1)));
+}
+
+// Destroys the test node (and with it the provider) between test cases.
+void
+FileStorTestFixture::tearDown()
+{
+ _node.reset(0);
+}
+
+// Creates `bid` in the persistence provider (partition 0) and registers it in
+// the storage bucket database on disk 0.
+void
+FileStorTestFixture::createBucket(const document::BucketId& bid)
+{
+ spi::Context context(defaultLoadType, spi::Priority(0),
+ spi::Trace::TraceLevel(0));
+ _node->getPersistenceProvider().createBucket(
+ spi::Bucket(bid, spi::PartitionId(0)), context);
+
+ StorBucketDatabase::WrappedEntry entry(
+ _node->getStorageBucketDatabase().get(bid, "foo",
+ StorBucketDatabase::CREATE_IF_NONEXISTING));
+ entry->disk = 0;
+ // Zeroed info; the two bools appear to be ready=true, active=false —
+ // TODO confirm against api::BucketInfo's constructor.
+ entry->info = api::BucketInfo(0, 0, 0, 0, 0, true, false);
+ entry.write();
+}
+
+// Returns true iff `bucket` has an entry in the storage bucket database.
+bool
+FileStorTestFixture::bucketExistsInDb(const document::BucketId& bucket) const
+{
+ StorBucketDatabase::WrappedEntry entry(
+ _node->getStorageBucketDatabase().get(bucket, "bucketExistsInDb"));
+ return entry.exist();
+}
+
+// Wires up a DummyStorageLink chain with an (optionally injector-provided)
+// extra link on top of a fresh FileStorManager, then opens the chain.
+// Ownership of `manager` is transferred to `top`; the raw pointer member is
+// kept only for direct access in tests.
+FileStorTestFixture::TestFileStorComponents::TestFileStorComponents(
+ FileStorTestFixture& fixture,
+ const char* testName,
+ const StorageLinkInjector& injector)
+ : _testName(testName),
+ _fixture(fixture),
+ manager(new FileStorManager(fixture._config->getConfigId(),
+ fixture._node->getPartitions(),
+ fixture._node->getPersistenceProvider(),
+ fixture._node->getComponentRegister()))
+{
+ injector.inject(top);
+ top.push_back(StorageLink::UP(manager));
+ top.open();
+}
+
+// Address of this storage node (index 0), used as sender for test commands.
+api::StorageMessageAddress
+FileStorTestFixture::TestFileStorComponents::makeSelfAddress() const {
+ return api::StorageMessageAddress("storage", lib::NodeType::STORAGE, 0);
+}
+
+// Sends a max-priority Get for a synthetic document id derived from the
+// bucket id. The document need not exist; the command is used to occupy or
+// probe the persistence queues.
+void
+FileStorTestFixture::TestFileStorComponents::sendDummyGet(
+ const document::BucketId& bid)
+{
+ std::ostringstream id;
+ id << "id:foo:testdoctype1:n=" << bid.getId() << ":0";
+ std::shared_ptr<api::GetCommand> cmd(
+ new api::GetCommand(bid, document::DocumentId(id.str()), "[all]"));
+ cmd->setAddress(makeSelfAddress());
+ cmd->setPriority(255);
+ top.sendDown(cmd);
+}
+
+// Sends a max-priority GetBucketDiff between nodes 0 and 1 for `bid`, with a
+// fixed timestamp. Used to occupy merge-related persistence queues.
+void
+FileStorTestFixture::TestFileStorComponents::sendDummyGetDiff(
+ const document::BucketId& bid)
+{
+ std::vector<api::GetBucketDiffCommand::Node> nodes;
+ nodes.push_back(0);
+ nodes.push_back(1);
+ std::shared_ptr<api::GetBucketDiffCommand> cmd(
+ new api::GetBucketDiffCommand(bid, nodes, 12345));
+ cmd->setAddress(makeSelfAddress());
+ cmd->setPriority(255);
+ top.sendDown(cmd);
+}
+
+// Creates a test document whose id encodes the bucket (n=<bucket id>) and
+// the given index, and sends it as a Put at `timestamp`.
+void
+FileStorTestFixture::TestFileStorComponents::sendPut(
+ const document::BucketId& bid,
+ uint32_t docIdx,
+ uint64_t timestamp)
+{
+ std::ostringstream id;
+ id << "id:foo:testdoctype1:n=" << bid.getId() << ":" << docIdx;
+ document::Document::SP doc(
+ _fixture._node->getTestDocMan().createDocument("foobar", id.str()));
+ std::shared_ptr<api::PutCommand> cmd(
+ new api::PutCommand(bid, doc, timestamp));
+ cmd->setAddress(makeSelfAddress());
+ top.sendDown(cmd);
+}
+
+// Publishes a new cluster state (e.g. "storage:2 distributor:1") to the node.
+void
+FileStorTestFixture::setClusterState(const std::string& state)
+{
+ _node->getStateUpdater().setClusterState(
+ lib::ClusterState::CSP(new lib::ClusterState(state)));
+}
+
+
+} // ns storage
diff --git a/storage/src/tests/persistence/filestorage/filestortestfixture.h b/storage/src/tests/persistence/filestorage/filestortestfixture.h
new file mode 100644
index 00000000000..4f1de549f47
--- /dev/null
+++ b/storage/src/tests/persistence/filestorage/filestortestfixture.h
@@ -0,0 +1,112 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <tests/common/testhelper.h>
+#include <vespa/persistence/spi/persistenceprovider.h>
+#include <vespa/storage/persistence/filestorage/filestormanager.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <tests/common/dummystoragelink.h>
+#include <tests/common/teststorageapp.h>
+#include <tests/common/testhelper.h>
+
+namespace storage {
+
+// Common CppUnit fixture for filestor tests: owns the test node, configs and
+// helpers for creating buckets, driving cluster state and asserting replies.
+class FileStorTestFixture : public CppUnit::TestFixture
+{
+public:
+ static spi::LoadType defaultLoadType;
+
+ std::unique_ptr<TestServiceLayerApp> _node;
+ std::unique_ptr<vdstestlib::DirConfig> _config;
+ // Variant of _config for a second node (separate root folder, index 1).
+ std::unique_ptr<vdstestlib::DirConfig> _config2;
+ std::unique_ptr<vdstestlib::DirConfig> _smallConfig;
+ const document::DocumentType* _testdoctype1;
+
+ static const uint32_t MSG_WAIT_TIME = 60 * 1000;
+
+ typedef uint32_t DocumentIndex;
+ typedef uint64_t PutTimestamp;
+
+ void setUp() override;
+ void tearDown() override;
+ void setupDisks(uint32_t diskCount);
+ void createBucket(const document::BucketId& bid);
+ bool bucketExistsInDb(const document::BucketId& bucket) const;
+
+ api::ReturnCode::Result resultOf(const api::StorageReply& reply) const {
+ return reply.getResult().getResult();
+ }
+ void setClusterState(const std::string&);
+
+ // Hook allowing tests to add extra storage links above the manager.
+ struct StorageLinkInjector
+ {
+ virtual ~StorageLinkInjector() {}
+
+ virtual void inject(DummyStorageLink&) const = 0;
+ };
+
+ struct NoOpStorageLinkInjector : StorageLinkInjector
+ {
+ void inject(DummyStorageLink&) const {}
+ };
+
+ void
+ expectNoReplies(DummyStorageLink& link) {
+ CPPUNIT_ASSERT_EQUAL(size_t(0), link.getNumReplies());
+ }
+
+ // Waits for one reply on `link`, fails unless it is a ReplyType with the
+ // given result code.
+ template <typename ReplyType>
+ void
+ expectReply(DummyStorageLink& link,
+ api::ReturnCode::Result result)
+ {
+ link.waitForMessages(1, 60*1000);
+ api::StorageReply* reply(
+ dynamic_cast<ReplyType*>(link.getReply(0).get()));
+ if (reply == 0) {
+ std::ostringstream ss;
+ ss << "got unexpected reply "
+ << link.getReply(0)->toString(true);
+ CPPUNIT_FAIL(ss.str());
+ }
+ CPPUNIT_ASSERT_EQUAL(result, reply->getResult().getResult());
+ }
+
+ template <typename ReplyType>
+ void
+ expectAbortedReply(DummyStorageLink& link) {
+ expectReply<ReplyType>(link, api::ReturnCode::ABORTED);
+ }
+
+ template <typename ReplyType>
+ void
+ expectOkReply(DummyStorageLink& link) {
+ expectReply<ReplyType>(link, api::ReturnCode::OK);
+ }
+
+
+ // Bundles a DummyStorageLink chain with a FileStorManager for one test.
+ // `manager` is owned by `top`; the raw pointer is for test access only.
+ struct TestFileStorComponents
+ {
+ private:
+ TestName _testName;
+ FileStorTestFixture& _fixture;
+ public:
+ DummyStorageLink top;
+ FileStorManager* manager;
+
+ TestFileStorComponents(FileStorTestFixture& fixture,
+ const char* testName,
+ const StorageLinkInjector& i = NoOpStorageLinkInjector());
+
+ api::StorageMessageAddress makeSelfAddress() const;
+
+ void sendDummyGet(const document::BucketId& bid);
+ void sendPut(const document::BucketId& bid,
+ uint32_t docIdx,
+ uint64_t timestamp);
+ void sendDummyGetDiff(const document::BucketId& bid);
+ };
+};
+
+} // ns storage
diff --git a/storage/src/tests/persistence/filestorage/forwardingmessagesender.h b/storage/src/tests/persistence/filestorage/forwardingmessagesender.h
new file mode 100644
index 00000000000..691e291e534
--- /dev/null
+++ b/storage/src/tests/persistence/filestorage/forwardingmessagesender.h
@@ -0,0 +1,26 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <vespa/storage/common/messagesender.h>
+#include <vespa/storage/common/storagelink.h>
+
+namespace storage {
+
+/**
+ * Simple implementation of MessageSender which simply forwards all messages
+ * to a provided storage link.
+ */
+// NOTE(review): single-argument constructor is implicit; consider `explicit`
+// if no caller relies on the StorageLink& -> ForwardingMessageSender
+// conversion.
+struct ForwardingMessageSender : public MessageSender {
+ // Non-owning reference; the link must outlive this sender.
+ StorageLink& link;
+
+ ForwardingMessageSender(StorageLink& l) : link(l) {}
+
+ void sendCommand(const std::shared_ptr<api::StorageCommand> & cmd)
+ { link.sendUp(cmd); }
+
+ void sendReply(const std::shared_ptr<api::StorageReply> & reply)
+ { link.sendUp(reply); }
+};
+
+} // storage
+
diff --git a/storage/src/tests/persistence/filestorage/mergeblockingtest.cpp b/storage/src/tests/persistence/filestorage/mergeblockingtest.cpp
new file mode 100644
index 00000000000..ff9ec063555
--- /dev/null
+++ b/storage/src/tests/persistence/filestorage/mergeblockingtest.cpp
@@ -0,0 +1,239 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vector>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/storage/persistence/messages.h>
+#include <tests/persistence/persistenceproviderwrapper.h>
+#include <vespa/persistence/dummyimpl/dummypersistence.h>
+#include <tests/persistence/filestorage/filestortestfixture.h>
+
+namespace storage {
+
+// Tests that merge-related operations are rejected (ABORTED) when the target
+// bucket is inconsistent with buckets already present (inner/leaf splits).
+class MergeBlockingTest : public FileStorTestFixture
+{
+public:
+ // Single disk with a dummy persistence provider.
+ void setupDisks() {
+ FileStorTestFixture::setupDisks(1);
+ _node->setPersistenceProvider(
+ spi::PersistenceProvider::UP(
+ new spi::dummy::DummyPersistence(_node->getTypeRepo(), 1)));
+ }
+
+public:
+ void testRejectMergeForInconsistentInnerBucket();
+ void testRejectMergeForInconsistentLeafBucket();
+ void testRejectGetBucketDiffWithInconsistentBucket();
+ void testRejectApplyDiffWhenBucketHasBecomeInconsistent();
+ void testRejectApplyReplyWhenBucketHasBecomeInconsistent();
+ void testRejectGetDiffReplyWhenBucketHasBecomeInconsistent();
+ void testRejectMergeWhenLowUsedBitCount();
+
+ void setUp() override;
+
+ CPPUNIT_TEST_SUITE(MergeBlockingTest);
+ CPPUNIT_TEST(testRejectMergeForInconsistentInnerBucket);
+ CPPUNIT_TEST(testRejectMergeForInconsistentLeafBucket);
+ CPPUNIT_TEST(testRejectGetBucketDiffWithInconsistentBucket);
+ CPPUNIT_TEST(testRejectApplyDiffWhenBucketHasBecomeInconsistent);
+ CPPUNIT_TEST(testRejectApplyReplyWhenBucketHasBecomeInconsistent);
+ CPPUNIT_TEST(testRejectGetDiffReplyWhenBucketHasBecomeInconsistent);
+ CPPUNIT_TEST(testRejectMergeWhenLowUsedBitCount);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(MergeBlockingTest);
+
+// Per-test setup: delegates to the single-disk dummy-provider configuration.
+void
+MergeBlockingTest::setUp()
+{
+ setupDisks();
+}
+
+namespace {
+
+// Sender address used for all commands in this file (storage node 0).
+api::StorageMessageAddress
+makeAddress() {
+ return api::StorageMessageAddress("storage", lib::NodeType::STORAGE, 0);
+}
+
+void
+assignCommandMeta(api::StorageCommand& msg) {
+ msg.setAddress(makeAddress());
+ msg.setSourceIndex(0);
+}
+
+// Two-node merge chain: this node (0) plus node 1.
+std::vector<api::MergeBucketCommand::Node>
+getNodes() {
+ std::vector<api::MergeBucketCommand::Node> nodes;
+ nodes.push_back(0);
+ nodes.push_back(1);
+ return nodes;
+}
+
+// Three-node chain, forcing this node to forward diffs down the chain.
+std::vector<api::MergeBucketCommand::Node>
+getNodesWithForwarding() {
+ std::vector<api::MergeBucketCommand::Node> nodes;
+ nodes.push_back(0);
+ nodes.push_back(1);
+ nodes.push_back(2);
+ return nodes;
+}
+
+std::shared_ptr<api::MergeBucketCommand>
+createMerge(const document::BucketId& bucket) {
+ std::shared_ptr<api::MergeBucketCommand> cmd(
+ new api::MergeBucketCommand(bucket, getNodes(), api::Timestamp(1000)));
+ assignCommandMeta(*cmd);
+ return cmd;
+}
+
+std::shared_ptr<api::GetBucketDiffCommand>
+createGetDiff(const document::BucketId& bucket,
+ const std::vector<api::MergeBucketCommand::Node>& nodes)
+{
+ std::shared_ptr<api::GetBucketDiffCommand> cmd(
+ new api::GetBucketDiffCommand(bucket, nodes, api::Timestamp(1000)));
+ assignCommandMeta(*cmd);
+ return cmd;
+}
+
+std::shared_ptr<api::ApplyBucketDiffCommand>
+createApplyDiff(const document::BucketId& bucket,
+ const std::vector<api::MergeBucketCommand::Node>& nodes) {
+ std::shared_ptr<api::ApplyBucketDiffCommand> cmd(
+ new api::ApplyBucketDiffCommand(bucket, nodes, 1024*1024));
+ assignCommandMeta(*cmd);
+ return cmd;
+}
+
+// leafBucket splits innerBucket (17 vs 16 used bits, same key), and
+// innerBucket2 (15 bits) contains innerBucket — the inconsistent pairs
+// used throughout these tests.
+const document::BucketId leafBucket(17, 1);
+const document::BucketId innerBucket(16, 1);
+const document::BucketId innerBucket2(15, 1);
+
+}
+
+// Merging an inner bucket while only its leaf exists must be aborted and must
+// not implicitly create the inner bucket.
+void
+MergeBlockingTest::testRejectMergeForInconsistentInnerBucket()
+{
+ TestFileStorComponents c(*this, "testRejectMergeForInconsistentInnerBucket");
+ createBucket(leafBucket);
+
+ std::shared_ptr<api::MergeBucketCommand> cmd(createMerge(innerBucket));
+ c.top.sendDown(cmd);
+
+ expectAbortedReply<api::MergeBucketReply>(c.top);
+ CPPUNIT_ASSERT(!bucketExistsInDb(innerBucket));
+}
+
+// Mirror case: merging a leaf bucket while only its inner bucket exists must
+// be aborted and must not implicitly create the leaf bucket.
+void
+MergeBlockingTest::testRejectMergeForInconsistentLeafBucket()
+{
+ // Fixed copy-paste: components were tagged with the Inner-bucket test's
+ // name.
+ TestFileStorComponents c(*this, "testRejectMergeForInconsistentLeafBucket");
+ createBucket(innerBucket);
+
+ std::shared_ptr<api::MergeBucketCommand> cmd(createMerge(leafBucket));
+ c.top.sendDown(cmd);
+
+ expectAbortedReply<api::MergeBucketReply>(c.top);
+ CPPUNIT_ASSERT(!bucketExistsInDb(leafBucket));
+}
+
+// A GetBucketDiff for a leaf that is inconsistent with an existing inner
+// bucket must be aborted instead of implicitly creating the leaf.
+void
+MergeBlockingTest::testRejectGetBucketDiffWithInconsistentBucket()
+{
+ TestFileStorComponents c(*this, "testRejectGetBucketDiffWithInconsistentBucket");
+ CPPUNIT_ASSERT(innerBucket.contains(leafBucket));
+ createBucket(innerBucket);
+
+ std::shared_ptr<api::GetBucketDiffCommand> cmd(createGetDiff(leafBucket, getNodes()));
+ c.top.sendDown(cmd);
+
+ expectAbortedReply<api::GetBucketDiffReply>(c.top);
+ CPPUNIT_ASSERT(!bucketExistsInDb(leafBucket));
+}
+
+// ApplyBucketDiff must be aborted when both the inner bucket and a splitting
+// leaf already exist, i.e. the target is inconsistent at receive time.
+void
+MergeBlockingTest::testRejectApplyDiffWhenBucketHasBecomeInconsistent()
+{
+ TestFileStorComponents c(*this, "testRejectApplyDiffWhenBucketHasBecomeInconsistent");
+ createBucket(leafBucket);
+ createBucket(innerBucket);
+
+ std::shared_ptr<api::ApplyBucketDiffCommand> applyDiff(
+ createApplyDiff(innerBucket, getNodes()));
+ c.top.sendDown(applyDiff);
+
+ expectAbortedReply<api::ApplyBucketDiffReply>(c.top);
+}
+
+// An ApplyBucketDiff *reply* arriving after the bucket has become
+// inconsistent (leaf created while the forwarded diff was in flight) must be
+// aborted rather than applied.
+void
+MergeBlockingTest::testRejectApplyReplyWhenBucketHasBecomeInconsistent()
+{
+ TestFileStorComponents c(*this, "testRejectApplyReplyWhenBucketHasBecomeInconsistent");
+ createBucket(innerBucket);
+
+ std::shared_ptr<api::ApplyBucketDiffCommand> applyDiff(
+ createApplyDiff(innerBucket, getNodesWithForwarding()));
+ c.top.sendDown(applyDiff);
+ c.top.waitForMessages(1, MSG_WAIT_TIME);
+
+ // Capture the diff this node forwarded down the 3-node chain.
+ api::StorageMessage::SP fwdDiff(
+ c.top.getAndRemoveMessage(api::MessageType::APPLYBUCKETDIFF));
+ api::ApplyBucketDiffCommand& diffCmd(
+ dynamic_cast<api::ApplyBucketDiffCommand&>(*fwdDiff));
+
+ api::ApplyBucketDiffReply::SP diffReply(
+ new api::ApplyBucketDiffReply(diffCmd));
+ // Make the bucket inconsistent before the reply is delivered.
+ createBucket(leafBucket);
+ c.top.sendDown(diffReply);
+
+ expectAbortedReply<api::ApplyBucketDiffReply>(c.top);
+}
+
+// Same in-flight-inconsistency scenario for GetBucketDiff replies: creating a
+// containing bucket (15 used bits) before the reply arrives must abort it.
+void
+MergeBlockingTest::testRejectGetDiffReplyWhenBucketHasBecomeInconsistent()
+{
+ TestFileStorComponents c(*this, "testRejectGetDiffReplyWhenBucketHasBecomeInconsistent");
+ createBucket(innerBucket);
+
+ std::shared_ptr<api::GetBucketDiffCommand> getDiff(
+ createGetDiff(innerBucket, getNodesWithForwarding()));
+ c.top.sendDown(getDiff);
+ c.top.waitForMessages(1, MSG_WAIT_TIME);
+
+ api::StorageMessage::SP fwdDiff(
+ c.top.getAndRemoveMessage(api::MessageType::GETBUCKETDIFF));
+ api::GetBucketDiffCommand& diffCmd(
+ dynamic_cast<api::GetBucketDiffCommand&>(*fwdDiff));
+
+ api::GetBucketDiffReply::SP diffReply(
+ new api::GetBucketDiffReply(diffCmd));
+ // innerBucket2 contains innerBucket, making the target inconsistent.
+ createBucket(innerBucket2);
+ c.top.sendDown(diffReply);
+
+ expectAbortedReply<api::GetBucketDiffReply>(c.top);
+}
+
+/**
+ * Test case for buckets in ticket 6389558, comment #4.
+ * Inconsistency detection must also work at very low used-bit counts
+ * (1-bit super bucket vs 2-bit sub bucket).
+ */
+void
+MergeBlockingTest::testRejectMergeWhenLowUsedBitCount()
+{
+ document::BucketId superBucket(1, 0x1);
+ document::BucketId subBucket(2, 0x1);
+
+ CPPUNIT_ASSERT(superBucket.contains(subBucket));
+
+ // Fixed copy-paste: components were tagged with a name belonging to no
+ // test in this suite.
+ TestFileStorComponents c(*this, "testRejectMergeWhenLowUsedBitCount");
+ createBucket(superBucket);
+
+ std::shared_ptr<api::MergeBucketCommand> cmd(createMerge(subBucket));
+ c.top.sendDown(cmd);
+
+ expectAbortedReply<api::MergeBucketReply>(c.top);
+ CPPUNIT_ASSERT(!bucketExistsInDb(subBucket));
+}
+
+} // ns storage
diff --git a/storage/src/tests/persistence/filestorage/modifiedbucketcheckertest.cpp b/storage/src/tests/persistence/filestorage/modifiedbucketcheckertest.cpp
new file mode 100644
index 00000000000..848799fde95
--- /dev/null
+++ b/storage/src/tests/persistence/filestorage/modifiedbucketcheckertest.cpp
@@ -0,0 +1,214 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <tests/common/testhelper.h>
+#include <tests/common/storagelinktest.h>
+#include <tests/common/teststorageapp.h>
+#include <tests/common/dummystoragelink.h>
+#include <vespa/persistence/dummyimpl/dummypersistence.h>
+#include <vespa/storage/persistence/filestorage/modifiedbucketchecker.h>
+#include <vespa/storage/persistence/messages.h>
+
+namespace storage {
+
+class ModifiedBucketCheckerTest : public CppUnit::TestFixture
+{
+public:
+ enum {
+ MESSAGE_WAIT_TIME = 60*2
+ };
+
+ void setUp();
+ void tearDown();
+
+ void testModifiedBucketThreadSendsRecheckBucketCommands();
+ void testDoNotCheckModifiedBucketsIfAlreadyPending();
+ void testBucketCheckerOnlySwallowsRecheckBucketReplies();
+ void testRecheckRequestsAreChunked();
+ void testInvalidChunkSizeConfigIsRejected();
+
+ CPPUNIT_TEST_SUITE(ModifiedBucketCheckerTest);
+ CPPUNIT_TEST(testModifiedBucketThreadSendsRecheckBucketCommands);
+ CPPUNIT_TEST(testDoNotCheckModifiedBucketsIfAlreadyPending);
+ CPPUNIT_TEST(testBucketCheckerOnlySwallowsRecheckBucketReplies);
+ CPPUNIT_TEST(testRecheckRequestsAreChunked);
+ CPPUNIT_TEST(testInvalidChunkSizeConfigIsRejected);
+ CPPUNIT_TEST_SUITE_END();
+private:
+ spi::dummy::DummyPersistence& getDummyPersistence() {
+ return static_cast<spi::dummy::DummyPersistence&>(
+ _node->getPersistenceProvider());
+ }
+ void expectCommandsAndSendReplies(uint32_t count, uint32_t firstBucket);
+ void modifyBuckets(uint32_t count, uint32_t firstBucket);
+ void replyToAll(const std::vector<api::StorageMessage::SP>& messages,
+ uint32_t firstBucket);
+
+ std::unique_ptr<DummyStorageLink> _top;
+ ModifiedBucketChecker* _handler;
+ DummyStorageLink* _bottom;
+
+ std::unique_ptr<TestServiceLayerApp> _node;
+ std::unique_ptr<vdstestlib::DirConfig> _config;
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(ModifiedBucketCheckerTest);
+
+void
+ModifiedBucketCheckerTest::setUp()
+{
+ _config.reset(new vdstestlib::DirConfig(getStandardConfig(true)));
+ _node.reset(new TestServiceLayerApp(DiskCount(1), NodeIndex(0),
+ _config->getConfigId()));
+ _node->setupDummyPersistence();
+
+ _top.reset(new DummyStorageLink);
+ _handler = new ModifiedBucketChecker(_node->getComponentRegister(),
+ _node->getPersistenceProvider(),
+ _config->getConfigId());
+ _top->push_back(std::unique_ptr<StorageLink>(_handler));
+ _bottom = new DummyStorageLink;
+ _handler->push_back(std::unique_ptr<StorageLink>(_bottom));
+}
+
+void
+ModifiedBucketCheckerTest::tearDown()
+{
+ _top->close();
+ _top.reset(0);
+ _node.reset(0);
+ _config.reset(0);
+}
+
+void
+ModifiedBucketCheckerTest::modifyBuckets(uint32_t count, uint32_t firstBucket)
+{
+ spi::BucketIdListResult::List buckets;
+ for (uint32_t i = firstBucket; i < firstBucket + count; ++i) {
+ buckets.push_back(document::BucketId(16, i));
+ }
+ getDummyPersistence().setModifiedBuckets(buckets);
+}
+
+void
+ModifiedBucketCheckerTest::replyToAll(
+ const std::vector<api::StorageMessage::SP>& messages,
+ uint32_t firstBucket)
+{
+ for (uint32_t i = 0; i < messages.size(); ++i) {
+ RecheckBucketInfoCommand& cmd(
+ dynamic_cast<RecheckBucketInfoCommand&>(*messages[i]));
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(16, i+firstBucket),
+ cmd.getBucketId());
+ _bottom->sendUp(cmd.makeReply());
+ }
+}
+
+void
+ModifiedBucketCheckerTest::expectCommandsAndSendReplies(
+ uint32_t count, uint32_t firstBucket)
+{
+ std::vector<api::StorageMessage::SP> messages(_bottom->getCommandsOnce());
+ CPPUNIT_ASSERT_EQUAL(size_t(count), messages.size());
+ replyToAll(messages, firstBucket);
+}
+
+void
+ModifiedBucketCheckerTest::testModifiedBucketThreadSendsRecheckBucketCommands()
+{
+ _top->open(); // Multi-threaded test
+ modifyBuckets(3, 0);
+ // Should now get 3 RecheckBucketInfo commands down the dummy link.
+ _bottom->waitForMessages(3, MESSAGE_WAIT_TIME);
+ expectCommandsAndSendReplies(3, 0);
+ // No replies should reach top link
+ CPPUNIT_ASSERT_EQUAL(size_t(0), _top->getNumReplies());
+}
+
+void
+ModifiedBucketCheckerTest::testDoNotCheckModifiedBucketsIfAlreadyPending()
+{
+ _handler->setUnitTestingSingleThreadedMode();
+ _top->open();
+ modifyBuckets(3, 0);
+ _handler->tick();
+
+ std::vector<api::StorageMessage::SP> messages(_bottom->getCommandsOnce());
+ CPPUNIT_ASSERT_EQUAL(size_t(3), messages.size());
+
+ modifyBuckets(3, 3);
+ _handler->tick();
+ expectCommandsAndSendReplies(0, 0);
+ // After replies received, tick should send new requests again.
+ replyToAll(messages, 0);
+ _handler->tick();
+ expectCommandsAndSendReplies(3, 3);
+}
+
+void
+ModifiedBucketCheckerTest::testBucketCheckerOnlySwallowsRecheckBucketReplies()
+{
+ _top->open();
+ DestroyIteratorCommand cmd(spi::IteratorId(123));
+ _bottom->sendUp(api::StorageMessage::SP(cmd.makeReply()));
+ CPPUNIT_ASSERT_EQUAL(size_t(1), _top->getNumReplies());
+}
+
+void
+ModifiedBucketCheckerTest::testRecheckRequestsAreChunked()
+{
+ namespace cfgns = vespa::config::content::core;
+ _handler->setUnitTestingSingleThreadedMode();
+ _top->open();
+ cfgns::StorServerConfigBuilder cfgBuilder;
+ cfgBuilder.bucketRecheckingChunkSize = 2;
+ _handler->configure(std::unique_ptr<cfgns::StorServerConfig>(
+ new cfgns::StorServerConfig(cfgBuilder)));
+
+ modifyBuckets(5, 0);
+ _handler->tick();
+
+    modifyBuckets(1, 10); // should not be checked yet.
+ // Rechecks should now be done in 3 chunks of 2, 2 and 1 each, respectively.
+ expectCommandsAndSendReplies(2, 0);
+
+ _handler->tick();
+ expectCommandsAndSendReplies(2, 2);
+
+ _handler->tick();
+ expectCommandsAndSendReplies(1, 4);
+
+ // New round of fetching
+ _handler->tick();
+ expectCommandsAndSendReplies(1, 10);
+
+ // And done!
+ _handler->tick();
+ expectCommandsAndSendReplies(0, 0);
+}
+
+void
+ModifiedBucketCheckerTest::testInvalidChunkSizeConfigIsRejected()
+{
+ namespace cfgns = vespa::config::content::core;
+ _handler->setUnitTestingSingleThreadedMode();
+ _top->open();
+ cfgns::StorServerConfigBuilder cfgBuilder;
+ cfgBuilder.bucketRecheckingChunkSize = 0;
+ try {
+ _handler->configure(std::unique_ptr<cfgns::StorServerConfig>(
+ new cfgns::StorServerConfig(cfgBuilder)));
+ CPPUNIT_FAIL("Expected bad config to be rejected");
+ } catch (const config::InvalidConfigException&) {
+ // Happy days
+ } catch (...) {
+ CPPUNIT_FAIL("Got unexpected exception");
+ }
+}
+
+// RecheckBucketInfoCommand handling is done in persistence threads,
+// so that functionality is tested in the filestor tests.
+
+} // ns storage
+
diff --git a/storage/src/tests/persistence/filestorage/operationabortingtest.cpp b/storage/src/tests/persistence/filestorage/operationabortingtest.cpp
new file mode 100644
index 00000000000..0d6583cacdb
--- /dev/null
+++ b/storage/src/tests/persistence/filestorage/operationabortingtest.cpp
@@ -0,0 +1,470 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vector>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/storage/persistence/messages.h>
+#include <tests/persistence/persistenceproviderwrapper.h>
+#include <vespa/persistence/dummyimpl/dummypersistence.h>
+#include <tests/persistence/filestorage/filestortestfixture.h>
+#include <vespa/vespalib/util/barrier.h>
+#include <vespa/vespalib/util/thread.h>
+
+#include <vespa/log/log.h>
+LOG_SETUP(".operationabortingtest");
+
+namespace storage {
+
+namespace {
+
+// Exploit the fact that PersistenceProviderWrapper already provides a forwarding
+// implementation of all SPI calls, so we can selectively override.
+class BlockingMockProvider : public PersistenceProviderWrapper
+{
+ vespalib::Barrier& _queueBarrier;
+ vespalib::Barrier& _completionBarrier;
+public:
+ typedef std::unique_ptr<BlockingMockProvider> UP;
+
+ mutable uint32_t _bucketInfoInvocations;
+ uint32_t _createBucketInvocations;
+ uint32_t _deleteBucketInvocations;
+
+ BlockingMockProvider(spi::PersistenceProvider& wrappedProvider,
+ vespalib::Barrier& queueBarrier,
+ vespalib::Barrier& completionBarrier)
+ : PersistenceProviderWrapper(wrappedProvider),
+ _queueBarrier(queueBarrier),
+ _completionBarrier(completionBarrier),
+ _bucketInfoInvocations(0),
+ _createBucketInvocations(0),
+ _deleteBucketInvocations(0)
+ {}
+
+ spi::Result put(const spi::Bucket& bucket,
+ spi::Timestamp timestamp,
+ const document::Document::SP& doc,
+ spi::Context& context) override
+ {
+ (void) bucket;
+ (void) timestamp;
+ (void) doc;
+ (void) context;
+ _queueBarrier.await();
+        // message abort stage with active operation in disk queue
+ FastOS_Thread::Sleep(75);
+ _completionBarrier.await();
+ // test finished
+ return spi::Result();
+ }
+
+ spi::BucketInfoResult getBucketInfo(const spi::Bucket& bucket) const override {
+ ++_bucketInfoInvocations;
+ return PersistenceProviderWrapper::getBucketInfo(bucket);
+ }
+
+ spi::Result createBucket(const spi::Bucket& bucket, spi::Context& ctx) override {
+ ++_createBucketInvocations;
+ return PersistenceProviderWrapper::createBucket(bucket, ctx);
+ }
+
+ spi::Result deleteBucket(const spi::Bucket& bucket, spi::Context& ctx) override {
+ ++_deleteBucketInvocations;
+ return PersistenceProviderWrapper::deleteBucket(bucket, ctx);
+ }
+};
+
+spi::LoadType defaultLoadType(0, "default");
+
+}
+
+class OperationAbortingTest : public FileStorTestFixture
+{
+public:
+ spi::PersistenceProvider::UP _dummyProvider;
+ BlockingMockProvider* _blockingProvider;
+ std::unique_ptr<vespalib::Barrier> _queueBarrier;
+ std::unique_ptr<vespalib::Barrier> _completionBarrier;
+
+ void setupDisks(uint32_t diskCount, uint32_t queueBarrierThreads) {
+ FileStorTestFixture::setupDisks(diskCount);
+ _dummyProvider.reset(new spi::dummy::DummyPersistence(
+ _node->getTypeRepo(), diskCount));
+ _queueBarrier.reset(new vespalib::Barrier(queueBarrierThreads));
+ _completionBarrier.reset(new vespalib::Barrier(2));
+ _blockingProvider = new BlockingMockProvider(*_dummyProvider,
+ *_queueBarrier, *_completionBarrier);
+
+ _node->setPersistenceProvider(
+ spi::PersistenceProvider::UP(_blockingProvider));
+ }
+
+ void validateReplies(DummyStorageLink& link,
+ size_t repliesTotal,
+ const std::vector<document::BucketId>& okReplies,
+ const std::vector<document::BucketId>& abortedGetDiffs);
+
+ void doTestSpecificOperationsNotAborted(
+ const char* testName,
+ const std::vector<api::StorageMessage::SP>& msgs,
+ bool shouldCreateBucketInitially);
+
+ api::BucketInfo getBucketInfoFromDB(const document::BucketId&) const;
+
+public:
+ void testAbortMessageClearsRelevantQueuedOperations();
+ void testWaitForCurrentOperationCompletionForAbortedBucket();
+ void testDoNotAbortCreateBucketCommands();
+ void testDoNotAbortRecheckBucketCommands();
+ void testDoNotAbortDeleteBucketCommands();
+
+ void setUp() override;
+
+ CPPUNIT_TEST_SUITE(OperationAbortingTest);
+ CPPUNIT_TEST(testAbortMessageClearsRelevantQueuedOperations);
+ CPPUNIT_TEST(testWaitForCurrentOperationCompletionForAbortedBucket);
+ CPPUNIT_TEST(testDoNotAbortCreateBucketCommands);
+ CPPUNIT_TEST(testDoNotAbortRecheckBucketCommands);
+ CPPUNIT_TEST(testDoNotAbortDeleteBucketCommands);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(OperationAbortingTest);
+
+namespace {
+
+template <typename T, typename Collection>
+bool
+existsIn(const T& elem, const Collection& collection) {
+ return (std::find(collection.begin(), collection.end(), elem)
+ != collection.end());
+}
+
+}
+
+void
+OperationAbortingTest::setUp()
+{
+}
+
+void
+OperationAbortingTest::validateReplies(
+ DummyStorageLink& link,
+ size_t repliesTotal,
+ const std::vector<document::BucketId>& okReplies,
+ const std::vector<document::BucketId>& abortedGetDiffs)
+{
+ link.waitForMessages(repliesTotal, MSG_WAIT_TIME);
+ CPPUNIT_ASSERT_EQUAL(repliesTotal, link.getNumReplies());
+
+ for (uint32_t i = 0; i < repliesTotal; ++i) {
+ api::StorageReply& reply(
+ dynamic_cast<api::StorageReply&>(*link.getReply(i)));
+ LOG(info, "Checking reply %s", reply.toString(true).c_str());
+ switch (static_cast<uint32_t>(reply.getType().getId())) {
+ case api::MessageType::PUT_REPLY_ID:
+ case api::MessageType::CREATEBUCKET_REPLY_ID:
+ case api::MessageType::DELETEBUCKET_REPLY_ID:
+ case api::MessageType::GET_REPLY_ID:
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode::OK, resultOf(reply));
+ break;
+ case api::MessageType::GETBUCKETDIFF_REPLY_ID:
+ {
+ api::GetBucketDiffReply& gr(
+ static_cast<api::GetBucketDiffReply&>(reply));
+ if (existsIn(gr.getBucketId(), abortedGetDiffs)) {
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode::ABORTED, resultOf(reply));
+ } else {
+ CPPUNIT_ASSERT(existsIn(gr.getBucketId(), okReplies));
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode::OK, resultOf(reply));
+ }
+ break;
+ }
+ case api::MessageType::INTERNAL_REPLY_ID:
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode::OK, resultOf(reply));
+ break;
+ default:
+ CPPUNIT_FAIL("got unknown reply type");
+ }
+ }
+}
+
+namespace {
+
+template <typename Container>
+AbortBucketOperationsCommand::SP
+makeAbortCmd(const Container& buckets)
+{
+ std::unique_ptr<AbortBucketOperationsCommand::AbortPredicate> pred(
+ new AbortBucketOperationsCommand::ExplicitBucketSetPredicate(
+ buckets.begin(), buckets.end()));
+ AbortBucketOperationsCommand::SP cmd(
+ new AbortBucketOperationsCommand(std::move(pred)));
+ return cmd;
+}
+
+}
+
+void
+OperationAbortingTest::testAbortMessageClearsRelevantQueuedOperations()
+{
+ uint32_t queueBarrierThreads = 2;
+ setupDisks(1, queueBarrierThreads);
+ TestFileStorComponents c(*this, "testAbortMessageClearsRelevantQueuedOperations");
+ document::BucketId bucket(16, 1);
+ createBucket(bucket);
+ LOG(info, "Sending put to trigger thread barrier");
+ c.sendPut(bucket, DocumentIndex(0), PutTimestamp(1000));
+ LOG(info, "waiting for test and persistence thread to reach barriers");
+ _queueBarrier->await();
+ LOG(info, "barrier passed");
+ /*
+     * All load we send down to filestor from now on will be enqueued, as the
+ * persistence thread is blocked.
+ *
+ * Cannot abort the bucket we're blocking the thread on since we'll
+ * deadlock the test if we do.
+ */
+ std::vector<document::BucketId> bucketsToAbort;
+ bucketsToAbort.push_back(document::BucketId(16, 3));
+ bucketsToAbort.push_back(document::BucketId(16, 5));
+ std::vector<document::BucketId> bucketsToKeep;
+ bucketsToKeep.push_back(document::BucketId(16, 2));
+ bucketsToKeep.push_back(document::BucketId(16, 4));
+
+ for (uint32_t i = 0; i < bucketsToAbort.size(); ++i) {
+ createBucket(bucketsToAbort[i]);
+ c.sendDummyGetDiff(bucketsToAbort[i]);
+ }
+ for (uint32_t i = 0; i < bucketsToKeep.size(); ++i) {
+ createBucket(bucketsToKeep[i]);
+ c.sendDummyGetDiff(bucketsToKeep[i]);
+ }
+
+ AbortBucketOperationsCommand::SP abortCmd(makeAbortCmd(bucketsToAbort));
+ c.top.sendDown(abortCmd);
+
+ LOG(info, "waiting on completion barrier");
+ _completionBarrier->await();
+
+ // put+abort+get replies
+ size_t expectedMsgs(2 + bucketsToAbort.size() + bucketsToKeep.size());
+ LOG(info, "barrier passed, waiting for %zu replies", expectedMsgs);
+
+ validateReplies(c.top, expectedMsgs, bucketsToKeep, bucketsToAbort);
+}
+
+namespace {
+
+/**
+ * Sending an abort while we're processing a message for a bucket in its set
+ * will block until the operation has completed. Therefore we logically cannot
+ * do any operations to trigger the operation to complete after the send in
+ * the same thread as we're sending in...
+ */
+class SendTask : public vespalib::Runnable
+{
+ AbortBucketOperationsCommand::SP _abortCmd;
+ vespalib::Barrier& _queueBarrier;
+ StorageLink& _downLink;
+public:
+ SendTask(const AbortBucketOperationsCommand::SP& abortCmd,
+ vespalib::Barrier& queueBarrier,
+ StorageLink& downLink)
+ : _abortCmd(abortCmd),
+ _queueBarrier(queueBarrier),
+ _downLink(downLink)
+ {}
+
+ void run() {
+ // Best-effort synchronized starting
+ _queueBarrier.await();
+ _downLink.sendDown(_abortCmd);
+ }
+};
+
+}
+
+/**
+ * This test basically is not fully deterministic in that it tests cross-thread
+ * behavior on mutexes that are not visible to the thread itself and where there
+ * are no available side-effects to consistently sync around. However, it should
+ * impose sufficient ordering guarantees that it never provides false positives
+ * as long as the tested functionality is in fact correct.
+ */
+void
+OperationAbortingTest::testWaitForCurrentOperationCompletionForAbortedBucket()
+{
+ uint32_t queueBarrierThreads = 3;
+ setupDisks(1, queueBarrierThreads);
+ TestFileStorComponents c(*this, "testWaitForCurrentOperationCompletionForAbortedBucket");
+
+ document::BucketId bucket(16, 1);
+ createBucket(bucket);
+ LOG(info, "Sending put to trigger thread barrier");
+ c.sendPut(bucket, DocumentIndex(0), PutTimestamp(1000));
+
+ std::vector<document::BucketId> abortSet { bucket };
+ AbortBucketOperationsCommand::SP abortCmd(makeAbortCmd(abortSet));
+
+ SendTask sendTask(abortCmd, *_queueBarrier, c.top);
+ vespalib::Thread thread(sendTask);
+ thread.start();
+
+ LOG(info, "waiting for threads to reach barriers");
+ _queueBarrier->await();
+ LOG(info, "barrier passed");
+
+ LOG(info, "waiting on completion barrier");
+ _completionBarrier->await();
+
+ thread.stop();
+ thread.join();
+
+ // If waiting works, put reply shall always be ordered before the internal
+ // reply, as it must finish processing fully before the abort returns.
+ c.top.waitForMessages(2, MSG_WAIT_TIME);
+ CPPUNIT_ASSERT_EQUAL(size_t(2), c.top.getNumReplies());
+ CPPUNIT_ASSERT_EQUAL(api::MessageType::PUT_REPLY,
+ c.top.getReply(0)->getType());
+ CPPUNIT_ASSERT_EQUAL(api::MessageType::INTERNAL_REPLY,
+ c.top.getReply(1)->getType());
+}
+
+void
+OperationAbortingTest::testDoNotAbortCreateBucketCommands()
+{
+ document::BucketId bucket(16, 1);
+ std::vector<api::StorageMessage::SP> msgs;
+ msgs.push_back(api::StorageMessage::SP(new api::CreateBucketCommand(bucket)));
+
+ bool shouldCreateBucketInitially(false);
+ doTestSpecificOperationsNotAborted(
+ "testDoNotAbortCreateBucketCommands",
+ msgs,
+ shouldCreateBucketInitially);
+}
+
+void
+OperationAbortingTest::testDoNotAbortRecheckBucketCommands()
+{
+ document::BucketId bucket(16, 1);
+ std::vector<api::StorageMessage::SP> msgs;
+ msgs.push_back(api::StorageMessage::SP(new RecheckBucketInfoCommand(bucket)));
+
+ bool shouldCreateBucketInitially(true);
+ doTestSpecificOperationsNotAborted(
+ "testDoNotAbortRecheckBucketCommands",
+ msgs,
+ shouldCreateBucketInitially);
+}
+
+api::BucketInfo
+OperationAbortingTest::getBucketInfoFromDB(const document::BucketId& id) const
+{
+ StorBucketDatabase::WrappedEntry entry(
+ _node->getStorageBucketDatabase().get(id, "foo",
+ StorBucketDatabase::CREATE_IF_NONEXISTING));
+ CPPUNIT_ASSERT(entry.exist());
+ return entry->info;
+}
+
+void
+OperationAbortingTest::testDoNotAbortDeleteBucketCommands()
+{
+ document::BucketId bucket(16, 1);
+ std::vector<api::StorageMessage::SP> msgs;
+ api::DeleteBucketCommand::SP cmd(new api::DeleteBucketCommand(bucket));
+ msgs.push_back(cmd);
+
+ bool shouldCreateBucketInitially(true);
+ doTestSpecificOperationsNotAborted(
+ "testDoNotAbortRecheckBucketCommands",
+ msgs,
+ shouldCreateBucketInitially);
+}
+
+void
+OperationAbortingTest::doTestSpecificOperationsNotAborted(
+ const char* testName,
+ const std::vector<api::StorageMessage::SP>& msgs,
+ bool shouldCreateBucketInitially)
+{
+ uint32_t queueBarrierThreads = 2;
+ setupDisks(1, queueBarrierThreads);
+ TestFileStorComponents c(*this, testName);
+ document::BucketId bucket(16, 1);
+ document::BucketId blockerBucket(16, 2);
+
+ if (shouldCreateBucketInitially) {
+ createBucket(bucket);
+ }
+ createBucket(blockerBucket);
+ LOG(info, "Sending put to trigger thread barrier");
+ c.sendPut(blockerBucket, DocumentIndex(0), PutTimestamp(1000));
+ LOG(info, "waiting for test and persistence thread to reach barriers");
+ _queueBarrier->await();
+ LOG(info, "barrier passed");
+
+ uint32_t expectedCreateBuckets = 0;
+ uint32_t expectedDeleteBuckets = 0;
+ uint32_t expectedBucketInfoInvocations = 1; // from blocker put
+ uint32_t expectedRecheckReplies = 0;
+
+ for (uint32_t i = 0; i < msgs.size(); ++i) {
+ switch (msgs[i]->getType().getId()) {
+ case api::MessageType::CREATEBUCKET_ID:
+ ++expectedCreateBuckets;
+ break;
+ case api::MessageType::DELETEBUCKET_ID:
+ {
+ api::DeleteBucketCommand& delCmd(
+ dynamic_cast<api::DeleteBucketCommand&>(*msgs[i]));
+ delCmd.setBucketInfo(getBucketInfoFromDB(delCmd.getBucketId()));
+ }
+ ++expectedDeleteBuckets;
+ ++expectedBucketInfoInvocations;
+ break;
+ case api::MessageType::INTERNAL_ID:
+ ++expectedRecheckReplies;
+ ++expectedBucketInfoInvocations;
+ break;
+ default:
+ CPPUNIT_FAIL("unsupported message type");
+ }
+ c.top.sendDown(msgs[i]);
+ }
+
+ std::vector<document::BucketId> abortSet { bucket };
+ AbortBucketOperationsCommand::SP abortCmd(makeAbortCmd(abortSet));
+ c.top.sendDown(abortCmd);
+
+ LOG(info, "waiting on completion barrier");
+ _completionBarrier->await();
+
+ // At this point, the recheck command is still either enqueued, is processing
+ // or has finished. Since it does not generate any replies, send a low priority
+ // get which will wait until it has finished processing.
+ c.sendDummyGet(blockerBucket);
+
+ // put+abort+get + any other creates/deletes/rechecks
+ size_t expectedMsgs(3 + expectedCreateBuckets + expectedDeleteBuckets
+ + expectedRecheckReplies);
+ LOG(info, "barrier passed, waiting for %zu replies", expectedMsgs);
+
+ std::vector<document::BucketId> okReplies;
+ okReplies.push_back(bucket);
+ okReplies.push_back(blockerBucket);
+ std::vector<document::BucketId> abortedGetDiffs;
+ validateReplies(c.top, expectedMsgs, okReplies, abortedGetDiffs);
+
+ CPPUNIT_ASSERT_EQUAL(expectedBucketInfoInvocations,
+ _blockingProvider->_bucketInfoInvocations);
+ CPPUNIT_ASSERT_EQUAL(expectedCreateBuckets + (shouldCreateBucketInitially ? 2 : 1),
+ _blockingProvider->_createBucketInvocations);
+ CPPUNIT_ASSERT_EQUAL(expectedDeleteBuckets,
+ _blockingProvider->_deleteBucketInvocations);
+}
+
+
+} // storage
diff --git a/storage/src/tests/persistence/filestorage/sanitycheckeddeletetest.cpp b/storage/src/tests/persistence/filestorage/sanitycheckeddeletetest.cpp
new file mode 100644
index 00000000000..9b492a3aaa6
--- /dev/null
+++ b/storage/src/tests/persistence/filestorage/sanitycheckeddeletetest.cpp
@@ -0,0 +1,78 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <tests/persistence/persistenceproviderwrapper.h>
+#include <vespa/persistence/dummyimpl/dummypersistence.h>
+#include <tests/persistence/filestorage/filestortestfixture.h>
+
+namespace storage {
+
+class SanityCheckedDeleteTest : public FileStorTestFixture
+{
+public:
+ void testDeleteBucketFailsWhenProviderOutOfSync();
+
+ CPPUNIT_TEST_SUITE(SanityCheckedDeleteTest);
+ CPPUNIT_TEST(testDeleteBucketFailsWhenProviderOutOfSync);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(SanityCheckedDeleteTest);
+
+void
+SanityCheckedDeleteTest::testDeleteBucketFailsWhenProviderOutOfSync()
+{
+ TestFileStorComponents c(*this, "testDeleteBucketFailsWhenProviderOutOfSync");
+ document::BucketId bucket(8, 123);
+ document::BucketId syncBucket(8, 234);
+ spi::Bucket spiBucket(bucket, spi::PartitionId(0));
+
+ createBucket(bucket);
+ // Send a put to ensure bucket isn't empty.
+ c.sendPut(bucket, DocumentIndex(0), PutTimestamp(1000));
+ c.top.waitForMessages(1, MSG_WAIT_TIME);
+ c.top.getRepliesOnce();
+ spi::BucketInfo infoBefore(
+ _node->getPersistenceProvider()
+ .getBucketInfo(spiBucket).getBucketInfo());
+
+ createBucket(syncBucket);
+
+ api::BucketInfo serviceLayerInfo(1, 2, 3, 4, 5, true, false);
+ {
+ StorBucketDatabase::WrappedEntry entry(
+ _node->getStorageBucketDatabase().get(bucket, "foo",
+ StorBucketDatabase::CREATE_IF_NONEXISTING));
+ entry->disk = 0;
+ entry->info = serviceLayerInfo;
+ entry.write();
+ }
+
+ std::shared_ptr<api::DeleteBucketCommand> cmd(
+ new api::DeleteBucketCommand(bucket));
+ cmd->setBucketInfo(serviceLayerInfo);
+
+ c.top.sendDown(cmd);
+ c.top.waitForMessages(1, MSG_WAIT_TIME);
+ api::StorageMessage::SP reply(c.top.getReply(0));
+ api::DeleteBucketReply& deleteReply(
+ dynamic_cast<api::DeleteBucketReply&>(*reply));
+ // Reply happens in a filestor manager context and before the sanity
+ // check kicks in, meaning it will always be OK.
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode::OK, resultOf(deleteReply));
+ // At this point we do not know if the scheduled delete has been
+ // executed; it may still be in the persistence queue.
+ // Send a put to another bucket to serialize the operation (guaranteed
+ // since we only have 1 thread and the delete always has max priority).
+ c.sendPut(syncBucket, DocumentIndex(0), PutTimestamp(1001));
+ c.top.waitForMessages(1, MSG_WAIT_TIME);
+ // Should still be able to get identical bucket info for bucket.
+ spi::BucketInfoResult infoResult(
+ _node->getPersistenceProvider().getBucketInfo(spiBucket));
+ CPPUNIT_ASSERT_MSG(infoResult.getErrorMessage(), !infoResult.hasError());
+ CPPUNIT_ASSERT(infoBefore == infoResult.getBucketInfo());
+}
+
+} // namespace storage
diff --git a/storage/src/tests/persistence/filestorage/singlebucketjointest.cpp b/storage/src/tests/persistence/filestorage/singlebucketjointest.cpp
new file mode 100644
index 00000000000..480652207d3
--- /dev/null
+++ b/storage/src/tests/persistence/filestorage/singlebucketjointest.cpp
@@ -0,0 +1,51 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <vespa/storageapi/message/bucketsplitting.h>
+#include <tests/persistence/persistenceproviderwrapper.h>
+#include <vespa/persistence/dummyimpl/dummypersistence.h>
+#include <tests/persistence/filestorage/filestortestfixture.h>
+
+LOG_SETUP(".singlebucketjointest");
+
+namespace storage {
+
+class SingleBucketJoinTest : public FileStorTestFixture
+{
+public:
+ void testPersistenceCanHandleSingleBucketJoin();
+
+ CPPUNIT_TEST_SUITE(SingleBucketJoinTest);
+ CPPUNIT_TEST(testPersistenceCanHandleSingleBucketJoin);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(SingleBucketJoinTest);
+
+void
+SingleBucketJoinTest::testPersistenceCanHandleSingleBucketJoin()
+{
+ TestFileStorComponents c(*this, "testPersistenceCanHandleSingleBucketJoin");
+ document::BucketId targetBucket(16, 1);
+ document::BucketId sourceBucket(17, 1);
+
+ createBucket(sourceBucket);
+ // Make sure it's not empty
+ c.sendPut(sourceBucket, DocumentIndex(0), PutTimestamp(1000));
+ expectOkReply<api::PutReply>(c.top);
+ c.top.getRepliesOnce();
+
+ auto cmd = std::make_shared<api::JoinBucketsCommand>(targetBucket);
+ cmd->getSourceBuckets().push_back(sourceBucket);
+ cmd->getSourceBuckets().push_back(sourceBucket);
+
+ c.top.sendDown(cmd);
+ // If single bucket join locking is not working properly, this
+ // will hang forever.
+ expectOkReply<api::JoinBucketsReply>(c.top);
+}
+
+} // namespace storage
diff --git a/storage/src/tests/persistence/legacyoperationhandlertest.cpp b/storage/src/tests/persistence/legacyoperationhandlertest.cpp
new file mode 100644
index 00000000000..ca496f4a260
--- /dev/null
+++ b/storage/src/tests/persistence/legacyoperationhandlertest.cpp
@@ -0,0 +1,190 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/document/base/testdocrepo.h>
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/documentapi/loadtypes/loadtype.h>
+#include <vespa/storage/persistence/messages.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/storageapi/message/multioperation.h>
+#include <vespa/persistence/dummyimpl/dummypersistence.h>
+#include <tests/persistence/persistencetestutils.h>
+#include <vespa/storage/persistence/types.h>
+
+using document::DocumentTypeRepo;
+using document::TestDocRepo;
+
+namespace storage {
+
+class LegacyOperationHandlerTest : public SingleDiskPersistenceTestUtils
+{
+ CPPUNIT_TEST_SUITE(LegacyOperationHandlerTest);
+ CPPUNIT_TEST(testMultioperationSingleBodyPut);
+ CPPUNIT_TEST(testMultioperationSingleRemove);
+ CPPUNIT_TEST(testMultioperationSingleUpdate);
+ CPPUNIT_TEST(testMultioperationUpdateNotFound);
+ CPPUNIT_TEST(testMultioperationMixedOperations);
+ CPPUNIT_TEST_SUITE_END();
+
+public:
+ void setUp() {
+ SingleDiskPersistenceTestUtils::setUp();
+ createBucket(document::BucketId(16, 4));
+ spi::Context context(spi::LoadType(0, "default"), spi::Priority(0),
+ spi::Trace::TraceLevel(0));
+ getPersistenceProvider().createBucket(
+ spi::Bucket(document::BucketId(16, 4), spi::PartitionId(0)),
+ context);
+ }
+
+ std::string stat() {
+ return dumpBucket(document::BucketId(16, 4), 0);
+ }
+
+ void testMultioperationSingleBodyPut();
+ void testMultioperationSingleRemove();
+ void testMultioperationSingleUpdate();
+ void testMultioperationUpdateNotFound();
+ void testMultioperationMixedOperations();
+ void testMultioperationMixedOperationsWrongBucket();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(LegacyOperationHandlerTest);
+
+void
+LegacyOperationHandlerTest::testMultioperationSingleBodyPut()
+{
+ std::unique_ptr<PersistenceThread> thread(createPersistenceThread(0));
+ document::BucketId bucketId(16, 4);
+
+ document::Document::SP doc(createRandomDocumentAtLocation(4, 1234, 0, 128));
+
+ std::vector<char> buffer(1024);
+ vdslib::WritableDocumentList block(getTypeRepo(), &buffer[0], buffer.size());
+ block.addPut(*doc, api::Timestamp(1234));
+
+ api::MultiOperationCommand cmd(getTypeRepo(), bucketId, 0);
+ cmd.setOperations(block);
+
+ thread->handleMultiOperation(cmd);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("DocEntry(1234, 0, Doc(id:mail:testdoctype1:n=4:3619.html))\n"), stat());
+}
+
+void
+LegacyOperationHandlerTest::testMultioperationSingleRemove()
+{
+ std::unique_ptr<PersistenceThread> thread(createPersistenceThread(0));
+ document::BucketId bucketId(16, 4);
+
+ document::Document::SP doc = doPut(4, spi::Timestamp(1234));
+
+ std::vector<char> buffer(1024);
+ vdslib::WritableDocumentList block(getTypeRepo(), &buffer[0], buffer.size());
+ block.addRemove(doc->getId(), spi::Timestamp(1235));
+
+ api::MultiOperationCommand cmd(getTypeRepo(), bucketId, 0);
+ cmd.setOperations(block);
+
+ thread->handleMultiOperation(cmd);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("DocEntry(1234, 0, Doc(id:mail:testdoctype1:n=4:3619.html))\n"
+ "DocEntry(1235, 1, id:mail:testdoctype1:n=4:3619.html)\n"), stat());
+}
+
+void
+LegacyOperationHandlerTest::testMultioperationSingleUpdate()
+{
+ std::unique_ptr<PersistenceThread> thread(createPersistenceThread(0));
+ document::BucketId bucketId(16, 4);
+ document::StringFieldValue updateValue("foo");
+
+ document::Document::SP doc = doPut(4, spi::Timestamp(1234));
+ document::Document originalDoc(*doc);
+
+ document::DocumentUpdate::SP update = createBodyUpdate(
+ doc->getId(), updateValue);
+
+ std::vector<char> buffer(1024);
+ vdslib::WritableDocumentList block(getTypeRepo(), &buffer[0], buffer.size());
+ block.addUpdate(*update, api::Timestamp(1235));
+
+ api::MultiOperationCommand cmd(getTypeRepo(), bucketId, 0);
+ cmd.setOperations(block);
+
+ thread->handleMultiOperation(cmd);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("DocEntry(1234, 0, Doc(id:mail:testdoctype1:n=4:3619.html))\n"
+ "DocEntry(1235, 0, Doc(id:mail:testdoctype1:n=4:3619.html))\n"), stat());
+}
+
+void
+LegacyOperationHandlerTest::testMultioperationUpdateNotFound()
+{
+ std::unique_ptr<PersistenceThread> thread(createPersistenceThread(0));
+ document::BucketId bucketId(16, 4);
+ document::DocumentId docId("userdoc:test:4:0");
+ document::StringFieldValue updateValue("foo");
+
+ document::DocumentUpdate::SP update = createBodyUpdate(
+ docId, updateValue);
+
+ std::vector<char> buffer(1024);
+ vdslib::WritableDocumentList block(getTypeRepo(), &buffer[0], buffer.size());
+ block.addUpdate(*update, api::Timestamp(1235));
+
+ api::MultiOperationCommand cmd(getTypeRepo(), bucketId, 0);
+ cmd.setOperations(block);
+
+ thread->handleMultiOperation(cmd);
+
+ CPPUNIT_ASSERT_EQUAL(std::string(""), stat());
+}
+
+void
+LegacyOperationHandlerTest::testMultioperationMixedOperations()
+{
+ std::unique_ptr<PersistenceThread> thread(createPersistenceThread(0));
+ document::BucketId bucketId(16, 4);
+ document::StringFieldValue updateValue("bar");
+
+ document::Document::SP originalUpdateDoc = doPut(4, spi::Timestamp(1234));
+ document::Document::SP originalRemoveDoc = doPut(4, spi::Timestamp(2345));
+
+ document::DocumentUpdate::SP update = createBodyUpdate(
+ originalUpdateDoc->getId(), updateValue);
+
+ document::DocumentUpdate::SP nonExistingUpdate = createBodyUpdate(
+ document::DocumentId("id:test:testdoctype1:n=4:nonexisting1"), updateValue);
+
+ document::Document::SP putDoc(createRandomDocumentAtLocation(4, 5678, 0, 128));
+
+ std::vector<char> buffer(1024);
+ vdslib::WritableDocumentList block(getTypeRepo(), &buffer[0], buffer.size());
+
+ block.addUpdate(*update, api::Timestamp(3456));
+ block.addUpdate(*nonExistingUpdate, api::Timestamp(3457));
+ block.addRemove(originalRemoveDoc->getId(), api::Timestamp(4567));
+ block.addRemove(document::DocumentId("id:test:testdoctype1:n=4:nonexisting2"),
+ api::Timestamp(4568));
+ block.addPut(*putDoc, api::Timestamp(5678));
+
+ api::MultiOperationCommand cmd(getTypeRepo(), bucketId, 0);
+ cmd.setOperations(block);
+
+ thread->handleMultiOperation(cmd);
+
+ CPPUNIT_ASSERT_EQUAL(
+ std::string("DocEntry(1234, 0, Doc(id:mail:testdoctype1:n=4:3619.html))\n"
+ "DocEntry(2345, 0, Doc(id:mail:testdoctype1:n=4:4008.html))\n"
+ "DocEntry(3456, 0, Doc(id:mail:testdoctype1:n=4:3619.html))\n"
+ "DocEntry(4567, 1, id:mail:testdoctype1:n=4:4008.html)\n"
+ "DocEntry(4568, 1, id:test:testdoctype1:n=4:nonexisting2)\n"
+ "DocEntry(5678, 0, Doc(id:mail:testdoctype1:n=4:5177.html))\n"),
+ stat());
+}
+
+}
diff --git a/storage/src/tests/persistence/mergehandlertest.cpp b/storage/src/tests/persistence/mergehandlertest.cpp
new file mode 100644
index 00000000000..3d3ce25a7d7
--- /dev/null
+++ b/storage/src/tests/persistence/mergehandlertest.cpp
@@ -0,0 +1,1494 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/document/base/testdocman.h>
+#include <vespa/storage/persistence/mergehandler.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <vespa/log/log.h>
+#include <tests/persistence/persistencetestutils.h>
+#include <tests/persistence/persistenceproviderwrapper.h>
+#include <tests/distributor/messagesenderstub.h>
+#include <vespa/storageframework/defaultimplementation/clock/fakeclock.h>
+
+LOG_SETUP(".test.persistence.handler.merge");
+
+namespace storage {
+
// Test fixture for MergeHandler: exercises the merge protocol
// (MergeBucket -> GetBucketDiff -> ApplyBucketDiff and their replies)
// from the perspective of a node at different positions in the merge chain,
// including SPI failure injection via PersistenceProviderWrapper.
struct MergeHandlerTest : public SingleDiskPersistenceTestUtils
{
    uint32_t _location; // Location used for all merge tests
    document::BucketId _bucket; // Bucket used for all merge tests
    uint64_t _maxTimestamp;
    std::vector<api::MergeBucketCommand::Node> _nodes;
    std::unique_ptr<spi::Context> _context;

    // Fetch a single command or reply; doesn't care which.
    template <typename T>
    std::shared_ptr<T> fetchSingleMessage();

    void setUp();

    // Position of this node in the merge chain; controls which neighbour
    // nodes setUpChain() puts into _nodes.
    enum ChainPos { FRONT, MIDDLE, BACK };
    void setUpChain(ChainPos);

    // Test a regular merge bucket command fetching data, including
    // puts, removes, unrevertable removes & duplicates.
    void testMergeBucketCommand();
    // Test that a simplistic merge with nothing to actually merge,
    // sends get bucket diff through the entire chain of 3 nodes.
    void testGetBucketDiffChain(bool midChain);
    void testGetBucketDiffMidChain() { testGetBucketDiffChain(true); }
    void testGetBucketDiffEndOfChain() { testGetBucketDiffChain(false); }
    // Test that a simplistic merge with nothing to actually merge,
    // sends apply bucket diff through the entire chain of 3 nodes.
    void testApplyBucketDiffChain(bool midChain);
    void testApplyBucketDiffMidChain() { testApplyBucketDiffChain(true); }
    void testApplyBucketDiffEndOfChain() { testApplyBucketDiffChain(false); }
    // Test that a simplistic merge with one thing to actually merge,
    // sends correct commands and finish.
    void testMasterMessageFlow();
    // Test that a simplistic merge with 1 doc to actually merge,
    // sends apply bucket diff through the entire chain of 3 nodes.
    // NOTE(review): this no-arg overload is not registered in the suite
    // below and no definition is visible here — looks like a stale
    // leftover of the (bool) overload above; confirm before removing.
    void testApplyBucketDiffChain();
    void testMergeUnrevertableRemove();
    void testChunkedApplyBucketDiff();
    void testChunkLimitPartiallyFilledDiff();
    void testMaxTimestamp();
    void testSPIFlushGuard();
    void testBucketNotFoundInDb();
    void testMergeProgressSafeGuard();
    void testSafeGuardNotInvokedWhenHasMaskChanges();
    void testEntryRemovedAfterGetBucketDiff();

    void testMergeBucketSPIFailures();
    void testGetBucketDiffSPIFailures();
    void testApplyBucketDiffSPIFailures();
    void testGetBucketDiffReplySPIFailures();
    void testApplyBucketDiffReplySPIFailures();

    void testRemoveFromDiff();

    void testRemovePutOnExistingTimestamp();

    CPPUNIT_TEST_SUITE(MergeHandlerTest);
    CPPUNIT_TEST(testMergeBucketCommand);
    CPPUNIT_TEST(testGetBucketDiffMidChain);
    CPPUNIT_TEST(testGetBucketDiffEndOfChain);
    CPPUNIT_TEST(testApplyBucketDiffMidChain);
    CPPUNIT_TEST(testApplyBucketDiffEndOfChain);
    CPPUNIT_TEST(testMasterMessageFlow);
    CPPUNIT_TEST(testMergeUnrevertableRemove);
    CPPUNIT_TEST(testChunkedApplyBucketDiff);
    CPPUNIT_TEST(testChunkLimitPartiallyFilledDiff);
    CPPUNIT_TEST(testMaxTimestamp);
    CPPUNIT_TEST(testSPIFlushGuard);
    CPPUNIT_TEST(testBucketNotFoundInDb);
    CPPUNIT_TEST(testMergeProgressSafeGuard);
    CPPUNIT_TEST(testSafeGuardNotInvokedWhenHasMaskChanges);
    CPPUNIT_TEST(testEntryRemovedAfterGetBucketDiff);
    CPPUNIT_TEST(testMergeBucketSPIFailures);
    CPPUNIT_TEST(testGetBucketDiffSPIFailures);
    CPPUNIT_TEST(testApplyBucketDiffSPIFailures);
    CPPUNIT_TEST(testGetBucketDiffReplySPIFailures);
    CPPUNIT_TEST(testApplyBucketDiffReplySPIFailures);
    CPPUNIT_TEST(testRemoveFromDiff);
    CPPUNIT_TEST(testRemovePutOnExistingTimestamp);
    CPPUNIT_TEST_SUITE_END();

    // @TODO Add test to test that buildBucketInfo and mergeLists create minimal list (wrong sorting screws this up)
private:
    void fillDummyApplyDiff(std::vector<api::ApplyBucketDiffCommand::Entry>& diff);
    std::shared_ptr<api::ApplyBucketDiffCommand> createDummyApplyDiff(
            int timestampOffset,
            uint16_t hasMask = 0x1,
            bool filled = true);

    std::shared_ptr<api::GetBucketDiffCommand>
    createDummyGetBucketDiff(int timestampOffset,
                             uint16_t hasMask);

    // Pairs an injected SPI failure mask with the substring expected in
    // the resulting error; consumed by doTestSPIException().
    struct ExpectedExceptionSpec // Try saying this out loud 3 times in a row.
    {
        uint32_t mask;
        const char* expected;
    };

    // Strategy object describing how to drive one handler entry point so
    // doTestSPIException() can exercise each of them uniformly.
    class HandlerInvoker
    {
    public:
        virtual ~HandlerInvoker() {}
        virtual void beforeInvoke(MergeHandlerTest&, MergeHandler&, spi::Context&) {}
        virtual void invoke(MergeHandlerTest&, MergeHandler&, spi::Context&) = 0;
        virtual std::string afterInvoke(MergeHandlerTest&, MergeHandler&) = 0;
    };
    friend class HandlerInvoker;

    // Base for invokers whose handler call is not expected to send a reply.
    class NoReplyHandlerInvoker
        : public HandlerInvoker
    {
    public:
        std::string afterInvoke(MergeHandlerTest&, MergeHandler&);
    };

    template <typename ExpectedMessage>
    std::string checkMessage(api::ReturnCode::Result expectedResult);

    class HandleMergeBucketInvoker
        : public NoReplyHandlerInvoker
    {
    public:
        void invoke(MergeHandlerTest&, MergeHandler&, spi::Context&);
    };

    class HandleMergeBucketReplyInvoker
        : public NoReplyHandlerInvoker
    {
    public:
        void invoke(MergeHandlerTest&, MergeHandler&, spi::Context&);
    };

    class HandleGetBucketDiffInvoker
        : public NoReplyHandlerInvoker
    {
    public:
        void invoke(MergeHandlerTest&, MergeHandler&, spi::Context&);
    };

    // Invoker that can be replayed at different chain positions (defaults
    // to FRONT).
    class MultiPositionHandlerInvoker
        : public HandlerInvoker
    {
    public:
        MultiPositionHandlerInvoker()
            : _pos(FRONT)
        {
        }
        void setChainPos(ChainPos pos) { _pos = pos; }
        ChainPos getChainPos() const { return _pos; }
    private:
        ChainPos _pos;
    };

    class HandleGetBucketDiffReplyInvoker
        : public HandlerInvoker
    {
    public:
        void beforeInvoke(MergeHandlerTest&, MergeHandler&, spi::Context&);
        void invoke(MergeHandlerTest&, MergeHandler&, spi::Context&);
        std::string afterInvoke(MergeHandlerTest&, MergeHandler&);
    private:
        MessageSenderStub _stub;
        std::shared_ptr<api::GetBucketDiffCommand> _diffCmd;
    };

    class HandleApplyBucketDiffInvoker
        : public NoReplyHandlerInvoker
    {
    public:
        HandleApplyBucketDiffInvoker() : _counter(0) {}
        void invoke(MergeHandlerTest&, MergeHandler&, spi::Context&);
    private:
        int _counter; // invocation count; used to vary per-call state
    };

    class HandleApplyBucketDiffReplyInvoker
        : public MultiPositionHandlerInvoker
    {
    public:
        HandleApplyBucketDiffReplyInvoker()
            : _counter(0),
              _stub(),
              _applyCmd()
        {}
        void beforeInvoke(MergeHandlerTest&, MergeHandler&, spi::Context&);
        void invoke(MergeHandlerTest&, MergeHandler&, spi::Context&);
        std::string afterInvoke(MergeHandlerTest&, MergeHandler&);
    private:
        int _counter;
        MessageSenderStub _stub;
        std::shared_ptr<api::ApplyBucketDiffCommand> _applyCmd;
    };

    // Runs one invoker with the given failure injected into the wrapped
    // provider; returns an empty string on success, else a description of
    // what went wrong.
    std::string
    doTestSPIException(MergeHandler& handler,
                       PersistenceProviderWrapper& providerWrapper,
                       HandlerInvoker& invoker,
                       const ExpectedExceptionSpec& spec);
};
+
// Registers the suite with CppUnit's global test registry.
CPPUNIT_TEST_SUITE_REGISTRATION(MergeHandlerTest);
+
void
MergeHandlerTest::setUp() {
    // The SPI context is created before the base-class environment setup.
    _context.reset(new spi::Context(documentapi::LoadType::DEFAULT, 0, 0));
    SingleDiskPersistenceTestUtils::setUp();

    // Fixed location/bucket/timestamp ceiling shared by all merge tests.
    _location = 1234;
    _bucket = document::BucketId(16, _location);
    _maxTimestamp = 11501;

    LOG(info, "Creating %s in bucket database", _bucket.toString().c_str());
    bucketdb::StorageBucketInfo bucketDBEntry;
    bucketDBEntry.disk = 0;
    getEnv().getBucketDatabase().insert(_bucket, bucketDBEntry, "mergetestsetup");

    LOG(info, "Creating bucket to merge");
    createTestBucket(_bucket);

    // Default to being the first node in the merge chain; individual tests
    // override with setUpChain(MIDDLE/BACK) as needed.
    setUpChain(FRONT);
}
+
+void
+MergeHandlerTest::setUpChain(ChainPos pos) {
+ _nodes.clear();
+ if (pos != FRONT) {
+ _nodes.push_back(api::MergeBucketCommand::Node(2, false));
+ }
+ _nodes.push_back(api::MergeBucketCommand::Node(0, false));
+ if (pos != BACK) {
+ _nodes.push_back(api::MergeBucketCommand::Node(1, false));
+ }
+}
+
+void
+MergeHandlerTest::testMergeBucketCommand()
+{
+ MergeHandler handler(getPersistenceProvider(), getEnv());
+
+ LOG(info, "Handle a merge bucket command");
+ api::MergeBucketCommand cmd(_bucket, _nodes, _maxTimestamp);
+ cmd.setSourceIndex(1234);
+ MessageTracker::UP tracker = handler.handleMergeBucket(cmd, *_context);
+
+ LOG(info, "Check state");
+ CPPUNIT_ASSERT_EQUAL(uint64_t(1), messageKeeper()._msgs.size());
+ CPPUNIT_ASSERT_EQUAL(api::MessageType::GETBUCKETDIFF,
+ messageKeeper()._msgs[0]->getType());
+ api::GetBucketDiffCommand& cmd2(dynamic_cast<api::GetBucketDiffCommand&>(
+ *messageKeeper()._msgs[0]));
+ CPPUNIT_ASSERT_EQUAL(_nodes, cmd2.getNodes());
+ std::vector<api::GetBucketDiffCommand::Entry> diff(cmd2.getDiff());
+ CPPUNIT_ASSERT_EQUAL(uint64_t(17), diff.size());
+ CPPUNIT_ASSERT_EQUAL(uint16_t(1), cmd2.getAddress()->getIndex());
+ CPPUNIT_ASSERT_EQUAL(uint16_t(1234), cmd2.getSourceIndex());
+
+ tracker->generateReply(cmd);
+ CPPUNIT_ASSERT(!tracker->getReply().get());
+}
+
// Verifies GetBucketDiff chain behavior: a mid-chain node forwards the
// command onward and relays the reply back; the last node in the chain
// answers directly.
void
MergeHandlerTest::testGetBucketDiffChain(bool midChain)
{
    setUpChain(midChain ? MIDDLE : BACK);
    MergeHandler handler(getPersistenceProvider(), getEnv());

    LOG(info, "Verifying that get bucket diff is sent on");
    api::GetBucketDiffCommand cmd(_bucket, _nodes, _maxTimestamp);
    MessageTracker::UP tracker1 = handler.handleGetBucketDiff(cmd, *_context);
    api::StorageMessage::SP replySent = tracker1->getReply();

    if (midChain) {
        LOG(info, "Check state");
        CPPUNIT_ASSERT_EQUAL(uint64_t(1), messageKeeper()._msgs.size());
        CPPUNIT_ASSERT_EQUAL(api::MessageType::GETBUCKETDIFF,
                             messageKeeper()._msgs[0]->getType());
        api::GetBucketDiffCommand& cmd2(
                dynamic_cast<api::GetBucketDiffCommand&>(
                    *messageKeeper()._msgs[0]));
        CPPUNIT_ASSERT_EQUAL(_nodes, cmd2.getNodes());
        // Our 17 local entries must be included in the forwarded diff.
        std::vector<api::GetBucketDiffCommand::Entry> diff(cmd2.getDiff());
        CPPUNIT_ASSERT_EQUAL(uint64_t(17), diff.size());
        CPPUNIT_ASSERT_EQUAL(uint16_t(1), cmd2.getAddress()->getIndex());

        LOG(info, "Verifying that replying the diff sends on back");
        api::GetBucketDiffReply::UP reply(new api::GetBucketDiffReply(cmd2));

        // No reply yet while waiting for the next node in the chain.
        CPPUNIT_ASSERT(!replySent.get());

        MessageSenderStub stub;
        handler.handleGetBucketDiffReply(*reply, stub);
        CPPUNIT_ASSERT_EQUAL(1, (int)stub.replies.size());
        replySent = stub.replies[0];
    }
    // In both cases we must end up with a GetBucketDiffReply carrying the
    // full node list and all 17 diff entries.
    api::GetBucketDiffReply::SP reply2(
            std::dynamic_pointer_cast<api::GetBucketDiffReply>(
                replySent));
    CPPUNIT_ASSERT(reply2.get());

    CPPUNIT_ASSERT_EQUAL(_nodes, reply2->getNodes());
    std::vector<api::GetBucketDiffCommand::Entry> diff(reply2->getDiff());
    CPPUNIT_ASSERT_EQUAL(uint64_t(17), diff.size());
}
+
// Verifies ApplyBucketDiff chain behavior with an empty diff: a mid-chain
// node forwards the command and relays the reply; the last node answers
// directly.
void
MergeHandlerTest::testApplyBucketDiffChain(bool midChain)
{
    setUpChain(midChain ? MIDDLE : BACK);
    MergeHandler handler(getPersistenceProvider(), getEnv());

    LOG(info, "Verifying that apply bucket diff is sent on");
    api::ApplyBucketDiffCommand cmd(_bucket, _nodes, _maxTimestamp);
    MessageTracker::UP tracker1 = handler.handleApplyBucketDiff(cmd, *_context);
    api::StorageMessage::SP replySent = tracker1->getReply();

    if (midChain) {
        LOG(info, "Check state");
        CPPUNIT_ASSERT_EQUAL(uint64_t(1), messageKeeper()._msgs.size());
        CPPUNIT_ASSERT_EQUAL(api::MessageType::APPLYBUCKETDIFF,
                             messageKeeper()._msgs[0]->getType());
        api::ApplyBucketDiffCommand& cmd2(
                dynamic_cast<api::ApplyBucketDiffCommand&>(
                    *messageKeeper()._msgs[0]));
        CPPUNIT_ASSERT_EQUAL(_nodes, cmd2.getNodes());
        // Nothing to merge, so the forwarded diff stays empty.
        std::vector<api::ApplyBucketDiffCommand::Entry> diff(cmd2.getDiff());
        CPPUNIT_ASSERT_EQUAL(uint64_t(0), diff.size());
        CPPUNIT_ASSERT_EQUAL(uint16_t(1), cmd2.getAddress()->getIndex());

        // No reply yet while waiting for the next node in the chain.
        CPPUNIT_ASSERT(!replySent.get());

        LOG(info, "Verifying that replying the diff sends on back");
        api::ApplyBucketDiffReply::UP reply(
                new api::ApplyBucketDiffReply(cmd2));

        MessageSenderStub stub;
        handler.handleApplyBucketDiffReply(*reply, stub);
        CPPUNIT_ASSERT_EQUAL(1, (int)stub.replies.size());
        replySent = stub.replies[0];
    }

    // Either way, the final reply is an ApplyBucketDiffReply with the full
    // node list and an empty diff.
    api::ApplyBucketDiffReply::SP reply2(
            std::dynamic_pointer_cast<api::ApplyBucketDiffReply>(replySent));
    CPPUNIT_ASSERT(reply2.get());

    CPPUNIT_ASSERT_EQUAL(_nodes, reply2->getNodes());
    std::vector<api::ApplyBucketDiffCommand::Entry> diff(reply2->getDiff());
    CPPUNIT_ASSERT_EQUAL(uint64_t(0), diff.size());
}
+
// Drives the full message flow from the chain master's point of view:
// MergeBucket -> GetBucketDiff -> ApplyBucketDiff -> final MergeBucketReply.
void
MergeHandlerTest::testMasterMessageFlow()
{
    MergeHandler handler(getPersistenceProvider(), getEnv());

    LOG(info, "Handle a merge bucket command");
    api::MergeBucketCommand cmd(_bucket, _nodes, _maxTimestamp);

    handler.handleMergeBucket(cmd, *_context);
    LOG(info, "Check state");
    CPPUNIT_ASSERT_EQUAL(uint64_t(1), messageKeeper()._msgs.size());
    CPPUNIT_ASSERT_EQUAL(api::MessageType::GETBUCKETDIFF,
                         messageKeeper()._msgs[0]->getType());
    api::GetBucketDiffCommand& cmd2(dynamic_cast<api::GetBucketDiffCommand&>(
            *messageKeeper()._msgs[0]));

    api::GetBucketDiffReply::UP reply(new api::GetBucketDiffReply(cmd2));
    // The end of the chain may prune entries that every node already has;
    // shrink the diff to simulate a single entry that the master node has
    // but the other nodes lack.
    reply->getDiff().resize(1);

    handler.handleGetBucketDiffReply(*reply, messageKeeper());

    LOG(info, "Check state");
    CPPUNIT_ASSERT_EQUAL(uint64_t(2), messageKeeper()._msgs.size());
    CPPUNIT_ASSERT_EQUAL(api::MessageType::APPLYBUCKETDIFF,
                         messageKeeper()._msgs[1]->getType());
    api::ApplyBucketDiffCommand& cmd3(
            dynamic_cast<api::ApplyBucketDiffCommand&>(
                *messageKeeper()._msgs[1]));
    api::ApplyBucketDiffReply::UP reply2(new api::ApplyBucketDiffReply(cmd3));
    CPPUNIT_ASSERT_EQUAL(size_t(1), reply2->getDiff().size());
    // Mark node 1 as now having the entry, i.e. the apply succeeded there.
    reply2->getDiff()[0]._entry._hasMask |= 2;

    MessageSenderStub stub;
    handler.handleApplyBucketDiffReply(*reply2, stub);

    CPPUNIT_ASSERT_EQUAL(1, (int)stub.replies.size());

    // The merge is complete: a successful MergeBucketReply goes out and
    // the bucket is no longer tracked as merging.
    api::MergeBucketReply::SP reply3(
            std::dynamic_pointer_cast<api::MergeBucketReply>(stub.replies[0]));
    CPPUNIT_ASSERT(reply3.get());

    CPPUNIT_ASSERT_EQUAL(_nodes, reply3->getNodes());
    CPPUNIT_ASSERT(reply3->getResult().success());
    CPPUNIT_ASSERT(!fsHandler().isMerging(_bucket));
}
+
void
MergeHandlerTest::testMergeUnrevertableRemove()
{
    // NOTE(review): the entire body of this test is disabled (commented
    // out) — it exercises memfile-specific internals (getMemFile/MemSlot)
    // rather than the SPI this suite runs against. It is still registered
    // in the test suite, so it currently runs as an empty no-op; confirm
    // whether it should be ported or dropped.
/*
    MergeHandler handler(getPersistenceProvider(), getEnv());

    LOG(info, "Handle a merge bucket command");
    api::MergeBucketCommand cmd(_bucket, _nodes, _maxTimestamp);
    {
        MessageTracker tracker;
        handler.handleMergeBucket(cmd, tracker);
    }

    LOG(info, "Check state");
    CPPUNIT_ASSERT_EQUAL(uint64_t(1), messageKeeper()._msgs.size());
    CPPUNIT_ASSERT_EQUAL(api::MessageType::GETBUCKETDIFF,
                         messageKeeper()._msgs[0]->getType());
    api::GetBucketDiffCommand& cmd2(
            dynamic_cast<api::GetBucketDiffCommand&>(
                *messageKeeper()._msgs[0]));

    api::GetBucketDiffReply::UP reply(new api::GetBucketDiffReply(cmd2));

    std::vector<Timestamp> docTimestamps;
    for (int i = 0; i < 4; ++i) {
        docTimestamps.push_back(Timestamp(reply->getDiff()[i]._timestamp));
    }
    CPPUNIT_ASSERT(reply->getDiff().size() >= 4);
    reply->getDiff().resize(4);
    // Add one non-unrevertable entry for existing timestamp which
    // should not be added
    reply->getDiff()[0]._flags |= Types::DELETED;
    reply->getDiff()[0]._bodySize = 0;
    reply->getDiff()[0]._hasMask = 2;
    // Add an unrevertable entry which should be modified
    reply->getDiff()[1]._flags |= Types::DELETED | Types::DELETED_IN_PLACE;
    reply->getDiff()[1]._bodySize = 0;
    reply->getDiff()[1]._hasMask = 2;
    // Add one non-unrevertable entry that is a duplicate put
    // which should not be added or fail the merge.
    LOG(info, "duplicate put has timestamp %zu and flags %u",
        reply->getDiff()[2]._timestamp,
        reply->getDiff()[2]._flags);
    reply->getDiff()[2]._hasMask = 2;
    // Add one unrevertable entry for a timestamp that does not exist
    reply->getDiff()[3]._flags |= Types::DELETED | Types::DELETED_IN_PLACE;
    reply->getDiff()[3]._timestamp = 12345678;
    reply->getDiff()[3]._bodySize = 0;
    reply->getDiff()[3]._hasMask = 2;
    {
        MessageTracker tracker;
        handler.handleGetBucketDiffReply(*reply, tracker);
    }

    LOG(info, "%s", reply->toString(true).c_str());

    LOG(info, "Create bucket diff reply");
    CPPUNIT_ASSERT_EQUAL(uint64_t(2), messageKeeper()._msgs.size());
    CPPUNIT_ASSERT_EQUAL(api::MessageType::APPLYBUCKETDIFF,
                         messageKeeper()._msgs[1]->getType());
    api::ApplyBucketDiffCommand& cmd3(
            dynamic_cast<api::ApplyBucketDiffCommand&>(
                *messageKeeper()._msgs[1]));
    api::ApplyBucketDiffReply::UP reply2(
            new api::ApplyBucketDiffReply(cmd3));
    CPPUNIT_ASSERT_EQUAL(size_t(4), reply2->getDiff().size());

    memfile::DataLocation headerLocs[4];
    std::vector<DocumentId> documentIds;
    // So deserialization won't fail, we need some kind of header blob
    // for each entry

    for (int i = 0; i < 4; ++i) {
        api::ApplyBucketDiffReply::Entry& entry = reply2->getDiff()[i];
        CPPUNIT_ASSERT_EQUAL(uint16_t(2), entry._entry._hasMask);

        memfile::MemFilePtr file(getMemFile(_bucket));
        const memfile::MemSlot* slot = file->getSlotAtTime(docTimestamps[i]);
        CPPUNIT_ASSERT(slot != NULL);
        LOG(info, "Processing slot %s", slot->toString().c_str());
        CPPUNIT_ASSERT(slot->hasBodyContent());
        documentIds.push_back(file->getDocumentId(*slot));
        entry._docName = documentIds.back().toString();
        headerLocs[i] = slot->getLocation(HEADER);

        document::Document::UP doc(file->getDocument(*slot, ALL));
        {
            vespalib::nbostream stream;
            doc->serializeHeader(stream);
            std::vector<char> buf(
                    stream.peek(), stream.peek() + stream.size());
            entry._headerBlob.swap(buf);
        }
        // Put duplicate needs body blob as well
        if (i == 2) {
            vespalib::nbostream stream;
            doc->serializeBody(stream);
            std::vector<char> buf(
                    stream.peek(), stream.peek() + stream.size());
            entry._bodyBlob.swap(buf);
        }
    }

    LOG(info, "%s", reply2->toString(true).c_str());

    MessageTracker tracker;
    handler.handleApplyBucketDiffReply(*reply2, tracker);

    CPPUNIT_ASSERT(tracker._sendReply);
    api::MergeBucketReply::SP reply3(
            std::dynamic_pointer_cast<api::MergeBucketReply>(
                tracker._reply));
    CPPUNIT_ASSERT(reply3.get());

    CPPUNIT_ASSERT_EQUAL(_nodes, reply3->getNodes());
    CPPUNIT_ASSERT(reply3->getResult().success());

    memfile::MemFilePtr file(getMemFile(_bucket));
    // Existing timestamp should not be modified by
    // non-unrevertable entry
    {
        const memfile::MemSlot* slot = file->getSlotAtTime(
                Timestamp(reply->getDiff()[0]._timestamp));
        CPPUNIT_ASSERT(slot != NULL);
        CPPUNIT_ASSERT(!slot->deleted());
    }
    // Ensure unrevertable remove for existing put was merged in OK
    {
        const memfile::MemSlot* slot = file->getSlotAtTime(
                Timestamp(reply->getDiff()[1]._timestamp));
        CPPUNIT_ASSERT(slot != NULL);
        CPPUNIT_ASSERT(slot->deleted());
        CPPUNIT_ASSERT(slot->deletedInPlace());
        CPPUNIT_ASSERT(!slot->hasBodyContent());
        // Header location should not have changed
        CPPUNIT_ASSERT_EQUAL(headerLocs[1], slot->getLocation(HEADER));
    }

    // Non-existing timestamp unrevertable remove should be added as
    // entry with doc id-only header
    {
        const memfile::MemSlot* slot = file->getSlotAtTime(
                Timestamp(reply->getDiff()[3]._timestamp));
        CPPUNIT_ASSERT(slot != NULL);
        CPPUNIT_ASSERT(slot->deleted());
        CPPUNIT_ASSERT(slot->deletedInPlace());
        CPPUNIT_ASSERT(!slot->hasBodyContent());
        CPPUNIT_ASSERT_EQUAL(documentIds[3], file->getDocumentId(*slot));
    }

*/
}
+
+template <typename T>
+std::shared_ptr<T>
+MergeHandlerTest::fetchSingleMessage()
+{
+ std::vector<api::StorageMessage::SP>& msgs(messageKeeper()._msgs);
+ if (msgs.empty()) {
+ std::ostringstream oss;
+ oss << "No messages available to fetch (expected type "
+ << typeid(T).name()
+ << ")";
+ throw std::runtime_error(oss.str());
+ }
+ std::shared_ptr<T> ret(std::dynamic_pointer_cast<T>(
+ messageKeeper()._msgs.back()));
+ if (!ret) {
+ std::ostringstream oss;
+ oss << "Expected message of type "
+ << typeid(T).name()
+ << ", but got "
+ << messageKeeper()._msgs[0]->toString();
+ throw std::runtime_error(oss.str());
+ }
+ messageKeeper()._msgs.pop_back();
+
+ return ret;
+}
+
+namespace {
+
+size_t
+getFilledCount(const std::vector<api::ApplyBucketDiffCommand::Entry>& diff)
+{
+ size_t filledCount = 0;
+ for (size_t i=0; i<diff.size(); ++i) {
+ if (diff[i].filled()) {
+ ++filledCount;
+ }
+ }
+ return filledCount;
+}
+
+size_t
+getFilledDataSize(const std::vector<api::ApplyBucketDiffCommand::Entry>& diff)
+{
+ size_t filledSize = 0;
+ for (size_t i=0; i<diff.size(); ++i) {
+ filledSize += diff[i]._headerBlob.size();
+ filledSize += diff[i]._bodyBlob.size();
+ }
+ return filledSize;
+}
+
+}
+
// Verifies that a diff larger than the handler's max chunk size is applied
// across multiple ApplyBucketDiff rounds, that each chunk respects the size
// limit, and that no entry is ever sent twice.
void
MergeHandlerTest::testChunkedApplyBucketDiff()
{
    uint32_t docSize = 1024;
    uint32_t docCount = 10;
    // Room for roughly 3 documents per chunk, forcing several rounds.
    uint32_t maxChunkSize = docSize * 3;
    for (uint32_t i = 0; i < docCount; ++i) {
        doPut(1234, spi::Timestamp(4000 + i), docSize, docSize);
    }

    MergeHandler handler(getPersistenceProvider(), getEnv(), maxChunkSize);

    LOG(info, "Handle a merge bucket command");
    api::MergeBucketCommand cmd(_bucket, _nodes, _maxTimestamp);
    handler.handleMergeBucket(cmd, *_context);

    std::shared_ptr<api::GetBucketDiffCommand> getBucketDiffCmd(
            fetchSingleMessage<api::GetBucketDiffCommand>());
    api::GetBucketDiffReply::UP getBucketDiffReply(
            new api::GetBucketDiffReply(*getBucketDiffCmd));

    handler.handleGetBucketDiffReply(*getBucketDiffReply, messageKeeper());

    uint32_t totalDiffs = getBucketDiffCmd->getDiff().size();
    std::set<spi::Timestamp> seen;

    api::MergeBucketReply::SP reply;
    // Keep acknowledging chunks until every diff entry has been sent once.
    while (seen.size() != totalDiffs) {
        std::shared_ptr<api::ApplyBucketDiffCommand> applyBucketDiffCmd(
                fetchSingleMessage<api::ApplyBucketDiffCommand>());

        LOG(info, "Test that we get chunked diffs in ApplyBucketDiff");
        std::vector<api::ApplyBucketDiffCommand::Entry>& diff(
                applyBucketDiffCmd->getDiff());
        CPPUNIT_ASSERT(getFilledCount(diff) < totalDiffs);
        CPPUNIT_ASSERT(getFilledDataSize(diff) <= maxChunkSize);

        // Include node 1 in hasmask for all diffs to indicate it's done
        // Also remember the diffs we've seen thus far to ensure chunking
        // does not send duplicates.
        for (size_t i = 0; i < diff.size(); ++i) {
            if (!diff[i].filled()) {
                continue;
            }
            diff[i]._entry._hasMask |= 2;
            std::pair<std::set<spi::Timestamp>::iterator, bool> inserted(
                    seen.insert(spi::Timestamp(diff[i]._entry._timestamp)));
            if (!inserted.second) {
                std::ostringstream ss;
                ss << "Diff for " << diff[i]
                   << " has already been seen in another ApplyBucketDiff";
                CPPUNIT_FAIL(ss.str());
            }
        }

        api::ApplyBucketDiffReply::UP applyBucketDiffReply(
                new api::ApplyBucketDiffReply(*applyBucketDiffCmd));
        {
            handler.handleApplyBucketDiffReply(*applyBucketDiffReply, messageKeeper());

            // Only the final round may produce the MergeBucketReply.
            if (messageKeeper()._msgs.size()) {
                CPPUNIT_ASSERT(!reply.get());
                reply = std::dynamic_pointer_cast<api::MergeBucketReply>(
                        messageKeeper()._msgs[messageKeeper()._msgs.size() - 1]);
            }
        }
    }
    LOG(info, "Done with applying diff");

    CPPUNIT_ASSERT(reply.get());
    CPPUNIT_ASSERT_EQUAL(_nodes, reply->getNodes());
    CPPUNIT_ASSERT(reply->getResult().success());
}
+
// Verifies that when an incoming ApplyBucketDiff already carries partially
// filled entries, the handler only fills additional entries up to the
// configured chunk size before forwarding.
void
MergeHandlerTest::testChunkLimitPartiallyFilledDiff()
{
    // NOTE(review): this setUpChain(FRONT) appears redundant — _nodes is
    // overwritten by setUpChain(MIDDLE) below before being used; confirm
    // and remove.
    setUpChain(FRONT);

    uint32_t docSize = 1024;
    uint32_t docCount = 3;
    // Leaves room for entry 0's pre-filled header plus one more document,
    // but not all three.
    uint32_t maxChunkSize = 1024 + 1024 + 512;

    for (uint32_t i = 0; i < docCount; ++i) {
        doPut(1234, spi::Timestamp(4000 + i), docSize, docSize);
    }

    std::vector<api::ApplyBucketDiffCommand::Entry> applyDiff;
    for (uint32_t i = 0; i < docCount; ++i) {
        api::ApplyBucketDiffCommand::Entry e;
        e._entry._timestamp = 4000 + i;
        // First entry arrives pre-filled from an upstream node.
        if (i == 0) {
            e._headerBlob.resize(docSize);
        }
        e._entry._hasMask = 0x3;
        e._entry._flags = MergeHandler::IN_USE;
        applyDiff.push_back(e);
    }

    setUpChain(MIDDLE);
    std::shared_ptr<api::ApplyBucketDiffCommand> applyBucketDiffCmd(
            new api::ApplyBucketDiffCommand(_bucket, _nodes, maxChunkSize));
    applyBucketDiffCmd->getDiff() = applyDiff;

    MergeHandler handler(
            getPersistenceProvider(), getEnv(), maxChunkSize);
    handler.handleApplyBucketDiff(*applyBucketDiffCmd, *_context);

    std::shared_ptr<api::ApplyBucketDiffCommand> fwdDiffCmd(
            fetchSingleMessage<api::ApplyBucketDiffCommand>());
    // Should not fill up more than chunk size allows for
    CPPUNIT_ASSERT_EQUAL(size_t(2), getFilledCount(fwdDiffCmd->getDiff()));
    CPPUNIT_ASSERT(getFilledDataSize(fwdDiffCmd->getDiff()) <= maxChunkSize);
}
+
+void
+MergeHandlerTest::testMaxTimestamp()
+{
+ doPut(1234, spi::Timestamp(_maxTimestamp + 10), 1024, 1024);
+
+ MergeHandler handler(getPersistenceProvider(), getEnv());
+
+ api::MergeBucketCommand cmd(_bucket, _nodes, _maxTimestamp);
+ handler.handleMergeBucket(cmd, *_context);
+
+ std::shared_ptr<api::GetBucketDiffCommand> getCmd(
+ fetchSingleMessage<api::GetBucketDiffCommand>());
+
+ CPPUNIT_ASSERT(!getCmd->getDiff().empty());
+ CPPUNIT_ASSERT(getCmd->getDiff().back()._timestamp <= _maxTimestamp);
+}
+
+void
+MergeHandlerTest::fillDummyApplyDiff(
+ std::vector<api::ApplyBucketDiffCommand::Entry>& diff)
+{
+ document::TestDocMan docMan;
+ document::Document::SP doc(
+ docMan.createRandomDocumentAtLocation(_location));
+ std::vector<char> headerBlob;
+ {
+ vespalib::nbostream stream;
+ doc->serializeHeader(stream);
+ headerBlob.resize(stream.size());
+ memcpy(&headerBlob[0], stream.peek(), stream.size());
+ }
+
+ assert(diff.size() == 3);
+ diff[0]._headerBlob = headerBlob;
+ diff[1]._docName = doc->getId().toString();
+ diff[2]._docName = doc->getId().toString();
+}
+
+std::shared_ptr<api::ApplyBucketDiffCommand>
+MergeHandlerTest::createDummyApplyDiff(int timestampOffset,
+ uint16_t hasMask,
+ bool filled)
+{
+
+ std::vector<api::ApplyBucketDiffCommand::Entry> applyDiff;
+ {
+ api::ApplyBucketDiffCommand::Entry e;
+ e._entry._timestamp = timestampOffset;
+ e._entry._hasMask = hasMask;
+ e._entry._flags = MergeHandler::IN_USE;
+ applyDiff.push_back(e);
+ }
+ {
+ api::ApplyBucketDiffCommand::Entry e;
+ e._entry._timestamp = timestampOffset + 1;
+ e._entry._hasMask = hasMask;
+ e._entry._flags = MergeHandler::IN_USE | MergeHandler::DELETED;
+ applyDiff.push_back(e);
+ }
+ {
+ api::ApplyBucketDiffCommand::Entry e;
+ e._entry._timestamp = timestampOffset + 2;
+ e._entry._hasMask = hasMask;
+ e._entry._flags = MergeHandler::IN_USE |
+ MergeHandler::DELETED |
+ MergeHandler::DELETED_IN_PLACE;
+ applyDiff.push_back(e);
+ }
+
+ if (filled) {
+ fillDummyApplyDiff(applyDiff);
+ }
+
+ std::shared_ptr<api::ApplyBucketDiffCommand> applyBucketDiffCmd(
+ new api::ApplyBucketDiffCommand(_bucket, _nodes, 1024*1024));
+ applyBucketDiffCmd->getDiff() = applyDiff;
+ return applyBucketDiffCmd;
+}
+
+// Must match up with diff used in createDummyApplyDiff
+std::shared_ptr<api::GetBucketDiffCommand>
+MergeHandlerTest::createDummyGetBucketDiff(int timestampOffset,
+ uint16_t hasMask)
+{
+ std::vector<api::GetBucketDiffCommand::Entry> diff;
+ {
+ api::GetBucketDiffCommand::Entry e;
+ e._timestamp = timestampOffset;
+ e._hasMask = hasMask;
+ e._flags = MergeHandler::IN_USE;
+ diff.push_back(e);
+ }
+ {
+ api::GetBucketDiffCommand::Entry e;
+ e._timestamp = timestampOffset + 1;
+ e._hasMask = hasMask;
+ e._flags = MergeHandler::IN_USE | MergeHandler::DELETED;
+ diff.push_back(e);
+ }
+ {
+ api::GetBucketDiffCommand::Entry e;
+ e._timestamp = timestampOffset + 2;
+ e._hasMask = hasMask;
+ e._flags = MergeHandler::IN_USE |
+ MergeHandler::DELETED |
+ MergeHandler::DELETED_IN_PLACE;
+ diff.push_back(e);
+ }
+
+ std::shared_ptr<api::GetBucketDiffCommand> getBucketDiffCmd(
+ new api::GetBucketDiffCommand(_bucket, _nodes, 1024*1024));
+ getBucketDiffCmd->getDiff() = diff;
+ return getBucketDiffCmd;
+}
+
// Verifies that the handler flushes the SPI after applying a diff locally
// even when one of the apply operations throws.
void
MergeHandlerTest::testSPIFlushGuard()
{
    PersistenceProviderWrapper providerWrapper(
            getPersistenceProvider());
    MergeHandler handler(providerWrapper, getEnv());

    providerWrapper.setResult(
            spi::Result(spi::Result::PERMANENT_ERROR,
                        "who you gonna call?"));

    setUpChain(MIDDLE);
    // Fail applying unrevertable remove
    providerWrapper.setFailureMask(
            PersistenceProviderWrapper::FAIL_REMOVE);
    providerWrapper.clearOperationLog();
    try {
        handler.handleApplyBucketDiff(*createDummyApplyDiff(6000), *_context);
        CPPUNIT_FAIL("No exception thrown on failing in-place remove");
    } catch (const std::runtime_error& e) {
        CPPUNIT_ASSERT(std::string(e.what()).find("Failed remove")
                       != std::string::npos);
    }
    // Test that we always flush after applying diff locally, even when
    // errors are encountered.
    const std::vector<std::string>& opLog(providerWrapper.getOperationLog());
    CPPUNIT_ASSERT(!opLog.empty());
    CPPUNIT_ASSERT_EQUAL(
            std::string("flush(Bucket(0x40000000000004d2, partition 0))"),
            opLog.back());
}
+
+void
+MergeHandlerTest::testBucketNotFoundInDb()
+{
+ MergeHandler handler(getPersistenceProvider(), getEnv());
+ // Send merge for unknown bucket
+ api::MergeBucketCommand cmd(document::BucketId(16, 6789), _nodes, _maxTimestamp);
+ MessageTracker::UP tracker = handler.handleMergeBucket(cmd, *_context);
+ CPPUNIT_ASSERT(tracker->getResult().isBucketDisappearance());
+}
+
// Verifies the safeguard against non-progressing merges: when an
// ApplyBucketDiff round completes without any hasMask changing, the merge
// is aborted with INTERNAL_FAILURE instead of looping forever.
void
MergeHandlerTest::testMergeProgressSafeGuard()
{
    MergeHandler handler(getPersistenceProvider(), getEnv());
    api::MergeBucketCommand cmd(_bucket, _nodes, _maxTimestamp);
    handler.handleMergeBucket(cmd, *_context);

    std::shared_ptr<api::GetBucketDiffCommand> getBucketDiffCmd(
            fetchSingleMessage<api::GetBucketDiffCommand>());
    api::GetBucketDiffReply::UP getBucketDiffReply(
            new api::GetBucketDiffReply(*getBucketDiffCmd));

    handler.handleGetBucketDiffReply(*getBucketDiffReply, messageKeeper());

    std::shared_ptr<api::ApplyBucketDiffCommand> applyBucketDiffCmd(
            fetchSingleMessage<api::ApplyBucketDiffCommand>());
    // Reply is sent back unmodified — no hasMask changes, i.e. no progress.
    api::ApplyBucketDiffReply::UP applyBucketDiffReply(
            new api::ApplyBucketDiffReply(*applyBucketDiffCmd));

    MessageSenderStub stub;
    handler.handleApplyBucketDiffReply(*applyBucketDiffReply, stub);

    CPPUNIT_ASSERT_EQUAL(1, (int)stub.replies.size());

    api::MergeBucketReply::SP mergeReply(
            std::dynamic_pointer_cast<api::MergeBucketReply>(
                stub.replies[0]));
    CPPUNIT_ASSERT(mergeReply.get());
    CPPUNIT_ASSERT(mergeReply->getResult().getResult()
                   == api::ReturnCode::INTERNAL_FAILURE);
}
+
// Verifies that the non-progress safeguard does NOT trigger when a hasMask
// actually changed: the merge continues with a new ApplyBucketDiffCommand
// carrying the updated mask.
void
MergeHandlerTest::testSafeGuardNotInvokedWhenHasMaskChanges()
{
    MergeHandler handler(getPersistenceProvider(), getEnv());
    // Use a 3-node chain so there is still work left after one node's
    // mask changes.
    _nodes.clear();
    _nodes.push_back(api::MergeBucketCommand::Node(0, false));
    _nodes.push_back(api::MergeBucketCommand::Node(1, false));
    _nodes.push_back(api::MergeBucketCommand::Node(2, false));
    api::MergeBucketCommand cmd(_bucket, _nodes, _maxTimestamp);
    handler.handleMergeBucket(cmd, *_context);

    std::shared_ptr<api::GetBucketDiffCommand> getBucketDiffCmd(
            fetchSingleMessage<api::GetBucketDiffCommand>());
    api::GetBucketDiffReply::UP getBucketDiffReply(
            new api::GetBucketDiffReply(*getBucketDiffCmd));

    handler.handleGetBucketDiffReply(*getBucketDiffReply, messageKeeper());

    std::shared_ptr<api::ApplyBucketDiffCommand> applyBucketDiffCmd(
            fetchSingleMessage<api::ApplyBucketDiffCommand>());
    api::ApplyBucketDiffReply::UP applyBucketDiffReply(
            new api::ApplyBucketDiffReply(*applyBucketDiffCmd));
    CPPUNIT_ASSERT(!applyBucketDiffReply->getDiff().empty());
    // Change a hasMask to indicate something changed during merging.
    applyBucketDiffReply->getDiff()[0]._entry._hasMask = 0x5;

    MessageSenderStub stub;
    LOG(debug, "sending apply bucket diff reply");
    handler.handleApplyBucketDiffReply(*applyBucketDiffReply, stub);

    // The merge continues: a follow-up command is sent, not a failure reply.
    CPPUNIT_ASSERT_EQUAL(1, (int)stub.commands.size());

    api::ApplyBucketDiffCommand::SP applyBucketDiffCmd2(
            std::dynamic_pointer_cast<api::ApplyBucketDiffCommand>(
                stub.commands[0]));
    CPPUNIT_ASSERT(applyBucketDiffCmd2.get());
    CPPUNIT_ASSERT_EQUAL(applyBucketDiffCmd->getDiff().size(),
                         applyBucketDiffCmd2->getDiff().size());
    // The updated hasMask must be carried forward into the next round.
    CPPUNIT_ASSERT_EQUAL(uint16_t(0x5),
                         applyBucketDiffCmd2->getDiff()[0]._entry._hasMask);
}
+
+// Verifies that a diff entry whose timestamp no longer exists in persistence
+// (e.g. it was removed after the GetBucketDiff phase) comes back in the
+// ApplyBucketDiff reply unfilled and with its hasMask cleared.
+void
+MergeHandlerTest::testEntryRemovedAfterGetBucketDiff()
+{
+    MergeHandler handler(getPersistenceProvider(), getEnv());
+    std::vector<api::ApplyBucketDiffCommand::Entry> applyDiff;
+    {
+        api::ApplyBucketDiffCommand::Entry e;
+        e._entry._timestamp = 13001; // Removed in persistence
+        e._entry._hasMask = 0x2;
+        e._entry._flags = MergeHandler::IN_USE;
+        applyDiff.push_back(e);
+    }
+    setUpChain(BACK);
+    std::shared_ptr<api::ApplyBucketDiffCommand> applyBucketDiffCmd(
+        new api::ApplyBucketDiffCommand(_bucket, _nodes, 1024*1024));
+    applyBucketDiffCmd->getDiff() = applyDiff;
+
+    MessageTracker::UP tracker = handler.handleApplyBucketDiff(*applyBucketDiffCmd, *_context);
+
+    api::ApplyBucketDiffReply::SP applyBucketDiffReply(
+        std::dynamic_pointer_cast<api::ApplyBucketDiffReply>(
+            tracker->getReply()));
+    CPPUNIT_ASSERT(applyBucketDiffReply.get());
+
+    std::vector<api::ApplyBucketDiffCommand::Entry>& diff(
+        applyBucketDiffReply->getDiff());
+    CPPUNIT_ASSERT_EQUAL(size_t(1), diff.size());
+    // Entry could not be filled from persistence, and its hasMask is zeroed
+    // so no node claims to have the (now missing) document version.
+    CPPUNIT_ASSERT(!diff[0].filled());
+    CPPUNIT_ASSERT_EQUAL(uint16_t(0x0), diff[0]._entry._hasMask);
+}
+
+// Shared driver for the *SPIFailures tests. Arms providerWrapper with
+// spec.mask, runs the invoker, and checks that:
+//   1) a std::runtime_error containing spec.expected is thrown (when mask != 0),
+//   2) the merge state for _bucket is cleared afterwards,
+//   3) the invoker's own postcondition (afterInvoke) holds.
+// Returns an empty string on success, or a diagnostic message describing the
+// first failed expectation (callers assert the result equals "").
+std::string
+MergeHandlerTest::doTestSPIException(MergeHandler& handler,
+                                     PersistenceProviderWrapper& providerWrapper,
+                                     HandlerInvoker& invoker,
+                                     const ExpectedExceptionSpec& spec)
+{
+    providerWrapper.setFailureMask(0);
+    invoker.beforeInvoke(*this, handler, *_context); // Do any setup stuff first
+
+    uint32_t failureMask = spec.mask;
+    const char* expectedSubstring = spec.expected;
+    providerWrapper.setFailureMask(failureMask);
+    try {
+        invoker.invoke(*this, handler, *_context);
+        if (failureMask != 0) {
+            return (std::string("No exception was thrown during handler "
+                                "invocation. Expected exception containing '")
+                    + expectedSubstring + "'");
+        }
+    } catch (const std::runtime_error& e) {
+        if (std::string(e.what()).find(expectedSubstring)
+            == std::string::npos)
+        {
+            return (std::string("Expected exception to contain substring '")
+                    + expectedSubstring + "', but message was: " + e.what());
+        }
+    }
+    // The handler must not leave the bucket registered as merging after a
+    // failed operation, or subsequent merges would be blocked.
+    if (fsHandler().isMerging(_bucket)) {
+        return (std::string("After operation with expected exception '")
+                + expectedSubstring + "', merge state was not cleared");
+    }
+    // Postcondition check.
+    std::string check = invoker.afterInvoke(*this, handler);
+    if (!check.empty()) {
+        return (std::string("Postcondition validation failed for operation "
+                            "with expected exception '")
+                + expectedSubstring + "': " + check);
+    }
+    return "";
+}
+
+// Postcondition for invokers whose handler call should not produce any
+// explicit reply: fails if the test's message keeper saw any message.
+std::string
+MergeHandlerTest::NoReplyHandlerInvoker::afterInvoke(
+        MergeHandlerTest& test,
+        MergeHandler& handler)
+{
+    (void) handler;
+    if (!test.messageKeeper()._msgs.empty()) {
+        std::ostringstream ss;
+        ss << "Expected 0 explicit replies, got "
+           << test.messageKeeper()._msgs.size();
+        return ss.str();
+    }
+    return "";
+}
+
+// Fetches the single pending message, expecting it to be of type
+// ExpectedMessage with the given result code. Returns "" on success or a
+// diagnostic string (wrong result, or fetchSingleMessage threw). Defined in
+// the .cpp; only instantiated from invokers within this translation unit.
+template <typename ExpectedMessage>
+std::string
+MergeHandlerTest::checkMessage(api::ReturnCode::Result expectedResult)
+{
+    try {
+        std::shared_ptr<ExpectedMessage> msg(
+                fetchSingleMessage<ExpectedMessage>());
+        if (msg->getResult().getResult() != expectedResult) {
+            return "Got unexpected result: " + msg->getResult().toString();
+        }
+    } catch (std::exception& e) {
+        return e.what();
+    }
+    return "";
+}
+
+// Invoker exercising handleMergeBucket with the fixture's bucket/nodes.
+void
+MergeHandlerTest::HandleMergeBucketInvoker::invoke(
+        MergeHandlerTest& test,
+        MergeHandler& handler,
+        spi::Context& context)
+{
+    api::MergeBucketCommand cmd(test._bucket, test._nodes, test._maxTimestamp);
+    handler.handleMergeBucket(cmd, context);
+}
+
+// For each SPI operation handleMergeBucket depends on, injects a PERMANENT_ERROR
+// and verifies (via doTestSPIException) that the failure surfaces as a
+// runtime_error with the matching message and leaves no lingering merge state.
+void
+MergeHandlerTest::testMergeBucketSPIFailures()
+{
+    PersistenceProviderWrapper providerWrapper(
+            getPersistenceProvider());
+    MergeHandler handler(providerWrapper, getEnv());
+    providerWrapper.setResult(
+            spi::Result(spi::Result::PERMANENT_ERROR,
+                        "who you gonna call?"));
+    setUpChain(MIDDLE);
+
+    ExpectedExceptionSpec exceptions[] = {
+        { PersistenceProviderWrapper::FAIL_CREATE_BUCKET, "create bucket" },
+        { PersistenceProviderWrapper::FAIL_BUCKET_INFO, "get bucket info" },
+        { PersistenceProviderWrapper::FAIL_CREATE_ITERATOR, "create iterator" },
+        { PersistenceProviderWrapper::FAIL_ITERATE, "iterate" },
+    };
+    typedef ExpectedExceptionSpec* ExceptionIterator;
+    ExceptionIterator last = exceptions + sizeof(exceptions)/sizeof(exceptions[0]);
+
+    for (ExceptionIterator it = exceptions; it != last; ++it) {
+        HandleMergeBucketInvoker invoker;
+        CPPUNIT_ASSERT_EQUAL(std::string(),
+                             doTestSPIException(handler,
+                                                providerWrapper,
+                                                invoker,
+                                                *it));
+    }
+}
+
+// Invoker exercising handleGetBucketDiff with the fixture's bucket/nodes.
+void
+MergeHandlerTest::HandleGetBucketDiffInvoker::invoke(
+        MergeHandlerTest& test,
+        MergeHandler& handler,
+        spi::Context& context)
+{
+    api::GetBucketDiffCommand cmd(test._bucket, test._nodes, test._maxTimestamp);
+    handler.handleGetBucketDiff(cmd, context);
+}
+
+// Same failure-injection sweep as testMergeBucketSPIFailures, but for
+// handleGetBucketDiff on a middle node in the merge chain.
+void
+MergeHandlerTest::testGetBucketDiffSPIFailures()
+{
+    PersistenceProviderWrapper providerWrapper(
+            getPersistenceProvider());
+    MergeHandler handler(providerWrapper, getEnv());
+    providerWrapper.setResult(
+            spi::Result(spi::Result::PERMANENT_ERROR,
+                        "who you gonna call?"));
+    setUpChain(MIDDLE);
+
+    ExpectedExceptionSpec exceptions[] = {
+        { PersistenceProviderWrapper::FAIL_CREATE_BUCKET, "create bucket" },
+        { PersistenceProviderWrapper::FAIL_BUCKET_INFO, "get bucket info" },
+        { PersistenceProviderWrapper::FAIL_CREATE_ITERATOR, "create iterator" },
+        { PersistenceProviderWrapper::FAIL_ITERATE, "iterate" },
+    };
+
+    typedef ExpectedExceptionSpec* ExceptionIterator;
+    ExceptionIterator last = exceptions + sizeof(exceptions)/sizeof(exceptions[0]);
+
+    for (ExceptionIterator it = exceptions; it != last; ++it) {
+        HandleGetBucketDiffInvoker invoker;
+        CPPUNIT_ASSERT_EQUAL(std::string(),
+                             doTestSPIException(handler,
+                                                providerWrapper,
+                                                invoker,
+                                                *it));
+    }
+}
+
+// Invoker exercising handleApplyBucketDiff. The counter makes each invocation
+// use fresh timestamps so repeated runs do not collide on document versions.
+void
+MergeHandlerTest::HandleApplyBucketDiffInvoker::invoke(
+        MergeHandlerTest& test,
+        MergeHandler& handler,
+        spi::Context& context)
+{
+    ++_counter;
+    std::shared_ptr<api::ApplyBucketDiffCommand> cmd(
+            test.createDummyApplyDiff(100000 * _counter));
+    handler.handleApplyBucketDiff(*cmd, context);
+}
+
+// Failure-injection sweep for handleApplyBucketDiff. Also regression-checks
+// bug 6752085: the data-received metric must stay finite (no NaN) even when
+// the operation fails partway through.
+void
+MergeHandlerTest::testApplyBucketDiffSPIFailures()
+{
+    PersistenceProviderWrapper providerWrapper(
+            getPersistenceProvider());
+    MergeHandler handler(providerWrapper, getEnv());
+    providerWrapper.setResult(
+            spi::Result(spi::Result::PERMANENT_ERROR,
+                        "who you gonna call?"));
+    setUpChain(MIDDLE);
+
+    ExpectedExceptionSpec exceptions[] = {
+        { PersistenceProviderWrapper::FAIL_CREATE_ITERATOR, "create iterator" },
+        { PersistenceProviderWrapper::FAIL_ITERATE, "iterate" },
+        { PersistenceProviderWrapper::FAIL_PUT, "Failed put" },
+        { PersistenceProviderWrapper::FAIL_REMOVE, "Failed remove" },
+        { PersistenceProviderWrapper::FAIL_FLUSH, "Failed flush" },
+    };
+
+    typedef ExpectedExceptionSpec* ExceptionIterator;
+    ExceptionIterator last = exceptions + sizeof(exceptions)/sizeof(exceptions[0]);
+
+    for (ExceptionIterator it = exceptions; it != last; ++it) {
+        HandleApplyBucketDiffInvoker invoker;
+        CPPUNIT_ASSERT_EQUAL(std::string(),
+                             doTestSPIException(handler,
+                                                providerWrapper,
+                                                invoker,
+                                                *it));
+        // Casual, in-place testing of bug 6752085.
+        // This will fail if we give NaN to the metric in question.
+        CPPUNIT_ASSERT(std::isfinite(getEnv()._metrics
+                                     .mergeAverageDataReceivedNeeded.getLast()));
+    }
+}
+
+// Setup: start a merge and capture the resulting GetBucketDiffCommand so that
+// invoke() can feed a matching reply back into the handler.
+void
+MergeHandlerTest::HandleGetBucketDiffReplyInvoker::beforeInvoke(
+        MergeHandlerTest& test,
+        MergeHandler& handler,
+        spi::Context& context)
+{
+    api::MergeBucketCommand cmd(test._bucket, test._nodes, test._maxTimestamp);
+    handler.handleMergeBucket(cmd, context);
+    _diffCmd = test.fetchSingleMessage<api::GetBucketDiffCommand>();
+}
+
+// Feeds a reply for the command captured in beforeInvoke() back into the
+// handler; any messages it generates land in the local _stub.
+void
+MergeHandlerTest::HandleGetBucketDiffReplyInvoker::invoke(
+        MergeHandlerTest& test,
+        MergeHandler& handler,
+        spi::Context&)
+{
+    (void) test;
+    api::GetBucketDiffReply reply(*_diffCmd);
+    handler.handleGetBucketDiffReply(reply, _stub);
+}
+
+// Postcondition: no messages may have been sent via the stub, and the
+// original MergeBucketCommand must have been failed with INTERNAL_FAILURE
+// when the merge state was cleared.
+std::string
+MergeHandlerTest::HandleGetBucketDiffReplyInvoker::afterInvoke(
+        MergeHandlerTest& test,
+        MergeHandler& handler)
+{
+    (void) handler;
+    if (!_stub.commands.empty()) {
+        return "Unexpected commands in reply stub";
+    }
+    if (!_stub.replies.empty()) {
+        return "Unexpected replies in reply stub";
+    }
+    // Initial merge bucket should have been replied to by clearMergeStatus.
+    return test.checkMessage<api::MergeBucketReply>(
+            api::ReturnCode::INTERNAL_FAILURE);
+}
+
+// Failure-injection sweep for handleGetBucketDiffReply on the front node of
+// the merge chain (only iterator-related SPI calls are involved here).
+void
+MergeHandlerTest::testGetBucketDiffReplySPIFailures()
+{
+    PersistenceProviderWrapper providerWrapper(
+            getPersistenceProvider());
+    MergeHandler handler(providerWrapper, getEnv());
+    providerWrapper.setResult(
+            spi::Result(spi::Result::PERMANENT_ERROR,
+                        "who you gonna call?"));
+    HandleGetBucketDiffReplyInvoker invoker;
+
+    setUpChain(FRONT);
+
+    ExpectedExceptionSpec exceptions[] = {
+        { PersistenceProviderWrapper::FAIL_CREATE_ITERATOR, "create iterator" },
+        { PersistenceProviderWrapper::FAIL_ITERATE, "iterate" },
+    };
+
+    typedef ExpectedExceptionSpec* ExceptionIterator;
+    ExceptionIterator last = exceptions + sizeof(exceptions)/sizeof(exceptions[0]);
+
+    for (ExceptionIterator it = exceptions; it != last; ++it) {
+        CPPUNIT_ASSERT_EQUAL(std::string(),
+                             doTestSPIException(handler,
+                                                providerWrapper,
+                                                invoker,
+                                                *it));
+    }
+}
+
+// Setup for the apply-reply failure tests. Depending on chain position:
+//  FRONT  — run the full merge start (merge cmd -> diff cmd -> diff reply) so
+//           the handler itself produces the ApplyBucketDiffCommand we reply to;
+//  MIDDLE — simulate the unwind path by applying a dummy diff and capturing
+//           the forwarded ApplyBucketDiffCommand.
+// The counter keeps timestamps unique across the repeated invocations.
+void
+MergeHandlerTest::HandleApplyBucketDiffReplyInvoker::beforeInvoke(
+        MergeHandlerTest& test,
+        MergeHandler& handler,
+        spi::Context& context)
+{
+    ++_counter;
+    _stub.clear();
+    if (getChainPos() == FRONT) {
+        api::MergeBucketCommand cmd(test._bucket, test._nodes, test._maxTimestamp);
+        handler.handleMergeBucket(cmd, context);
+        std::shared_ptr<api::GetBucketDiffCommand> diffCmd(
+                test.fetchSingleMessage<api::GetBucketDiffCommand>());
+        std::shared_ptr<api::GetBucketDiffCommand> dummyDiff(
+                test.createDummyGetBucketDiff(100000 * _counter, 0x4));
+        diffCmd->getDiff() = dummyDiff->getDiff();
+
+        api::GetBucketDiffReply diffReply(*diffCmd);
+        handler.handleGetBucketDiffReply(diffReply, _stub);
+
+        CPPUNIT_ASSERT_EQUAL(size_t(1), _stub.commands.size());
+        _applyCmd = std::dynamic_pointer_cast<api::ApplyBucketDiffCommand>(
+                _stub.commands[0]);
+    } else {
+        // Pretend last node in chain has data and that it will be fetched when
+        // chain is unwinded.
+        std::shared_ptr<api::ApplyBucketDiffCommand> cmd(
+                test.createDummyApplyDiff(100000 * _counter, 0x4, false));
+        handler.handleApplyBucketDiff(*cmd, context);
+        _applyCmd = test.fetchSingleMessage<api::ApplyBucketDiffCommand>();
+    }
+}
+
+// Fills the captured command's diff with dummy document data and hands the
+// resulting reply to the handler; generated messages land in _stub.
+void
+MergeHandlerTest::HandleApplyBucketDiffReplyInvoker::invoke(
+        MergeHandlerTest& test,
+        MergeHandler& handler,
+        spi::Context&)
+{
+    (void) test;
+    api::ApplyBucketDiffReply reply(*_applyCmd);
+    test.fillDummyApplyDiff(reply.getDiff());
+    _stub.clear();
+    handler.handleApplyBucketDiffReply(reply, _stub);
+}
+
+// Postcondition: nothing sent via the stub, and the pending command — the
+// MergeBucketCommand on the front node, or the forwarded ApplyBucketDiffReply
+// on a middle node — must have been answered with INTERNAL_FAILURE.
+std::string
+MergeHandlerTest::HandleApplyBucketDiffReplyInvoker::afterInvoke(
+        MergeHandlerTest& test,
+        MergeHandler& handler)
+{
+    (void) handler;
+    if (!_stub.commands.empty()) {
+        return "Unexpected commands in reply stub";
+    }
+    if (!_stub.replies.empty()) {
+        return "Unexpected replies in reply stub";
+    }
+    if (getChainPos() == FRONT) {
+        return test.checkMessage<api::MergeBucketReply>(
+                api::ReturnCode::INTERNAL_FAILURE);
+    } else {
+        return test.checkMessage<api::ApplyBucketDiffReply>(
+                api::ReturnCode::INTERNAL_FAILURE);
+    }
+}
+
+// Failure-injection sweep for handleApplyBucketDiffReply, run twice: once
+// with this node at the FRONT of the merge chain and once in the MIDDLE,
+// since the two positions take different reply/forwarding paths.
+void
+MergeHandlerTest::testApplyBucketDiffReplySPIFailures()
+{
+    PersistenceProviderWrapper providerWrapper(
+            getPersistenceProvider());
+    HandleApplyBucketDiffReplyInvoker invoker;
+    for (int i = 0; i < 2; ++i) {
+        ChainPos pos(i == 0 ? FRONT : MIDDLE);
+        setUpChain(pos);
+        invoker.setChainPos(pos);
+        MergeHandler handler(providerWrapper, getEnv());
+        providerWrapper.setResult(
+                spi::Result(spi::Result::PERMANENT_ERROR,
+                            "who you gonna call?"));
+
+        ExpectedExceptionSpec exceptions[] = {
+            { PersistenceProviderWrapper::FAIL_CREATE_ITERATOR, "create iterator" },
+            { PersistenceProviderWrapper::FAIL_ITERATE, "iterate" },
+            { PersistenceProviderWrapper::FAIL_PUT, "Failed put" },
+            { PersistenceProviderWrapper::FAIL_REMOVE, "Failed remove" },
+            { PersistenceProviderWrapper::FAIL_FLUSH, "Failed flush" },
+        };
+
+        typedef ExpectedExceptionSpec* ExceptionIterator;
+        ExceptionIterator last = exceptions + sizeof(exceptions)/sizeof(exceptions[0]);
+
+        for (ExceptionIterator it = exceptions; it != last; ++it) {
+            CPPUNIT_ASSERT_EQUAL(std::string(),
+                                 doTestSPIException(handler,
+                                                    providerWrapper,
+                                                    invoker,
+                                                    *it));
+        }
+    }
+}
+
+// Unit-tests MergeStatus::removeFromDiff directly with three scenarios:
+//  1) all entries resolved (hasMask 0x0/0x7 vs node mask 0x7) -> diff emptied,
+//     returns true (progress made);
+//  2) hasMasks unchanged -> nothing removed, returns false (no progress);
+//  3) hasMasks changed but no entry fully resolved -> diff size unchanged,
+//     still returns true because the mask change counts as progress.
+void
+MergeHandlerTest::testRemoveFromDiff()
+{
+    framework::defaultimplementation::FakeClock clock;
+    MergeStatus status(clock, documentapi::LoadType::DEFAULT, 0, 0);
+
+    std::vector<api::GetBucketDiffCommand::Entry> diff(2);
+    diff[0]._timestamp = 1234;
+    diff[0]._flags = 0x1;
+    diff[0]._hasMask = 0x2;
+
+    diff[1]._timestamp = 5678;
+    diff[1]._flags = 0x3;
+    diff[1]._hasMask = 0x6;
+
+    status.diff.insert(status.diff.end(), diff.begin(), diff.end());
+
+    {
+        std::vector<api::ApplyBucketDiffCommand::Entry> applyDiff(2);
+        applyDiff[0]._entry._timestamp = 1234;
+        applyDiff[0]._entry._flags = 0x1;
+        applyDiff[0]._entry._hasMask = 0x0; // Removed during merging
+
+        applyDiff[1]._entry._timestamp = 5678;
+        applyDiff[1]._entry._flags = 0x3;
+        applyDiff[1]._entry._hasMask = 0x7;
+
+        CPPUNIT_ASSERT(status.removeFromDiff(applyDiff, 0x7));
+        CPPUNIT_ASSERT(status.diff.empty());
+    }
+
+    status.diff.insert(status.diff.end(), diff.begin(), diff.end());
+
+    {
+        std::vector<api::ApplyBucketDiffCommand::Entry> applyDiff(2);
+        applyDiff[0]._entry._timestamp = 1234;
+        applyDiff[0]._entry._flags = 0x1;
+        applyDiff[0]._entry._hasMask = 0x2;
+
+        applyDiff[1]._entry._timestamp = 5678;
+        applyDiff[1]._entry._flags = 0x3;
+        applyDiff[1]._entry._hasMask = 0x6;
+
+        CPPUNIT_ASSERT(!status.removeFromDiff(applyDiff, 0x7));
+        CPPUNIT_ASSERT_EQUAL(size_t(2), status.diff.size());
+    }
+
+    status.diff.clear();
+    status.diff.insert(status.diff.end(), diff.begin(), diff.end());
+
+    {
+        // Hasmasks have changed but diff still remains the same size.
+        std::vector<api::ApplyBucketDiffCommand::Entry> applyDiff(2);
+        applyDiff[0]._entry._timestamp = 1234;
+        applyDiff[0]._entry._flags = 0x1;
+        applyDiff[0]._entry._hasMask = 0x1;
+
+        applyDiff[1]._entry._timestamp = 5678;
+        applyDiff[1]._entry._flags = 0x3;
+        applyDiff[1]._entry._hasMask = 0x5;
+
+        CPPUNIT_ASSERT(status.removeFromDiff(applyDiff, 0x7));
+        CPPUNIT_ASSERT_EQUAL(size_t(2), status.diff.size());
+    }
+}
+
+// Verifies that applying a diff entry flagged IN_USE|DELETED on a timestamp
+// that already holds a put converts that timestamp into a remove: a
+// subsequent GetBucketDiff must report the timestamp with the DELETED flag.
+void
+MergeHandlerTest::testRemovePutOnExistingTimestamp()
+{
+    setUpChain(BACK);
+
+    document::TestDocMan docMan;
+    document::Document::SP doc(
+            docMan.createRandomDocumentAtLocation(_location));
+    spi::Timestamp ts(10111);
+    doPut(doc, ts);
+
+    MergeHandler handler(getPersistenceProvider(), getEnv());
+    std::vector<api::ApplyBucketDiffCommand::Entry> applyDiff;
+    {
+        api::ApplyBucketDiffCommand::Entry e;
+        e._entry._timestamp = ts;
+        e._entry._hasMask = 0x1;
+        e._docName = doc->getId().toString();
+        e._entry._flags = MergeHandler::IN_USE | MergeHandler::DELETED;
+        applyDiff.push_back(e);
+    }
+
+    std::shared_ptr<api::ApplyBucketDiffCommand> applyBucketDiffCmd(
+            new api::ApplyBucketDiffCommand(_bucket, _nodes, 1024*1024));
+    applyBucketDiffCmd->getDiff() = applyDiff;
+
+    MessageTracker::UP tracker = handler.handleApplyBucketDiff(*applyBucketDiffCmd, *_context);
+
+    api::ApplyBucketDiffReply::SP applyBucketDiffReply(
+            std::dynamic_pointer_cast<api::ApplyBucketDiffReply>(
+                tracker->getReply()));
+    CPPUNIT_ASSERT(applyBucketDiffReply.get());
+
+    // Start a fresh merge so we can inspect what the bucket's diff now
+    // contains for the affected timestamp.
+    api::MergeBucketCommand cmd(_bucket, _nodes, _maxTimestamp);
+    handler.handleMergeBucket(cmd, *_context);
+
+    std::shared_ptr<api::GetBucketDiffCommand> getBucketDiffCmd(
+            fetchSingleMessage<api::GetBucketDiffCommand>());
+
+    // Timestamp should now be a regular remove
+    bool foundTimestamp = false;
+    for (size_t i = 0; i < getBucketDiffCmd->getDiff().size(); ++i) {
+        const api::GetBucketDiffCommand::Entry& e(
+                getBucketDiffCmd->getDiff()[i]);
+        if (e._timestamp == ts) {
+            CPPUNIT_ASSERT_EQUAL(
+                    uint16_t(MergeHandler::IN_USE | MergeHandler::DELETED),
+                    e._flags);
+            foundTimestamp = true;
+            break;
+        }
+    }
+    CPPUNIT_ASSERT(foundTimestamp);
+}
+
+} // storage
diff --git a/storage/src/tests/persistence/persistenceproviderwrapper.cpp b/storage/src/tests/persistence/persistenceproviderwrapper.cpp
new file mode 100644
index 00000000000..4a09235ddce
--- /dev/null
+++ b/storage/src/tests/persistence/persistenceproviderwrapper.cpp
@@ -0,0 +1,222 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <iostream>
+#include <sstream>
+#include <tests/persistence/persistenceproviderwrapper.h>
+
+#define LOG_SPI(ops) \
+ { \
+ std::ostringstream logStream; \
+ logStream << ops; \
+ _log.push_back(logStream.str()); \
+ }
+
+#define CHECK_ERROR(className, failType) \
+ { \
+ if (_result.getErrorCode() != spi::Result::NONE && (_failureMask & (failType))) { \
+ return className(_result.getErrorCode(), _result.getErrorMessage()); \
+ } \
+ }
+
+namespace storage {
+
+namespace {
+
+// Returns a stable textual name for an IncludedVersions value, used when
+// formatting operation log entries; unknown values map to "!!UNKNOWN!!".
+const char*
+includedVersionsToString(spi::IncludedVersions versions)
+{
+    switch (versions) {
+    case spi::NEWEST_DOCUMENT_ONLY:
+        return "NEWEST_DOCUMENT_ONLY";
+    case spi::NEWEST_DOCUMENT_OR_REMOVE:
+        return "NEWEST_DOCUMENT_OR_REMOVE";
+    case spi::ALL_VERSIONS:
+        return "ALL_VERSIONS";
+    }
+    return "!!UNKNOWN!!";
+}
+
+} // anon namespace
+
+// Renders the accumulated operation log, one entry per line.
+std::string
+PersistenceProviderWrapper::toString() const
+{
+    std::ostringstream ss;
+    for (size_t i = 0; i < _log.size(); ++i) {
+        ss << _log[i] << "\n";
+    }
+    return ss.str();
+}
+
+// Each wrapper method below logs the call via LOG_SPI, short-circuits with
+// the configured error result when its FAIL_* flag is armed (CHECK_ERROR),
+// and otherwise delegates to the wrapped SPI.
+
+// Note: not failable — there is no FAIL_* flag for partition states.
+spi::PartitionStateListResult
+PersistenceProviderWrapper::getPartitionStates() const
+{
+    LOG_SPI("getPartitionStates()");
+    return _spi.getPartitionStates();
+}
+
+spi::BucketIdListResult
+PersistenceProviderWrapper::listBuckets(spi::PartitionId partitionId) const
+{
+    LOG_SPI("listBuckets(" << uint16_t(partitionId) << ")");
+    CHECK_ERROR(spi::BucketIdListResult, FAIL_LIST_BUCKETS);
+    return _spi.listBuckets(partitionId);
+}
+
+spi::Result
+PersistenceProviderWrapper::createBucket(const spi::Bucket& bucket,
+                                         spi::Context& context)
+{
+    LOG_SPI("createBucket(" << bucket << ")");
+    CHECK_ERROR(spi::Result, FAIL_CREATE_BUCKET);
+    return _spi.createBucket(bucket, context);
+}
+
+spi::BucketInfoResult
+PersistenceProviderWrapper::getBucketInfo(const spi::Bucket& bucket) const
+{
+    LOG_SPI("getBucketInfo(" << bucket << ")");
+    CHECK_ERROR(spi::BucketInfoResult, FAIL_BUCKET_INFO);
+    return _spi.getBucketInfo(bucket);
+}
+
+// Document mutation wrappers: log, optionally inject failure, delegate.
+spi::Result
+PersistenceProviderWrapper::put(const spi::Bucket& bucket,
+                                spi::Timestamp timestamp,
+                                const document::Document::SP& doc,
+                                spi::Context& context)
+{
+    LOG_SPI("put(" << bucket << ", " << timestamp << ", " << doc->getId() << ")");
+    CHECK_ERROR(spi::Result, FAIL_PUT);
+    return _spi.put(bucket, timestamp, doc, context);
+}
+
+spi::RemoveResult
+PersistenceProviderWrapper::remove(const spi::Bucket& bucket,
+                                   spi::Timestamp timestamp,
+                                   const spi::DocumentId& id,
+                                   spi::Context& context)
+{
+    LOG_SPI("remove(" << bucket << ", " << timestamp << ", " << id << ")");
+    CHECK_ERROR(spi::RemoveResult, FAIL_REMOVE);
+    return _spi.remove(bucket, timestamp, id, context);
+}
+
+spi::RemoveResult
+PersistenceProviderWrapper::removeIfFound(const spi::Bucket& bucket,
+                                          spi::Timestamp timestamp,
+                                          const spi::DocumentId& id,
+                                          spi::Context& context)
+{
+    LOG_SPI("removeIfFound(" << bucket << ", " << timestamp << ", " << id << ")");
+    CHECK_ERROR(spi::RemoveResult, FAIL_REMOVE_IF_FOUND);
+    return _spi.removeIfFound(bucket, timestamp, id, context);
+}
+
+spi::UpdateResult
+PersistenceProviderWrapper::update(const spi::Bucket& bucket,
+                                   spi::Timestamp timestamp,
+                                   const document::DocumentUpdate::SP& upd,
+                                   spi::Context& context)
+{
+    LOG_SPI("update(" << bucket << ", " << timestamp << ", " << upd->getId() << ")");
+    CHECK_ERROR(spi::UpdateResult, FAIL_UPDATE);
+    return _spi.update(bucket, timestamp, upd, context);
+}
+
+// Read / flush / iteration wrappers: log, optionally inject failure, delegate.
+spi::GetResult
+PersistenceProviderWrapper::get(const spi::Bucket& bucket,
+                                const document::FieldSet& fieldSet,
+                                const spi::DocumentId& id,
+                                spi::Context& context) const
+{
+    LOG_SPI("get(" << bucket << ", " << id << ")");
+    CHECK_ERROR(spi::GetResult, FAIL_GET);
+    return _spi.get(bucket, fieldSet, id, context);
+}
+
+spi::Result
+PersistenceProviderWrapper::flush(const spi::Bucket& bucket,
+                                  spi::Context& context)
+{
+    LOG_SPI("flush(" << bucket << ")");
+    CHECK_ERROR(spi::Result, FAIL_FLUSH);
+    return _spi.flush(bucket, context);
+}
+
+spi::CreateIteratorResult
+PersistenceProviderWrapper::createIterator(const spi::Bucket& bucket,
+                                           const document::FieldSet& fields,
+                                           const spi::Selection& sel,
+                                           spi::IncludedVersions versions,
+                                           spi::Context& context)
+{
+    // TODO: proper printing of FieldSet and Selection
+
+    LOG_SPI("createIterator(" << bucket << ", "
+            << includedVersionsToString(versions) << ")");
+    CHECK_ERROR(spi::CreateIteratorResult, FAIL_CREATE_ITERATOR);
+    return _spi.createIterator(bucket, fields, sel, versions, context);
+}
+
+spi::IterateResult
+PersistenceProviderWrapper::iterate(spi::IteratorId iterId,
+                                    uint64_t maxByteSize,
+                                    spi::Context& context) const
+{
+    LOG_SPI("iterate(" << uint64_t(iterId) << ", " << maxByteSize << ")");
+    CHECK_ERROR(spi::IterateResult, FAIL_ITERATE);
+    return _spi.iterate(iterId, maxByteSize, context);
+}
+
+// Bucket-maintenance wrappers: log, optionally inject failure, delegate.
+spi::Result
+PersistenceProviderWrapper::destroyIterator(spi::IteratorId iterId,
+                                            spi::Context& context)
+{
+    LOG_SPI("destroyIterator(" << uint64_t(iterId) << ")");
+    CHECK_ERROR(spi::Result, FAIL_DESTROY_ITERATOR);
+    return _spi.destroyIterator(iterId, context);
+}
+
+spi::Result
+PersistenceProviderWrapper::deleteBucket(const spi::Bucket& bucket,
+                                         spi::Context& context)
+{
+    LOG_SPI("deleteBucket(" << bucket << ")");
+    CHECK_ERROR(spi::Result, FAIL_DELETE_BUCKET);
+    return _spi.deleteBucket(bucket, context);
+}
+
+spi::Result
+PersistenceProviderWrapper::split(const spi::Bucket& source,
+                                  const spi::Bucket& target1,
+                                  const spi::Bucket& target2,
+                                  spi::Context& context)
+{
+    LOG_SPI("split(" << source << ", " << target1 << ", " << target2 << ")");
+    CHECK_ERROR(spi::Result, FAIL_SPLIT);
+    return _spi.split(source, target1, target2, context);
+}
+
+spi::Result
+PersistenceProviderWrapper::join(const spi::Bucket& source1,
+                                 const spi::Bucket& source2,
+                                 const spi::Bucket& target,
+                                 spi::Context& context)
+{
+    LOG_SPI("join(" << source1 << ", " << source2 << ", " << target << ")");
+    CHECK_ERROR(spi::Result, FAIL_JOIN);
+    return _spi.join(source1, source2, target, context);
+}
+
+spi::Result
+PersistenceProviderWrapper::removeEntry(const spi::Bucket& bucket,
+                                        spi::Timestamp timestamp,
+                                        spi::Context& context)
+{
+    // NOTE(review): the log string says "revert" while the operation is
+    // removeEntry (and the flag is FAIL_REVERT) — presumably legacy naming;
+    // kept as-is since existing tests may match on this exact log text.
+    LOG_SPI("revert(" << bucket << ", " << timestamp << ")");
+    CHECK_ERROR(spi::Result, FAIL_REVERT);
+    return _spi.removeEntry(bucket, timestamp, context);
+}
+
+}
diff --git a/storage/src/tests/persistence/persistenceproviderwrapper.h b/storage/src/tests/persistence/persistenceproviderwrapper.h
new file mode 100644
index 00000000000..b115eb7ef3d
--- /dev/null
+++ b/storage/src/tests/persistence/persistenceproviderwrapper.h
@@ -0,0 +1,153 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * \class storage::PersistenceProviderWrapper
+ *
+ * \brief Test utility class for intercepting all operations upon a
+ * persistence layer, injecting errors and performing logging.
+ *
+ * The PersistenceProviderWrapper class implements the basic SPI by
+ * logging all operations and then delegating handling the operation
+ * to the SPI instance given during construction. If an error result
+ * is specified and the operation invoked is tagged that it should be
+ * failed via setFailureMask(), the operation on the wrapped SPI will
+ * not be executed, but the given error result will be immediately
+ * returned instead (wrapped in the proper return type).
+ */
+#pragma once
+
+#include <vector>
+#include <string>
+#include <vespa/persistence/spi/abstractpersistenceprovider.h>
+
+namespace storage {
+
+class PersistenceProviderWrapper : public spi::AbstractPersistenceProvider
+{
+public:
+    // Bitmask flags selecting which operations should fail with the
+    // configured result (see setFailureMask / setResult).
+    enum OPERATION_FAILURE_FLAGS
+    {
+        FAIL_LIST_BUCKETS     = 1 << 0,
+        FAIL_BUCKET_INFO      = 1 << 1,
+        FAIL_GET              = 1 << 2,
+        FAIL_PUT              = 1 << 3,
+        FAIL_REMOVE           = 1 << 4,
+        FAIL_REMOVE_IF_FOUND  = 1 << 5,
+        FAIL_REPLACE_WITH_REMOVE  = 1 << 6,
+        FAIL_UPDATE           = 1 << 7,
+        FAIL_REVERT           = 1 << 8,
+        FAIL_FLUSH            = 1 << 9,
+        FAIL_CREATE_ITERATOR  = 1 << 10,
+        FAIL_ITERATE          = 1 << 11,
+        FAIL_DESTROY_ITERATOR = 1 << 12,
+        FAIL_DELETE_BUCKET    = 1 << 13,
+        FAIL_SPLIT            = 1 << 14,
+        FAIL_JOIN             = 1 << 15,
+        FAIL_CREATE_BUCKET    = 1 << 16,
+        FAIL_BUCKET_PERSISTENCE = FAIL_PUT|FAIL_REMOVE|FAIL_UPDATE|FAIL_REVERT|FAIL_FLUSH,
+        // Must cover every single-operation flag above. The highest flag is
+        // FAIL_CREATE_BUCKET = 1 << 16, so the mask is 0x1ffff; the previous
+        // value 0xffff stopped at 1 << 15 and silently excluded it.
+        FAIL_ALL_OPERATIONS   = 0x1ffff,
+        // TODO: add more as needed
+    };
+private:
+    spi::PersistenceProvider& _spi;
+    spi::Result _result;
+    // mutable: const SPI methods (get, iterate, ...) still append log entries.
+    mutable std::vector<std::string> _log;
+    uint32_t _failureMask;
+public:
+    PersistenceProviderWrapper(spi::PersistenceProvider& spi)
+        : _spi(spi),
+          _result(spi::Result(spi::Result::NONE, "")),
+          _log(),
+          _failureMask(0)
+    {
+    }
+
+    /**
+     * Explicitly set result to anything != NONE to have all operations
+     * return the given error without the wrapped SPI ever being invoked.
+     */
+    void setResult(const spi::Result& result) {
+        _result = result;
+    }
+    void clearResult() {
+        _result = spi::Result(spi::Result::NONE, "");
+    }
+    const spi::Result& getResult() const { return _result; }
+    /**
+     * Set a mask for operations to fail with _result
+     */
+    void setFailureMask(uint32_t mask) { _failureMask = mask; }
+    uint32_t getFailureMask() const { return _failureMask; }
+
+    /**
+     * Get a string representation of all the operations performed on the
+     * SPI with a newline separating each operation.
+     */
+    std::string toString() const;
+    /**
+     * Clear log of all operations performed.
+     */
+    void clearOperationLog() { _log.clear(); }
+    const std::vector<std::string>& getOperationLog() const { return _log; }
+
+    spi::Result createBucket(const spi::Bucket&, spi::Context&);
+
+    spi::PartitionStateListResult getPartitionStates() const;
+
+    spi::BucketIdListResult listBuckets(spi::PartitionId) const;
+
+    spi::BucketInfoResult getBucketInfo(const spi::Bucket&) const;
+
+    spi::Result put(const spi::Bucket&, spi::Timestamp, const document::Document::SP&, spi::Context&);
+
+    spi::RemoveResult remove(const spi::Bucket&,
+                             spi::Timestamp,
+                             const spi::DocumentId&,
+                             spi::Context&);
+
+    spi::RemoveResult removeIfFound(const spi::Bucket&,
+                                    spi::Timestamp,
+                                    const spi::DocumentId&,
+                                    spi::Context&);
+
+    spi::UpdateResult update(const spi::Bucket&,
+                             spi::Timestamp,
+                             const document::DocumentUpdate::SP&,
+                             spi::Context&);
+
+    spi::GetResult get(const spi::Bucket&,
+                       const document::FieldSet&,
+                       const spi::DocumentId&,
+                       spi::Context&) const;
+
+    spi::Result flush(const spi::Bucket&, spi::Context&);
+
+    spi::CreateIteratorResult createIterator(const spi::Bucket&,
+                                             const document::FieldSet&,
+                                             const spi::Selection&,
+                                             spi::IncludedVersions versions,
+                                             spi::Context&);
+
+    spi::IterateResult iterate(spi::IteratorId,
+                               uint64_t maxByteSize, spi::Context&) const;
+
+    spi::Result destroyIterator(spi::IteratorId, spi::Context&);
+
+    spi::Result deleteBucket(const spi::Bucket&, spi::Context&);
+
+    spi::Result split(const spi::Bucket& source,
+                      const spi::Bucket& target1,
+                      const spi::Bucket& target2,
+                      spi::Context&);
+
+    spi::Result join(const spi::Bucket& source1,
+                     const spi::Bucket& source2,
+                     const spi::Bucket& target,
+                     spi::Context&);
+
+    spi::Result removeEntry(const spi::Bucket&,
+                            spi::Timestamp,
+                            spi::Context&);
+};
+
+} // storage
+
diff --git a/storage/src/tests/persistence/persistencequeuetest.cpp b/storage/src/tests/persistence/persistencequeuetest.cpp
new file mode 100644
index 00000000000..06daf2a975c
--- /dev/null
+++ b/storage/src/tests/persistence/persistencequeuetest.cpp
@@ -0,0 +1,103 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/log/log.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <tests/persistence/persistenceproviderwrapper.h>
+#include <vespa/persistence/dummyimpl/dummypersistence.h>
+#include <tests/persistence/filestorage/filestortestfixture.h>
+#include <tests/persistence/filestorage/forwardingmessagesender.h>
+
+LOG_SETUP(".persistencequeuetest");
+
+namespace storage {
+
+// CppUnit fixture for FileStorHandler queue behavior: verifies that fetching
+// the next message skips buckets that are already locked.
+class PersistenceQueueTest : public FileStorTestFixture
+{
+public:
+    void testFetchNextUnlockedMessageIfBucketLocked();
+
+    // Builds a PutCommand for a document in the given 16-bit bucket.
+    std::shared_ptr<api::StorageMessage>
+    createPut(uint64_t bucket, uint64_t docIdx);
+
+    void setUp() override;
+    void tearDown() override;
+
+    CPPUNIT_TEST_SUITE(PersistenceQueueTest);
+    CPPUNIT_TEST(testFetchNextUnlockedMessageIfBucketLocked);
+    CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(PersistenceQueueTest);
+
+// Per-test setup: single-disk node backed by an in-memory dummy persistence.
+void
+PersistenceQueueTest::setUp()
+{
+    setupDisks(1);
+    _node->setPersistenceProvider(
+            spi::PersistenceProvider::UP(
+                    new spi::dummy::DummyPersistence(_node->getTypeRepo(), 1)));
+}
+
+// Per-test teardown: destroy the node (and with it the dummy persistence).
+void
+PersistenceQueueTest::tearDown()
+{
+    _node.reset(0);
+}
+
+// Creates a PutCommand for document "id:foo:testdoctype1:n=<bucket>:<docIdx>"
+// targeted at the 16-bit bucket <bucket>, addressed to storage node 0.
+std::shared_ptr<api::StorageMessage>
+PersistenceQueueTest::createPut(uint64_t bucket, uint64_t docIdx)
+{
+    std::ostringstream id;
+    id << "id:foo:testdoctype1:n=" << bucket << ":" << docIdx;
+    document::Document::SP doc(
+            _node->getTestDocMan().createDocument("foobar", id.str()));
+    std::shared_ptr<api::PutCommand> cmd(
+            new api::PutCommand(document::BucketId(16, bucket), doc, 1234));
+    cmd->setAddress(api::StorageMessageAddress(
+            "storage", lib::NodeType::STORAGE, 0));
+    return cmd;
+}
+
+// Verifies that getNextMessage skips buckets whose lock is still held:
+// with two puts queued for bucket 1234 and one for 5432, the second fetch
+// (while the first lock is outstanding) must return the put for 5432, not
+// the second put for 1234.
+void
+PersistenceQueueTest::testFetchNextUnlockedMessageIfBucketLocked()
+{
+    DummyStorageLink top;
+    DummyStorageLink *dummyManager;
+    top.push_back(std::unique_ptr<StorageLink>(
+                          dummyManager = new DummyStorageLink));
+    top.open();
+    ForwardingMessageSender messageSender(*dummyManager);
+
+    documentapi::LoadTypeSet loadTypes("raw:");
+    FileStorMetrics metrics(loadTypes.getMetricLoadTypes());
+    metrics.initDiskMetrics(_node->getPartitions().size(),
+                            loadTypes.getMetricLoadTypes(), 1);
+
+    FileStorHandler filestorHandler(messageSender, metrics,
+                                    _node->getPartitions(),
+                                    _node->getComponentRegister(), 255, 0);
+
+    // Send 2 puts, 2 to the first bucket, 1 to the second. Calling
+    // getNextMessage 2 times should then return a lock on the first bucket,
+    // then subsequently on the second, skipping the already locked bucket.
+    // Puts all have same pri, so order is well defined.
+    filestorHandler.schedule(createPut(1234, 0), 0);
+    filestorHandler.schedule(createPut(1234, 1), 0);
+    filestorHandler.schedule(createPut(5432, 0), 0);
+
+    auto lock0 = filestorHandler.getNextMessage(0, 255);
+    CPPUNIT_ASSERT(lock0.first.get());
+    CPPUNIT_ASSERT_EQUAL(
+            document::BucketId(16, 1234),
+            dynamic_cast<api::PutCommand&>(*lock0.second).getBucketId());
+
+    // lock0 is still held here, so bucket 1234 must be skipped.
+    auto lock1 = filestorHandler.getNextMessage(0, 255);
+    CPPUNIT_ASSERT(lock1.first.get());
+    CPPUNIT_ASSERT_EQUAL(
+            document::BucketId(16, 5432),
+            dynamic_cast<api::PutCommand&>(*lock1.second).getBucketId());
+}
+
+} // namespace storage
diff --git a/storage/src/tests/persistence/persistencetestutils.cpp b/storage/src/tests/persistence/persistencetestutils.cpp
new file mode 100644
index 00000000000..47ec23147f1
--- /dev/null
+++ b/storage/src/tests/persistence/persistencetestutils.cpp
@@ -0,0 +1,412 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+
+#include <vespa/document/datatype/documenttype.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <tests/persistence/persistencetestutils.h>
+#include <vespa/persistence/dummyimpl/dummypersistence.h>
+
+using document::DocumentType;
+using storage::framework::defaultimplementation::AllocationLogic;
+
+namespace storage {
+
+namespace {
+
+    // Load type used for every spi::Context constructed by these utilities.
+    spi::LoadType defaultLoadType(0, "default");
+
+    // Wipes any previous vdsroot directory and recreates one disk directory
+    // per requested disk, then returns the standard test configuration.
+    // NOTE(review): the return values of system() are ignored here; a failed
+    // rm/mkdir would only surface later as unrelated test failures.
+    vdstestlib::DirConfig initialize(uint32_t numDisks) {
+        system(vespalib::make_string("rm -rf vdsroot").c_str());
+        for (uint32_t i = 0; i < numDisks; i++) {
+            system(vespalib::make_string("mkdir -p vdsroot/disks/d%d", i).c_str());
+        }
+        vdstestlib::DirConfig config(getStandardConfig(true));
+        return config;
+    }
+
+    // Small helper that subscribes to a config type T on construction and
+    // stores the last configuration delivered via configure().
+    template<typename T>
+    struct ConfigReader : public T::Subscriber
+    {
+        T config;
+
+        ConfigReader(const std::string& configId) {
+            T::subscribe(configId, *this);
+        }
+        void configure(const T& c) { config = c; }
+    };
+}
+
+// Builds a complete single-node persistence test environment: on-disk config
+// (via initialize()), a dummy persistence provider, metrics for each disk,
+// a FileStorHandler, and one PersistenceUtil per disk in _diskEnvs.
+PersistenceTestEnvironment::PersistenceTestEnvironment(DiskCount numDisks)
+    : _config(initialize(numDisks)),
+      _messageKeeper(),
+      _node(numDisks, NodeIndex(0), _config.getConfigId()),
+      _component(_node.getComponentRegister(), "persistence test env"),
+      _metrics(_component.getLoadTypes()->getMetricLoadTypes())
+{
+    _node.setupDummyPersistence();
+    _metrics.initDiskMetrics(
+            numDisks, _node.getLoadTypes()->getMetricLoadTypes(), 1);
+    _handler.reset(new FileStorHandler(
+            _messageKeeper, _metrics,
+            _node.getPersistenceProvider().getPartitionStates().getList(),
+            _node.getComponentRegister(), 255, 0));
+    // One PersistenceUtil per disk, all sharing the same handler; disk index i
+    // selects the matching per-disk metrics thread instance.
+    for (uint32_t i = 0; i < numDisks; i++) {
+        _diskEnvs.push_back(
+                vespalib::LinkedPtr<PersistenceUtil>(
+                        new PersistenceUtil(
+                                _config.getConfigId(),
+                                _node.getComponentRegister(),
+                                *_handler,
+                                *_metrics.disks[i]->threads[0],
+                                i,
+                                255,
+                                _node.getPersistenceProvider())));
+    }
+}
+
+// The environment is created lazily via setupDisks(), not in the constructor,
+// so a fixture can choose its disk count per test.
+PersistenceTestUtils::PersistenceTestUtils()
+{
+}
+
+PersistenceTestUtils::~PersistenceTestUtils()
+{
+}
+
+// Returns a textual dump of the bucket's entries straight from the dummy
+// persistence provider (requires that setupDummyPersistence() was used).
+std::string
+PersistenceTestUtils::dumpBucket(const document::BucketId& bid,
+                                 uint16_t disk) {
+    return dynamic_cast<spi::dummy::DummyPersistence&>(_env->_node.getPersistenceProvider()).dumpBucket(spi::Bucket(bid, spi::PartitionId(disk)));
+}
+
+// (Re)creates the whole test environment with the given number of disks.
+void
+PersistenceTestUtils::setupDisks(uint32_t numDisks) {
+    _env.reset(new PersistenceTestEnvironment(DiskCount(numDisks)));
+}
+
+// Creates a persistence thread bound to the given disk, wired to the
+// environment's handler, metrics and provider. 255 is the same stripe/priority
+// bound used when the FileStorHandler was constructed.
+std::unique_ptr<PersistenceThread>
+PersistenceTestUtils::createPersistenceThread(uint32_t disk)
+{
+    return std::unique_ptr<PersistenceThread>(
+            new PersistenceThread(_env->_node.getComponentRegister(),
+                                  _env->_config.getConfigId(),
+                                  getPersistenceProvider(),
+                                  getEnv()._fileStorHandler,
+                                  getEnv()._metrics,
+                                  disk,
+                                  255,
+                                  false));
+}
+
+// Generates a random document at the given location and schedules a Put for
+// it on the given disk via the filestor handler (it is queued, not executed).
+// Returns the generated document so the caller can assert on it later.
+document::Document::SP
+PersistenceTestUtils::schedulePut(
+        uint32_t location,
+        spi::Timestamp timestamp,
+        uint16_t disk,
+        uint32_t minSize,
+        uint32_t maxSize)
+{
+    // The timestamp doubles as the random seed for document generation.
+    document::Document::SP doc(createRandomDocumentAtLocation(
+            location, timestamp, minSize, maxSize));
+    std::shared_ptr<api::StorageMessage> msg(
+            new api::PutCommand(
+                    document::BucketId(16, location), doc, timestamp));
+    fsHandler().schedule(msg, disk);
+    return doc;
+}
+
+// Looks up an existing bucket database entry ("foo" is just the client name
+// recorded for the lock).
+StorBucketDatabase::WrappedEntry
+PersistenceTestUtils::getBucket(const document::BucketId& id)
+{
+    return _env->_node.getStorageBucketDatabase().get(id, "foo");
+}
+
+// Like getBucket(), but creates the entry if it does not yet exist.
+StorBucketDatabase::WrappedEntry
+PersistenceTestUtils::createBucket(const document::BucketId& id)
+{
+    return _env->_node.getStorageBucketDatabase().get(
+            id,
+            "foo",
+            StorBucketDatabase::CREATE_IF_NONEXISTING);
+}
+
+// Convenience accessor for the node's persistence provider.
+spi::PersistenceProvider&
+PersistenceTestUtils::getPersistenceProvider()
+{
+    return _env->_node.getPersistenceProvider();
+}
+
+// Renders a one-line status string for a bucket: "<id>: null" when the bucket
+// is not in the database, otherwise "<id>: <doccount>,<disk>".
+std::string
+PersistenceTestUtils::getBucketStatus(const document::BucketId& id)
+{
+    std::ostringstream ost;
+    StorBucketDatabase::WrappedEntry entry(
+            _env->_node.getStorageBucketDatabase().get(
+                    id, "foo"));
+
+    ost << id << ": ";
+    if (!entry.exist()) {
+        ost << "null";
+    } else {
+        ost << entry->getBucketInfo().getDocumentCount() << "," << entry->disk;
+    }
+
+    return ost.str();
+}
+
+// Puts a randomly generated document at the given location directly through
+// the SPI (bypassing the filestor queue), creating the bucket first.
+// Returns the document that was inserted.
+document::Document::SP
+PersistenceTestUtils::doPutOnDisk(
+        uint16_t disk,
+        uint32_t location,
+        spi::Timestamp timestamp,
+        uint32_t minSize,
+        uint32_t maxSize)
+{
+    // Timestamp is reused as the generation seed, as in schedulePut().
+    document::Document::SP doc(createRandomDocumentAtLocation(
+            location, timestamp, minSize, maxSize));
+    spi::Bucket b(document::BucketId(16, location), spi::PartitionId(disk));
+    spi::Context context(defaultLoadType, spi::Priority(0),
+                         spi::Trace::TraceLevel(0));
+
+    // createBucket is assumed idempotent for repeated puts to the same
+    // location — TODO confirm against the SPI contract.
+    getPersistenceProvider().createBucket(b, context);
+
+    getPersistenceProvider().put(spi::Bucket(b), timestamp, doc, context);
+
+    getPersistenceProvider().flush(b, context);
+    return doc;
+}
+
+// Removes a document through the SPI. With persistRemove set, uses
+// removeIfFound() (only writes a remove entry when the doc exists); otherwise
+// uses remove(), which always persists a remove entry.
+// Returns true if the document was found.
+bool
+PersistenceTestUtils::doRemoveOnDisk(
+        uint16_t disk,
+        const document::BucketId& bucketId,
+        const document::DocumentId& docId,
+        spi::Timestamp timestamp,
+        bool persistRemove)
+{
+    spi::Context context(defaultLoadType, spi::Priority(0),
+                         spi::Trace::TraceLevel(0));
+    if (persistRemove) {
+        spi::RemoveResult result = getPersistenceProvider().removeIfFound(
+                spi::Bucket(bucketId, spi::PartitionId(disk)),
+                timestamp, docId, context);
+        return result.wasFound();
+    }
+    spi::RemoveResult result = getPersistenceProvider().remove(
+            spi::Bucket(bucketId, spi::PartitionId(disk)),
+            timestamp, docId, context);
+
+    return result.wasFound();
+}
+
+// Unconditional remove through the SPI (always persists a remove entry,
+// regardless of whether the document exists). Returns true if it was found.
+bool
+PersistenceTestUtils::doUnrevertableRemoveOnDisk(
+        uint16_t disk,
+        const document::BucketId& bucketId,
+        const document::DocumentId& docId,
+        spi::Timestamp timestamp)
+{
+    spi::Context context(defaultLoadType, spi::Priority(0),
+                         spi::Trace::TraceLevel(0));
+    spi::RemoveResult result = getPersistenceProvider().remove(
+            spi::Bucket(bucketId, spi::PartitionId(disk)),
+            timestamp, docId, context);
+    return result.wasFound();
+}
+
+// Fetches a document through the SPI. With headerOnly set, restricts the
+// field set to header fields only; otherwise all fields are returned.
+spi::GetResult
+PersistenceTestUtils::doGetOnDisk(
+        uint16_t disk,
+        const document::BucketId& bucketId,
+        const document::DocumentId& docId,
+        bool headerOnly)
+{
+    document::FieldSet::UP fieldSet(new document::AllFields());
+    spi::Context context(defaultLoadType, spi::Priority(0),
+                         spi::Trace::TraceLevel(0));
+    if (headerOnly) {
+        fieldSet.reset(new document::HeaderFields());
+    }
+    return getPersistenceProvider().get(spi::Bucket(
+            bucketId, spi::PartitionId(disk)), *fieldSet, docId, context);
+}
+
+// Builds a DocumentUpdate for testdoctype1 that assigns updateValue to the
+// body field "content".
+document::DocumentUpdate::SP
+PersistenceTestUtils::createBodyUpdate(
+        const document::DocumentId& docId,
+        const document::FieldValue& updateValue)
+{
+    const DocumentType* docType(_env->_component.getTypeRepo()
+            ->getDocumentType("testdoctype1"));
+    document::DocumentUpdate::SP update(
+            new document::DocumentUpdate(*docType, docId));
+    std::shared_ptr<document::AssignValueUpdate> assignUpdate(
+            new document::AssignValueUpdate(updateValue));
+    document::FieldUpdate fieldUpdate(docType->getField("content"));
+    fieldUpdate.addUpdate(*assignUpdate);
+    update->addUpdate(fieldUpdate);
+    return update;
+}
+
+// Same as createBodyUpdate(), but targets the header field "headerval".
+document::DocumentUpdate::SP
+PersistenceTestUtils::createHeaderUpdate(
+        const document::DocumentId& docId,
+        const document::FieldValue& updateValue)
+{
+    const DocumentType* docType(_env->_component.getTypeRepo()
+            ->getDocumentType("testdoctype1"));
+    document::DocumentUpdate::SP update(
+            new document::DocumentUpdate(*docType, docId));
+    std::shared_ptr<document::AssignValueUpdate> assignUpdate(
+            new document::AssignValueUpdate(updateValue));
+    document::FieldUpdate fieldUpdate(docType->getField("headerval"));
+    fieldUpdate.addUpdate(*assignUpdate);
+    update->addUpdate(fieldUpdate);
+    return update;
+}
+
+// Resolves which disk a bucket lives on. A disk value of 0xffff means
+// "unset": look it up in the bucket database and throw if the bucket is not
+// there. Any other value is passed through unchanged.
+uint16_t
+PersistenceTestUtils::getDiskFromBucketDatabaseIfUnset(const document::BucketId& bucket,
+                                                       uint16_t disk)
+{
+    if (disk == 0xffff) {
+        StorBucketDatabase::WrappedEntry entry(
+                getEnv().getBucketDatabase().get(bucket, "createTestBucket"));
+        if (entry.exist()) {
+            return entry->disk;
+        } else {
+            std::ostringstream error;
+            error << bucket << " not in db and disk unset";
+            throw vespalib::IllegalStateException(error.str(), VESPA_STRLOC);
+        }
+    }
+    return disk;
+}
+
+// Puts a document, deriving the bucket from the document id (truncated to
+// usedBits) and resolving the disk via the bucket database when unset.
+void
+PersistenceTestUtils::doPut(const document::Document::SP& doc,
+                            spi::Timestamp time,
+                            uint16_t disk,
+                            uint16_t usedBits)
+{
+    document::BucketId bucket(
+            _env->_component.getBucketIdFactory().getBucketId(doc->getId()));
+    bucket.setUsedBits(usedBits);
+    disk = getDiskFromBucketDatabaseIfUnset(bucket, disk);
+
+    doPut(doc, bucket, time, disk);
+}
+
+// Puts a document into an explicitly given bucket/disk through the SPI,
+// creating the bucket first. Note: unlike doPutOnDisk(), no flush() is issued.
+void
+PersistenceTestUtils::doPut(const document::Document::SP& doc,
+                            document::BucketId bid,
+                            spi::Timestamp time,
+                            uint16_t disk)
+{
+    spi::Bucket b(bid, spi::PartitionId(disk));
+    spi::Context context(defaultLoadType, spi::Priority(0),
+                         spi::Trace::TraceLevel(0));
+    getPersistenceProvider().createBucket(b, context);
+    getPersistenceProvider().put(b, time, doc, context);
+}
+
+// Applies a document update directly through the SPI and returns its result.
+spi::UpdateResult
+PersistenceTestUtils::doUpdate(document::BucketId bid,
+                               const document::DocumentUpdate::SP& update,
+                               spi::Timestamp time,
+                               uint16_t disk)
+{
+    spi::Context context(defaultLoadType, spi::Priority(0),
+                         spi::Trace::TraceLevel(0));
+    return getPersistenceProvider().update(
+            spi::Bucket(bid, spi::PartitionId(disk)), time, update, context);
+}
+
+// Removes a document, deriving bucket and disk the same way as doPut().
+// With unrevertableRemove set, persists the remove unconditionally;
+// otherwise uses removeIfFound() and throws if the document did not exist
+// (so tests fail loudly on a bad precondition).
+void
+PersistenceTestUtils::doRemove(const document::DocumentId& id, spi::Timestamp time,
+                               uint16_t disk, bool unrevertableRemove,
+                               uint16_t usedBits)
+{
+    document::BucketId bucket(
+            _env->_component.getBucketIdFactory().getBucketId(id));
+    bucket.setUsedBits(usedBits);
+    disk = getDiskFromBucketDatabaseIfUnset(bucket, disk);
+    spi::Context context(defaultLoadType, spi::Priority(0),
+                         spi::Trace::TraceLevel(0));
+    if (unrevertableRemove) {
+        getPersistenceProvider().remove(
+                spi::Bucket(bucket, spi::PartitionId(disk)), time, id, context);
+    } else {
+        spi::RemoveResult result = getPersistenceProvider().removeIfFound(
+                spi::Bucket(bucket, spi::PartitionId(disk)), time, id, context);
+        if (!result.wasFound()) {
+            throw vespalib::IllegalStateException(
+                    "Attempted to remove non-existing doc " + id.toString(),
+                    VESPA_STRLOC);
+        }
+    }
+}
+
+// Strips all body fields from doc in place by serializing only the header
+// and deserializing it back over the same object.
+void
+PersistenceTestUtils::clearBody(document::Document& doc)
+{
+    // FIXME(vekterli): temporary solution while we don't have
+    // fieldset pruning functionality in Document.
+    //doc->getBody().clear();
+    vespalib::nbostream stream;
+    doc.serializeHeader(stream);
+    doc.deserialize(*_env->_component.getTypeRepo(), stream);
+}
+
+// Deterministic random-document generator: the same (location, seed) pair
+// always yields the same document.
+document::Document::UP
+PersistenceTestUtils::createRandomDocumentAtLocation(
+        uint64_t location, uint32_t seed,
+        uint32_t minDocSize, uint32_t maxDocSize)
+{
+    return _env->_testDocMan.createRandomDocumentAtLocation(
+            location, seed, minDocSize, maxDocSize);
+}
+
+// Populates a bucket with a representative mix of content states: plain puts,
+// puts later overwritten, revertable removes and unrevertable removes — each
+// in both header-only and full-document variants — so tests exercising bucket
+// operations have realistic data to work against.
+void
+PersistenceTestUtils::createTestBucket(const document::BucketId& bucket,
+                                       uint16_t disk)
+{
+
+    uint32_t opsPerType = 2;
+    uint32_t numberOfLocations = 2;
+    uint32_t minDocSize = 0;
+    uint32_t maxDocSize = 128;
+    for (uint32_t useHeaderOnly = 0; useHeaderOnly < 2; ++useHeaderOnly) {
+        bool headerOnly = (useHeaderOnly == 1);
+        for (uint32_t optype=0; optype < 4; ++optype) {
+            for (uint32_t i=0; i<opsPerType; ++i) {
+                // Seed encodes (header-variant, optype, index) so every
+                // generated document is unique and reproducible.
+                uint32_t seed = useHeaderOnly * 10000 + optype * 1000 + i + 1;
+                // Upper 32 bits select the location; lower 32 bits are taken
+                // from the target bucket so the doc hashes into it.
+                uint64_t location = (seed % numberOfLocations);
+                location <<= 32;
+                location += (bucket.getRawId() & 0xffffffff);
+                document::Document::SP doc(
+                        createRandomDocumentAtLocation(
+                            location, seed, minDocSize, maxDocSize));
+                if (headerOnly) {
+                    clearBody(*doc);
+                }
+                doPut(doc, spi::Timestamp(seed), disk, bucket.getUsedBits());
+                if (optype == 0) { // Regular put
+                } else if (optype == 1) { // Overwritten later in time
+                    document::Document::SP doc2(new document::Document(*doc));
+                    doc2->setValue(doc2->getField("content"),
+                                   document::StringFieldValue("overwritten"));
+                    doPut(doc2, spi::Timestamp(seed + 500),
+                          disk, bucket.getUsedBits());
+                } else if (optype == 2) { // Removed
+                    doRemove(doc->getId(), spi::Timestamp(seed + 500), disk, false,
+                             bucket.getUsedBits());
+                } else if (optype == 3) { // Unrevertable removed
+                    doRemove(doc->getId(), spi::Timestamp(seed), disk, true,
+                             bucket.getUsedBits());
+                }
+            }
+        }
+    }
+}
+
+} // storage
diff --git a/storage/src/tests/persistence/persistencetestutils.h b/storage/src/tests/persistence/persistencetestutils.h
new file mode 100644
index 00000000000..d584b4dce45
--- /dev/null
+++ b/storage/src/tests/persistence/persistencetestutils.h
@@ -0,0 +1,214 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <vespa/document/base/testdocman.h>
+#include <vespa/storage/common/messagesender.h>
+#include <vespa/storage/common/storagecomponent.h>
+#include <vespa/storage/persistence/filestorage/filestorhandler.h>
+#include <vespa/storage/persistence/persistenceutil.h>
+#include <vespa/storageframework/defaultimplementation/memory/memorymanager.h>
+#include <tests/common/teststorageapp.h>
+#include <tests/common/testhelper.h>
+#include <vespa/persistence/spi/persistenceprovider.h>
+#include <vespa/storage/persistence/persistencethread.h>
+#include <vespa/persistence/dummyimpl/dummypersistence.h>
+
+namespace storage {
+
+// MessageSender stub that records every sent command/reply in _msgs instead
+// of dispatching them, so tests can inspect what was sent.
+struct MessageKeeper : public MessageSender {
+    std::vector<api::StorageMessage::SP> _msgs;
+
+    void sendCommand(const api::StorageCommand::SP& m) { _msgs.push_back(m); }
+    void sendReply(const api::StorageReply::SP& m) { _msgs.push_back(m); }
+};
+
+// Bundles everything a persistence test needs: config, a service layer node
+// with dummy persistence, metrics, a filestor handler and one PersistenceUtil
+// per disk. Member order matters: it is also construction order (see ctor).
+struct PersistenceTestEnvironment {
+    PersistenceTestEnvironment(
+            DiskCount numDisks);
+
+    document::TestDocMan _testDocMan;
+    vdstestlib::DirConfig _config;
+    MessageKeeper _messageKeeper;
+    TestServiceLayerApp _node;
+    StorageComponent _component;
+    FileStorMetrics _metrics;
+    std::unique_ptr<FileStorHandler> _handler;
+    std::vector<vespalib::LinkedPtr<PersistenceUtil> > _diskEnvs;
+};
+
+/**
+ * CppUnit fixture base class offering high-level helpers for persistence
+ * tests: direct SPI puts/gets/removes/updates, bucket database access, and
+ * generation of deterministic random test documents. Call setupDisks() (or
+ * use SingleDiskPersistenceTestUtils) before using any helper.
+ */
+class PersistenceTestUtils : public CppUnit::TestFixture {
+private:
+    std::unique_ptr<PersistenceTestEnvironment> _env;
+
+public:
+    PersistenceTestUtils();
+    virtual ~PersistenceTestUtils();
+
+    /** Queue a put on the filestor handler; returns the generated document. */
+    document::Document::SP schedulePut(
+            uint32_t location,
+            spi::Timestamp timestamp,
+            uint16_t disk,
+            uint32_t minSize = 0,
+            uint32_t maxSize = 128);
+
+    /** (Re)create the test environment with the given number of disks. */
+    void setupDisks(uint32_t disks);
+
+    void tearDown() {
+        _env.reset();
+    }
+
+    /** Dump bucket content from the dummy persistence provider. */
+    std::string dumpBucket(const document::BucketId& bid, uint16_t disk = 0);
+
+    PersistenceUtil& getEnv(uint32_t disk = 0)
+        { return *_env->_diskEnvs[disk]; }
+    FileStorHandler& fsHandler() { return *_env->_handler; }
+    FileStorMetrics& metrics() { return _env->_metrics; }
+    MessageKeeper& messageKeeper() { return _env->_messageKeeper; }
+    document::DocumentTypeRepo::SP getTypeRepo() { return _env->_component.getTypeRepo(); }
+    StorageComponent& getComponent() { return _env->_component; }
+    TestServiceLayerApp& getNode() { return _env->_node; }
+
+    StorBucketDatabase::WrappedEntry getBucket(const document::BucketId& id);
+    StorBucketDatabase::WrappedEntry createBucket(const document::BucketId& id);
+
+    std::string getBucketStatus(const document::BucketId& id);
+
+    spi::PersistenceProvider& getPersistenceProvider();
+
+    /**
+       Performs a put to the given disk.
+       Returns the document that was inserted.
+    */
+    document::Document::SP doPutOnDisk(
+            uint16_t disk,
+            uint32_t location,
+            spi::Timestamp timestamp,
+            uint32_t minSize = 0,
+            uint32_t maxSize = 128);
+
+    document::Document::SP doPut(
+            uint32_t location,
+            spi::Timestamp timestamp,
+            uint32_t minSize = 0,
+            uint32_t maxSize = 128)
+        { return doPutOnDisk(0, location, timestamp, minSize, maxSize); }
+
+    /**
+       Performs a remove to the given disk.
+       Returns true if the document was removed, false if it was not found.
+    */
+    bool doRemoveOnDisk(
+            uint16_t disk,
+            const document::BucketId& bid,
+            const document::DocumentId& id,
+            spi::Timestamp timestamp,
+            bool persistRemove);
+
+    bool doRemove(
+            const document::BucketId& bid,
+            const document::DocumentId& id,
+            spi::Timestamp timestamp,
+            bool persistRemove) {
+        return doRemoveOnDisk(0, bid, id, timestamp, persistRemove);
+    }
+
+    bool doUnrevertableRemoveOnDisk(uint16_t disk,
+                                    const document::BucketId& bid,
+                                    const document::DocumentId& id,
+                                    spi::Timestamp timestamp);
+
+    bool doUnrevertableRemove(const document::BucketId& bid,
+                              const document::DocumentId& id,
+                              spi::Timestamp timestamp)
+    {
+        return doUnrevertableRemoveOnDisk(0, bid, id, timestamp);
+    }
+
+    /**
+     * Do a remove toward storage set up in test environment.
+     *
+     * @id Document to remove.
+     * @disk If set, use this disk, otherwise lookup in bucket db.
+     * @unrevertableRemove If set, instead of adding put, turn put to remove.
+     * @usedBits Generate bucket to use from docid using this amount of bits.
+     */
+    void doRemove(const document::DocumentId& id, spi::Timestamp, uint16_t disk = 0xffff,
+                  bool unrevertableRemove = false, uint16_t usedBits = 16);
+
+    spi::GetResult doGetOnDisk(
+            uint16_t disk,
+            const document::BucketId& bucketId,
+            const document::DocumentId& docId,
+            bool headerOnly);
+
+    spi::GetResult doGet(
+            const document::BucketId& bucketId,
+            const document::DocumentId& docId,
+            bool headerOnly)
+        { return doGetOnDisk(0, bucketId, docId, headerOnly); }
+
+    /** Build an update assigning updateValue to body field "content". */
+    document::DocumentUpdate::SP createBodyUpdate(
+            const document::DocumentId& id,
+            const document::FieldValue& updateValue);
+
+    /** Build an update assigning updateValue to header field "headerval". */
+    document::DocumentUpdate::SP createHeaderUpdate(
+            const document::DocumentId& id,
+            const document::FieldValue& updateValue);
+
+    /** Resolve disk from bucket db when disk == 0xffff; throws if absent. */
+    uint16_t getDiskFromBucketDatabaseIfUnset(const document::BucketId&,
+                                              uint16_t disk = 0xffff);
+
+    /**
+     * Do a put toward storage set up in test environment.
+     *
+     * @doc Document to put. Use TestDocMan to generate easily.
+     * @disk If set, use this disk, otherwise lookup in bucket db.
+     * @usedBits Generate bucket to use from docid using this amount of bits.
+     */
+    void doPut(const document::Document::SP& doc, spi::Timestamp,
+               uint16_t disk = 0xffff, uint16_t usedBits = 16);
+
+    void doPut(const document::Document::SP& doc,
+               document::BucketId bid,
+               spi::Timestamp time,
+               uint16_t disk = 0);
+
+    spi::UpdateResult doUpdate(document::BucketId bid,
+                               const document::DocumentUpdate::SP& update,
+                               spi::Timestamp time,
+                               uint16_t disk = 0);
+
+    /** Deterministic: same (location, seed) always yields the same document. */
+    document::Document::UP createRandomDocumentAtLocation(
+            uint64_t location, uint32_t seed,
+            uint32_t minDocSize, uint32_t maxDocSize);
+
+    /**
+     * Create a test bucket with various content representing most states a
+     * bucket can represent, such that tests have a nice test bucket to use
+     * when they require operations to handle all the various bucket contents.
+     *
+     * @disk If set, use this disk, otherwise lookup in bucket db.
+     */
+    void createTestBucket(const document::BucketId&, uint16_t disk = 0xffff);
+
+    /**
+     * Create a new persistence thread.
+     */
+    std::unique_ptr<PersistenceThread> createPersistenceThread(uint32_t disk);
+
+    /**
+     * In-place modify doc so that it has no more body fields.
+     */
+    void clearBody(document::Document& doc);
+};
+
+// Convenience fixture for the common case: a one-disk test environment,
+// created automatically in setUp().
+class SingleDiskPersistenceTestUtils : public PersistenceTestUtils
+{
+public:
+    void setUp() {
+        setupDisks(1);
+    }
+};
+
+} // storage
+
diff --git a/storage/src/tests/persistence/persistencethread_splittest.cpp b/storage/src/tests/persistence/persistencethread_splittest.cpp
new file mode 100644
index 00000000000..f50e62e0aeb
--- /dev/null
+++ b/storage/src/tests/persistence/persistencethread_splittest.cpp
@@ -0,0 +1,234 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/storage/persistence/persistencethread.h>
+#include <vespa/storageapi/message/bucketsplitting.h>
+#include <tests/persistence/persistencetestutils.h>
+
+namespace storage {
+namespace {
+    // Load type used for spi::Context objects in this test file.
+    spi::LoadType defaultLoadType(0, "default");
+}
+
+// Exercises PersistenceThread::handleSplitBucket across the different reasons
+// a split can be requested (too many docs, too large docs, too few split
+// bits, GID-collision inconsistency); each enum value parameterizes doTest().
+struct PersistenceThread_SplitTest : public SingleDiskPersistenceTestUtils
+{
+    enum SplitCase {
+        TOO_MANY_DOCS_SPLIT_ONCE, // Only one split needed to divide
+        TOO_MANY_DOCS_SPLIT_MULTIPLE_BITS, // Multiple bits needed to divide
+        TOO_MANY_DOCS_ACTUALLY_NOT, // Other copy is too big but not this one
+                                    // Multi bits needed, but dont do it.
+        TOO_LARGE_DOCS_SPLIT_ONCE,
+        TOO_LARGE_DOCS_SPLIT_MULTIPLE_BITS,
+        TOO_LARGE_DOCS_SINGLE_DOC, // Cannot split single doc even if too large
+        TOO_LARGE_DOCS_ACTUALLY_NOT, // Other copy is too large, not this one
+        // Need to split to X bits to get in line with other copy or distr.
+        SPLIT_TOO_LITTLE_SINGLE_SPLIT, // Split all to one target
+        SPLIT_TOO_LITTLE_JUST_RIGHT, // Just manage to split in two at that lvl
+        SPLIT_TOO_LITTLE_SPLIT_TOWARDS_ENOUGH, // Has to split shorter
+        SPLIT_INCONSISTENT_1_DOC,
+        SPLIT_INCONSISTENT_ALL_DOCS_SAME_GID,
+    };
+
+    // Shared driver: sets up a bucket per the case, runs the split, and
+    // verifies the resulting target buckets.
+    void doTest(SplitCase);
+
+    void testTooManyDocsSplitOnce()
+        { doTest(TOO_MANY_DOCS_SPLIT_ONCE); }
+    void testTooManyDocsSplitMulti()
+        { doTest(TOO_MANY_DOCS_SPLIT_MULTIPLE_BITS); }
+    void testTooManyDocsActuallyNot()
+        { doTest(TOO_MANY_DOCS_ACTUALLY_NOT); }
+    void testTooLargeDocsSplitOnce()
+        { doTest(TOO_LARGE_DOCS_SPLIT_ONCE); }
+    void testTooLargeDocsSplitMulti()
+        { doTest(TOO_LARGE_DOCS_SPLIT_MULTIPLE_BITS); }
+    void testTooLargeDocsSingleDoc()
+        { doTest(TOO_LARGE_DOCS_SINGLE_DOC); }
+    void testTooLargeDocsActuallyNot()
+        { doTest(TOO_LARGE_DOCS_ACTUALLY_NOT); }
+    void testSplitTooLittleSingleSplit()
+        { doTest(SPLIT_TOO_LITTLE_SINGLE_SPLIT); }
+    void testSplitTooLittleJustRight()
+        { doTest(SPLIT_TOO_LITTLE_JUST_RIGHT); }
+    void testSplitTooLittleSplitTowardsEnough()
+        { doTest(SPLIT_TOO_LITTLE_SPLIT_TOWARDS_ENOUGH); }
+    void testInconsistentSplitHasOneBitFallbackWhen1Doc() {
+        doTest(SPLIT_INCONSISTENT_1_DOC);
+    }
+    void testInconsistentSplitHasOneBitFallbackWhenAllDocsHaveSameGid() {
+        doTest(SPLIT_INCONSISTENT_ALL_DOCS_SAME_GID);
+    }
+
+    CPPUNIT_TEST_SUITE(PersistenceThread_SplitTest);
+    CPPUNIT_TEST(testTooManyDocsSplitOnce);
+    CPPUNIT_TEST(testTooManyDocsSplitMulti);
+    CPPUNIT_TEST(testTooManyDocsActuallyNot);
+    CPPUNIT_TEST(testTooLargeDocsSplitOnce);
+    CPPUNIT_TEST(testTooLargeDocsSplitMulti);
+    CPPUNIT_TEST(testTooLargeDocsSingleDoc);
+    CPPUNIT_TEST(testTooLargeDocsActuallyNot);
+    CPPUNIT_TEST(testSplitTooLittleSingleSplit);
+    CPPUNIT_TEST(testSplitTooLittleJustRight);
+    CPPUNIT_TEST(testSplitTooLittleSplitTowardsEnough);
+    CPPUNIT_TEST(testInconsistentSplitHasOneBitFallbackWhen1Doc);
+    CPPUNIT_TEST(testInconsistentSplitHasOneBitFallbackWhenAllDocsHaveSameGid);
+    CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(PersistenceThread_SplitTest);
+
+// Parameterized split-test driver. The local variables below form the default
+// scenario (8 docs of 100KB each, split limit at 4 docs / 1MB, dividing at
+// split level 2 into 2 result buckets); each case then overrides the relevant
+// knobs before the common setup/execute/verify sequence runs.
+void
+PersistenceThread_SplitTest::doTest(SplitCase splitCase)
+{
+    uint32_t maxCount = 4;
+    uint32_t maxSize = 1000 * 1000;
+    uint32_t maxBits = 58;
+    uint32_t minBits = 1;
+    uint32_t docCount = 8;
+    uint32_t docSize = 100 * 1000;
+    uint32_t currentSplitLevel = 1;
+    uint32_t splitLevelToDivide = 2;
+    uint32_t resultSplitLevel = 2;
+    size_t resultBuckets = 2;
+    bool simulateGidCollision = false;
+    api::ReturnCode error(api::ReturnCode::OK);
+    switch (splitCase) {
+    case TOO_MANY_DOCS_SPLIT_ONCE:
+        break; // Default. Do nothing
+    case TOO_MANY_DOCS_SPLIT_MULTIPLE_BITS:
+        splitLevelToDivide = 3;
+        resultSplitLevel = 3;
+        break;
+    case TOO_MANY_DOCS_ACTUALLY_NOT:
+        splitLevelToDivide = 3;
+        docCount = 2;
+        resultBuckets = 1;
+        break;
+    case TOO_LARGE_DOCS_SPLIT_ONCE:
+        maxCount = 100;
+        docSize = 400 * 1000;
+        break;
+    case TOO_LARGE_DOCS_SPLIT_MULTIPLE_BITS:
+        maxCount = 100;
+        docSize = 400 * 1000;
+        splitLevelToDivide = 3;
+        resultSplitLevel = 3;
+        break;
+    case TOO_LARGE_DOCS_SINGLE_DOC:
+        // It is possible for bucket to be inconsistent being big enough
+        // to split in other copy but this copy has only 1 too big doc.
+        docCount = 1;
+        docSize = 3000 * 1000;
+        splitLevelToDivide = 3;
+        resultBuckets = 1;
+        break;
+    case TOO_LARGE_DOCS_ACTUALLY_NOT:
+        maxCount = 100;
+        splitLevelToDivide = 3;
+        resultSplitLevel = 2;
+        resultBuckets = 1;
+        break;
+    case SPLIT_TOO_LITTLE_SINGLE_SPLIT:
+        maxBits = 5;
+        maxSize = 0;
+        maxCount = 0;
+        splitLevelToDivide = 16;
+        resultSplitLevel = 5;
+        resultBuckets = 1;
+        break;
+    case SPLIT_TOO_LITTLE_JUST_RIGHT:
+        maxBits = 5;
+        maxSize = 0;
+        maxCount = 0;
+        splitLevelToDivide = 5;
+        resultSplitLevel = 5;
+        break;
+    case SPLIT_TOO_LITTLE_SPLIT_TOWARDS_ENOUGH:
+        maxBits = 8;
+        maxSize = 0;
+        maxCount = 0;
+        splitLevelToDivide = 5;
+        resultSplitLevel = 5;
+        break;
+    case SPLIT_INCONSISTENT_1_DOC:
+        docCount = 1;
+        maxSize = 0;
+        maxCount = 0;
+        currentSplitLevel = 16;
+        resultSplitLevel = 17;
+        resultBuckets = 1;
+        break;
+    case SPLIT_INCONSISTENT_ALL_DOCS_SAME_GID:
+        docCount = 2;
+        maxSize = 0;
+        maxCount = 0;
+        currentSplitLevel = 16;
+        resultSplitLevel = 17;
+        resultBuckets = 1;
+        simulateGidCollision = true;
+        break;
+    default:
+        assert(false);
+    }
+
+    // splitMask is the location bit that separates the two target buckets at
+    // the level we expect the split to divide on.
+    uint64_t location = 0;
+    uint64_t splitMask = 1 << (splitLevelToDivide - 1);
+    spi::Context context(defaultLoadType, spi::Priority(0),
+                         spi::Trace::TraceLevel(0));
+    spi::Bucket bucket(document::BucketId(currentSplitLevel, 1),
+                       spi::PartitionId(0));
+    spi::PersistenceProvider& spi(getPersistenceProvider());
+    // Start from a clean bucket regardless of earlier test state.
+    spi.deleteBucket(bucket, context);
+    spi.createBucket(bucket, context);
+    document::TestDocMan testDocMan;
+    for (uint32_t i=0; i<docCount; ++i) {
+        uint64_t docloc;
+        uint32_t seed;
+        if (!simulateGidCollision) {
+            // Alternate docs between the two halves of the split target.
+            docloc = location | (i % 2 == 0 ? 0 : splitMask);
+            seed = i;
+        } else {
+            // Same location and seed => identical GIDs for all docs.
+            docloc = location;
+            seed = 0;
+        }
+        document::Document::SP doc(testDocMan.createRandomDocumentAtLocation(
+                docloc, seed, docSize, docSize));
+        spi.put(bucket, spi::Timestamp(1000 + i), doc, context);
+    }
+
+    std::unique_ptr<PersistenceThread> thread(createPersistenceThread(0));
+    getNode().getStateUpdater().setClusterState(
+            lib::ClusterState::CSP(
+                    new lib::ClusterState("distributor:1 storage:1")));
+    api::SplitBucketCommand cmd(document::BucketId(currentSplitLevel, 1));
+    cmd.setMaxSplitBits(maxBits);
+    cmd.setMinSplitBits(minBits);
+    // NOTE(review): the local maxSize/maxCount feed the command's *min* byte
+    // size/doc count setters — presumably the command treats these as the
+    // thresholds that trigger splitting; confirm against SplitBucketCommand.
+    cmd.setMinByteSize(maxSize);
+    cmd.setMinDocCount(maxCount);
+    cmd.setSourceIndex(0);
+    MessageTracker::UP result(thread->handleSplitBucket(cmd));
+    api::ReturnCode code(result->getResult());
+    CPPUNIT_ASSERT_EQUAL(error, code);
+    if (!code.success()) return;
+    api::SplitBucketReply& reply(
+            dynamic_cast<api::SplitBucketReply&>(*result->getReply()));
+    // Compare expected and actual target buckets as order-independent sets of
+    // "<bucket> - <usedbits>" strings.
+    std::set<std::string> expected;
+    for (uint32_t i=0; i<resultBuckets; ++i) {
+        document::BucketId b(resultSplitLevel,
+                             location | (i == 0 ? 0 : splitMask));
+        std::ostringstream ost;
+        ost << b << " - " << b.getUsedBits();
+        expected.insert(ost.str());
+    }
+    std::set<std::string> actual;
+    for (uint32_t i=0; i<reply.getSplitInfo().size(); ++i) {
+        std::ostringstream ost;
+        document::BucketId b(reply.getSplitInfo()[i].first);
+        ost << b << " - " << b.getUsedBits();
+        actual.insert(ost.str());
+    }
+    CPPUNIT_ASSERT_EQUAL(expected, actual);
+}
+
+} // storage
+
diff --git a/storage/src/tests/persistence/processalltest.cpp b/storage/src/tests/persistence/processalltest.cpp
new file mode 100644
index 00000000000..db75725db6f
--- /dev/null
+++ b/storage/src/tests/persistence/processalltest.cpp
@@ -0,0 +1,262 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/document/base/testdocman.h>
+#include <vespa/storage/persistence/processallhandler.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/storage/persistence/messages.h>
+#include <vespa/documentapi/loadtypes/loadtype.h>
+#include <tests/persistence/persistencetestutils.h>
+
+namespace storage {
+
+// Tests ProcessAllHandler's two operations: RemoveLocation (selection-based
+// bulk remove) and StatBucket (textual bucket inspection).
+class ProcessAllHandlerTest : public SingleDiskPersistenceTestUtils
+{
+    CPPUNIT_TEST_SUITE(ProcessAllHandlerTest);
+    CPPUNIT_TEST(testRemoveLocation);
+    CPPUNIT_TEST(testRemoveLocationDocumentSubset);
+    CPPUNIT_TEST(testRemoveLocationUnknownDocType);
+    CPPUNIT_TEST(testRemoveLocationBogusSelection);
+    CPPUNIT_TEST(testStat);
+    CPPUNIT_TEST(testStatWithRemove);
+    CPPUNIT_TEST(testStatWholeBucket);
+    CPPUNIT_TEST_SUITE_END();
+
+public:
+    void testRemoveLocation();
+    void testRemoveLocationDocumentSubset();
+    void testRemoveLocationUnknownDocType();
+    // NOTE(review): declared but not registered in the suite above and no
+    // definition is visible — either implement and register it or remove it.
+    void testRemoveLocationEmptySelection();
+    void testRemoveLocationBogusSelection();
+    void testStat();
+    void testStatWithRemove();
+    void testStatWholeBucket();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(ProcessAllHandlerTest);
+
+// A selection matching every doc in the location turns both puts into remove
+// entries (the "1" flag in the DocEntry dump marks a removed entry).
+void
+ProcessAllHandlerTest::testRemoveLocation()
+{
+    document::BucketId bucketId(16, 4);
+    doPut(4, spi::Timestamp(1234));
+    doPut(4, spi::Timestamp(2345));
+
+    api::RemoveLocationCommand removeLocation("id.user == 4", bucketId);
+    ProcessAllHandler handler(getEnv(), getPersistenceProvider());
+    spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
+    handler.handleRemoveLocation(removeLocation, context);
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string(
+                    "DocEntry(1234, 1, id:mail:testdoctype1:n=4:3619.html)\n"
+                    "DocEntry(2345, 1, id:mail:testdoctype1:n=4:4008.html)\n"),
+            dumpBucket(bucketId));
+}
+
+// A field-based selection must remove only the matching documents: docs with
+// even headerval become remove entries (flag 1), odd ones stay as puts.
+void
+ProcessAllHandlerTest::testRemoveLocationDocumentSubset()
+{
+    document::BucketId bucketId(16, 4);
+    ProcessAllHandler handler(getEnv(), getPersistenceProvider());
+
+    document::TestDocMan docMan;
+    for (int i = 0; i < 10; ++i) {
+        document::Document::SP doc(docMan.createRandomDocumentAtLocation(4, 1234 + i));
+        doc->setValue(doc->getField("headerval"), document::IntFieldValue(i));
+        doPut(doc, bucketId, spi::Timestamp(100 + i), 0);
+    }
+
+    api::RemoveLocationCommand
+        removeLocation("testdoctype1.headerval % 2 == 0", bucketId);
+    spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
+    handler.handleRemoveLocation(removeLocation, context);
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("DocEntry(100, 1, id:mail:testdoctype1:n=4:3619.html)\n"
+                        "DocEntry(101, 0, Doc(id:mail:testdoctype1:n=4:33113.html))\n"
+                        "DocEntry(102, 1, id:mail:testdoctype1:n=4:62608.html)\n"
+                        "DocEntry(103, 0, Doc(id:mail:testdoctype1:n=4:26566.html))\n"
+                        "DocEntry(104, 1, id:mail:testdoctype1:n=4:56061.html)\n"
+                        "DocEntry(105, 0, Doc(id:mail:testdoctype1:n=4:20019.html))\n"
+                        "DocEntry(106, 1, id:mail:testdoctype1:n=4:49514.html)\n"
+                        "DocEntry(107, 0, Doc(id:mail:testdoctype1:n=4:13472.html))\n"
+                        "DocEntry(108, 1, id:mail:testdoctype1:n=4:42967.html)\n"
+                        "DocEntry(109, 0, Doc(id:mail:testdoctype1:n=4:6925.html))\n"),
+            dumpBucket(bucketId));
+}
+
+// A selection referencing an unknown document type must throw, and the
+// bucket content must be left untouched (the put remains a flag-0 entry).
+void
+ProcessAllHandlerTest::testRemoveLocationUnknownDocType()
+{
+    document::BucketId bucketId(16, 4);
+    doPut(4, spi::Timestamp(1234));
+
+    api::RemoveLocationCommand
+        removeLocation("unknowndoctype.headerval % 2 == 0", bucketId);
+
+    bool gotException = false;
+    try {
+        ProcessAllHandler handler(getEnv(), getPersistenceProvider());
+        spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
+        handler.handleRemoveLocation(removeLocation, context);
+    } catch (...) {
+        gotException = true;
+    }
+    CPPUNIT_ASSERT(gotException);
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("DocEntry(1234, 0, Doc(id:mail:testdoctype1:n=4:3619.html))\n"),
+            dumpBucket(bucketId));
+}
+
+// A syntactically bogus selection must throw, again leaving the bucket
+// content untouched.
+void
+ProcessAllHandlerTest::testRemoveLocationBogusSelection()
+{
+    document::BucketId bucketId(16, 4);
+    doPut(4, spi::Timestamp(1234));
+
+    api::RemoveLocationCommand removeLocation("id.bogus != badgers", bucketId);
+
+    bool gotException = false;
+    try {
+        ProcessAllHandler handler(getEnv(), getPersistenceProvider());
+        spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
+        handler.handleRemoveLocation(removeLocation, context);
+    } catch (...) {
+        gotException = true;
+    }
+    CPPUNIT_ASSERT(gotException);
+
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("DocEntry(1234, 0, Doc(id:mail:testdoctype1:n=4:3619.html))\n"),
+            dumpBucket(bucketId));
+}
+
+// StatBucket with a selection must report only the matching (even-headerval)
+// documents, formatted as the handler's per-timestamp listing.
+void
+ProcessAllHandlerTest::testStat()
+{
+    document::BucketId bucketId(16, 4);
+    ProcessAllHandler handler(getEnv(), getPersistenceProvider());
+
+    document::TestDocMan docMan;
+    for (int i = 0; i < 10; ++i) {
+        document::Document::SP doc(docMan.createRandomDocumentAtLocation(4, 1234 + i));
+        doc->setValue(doc->getField("headerval"), document::IntFieldValue(i));
+        doPut(doc, bucketId, spi::Timestamp(100 + i), 0);
+    }
+
+    api::StatBucketCommand statBucket(bucketId,
+                                      "testdoctype1.headerval % 2 == 0");
+    spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
+    MessageTracker::UP tracker = handler.handleStatBucket(statBucket, context);
+
+    CPPUNIT_ASSERT(tracker->getReply().get());
+    api::StatBucketReply& reply =
+        dynamic_cast<api::StatBucketReply&>(*tracker->getReply().get());
+    CPPUNIT_ASSERT_EQUAL(api::ReturnCode::OK, reply.getResult().getResult());
+
+    vespalib::string expected =
+        "Persistence bucket BucketId(0x4000000000000004), partition 0\n"
+        "  Timestamp: 100, Doc(id:mail:testdoctype1:n=4:3619.html), gid(0x0400000092bb8d298934253a), size: 169\n"
+        "  Timestamp: 102, Doc(id:mail:testdoctype1:n=4:62608.html), gid(0x04000000ce878d2488413bc4), size: 147\n"
+        "  Timestamp: 104, Doc(id:mail:testdoctype1:n=4:56061.html), gid(0x040000002b8f80f0160f6c5c), size: 124\n"
+        "  Timestamp: 106, Doc(id:mail:testdoctype1:n=4:49514.html), gid(0x04000000d45ca9abb47567f0), size: 101\n"
+        "  Timestamp: 108, Doc(id:mail:testdoctype1:n=4:42967.html), gid(0x04000000f19ece1668e6de48), size: 206\n";
+
+
+    CPPUNIT_ASSERT_EQUAL(expected, reply.getResults());
+}
+
+void
+ProcessAllHandlerTest::testStatWithRemove()
+{
+    // StatBucket with "true" on a bucket containing both puts and removes:
+    // remove entries must be listed alongside the put entries.
+    document::BucketId bucketId(16, 4);
+    ProcessAllHandler handler(getEnv(), getPersistenceProvider());
+
+    // Put at timestamps 100..109, then remove each doc at 200..209.
+    document::TestDocMan docMan;
+    for (int i = 0; i < 10; ++i) {
+        document::Document::SP doc(docMan.createRandomDocumentAtLocation(4, 1234 + i));
+        doc->setValue(doc->getField("headerval"), document::IntFieldValue(i));
+        doPut(doc, bucketId, spi::Timestamp(100 + i), 0);
+        doRemove(bucketId,
+                 doc->getId(),
+                 spi::Timestamp(200 + i),
+                 true);
+    }
+
+    api::StatBucketCommand statBucket(bucketId, "true");
+    spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
+    MessageTracker::UP tracker = handler.handleStatBucket(statBucket, context);
+
+    CPPUNIT_ASSERT(tracker->getReply().get());
+    api::StatBucketReply& reply =
+        dynamic_cast<api::StatBucketReply&>(*tracker->getReply().get());
+    CPPUNIT_ASSERT_EQUAL(api::ReturnCode::OK, reply.getResult().getResult());
+
+    vespalib::string expected =
+        "Persistence bucket BucketId(0x4000000000000004), partition 0\n"
+        " Timestamp: 100, Doc(id:mail:testdoctype1:n=4:3619.html), gid(0x0400000092bb8d298934253a), size: 169\n"
+        " Timestamp: 101, Doc(id:mail:testdoctype1:n=4:33113.html), gid(0x04000000b121a632741db368), size: 95\n"
+        " Timestamp: 102, Doc(id:mail:testdoctype1:n=4:62608.html), gid(0x04000000ce878d2488413bc4), size: 147\n"
+        " Timestamp: 103, Doc(id:mail:testdoctype1:n=4:26566.html), gid(0x04000000177f8240bdd2bef0), size: 200\n"
+        " Timestamp: 104, Doc(id:mail:testdoctype1:n=4:56061.html), gid(0x040000002b8f80f0160f6c5c), size: 124\n"
+        " Timestamp: 105, Doc(id:mail:testdoctype1:n=4:20019.html), gid(0x040000001550c67f28ea7b03), size: 177\n"
+        " Timestamp: 106, Doc(id:mail:testdoctype1:n=4:49514.html), gid(0x04000000d45ca9abb47567f0), size: 101\n"
+        " Timestamp: 107, Doc(id:mail:testdoctype1:n=4:13472.html), gid(0x040000005d01f3fd960f8098), size: 154\n"
+        " Timestamp: 108, Doc(id:mail:testdoctype1:n=4:42967.html), gid(0x04000000f19ece1668e6de48), size: 206\n"
+        " Timestamp: 109, Doc(id:mail:testdoctype1:n=4:6925.html), gid(0x04000000667c0b3cada830be), size: 130\n"
+        " Timestamp: 200, id:mail:testdoctype1:n=4:3619.html, gid(0x0400000092bb8d298934253a) (remove)\n"
+        " Timestamp: 201, id:mail:testdoctype1:n=4:33113.html, gid(0x04000000b121a632741db368) (remove)\n"
+        " Timestamp: 202, id:mail:testdoctype1:n=4:62608.html, gid(0x04000000ce878d2488413bc4) (remove)\n"
+        " Timestamp: 203, id:mail:testdoctype1:n=4:26566.html, gid(0x04000000177f8240bdd2bef0) (remove)\n"
+        " Timestamp: 204, id:mail:testdoctype1:n=4:56061.html, gid(0x040000002b8f80f0160f6c5c) (remove)\n"
+        " Timestamp: 205, id:mail:testdoctype1:n=4:20019.html, gid(0x040000001550c67f28ea7b03) (remove)\n"
+        " Timestamp: 206, id:mail:testdoctype1:n=4:49514.html, gid(0x04000000d45ca9abb47567f0) (remove)\n"
+        " Timestamp: 207, id:mail:testdoctype1:n=4:13472.html, gid(0x040000005d01f3fd960f8098) (remove)\n"
+        " Timestamp: 208, id:mail:testdoctype1:n=4:42967.html, gid(0x04000000f19ece1668e6de48) (remove)\n"
+        " Timestamp: 209, id:mail:testdoctype1:n=4:6925.html, gid(0x04000000667c0b3cada830be) (remove)\n";
+
+    CPPUNIT_ASSERT_EQUAL(expected, reply.getResults());
+}
+
+
+void
+ProcessAllHandlerTest::testStatWholeBucket()
+{
+    // StatBucket with the always-true selection must list every document
+    // in the bucket, in timestamp order.
+    document::BucketId bucketId(16, 4);
+    ProcessAllHandler handler(getEnv(), getPersistenceProvider());
+
+    // Ten documents at timestamps 100..109.
+    document::TestDocMan docMan;
+    for (int i = 0; i < 10; ++i) {
+        document::Document::SP doc(docMan.createRandomDocumentAtLocation(4, 1234 + i));
+        doc->setValue(doc->getField("headerval"), document::IntFieldValue(i));
+        doPut(doc, bucketId, spi::Timestamp(100 + i), 0);
+    }
+
+    api::StatBucketCommand statBucket(bucketId, "true");
+    spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
+    MessageTracker::UP tracker = handler.handleStatBucket(statBucket, context);
+
+    CPPUNIT_ASSERT(tracker->getReply().get());
+    api::StatBucketReply& reply =
+        dynamic_cast<api::StatBucketReply&>(*tracker->getReply().get());
+    CPPUNIT_ASSERT_EQUAL(api::ReturnCode::OK, reply.getResult().getResult());
+
+    vespalib::string expected =
+        "Persistence bucket BucketId(0x4000000000000004), partition 0\n"
+        " Timestamp: 100, Doc(id:mail:testdoctype1:n=4:3619.html), gid(0x0400000092bb8d298934253a), size: 169\n"
+        " Timestamp: 101, Doc(id:mail:testdoctype1:n=4:33113.html), gid(0x04000000b121a632741db368), size: 95\n"
+        " Timestamp: 102, Doc(id:mail:testdoctype1:n=4:62608.html), gid(0x04000000ce878d2488413bc4), size: 147\n"
+        " Timestamp: 103, Doc(id:mail:testdoctype1:n=4:26566.html), gid(0x04000000177f8240bdd2bef0), size: 200\n"
+        " Timestamp: 104, Doc(id:mail:testdoctype1:n=4:56061.html), gid(0x040000002b8f80f0160f6c5c), size: 124\n"
+        " Timestamp: 105, Doc(id:mail:testdoctype1:n=4:20019.html), gid(0x040000001550c67f28ea7b03), size: 177\n"
+        " Timestamp: 106, Doc(id:mail:testdoctype1:n=4:49514.html), gid(0x04000000d45ca9abb47567f0), size: 101\n"
+        " Timestamp: 107, Doc(id:mail:testdoctype1:n=4:13472.html), gid(0x040000005d01f3fd960f8098), size: 154\n"
+        " Timestamp: 108, Doc(id:mail:testdoctype1:n=4:42967.html), gid(0x04000000f19ece1668e6de48), size: 206\n"
+        " Timestamp: 109, Doc(id:mail:testdoctype1:n=4:6925.html), gid(0x04000000667c0b3cada830be), size: 130\n";
+
+    CPPUNIT_ASSERT_EQUAL(expected, reply.getResults());
+}
+
+}
diff --git a/storage/src/tests/persistence/providershutdownwrappertest.cpp b/storage/src/tests/persistence/providershutdownwrappertest.cpp
new file mode 100644
index 00000000000..0731dcb155a
--- /dev/null
+++ b/storage/src/tests/persistence/providershutdownwrappertest.cpp
@@ -0,0 +1,87 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <tests/persistence/persistencetestutils.h>
+#include <tests/persistence/persistenceproviderwrapper.h>
+#include <vespa/storage/persistence/providershutdownwrapper.h>
+
+namespace storage {
+
+// Tests that ProviderShutdownWrapper requests a node shutdown exactly once
+// when the wrapped persistence provider returns a FATAL_ERROR result.
+class ProviderShutdownWrapperTest : public SingleDiskPersistenceTestUtils
+{
+public:
+    CPPUNIT_TEST_SUITE(ProviderShutdownWrapperTest);
+    CPPUNIT_TEST(testShutdownOnFatalError);
+    CPPUNIT_TEST_SUITE_END();
+
+    void testShutdownOnFatalError();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(ProviderShutdownWrapperTest);
+
+namespace {
+
+// Minimal shutdown listener that just records the first shutdown reason,
+// so the test can assert whether (and why) shutdown was requested.
+class TestShutdownListener
+    : public framework::defaultimplementation::ShutdownListener
+{
+public:
+    TestShutdownListener() : _reason() {}
+
+    void requestShutdown(vespalib::stringref reason) {
+        _reason = reason;
+    }
+
+    // True once requestShutdown has been invoked with a non-empty reason.
+    bool shutdownRequested() const { return !_reason.empty(); }
+    const vespalib::string& getReason() const { return _reason; }
+private:
+    vespalib::string _reason;  // first (and only recorded) shutdown reason
+};
+
+}
+
+void
+ProviderShutdownWrapperTest::testShutdownOnFatalError()
+{
+    // We wrap the wrapper. It's turtles all the way down!
+    PersistenceProviderWrapper providerWrapper(
+        getPersistenceProvider());
+    TestServiceLayerApp app;
+    ServiceLayerComponent component(app.getComponentRegister(), "dummy");
+
+    ProviderShutdownWrapper shutdownWrapper(providerWrapper, component);
+
+    TestShutdownListener shutdownListener;
+
+    app.getComponentRegister().registerShutdownListener(shutdownListener);
+
+    // Make every provider operation fail with FATAL_ERROR.
+    providerWrapper.setResult(
+        spi::Result(spi::Result::FATAL_ERROR, "eject! eject!"));
+    providerWrapper.setFailureMask(
+        PersistenceProviderWrapper::FAIL_ALL_OPERATIONS);
+
+    CPPUNIT_ASSERT(!shutdownListener.shutdownRequested());
+    // This should cause the node to implicitly be shut down
+    shutdownWrapper.getBucketInfo(
+        spi::Bucket(document::BucketId(16, 1234),
+                    spi::PartitionId(0)));
+
+    CPPUNIT_ASSERT(shutdownListener.shutdownRequested());
+    CPPUNIT_ASSERT_EQUAL(vespalib::string("eject! eject!"),
+                         shutdownListener.getReason());
+
+    // Triggering a new error should not cause shutdown to be requested twice.
+    providerWrapper.setResult(
+        spi::Result(spi::Result::FATAL_ERROR, "boom!"));
+
+    shutdownWrapper.getBucketInfo(
+        spi::Bucket(document::BucketId(16, 1234),
+                    spi::PartitionId(0)));
+
+    // Reason must still be the first error, not "boom!".
+    CPPUNIT_ASSERT_EQUAL(vespalib::string("eject! eject!"),
+                         shutdownListener.getReason());
+}
+
+} // ns storage
+
+
diff --git a/storage/src/tests/persistence/splitbitdetectortest.cpp b/storage/src/tests/persistence/splitbitdetectortest.cpp
new file mode 100644
index 00000000000..5cc9c5da721
--- /dev/null
+++ b/storage/src/tests/persistence/splitbitdetectortest.cpp
@@ -0,0 +1,363 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <fstream>
+#include <vespa/storage/persistence/splitbitdetector.h>
+#include <vespa/vespalib/io/fileutil.h>
+#include <vespa/persistence/dummyimpl/dummypersistence.h>
+#include <vespa/document/base/testdocman.h>
+
+
+namespace storage {
+
+namespace {
+    // Load type shared by every spi::Context constructed in these tests.
+    spi::LoadType defaultLoadType(0, "default");
+}
+
+// Unit tests for SplitBitDetector: deciding how many used bits a bucket
+// split target should get under various document/GID distributions.
+struct SplitBitDetectorTest : public CppUnit::TestFixture {
+    void testSingleUser();
+    void testTwoUsers();
+    void testMaxBits();
+    void testMaxBitsOneBelowMax();
+    void testUnsplittable();
+    void testUnsplittableMinCount();
+    void testEmpty();
+    void testZeroDocLimitFallbacksToOneBitIncreaseWith1Doc();
+    void testZeroDocLimitFallbacksToOneBitIncreaseOnGidCollision();
+    void findBucketCollisionIds();
+
+    // Helper: build a metadata-only DocEntry for the given user/doc number
+    // at the given timestamp. (NOTE(review): stray ';' after the body below
+    // is harmless but superfluous.)
+    spi::DocEntry::LP
+    generateDocEntry(uint32_t userId,
+                     uint32_t docNum,
+                     spi::Timestamp timestamp)
+    {
+        std::ostringstream ost;
+        ost << "id:storage_test:testdoctype1:n=" << userId << ":" << docNum;
+        return spi::DocEntry::LP(new spi::DocEntry(
+                timestamp, 0, document::DocumentId(ost.str())));
+    };
+
+    CPPUNIT_TEST_SUITE(SplitBitDetectorTest);
+    CPPUNIT_TEST(testSingleUser);
+    CPPUNIT_TEST(testTwoUsers);
+    CPPUNIT_TEST(testMaxBits);
+    CPPUNIT_TEST(testMaxBitsOneBelowMax);
+    CPPUNIT_TEST(testUnsplittable);
+    CPPUNIT_TEST(testUnsplittableMinCount);
+    CPPUNIT_TEST(testEmpty);
+    CPPUNIT_TEST(testZeroDocLimitFallbacksToOneBitIncreaseWith1Doc);
+    CPPUNIT_TEST(testZeroDocLimitFallbacksToOneBitIncreaseOnGidCollision);
+    // Disabled: CPU-heavy utility, not a regular regression test.
+    CPPUNIT_TEST_DISABLED(findBucketCollisionIds);
+    CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(SplitBitDetectorTest);
+
+void
+SplitBitDetectorTest::testTwoUsers()
+{
+    // Documents from two locations (1 and 3): one extra used bit suffices
+    // to separate them, so the detector should propose a 2-bit split.
+    document::TestDocMan testDocMan;
+    spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
+    provider.getPartitionStates();
+    spi::Bucket bucket(document::BucketId(1, 1),
+                       spi::PartitionId(0));
+    spi::Context context(defaultLoadType, spi::Priority(0),
+                         spi::Trace::TraceLevel(0));
+
+    provider.createBucket(bucket, context);
+
+    std::vector<spi::DocEntry::LP> entries;
+    for (uint32_t i = 0; i < 5; ++i) {
+        document::Document::SP doc(
+            testDocMan.createRandomDocumentAtLocation(1, i, 1, 1));
+        provider.put(bucket, spi::Timestamp(1000 + i), doc, context);
+    }
+
+    for (uint32_t i = 5; i < 10; ++i) {
+        document::Document::SP doc(
+            testDocMan.createRandomDocumentAtLocation(3, i, 1, 1));
+        provider.put(bucket, spi::Timestamp(1000 + i), doc, context);
+    }
+
+    SplitBitDetector::Result result(
+        SplitBitDetector::detectSplit(provider, bucket, 58, context));
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("SplitTargets(2: BucketId(0x0800000000000001), "
+                    "BucketId(0x0800000000000003))"),
+        result.toString());
+}
+
+void
+SplitBitDetectorTest::testSingleUser()
+{
+    // All documents share location 1, so location bits cannot distinguish
+    // them; the detector must split past the 32 location bits (33 bits).
+    document::TestDocMan testDocMan;
+    spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
+    provider.getPartitionStates();
+    spi::Bucket bucket(document::BucketId(1, 1),
+                       spi::PartitionId(0));
+    spi::Context context(defaultLoadType, spi::Priority(0),
+                         spi::Trace::TraceLevel(0));
+
+    provider.createBucket(bucket, context);
+
+    std::vector<spi::DocEntry::LP> entries;
+    for (uint32_t i = 0; i < 10; ++i) {
+        document::Document::SP doc(
+            testDocMan.createRandomDocumentAtLocation(1, i, 1, 1));
+        provider.put(bucket, spi::Timestamp(1000 + i), doc, context);
+    }
+
+    SplitBitDetector::Result result(
+        SplitBitDetector::detectSplit(provider, bucket, 58, context));
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("SplitTargets(33: BucketId(0x8400000000000001), "
+                    "BucketId(0x8400000100000001))"),
+        result.toString());
+}
+
+void
+SplitBitDetectorTest::testMaxBits()
+{
+    // With maxSplitBits capped at 3, the detector must stop at 3 bits even
+    // though the documents are not fully separated at that level.
+    document::TestDocMan testDocMan;
+    spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
+    provider.getPartitionStates();
+    spi::Bucket bucket(document::BucketId(1, 1),
+                       spi::PartitionId(0));
+    spi::Context context(defaultLoadType, spi::Priority(0),
+                         spi::Trace::TraceLevel(0));
+    int minContentSize = 1, maxContentSize = 1;
+
+    provider.createBucket(bucket, context);
+
+    std::vector<spi::DocEntry::LP> entries;
+    for (uint32_t seed = 0; seed < 10; ++seed) {
+        int location = 1;
+        document::Document::SP doc(testDocMan.createRandomDocumentAtLocation(
+                location, seed, minContentSize, maxContentSize));
+        provider.put(bucket, spi::Timestamp(1000 + seed), doc, context);
+    }
+
+    SplitBitDetector::Result result(
+        SplitBitDetector::detectSplit(provider, bucket, 3, context));
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("SplitTargets(3: BucketId(0x0c00000000000001), "
+                    "[ BucketId(0x0c00000000000005) ])"),
+        result.toString());
+}
+
+void
+SplitBitDetectorTest::testMaxBitsOneBelowMax()
+{
+    // Source bucket already has 15 used bits: splitting with max 15 must be
+    // rejected as pointless, while max 16 must produce a 16-bit split.
+    document::TestDocMan testDocMan;
+    spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
+    provider.getPartitionStates();
+    spi::Bucket bucket(document::BucketId(15, 1), spi::PartitionId(0));
+    spi::Context context(defaultLoadType, spi::Priority(0),
+                         spi::Trace::TraceLevel(0));
+    int minContentSize = 1, maxContentSize = 1;
+
+    provider.createBucket(bucket, context);
+
+    std::vector<spi::DocEntry::LP> entries;
+    for (uint32_t seed = 0; seed < 10; ++seed) {
+        // Alternate bit 16 of the location so a 16-bit split separates docs.
+        int location = 1 | (seed % 2 == 0 ? 0x8000 : 0);
+        document::Document::SP doc(testDocMan.createRandomDocumentAtLocation(
+                location, seed, minContentSize, maxContentSize));
+        provider.put(bucket, spi::Timestamp(1000 + seed), doc, context);
+    }
+
+    //std::cerr << provider.dumpBucket(bucket) << "\n";
+
+    SplitBitDetector::Result result(
+        SplitBitDetector::detectSplit(provider, bucket, 15, context));
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("SplitTargets(error: No use in trying to split "
+                    "Bucket(0x3c00000000000001, partition 0) when max split"
+                    " bit is set to 15.)"),
+        result.toString());
+
+    result = SplitBitDetector::detectSplit(provider, bucket, 16, context);
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("SplitTargets(16: BucketId(0x4000000000000001), "
+                    "BucketId(0x4000000000008001))"),
+        result.toString());
+}
+
+void
+SplitBitDetectorTest::testUnsplittable()
+{
+    // Ten puts of documents sharing the same location and seed: the docs
+    // cannot be separated below GID level, forcing a split to 58 bits.
+    document::TestDocMan testDocMan;
+    spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
+    provider.getPartitionStates();
+    spi::Bucket bucket(document::BucketId(1, 1),
+                       spi::PartitionId(0));
+    spi::Context context(defaultLoadType, spi::Priority(0),
+                         spi::Trace::TraceLevel(0));
+
+    provider.createBucket(bucket, context);
+
+    std::vector<spi::DocEntry::LP> entries;
+
+    for (uint32_t i = 0; i < 10; ++i) {
+        document::Document::SP doc(
+            testDocMan.createRandomDocumentAtLocation(1, 1, 1, 1));
+        provider.put(bucket, spi::Timestamp(1000 + i), doc, context);
+    }
+
+    SplitBitDetector::Result result(
+        SplitBitDetector::detectSplit(provider, bucket, 58, context, 100));
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("SplitTargets(58: BucketId(0xe94c074f00000001), "
+                    "BucketId(0xeb4c074f00000001))"),
+        result.toString());
+}
+
+void
+SplitBitDetectorTest::testUnsplittableMinCount()
+{
+    // As testUnsplittable, but with an explicit minCount argument: the
+    // outcome must be the same since only a 58-bit split is possible.
+    document::TestDocMan testDocMan;
+    spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
+    provider.getPartitionStates();
+    spi::Bucket bucket(document::BucketId(1, 1),
+                       spi::PartitionId(0));
+    spi::Context context(defaultLoadType, spi::Priority(0),
+                         spi::Trace::TraceLevel(0));
+
+    provider.createBucket(bucket, context);
+
+    std::vector<spi::DocEntry::LP> entries;
+
+    for (uint32_t i = 0; i < 10; ++i) {
+        document::Document::SP doc(
+            testDocMan.createRandomDocumentAtLocation(1, 1, 1, 1));
+        provider.put(bucket, spi::Timestamp(1000 + i), doc, context);
+    }
+
+    SplitBitDetector::Result result(
+        SplitBitDetector::detectSplit(provider, bucket, 58, context, 5, 0));
+    // Still no other choice than split out to 58 bits regardless of minCount.
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("SplitTargets(58: BucketId(0xe94c074f00000001), "
+                    "BucketId(0xeb4c074f00000001))"),
+        result.toString());
+}
+
+
+void
+SplitBitDetectorTest::testEmpty()
+{
+    // An empty source bucket must yield the dedicated "source empty" result.
+    document::TestDocMan testDocMan;
+    spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
+    provider.getPartitionStates();
+    spi::Bucket bucket(document::BucketId(1, 1),
+                       spi::PartitionId(0));
+    spi::Context context(defaultLoadType, spi::Priority(0),
+                         spi::Trace::TraceLevel(0));
+
+    provider.createBucket(bucket, context);
+
+    SplitBitDetector::Result result(
+        SplitBitDetector::detectSplit(provider, bucket, 58, context));
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("SplitTargets(source empty)"),
+        result.toString());
+}
+
+void
+SplitBitDetectorTest::testZeroDocLimitFallbacksToOneBitIncreaseWith1Doc()
+{
+    // With doc/size limits of 0 and a single document, the detector must
+    // fall back to increasing the used-bit count by exactly one (1 -> 2).
+    document::TestDocMan testDocMan;
+    spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
+    provider.getPartitionStates();
+    spi::Bucket bucket(document::BucketId(1, 1),
+                       spi::PartitionId(0));
+    spi::Context context(defaultLoadType, spi::Priority(0),
+                         spi::Trace::TraceLevel(0));
+
+    provider.createBucket(bucket, context);
+    document::Document::SP doc(
+        testDocMan.createRandomDocumentAtLocation(1, 0, 1, 1));
+    provider.put(bucket, spi::Timestamp(1000), doc, context);
+
+    SplitBitDetector::Result result(
+        SplitBitDetector::detectSplit(provider, bucket, 58, context, 0, 0));
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("SplitTargets(2: BucketId(0x0800000000000001), "
+                    "BucketId(0x0800000000000003))"),
+        result.toString());
+}
+
+void
+SplitBitDetectorTest::testZeroDocLimitFallbacksToOneBitIncreaseOnGidCollision()
+{
+    // Same document put twice (identical GID): even though the entries can
+    // never be separated, zero limits must still give a one-bit increase.
+    document::TestDocMan testDocMan;
+    spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
+    provider.getPartitionStates();
+    spi::Bucket bucket(document::BucketId(1, 1),
+                       spi::PartitionId(0));
+    spi::Context context(defaultLoadType, spi::Priority(0),
+                         spi::Trace::TraceLevel(0));
+
+    provider.createBucket(bucket, context);
+    document::Document::SP doc(
+        testDocMan.createRandomDocumentAtLocation(1, 0, 1, 1));
+    provider.put(bucket, spi::Timestamp(1000), doc, context);
+    provider.put(bucket, spi::Timestamp(2000), doc, context);
+
+    SplitBitDetector::Result result(
+        SplitBitDetector::detectSplit(provider, bucket, 58, context, 0, 0));
+    CPPUNIT_ASSERT_EQUAL(
+        std::string("SplitTargets(2: BucketId(0x0800000000000001), "
+                    "BucketId(0x0800000000000003))"),
+        result.toString());
+}
+
+/**
+ * Not a regular unit test in itself, but more of an utility to find non-unique
+ * document IDs that map to the same 58-bit bucket ID. Disabled by default since
+ * it costs CPU to do this and is not necessary during normal testing.
+ */
+void
+SplitBitDetectorTest::findBucketCollisionIds()
+{
+    using document::DocumentId;
+    using document::BucketId;
+
+    document::BucketIdFactory factory;
+
+    // Fixed target ID; permute the suffix searching for another ID whose
+    // bucket ID collides with the target's.
+    DocumentId targetId("id:foo:music:n=123456:ABCDEFGHIJKLMN");
+    BucketId targetBucket(factory.getBucketId(targetId));
+    char candidateSuffix[] = "ABCDEFGHIJKLMN";
+
+    size_t iterations = 0;
+    constexpr size_t maxIterations = 100000000;
+    // "- 1" excludes the NUL terminator from the permuted range.
+    while (std::next_permutation(std::begin(candidateSuffix),
+                                 std::end(candidateSuffix) - 1))
+    {
+        ++iterations;
+
+        DocumentId candidateId(
+            vespalib::make_string("id:foo:music:n=123456:%s",
+                                  candidateSuffix));
+        BucketId candidateBucket(factory.getBucketId(candidateId));
+        if (targetBucket == candidateBucket) {
+            std::cerr << "\nFound a collision after " << iterations
+                      << " iterations!\n"
+                      << "target: " << targetId << " -> " << targetBucket
+                      << "\ncollision: " << candidateId << " -> "
+                      << candidateBucket << "\n";
+            return;
+        }
+
+        if (iterations == maxIterations) {
+            std::cerr << "\nNo collision found after " << iterations
+                      << " iterations :[\n";
+            return;
+        }
+    }
+    std::cerr << "\nRan out of permutations after " << iterations
+              << " iterations!\n";
+}
+
+}
diff --git a/storage/src/tests/persistence/testandsettest.cpp b/storage/src/tests/persistence/testandsettest.cpp
new file mode 100644
index 00000000000..984e06dc6e3
--- /dev/null
+++ b/storage/src/tests/persistence/testandsettest.cpp
@@ -0,0 +1,331 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// @author Vegard Sjonfjell
+#include <vespa/fastos/fastos.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/storage/persistence/persistencethread.h>
+#include <tests/persistence/persistencetestutils.h>
+#include <vespa/documentapi/messagebus/messages/testandsetcondition.h>
+#include <vespa/document/fieldvalue/fieldvalues.h>
+#include <functional>
+
+using std::unique_ptr;
+using std::shared_ptr;
+
+using namespace std::string_literals;
+
+namespace storage {
+
+// Tests for conditional (test-and-set) put/remove/update handling in the
+// persistence thread: operations execute only when the stored document
+// matches the supplied document-selection condition.
+class TestAndSetTest : public SingleDiskPersistenceTestUtils
+{
+    static constexpr int MIN_DOCUMENT_SIZE = 0;
+    static constexpr int MAX_DOCUMENT_SIZE = 128;
+    static constexpr int RANDOM_SEED = 1234;
+
+    const document::BucketId BUCKET_ID{16, 4};
+    // Header values: the test condition matches on the substring "woofy dog".
+    const document::StringFieldValue MISMATCHING_HEADER{"Definitely nothing about loud canines"};
+    const document::StringFieldValue MATCHING_HEADER{"Some string with woofy dog as a substring"};
+    const document::StringFieldValue OLD_CONTENT{"Some old content"};
+    const document::StringFieldValue NEW_CONTENT{"Freshly pressed and squeezed content"};
+
+    unique_ptr<PersistenceThread> thread;
+    shared_ptr<document::Document> testDoc;
+    document::DocumentId testDocId;
+
+public:
+    void setUp() override {
+        SingleDiskPersistenceTestUtils::setUp();
+
+        spi::Context context(
+            spi::LoadType(0, "default"),
+            spi::Priority(0),
+            spi::Trace::TraceLevel(0));
+
+        createBucket(BUCKET_ID);
+        getPersistenceProvider().createBucket(
+            spi::Bucket(BUCKET_ID, spi::PartitionId(0)),
+            context);
+
+        thread = createPersistenceThread(0);
+        testDoc = createTestDocument();
+        testDocId = testDoc->getId();
+    }
+
+    void tearDown() override {
+        thread.reset(nullptr);
+        SingleDiskPersistenceTestUtils::tearDown();
+    }
+
+    void conditional_put_not_executed_on_condition_mismatch();
+    void conditional_put_executed_on_condition_match();
+    void conditional_remove_not_executed_on_condition_mismatch();
+    void conditional_remove_executed_on_condition_match();
+    void conditional_update_not_executed_on_condition_mismatch();
+    void conditional_update_executed_on_condition_match();
+    void invalid_document_selection_should_fail();
+    void non_existing_document_should_fail();
+    void document_with_no_type_should_fail();
+
+    CPPUNIT_TEST_SUITE(TestAndSetTest);
+    CPPUNIT_TEST(conditional_put_not_executed_on_condition_mismatch);
+    CPPUNIT_TEST(conditional_put_executed_on_condition_match);
+    CPPUNIT_TEST(conditional_remove_not_executed_on_condition_mismatch);
+    CPPUNIT_TEST(conditional_remove_executed_on_condition_match);
+    CPPUNIT_TEST(conditional_update_not_executed_on_condition_mismatch);
+    CPPUNIT_TEST(conditional_update_executed_on_condition_match);
+    CPPUNIT_TEST(invalid_document_selection_should_fail);
+    CPPUNIT_TEST(non_existing_document_should_fail);
+    CPPUNIT_TEST(document_with_no_type_should_fail);
+    CPPUNIT_TEST_SUITE_END();
+
+protected:
+    // Shared driver for the two conditional-update tests: puts a document
+    // with a matching/mismatching header and returns a conditional update.
+    std::unique_ptr<api::UpdateCommand> conditional_update_test(
+        bool matchingHeader,
+        api::Timestamp timestampOne,
+        api::Timestamp timestampTwo);
+
+    document::Document::SP createTestDocument();
+    document::Document::SP retrieveTestDocument();
+    void setTestCondition(api::TestAndSetCommand & command);
+    void putTestDocument(bool matchingHeader, api::Timestamp timestamp);
+    void assertTestDocumentFoundAndMatchesContent(const document::FieldValue & value);
+
+    // Renders the DocEntry line dumpBucket() emits for the given entry.
+    static std::string expectedDocEntryString(
+        api::Timestamp timestamp,
+        const document::DocumentId & testDocId,
+        spi::DocumentMetaFlags removeFlag = spi::NONE);
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(TestAndSetTest);
+
+void TestAndSetTest::conditional_put_not_executed_on_condition_mismatch()
+{
+    // Put document with mismatching header
+    api::Timestamp timestampOne = 0;
+    putTestDocument(false, timestampOne);
+
+    CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
+
+    // Conditionally replace document, but fail due to lack of woofy dog
+    api::Timestamp timestampTwo = 1;
+    api::PutCommand putTwo(BUCKET_ID, testDoc, timestampTwo);
+    setTestCondition(putTwo);
+
+    // The failed put must leave only the original entry in the bucket.
+    CPPUNIT_ASSERT(thread->handlePut(putTwo)->getResult() == api::ReturnCode::Result::TEST_AND_SET_CONDITION_FAILED);
+    CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
+}
+
+void TestAndSetTest::conditional_put_executed_on_condition_match()
+{
+    // Put document with matching header
+    api::Timestamp timestampOne = 0;
+    putTestDocument(true, timestampOne);
+
+    CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
+
+    // Update content of document
+    testDoc->setValue(testDoc->getField("content"), NEW_CONTENT);
+
+    // Conditionally replace document with updated version, succeed in doing so
+    api::Timestamp timestampTwo = 1;
+    api::PutCommand putTwo(BUCKET_ID, testDoc, timestampTwo);
+    setTestCondition(putTwo);
+
+    // Both entries must now be present in the bucket dump.
+    CPPUNIT_ASSERT(thread->handlePut(putTwo)->getResult() == api::ReturnCode::Result::OK);
+    CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId) +
+                         expectedDocEntryString(timestampTwo, testDocId),
+                         dumpBucket(BUCKET_ID));
+
+    assertTestDocumentFoundAndMatchesContent(NEW_CONTENT);
+}
+
+void TestAndSetTest::conditional_remove_not_executed_on_condition_mismatch()
+{
+    // Put document with mismatching header
+    api::Timestamp timestampOne = 0;
+    putTestDocument(false, timestampOne);
+
+    CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
+
+    // Conditionally remove document, fail in doing so
+    api::Timestamp timestampTwo = 1;
+    api::RemoveCommand remove(BUCKET_ID, testDocId, timestampTwo);
+    setTestCondition(remove);
+
+    CPPUNIT_ASSERT(thread->handleRemove(remove)->getResult() == api::ReturnCode::Result::TEST_AND_SET_CONDITION_FAILED);
+    CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
+
+    // Assert that the document is still there
+    retrieveTestDocument();
+}
+
+void TestAndSetTest::conditional_remove_executed_on_condition_match()
+{
+    // Put document with matching header
+    api::Timestamp timestampOne = 0;
+    putTestDocument(true, timestampOne);
+
+    CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
+
+    // Conditionally remove document, succeed in doing so
+    api::Timestamp timestampTwo = 1;
+    api::RemoveCommand remove(BUCKET_ID, testDocId, timestampTwo);
+    setTestCondition(remove);
+
+    // Bucket should now contain the put entry plus a remove tombstone.
+    CPPUNIT_ASSERT(thread->handleRemove(remove)->getResult() == api::ReturnCode::Result::OK);
+    CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId) +
+                         expectedDocEntryString(timestampTwo, testDocId, spi::REMOVE_ENTRY),
+                         dumpBucket(BUCKET_ID));
+}
+
+// Shared setup for the conditional-update tests: put the test document
+// (header matching or not), then build a conditional UpdateCommand that
+// assigns NEW_CONTENT to the "content" field. The caller executes it.
+std::unique_ptr<api::UpdateCommand> TestAndSetTest::conditional_update_test(
+    bool matchingHeader,
+    api::Timestamp timestampOne,
+    api::Timestamp timestampTwo)
+{
+    putTestDocument(matchingHeader, timestampOne);
+
+    auto docUpdate = std::make_shared<document::DocumentUpdate>(testDoc->getType(), testDocId);
+    auto fieldUpdate = document::FieldUpdate(testDoc->getField("content"));
+    fieldUpdate.addUpdate(document::AssignValueUpdate(NEW_CONTENT));
+    docUpdate->addUpdate(fieldUpdate);
+
+    auto updateUp = std::make_unique<api::UpdateCommand>(BUCKET_ID, docUpdate, timestampTwo);
+    setTestCondition(*updateUp);
+    return updateUp;
+}
+
+void TestAndSetTest::conditional_update_not_executed_on_condition_mismatch()
+{
+    // Header does not match, so the update must be rejected and the
+    // document must keep its original content.
+    api::Timestamp timestampOne = 0;
+    api::Timestamp timestampTwo = 1;
+    auto updateUp = conditional_update_test(false, timestampOne, timestampTwo);
+
+    CPPUNIT_ASSERT(thread->handleUpdate(*updateUp)->getResult() == api::ReturnCode::Result::TEST_AND_SET_CONDITION_FAILED);
+    CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId),
+                         dumpBucket(BUCKET_ID));
+
+    assertTestDocumentFoundAndMatchesContent(OLD_CONTENT);
+}
+
+void TestAndSetTest::conditional_update_executed_on_condition_match()
+{
+    // Header matches, so the update must go through and produce a second
+    // entry with the new content.
+    api::Timestamp timestampOne = 0;
+    api::Timestamp timestampTwo = 1;
+    auto updateUp = conditional_update_test(true, timestampOne, timestampTwo);
+
+    CPPUNIT_ASSERT(thread->handleUpdate(*updateUp)->getResult() == api::ReturnCode::Result::OK);
+    CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId) +
+                         expectedDocEntryString(timestampTwo, testDocId),
+                         dumpBucket(BUCKET_ID));
+
+    assertTestDocumentFoundAndMatchesContent(NEW_CONTENT);
+}
+
+void TestAndSetTest::invalid_document_selection_should_fail()
+{
+    // Conditionally replace nonexisting document
+    // Fail early since document selection is invalid
+    api::Timestamp timestamp = 0;
+    api::PutCommand put(BUCKET_ID, testDoc, timestamp);
+    // "bjarne" is not a parseable document selection.
+    put.setCondition(documentapi::TestAndSetCondition("bjarne"));
+
+    CPPUNIT_ASSERT(thread->handlePut(put)->getResult() == api::ReturnCode::Result::ILLEGAL_PARAMETERS);
+    CPPUNIT_ASSERT_EQUAL(""s, dumpBucket(BUCKET_ID));
+}
+
+void TestAndSetTest::non_existing_document_should_fail()
+{
+    // Conditionally replace nonexisting document
+    // Fail since no document exists to match with test and set
+    api::Timestamp timestamp = 0;
+    api::PutCommand put(BUCKET_ID, testDoc, timestamp);
+    setTestCondition(put);
+    // NOTE(review): this first handlePut looks redundant — the result is
+    // ignored and the same command is re-executed below. Confirm intent.
+    thread->handlePut(put);
+
+    CPPUNIT_ASSERT(thread->handlePut(put)->getResult() == api::ReturnCode::Result::TEST_AND_SET_CONDITION_FAILED);
+    CPPUNIT_ASSERT_EQUAL(""s, dumpBucket(BUCKET_ID));
+}
+
+void TestAndSetTest::document_with_no_type_should_fail()
+{
+    // A legacy "doc:" id carries no document type, so a conditional
+    // operation on it must be rejected with ILLEGAL_PARAMETERS.
+    api::Timestamp timestamp = 0;
+    document::DocumentId legacyDocId("doc:mail:3619.html");
+    api::RemoveCommand remove(BUCKET_ID, legacyDocId, timestamp);
+    setTestCondition(remove);
+
+    auto code = thread->handleRemove(remove)->getResult();
+    CPPUNIT_ASSERT(code == api::ReturnCode::Result::ILLEGAL_PARAMETERS);
+    CPPUNIT_ASSERT(code.getMessage() == "Document id has no doctype");
+    CPPUNIT_ASSERT_EQUAL(""s, dumpBucket(BUCKET_ID));
+}
+
+// Builds the shared test document: random doc at the bucket's location,
+// with OLD_CONTENT and a header that does NOT match the test condition.
+document::Document::SP
+TestAndSetTest::createTestDocument()
+{
+    auto doc = document::Document::SP(
+        createRandomDocumentAtLocation(
+            BUCKET_ID.getId(),
+            RANDOM_SEED,
+            MIN_DOCUMENT_SIZE,
+            MAX_DOCUMENT_SIZE));
+
+    doc->setValue(doc->getField("content"), OLD_CONTENT);
+    doc->setValue(doc->getField("hstringval"), MISMATCHING_HEADER);
+
+    return doc;
+}
+
+// Fetches the test document via a Get with the "[all]" field set and
+// asserts that it exists; returns the retrieved document.
+document::Document::SP TestAndSetTest::retrieveTestDocument()
+{
+    api::GetCommand get(BUCKET_ID, testDocId, "[all]");
+    auto tracker = thread->handleGet(get);
+    CPPUNIT_ASSERT(tracker->getResult() == api::ReturnCode::Result::OK);
+
+    auto & reply = static_cast<api::GetReply &>(*tracker->getReply());
+    CPPUNIT_ASSERT(reply.wasFound());
+
+    return reply.getDocument();
+}
+
+// Attaches the shared test-and-set condition: matches only documents whose
+// "hstringval" header contains the substring "woofy dog".
+void TestAndSetTest::setTestCondition(api::TestAndSetCommand & command)
+{
+    command.setCondition(documentapi::TestAndSetCondition("testdoctype1.hstringval=\"*woofy dog*\""));
+}
+
+// Puts the shared test document at the given timestamp, first switching its
+// header to MATCHING_HEADER when the condition is supposed to match.
+void TestAndSetTest::putTestDocument(bool matchingHeader, api::Timestamp timestamp) {
+    if (matchingHeader) {
+        testDoc->setValue(testDoc->getField("hstringval"), MATCHING_HEADER);
+    }
+
+    api::PutCommand put(BUCKET_ID, testDoc, timestamp);
+    thread->handlePut(put);
+}
+
+// Asserts the test document is retrievable and that its "content" field
+// equals the expected value.
+void TestAndSetTest::assertTestDocumentFoundAndMatchesContent(const document::FieldValue & value)
+{
+    auto doc = retrieveTestDocument();
+    auto & field = doc->getField("content");
+
+    CPPUNIT_ASSERT_EQUAL(*doc->getValue(field), value);
+}
+
+// Renders the line dumpBucket() emits for one entry: remove tombstones show
+// the bare document id, put entries wrap it in "Doc(...)".
+std::string TestAndSetTest::expectedDocEntryString(
+    api::Timestamp timestamp,
+    const document::DocumentId & docId,
+    spi::DocumentMetaFlags removeFlag)
+{
+    std::stringstream ss;
+
+    ss << "DocEntry(" << timestamp << ", " << removeFlag << ", ";
+    if (removeFlag == spi::REMOVE_ENTRY) {
+        ss << docId.toString() << ")\n";
+    } else {
+        ss << "Doc(" << docId.toString() << "))\n";
+    }
+
+    return ss.str();
+}
+
+} // storage
diff --git a/storage/src/tests/pstack_testrunner b/storage/src/tests/pstack_testrunner
new file mode 100755
index 00000000000..320d47f7e35
--- /dev/null
+++ b/storage/src/tests/pstack_testrunner
@@ -0,0 +1,14 @@
+#!/usr/bin/perl -w
+
+use strict;
+
+my @pids = `ps auxww | grep "./testrunner" | grep -v grep`;
+foreach (@pids) {
+ s/^\S+\s+(\d+)\s+.*$/$1/;
+ chomp;
+}
+
+foreach my $pid (@pids) {
+ my $cmd = "pstack $pid";
+ system($cmd) == 0 or die "Failed to run '$cmd'";
+}
diff --git a/storage/src/tests/serverapp/.gitignore b/storage/src/tests/serverapp/.gitignore
new file mode 100644
index 00000000000..333f254ba10
--- /dev/null
+++ b/storage/src/tests/serverapp/.gitignore
@@ -0,0 +1,8 @@
+*.So
+*.lo
+.*.swp
+.depend
+.depend.NEW
+.deps
+.libs
+Makefile
diff --git a/storage/src/tests/storageserver/.gitignore b/storage/src/tests/storageserver/.gitignore
new file mode 100644
index 00000000000..c4098089f09
--- /dev/null
+++ b/storage/src/tests/storageserver/.gitignore
@@ -0,0 +1,13 @@
+*.So
+*.lo
+*.o
+.*.swp
+.config.log
+.depend
+.depend.NEW
+.deps
+.libs
+Makefile
+filestorage
+testrunner
+testrunner.core
diff --git a/storage/src/tests/storageserver/CMakeLists.txt b/storage/src/tests/storageserver/CMakeLists.txt
new file mode 100644
index 00000000000..2e327089b4c
--- /dev/null
+++ b/storage/src/tests/storageserver/CMakeLists.txt
@@ -0,0 +1,17 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_library(storage_teststorageserver
+ SOURCES
+ communicationmanagertest.cpp
+ statemanagertest.cpp
+ documentapiconvertertest.cpp
+ mergethrottlertest.cpp
+ testvisitormessagesession.cpp
+ bouncertest.cpp
+ bucketintegritycheckertest.cpp
+ priorityconvertertest.cpp
+ statereportertest.cpp
+ changedbucketownershiphandlertest.cpp
+ DEPENDS
+ AFTER
+ storage_storageconfig
+)
diff --git a/storage/src/tests/storageserver/bouncertest.cpp b/storage/src/tests/storageserver/bouncertest.cpp
new file mode 100644
index 00000000000..f00e4b19c31
--- /dev/null
+++ b/storage/src/tests/storageserver/bouncertest.cpp
@@ -0,0 +1,285 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <boost/pointer_cast.hpp>
+#include <cppunit/extensions/HelperMacros.h>
+#include <iostream>
+#include <string>
+#include <vespa/storageapi/message/bucket.h>
+#include <vespa/storageapi/message/state.h>
+#include <vespa/storageapi/message/stat.h>
+#include <vespa/vdslib/state/nodestate.h>
+#include <vespa/storage/storageserver/bouncer.h>
+#include <tests/common/teststorageapp.h>
+#include <tests/common/testhelper.h>
+#include <tests/common/dummystoragelink.h>
+#include <vespa/storageapi/message/persistence.h>
+
+namespace storage {
+
+struct BouncerTest : public CppUnit::TestFixture {
+ std::unique_ptr<TestServiceLayerApp> _node;
+ std::unique_ptr<DummyStorageLink> _upper;
+ Bouncer* _manager;
+ DummyStorageLink* _lower;
+
+ BouncerTest();
+
+ void setUp();
+ void tearDown();
+
+ void testFutureTimestamp();
+ void testAllowNotifyBucketChangeEvenWhenDistributorDown();
+ void rejectLowerPrioritizedFeedMessagesWhenConfigured();
+ void doNotRejectHigherPrioritizedFeedMessagesThanConfigured();
+ void rejectionThresholdIsExclusive();
+ void onlyRejectFeedMessagesWhenConfigured();
+ void rejectionIsDisabledByDefaultInConfig();
+ void readOnlyOperationsAreNotRejected();
+ void internalOperationsAreNotRejected();
+ void outOfBoundsConfigValuesThrowException();
+
+ CPPUNIT_TEST_SUITE(BouncerTest);
+ CPPUNIT_TEST(testFutureTimestamp);
+ CPPUNIT_TEST(testAllowNotifyBucketChangeEvenWhenDistributorDown);
+ CPPUNIT_TEST(rejectLowerPrioritizedFeedMessagesWhenConfigured);
+ CPPUNIT_TEST(doNotRejectHigherPrioritizedFeedMessagesThanConfigured);
+ CPPUNIT_TEST(rejectionThresholdIsExclusive);
+ CPPUNIT_TEST(onlyRejectFeedMessagesWhenConfigured);
+ CPPUNIT_TEST(rejectionIsDisabledByDefaultInConfig);
+ CPPUNIT_TEST(readOnlyOperationsAreNotRejected);
+ CPPUNIT_TEST(internalOperationsAreNotRejected);
+ CPPUNIT_TEST(outOfBoundsConfigValuesThrowException);
+ CPPUNIT_TEST_SUITE_END();
+
+ using Priority = api::StorageMessage::Priority;
+
+ static constexpr int RejectionDisabledConfigValue = -1;
+
+ // Note: newThreshold is intentionally int (rather than Priority) in order
+ // to be able to test out of bounds values.
+ void configureRejectionThreshold(int newThreshold);
+
+ std::shared_ptr<api::StorageCommand> createDummyFeedMessage(
+ api::Timestamp timestamp,
+ Priority priority = 0);
+
+ void assertMessageBouncedWithRejection();
+ void assertMessageNotBounced();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(BouncerTest);
+
+BouncerTest::BouncerTest()
+ : _node(),
+ _upper(),
+ _manager(0),
+ _lower(0)
+{
+}
+
+void
+BouncerTest::setUp() {
+ try{
+ vdstestlib::DirConfig config(getStandardConfig(true));
+ _node.reset(new TestServiceLayerApp(
+ DiskCount(1), NodeIndex(2), config.getConfigId()));
+ _upper.reset(new DummyStorageLink());
+ _manager = new Bouncer(_node->getComponentRegister(),
+ config.getConfigId());
+ _lower = new DummyStorageLink();
+ _upper->push_back(std::unique_ptr<StorageLink>(_manager));
+ _upper->push_back(std::unique_ptr<StorageLink>(_lower));
+ _upper->open();
+ } catch (std::exception& e) {
+ std::cerr << "Failed to static initialize objects: " << e.what()
+ << "\n";
+ }
+ _node->getClock().setAbsoluteTimeInSeconds(10);
+}
+
+void
+BouncerTest::tearDown() {
+ _manager = 0;
+ _lower = 0;
+ _upper->close();
+ _upper->flush();
+ _upper.reset(0);
+ _node.reset(0);
+}
+
+std::shared_ptr<api::StorageCommand>
+BouncerTest::createDummyFeedMessage(api::Timestamp timestamp,
+ api::StorageMessage::Priority priority)
+{
+ auto cmd = std::make_shared<api::RemoveCommand>(
+ document::BucketId(0),
+ document::DocumentId("doc:foo:bar"),
+ timestamp);
+ cmd->setPriority(priority);
+ return cmd;
+}
+
+void
+BouncerTest::testFutureTimestamp()
+{
+
+ // Fail when future timestamps (more than 5 seconds) are received.
+ {
+ _upper->sendDown(createDummyFeedMessage(16 * 1000000));
+
+ CPPUNIT_ASSERT_EQUAL(1, (int)_upper->getNumReplies());
+ CPPUNIT_ASSERT_EQUAL(0, (int)_upper->getNumCommands());
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode::ABORTED,
+ static_cast<api::RemoveReply&>(*_upper->getReply(0)).
+ getResult().getResult());
+ _upper->reset();
+ }
+
+ // Verify that 1 second clock skew is OK
+ {
+ _upper->sendDown(createDummyFeedMessage(11 * 1000000));
+
+ CPPUNIT_ASSERT_EQUAL(0, (int)_upper->getNumReplies());
+ CPPUNIT_ASSERT_EQUAL(1, (int)_lower->getNumCommands());
+ _lower->reset();
+ }
+
+ // Verify that past is OK
+ {
+ _upper->sendDown(createDummyFeedMessage(5 * 1000000));
+
+ CPPUNIT_ASSERT_EQUAL(1, (int)_lower->getNumCommands());
+ }
+
+
+}
+
+void
+BouncerTest::testAllowNotifyBucketChangeEvenWhenDistributorDown()
+{
+ lib::NodeState state(lib::NodeType::DISTRIBUTOR, lib::State::DOWN);
+ _node->getNodeStateUpdater().setReportedNodeState(state);
+ // Trigger Bouncer state update
+ auto clusterState = std::make_shared<lib::ClusterState>(
+ "distributor:3 storage:3");
+ _node->getNodeStateUpdater().setClusterState(clusterState);
+
+
+ document::BucketId bucket(16, 1234);
+ api::BucketInfo info(0x1, 0x2, 0x3);
+ auto cmd = std::make_shared<api::NotifyBucketChangeCommand>(bucket, info);
+ _upper->sendDown(cmd);
+
+ CPPUNIT_ASSERT_EQUAL(size_t(0), _upper->getNumReplies());
+ CPPUNIT_ASSERT_EQUAL(size_t(1), _lower->getNumCommands());
+}
+
+void
+BouncerTest::assertMessageBouncedWithRejection()
+{
+ CPPUNIT_ASSERT_EQUAL(size_t(1), _upper->getNumReplies());
+ CPPUNIT_ASSERT_EQUAL(size_t(0), _upper->getNumCommands());
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode::REJECTED,
+ static_cast<api::RemoveReply&>(*_upper->getReply(0)).
+ getResult().getResult());
+ CPPUNIT_ASSERT_EQUAL(size_t(0), _lower->getNumCommands());
+}
+
+void
+BouncerTest::assertMessageNotBounced()
+{
+ CPPUNIT_ASSERT_EQUAL(size_t(0), _upper->getNumReplies());
+ CPPUNIT_ASSERT_EQUAL(size_t(1), _lower->getNumCommands());
+}
+
+void
+BouncerTest::configureRejectionThreshold(int newThreshold)
+{
+ using Builder = vespa::config::content::core::StorBouncerConfigBuilder;
+ auto config = std::make_unique<Builder>();
+ config->feedRejectionPriorityThreshold = newThreshold;
+ _manager->configure(std::move(config));
+}
+
+void
+BouncerTest::rejectLowerPrioritizedFeedMessagesWhenConfigured()
+{
+ configureRejectionThreshold(Priority(120));
+ _upper->sendDown(createDummyFeedMessage(11 * 1000000, Priority(121)));
+ assertMessageBouncedWithRejection();
+}
+
+void
+BouncerTest::doNotRejectHigherPrioritizedFeedMessagesThanConfigured()
+{
+ configureRejectionThreshold(Priority(120));
+ _upper->sendDown(createDummyFeedMessage(11 * 1000000, Priority(119)));
+ assertMessageNotBounced();
+}
+
+void
+BouncerTest::rejectionThresholdIsExclusive()
+{
+ configureRejectionThreshold(Priority(120));
+ _upper->sendDown(createDummyFeedMessage(11 * 1000000, Priority(120)));
+ assertMessageNotBounced();
+}
+
+void
+BouncerTest::onlyRejectFeedMessagesWhenConfigured()
+{
+ configureRejectionThreshold(RejectionDisabledConfigValue);
+ // A message with even the lowest priority should not be rejected.
+ _upper->sendDown(createDummyFeedMessage(11 * 1000000, Priority(255)));
+ assertMessageNotBounced();
+}
+
+void
+BouncerTest::rejectionIsDisabledByDefaultInConfig()
+{
+ _upper->sendDown(createDummyFeedMessage(11 * 1000000, Priority(255)));
+ assertMessageNotBounced();
+}
+
+void
+BouncerTest::readOnlyOperationsAreNotRejected()
+{
+ configureRejectionThreshold(Priority(1));
+ // StatBucket is an external operation, but it's not a mutating operation
+ // and should therefore not be blocked.
+ auto cmd = std::make_shared<api::StatBucketCommand>(
+ document::BucketId(16, 5), "");
+ cmd->setPriority(Priority(2));
+ _upper->sendDown(cmd);
+ assertMessageNotBounced();
+}
+
+void
+BouncerTest::internalOperationsAreNotRejected()
+{
+ configureRejectionThreshold(Priority(1));
+ document::BucketId bucket(16, 1234);
+ api::BucketInfo info(0x1, 0x2, 0x3);
+ auto cmd = std::make_shared<api::NotifyBucketChangeCommand>(bucket, info);
+ cmd->setPriority(Priority(2));
+ _upper->sendDown(cmd);
+ assertMessageNotBounced();
+}
+
+void
+BouncerTest::outOfBoundsConfigValuesThrowException()
+{
+ try {
+ configureRejectionThreshold(256);
+ CPPUNIT_FAIL("Upper bound violation not caught");
+ } catch (config::InvalidConfigException) {}
+
+ try {
+ configureRejectionThreshold(-2);
+ CPPUNIT_FAIL("Lower bound violation not caught");
+ } catch (config::InvalidConfigException) {}
+}
+
+} // storage
+
diff --git a/storage/src/tests/storageserver/bucketintegritycheckertest.cpp b/storage/src/tests/storageserver/bucketintegritycheckertest.cpp
new file mode 100644
index 00000000000..88a5546b174
--- /dev/null
+++ b/storage/src/tests/storageserver/bucketintegritycheckertest.cpp
@@ -0,0 +1,302 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <boost/lexical_cast.hpp>
+#include <cppunit/extensions/HelperMacros.h>
+#include <vespa/log/log.h>
+#include <vespa/storage/bucketdb/bucketmanager.h>
+#include <vespa/storage/bucketdb/storbucketdb.h>
+#include <vespa/storage/persistence/filestorage/filestormanager.h>
+#include <vespa/storage/storageserver/bucketintegritychecker.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <tests/common/testhelper.h>
+#include <tests/common/storagelinktest.h>
+#include <tests/common/dummystoragelink.h>
+#include <vespa/vespalib/io/fileutil.h>
+#include <tests/common/teststorageapp.h>
+
+LOG_SETUP(".test.bucketintegritychecker");
+
+namespace storage {
+
+struct BucketIntegrityCheckerTest : public CppUnit::TestFixture {
+ std::unique_ptr<vdstestlib::DirConfig> _config;
+ std::unique_ptr<TestServiceLayerApp> _node;
+ int _timeout; // Timeout in seconds before aborting
+
+ void setUp() {
+ _timeout = 60*2;
+ _config.reset(new vdstestlib::DirConfig(getStandardConfig(true)));
+ _node.reset(new TestServiceLayerApp(DiskCount(256),
+ NodeIndex(0),
+ _config->getConfigId()));
+ }
+
+ void tearDown() {
+ LOG(info, "Finished test");
+ }
+
+ void testConfig();
+ void testBasicFunctionality();
+ void testTiming();
+
+ CPPUNIT_TEST_SUITE(BucketIntegrityCheckerTest);
+ CPPUNIT_TEST(testConfig);
+ CPPUNIT_TEST(testBasicFunctionality);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(BucketIntegrityCheckerTest);
+
+void BucketIntegrityCheckerTest::testConfig()
+{
+
+ // Verify that config is read correctly. Given config should not use
+ // any default values.
+ vdstestlib::DirConfig::Config& config(
+ _config->getConfig("stor-integritychecker"));
+ config.set("dailycyclestart", "60");
+ config.set("dailycyclestop", "360");
+ config.set("weeklycycle", "crRc-rc");
+ config.set("maxpending", "2");
+ config.set("mincycletime", "120");
+ config.set("requestdelay", "5");
+
+ BucketIntegrityChecker checker(_config->getConfigId(),
+ _node->getComponentRegister());
+ checker.setMaxThreadWaitTime(framework::MilliSecTime(10));
+ SchedulingOptions& opt(checker.getSchedulingOptions());
+ CPPUNIT_ASSERT_EQUAL(60u, opt._dailyCycleStart);
+ CPPUNIT_ASSERT_EQUAL(360u, opt._dailyCycleStop);
+ CPPUNIT_ASSERT_EQUAL(SchedulingOptions::CONTINUE, opt._dailyStates[0]);
+ CPPUNIT_ASSERT_EQUAL(SchedulingOptions::RUN_CHEAP, opt._dailyStates[1]);
+ CPPUNIT_ASSERT_EQUAL(SchedulingOptions::RUN_FULL, opt._dailyStates[2]);
+ CPPUNIT_ASSERT_EQUAL(SchedulingOptions::CONTINUE, opt._dailyStates[3]);
+ CPPUNIT_ASSERT_EQUAL(SchedulingOptions::DONT_RUN, opt._dailyStates[4]);
+ CPPUNIT_ASSERT_EQUAL(SchedulingOptions::RUN_CHEAP, opt._dailyStates[5]);
+ CPPUNIT_ASSERT_EQUAL(SchedulingOptions::CONTINUE, opt._dailyStates[6]);
+ CPPUNIT_ASSERT_EQUAL(2u, opt._maxPendingCount);
+ CPPUNIT_ASSERT_EQUAL(framework::SecondTime(7200), opt._minCycleTime);
+ CPPUNIT_ASSERT_EQUAL(framework::SecondTime(5), opt._requestDelay);
+}
+
+namespace {
+ /**
+ * Calculate a date based on the following format:
+ * week<#> <day> <hh>:<mm>:<ss>
+ * Examples: "week3 mon 00:30:00"
+ * "week3 tue 04:20:00"
+ * "week9 thi 14:00:24"
+ */
+ time_t getDate(const std::string& datestring) {
+ vespalib::string rest(datestring);
+ int spacePos = rest.find(' ');
+ uint32_t week = strtoul(rest.substr(4, spacePos-4).c_str(), NULL, 0);
+ rest = rest.substr(spacePos+1);
+ vespalib::string wday(rest.substr(0,3));
+ rest = rest.substr(4);
+ uint32_t hours = strtoul(rest.substr(0, 2).c_str(), NULL, 0);
+ uint32_t minutes = strtoul(rest.substr(3, 2).c_str(), NULL, 0);
+ uint32_t seconds = strtoul(rest.substr(6, 2).c_str(), NULL, 0);
+ uint32_t day(0);
+ if (wday == "mon") { day = 1; }
+ else if (wday == "tue") { day = 2; }
+ else if (wday == "wed") { day = 3; }
+ else if (wday == "thi") { day = 4; }
+ else if (wday == "fri") { day = 5; }
+ else if (wday == "sat") { day = 6; }
+ else if (wday == "sun") { day = 0; }
+ else { assert(false); }
+ // Create a start time that points to the start of some week.
+ // A random sunday 00:00:00, which we will use as start of time
+ struct tm mytime;
+ memset(&mytime, 0, sizeof(mytime));
+ mytime.tm_year = 2008 - 1900;
+ mytime.tm_mon = 0;
+ mytime.tm_mday = 1;
+ mytime.tm_hour = 0;
+ mytime.tm_min = 0;
+ mytime.tm_sec = 0;
+ time_t startTime = timegm(&mytime);
+ CPPUNIT_ASSERT(gmtime_r(&startTime, &mytime));
+ while (mytime.tm_wday != 0) {
+ ++mytime.tm_mday;
+ startTime = timegm(&mytime);
+ CPPUNIT_ASSERT(gmtime_r(&startTime, &mytime));
+ }
+ // Add the wanted values to the start time
+ time_t resultTime = startTime;
+ resultTime += week * 7 * 24 * 60 * 60
+ + day * 24 * 60 * 60
+ + hours * 60 * 60
+ + minutes * 60
+ + seconds;
+ // std::cerr << "Time requested " << datestring << ". Got time "
+ // << framework::SecondTime(resultTime).toString() << "\n";
+ return resultTime;
+ }
+
+ void addBucketToDatabase(TestServiceLayerApp& server,
+ const document::BucketId& id, uint8_t disk,
+ uint32_t numDocs, uint32_t crc, uint32_t totalSize)
+ {
+ bucketdb::StorageBucketInfo info;
+ info.setBucketInfo(api::BucketInfo(crc, numDocs, totalSize));
+ info.disk = disk;
+ server.getStorageBucketDatabase().insert(id, info, "foo");
+ }
+
+
+ /**
+ * In tests wanting to only have one pending, only add buckets for one disk
+ * as pending is per disk. If so set singleDisk true.
+ */
+ void addBucketsToDatabase(TestServiceLayerApp& server, bool singleDisk) {
+ addBucketToDatabase(server, document::BucketId(16, 0x123), 0,
+ 14, 0x123, 1024);
+ addBucketToDatabase(server, document::BucketId(16, 0x234), 0,
+ 18, 0x234, 1024);
+ addBucketToDatabase(server, document::BucketId(16, 0x345), 0,
+ 11, 0x345, 2048);
+ addBucketToDatabase(server, document::BucketId(16, 0x456), 0,
+ 13, 0x456, 1280);
+ if (!singleDisk) {
+ addBucketToDatabase(server, document::BucketId(16, 0x567), 1,
+ 20, 0x567, 4096);
+ addBucketToDatabase(server, document::BucketId(16, 0x987), 254,
+ 8, 0x987, 65536);
+ }
+ }
+}
+
+void BucketIntegrityCheckerTest::testBasicFunctionality()
+{
+ _node->getClock().setAbsoluteTimeInSeconds(getDate("week1 sun 00:00:00"));
+ addBucketsToDatabase(*_node, false);
+ DummyStorageLink* dummyLink = 0;
+ {
+ std::unique_ptr<BucketIntegrityChecker> midLink(
+ new BucketIntegrityChecker("", _node->getComponentRegister()));
+ BucketIntegrityChecker& checker(*midLink);
+ checker.setMaxThreadWaitTime(framework::MilliSecTime(10));
+ // Setup and start checker
+ DummyStorageLink topLink;
+ topLink.push_back(StorageLink::UP(midLink.release()));
+ checker.push_back(std::unique_ptr<StorageLink>(
+ dummyLink = new DummyStorageLink()));
+ checker.getSchedulingOptions()._maxPendingCount = 2;
+ checker.getSchedulingOptions()._minCycleTime = framework::SecondTime(60 * 60);
+ topLink.open();
+ // Waiting for system to be initialized
+ FastOS_Thread::Sleep(10); // Give next message chance to come
+ ASSERT_COMMAND_COUNT(0, *dummyLink);
+ topLink.doneInit();
+ checker.bump();
+ // Should have started new run with 2 pending per disk
+ dummyLink->waitForMessages(4, _timeout);
+ FastOS_Thread::Sleep(10); // Give 5th message chance to come
+ ASSERT_COMMAND_COUNT(4, *dummyLink);
+ RepairBucketCommand *cmd1 = dynamic_cast<RepairBucketCommand*>(
+ dummyLink->getCommand(0).get());
+ CPPUNIT_ASSERT_EQUAL(230, (int)cmd1->getPriority());
+ CPPUNIT_ASSERT(cmd1);
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 0x234),
+ cmd1->getBucketId());
+ RepairBucketCommand *cmd2 = dynamic_cast<RepairBucketCommand*>(
+ dummyLink->getCommand(1).get());
+ CPPUNIT_ASSERT(cmd2);
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 0x456),
+ cmd2->getBucketId());
+ RepairBucketCommand *cmd3 = dynamic_cast<RepairBucketCommand*>(
+ dummyLink->getCommand(2).get());
+ CPPUNIT_ASSERT(cmd3);
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 0x567),
+ cmd3->getBucketId());
+ RepairBucketCommand *cmd4 = dynamic_cast<RepairBucketCommand*>(
+ dummyLink->getCommand(3).get());
+ CPPUNIT_ASSERT(cmd4);
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 0x987),
+ cmd4->getBucketId());
+
+ // Answering a message on disk with no more buckets does not trigger new
+ std::shared_ptr<RepairBucketReply> reply1(
+ new RepairBucketReply(*cmd3));
+ CPPUNIT_ASSERT(StorageLinkTest::callOnUp(checker, reply1));
+ FastOS_Thread::Sleep(10); // Give next message chance to come
+ ASSERT_COMMAND_COUNT(4, *dummyLink);
+ // Answering a message on disk with more buckets trigger new repair
+ std::shared_ptr<RepairBucketReply> reply2(
+ new RepairBucketReply(*cmd2));
+ CPPUNIT_ASSERT(StorageLinkTest::callOnUp(checker, reply2));
+ dummyLink->waitForMessages(5, _timeout);
+ FastOS_Thread::Sleep(10); // Give 6th message chance to come
+ ASSERT_COMMAND_COUNT(5, *dummyLink);
+ RepairBucketCommand *cmd5 = dynamic_cast<RepairBucketCommand*>(
+ dummyLink->getCommand(4).get());
+ CPPUNIT_ASSERT(cmd5);
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 0x345),
+ cmd5->getBucketId());
+ // Fail a repair, causing it to be resent later, but first continue
+ // with other bucket.
+ std::shared_ptr<RepairBucketReply> reply3(
+ new RepairBucketReply(*cmd1));
+ reply3->setResult(api::ReturnCode(api::ReturnCode::IGNORED));
+ CPPUNIT_ASSERT(StorageLinkTest::callOnUp(checker, reply3));
+ dummyLink->waitForMessages(6, _timeout);
+ FastOS_Thread::Sleep(10); // Give 7th message chance to come
+ ASSERT_COMMAND_COUNT(6, *dummyLink);
+ RepairBucketCommand *cmd6 = dynamic_cast<RepairBucketCommand*>(
+ dummyLink->getCommand(5).get());
+ CPPUNIT_ASSERT(cmd6);
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 0x123),
+ cmd6->getBucketId());
+ // Fail a repair with not found. That is an acceptable return code.
+ // (No more requests as this was last for that disk)
+ std::shared_ptr<RepairBucketReply> reply4(
+ new RepairBucketReply(*cmd4));
+ reply3->setResult(api::ReturnCode(api::ReturnCode::BUCKET_NOT_FOUND));
+ CPPUNIT_ASSERT(StorageLinkTest::callOnUp(checker, reply4));
+ FastOS_Thread::Sleep(10); // Give 7th message chance to come
+ ASSERT_COMMAND_COUNT(6, *dummyLink);
+
+ // Send a repair reply that actually have corrected the bucket.
+ api::BucketInfo newInfo(0x3456, 4, 8192);
+ std::shared_ptr<RepairBucketReply> reply5(
+ new RepairBucketReply(*cmd5, newInfo));
+ reply5->setAltered(true);
+ CPPUNIT_ASSERT(StorageLinkTest::callOnUp(checker, reply5));
+
+ // Finish run. New iteration should not start yet as min
+ // cycle time has not passed
+ std::shared_ptr<RepairBucketReply> reply6(
+ new RepairBucketReply(*cmd6));
+ CPPUNIT_ASSERT(StorageLinkTest::callOnUp(checker, reply6));
+ dummyLink->waitForMessages(7, _timeout);
+ ASSERT_COMMAND_COUNT(7, *dummyLink);
+ RepairBucketCommand *cmd7 = dynamic_cast<RepairBucketCommand*>(
+ dummyLink->getCommand(6).get());
+ CPPUNIT_ASSERT(cmd7);
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 0x234),
+ cmd7->getBucketId());
+ std::shared_ptr<RepairBucketReply> reply7(
+ new RepairBucketReply(*cmd7));
+ CPPUNIT_ASSERT(StorageLinkTest::callOnUp(checker, reply7));
+ FastOS_Thread::Sleep(10); // Give 8th message chance to come
+ ASSERT_COMMAND_COUNT(7, *dummyLink);
+
+ // Still not time for next iteration
+ dummyLink->reset();
+ _node->getClock().setAbsoluteTimeInSeconds(getDate("week1 sun 00:59:59"));
+ FastOS_Thread::Sleep(10); // Give new run chance to start
+ ASSERT_COMMAND_COUNT(0, *dummyLink);
+
+ // Pass time until next cycle should start
+ dummyLink->reset();
+ _node->getClock().setAbsoluteTimeInSeconds(getDate("week1 sun 01:00:00"));
+ dummyLink->waitForMessages(4, _timeout);
+ ASSERT_COMMAND_COUNT(4, *dummyLink);
+ }
+}
+
+} // storage
diff --git a/storage/src/tests/storageserver/changedbucketownershiphandlertest.cpp b/storage/src/tests/storageserver/changedbucketownershiphandlertest.cpp
new file mode 100644
index 00000000000..3b83d71d8f3
--- /dev/null
+++ b/storage/src/tests/storageserver/changedbucketownershiphandlertest.cpp
@@ -0,0 +1,648 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/document/base/testdocman.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/storage/bucketdb/storbucketdb.h>
+#include <vespa/storage/persistence/messages.h>
+#include <vespa/storageapi/message/state.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <vespa/storageapi/message/bucketsplitting.h>
+#include <vespa/storageapi/message/removelocation.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storageapi/message/multioperation.h>
+#include <tests/common/teststorageapp.h>
+#include <tests/common/testhelper.h>
+#include <tests/common/dummystoragelink.h>
+#include <vespa/storage/storageserver/changedbucketownershiphandler.h>
+#include <memory>
+
+namespace storage {
+
+class ChangedBucketOwnershipHandlerTest : public CppUnit::TestFixture
+{
+ std::unique_ptr<TestServiceLayerApp> _app;
+ std::unique_ptr<DummyStorageLink> _top;
+ ChangedBucketOwnershipHandler* _handler;
+ DummyStorageLink* _bottom;
+ document::TestDocMan _testDocRepo;
+
+ CPPUNIT_TEST_SUITE(ChangedBucketOwnershipHandlerTest);
+ CPPUNIT_TEST(testEnumerateBucketsBelongingOnChangedNodes);
+ CPPUNIT_TEST(testNoPreExistingClusterState);
+ CPPUNIT_TEST(testNoAvailableDistributorsInCurrentState);
+ CPPUNIT_TEST(testNoAvailableDistributorsInCurrentAndNewState);
+ CPPUNIT_TEST(testDownEdgeToNoAvailableDistributors);
+ CPPUNIT_TEST(testOwnershipChangedOnDistributorUpEdge);
+ CPPUNIT_TEST(testDistributionConfigChangeUpdatesOwnership);
+ CPPUNIT_TEST(testAbortOpsWhenNoClusterStateSet);
+ CPPUNIT_TEST(testAbortOutdatedSplit);
+ CPPUNIT_TEST(testAbortOutdatedJoin);
+ CPPUNIT_TEST(testAbortOutdatedSetBucketState);
+ CPPUNIT_TEST(testAbortOutdatedCreateBucket);
+ CPPUNIT_TEST(testAbortOutdatedDeleteBucket);
+ CPPUNIT_TEST(testAbortOutdatedMergeBucket);
+ CPPUNIT_TEST(testAbortOutdatedRemoveLocation);
+ CPPUNIT_TEST(testIdealStateAbortsAreConfigurable);
+ CPPUNIT_TEST(testAbortOutdatedPutOperation);
+ CPPUNIT_TEST(testAbortOutdatedMultiOperation);
+ CPPUNIT_TEST(testAbortOutdatedUpdateCommand);
+ CPPUNIT_TEST(testAbortOutdatedRemoveCommand);
+ CPPUNIT_TEST(testAbortOutdatedRevertCommand);
+ CPPUNIT_TEST(testIdealStateAbortUpdatesMetric);
+ CPPUNIT_TEST(testExternalLoadOpAbortUpdatesMetric);
+ CPPUNIT_TEST(testExternalLoadOpAbortsAreConfigurable);
+ CPPUNIT_TEST_SUITE_END();
+
+ // TODO test: down edge triggered on cluster state with cluster down?
+
+ std::vector<document::BucketId> insertBuckets(
+ uint32_t numBuckets,
+ uint16_t wantedOwner,
+ const lib::ClusterState& state);
+
+ std::shared_ptr<api::SetSystemStateCommand> createStateCmd(
+ const lib::ClusterState& state) const
+ {
+ return std::make_shared<api::SetSystemStateCommand>(state);
+ }
+
+ std::shared_ptr<api::SetSystemStateCommand> createStateCmd(
+ const std::string& stateStr) const
+ {
+ return createStateCmd(lib::ClusterState(stateStr));
+ }
+
+ void applyDistribution(Redundancy, NodeCount);
+ void applyClusterState(const lib::ClusterState&);
+
+ document::BucketId nextOwnedBucket(
+ uint16_t wantedOwner,
+ const lib::ClusterState& state,
+ const document::BucketId& lastId) const;
+
+ document::BucketId getBucketToAbort() const;
+ document::BucketId getBucketToAllow() const;
+
+ void sendAndExpectAbortedCreateBucket(uint16_t fromDistributorIndex);
+
+ template <typename MsgType, typename... MsgParams>
+ bool changeAbortsMessage(MsgParams&&... params);
+
+ lib::ClusterState getDefaultTestClusterState() const {
+ return lib::ClusterState("distributor:4 storage:1");
+ }
+
+public:
+ void testEnumerateBucketsBelongingOnChangedNodes();
+ void testNoPreExistingClusterState();
+ void testNoAvailableDistributorsInCurrentState();
+ void testNoAvailableDistributorsInCurrentAndNewState();
+ void testDownEdgeToNoAvailableDistributors();
+ void testOwnershipChangedOnDistributorUpEdge();
+ void testDistributionConfigChangeUpdatesOwnership();
+ void testAbortOpsWhenNoClusterStateSet();
+ void testAbortOutdatedSplit();
+ void testAbortOutdatedJoin();
+ void testAbortOutdatedSetBucketState();
+ void testAbortOutdatedCreateBucket();
+ void testAbortOutdatedDeleteBucket();
+ void testAbortOutdatedMergeBucket();
+ void testAbortOutdatedRemoveLocation();
+ void testIdealStateAbortsAreConfigurable();
+ void testAbortOutdatedPutOperation();
+ void testAbortOutdatedMultiOperation();
+ void testAbortOutdatedUpdateCommand();
+ void testAbortOutdatedRemoveCommand();
+ void testAbortOutdatedRevertCommand();
+ void testIdealStateAbortUpdatesMetric();
+ void testExternalLoadOpAbortUpdatesMetric();
+ void testExternalLoadOpAbortsAreConfigurable();
+
+ void setUp();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(ChangedBucketOwnershipHandlerTest);
+
+document::BucketId
+ChangedBucketOwnershipHandlerTest::nextOwnedBucket(
+ uint16_t wantedOwner,
+ const lib::ClusterState& state,
+ const document::BucketId& lastId) const
+{
+ uint32_t idx(lastId.getId() + 1);
+ while (true) {
+ document::BucketId candidate(16, idx);
+ uint16_t owner(_app->getDistribution()->getIdealDistributorNode(
+ state, candidate));
+ if (owner == wantedOwner) {
+ return candidate;
+ }
+ ++idx;
+ }
+ assert(!"should never get here");
+}
+
+std::vector<document::BucketId>
+ChangedBucketOwnershipHandlerTest::insertBuckets(uint32_t numBuckets,
+ uint16_t wantedOwner,
+ const lib::ClusterState& state)
+{
+ std::vector<document::BucketId> inserted;
+ document::BucketId bucket;
+ while (inserted.size() < numBuckets) {
+ bucket = nextOwnedBucket(wantedOwner, state, bucket);
+
+ bucketdb::StorageBucketInfo sbi;
+ sbi.setBucketInfo(api::BucketInfo(1, 2, 3));
+ sbi.disk = 0;
+ _app->getStorageBucketDatabase().insert(bucket, sbi, "test");
+ inserted.push_back(bucket);
+ }
+ return inserted;
+}
+
+void
+ChangedBucketOwnershipHandlerTest::setUp()
+{
+ vdstestlib::DirConfig config(getStandardConfig(true));
+
+ _app.reset(new TestServiceLayerApp);
+ _top.reset(new DummyStorageLink);
+ _handler = new ChangedBucketOwnershipHandler(config.getConfigId(),
+ _app->getComponentRegister());
+ _top->push_back(std::unique_ptr<StorageLink>(_handler));
+ _bottom = new DummyStorageLink;
+ _handler->push_back(std::unique_ptr<StorageLink>(_bottom));
+ _top->open();
+
+ // Ensure we're not dependent on config schema default values.
+ std::unique_ptr<vespa::config::content::PersistenceConfigBuilder> pconfig(
+ new vespa::config::content::PersistenceConfigBuilder);
+ pconfig->abortOutdatedMutatingIdealStateOps = true;
+ pconfig->abortOutdatedMutatingExternalLoadOps = true;
+ _handler->configure(std::move(pconfig));
+}
+
+namespace {
+
+template <typename Set, typename K>
+bool has(const Set& s, const K& key) {
+ return s.find(key) != s.end();
+}
+
+template <typename Vec>
+bool
+hasAbortedAllOf(const AbortBucketOperationsCommand::SP& cmd, const Vec& v)
+{
+ for (auto& b : v) {
+ if (!cmd->shouldAbort(b)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+template <typename Vec>
+bool
+hasAbortedNoneOf(const AbortBucketOperationsCommand::SP& cmd, const Vec& v)
+{
+ for (auto& b : v) {
+ if (cmd->shouldAbort(b)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool
+hasOnlySetSystemStateCmdQueued(DummyStorageLink& link) {
+ if (link.getNumCommands() != 1) {
+ std::cerr << "expected 1 command, found"
+ << link.getNumCommands() << "\n";
+ }
+ api::SetSystemStateCommand::SP cmd(
+ std::dynamic_pointer_cast<api::SetSystemStateCommand>(
+ link.getCommand(0)));
+ return (cmd.get() != 0);
+}
+
+}
+
+void
+ChangedBucketOwnershipHandlerTest::applyDistribution(
+ Redundancy redundancy, NodeCount nodeCount)
+{
+ _app->setDistribution(redundancy, nodeCount);
+ _handler->storageDistributionChanged();
+}
+
+void
+ChangedBucketOwnershipHandlerTest::applyClusterState(
+ const lib::ClusterState& state)
+{
+ _app->setClusterState(state);
+ _handler->reloadClusterState();
+}
+
+void
+ChangedBucketOwnershipHandlerTest::testEnumerateBucketsBelongingOnChangedNodes()
+{
+ lib::ClusterState stateBefore("distributor:4 storage:1");
+ applyDistribution(Redundancy(1), NodeCount(4));
+ applyClusterState(stateBefore);
+ auto node1Buckets(insertBuckets(2, 1, stateBefore));
+ auto node3Buckets(insertBuckets(2, 3, stateBefore));
+ // Add some buckets that will not be part of the change set
+ auto node0Buckets(insertBuckets(3, 0, stateBefore));
+ auto node2Buckets(insertBuckets(2, 2, stateBefore));
+
+ _top->sendDown(createStateCmd("distributor:4 .1.s:d .3.s:d storage:1"));
+ // TODO: refactor into own function
+ CPPUNIT_ASSERT_EQUAL(size_t(2), _bottom->getNumCommands());
+ AbortBucketOperationsCommand::SP cmd(
+ std::dynamic_pointer_cast<AbortBucketOperationsCommand>(
+ _bottom->getCommand(0)));
+ CPPUNIT_ASSERT(cmd.get() != 0);
+
+ CPPUNIT_ASSERT(hasAbortedAllOf(cmd, node1Buckets));
+ CPPUNIT_ASSERT(hasAbortedAllOf(cmd, node3Buckets));
+ CPPUNIT_ASSERT(hasAbortedNoneOf(cmd, node0Buckets));
+ CPPUNIT_ASSERT(hasAbortedNoneOf(cmd, node2Buckets));
+
+ // Handler must swallow abort replies
+ _bottom->sendUp(api::StorageMessage::SP(cmd->makeReply().release()));
+ CPPUNIT_ASSERT_EQUAL(size_t(0), _top->getNumReplies());
+}
+
+void
+ChangedBucketOwnershipHandlerTest::testNoPreExistingClusterState()
+{
+ applyDistribution(Redundancy(1), NodeCount(4));
+ lib::ClusterState stateBefore("distributor:4 storage:1");
+ insertBuckets(2, 1, stateBefore);
+ insertBuckets(3, 0, stateBefore);
+ insertBuckets(2, 2, stateBefore);
+
+ _top->sendDown(createStateCmd("distributor:4 .1.s:d .3.s:d storage:1"));
+ CPPUNIT_ASSERT(hasOnlySetSystemStateCmdQueued(*_bottom));
+}
+
+/**
+ * When current state has no distributors and we receive a state with one or
+ * more distributors, we do not send any abort messages since this should
+ * already have been done on the down-edge.
+ */
+void
+ChangedBucketOwnershipHandlerTest::testNoAvailableDistributorsInCurrentState()
+{
+ applyDistribution(Redundancy(1), NodeCount(3));
+ lib::ClusterState insertedState("distributor:3 storage:1");
+ insertBuckets(2, 0, insertedState);
+ insertBuckets(2, 1, insertedState);
+ insertBuckets(2, 2, insertedState);
+ lib::ClusterState downState("distributor:3 .0.s:d .1.s:d .2.s:d storage:1");
+ _app->setClusterState(downState);
+
+ _top->sendDown(createStateCmd("distributor:3 .1.s:d storage:1"));
+ CPPUNIT_ASSERT(hasOnlySetSystemStateCmdQueued(*_bottom));
+}
+
+void
+ChangedBucketOwnershipHandlerTest::testNoAvailableDistributorsInCurrentAndNewState()
+{
+ applyDistribution(Redundancy(1), NodeCount(3));
+ lib::ClusterState insertedState("distributor:3 storage:1");
+ insertBuckets(2, 0, insertedState);
+ insertBuckets(2, 1, insertedState);
+ insertBuckets(2, 2, insertedState);
+ lib::ClusterState stateBefore("distributor:3 .0.s:s .1.s:s .2.s:d storage:1");
+ applyClusterState(stateBefore);
+ lib::ClusterState downState("distributor:3 .0.s:d .1.s:d .2.s:d storage:1");
+
+ _top->sendDown(createStateCmd(downState));
+ CPPUNIT_ASSERT(hasOnlySetSystemStateCmdQueued(*_bottom));
+}
+
+void
+ChangedBucketOwnershipHandlerTest::testDownEdgeToNoAvailableDistributors()
+{
+ lib::ClusterState insertedState("distributor:3 storage:1");
+ applyDistribution(Redundancy(1), NodeCount(3));
+ applyClusterState(insertedState);
+ auto node0Buckets(insertBuckets(2, 0, insertedState));
+ auto node1Buckets(insertBuckets(2, 1, insertedState));
+ auto node2Buckets(insertBuckets(2, 2, insertedState));
+ lib::ClusterState downState("distributor:3 .0.s:d .1.s:s .2.s:s storage:1");
+
+ _top->sendDown(createStateCmd(downState));
+ // TODO: refactor into own function
+ CPPUNIT_ASSERT_EQUAL(size_t(2), _bottom->getNumCommands());
+ AbortBucketOperationsCommand::SP cmd(
+ std::dynamic_pointer_cast<AbortBucketOperationsCommand>(
+ _bottom->getCommand(0)));
+ CPPUNIT_ASSERT(cmd.get() != 0);
+
+ CPPUNIT_ASSERT(hasAbortedAllOf(cmd, node0Buckets));
+ CPPUNIT_ASSERT(hasAbortedAllOf(cmd, node1Buckets));
+ CPPUNIT_ASSERT(hasAbortedAllOf(cmd, node2Buckets));
+}
+
+void
+ChangedBucketOwnershipHandlerTest::testOwnershipChangedOnDistributorUpEdge()
+{
+ lib::ClusterState stateBefore(
+ "version:10 distributor:4 .1.s:d storage:4 .1.s:d");
+ lib::ClusterState stateAfter(
+ "version:11 distributor:4 .1.t:1369990247 storage:4 .1.s:d");
+ applyDistribution(Redundancy(1), NodeCount(4));
+ applyClusterState(stateBefore);
+ // Add buckets that will belong to distributor 1 after it has come back up
+ auto node1Buckets(insertBuckets(2, 1, stateAfter));
+ // Add some buckets that will not be part of the change set
+ auto node0Buckets(insertBuckets(3, 0, stateAfter));
+ auto node2Buckets(insertBuckets(2, 2, stateAfter));
+
+ _top->sendDown(createStateCmd(stateAfter));
+ // TODO: refactor into own function
+ CPPUNIT_ASSERT_EQUAL(size_t(2), _bottom->getNumCommands());
+ AbortBucketOperationsCommand::SP cmd(
+ std::dynamic_pointer_cast<AbortBucketOperationsCommand>(
+ _bottom->getCommand(0)));
+ CPPUNIT_ASSERT(cmd.get() != 0);
+
+ CPPUNIT_ASSERT(hasAbortedAllOf(cmd, node1Buckets));
+ CPPUNIT_ASSERT(hasAbortedNoneOf(cmd, node0Buckets));
+ CPPUNIT_ASSERT(hasAbortedNoneOf(cmd, node2Buckets));
+
+ // Handler must swallow abort replies
+ _bottom->sendUp(api::StorageMessage::SP(cmd->makeReply().release()));
+ CPPUNIT_ASSERT_EQUAL(size_t(0), _top->getNumReplies());
+}
+
+void
+ChangedBucketOwnershipHandlerTest::sendAndExpectAbortedCreateBucket(
+ uint16_t fromDistributorIndex)
+{
+ document::BucketId bucket(16, 6786);
+ auto msg = std::make_shared<api::CreateBucketCommand>(bucket);
+ msg->setSourceIndex(fromDistributorIndex);
+
+ _top->sendDown(msg);
+ std::vector<api::StorageMessage::SP> replies(_top->getRepliesOnce());
+ CPPUNIT_ASSERT_EQUAL(size_t(1), replies.size());
+ api::StorageReply& reply(dynamic_cast<api::StorageReply&>(*replies[0]));
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode::ABORTED,
+ reply.getResult().getResult());
+}
+
+void
+ChangedBucketOwnershipHandlerTest::testAbortOpsWhenNoClusterStateSet()
+{
+ sendAndExpectAbortedCreateBucket(1);
+}
+
+void
+ChangedBucketOwnershipHandlerTest::testDistributionConfigChangeUpdatesOwnership()
+{
+ lib::ClusterState insertedState("distributor:3 storage:1");
+ applyClusterState(insertedState);
+ applyDistribution(Redundancy(1), NodeCount(3));
+
+ // Apply new distribution config containing only 1 distributor, meaning
+ // any messages sent from >1 must be aborted.
+ applyDistribution(Redundancy(1), NodeCount(1));
+ sendAndExpectAbortedCreateBucket(2);
+}
+
+/**
+ * Generate and dispatch a message of the given type with the provided
+ * arguments as if that message was sent from distributor 1. Messages will
+ * be checked as if the state contains 4 distributors in Up state. This
+ * means that it suffices to send in a message with a bucket that is not
+ * owned by distributor 1 in this state to trigger an abort.
+ */
+template <typename MsgType, typename... MsgParams>
+bool
+ChangedBucketOwnershipHandlerTest::changeAbortsMessage(MsgParams&&... params)
+{
+ auto msg = std::make_shared<MsgType>(std::forward<MsgParams>(params)...);
+ msg->setSourceIndex(1);
+
+ applyDistribution(Redundancy(1), NodeCount(4));
+ applyClusterState(getDefaultTestClusterState());
+
+ _top->sendDown(msg);
+ std::vector<api::StorageMessage::SP> replies(_top->getRepliesOnce());
+ // Test is single-threaded, no need to do any waiting.
+ if (replies.empty()) {
+ return false;
+ } else {
+ CPPUNIT_ASSERT_EQUAL(size_t(1), replies.size());
+ // Make sure the message was actually aborted and not bounced with
+ // some other arbitrary failure code.
+ api::StorageReply& reply(dynamic_cast<api::StorageReply&>(*replies[0]));
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode::ABORTED,
+ reply.getResult().getResult());
+ return true;
+ }
+}
+
+/**
+ * Returns a bucket that is not owned by the sending distributor (1). More
+ * specifically, it returns a bucket that is owned by distributor 2.
+ */
+document::BucketId
+ChangedBucketOwnershipHandlerTest::getBucketToAbort() const
+{
+ lib::ClusterState state(getDefaultTestClusterState());
+ return nextOwnedBucket(2, state, document::BucketId());
+}
+
+/**
+ * Returns a bucket that _is_ owned by distributor 1 and should thus be
+ * allowed through.
+ */
+document::BucketId
+ChangedBucketOwnershipHandlerTest::getBucketToAllow() const
+{
+ lib::ClusterState state(getDefaultTestClusterState());
+ return nextOwnedBucket(1, state, document::BucketId());
+}
+
+void
+ChangedBucketOwnershipHandlerTest::testAbortOutdatedSplit()
+{
+ CPPUNIT_ASSERT(changeAbortsMessage<api::SplitBucketCommand>(
+ getBucketToAbort()));
+ CPPUNIT_ASSERT(!changeAbortsMessage<api::SplitBucketCommand>(
+ getBucketToAllow()));
+}
+
+void
+ChangedBucketOwnershipHandlerTest::testAbortOutdatedJoin()
+{
+ CPPUNIT_ASSERT(changeAbortsMessage<api::JoinBucketsCommand>(
+ getBucketToAbort()));
+ CPPUNIT_ASSERT(!changeAbortsMessage<api::JoinBucketsCommand>(
+ getBucketToAllow()));
+}
+
+void
+ChangedBucketOwnershipHandlerTest::testAbortOutdatedSetBucketState()
+{
+ CPPUNIT_ASSERT(changeAbortsMessage<api::SetBucketStateCommand>(
+ getBucketToAbort(), api::SetBucketStateCommand::ACTIVE));
+ CPPUNIT_ASSERT(!changeAbortsMessage<api::SetBucketStateCommand>(
+ getBucketToAllow(), api::SetBucketStateCommand::ACTIVE));
+}
+
+void
+ChangedBucketOwnershipHandlerTest::testAbortOutdatedCreateBucket()
+{
+ CPPUNIT_ASSERT(changeAbortsMessage<api::CreateBucketCommand>(
+ getBucketToAbort()));
+ CPPUNIT_ASSERT(!changeAbortsMessage<api::CreateBucketCommand>(
+ getBucketToAllow()));
+}
+
+void
+ChangedBucketOwnershipHandlerTest::testAbortOutdatedDeleteBucket()
+{
+ CPPUNIT_ASSERT(changeAbortsMessage<api::DeleteBucketCommand>(
+ getBucketToAbort()));
+ CPPUNIT_ASSERT(!changeAbortsMessage<api::DeleteBucketCommand>(
+ getBucketToAllow()));
+}
+
+void
+ChangedBucketOwnershipHandlerTest::testAbortOutdatedMergeBucket()
+{
+ std::vector<api::MergeBucketCommand::Node> nodes;
+ CPPUNIT_ASSERT(changeAbortsMessage<api::MergeBucketCommand>(
+ getBucketToAbort(), nodes, 0));
+ CPPUNIT_ASSERT(!changeAbortsMessage<api::MergeBucketCommand>(
+ getBucketToAllow(), nodes, 0));
+}
+
+/**
+ * RemoveLocation is technically an external load class, but since it's also
+ * used as the backing operation for GC we have to treat it as if it were an
+ * ideal state operation class.
+ */
+void
+ChangedBucketOwnershipHandlerTest::testAbortOutdatedRemoveLocation()
+{
+ std::vector<api::MergeBucketCommand::Node> nodes;
+ CPPUNIT_ASSERT(changeAbortsMessage<api::RemoveLocationCommand>(
+ "foo", getBucketToAbort()));
+ CPPUNIT_ASSERT(!changeAbortsMessage<api::RemoveLocationCommand>(
+ "foo", getBucketToAllow()));
+}
+
+void
+ChangedBucketOwnershipHandlerTest::testIdealStateAbortsAreConfigurable()
+{
+ std::unique_ptr<vespa::config::content::PersistenceConfigBuilder> config(
+ new vespa::config::content::PersistenceConfigBuilder);
+ config->abortOutdatedMutatingIdealStateOps = false;
+ _handler->configure(std::move(config));
+ // Should not abort operation, even when ownership has changed.
+ CPPUNIT_ASSERT(!changeAbortsMessage<api::CreateBucketCommand>(
+ getBucketToAbort()));
+}
+
+void
+ChangedBucketOwnershipHandlerTest::testAbortOutdatedPutOperation()
+{
+ document::Document::SP doc(_testDocRepo.createRandomDocumentAtLocation(1));
+ CPPUNIT_ASSERT(changeAbortsMessage<api::PutCommand>(
+ getBucketToAbort(), doc, api::Timestamp(1234)));
+ CPPUNIT_ASSERT(!changeAbortsMessage<api::PutCommand>(
+ getBucketToAllow(), doc, api::Timestamp(1234)));
+}
+
+void
+ChangedBucketOwnershipHandlerTest::testAbortOutdatedMultiOperation()
+{
+ CPPUNIT_ASSERT(changeAbortsMessage<api::MultiOperationCommand>(
+ _testDocRepo.getTypeRepoSP(), getBucketToAbort(), 1024));
+ CPPUNIT_ASSERT(!changeAbortsMessage<api::MultiOperationCommand>(
+ _testDocRepo.getTypeRepoSP(), getBucketToAllow(), 1024));
+}
+
+void
+ChangedBucketOwnershipHandlerTest::testAbortOutdatedUpdateCommand()
+{
+ const document::DocumentType* docType(_testDocRepo.getTypeRepo()
+ .getDocumentType("testdoctype1"));
+ document::DocumentId docId("id:foo:testdoctype1::bar");
+ document::DocumentUpdate::SP update(
+ std::make_shared<document::DocumentUpdate>(*docType, docId));
+ CPPUNIT_ASSERT(changeAbortsMessage<api::UpdateCommand>(
+ getBucketToAbort(), update, api::Timestamp(1234)));
+ CPPUNIT_ASSERT(!changeAbortsMessage<api::UpdateCommand>(
+ getBucketToAllow(), update, api::Timestamp(1234)));
+}
+
+void
+ChangedBucketOwnershipHandlerTest::testAbortOutdatedRemoveCommand()
+{
+ document::DocumentId docId("id:foo:testdoctype1::bar");
+ CPPUNIT_ASSERT(changeAbortsMessage<api::RemoveCommand>(
+ getBucketToAbort(), docId, api::Timestamp(1234)));
+ CPPUNIT_ASSERT(!changeAbortsMessage<api::RemoveCommand>(
+ getBucketToAllow(), docId, api::Timestamp(1234)));
+}
+
+void
+ChangedBucketOwnershipHandlerTest::testAbortOutdatedRevertCommand()
+{
+ std::vector<api::Timestamp> timestamps;
+ CPPUNIT_ASSERT(changeAbortsMessage<api::RevertCommand>(
+ getBucketToAbort(), timestamps));
+ CPPUNIT_ASSERT(!changeAbortsMessage<api::RevertCommand>(
+ getBucketToAllow(), timestamps));
+}
+
+void
+ChangedBucketOwnershipHandlerTest::testIdealStateAbortUpdatesMetric()
+{
+ CPPUNIT_ASSERT(changeAbortsMessage<api::SplitBucketCommand>(
+ getBucketToAbort()));
+ CPPUNIT_ASSERT_EQUAL(
+ uint64_t(1),
+ _handler->getMetrics().idealStateOpsAborted.getValue());
+ CPPUNIT_ASSERT_EQUAL(
+ uint64_t(0),
+ _handler->getMetrics().externalLoadOpsAborted.getValue());
+}
+
+void
+ChangedBucketOwnershipHandlerTest::testExternalLoadOpAbortUpdatesMetric()
+{
+ document::DocumentId docId("id:foo:testdoctype1::bar");
+ CPPUNIT_ASSERT(changeAbortsMessage<api::RemoveCommand>(
+ getBucketToAbort(), docId, api::Timestamp(1234)));
+ CPPUNIT_ASSERT_EQUAL(
+ uint64_t(0),
+ _handler->getMetrics().idealStateOpsAborted.getValue());
+ CPPUNIT_ASSERT_EQUAL(
+ uint64_t(1),
+ _handler->getMetrics().externalLoadOpsAborted.getValue());
+}
+
+void
+ChangedBucketOwnershipHandlerTest::testExternalLoadOpAbortsAreConfigurable()
+{
+ std::unique_ptr<vespa::config::content::PersistenceConfigBuilder> config(
+ new vespa::config::content::PersistenceConfigBuilder);
+ config->abortOutdatedMutatingExternalLoadOps = false;
+ _handler->configure(std::move(config));
+ // Should not abort operation, even when ownership has changed.
+ document::DocumentId docId("id:foo:testdoctype1::bar");
+ CPPUNIT_ASSERT(!changeAbortsMessage<api::RemoveCommand>(
+ getBucketToAbort(), docId, api::Timestamp(1234)));
+}
+
+} // storage
diff --git a/storage/src/tests/storageserver/communicationmanagertest.cpp b/storage/src/tests/storageserver/communicationmanagertest.cpp
new file mode 100644
index 00000000000..fe062a9ee30
--- /dev/null
+++ b/storage/src/tests/storageserver/communicationmanagertest.cpp
@@ -0,0 +1,235 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/storage/storageserver/communicationmanager.h>
+
+#include <vespa/messagebus/testlib/slobrok.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storage/frameworkimpl/component/storagecomponentregisterimpl.h>
+#include <vespa/storageframework/defaultimplementation/memory/nomemorymanager.h>
+#include <tests/common/teststorageapp.h>
+#include <tests/common/dummystoragelink.h>
+#include <tests/common/testhelper.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+
+namespace storage {
+
+struct CommunicationManagerTest : public CppUnit::TestFixture {
+ void testSimple();
+ void testDistPendingLimitConfigsArePropagatedToMessageBus();
+ void testStorPendingLimitConfigsArePropagatedToMessageBus();
+ void testCommandsAreDequeuedInPriorityOrder();
+ void testRepliesAreDequeuedInFifoOrder();
+
+ static constexpr uint32_t MESSAGE_WAIT_TIME_SEC = 60;
+
+ void doTestConfigPropagation(bool isContentNode);
+
+ std::shared_ptr<api::StorageCommand> createDummyCommand(
+ api::StorageMessage::Priority priority)
+ {
+ auto cmd = std::make_shared<api::GetCommand>(
+ document::BucketId(0),
+ document::DocumentId("doc::mydoc"),
+ "[all]");
+ cmd->setAddress(api::StorageMessageAddress(
+ "storage", lib::NodeType::STORAGE, 1));
+ cmd->setPriority(priority);
+ return cmd;
+ }
+
+ CPPUNIT_TEST_SUITE(CommunicationManagerTest);
+ CPPUNIT_TEST(testSimple);
+ CPPUNIT_TEST(testDistPendingLimitConfigsArePropagatedToMessageBus);
+ CPPUNIT_TEST(testStorPendingLimitConfigsArePropagatedToMessageBus);
+ CPPUNIT_TEST(testCommandsAreDequeuedInPriorityOrder);
+ CPPUNIT_TEST(testRepliesAreDequeuedInFifoOrder);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(CommunicationManagerTest);
+
+void CommunicationManagerTest::testSimple()
+{
+ mbus::Slobrok slobrok;
+ vdstestlib::DirConfig distConfig(getStandardConfig(false));
+ vdstestlib::DirConfig storConfig(getStandardConfig(true));
+ distConfig.getConfig("stor-server").set("node_index", "1");
+ storConfig.getConfig("stor-server").set("node_index", "1");
+ addSlobrokConfig(distConfig, slobrok);
+ addSlobrokConfig(storConfig, slobrok);
+
+ // Set up a "distributor" and a "storage" node with communication
+ // managers and a dummy storage link below we can use for testing.
+ TestServiceLayerApp storNode(storConfig.getConfigId());
+ TestDistributorApp distNode(distConfig.getConfigId());
+
+ CommunicationManager distributor(distNode.getComponentRegister(),
+ distConfig.getConfigId());
+ CommunicationManager storage(storNode.getComponentRegister(),
+ storConfig.getConfigId());
+ DummyStorageLink *distributorLink = new DummyStorageLink();
+ DummyStorageLink *storageLink = new DummyStorageLink();
+ distributor.push_back(std::unique_ptr<StorageLink>(distributorLink));
+ storage.push_back(std::unique_ptr<StorageLink>(storageLink));
+ distributor.open();
+ storage.open();
+
+ FastOS_Thread::Sleep(1000);
+
+ // Send a message through from distributor to storage
+ std::shared_ptr<api::StorageCommand> cmd(
+ new api::GetCommand(
+ document::BucketId(0), document::DocumentId("doc::mydoc"), "[all]"));
+ cmd->setAddress(api::StorageMessageAddress(
+ "storage", lib::NodeType::STORAGE, 1));
+ distributorLink->sendUp(cmd);
+ storageLink->waitForMessages(1, MESSAGE_WAIT_TIME_SEC);
+ CPPUNIT_ASSERT(storageLink->getNumCommands() > 0);
+ std::shared_ptr<api::StorageCommand> cmd2(
+ std::dynamic_pointer_cast<api::StorageCommand>(
+ storageLink->getCommand(0)));
+ CPPUNIT_ASSERT_EQUAL(
+ vespalib::string("doc::mydoc"),
+ static_cast<api::GetCommand&>(*cmd2).getDocumentId().toString());
+ // Reply to the message
+ std::shared_ptr<api::StorageReply> reply(cmd2->makeReply().release());
+ storageLink->sendUp(reply);
+ storageLink->sendUp(reply);
+ distributorLink->waitForMessages(1, MESSAGE_WAIT_TIME_SEC);
+ CPPUNIT_ASSERT(distributorLink->getNumCommands() > 0);
+ std::shared_ptr<api::GetReply> reply2(
+ std::dynamic_pointer_cast<api::GetReply>(
+ distributorLink->getCommand(0)));
+ CPPUNIT_ASSERT_EQUAL(false, reply2->wasFound());
+}
+
+void
+CommunicationManagerTest::doTestConfigPropagation(bool isContentNode)
+{
+ mbus::Slobrok slobrok;
+ vdstestlib::DirConfig config(getStandardConfig(isContentNode));
+ config.getConfig("stor-server").set("node_index", "1");
+ auto& cfg = config.getConfig("stor-communicationmanager");
+ cfg.set("mbus_content_node_max_pending_count", "12345");
+ cfg.set("mbus_content_node_max_pending_size", "555666");
+ cfg.set("mbus_distributor_node_max_pending_count", "6789");
+ cfg.set("mbus_distributor_node_max_pending_size", "777888");
+ addSlobrokConfig(config, slobrok);
+
+ std::unique_ptr<TestStorageApp> node;
+ if (isContentNode) {
+ node = std::make_unique<TestServiceLayerApp>(config.getConfigId());
+ } else {
+ node = std::make_unique<TestDistributorApp>(config.getConfigId());
+ }
+
+ CommunicationManager commMgr(node->getComponentRegister(),
+ config.getConfigId());
+ DummyStorageLink *storageLink = new DummyStorageLink();
+ commMgr.push_back(std::unique_ptr<StorageLink>(storageLink));
+ commMgr.open();
+
+ // Outer type is RPCMessageBus, which wraps regular MessageBus.
+ auto& mbus = commMgr.getMessageBus().getMessageBus();
+ if (isContentNode) {
+ CPPUNIT_ASSERT_EQUAL(uint32_t(12345), mbus.getMaxPendingCount());
+ CPPUNIT_ASSERT_EQUAL(uint32_t(555666), mbus.getMaxPendingSize());
+ } else {
+ CPPUNIT_ASSERT_EQUAL(uint32_t(6789), mbus.getMaxPendingCount());
+ CPPUNIT_ASSERT_EQUAL(uint32_t(777888), mbus.getMaxPendingSize());
+ }
+
+ // Test live reconfig of limits.
+ using ConfigBuilder
+ = vespa::config::content::core::StorCommunicationmanagerConfigBuilder;
+ auto liveCfg = std::make_unique<ConfigBuilder>();
+ liveCfg->mbusContentNodeMaxPendingCount = 777777;
+ liveCfg->mbusDistributorNodeMaxPendingCount = 999999;
+
+ commMgr.configure(std::move(liveCfg));
+ if (isContentNode) {
+ CPPUNIT_ASSERT_EQUAL(uint32_t(777777), mbus.getMaxPendingCount());
+ } else {
+ CPPUNIT_ASSERT_EQUAL(uint32_t(999999), mbus.getMaxPendingCount());
+ }
+}
+
+void
+CommunicationManagerTest::testDistPendingLimitConfigsArePropagatedToMessageBus()
+{
+ doTestConfigPropagation(false);
+}
+
+void
+CommunicationManagerTest::testStorPendingLimitConfigsArePropagatedToMessageBus()
+{
+ doTestConfigPropagation(true);
+}
+
+void
+CommunicationManagerTest::testCommandsAreDequeuedInPriorityOrder()
+{
+ mbus::Slobrok slobrok;
+ vdstestlib::DirConfig storConfig(getStandardConfig(true));
+ storConfig.getConfig("stor-server").set("node_index", "1");
+ addSlobrokConfig(storConfig, slobrok);
+ TestServiceLayerApp storNode(storConfig.getConfigId());
+
+ CommunicationManager storage(storNode.getComponentRegister(),
+ storConfig.getConfigId());
+ DummyStorageLink *storageLink = new DummyStorageLink();
+ storage.push_back(std::unique_ptr<StorageLink>(storageLink));
+
+    // Message dequeuing does not start before we invoke `open` on the storage
+ // link chain, so we enqueue messages in randomized priority order before
+ // doing so. After starting the thread, we should then get messages down
+ // the chain in a deterministic, prioritized order.
+ // Lower number == higher priority.
+ std::vector<api::StorageMessage::Priority> pris{200, 0, 255, 128};
+ for (auto pri : pris) {
+ storage.enqueue(createDummyCommand(pri));
+ }
+ storage.open();
+ storageLink->waitForMessages(pris.size(), MESSAGE_WAIT_TIME_SEC);
+
+ std::sort(pris.begin(), pris.end());
+ for (size_t i = 0; i < pris.size(); ++i) {
+ // Casting is just to avoid getting mismatched values printed to the
+ // output verbatim as chars.
+ CPPUNIT_ASSERT_EQUAL(
+ uint32_t(pris[i]),
+ uint32_t(storageLink->getCommand(i)->getPriority()));
+ }
+}
+
+void
+CommunicationManagerTest::testRepliesAreDequeuedInFifoOrder()
+{
+ mbus::Slobrok slobrok;
+ vdstestlib::DirConfig storConfig(getStandardConfig(true));
+ storConfig.getConfig("stor-server").set("node_index", "1");
+ addSlobrokConfig(storConfig, slobrok);
+ TestServiceLayerApp storNode(storConfig.getConfigId());
+
+ CommunicationManager storage(storNode.getComponentRegister(),
+ storConfig.getConfigId());
+ DummyStorageLink *storageLink = new DummyStorageLink();
+ storage.push_back(std::unique_ptr<StorageLink>(storageLink));
+
+ std::vector<api::StorageMessage::Priority> pris{200, 0, 255, 128};
+ for (auto pri : pris) {
+ storage.enqueue(createDummyCommand(pri)->makeReply());
+ }
+ storage.open();
+ storageLink->waitForMessages(pris.size(), MESSAGE_WAIT_TIME_SEC);
+
+ // Want FIFO order for replies, not priority-sorted order.
+ for (size_t i = 0; i < pris.size(); ++i) {
+ CPPUNIT_ASSERT_EQUAL(
+ uint32_t(pris[i]),
+ uint32_t(storageLink->getCommand(i)->getPriority()));
+ }
+}
+
+} // storage
diff --git a/storage/src/tests/storageserver/documentapiconvertertest.cpp b/storage/src/tests/storageserver/documentapiconvertertest.cpp
new file mode 100644
index 00000000000..69083352c4a
--- /dev/null
+++ b/storage/src/tests/storageserver/documentapiconvertertest.cpp
@@ -0,0 +1,529 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <cppunit/extensions/HelperMacros.h>
+#include <vespa/document/config/config-documenttypes.h>
+#include <vespa/document/datatype/datatype.h>
+#include <vespa/document/fieldvalue/document.h>
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/documentapi/documentapi.h>
+#include <vespa/messagebus/emptyreply.h>
+#include <vespa/storage/storageserver/documentapiconverter.h>
+#include <vespa/storageapi/message/batch.h>
+#include <vespa/storageapi/message/datagram.h>
+#include <vespa/storageapi/message/multioperation.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storageapi/message/visitor.h>
+#include <vespa/vdslib/container/writabledocumentlist.h>
+
+using document::DataType;
+using document::DocIdString;
+using document::Document;
+using document::DocumentId;
+using document::DocumentTypeRepo;
+using document::readDocumenttypesConfig;
+
+namespace storage {
+
+struct DocumentApiConverterTest : public CppUnit::TestFixture
+{
+ std::unique_ptr<DocumentApiConverter> _converter;
+ const DocumentTypeRepo::SP _repo;
+ const DataType& _html_type;
+
+ DocumentApiConverterTest()
+ : _repo(new DocumentTypeRepo(readDocumenttypesConfig(
+ "config-doctypes.cfg"))),
+ _html_type(*_repo->getDocumentType("text/html"))
+ {
+ }
+
+ void setUp() {
+ _converter.reset(new DocumentApiConverter("raw:"));
+ };
+
+ void testPut();
+ void testForwardedPut();
+ void testRemove();
+ void testGet();
+ void testCreateVisitor();
+ void testCreateVisitorHighTimeout();
+ void testCreateVisitorReplyNotReady();
+ void testCreateVisitorReplyLastBucket();
+ void testDestroyVisitor();
+ void testVisitorInfo();
+ void testDocBlock();
+ void testDocBlockWithKeepTimeStamps();
+ void testMultiOperation();
+ void testBatchDocumentUpdate();
+
+ CPPUNIT_TEST_SUITE(DocumentApiConverterTest);
+ CPPUNIT_TEST(testPut);
+ CPPUNIT_TEST(testForwardedPut);
+ CPPUNIT_TEST(testRemove);
+ CPPUNIT_TEST(testGet);
+ CPPUNIT_TEST(testCreateVisitor);
+ CPPUNIT_TEST(testCreateVisitorHighTimeout);
+ CPPUNIT_TEST(testCreateVisitorReplyNotReady);
+ CPPUNIT_TEST(testCreateVisitorReplyLastBucket);
+ CPPUNIT_TEST(testDestroyVisitor);
+ CPPUNIT_TEST(testVisitorInfo);
+ CPPUNIT_TEST(testDocBlock);
+ CPPUNIT_TEST(testDocBlockWithKeepTimeStamps);
+ CPPUNIT_TEST(testMultiOperation);
+ CPPUNIT_TEST(testBatchDocumentUpdate);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(DocumentApiConverterTest);
+
+void DocumentApiConverterTest::testPut()
+{
+ Document::SP
+ doc(new Document(_html_type, DocumentId(DocIdString("test", "test"))));
+
+ documentapi::PutDocumentMessage putmsg(doc);
+ putmsg.setTimestamp(1234);
+
+ std::unique_ptr<storage::api::StorageCommand> cmd =
+ _converter->toStorageAPI(putmsg, _repo);
+
+ api::PutCommand* pc = dynamic_cast<api::PutCommand*>(cmd.get());
+
+ CPPUNIT_ASSERT(pc);
+ CPPUNIT_ASSERT(pc->getDocument().get() == doc.get());
+
+ std::unique_ptr<mbus::Reply> reply = putmsg.createReply();
+ CPPUNIT_ASSERT(reply.get());
+
+ std::unique_ptr<storage::api::StorageReply> rep = _converter->toStorageAPI(
+ static_cast<documentapi::DocumentReply&>(*reply), *cmd);
+ api::PutReply* pr = dynamic_cast<api::PutReply*>(rep.get());
+ CPPUNIT_ASSERT(pr);
+
+ std::unique_ptr<mbus::Message> mbusmsg =
+ _converter->toDocumentAPI(*pc, _repo);
+
+ documentapi::PutDocumentMessage* mbusput = dynamic_cast<documentapi::PutDocumentMessage*>(mbusmsg.get());
+ CPPUNIT_ASSERT(mbusput);
+ CPPUNIT_ASSERT(mbusput->getDocument().get() == doc.get());
+ CPPUNIT_ASSERT(mbusput->getTimestamp() == 1234);
+};
+
+void DocumentApiConverterTest::testForwardedPut()
+{
+ Document::SP
+ doc(new Document(_html_type, DocumentId(DocIdString("test", "test"))));
+
+ documentapi::PutDocumentMessage* putmsg = new documentapi::PutDocumentMessage(doc);
+ std::unique_ptr<mbus::Reply> reply(((documentapi::DocumentMessage*)putmsg)->createReply());
+ reply->setMessage(std::unique_ptr<mbus::Message>(putmsg));
+
+ std::unique_ptr<storage::api::StorageCommand> cmd =
+ _converter->toStorageAPI(*putmsg, _repo);
+ ((storage::api::PutCommand*)cmd.get())->setTimestamp(1234);
+
+ std::unique_ptr<storage::api::StorageReply> rep = cmd->makeReply();
+ api::PutReply* pr = dynamic_cast<api::PutReply*>(rep.get());
+ CPPUNIT_ASSERT(pr);
+
+ _converter->transferReplyState(*pr, *reply);
+}
+
+void DocumentApiConverterTest::testRemove()
+{
+ documentapi::RemoveDocumentMessage removemsg(document::DocumentId(document::DocIdString("test", "test")));
+ std::unique_ptr<storage::api::StorageCommand> cmd =
+ _converter->toStorageAPI(removemsg, _repo);
+
+ api::RemoveCommand* rc = dynamic_cast<api::RemoveCommand*>(cmd.get());
+
+ CPPUNIT_ASSERT(rc);
+ CPPUNIT_ASSERT_EQUAL(document::DocumentId(document::DocIdString("test", "test")), rc->getDocumentId());
+
+ std::unique_ptr<mbus::Reply> reply = removemsg.createReply();
+ CPPUNIT_ASSERT(reply.get());
+
+ std::unique_ptr<storage::api::StorageReply> rep = _converter->toStorageAPI(
+ static_cast<documentapi::DocumentReply&>(*reply), *cmd);
+ api::RemoveReply* pr = dynamic_cast<api::RemoveReply*>(rep.get());
+ CPPUNIT_ASSERT(pr);
+
+ std::unique_ptr<mbus::Message> mbusmsg =
+ _converter->toDocumentAPI(*rc, _repo);
+
+ documentapi::RemoveDocumentMessage* mbusremove = dynamic_cast<documentapi::RemoveDocumentMessage*>(mbusmsg.get());
+ CPPUNIT_ASSERT(mbusremove);
+ CPPUNIT_ASSERT_EQUAL(document::DocumentId(document::DocIdString("test", "test")), mbusremove->getDocumentId());
+};
+
+void DocumentApiConverterTest::testGet()
+{
+ documentapi::GetDocumentMessage getmsg(
+ document::DocumentId(document::DocIdString("test", "test")),
+ "foo bar");
+
+ std::unique_ptr<storage::api::StorageCommand> cmd =
+ _converter->toStorageAPI(getmsg, _repo);
+
+ api::GetCommand* rc = dynamic_cast<api::GetCommand*>(cmd.get());
+
+ CPPUNIT_ASSERT(rc);
+ CPPUNIT_ASSERT_EQUAL(document::DocumentId(document::DocIdString("test", "test")), rc->getDocumentId());
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("foo bar"), rc->getFieldSet());
+};
+
+void DocumentApiConverterTest::testCreateVisitor()
+{
+ documentapi::CreateVisitorMessage cv(
+ "mylib",
+ "myinstance",
+ "control-dest",
+ "data-dest");
+
+ cv.setTimeRemaining(123456);
+
+ std::unique_ptr<storage::api::StorageCommand> cmd =
+ _converter->toStorageAPI(cv, _repo);
+
+ api::CreateVisitorCommand* pc = dynamic_cast<api::CreateVisitorCommand*>(cmd.get());
+
+ CPPUNIT_ASSERT(pc);
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("mylib"), pc->getLibraryName());
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("myinstance"), pc->getInstanceId());
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("control-dest"), pc->getControlDestination());
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("data-dest"), pc->getDataDestination());
+ CPPUNIT_ASSERT_EQUAL(123456u, pc->getTimeout());
+}
+
+void DocumentApiConverterTest::testCreateVisitorHighTimeout()
+{
+ documentapi::CreateVisitorMessage cv(
+ "mylib",
+ "myinstance",
+ "control-dest",
+ "data-dest");
+
+ cv.setTimeRemaining((uint64_t)std::numeric_limits<uint32_t>::max() + 1); // Will be INT_MAX
+
+ std::unique_ptr<storage::api::StorageCommand> cmd =
+ _converter->toStorageAPI(cv, _repo);
+
+ api::CreateVisitorCommand* pc = dynamic_cast<api::CreateVisitorCommand*>(cmd.get());
+
+ CPPUNIT_ASSERT(pc);
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("mylib"), pc->getLibraryName());
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("myinstance"), pc->getInstanceId());
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("control-dest"), pc->getControlDestination());
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("data-dest"), pc->getDataDestination());
+ CPPUNIT_ASSERT_EQUAL((uint32_t) std::numeric_limits<int32_t>::max(),
+ pc->getTimeout());
+}
+
+void DocumentApiConverterTest::testCreateVisitorReplyNotReady()
+{
+ documentapi::CreateVisitorMessage cv(
+ "mylib",
+ "myinstance",
+ "control-dest",
+ "data-dest");
+
+ std::unique_ptr<storage::api::StorageCommand> cmd =
+ _converter->toStorageAPI(cv, _repo);
+ CPPUNIT_ASSERT(cmd.get());
+ api::CreateVisitorCommand& cvc = dynamic_cast<api::CreateVisitorCommand&>(*cmd);
+
+ api::CreateVisitorReply cvr(cvc);
+ cvr.setResult(api::ReturnCode(api::ReturnCode::NOT_READY, "not ready"));
+
+ std::unique_ptr<documentapi::CreateVisitorReply> reply(
+ dynamic_cast<documentapi::CreateVisitorReply*>(
+ cv.createReply().release()));
+ CPPUNIT_ASSERT(reply.get());
+
+ _converter->transferReplyState(cvr, *reply);
+
+ CPPUNIT_ASSERT_EQUAL((uint32_t)documentapi::DocumentProtocol::ERROR_NODE_NOT_READY, reply->getError(0).getCode());
+
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(INT_MAX), reply->getLastBucket());
+}
+
+
+void DocumentApiConverterTest::testCreateVisitorReplyLastBucket()
+{
+ documentapi::CreateVisitorMessage cv(
+ "mylib",
+ "myinstance",
+ "control-dest",
+ "data-dest");
+
+ std::unique_ptr<storage::api::StorageCommand> cmd =
+ _converter->toStorageAPI(cv, _repo);
+ CPPUNIT_ASSERT(cmd.get());
+ api::CreateVisitorCommand& cvc = dynamic_cast<api::CreateVisitorCommand&>(*cmd);
+
+
+ api::CreateVisitorReply cvr(cvc);
+ cvr.setLastBucket(document::BucketId(123));
+
+
+ std::unique_ptr<documentapi::CreateVisitorReply> reply(
+ dynamic_cast<documentapi::CreateVisitorReply*>(
+ cv.createReply().release()));
+
+ CPPUNIT_ASSERT(reply.get());
+
+ _converter->transferReplyState(cvr, *reply);
+
+ CPPUNIT_ASSERT_EQUAL(document::BucketId(123), reply->getLastBucket());
+}
+
+
+// Verify that a documentapi DestroyVisitorMessage converts to a storage-api
+// DestroyVisitorCommand carrying the same instance id.
+void DocumentApiConverterTest::testDestroyVisitor()
+{
+    documentapi::DestroyVisitorMessage destroyMsg("myinstance");
+
+    std::unique_ptr<storage::api::StorageCommand> storageCmd(
+            _converter->toStorageAPI(destroyMsg, _repo));
+    api::DestroyVisitorCommand* destroyCmd =
+        dynamic_cast<api::DestroyVisitorCommand*>(storageCmd.get());
+    CPPUNIT_ASSERT(destroyCmd);
+    CPPUNIT_ASSERT_EQUAL(vespalib::string("myinstance"), destroyCmd->getInstanceId());
+}
+
+// Verify that completed buckets on a VisitorInfoCommand show up (in order)
+// as finished buckets on the converted documentapi VisitorInfoMessage, and
+// that the documentapi reply maps back to a storage-api VisitorInfoReply.
+void
+DocumentApiConverterTest::testVisitorInfo()
+{
+    api::VisitorInfoCommand vicmd;
+    const uint64_t rawBuckets[] = { 1, 2, 4 };
+    std::vector<api::VisitorInfoCommand::BucketTimestampPair> bucketsCompleted;
+    for (uint64_t raw : rawBuckets) {
+        bucketsCompleted.push_back(api::VisitorInfoCommand::BucketTimestampPair(
+                document::BucketId(16, raw), 0));
+    }
+    vicmd.setBucketsCompleted(bucketsCompleted);
+
+    std::unique_ptr<mbus::Message> converted(_converter->toDocumentAPI(vicmd, _repo));
+    documentapi::VisitorInfoMessage* infoMsg =
+        dynamic_cast<documentapi::VisitorInfoMessage*>(converted.get());
+    CPPUNIT_ASSERT(infoMsg);
+    for (std::size_t i = 0; i < 3; ++i) {
+        CPPUNIT_ASSERT_EQUAL(document::BucketId(16, rawBuckets[i]),
+                             infoMsg->getFinishedBuckets()[i]);
+    }
+
+    std::unique_ptr<mbus::Reply> mbusReply = infoMsg->createReply();
+    CPPUNIT_ASSERT(mbusReply.get());
+
+    std::unique_ptr<storage::api::StorageReply> storageReply(_converter->toStorageAPI(
+            static_cast<documentapi::DocumentReply&>(*mbusReply), vicmd));
+    CPPUNIT_ASSERT(dynamic_cast<api::VisitorInfoReply*>(storageReply.get()));
+}
+
+// Round-trips a DocBlockCommand: converts it to a documentapi
+// MultiOperationMessage, checks that the payload and the timeout survive the
+// conversion, then maps the documentapi reply back to a DocBlockReply.
+void
+DocumentApiConverterTest::testDocBlock()
+{
+    Document::SP
+        doc(new Document(_html_type, DocumentId(DocIdString("test", "test"))));
+
+    // Build a document block holding a single put in a stack buffer.
+    char buffer[10000];
+    vdslib::WritableDocumentList docBlock(_repo, buffer, sizeof(buffer));
+    docBlock.addPut(*doc, 100);
+
+    document::BucketIdFactory fac;
+    document::BucketId bucketId = fac.getBucketId(doc->getId());
+    bucketId.setUsedBits(32);
+
+    api::DocBlockCommand dbcmd(bucketId, docBlock, std::shared_ptr<void>());
+
+    dbcmd.setTimeout(123456);
+
+    std::unique_ptr<mbus::Message> mbusmsg =
+        _converter->toDocumentAPI(dbcmd, _repo);
+
+    documentapi::MultiOperationMessage* mbusdb = dynamic_cast<documentapi::MultiOperationMessage*>(mbusmsg.get());
+    CPPUNIT_ASSERT(mbusdb);
+
+    // The storage-api timeout is carried over as documentapi time remaining.
+    CPPUNIT_ASSERT_EQUAL((uint64_t)123456, mbusdb->getTimeRemaining());
+
+    // The converted message must contain exactly the one put operation.
+    const vdslib::DocumentList& list = mbusdb->getOperations();
+    CPPUNIT_ASSERT_EQUAL((uint32_t)1, list.size());
+    CPPUNIT_ASSERT_EQUAL(*doc, *dynamic_cast<document::Document*>(list.begin()->getDocument().get()));
+
+    std::unique_ptr<mbus::Reply> reply = mbusdb->createReply();
+    CPPUNIT_ASSERT(reply.get());
+
+    // Map the generic documentapi reply back onto the originating command.
+    std::unique_ptr<storage::api::StorageReply> rep =
+        _converter->toStorageAPI(static_cast<documentapi::DocumentReply&>(*reply), dbcmd);
+    api::DocBlockReply* pr = dynamic_cast<api::DocBlockReply*>(rep.get());
+    CPPUNIT_ASSERT(pr);
+}
+
+
+// Verify that the keepTimeStamps flag on a DocBlockCommand survives the
+// conversion to a documentapi MultiOperationMessage, both when the flag is
+// unset (default) and after it has been set.
+void
+DocumentApiConverterTest::testDocBlockWithKeepTimeStamps()
+{
+    char buffer[10000];
+    vdslib::WritableDocumentList docBlock(_repo, buffer, sizeof(buffer));
+    api::DocBlockCommand dbcmd(document::BucketId(0), docBlock, std::shared_ptr<void>());
+
+    // Pass 0: flag defaults to false. Pass 1: flag explicitly set to true.
+    for (int pass = 0; pass < 2; ++pass) {
+        const bool expectKeep = (pass == 1);
+        if (expectKeep) {
+            dbcmd.keepTimeStamps(true);
+        }
+        CPPUNIT_ASSERT_EQUAL(dbcmd.keepTimeStamps(), expectKeep);
+
+        std::unique_ptr<mbus::Message> converted(
+                _converter->toDocumentAPI(dbcmd, _repo));
+        documentapi::MultiOperationMessage* moMsg =
+            dynamic_cast<documentapi::MultiOperationMessage*>(converted.get());
+        CPPUNIT_ASSERT(moMsg);
+
+        CPPUNIT_ASSERT_EQUAL(moMsg->keepTimeStamps(), expectKeep);
+    }
+}
+
+
+// Exercises both conversion directions for multi-operation payloads:
+// documentapi MultiOperationMessage -> storage-api MultiOperationCommand,
+// and storage-api MultiOperationCommand -> documentapi message, checking in
+// each direction that the single put operation survives intact and that a
+// reply of the matching type can be produced.
+void
+DocumentApiConverterTest::testMultiOperation()
+{
+    //create a document
+    Document::SP
+        doc(new Document(_html_type, DocumentId(DocIdString("test", "test"))));
+
+    document::BucketIdFactory fac;
+    document::BucketId bucketId = fac.getBucketId(doc->getId());
+    bucketId.setUsedBits(32);
+
+    {
+        // documentapi -> storage-api direction.
+        documentapi::MultiOperationMessage momsg(_repo, bucketId, 10000);
+
+        // The operation list is written directly into the message's buffer.
+        vdslib::WritableDocumentList operations(_repo, &(momsg.getBuffer()[0]),
+                                                momsg.getBuffer().size());
+        operations.addPut(*doc, 100);
+
+        momsg.setOperations(operations);
+
+        CPPUNIT_ASSERT(momsg.getBuffer().size() > 0);
+
+        // Convert it to Storage API
+        std::unique_ptr<api::StorageCommand> stcmd =
+            _converter->toStorageAPI(momsg, _repo);
+
+        api::MultiOperationCommand* mocmd = dynamic_cast<api::MultiOperationCommand*>(stcmd.get());
+        CPPUNIT_ASSERT(mocmd);
+        CPPUNIT_ASSERT(mocmd->getBuffer().size() > 0);
+
+        // Get operations from Storage API message and check document
+        const vdslib::DocumentList& list = mocmd->getOperations();
+        CPPUNIT_ASSERT_EQUAL((uint32_t)1, list.size());
+        CPPUNIT_ASSERT_EQUAL(*doc, *dynamic_cast<document::Document*>(list.begin()->getDocument().get()));
+
+        // Create Storage API Reply
+        std::unique_ptr<api::MultiOperationReply> moreply = std::unique_ptr<api::MultiOperationReply>(new api::MultiOperationReply(*mocmd));
+        CPPUNIT_ASSERT(moreply.get());
+
+        // convert storage api reply to mbus reply.....
+        // ...
+    }
+
+    {
+        // storage-api -> documentapi direction.
+        api::MultiOperationCommand mocmd(_repo, bucketId, 10000, false);
+        mocmd.getOperations().addPut(*doc, 100);
+
+        // Convert it to documentapi
+        std::unique_ptr<mbus::Message> mbmsg =
+            _converter->toDocumentAPI(mocmd, _repo);
+        documentapi::MultiOperationMessage* momsg = dynamic_cast<documentapi::MultiOperationMessage*>(mbmsg.get());
+        CPPUNIT_ASSERT(momsg);
+
+        // Get operations from Document API msg and check document
+        const vdslib::DocumentList& list = momsg->getOperations();
+        CPPUNIT_ASSERT_EQUAL((uint32_t)1, list.size());
+        CPPUNIT_ASSERT_EQUAL(*doc, *dynamic_cast<document::Document*>(list.begin()->getDocument().get()));
+
+        // Create Document API reply
+        mbus::Reply::UP moreply = momsg->createReply();
+        CPPUNIT_ASSERT(moreply.get());
+
+        //Convert DocumentAPI reply to storageapi reply
+        std::unique_ptr<api::StorageReply> streply =
+            _converter->toStorageAPI(static_cast<documentapi::DocumentReply&>(*moreply), mocmd);
+        api::MultiOperationReply* mostreply = dynamic_cast<api::MultiOperationReply*>(streply.get());
+        CPPUNIT_ASSERT(mostreply);
+
+    }
+}
+
+// Builds a batch of three document updates, converts the batch message to a
+// storage-api BatchDocumentUpdateCommand, and checks that the documents-not-
+// found flags set on the storage-api reply are transferred verbatim to the
+// documentapi BatchDocumentUpdateReply.
+//
+// The three near-identical update-construction scopes of the original have
+// been collapsed into one data-driven loop (same ids, same order).
+void
+DocumentApiConverterTest::testBatchDocumentUpdate()
+{
+    std::vector<document::DocumentUpdate::SP > updates;
+    const char* docIdStrings[] = {
+        "userdoc:test:1234:test1",
+        "userdoc:test:1234:test2",
+        "userdoc:test:1234:test3"
+    };
+    for (const char* idString : docIdStrings) {
+        document::DocumentId docId(document::UserDocIdString(idString));
+        updates.push_back(document::DocumentUpdate::SP(
+                new document::DocumentUpdate(_html_type, docId)));
+    }
+
+    std::shared_ptr<documentapi::BatchDocumentUpdateMessage> msg(
+            new documentapi::BatchDocumentUpdateMessage(1234));
+    for (const document::DocumentUpdate::SP& update : updates) {
+        msg->addUpdate(update);
+    }
+
+    std::unique_ptr<storage::api::StorageCommand> cmd =
+        _converter->toStorageAPI(*msg, _repo);
+    api::BatchDocumentUpdateCommand* batchCmd = dynamic_cast<api::BatchDocumentUpdateCommand*>(cmd.get());
+    CPPUNIT_ASSERT(batchCmd);
+    // Every update must survive the conversion, in order.
+    CPPUNIT_ASSERT_EQUAL(updates.size(), batchCmd->getUpdates().size());
+    for (std::size_t i = 0; i < updates.size(); ++i) {
+        CPPUNIT_ASSERT_EQUAL(*updates[i], *batchCmd->getUpdates()[i]);
+    }
+
+    // Mark documents 0 and 2 as not found on the storage-api reply.
+    api::BatchDocumentUpdateReply batchReply(*batchCmd);
+    batchReply.getDocumentsNotFound().resize(3);
+    batchReply.getDocumentsNotFound()[0] = true;
+    batchReply.getDocumentsNotFound()[2] = true;
+
+    std::unique_ptr<mbus::Reply> mbusReply = msg->createReply();
+    documentapi::BatchDocumentUpdateReply* mbusBatchReply(
+            dynamic_cast<documentapi::BatchDocumentUpdateReply*>(mbusReply.get()));
+    CPPUNIT_ASSERT(mbusBatchReply != 0);
+
+    _converter->transferReplyState(batchReply, *mbusReply);
+
+    // The not-found flags must be transferred verbatim.
+    CPPUNIT_ASSERT_EQUAL(std::size_t(3), mbusBatchReply->getDocumentsNotFound().size());
+    CPPUNIT_ASSERT(mbusBatchReply->getDocumentsNotFound()[0] == true);
+    CPPUNIT_ASSERT(mbusBatchReply->getDocumentsNotFound()[1] == false);
+    CPPUNIT_ASSERT(mbusBatchReply->getDocumentsNotFound()[2] == true);
+}
+
+}
diff --git a/storage/src/tests/storageserver/dummystoragelink.cpp b/storage/src/tests/storageserver/dummystoragelink.cpp
new file mode 100644
index 00000000000..7194f1fba3d
--- /dev/null
+++ b/storage/src/tests/storageserver/dummystoragelink.cpp
@@ -0,0 +1,182 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/storageframework/defaultimplementation/clock/realclock.h>
+#include <tests/common/dummystoragelink.h>
+#include <sys/time.h>
+
+namespace storage {
+
+// Most recently constructed link, exposed via getLast() for tests.
+DummyStorageLink* DummyStorageLink::_last(0);
+
+// Constructs an idle dummy link: nothing is ignored, no auto-replies are
+// generated, and no messages have been recorded yet.
+DummyStorageLink::DummyStorageLink()
+    : StorageLink("Dummy storage link"),
+      _commands(),
+      _replies(),
+      _injected(),
+      _autoReply(false),
+      _useDispatch(false),
+      _ignore(false),
+      _waitMonitor()
+{
+    _last = this;
+}
+
+// Closes and flushes the link if still open, shuts down the chain below it
+// and drops all recorded messages.
+DummyStorageLink::~DummyStorageLink()
+{
+    // Often a chain with dummy link on top is deleted in unit tests.
+    // If they haven't been closed already, close them for a cleaner
+    // shutdown
+    if (getState() == OPENED) {
+        close();
+        flush();
+    }
+    closeNextLink();
+    reset();
+}
+
+// Intercepts a message travelling down the chain.
+//
+// Behavior, in priority order:
+//  - _ignore set: claim the message was not handled at all.
+//  - An injected reply is pending: send it up instead of processing.
+//  - _autoReply set and the message is a command: synthesize an OK reply.
+// If this link is at the bottom of the chain the message is recorded and
+// any threads blocked in waitForMessages()/waitForMessage() are woken.
+//
+// Fix: the original checked _injected.size() without holding _lock before
+// locking to pop, which races with concurrent injectReply() calls; the
+// check and the pop now happen under the same guard.
+bool DummyStorageLink::onDown(const api::StorageMessage::SP& cmd)
+{
+    if (_ignore) {
+        return false;
+    }
+    bool sentInjected = false;
+    {
+        vespalib::LockGuard guard(_lock);
+        if (!_injected.empty()) {
+            sendUp(*_injected.begin());
+            _injected.pop_front();
+            sentInjected = true;
+        }
+    }
+    if (!sentInjected && _autoReply) {
+        if (!cmd->getType().isReply()) {
+            std::shared_ptr<api::StorageReply> reply(
+                    std::dynamic_pointer_cast<api::StorageCommand>(cmd)
+                        ->makeReply().release());
+            reply->setResult(api::ReturnCode(
+                    api::ReturnCode::OK, "Automatically generated reply"));
+            sendUp(reply);
+        }
+    }
+    if (isBottom()) {
+        vespalib::MonitorGuard lock(_waitMonitor);
+        {
+            vespalib::LockGuard guard(_lock);
+            _commands.push_back(cmd);
+        }
+        lock.broadcast();
+        return true;
+    }
+    return StorageLink::onDown(cmd);
+}
+
+// Replies travelling up terminate here when this link is at the top of the
+// chain: they are recorded and any waiters on the monitor are woken up.
+// Otherwise the reply continues up through the default handling.
+bool DummyStorageLink::onUp(const api::StorageMessage::SP& reply) {
+    if (!isTop()) {
+        return StorageLink::onUp(reply);
+    }
+    vespalib::MonitorGuard waiters(_waitMonitor);
+    {
+        vespalib::LockGuard guard(_lock);
+        _replies.push_back(reply);
+    }
+    waiters.broadcast();
+    return true;
+}
+
+// Queues a reply (taking ownership of the raw pointer) that will be sent up
+// instead of processing the next message travelling down.
+void DummyStorageLink::injectReply(api::StorageReply* reply)
+{
+    assert(reply);
+    std::shared_ptr<api::StorageReply> owned(reply);
+    vespalib::LockGuard guard(_lock);
+    _injected.push_back(owned);
+}
+
+// Drops all recorded commands and replies plus any pending injected
+// replies. Takes both the monitor and the state lock so no waiter observes
+// a half-cleared state.
+void DummyStorageLink::reset() {
+    vespalib::MonitorGuard waitGuard(_waitMonitor);
+    vespalib::LockGuard stateGuard(_lock);
+    _commands.clear();
+    _replies.clear();
+    _injected.clear();
+}
+
+// Blocks until at least msgCount messages (commands + replies combined)
+// have been recorded by this link.
+//
+// timeout  > 0: throws IllegalStateException after `timeout` seconds.
+// timeout  < 0: waits indefinitely.
+// timeout == 0: NOTE(review) — the throw check is skipped, yet the monitor
+//     wait is still bounded by an already-expired deadline; presumably
+//     callers never pass 0 — confirm before relying on that value.
+//
+// Fix: "timout" typo corrected in the exception message.
+void DummyStorageLink::waitForMessages(unsigned int msgCount, int timeout)
+{
+    framework::defaultimplementation::RealClock clock;
+    framework::MilliSecTime endTime(
+            clock.getTimeInMillis() + framework::MilliSecTime(timeout * 1000));
+    vespalib::MonitorGuard lock(_waitMonitor);
+    while (_commands.size() + _replies.size() < msgCount) {
+        if (timeout != 0 && clock.getTimeInMillis() > endTime) {
+            std::ostringstream ost;
+            ost << "Timed out waiting for " << msgCount << " messages to "
+                << "arrive in dummy storage link. Only "
+                << (_commands.size() + _replies.size()) << " messages seen "
+                << "after timeout of " << timeout << " seconds was reached.";
+            throw vespalib::IllegalStateException(ost.str(), VESPA_STRLOC);
+        }
+        if (timeout >= 0) {
+            lock.wait((endTime - clock.getTimeInMillis()).getTime());
+        } else {
+            lock.wait();
+        }
+    }
+}
+
+// Blocks until a message of the given type has been recorded, either as a
+// command or as a reply. Timeout semantics match waitForMessages().
+//
+// Fixes: "timout" typo corrected, and the diagnostic for a lone recorded
+// reply now says "reply" instead of "command".
+void DummyStorageLink::waitForMessage(const api::MessageType& type, int timeout)
+{
+    framework::defaultimplementation::RealClock clock;
+    framework::MilliSecTime endTime(
+            clock.getTimeInMillis() + framework::MilliSecTime(timeout * 1000));
+    vespalib::MonitorGuard lock(_waitMonitor);
+    while (true) {
+        for (uint32_t i=0; i<_commands.size(); ++i) {
+            if (_commands[i]->getType() == type) return;
+        }
+        for (uint32_t i=0; i<_replies.size(); ++i) {
+            if (_replies[i]->getType() == type) return;
+        }
+        if (timeout != 0 && clock.getTimeInMillis() > endTime) {
+            std::ostringstream ost;
+            ost << "Timed out waiting for " << type << " message to "
+                << "arrive in dummy storage link. Only "
+                << (_commands.size() + _replies.size()) << " messages seen "
+                << "after timeout of " << timeout << " seconds was reached.";
+            if (_commands.size() == 1) {
+                ost << " Found command of type " << _commands[0]->getType();
+            }
+            if (_replies.size() == 1) {
+                ost << " Found reply of type " << _replies[0]->getType();
+            }
+            throw vespalib::IllegalStateException(ost.str(), VESPA_STRLOC);
+        }
+        if (timeout >= 0) {
+            lock.wait((endTime - clock.getTimeInMillis()).getTime());
+        } else {
+            lock.wait();
+        }
+    }
+}
+
+// Removes and returns the first recorded message of the given type,
+// searching commands before replies. Throws IllegalStateException if no
+// such message has been recorded.
+api::StorageMessage::SP
+DummyStorageLink::getAndRemoveMessage(const api::MessageType& type)
+{
+    vespalib::MonitorGuard lock(_waitMonitor);
+    std::vector<api::StorageMessage::SP>* queues[] = { &_commands, &_replies };
+    for (auto* queue : queues) {
+        for (auto it = queue->begin(); it != queue->end(); ++it) {
+            if ((*it)->getType() == type) {
+                api::StorageMessage::SP found(*it);
+                queue->erase(it);
+                return found;
+            }
+        }
+    }
+    std::ostringstream ost;
+    ost << "No message of type " << type << " found.";
+    throw vespalib::IllegalStateException(ost.str(), VESPA_STRLOC);
+}
+
+} // storage
diff --git a/storage/src/tests/storageserver/dummystoragelink.h b/storage/src/tests/storageserver/dummystoragelink.h
new file mode 100644
index 00000000000..cb9df8c5642
--- /dev/null
+++ b/storage/src/tests/storageserver/dummystoragelink.h
@@ -0,0 +1,115 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/vespalib/util/sync.h>
+#include <list>
+#include <sstream>
+#include <vespa/storageapi/messageapi/storagecommand.h>
+#include <string>
+#include <vector>
+#include <vespa/storage/common/storagelink.h>
+#include <vespa/storage/common/bucketmessages.h>
+#include <vespa/storageapi/message/internal.h>
+
+class FastOS_ThreadPool;
+
+namespace storage {
+
+// Test double for a storage link chain endpoint. When placed at the top or
+// bottom of a chain it records the commands/replies that reach it so tests
+// can inspect them, and can optionally auto-reply to commands, ignore
+// traffic, or substitute pre-injected replies.
+class DummyStorageLink : public StorageLink {
+
+    mutable vespalib::Lock _lock; // to protect below containers:
+    std::vector<api::StorageMessage::SP> _commands;
+    std::vector<api::StorageMessage::SP> _replies;
+    std::list<api::StorageMessage::SP> _injected;
+
+    // When set, commands receive an auto-generated OK reply (see onDown).
+    bool _autoReply;
+    bool _useDispatch;
+    // When set, onDown() claims not to have handled the message.
+    bool _ignore;
+    // Most recently constructed instance, for getLast().
+    static DummyStorageLink* _last;
+    // Signalled whenever a message is recorded; used by the wait* methods.
+    vespalib::Monitor _waitMonitor;
+
+public:
+    DummyStorageLink();
+    ~DummyStorageLink();
+
+    bool onDown(const api::StorageMessage::SP&);
+    bool onUp(const api::StorageMessage::SP&);
+
+    void addOnTopOfChain(StorageLink& link) {
+        link.addTestLinkOnTop(this);
+    }
+
+    void print(std::ostream& ost, bool verbose, const std::string& indent) const
+    {
+        (void) verbose;
+        ost << indent << "DummyStorageLink("
+            << "autoreply = " << (_autoReply ? "on" : "off")
+            << ", dispatch = " << (_useDispatch ? "on" : "off")
+            << ", " << _commands.size() << " commands"
+            << ", " << _replies.size() << " replies";
+        if (_injected.size() > 0)
+            ost << ", " << _injected.size() << " injected";
+        ost << ")";
+    }
+
+    // Takes ownership of the raw pointer.
+    void injectReply(api::StorageReply* reply);
+    void reset();
+    void setAutoreply(bool autoReply) { _autoReply = autoReply; }
+    void setIgnore(bool ignore) { _ignore = ignore; }
+    // Timeout is given in seconds
+    void waitForMessages(unsigned int msgCount = 1, int timeout = -1);
+    // Wait for a single message of a given type
+    void waitForMessage(const api::MessageType&, int timeout = -1);
+
+    api::StorageMessage::SP getCommand(size_t i) const {
+        vespalib::LockGuard guard(_lock);
+        api::StorageMessage::SP ret = _commands[i];
+        return ret;
+    }
+    api::StorageMessage::SP getReply(size_t i) const {
+        vespalib::LockGuard guard(_lock);
+        api::StorageMessage::SP ret = _replies[i];
+        return ret;
+    }
+    size_t getNumCommands() const {
+        vespalib::LockGuard guard(_lock);
+        return _commands.size();
+    }
+    size_t getNumReplies() const {
+        vespalib::LockGuard guard(_lock);
+        return _replies.size();
+    }
+
+    // NOTE(review): these return references to the containers without taking
+    // _lock — only safe while no other thread is delivering messages.
+    const std::vector<api::StorageMessage::SP>& getCommands() const
+        { return _commands; }
+    const std::vector<api::StorageMessage::SP>& getReplies() const
+        { return _replies; }
+
+    // Atomically hands over all recorded commands, leaving the queue empty.
+    std::vector<api::StorageMessage::SP> getCommandsOnce() {
+        vespalib::MonitorGuard lock(_waitMonitor);
+        std::vector<api::StorageMessage::SP> retval;
+        {
+            vespalib::LockGuard guard(_lock);
+            retval.swap(_commands);
+        }
+        return retval;
+    }
+
+    // Atomically hands over all recorded replies, leaving the queue empty.
+    std::vector<api::StorageMessage::SP> getRepliesOnce() {
+        vespalib::MonitorGuard lock(_waitMonitor);
+        std::vector<api::StorageMessage::SP> retval;
+        {
+            vespalib::LockGuard guard(_lock);
+            retval.swap(_replies);
+        }
+        return retval;
+    }
+
+    // Removes and returns the first recorded message of the given type;
+    // throws if none exists.
+    api::StorageMessage::SP getAndRemoveMessage(const api::MessageType&);
+
+    static DummyStorageLink* getLast() { return _last; }
+};
+
+}
+
diff --git a/storage/src/tests/storageserver/mergethrottlertest.cpp b/storage/src/tests/storageserver/mergethrottlertest.cpp
new file mode 100644
index 00000000000..e705db80788
--- /dev/null
+++ b/storage/src/tests/storageserver/mergethrottlertest.cpp
@@ -0,0 +1,1566 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <cppunit/extensions/HelperMacros.h>
+#include <memory>
+#include <iterator>
+#include <vector>
+#include <algorithm>
+#include <ctime>
+#include <vespa/vespalib/util/document_runnable.h>
+#include <vespa/storage/frameworkimpl/component/storagecomponentregisterimpl.h>
+#include <tests/common/testhelper.h>
+#include <tests/common/storagelinktest.h>
+#include <tests/common/teststorageapp.h>
+#include <tests/common/dummystoragelink.h>
+#include <vespa/storage/storageserver/mergethrottler.h>
+#include <vespa/storage/persistence/messages.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <vespa/storageapi/message/state.h>
+
+using namespace document;
+using namespace storage::api;
+
+namespace storage {
+
+namespace {
+
+// Fluent builder for MergeBucketCommand test instances.
+// Defaults: nodes {0, 1, 2}, max timestamp 1234, empty forwarding chain,
+// cluster state version 1.
+struct MergeBuilder
+{
+    document::BucketId _bucket;
+    api::Timestamp _maxTimestamp;
+    std::vector<uint16_t> _nodes;
+    std::vector<uint16_t> _chain;
+    uint64_t _clusterStateVersion;
+
+    MergeBuilder(const document::BucketId& bucket)
+        : _bucket(bucket),
+          _maxTimestamp(1234),
+          _chain(),
+          _clusterStateVersion(1)
+    {
+        nodes(0, 1, 2);
+    }
+
+    // The nodes() overloads replace the node set. Fix: previously they only
+    // appended, so an explicit nodes(...) call would leave the constructor's
+    // default {0, 1, 2} in front of the requested nodes (the chain()
+    // overloads already cleared before pushing).
+    MergeBuilder& nodes(uint16_t n0) {
+        _nodes.clear();
+        _nodes.push_back(n0);
+        return *this;
+    }
+    MergeBuilder& nodes(uint16_t n0, uint16_t n1) {
+        _nodes.clear();
+        _nodes.push_back(n0);
+        _nodes.push_back(n1);
+        return *this;
+    }
+    MergeBuilder& nodes(uint16_t n0, uint16_t n1, uint16_t n2) {
+        _nodes.clear();
+        _nodes.push_back(n0);
+        _nodes.push_back(n1);
+        _nodes.push_back(n2);
+        return *this;
+    }
+    MergeBuilder& maxTimestamp(api::Timestamp maxTs) {
+        _maxTimestamp = maxTs;
+        return *this;
+    }
+    MergeBuilder& clusterStateVersion(uint64_t csv) {
+        _clusterStateVersion = csv;
+        return *this;
+    }
+    // The chain() overloads replace any previously set forwarding chain.
+    MergeBuilder& chain(uint16_t n0) {
+        _chain.clear();
+        _chain.push_back(n0);
+        return *this;
+    }
+    MergeBuilder& chain(uint16_t n0, uint16_t n1) {
+        _chain.clear();
+        _chain.push_back(n0);
+        _chain.push_back(n1);
+        return *this;
+    }
+    MergeBuilder& chain(uint16_t n0, uint16_t n1, uint16_t n2) {
+        _chain.clear();
+        _chain.push_back(n0);
+        _chain.push_back(n1);
+        _chain.push_back(n2);
+        return *this;
+    }
+
+    // Builds the command, addressed to the first node in the node set.
+    api::MergeBucketCommand::SP create() const {
+        std::vector<api::MergeBucketCommand::Node> n;
+        for (uint32_t i = 0; i < _nodes.size(); ++i) {
+            n.push_back(_nodes[i]);
+        }
+        std::shared_ptr<MergeBucketCommand> cmd(
+                new MergeBucketCommand(_bucket, n, _maxTimestamp,
+                                       _clusterStateVersion, _chain));
+        StorageMessageAddress address("storage", lib::NodeType::STORAGE, _nodes[0]);
+        cmd->setAddress(address);
+        return cmd;
+    }
+};
+
+// Convenience factory wrapping a cluster state string in a
+// SetSystemStateCommand.
+std::shared_ptr<api::SetSystemStateCommand>
+makeSystemStateCmd(const std::string& state)
+{
+    lib::ClusterState clusterState(state);
+    return std::make_shared<api::SetSystemStateCommand>(clusterState);
+}
+
+} // anon ns
+
+// Test fixture for MergeThrottler. Sets up _storageNodeCount independent
+// DummyStorageLink -> MergeThrottler -> DummyStorageLink chains so merge
+// forwarding, queuing and rejection behavior can be driven end to end.
+class MergeThrottlerTest : public CppUnit::TestFixture
+{
+    CPPUNIT_TEST_SUITE(MergeThrottlerTest);
+    CPPUNIT_TEST(testMergesConfig);
+    CPPUNIT_TEST(testChain);
+    CPPUNIT_TEST(testWithSourceOnlyNode);
+    CPPUNIT_TEST(test42DistributorBehavior);
+    CPPUNIT_TEST(test42DistributorBehaviorDoesNotTakeOwnership);
+    CPPUNIT_TEST(testEndOfChainExecutionDoesNotTakeOwnership);
+    CPPUNIT_TEST(testResendHandling);
+    CPPUNIT_TEST(testPriorityQueuing);
+    CPPUNIT_TEST(testCommandInQueueDuplicateOfKnownMerge);
+    CPPUNIT_TEST(testInvalidReceiverNode);
+    CPPUNIT_TEST(testForwardQueuedMerge);
+    CPPUNIT_TEST(testExecuteQueuedMerge);
+    CPPUNIT_TEST(testFlush);
+    CPPUNIT_TEST(testUnseenMergeWithNodeInChain);
+    CPPUNIT_TEST(testMergeWithNewerClusterStateFlushesOutdatedQueued);
+    CPPUNIT_TEST(testUpdatedClusterStateFlushesOutdatedQueued);
+    CPPUNIT_TEST(test42MergesDoNotTriggerFlush);
+    CPPUNIT_TEST(testOutdatedClusterStateMergesAreRejectedOnArrival);
+    CPPUNIT_TEST(testUnknownMergeWithSelfInChain);
+    CPPUNIT_TEST(testBusyReturnedOnFullQueue);
+    CPPUNIT_TEST(testBrokenCycle);
+    CPPUNIT_TEST(testGetBucketDiffCommandNotInActiveSetIsRejected);
+    CPPUNIT_TEST(testApplyBucketDiffCommandNotInActiveSetIsRejected);
+    CPPUNIT_TEST(testNewClusterStateAbortsAllOutdatedActiveMerges);
+    CPPUNIT_TEST_SUITE_END();
+public:
+    void setUp();
+    void tearDown();
+
+    void testMergesConfig();
+    void testChain();
+    void testWithSourceOnlyNode();
+    void test42DistributorBehavior();
+    void test42DistributorBehaviorDoesNotTakeOwnership();
+    void testEndOfChainExecutionDoesNotTakeOwnership();
+    void testResendHandling();
+    void testPriorityQueuing();
+    void testCommandInQueueDuplicateOfKnownMerge();
+    void testInvalidReceiverNode();
+    void testForwardQueuedMerge();
+    void testExecuteQueuedMerge();
+    void testFlush();
+    void testUnseenMergeWithNodeInChain();
+    void testMergeWithNewerClusterStateFlushesOutdatedQueued();
+    void testUpdatedClusterStateFlushesOutdatedQueued();
+    void test42MergesDoNotTriggerFlush();
+    void testOutdatedClusterStateMergesAreRejectedOnArrival();
+    void testUnknownMergeWithSelfInChain();
+    void testBusyReturnedOnFullQueue();
+    void testBrokenCycle();
+    void testGetBucketDiffCommandNotInActiveSetIsRejected();
+    void testApplyBucketDiffCommandNotInActiveSetIsRejected();
+    void testNewClusterStateAbortsAllOutdatedActiveMerges();
+private:
+    static const int _storageNodeCount = 3;
+    // Seconds to wait for a message before a test helper gives up.
+    static const int _messageWaitTime = 100;
+
+    // Using n storage node links and dummy servers
+    std::vector<std::shared_ptr<DummyStorageLink> > _topLinks;
+    std::vector<std::shared_ptr<TestServiceLayerApp> > _servers;
+    // Non-owning; each throttler/bottom link is owned by its chain, which is
+    // rooted at the corresponding _topLinks entry (see setUp()).
+    std::vector<MergeThrottler*> _throttlers;
+    std::vector<DummyStorageLink*> _bottomLinks;
+
+    api::MergeBucketCommand::SP sendMerge(const MergeBuilder&);
+
+    void sendAndExpectReply(
+        const std::shared_ptr<api::StorageMessage>& msg,
+        const api::MessageType& expectedReplyType,
+        api::ReturnCode::Result expectedResultCode);
+};
+
+// Out-of-class definitions for the in-class initialized static constants
+// (required pre-C++17 when they are ODR-used, e.g. bound to a reference).
+const int MergeThrottlerTest::_storageNodeCount;
+const int MergeThrottlerTest::_messageWaitTime;
+
+CPPUNIT_TEST_SUITE_REGISTRATION(MergeThrottlerTest);
+
+// Builds _storageNodeCount independent chains of
+// DummyStorageLink -> MergeThrottler -> DummyStorageLink, each backed by
+// its own TestServiceLayerApp, and opens them.
+void
+MergeThrottlerTest::setUp()
+{
+    vdstestlib::DirConfig config(getStandardConfig(true));
+
+    for (int i = 0; i < _storageNodeCount; ++i) {
+        std::unique_ptr<TestServiceLayerApp> server(
+            new TestServiceLayerApp(DiskCount(1), NodeIndex(i)));
+        server->setClusterState(lib::ClusterState(
+            "distributor:100 storage:100 version:1"));
+        std::unique_ptr<DummyStorageLink> top;
+
+        top.reset(new DummyStorageLink);
+        MergeThrottler* throttler = new MergeThrottler(config.getConfigId(), server->getComponentRegister());
+        // MergeThrottler will be sandwiched in between two dummy links
+        top->push_back(std::unique_ptr<StorageLink>(throttler));
+        DummyStorageLink* bottom = new DummyStorageLink;
+        throttler->push_back(std::unique_ptr<StorageLink>(bottom));
+
+        // Ownership: each chain owns its throttler/bottom link via
+        // push_back; the raw pointers kept below are non-owning handles
+        // the tests use to poke at the individual links.
+        _servers.push_back(std::shared_ptr<TestServiceLayerApp>(server.release()));
+        _throttlers.push_back(throttler);
+        _bottomLinks.push_back(bottom);
+        top->open();
+        _topLinks.push_back(std::shared_ptr<DummyStorageLink>(top.release()));
+    }
+}
+
+// Closes and flushes every still-open chain, then releases all links and
+// servers. Destroying a top link tears down the whole chain beneath it.
+void
+MergeThrottlerTest::tearDown()
+{
+    for (std::shared_ptr<DummyStorageLink>& topLink : _topLinks) {
+        if (topLink->getState() == StorageLink::OPENED) {
+            topLink->close();
+            topLink->flush();
+        }
+        topLink.reset();
+    }
+    _topLinks.clear();
+    _bottomLinks.clear();
+    _throttlers.clear();
+    _servers.clear();
+}
+
+namespace {
+
+// Returns true iff the forwarding chain recorded in the given merge command
+// matches the sequence [first, end) exactly, element for element.
+template <typename Iterator>
+bool
+checkChain(const StorageMessage::SP& msg,
+           Iterator first, Iterator end)
+{
+    const MergeBucketCommand& cmd(
+            dynamic_cast<const MergeBucketCommand&>(*msg));
+    const std::size_t expectedLen(std::distance(first, end));
+    if (cmd.getChain().size() != expectedLen) {
+        return false;
+    }
+    return std::equal(cmd.getChain().begin(), cmd.getChain().end(), first);
+}
+
+// Polls the throttler roughly once per millisecond until its merge queue
+// holds exactly `sz` entries, throwing IllegalStateException once more than
+// `timeout` seconds have elapsed.
+void waitUntilMergeQueueIs(MergeThrottler& throttler, std::size_t sz, int timeout)
+{
+    const std::time_t start = std::time(0);
+    for (;;) {
+        std::size_t queued;
+        {
+            // Sample the queue size under the throttler's own state lock.
+            vespalib::LockGuard lock(throttler.getStateLock());
+            queued = throttler.getMergeQueue().size();
+        }
+        if (queued == sz) {
+            return;
+        }
+        if (std::time(0) - start > timeout) {
+            std::ostringstream os;
+            os << "Timeout while waiting for merge queue with " << sz << " items. Had "
+               << queued << " at timeout.";
+            throw vespalib::IllegalStateException(os.str(), VESPA_STRLOC);
+        }
+        FastOS_Thread::Sleep(1);
+    }
+}
+
+}
+
+// Sanity-check that the (min|max)_merges_per_node values under the
+// stor-server config have been propagated to the throttler on every node.
+void
+MergeThrottlerTest::testMergesConfig()
+{
+    for (int node = 0; node < _storageNodeCount; ++node) {
+        MergeThrottler& throttler(*_throttlers[node]);
+        CPPUNIT_ASSERT_EQUAL(uint32_t(25), throttler.getThrottlePolicy().getMaxPendingCount());
+        CPPUNIT_ASSERT_EQUAL(std::size_t(20), throttler.getMaxQueueSize());
+    }
+}
+
+// Test that a distributor sending a merge to the lowest-index storage
+// node correctly invokes a merge forwarding chain and subsequent unwind.
+void
+MergeThrottlerTest::testChain()
+{
+ uint16_t indices[_storageNodeCount];
+ for (int i = 0; i < _storageNodeCount; ++i) {
+ indices[i] = i;
+ _servers[i]->setClusterState(lib::ClusterState("distributor:100 storage:100 version:123"));
+ }
+
+ BucketId bid(14, 0x1337);
+
+ // Use different node permutations to ensure it works no matter which node is
+ // set as the executor. More specifically, _all_ permutations.
+ do {
+ uint16_t lastNodeIdx = _storageNodeCount - 1;
+ uint16_t executorNode = indices[0];
+
+ //std::cout << "\n----\n";
+ std::vector<MergeBucketCommand::Node> nodes;
+ for (int i = 0; i < _storageNodeCount; ++i) {
+ nodes.push_back(MergeBucketCommand::Node(indices[i], (i + executorNode) % 2 == 0));
+ //std::cout << indices[i] << " ";
+ }
+ //std::cout << "\n";
+ std::shared_ptr<MergeBucketCommand> cmd(
+ new MergeBucketCommand(bid, nodes, UINT_MAX, 123));
+ cmd->setPriority(7);
+ cmd->setTimeout(54321);
+ StorageMessageAddress address("storage", lib::NodeType::STORAGE, 0);
+ cmd->setAddress(address);
+ const uint16_t distributorIndex = 123;
+ cmd->setSourceIndex(distributorIndex); // Dummy distributor index that must be forwarded
+
+ StorageMessage::SP fwd = cmd;
+ StorageMessage::SP fwdToExec;
+
+ // TODO: make generic wrt. _storageNodeCount
+
+ for (int i = 0; i < _storageNodeCount - 1; ++i) {
+ if (i == executorNode) {
+ fwdToExec = fwd;
+ }
+ CPPUNIT_ASSERT_EQUAL(uint16_t(i), _servers[i]->getIndex());
+ // No matter the node order, command is always sent to node 0 -> 1 -> 2 etc
+ _topLinks[i]->sendDown(fwd);
+ _topLinks[i]->waitForMessage(MessageType::MERGEBUCKET, _messageWaitTime);
+
+ //std::cout << "fwd " << i << " -> " << i+1 << "\n";
+
+ // Forwarded merge should not be sent down. Should not be necessary
+ // to lock throttler here, since it should be sleeping like a champion
+ CPPUNIT_ASSERT_EQUAL(std::size_t(0), _bottomLinks[i]->getNumCommands());
+ CPPUNIT_ASSERT_EQUAL(std::size_t(1), _topLinks[i]->getNumReplies());
+ CPPUNIT_ASSERT_EQUAL(std::size_t(1), _throttlers[i]->getActiveMerges().size());
+
+ fwd = _topLinks[i]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+ CPPUNIT_ASSERT_EQUAL(uint16_t(i + 1), fwd->getAddress()->getIndex());
+ CPPUNIT_ASSERT_EQUAL(distributorIndex, dynamic_cast<const StorageCommand&>(*fwd).getSourceIndex());
+ {
+ //uint16_t chain[] = { 0 };
+ std::vector<uint16_t> chain;
+ for (int j = 0; j <= i; ++j) {
+ chain.push_back(j);
+ }
+ CPPUNIT_ASSERT(checkChain(fwd, chain.begin(), chain.end()));
+ }
+ // Ensure priority, cluster state version and timeout is correctly forwarded
+ CPPUNIT_ASSERT_EQUAL(7, static_cast<int>(fwd->getPriority()));
+ CPPUNIT_ASSERT_EQUAL(uint32_t(123), dynamic_cast<const MergeBucketCommand&>(*fwd).getClusterStateVersion());
+ CPPUNIT_ASSERT_EQUAL(uint32_t(54321), dynamic_cast<const StorageCommand&>(*fwd).getTimeout());
+ }
+
+ _topLinks[lastNodeIdx]->sendDown(fwd);
+
+ // If node 2 is the first in the node list, it should immediately execute
+ // the merge. Otherwise, a cycle with the first node should be formed.
+ if (executorNode != lastNodeIdx) {
+ //std::cout << "cycle " << lastNodeIdx << " -> " << executorNode << "\n";
+ _topLinks[lastNodeIdx]->waitForMessage(MessageType::MERGEBUCKET, _messageWaitTime);
+ // Forwarded merge should not be sent down
+ CPPUNIT_ASSERT_EQUAL(std::size_t(0), _bottomLinks[lastNodeIdx]->getNumCommands());
+ CPPUNIT_ASSERT_EQUAL(std::size_t(1), _topLinks[lastNodeIdx]->getNumReplies());
+ CPPUNIT_ASSERT_EQUAL(std::size_t(1), _throttlers[lastNodeIdx]->getActiveMerges().size());
+
+ fwd = _topLinks[lastNodeIdx]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+ CPPUNIT_ASSERT_EQUAL(uint16_t(executorNode), fwd->getAddress()->getIndex());
+ CPPUNIT_ASSERT_EQUAL(distributorIndex, dynamic_cast<const StorageCommand&>(*fwd).getSourceIndex());
+ {
+ std::vector<uint16_t> chain;
+ for (int j = 0; j < _storageNodeCount; ++j) {
+ chain.push_back(j);
+ }
+ CPPUNIT_ASSERT(checkChain(fwd, chain.begin(), chain.end()));
+ }
+ CPPUNIT_ASSERT_EQUAL(7, static_cast<int>(fwd->getPriority()));
+ CPPUNIT_ASSERT_EQUAL(uint32_t(123), dynamic_cast<const MergeBucketCommand&>(*fwd).getClusterStateVersion());
+ CPPUNIT_ASSERT_EQUAL(uint32_t(54321), dynamic_cast<const StorageCommand&>(*fwd).getTimeout());
+
+ _topLinks[executorNode]->sendDown(fwd);
+ }
+
+ _bottomLinks[executorNode]->waitForMessage(MessageType::MERGEBUCKET, _messageWaitTime);
+
+ // Forwarded merge has now been sent down to persistence layer
+ CPPUNIT_ASSERT_EQUAL(std::size_t(1), _bottomLinks[executorNode]->getNumCommands());
+ CPPUNIT_ASSERT_EQUAL(std::size_t(0), _topLinks[executorNode]->getNumReplies()); // No reply sent yet
+ CPPUNIT_ASSERT_EQUAL(std::size_t(1), _throttlers[executorNode]->getActiveMerges().size()); // no re-registering merge
+
+ if (executorNode != lastNodeIdx) {
+ // The MergeBucketCommand that is kept in the executor node should
+ // be the one from the node it initially got it from, NOT the one
+ // from the last node, since the chain has looped
+ CPPUNIT_ASSERT(_throttlers[executorNode]->getActiveMerges().find(bid)
+ != _throttlers[executorNode]->getActiveMerges().end());
+ CPPUNIT_ASSERT_EQUAL(static_cast<StorageMessage*>(fwdToExec.get()),
+ _throttlers[executorNode]->getActiveMerges().find(bid)->second.getMergeCmd().get());
+ }
+
+ // Send reply up from persistence layer to simulate a completed
+ // merge operation. Chain should now unwind properly
+ fwd = _bottomLinks[executorNode]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+ CPPUNIT_ASSERT_EQUAL(7, static_cast<int>(fwd->getPriority()));
+ CPPUNIT_ASSERT_EQUAL(uint32_t(123), dynamic_cast<const MergeBucketCommand&>(*fwd).getClusterStateVersion());
+ CPPUNIT_ASSERT_EQUAL(uint32_t(54321), dynamic_cast<const StorageCommand&>(*fwd).getTimeout());
+
+ std::shared_ptr<MergeBucketReply> reply(
+ new MergeBucketReply(dynamic_cast<const MergeBucketCommand&>(*fwd)));
+ reply->setResult(ReturnCode(ReturnCode::OK, "Great success! :D-|-<"));
+ _bottomLinks[executorNode]->sendUp(reply);
+
+ _topLinks[executorNode]->waitForMessage(MessageType::MERGEBUCKET_REPLY, _messageWaitTime);
+
+ if (executorNode != lastNodeIdx) {
+ // Merge should not be removed yet from executor, since it's pending an unwind
+ CPPUNIT_ASSERT_EQUAL(std::size_t(1), _throttlers[executorNode]->getActiveMerges().size());
+ CPPUNIT_ASSERT_EQUAL(static_cast<StorageMessage*>(fwdToExec.get()),
+ _throttlers[executorNode]->getActiveMerges().find(bid)->second.getMergeCmd().get());
+ }
+ // MergeBucketReply waiting to be sent back to node 2. NOTE: we don't have any
+ // transport context stuff set up here to perform the reply mapping, so we
+ // have to emulate it
+ CPPUNIT_ASSERT_EQUAL(std::size_t(1), _topLinks[executorNode]->getNumReplies());
+
+ StorageMessage::SP unwind = _topLinks[executorNode]->getAndRemoveMessage(MessageType::MERGEBUCKET_REPLY);
+ CPPUNIT_ASSERT_EQUAL(uint16_t(executorNode), unwind->getAddress()->getIndex());
+
+ // eg: 0 -> 2 -> 1 -> 0. Or: 2 -> 1 -> 0 if no cycle
+ for (int i = (executorNode != lastNodeIdx ? _storageNodeCount - 1 : _storageNodeCount - 2); i >= 0; --i) {
+ //std::cout << "unwind " << i << "\n";
+
+ _topLinks[i]->sendDown(unwind);
+ _topLinks[i]->waitForMessage(MessageType::MERGEBUCKET_REPLY, _messageWaitTime);
+
+ CPPUNIT_ASSERT_EQUAL(std::size_t(0), _bottomLinks[i]->getNumCommands());
+ CPPUNIT_ASSERT_EQUAL(std::size_t(1), _topLinks[i]->getNumReplies());
+ CPPUNIT_ASSERT_EQUAL(std::size_t(0), _throttlers[i]->getActiveMerges().size());
+
+ unwind = _topLinks[i]->getAndRemoveMessage(MessageType::MERGEBUCKET_REPLY);
+ CPPUNIT_ASSERT_EQUAL(uint16_t(i), unwind->getAddress()->getIndex());
+ }
+
+ const MergeBucketReply& mbr = dynamic_cast<const MergeBucketReply&>(*unwind);
+
+ CPPUNIT_ASSERT_EQUAL(ReturnCode::OK, mbr.getResult().getResult());
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("Great success! :D-|-<"), mbr.getResult().getMessage());
+ CPPUNIT_ASSERT_EQUAL(bid, mbr.getBucketId());
+
+ } while (std::next_permutation(indices, indices + _storageNodeCount));
+
+ //std::cout << "\n" << *_topLinks[0] << "\n";
+}
+
+// Verifies that a merge whose node set contains a source-only node (node 1,
+// constructed with the extra bool flag) is still chained through all nodes in
+// index order (0 -> 1 -> 2), looped back to node 0 for execution against the
+// persistence layer, and replied to with success.
+void
+MergeThrottlerTest::testWithSourceOnlyNode()
+{
+ BucketId bid(14, 0x1337);
+
+ StorageMessageAddress address("storage", lib::NodeType::STORAGE, 0);
+
+ std::vector<MergeBucketCommand::Node> nodes;
+ nodes.push_back(0);
+ nodes.push_back(2);
+ nodes.push_back(MergeBucketCommand::Node(1, true)); // second arg marks node 1 source-only
+ std::shared_ptr<MergeBucketCommand> cmd(
+ new MergeBucketCommand(bid, nodes, UINT_MAX, 123));
+
+ cmd->setAddress(address);
+ _topLinks[0]->sendDown(cmd);
+
+ // Node 0 forwards the merge to node 1...
+ _topLinks[0]->waitForMessage(MessageType::MERGEBUCKET, _messageWaitTime);
+ StorageMessage::SP fwd = _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+ CPPUNIT_ASSERT_EQUAL(uint16_t(1), fwd->getAddress()->getIndex());
+
+ _topLinks[1]->sendDown(fwd);
+
+ // ...node 1 forwards to node 2...
+ _topLinks[1]->waitForMessage(MessageType::MERGEBUCKET, _messageWaitTime);
+ fwd = _topLinks[1]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+ CPPUNIT_ASSERT_EQUAL(uint16_t(2), fwd->getAddress()->getIndex());
+
+ _topLinks[2]->sendDown(fwd);
+
+ // ...and node 2, being last in the chain, loops back to executor node 0.
+ _topLinks[2]->waitForMessage(MessageType::MERGEBUCKET, _messageWaitTime);
+ fwd = _topLinks[2]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+ CPPUNIT_ASSERT_EQUAL(uint16_t(0), fwd->getAddress()->getIndex());
+
+ // Node 0 sends the merge down to its persistence layer; emulate a
+ // successfully completed merge by replying OK from below.
+ _topLinks[0]->sendDown(fwd);
+ _bottomLinks[0]->waitForMessage(MessageType::MERGEBUCKET, _messageWaitTime);
+ _bottomLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+ std::shared_ptr<MergeBucketReply> reply(
+ new MergeBucketReply(dynamic_cast<const MergeBucketCommand&>(*fwd)));
+ reply->setResult(ReturnCode(ReturnCode::OK, "Great success! :D-|-<"));
+ _bottomLinks[0]->sendUp(reply);
+
+ _topLinks[0]->waitForMessage(MessageType::MERGEBUCKET_REPLY, _messageWaitTime);
+ fwd = _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET_REPLY);
+ CPPUNIT_ASSERT_EQUAL(uint16_t(0), fwd->getAddress()->getIndex());
+
+ // Assume everything's fine from here on out
+}
+
+// 4.2 distributors don't guarantee they'll send to lowest node
+// index, so we must detect such situations and execute the merge
+// immediately rather than attempt to chain it. Test that this
+// is done correctly.
+void
+MergeThrottlerTest::test42DistributorBehavior()
+{
+ BucketId bid(32, 0xfeef00);
+
+ std::vector<MergeBucketCommand::Node> nodes;
+ nodes.push_back(0);
+ nodes.push_back(1);
+ nodes.push_back(2);
+ // NOTE: no cluster state version argument, emulating a 4.2-style merge command.
+ std::shared_ptr<MergeBucketCommand> cmd(
+ new MergeBucketCommand(bid, nodes, 1234));
+
+ // Send to node 1, which is not the lowest index
+ StorageMessageAddress address("storage", lib::NodeType::STORAGE, 1);
+
+ cmd->setAddress(address);
+ _topLinks[1]->sendDown(cmd);
+ _bottomLinks[1]->waitForMessage(MessageType::MERGEBUCKET, _messageWaitTime);
+
+ // Should now have been sent to persistence layer
+ CPPUNIT_ASSERT_EQUAL(std::size_t(1), _bottomLinks[1]->getNumCommands());
+ CPPUNIT_ASSERT_EQUAL(std::size_t(0), _topLinks[1]->getNumReplies()); // No reply sent yet
+ CPPUNIT_ASSERT_EQUAL(std::size_t(1), _throttlers[1]->getActiveMerges().size());
+
+ // Send reply up from persistence layer to simulate a completed
+ // merge operation. Merge should be removed from state.
+ _bottomLinks[1]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+ std::shared_ptr<MergeBucketReply> reply(
+ new MergeBucketReply(dynamic_cast<const MergeBucketCommand&>(*cmd)));
+ reply->setResult(ReturnCode(ReturnCode::OK, "Tonight we dine on turtle soup!"));
+ _bottomLinks[1]->sendUp(reply);
+ _topLinks[1]->waitForMessage(MessageType::MERGEBUCKET_REPLY, _messageWaitTime);
+
+ CPPUNIT_ASSERT_EQUAL(std::size_t(0), _bottomLinks[1]->getNumCommands());
+ CPPUNIT_ASSERT_EQUAL(std::size_t(1), _topLinks[1]->getNumReplies());
+ CPPUNIT_ASSERT_EQUAL(std::size_t(0), _throttlers[1]->getActiveMerges().size());
+
+ // Executed locally, so the completion counts towards the "local" OK metric.
+ CPPUNIT_ASSERT_EQUAL(uint64_t(1), _throttlers[1]->getMetrics().local.ok.getValue());
+}
+
+// Test that we don't take ownership of the merge command when we're
+// just passing it through to the persistence layer when receiving
+// a merge command that presumably comes from a 4.2 distributor
+void
+MergeThrottlerTest::test42DistributorBehaviorDoesNotTakeOwnership()
+{
+ BucketId bid(32, 0xfeef00);
+
+ std::vector<MergeBucketCommand::Node> nodes;
+ nodes.push_back(0);
+ nodes.push_back(1);
+ nodes.push_back(2);
+ std::shared_ptr<MergeBucketCommand> cmd(
+ new MergeBucketCommand(bid, nodes, 1234));
+
+ // Send to node 1, which is not the lowest index
+ StorageMessageAddress address("storage", lib::NodeType::STORAGE, 1);
+
+ cmd->setAddress(address);
+ _topLinks[1]->sendDown(cmd);
+ _bottomLinks[1]->waitForMessage(MessageType::MERGEBUCKET, _messageWaitTime);
+
+ // Should now have been sent to persistence layer
+ CPPUNIT_ASSERT_EQUAL(std::size_t(1), _bottomLinks[1]->getNumCommands());
+ CPPUNIT_ASSERT_EQUAL(std::size_t(0), _topLinks[1]->getNumReplies()); // No reply sent yet
+ CPPUNIT_ASSERT_EQUAL(std::size_t(1), _throttlers[1]->getActiveMerges().size());
+
+ _bottomLinks[1]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+
+ // To ensure we don't try to deref any non-owned messages
+ framework::HttpUrlPath path("?xml");
+ std::ostringstream ss;
+ _throttlers[1]->reportStatus(ss, path);
+
+ // Flush throttler (synchronously). Should NOT generate a reply
+ // for the merge command, as it is not owned by the throttler
+ StorageLinkTest::callOnFlush(*_throttlers[1], true);
+
+ CPPUNIT_ASSERT_EQUAL(std::size_t(0), _bottomLinks[1]->getNumCommands());
+ CPPUNIT_ASSERT_EQUAL(std::size_t(0), _topLinks[1]->getNumReplies());
+ CPPUNIT_ASSERT_EQUAL(std::size_t(0), _throttlers[1]->getActiveMerges().size());
+
+ // Send a belated reply from persistence up just to ensure the
+ // throttler doesn't throw a fit if it receives an unknown merge
+ std::shared_ptr<MergeBucketReply> reply(
+ new MergeBucketReply(dynamic_cast<const MergeBucketCommand&>(*cmd)));
+ reply->setResult(ReturnCode(ReturnCode::OK, "Tonight we dine on turtle soup!"));
+ _bottomLinks[1]->sendUp(reply);
+ _topLinks[1]->waitForMessage(MessageType::MERGEBUCKET_REPLY, _messageWaitTime);
+
+ // Unknown merge reply is passed through untouched, nothing registered.
+ CPPUNIT_ASSERT_EQUAL(std::size_t(0), _bottomLinks[1]->getNumCommands());
+ CPPUNIT_ASSERT_EQUAL(std::size_t(1), _topLinks[1]->getNumReplies());
+ CPPUNIT_ASSERT_EQUAL(std::size_t(0), _throttlers[1]->getActiveMerges().size());
+}
+
+// Test that we don't take ownership of the merge command when we're
+// just passing it through to the persistence layer when we're at the
+// end of the chain and also the designated executor
+void
+MergeThrottlerTest::testEndOfChainExecutionDoesNotTakeOwnership()
+{
+ BucketId bid(32, 0xfeef00);
+
+ // Node 2 is first in the node set but last in the already-built chain,
+ // so it is both end-of-chain and the designated executor.
+ std::vector<MergeBucketCommand::Node> nodes;
+ nodes.push_back(2);
+ nodes.push_back(1);
+ nodes.push_back(0);
+ std::vector<uint16_t> chain;
+ chain.push_back(0);
+ chain.push_back(1);
+ std::shared_ptr<MergeBucketCommand> cmd(
+ new MergeBucketCommand(bid, nodes, 1234, 1, chain));
+
+ // Send to last node, which is not the lowest index
+ // NOTE(review): the sender address uses storage index 3, outside the node
+ // set — presumably only needs to differ from the receiver; confirm.
+ StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
+
+ cmd->setAddress(address);
+ _topLinks[2]->sendDown(cmd);
+ _bottomLinks[2]->waitForMessage(MessageType::MERGEBUCKET, _messageWaitTime);
+
+ // Should now have been sent to persistence layer
+ CPPUNIT_ASSERT_EQUAL(std::size_t(1), _bottomLinks[2]->getNumCommands());
+ CPPUNIT_ASSERT_EQUAL(std::size_t(0), _topLinks[2]->getNumReplies()); // No reply sent yet
+ CPPUNIT_ASSERT_EQUAL(std::size_t(1), _throttlers[2]->getActiveMerges().size());
+
+ _bottomLinks[2]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+
+ // To ensure we don't try to deref any non-owned messages
+ framework::HttpUrlPath path("");
+ std::ostringstream ss;
+ _throttlers[2]->reportStatus(ss, path);
+
+ // Flush throttler (synchronously). Should NOT generate a reply
+ // for the merge command, as it is not owned by the throttler
+ StorageLinkTest::callOnFlush(*_throttlers[2], true);
+
+ CPPUNIT_ASSERT_EQUAL(std::size_t(0), _bottomLinks[2]->getNumCommands());
+ CPPUNIT_ASSERT_EQUAL(std::size_t(0), _topLinks[2]->getNumReplies());
+ CPPUNIT_ASSERT_EQUAL(std::size_t(0), _throttlers[2]->getActiveMerges().size());
+
+ // Send a belated reply from persistence up just to ensure the
+ // throttler doesn't throw a fit if it receives an unknown merge
+ std::shared_ptr<MergeBucketReply> reply(
+ new MergeBucketReply(dynamic_cast<const MergeBucketCommand&>(*cmd)));
+ reply->setResult(ReturnCode(ReturnCode::OK, "Tonight we dine on turtle soup!"));
+ _bottomLinks[2]->sendUp(reply);
+ _topLinks[2]->waitForMessage(MessageType::MERGEBUCKET_REPLY, _messageWaitTime);
+
+ CPPUNIT_ASSERT_EQUAL(std::size_t(0), _bottomLinks[2]->getNumCommands());
+ CPPUNIT_ASSERT_EQUAL(std::size_t(1), _topLinks[2]->getNumReplies());
+ CPPUNIT_ASSERT_EQUAL(std::size_t(0), _throttlers[2]->getActiveMerges().size());
+}
+
+// Test that nodes resending a merge command won't lead to duplicate
+// state registration/forwarding or erasing the already present state
+// information. Every resend of an already-registered merge must be
+// rejected with BUSY, at the start, middle and end of the chain.
+void
+MergeThrottlerTest::testResendHandling()
+{
+ BucketId bid(32, 0xbadbed);
+
+ std::vector<MergeBucketCommand::Node> nodes;
+ nodes.push_back(0);
+ nodes.push_back(1);
+ nodes.push_back(2);
+ std::shared_ptr<MergeBucketCommand> cmd(
+ new MergeBucketCommand(bid, nodes, 1234));
+
+ StorageMessageAddress address("storage", lib::NodeType::STORAGE, 1);
+
+ cmd->setAddress(address);
+ _topLinks[0]->sendDown(cmd);
+ _topLinks[0]->waitForMessage(MessageType::MERGEBUCKET, _messageWaitTime);
+
+ StorageMessage::SP fwd = _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+
+ // Resend from "distributor". Just use same message, as that won't matter here
+ _topLinks[0]->sendDown(cmd);
+ _topLinks[0]->waitForMessage(MessageType::MERGEBUCKET_REPLY, _messageWaitTime);
+
+ // Reply should be BUSY
+ // (CPPUNIT_ASSERT_EQUAL takes (expected, actual); keep expected first so
+ // failure output is labelled correctly, consistent with the rest of the file.)
+ StorageMessage::SP reply = _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET_REPLY);
+
+ CPPUNIT_ASSERT_EQUAL(
+ ReturnCode::BUSY,
+ static_cast<MergeBucketReply&>(*reply).getResult().getResult());
+
+ // Forward through node 1 as normal...
+ _topLinks[1]->sendDown(fwd);
+ _topLinks[1]->waitForMessage(MessageType::MERGEBUCKET, _messageWaitTime);
+ fwd = _topLinks[1]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+
+ // ...and resend to node 2 after it has registered the merge.
+ _topLinks[2]->sendDown(fwd);
+ _topLinks[2]->waitForMessage(MessageType::MERGEBUCKET, _messageWaitTime);
+ _topLinks[2]->sendDown(fwd);
+ _topLinks[2]->waitForMessage(MessageType::MERGEBUCKET_REPLY, _messageWaitTime);
+
+ // Reply should be BUSY
+ reply = _topLinks[2]->getAndRemoveMessage(MessageType::MERGEBUCKET_REPLY);
+ CPPUNIT_ASSERT_EQUAL(
+ ReturnCode::BUSY,
+ static_cast<MergeBucketReply&>(*reply).getResult().getResult());
+
+ fwd = _topLinks[2]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+
+ // Chain loops back to node 0 for execution; resend once more while the
+ // merge is executing against the persistence layer.
+ _topLinks[0]->sendDown(fwd);
+ _bottomLinks[0]->waitForMessage(MessageType::MERGEBUCKET, _messageWaitTime);
+ _topLinks[0]->sendDown(fwd);
+ _topLinks[0]->waitForMessage(MessageType::MERGEBUCKET_REPLY, _messageWaitTime);
+
+ reply = _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET_REPLY);
+ CPPUNIT_ASSERT_EQUAL(
+ ReturnCode::BUSY,
+ static_cast<MergeBucketReply&>(*reply).getResult().getResult());
+}
+
+// Test that merges exceeding the throttle policy's pending window are queued
+// and subsequently dispatched in priority order (lowest numeric value first).
+void
+MergeThrottlerTest::testPriorityQueuing()
+{
+ // Fill up all active merges
+ std::size_t maxPending = _throttlers[0]->getThrottlePolicy().getMaxPendingCount();
+ std::vector<MergeBucketCommand::Node> nodes;
+ nodes.push_back(0);
+ nodes.push_back(1);
+ nodes.push_back(2);
+ CPPUNIT_ASSERT(maxPending >= 4u); // need room for the 4 probe merges below
+ for (std::size_t i = 0; i < maxPending; ++i) {
+ std::shared_ptr<MergeBucketCommand> cmd(
+ new MergeBucketCommand(BucketId(32, 0xf00baa00 + i), nodes, 1234));
+ cmd->setPriority(100);
+ _topLinks[0]->sendDown(cmd);
+ }
+
+ // Wait till we have maxPending replies and 0 queued.
+ // Use the shared _messageWaitTime rather than a hard-coded timeout,
+ // consistent with every other wait in this test suite.
+ _topLinks[0]->waitForMessages(maxPending, _messageWaitTime);
+ waitUntilMergeQueueIs(*_throttlers[0], 0, _messageWaitTime);
+
+ // Queue up some merges with different priorities
+ int priorities[4] = { 200, 150, 120, 240 };
+ int sortedPris[4] = { 120, 150, 200, 240 };
+ for (int i = 0; i < 4; ++i) {
+ std::shared_ptr<MergeBucketCommand> cmd(
+ new MergeBucketCommand(BucketId(32, i), nodes, 1234));
+ cmd->setPriority(priorities[i]);
+ _topLinks[0]->sendDown(cmd);
+ }
+
+ waitUntilMergeQueueIs(*_throttlers[0], 4, _messageWaitTime);
+
+ // Remove all but 4 forwarded merges
+ for (std::size_t i = 0; i < maxPending - 4; ++i) {
+ _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+ }
+ CPPUNIT_ASSERT_EQUAL(std::size_t(0), _topLinks[0]->getNumCommands());
+ CPPUNIT_ASSERT_EQUAL(std::size_t(4), _topLinks[0]->getNumReplies());
+
+ // Now when we start replying to merges, queued merges should be
+ // processed in priority order
+ for (int i = 0; i < 4; ++i) {
+ StorageMessage::SP replyTo = _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+ std::shared_ptr<MergeBucketReply> reply(
+ new MergeBucketReply(dynamic_cast<const MergeBucketCommand&>(*replyTo)));
+ reply->setResult(ReturnCode(ReturnCode::OK, "whee"));
+ _topLinks[0]->sendDown(reply);
+ }
+
+ _topLinks[0]->waitForMessages(8, _messageWaitTime); // 4 merges, 4 replies
+ waitUntilMergeQueueIs(*_throttlers[0], 0, _messageWaitTime);
+
+ // Dequeued merges must appear in ascending priority-value order
+ for (int i = 0; i < 4; ++i) {
+ StorageMessage::SP cmd = _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+ CPPUNIT_ASSERT_EQUAL(uint8_t(sortedPris[i]), cmd->getPriority());
+ }
+}
+
+// Test that we can detect and reject merges that due to resending
+// and potential priority queue sneaking etc may end up with duplicates
+// in the queue for a merge that is already known.
+void
+MergeThrottlerTest::testCommandInQueueDuplicateOfKnownMerge()
+{
+ // Fill up all active merges and 1 queued one
+ std::size_t maxPending = _throttlers[0]->getThrottlePolicy().getMaxPendingCount();
+ CPPUNIT_ASSERT(maxPending < 100);
+ for (std::size_t i = 0; i < maxPending + 1; ++i) {
+ std::vector<MergeBucketCommand::Node> nodes;
+ nodes.push_back(0);
+ nodes.push_back(2 + i);
+ nodes.push_back(5 + i);
+ std::shared_ptr<MergeBucketCommand> cmd(
+ new MergeBucketCommand(BucketId(32, 0xf00baa00 + i), nodes, 1234));
+ cmd->setPriority(100 - i);
+ _topLinks[0]->sendDown(cmd);
+ }
+
+ // Wait till we have maxPending replies and 1 queued
+ _topLinks[0]->waitForMessages(maxPending, _messageWaitTime);
+ waitUntilMergeQueueIs(*_throttlers[0], 1, _messageWaitTime);
+
+ // Add a merge for the same bucket twice to the queue
+ {
+ std::vector<MergeBucketCommand::Node> nodes;
+ nodes.push_back(0);
+ nodes.push_back(12);
+ nodes.push_back(123);
+ std::shared_ptr<MergeBucketCommand> cmd(
+ new MergeBucketCommand(BucketId(32, 0xf000feee), nodes, 1234));
+ _topLinks[0]->sendDown(cmd);
+ }
+ {
+ std::vector<MergeBucketCommand::Node> nodes;
+ nodes.push_back(0);
+ nodes.push_back(124); // Different node set doesn't matter
+ nodes.push_back(14);
+ std::shared_ptr<MergeBucketCommand> cmd(
+ new MergeBucketCommand(BucketId(32, 0xf000feee), nodes, 1234));
+ _topLinks[0]->sendDown(cmd);
+ }
+
+ waitUntilMergeQueueIs(*_throttlers[0], 3, _messageWaitTime);
+
+ StorageMessage::SP fwd = _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+
+ // Remove and success-reply for 2 merges. This will give enough room
+ // for the 2 first queued merges to be processed, the last one having a
+ // duplicate in the queue.
+ for (int i = 0; i < 2; ++i) {
+ StorageMessage::SP fwd2 = _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+ std::shared_ptr<MergeBucketReply> reply(
+ new MergeBucketReply(dynamic_cast<const MergeBucketCommand&>(*fwd2)));
+ reply->setResult(ReturnCode(ReturnCode::OK, ""));
+ _topLinks[0]->sendDown(reply);
+ }
+
+ _topLinks[0]->waitForMessages(maxPending + 1, _messageWaitTime);
+ waitUntilMergeQueueIs(*_throttlers[0], 1, _messageWaitTime);
+
+ // Remove all current merge commands/replies so we can work with a clean slate
+ _topLinks[0]->getRepliesOnce();
+ // Send a success-reply for fwd, allowing the duplicate from the queue
+ // to have its moment to shine only to then be struck down mercilessly
+ std::shared_ptr<MergeBucketReply> reply(
+ new MergeBucketReply(dynamic_cast<const MergeBucketCommand&>(*fwd)));
+ reply->setResult(ReturnCode(ReturnCode::OK, ""));
+ _topLinks[0]->sendDown(reply);
+
+ _topLinks[0]->waitForMessages(2, _messageWaitTime);
+ waitUntilMergeQueueIs(*_throttlers[0], 0, _messageWaitTime);
+
+ // First reply is the successful merge reply
+ StorageMessage::SP reply2 = _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET_REPLY);
+ CPPUNIT_ASSERT_EQUAL(
+ static_cast<MergeBucketReply&>(*reply2).getResult().getResult(),
+ ReturnCode::OK);
+
+ // Second reply should be the BUSY-rejected duplicate
+ StorageMessage::SP reply1 = _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET_REPLY);
+ CPPUNIT_ASSERT_EQUAL(
+ static_cast<MergeBucketReply&>(*reply1).getResult().getResult(),
+ ReturnCode::BUSY);
+ CPPUNIT_ASSERT(static_cast<MergeBucketReply&>(*reply1).getResult()
+ .getMessage().find("out of date;") != std::string::npos);
+}
+
+// Test that sending a merge command to a node not in the set of
+// to-be-merged nodes is handled gracefully.
+// This is not a scenario that should ever actually happen, but for
+// the sake of robustness, include it anyway.
+void
+MergeThrottlerTest::testInvalidReceiverNode()
+{
+ // Node set {1, 5, 9} deliberately excludes the receiving node (index 0).
+ std::vector<MergeBucketCommand::Node> nodes;
+ nodes.push_back(1);
+ nodes.push_back(5);
+ nodes.push_back(9);
+ std::shared_ptr<MergeBucketCommand> cmd(
+ new MergeBucketCommand(BucketId(32, 0xf00baaaa), nodes, 1234));
+
+ // Send to node with index 0
+ _topLinks[0]->sendDown(cmd);
+ _topLinks[0]->waitForMessage(MessageType::MERGEBUCKET_REPLY, _messageWaitTime);
+
+ // Must be rejected outright, never forwarded or executed.
+ // (CPPUNIT_ASSERT_EQUAL takes (expected, actual); expected goes first so a
+ // failure is reported with the correct labels, as elsewhere in this file.)
+ StorageMessage::SP reply = _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET_REPLY);
+ CPPUNIT_ASSERT_EQUAL(
+ ReturnCode::REJECTED,
+ static_cast<MergeBucketReply&>(*reply).getResult().getResult());
+ CPPUNIT_ASSERT(static_cast<MergeBucketReply&>(*reply).getResult()
+ .getMessage().find("which is not in its forwarding chain") != std::string::npos);
+}
+
+// Test that the throttling policy kicks in after a certain number of
+// merges are forwarded and that the rest are queued in a prioritized
+// order.
+void
+MergeThrottlerTest::testForwardQueuedMerge()
+{
+ // Fill up all active merges and then 3 queued ones
+ std::size_t maxPending = _throttlers[0]->getThrottlePolicy().getMaxPendingCount();
+ CPPUNIT_ASSERT(maxPending < 100);
+ for (std::size_t i = 0; i < maxPending + 3; ++i) {
+ std::vector<MergeBucketCommand::Node> nodes;
+ nodes.push_back(0);
+ nodes.push_back(2 + i);
+ nodes.push_back(5 + i);
+ std::shared_ptr<MergeBucketCommand> cmd(
+ new MergeBucketCommand(BucketId(32, 0xf00baa00 + i), nodes, 1234));
+ cmd->setPriority(100 - i); // later merges get higher (numerically lower) pri
+ _topLinks[0]->sendDown(cmd);
+ }
+
+ // Wait till we have maxPending replies and 3 queued
+ _topLinks[0]->waitForMessages(maxPending, _messageWaitTime);
+ waitUntilMergeQueueIs(*_throttlers[0], 3, _messageWaitTime);
+
+ // Merge queue state should not be touched by worker thread now
+ StorageMessage::SP nextMerge = _throttlers[0]->getMergeQueue().begin()->_msg;
+
+ StorageMessage::SP fwd = _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+
+ // Remove all the rest of the active merges
+ while (!_topLinks[0]->getReplies().empty()) {
+ _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+ }
+
+ // Complete one merge to free up a pending slot.
+ std::shared_ptr<MergeBucketReply> reply(
+ new MergeBucketReply(dynamic_cast<const MergeBucketCommand&>(*fwd)));
+ reply->setResult(ReturnCode(ReturnCode::OK, "Celebrate good times come on"));
+ _topLinks[0]->sendDown(reply);
+ _topLinks[0]->waitForMessage(MessageType::MERGEBUCKET_REPLY, _messageWaitTime); // Success rewind reply
+
+ // Remove reply bound for distributor
+ StorageMessage::SP distReply = _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET_REPLY);
+ CPPUNIT_ASSERT_EQUAL(
+ static_cast<MergeBucketReply&>(*distReply).getResult().getResult(),
+ ReturnCode::OK);
+
+ waitUntilMergeQueueIs(*_throttlers[0], 2, _messageWaitTime);
+ _topLinks[0]->waitForMessage(MessageType::MERGEBUCKET, _messageWaitTime);
+
+ CPPUNIT_ASSERT_EQUAL(std::size_t(0), _topLinks[0]->getNumCommands());
+ CPPUNIT_ASSERT_EQUAL(std::size_t(1), _topLinks[0]->getNumReplies());
+
+ // First queued merge should now have been registered and forwarded
+ fwd = _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+
+ CPPUNIT_ASSERT_EQUAL(
+ static_cast<const MergeBucketCommand&>(*fwd).getBucketId(),
+ static_cast<const MergeBucketCommand&>(*nextMerge).getBucketId());
+
+ CPPUNIT_ASSERT(
+ static_cast<const MergeBucketCommand&>(*fwd).getNodes()
+ == static_cast<const MergeBucketCommand&>(*nextMerge).getNodes());
+
+ // Ensure forwarded merge has a higher priority than the next queued one
+ CPPUNIT_ASSERT(fwd->getPriority() < _throttlers[0]->getMergeQueue().begin()->_msg->getPriority());
+
+ // Successful chain forwarding counts towards the "chaining" OK metric.
+ CPPUNIT_ASSERT_EQUAL(uint64_t(1), _throttlers[0]->getMetrics().chaining.ok.getValue());
+
+ /*framework::HttpUrlPath path("?xml");
+ _forwarders[0]->reportStatus(std::cerr, path);*/
+}
+
+// Test that a queued merge which has this node (1) as its designated
+// executor (chain already complete) is sent down to the persistence layer,
+// rather than forwarded, once a pending slot frees up.
+void
+MergeThrottlerTest::testExecuteQueuedMerge()
+{
+ MergeThrottler& throttler(*_throttlers[1]);
+ DummyStorageLink& topLink(*_topLinks[1]);
+ DummyStorageLink& bottomLink(*_bottomLinks[1]);
+
+ // Fill up all active merges and then 3 queued ones
+ std::size_t maxPending = throttler.getThrottlePolicy().getMaxPendingCount();
+ CPPUNIT_ASSERT(maxPending < 100);
+ for (std::size_t i = 0; i < maxPending + 3; ++i) {
+ std::vector<MergeBucketCommand::Node> nodes;
+ nodes.push_back(1);
+ nodes.push_back(5 + i);
+ nodes.push_back(7 + i);
+ std::shared_ptr<MergeBucketCommand> cmd(
+ new MergeBucketCommand(BucketId(32, 0xf00baa00 + i), nodes, 1234, 1));
+ cmd->setPriority(250 - i + 5); // all low-priority filler merges
+ topLink.sendDown(cmd);
+ }
+
+ // Wait till we have maxPending replies and 3 queued
+ topLink.waitForMessages(maxPending, _messageWaitTime);
+ waitUntilMergeQueueIs(throttler, 3, _messageWaitTime);
+
+ // Sneak in a higher priority message that is bound to be executed
+ // on the given node
+ {
+ std::vector<MergeBucketCommand::Node> nodes;
+ nodes.push_back(1);
+ nodes.push_back(0);
+ std::vector<uint16_t> chain;
+ chain.push_back(0); // chain already contains node 0, so node 1 executes
+ std::shared_ptr<MergeBucketCommand> cmd(
+ new MergeBucketCommand(BucketId(32, 0x1337), nodes, 1234, 1, chain));
+ cmd->setPriority(0); // highest possible priority
+ topLink.sendDown(cmd);
+ }
+
+ waitUntilMergeQueueIs(throttler, 4, _messageWaitTime);
+
+ // Merge queue state should not be touched by worker thread now
+ StorageMessage::SP nextMerge(throttler.getMergeQueue().begin()->_msg);
+ /*StorageMessage::SP nextMerge;
+ {
+ vespalib::LockGuard lock(_throttlers[0]->getStateLock());
+ // Dirty: have to check internal state
+ nextMerge = _throttlers[0]->getMergeQueue().begin()->_msg;
+ }*/
+
+ // The high-priority merge must be at the head of the queue.
+ CPPUNIT_ASSERT_EQUAL(
+ BucketId(32, 0x1337),
+ dynamic_cast<const MergeBucketCommand&>(*nextMerge).getBucketId());
+
+ StorageMessage::SP fwd(topLink.getAndRemoveMessage(MessageType::MERGEBUCKET));
+
+ // Remove all the rest of the active merges
+ while (!topLink.getReplies().empty()) {
+ topLink.getAndRemoveMessage(MessageType::MERGEBUCKET);
+ }
+
+ // Free up a merge slot
+ std::shared_ptr<MergeBucketReply> reply(
+ new MergeBucketReply(dynamic_cast<const MergeBucketCommand&>(*fwd)));
+ reply->setResult(ReturnCode(ReturnCode::OK, "Celebrate good times come on"));
+ topLink.sendDown(reply);
+
+ topLink.waitForMessage(MessageType::MERGEBUCKET_REPLY, _messageWaitTime);
+ // Remove chain reply
+ StorageMessage::SP distReply(topLink.getAndRemoveMessage(MessageType::MERGEBUCKET_REPLY));
+ CPPUNIT_ASSERT_EQUAL(
+ static_cast<MergeBucketReply&>(*distReply).getResult().getResult(),
+ ReturnCode::OK);
+
+ waitUntilMergeQueueIs(throttler, 3, _messageWaitTime);
+ bottomLink.waitForMessage(MessageType::MERGEBUCKET, _messageWaitTime);
+
+ // The dequeued merge went DOWN (to persistence), not back up the chain.
+ CPPUNIT_ASSERT_EQUAL(std::size_t(0), topLink.getNumCommands());
+ CPPUNIT_ASSERT_EQUAL(std::size_t(0), topLink.getNumReplies());
+ CPPUNIT_ASSERT_EQUAL(std::size_t(1), bottomLink.getNumCommands());
+
+ // First queued merge should now have been registered and sent down
+ StorageMessage::SP cmd(bottomLink.getAndRemoveMessage(MessageType::MERGEBUCKET));
+
+ CPPUNIT_ASSERT_EQUAL(
+ static_cast<const MergeBucketCommand&>(*cmd).getBucketId(),
+ static_cast<const MergeBucketCommand&>(*nextMerge).getBucketId());
+
+ CPPUNIT_ASSERT(
+ static_cast<const MergeBucketCommand&>(*cmd).getNodes()
+ == static_cast<const MergeBucketCommand&>(*nextMerge).getNodes());
+}
+
+// Test that closing/flushing the storage link aborts all merges the
+// throttler currently owns (active-and-chained plus queued), replying
+// ABORTED for each of them.
+void
+MergeThrottlerTest::testFlush()
+{
+ // Fill up all active merges and then 3 queued ones
+ std::size_t maxPending = _throttlers[0]->getThrottlePolicy().getMaxPendingCount();
+ CPPUNIT_ASSERT(maxPending < 100);
+ for (std::size_t i = 0; i < maxPending + 3; ++i) {
+ std::vector<MergeBucketCommand::Node> nodes;
+ nodes.push_back(0);
+ nodes.push_back(1);
+ nodes.push_back(2);
+ std::shared_ptr<MergeBucketCommand> cmd(
+ new MergeBucketCommand(BucketId(32, 0xf00baa00 + i), nodes, 1234, 1));
+ _topLinks[0]->sendDown(cmd);
+ }
+
+ // Wait till we have maxPending replies and 3 queued
+ _topLinks[0]->waitForMessages(maxPending, _messageWaitTime);
+ waitUntilMergeQueueIs(*_throttlers[0], 3, _messageWaitTime);
+
+ // Remove all forwarded commands
+ uint32_t removed = _topLinks[0]->getRepliesOnce().size();
+ CPPUNIT_ASSERT(removed >= 5);
+
+ // Flush the storage link, triggering an abort of all commands
+ // no matter what their current state is.
+ _topLinks[0]->close();
+ _topLinks[0]->flush();
+ _topLinks[0]->waitForMessages(maxPending + 3 - removed, _messageWaitTime);
+
+ // Everything still owned by the throttler must come back ABORTED.
+ while (!_topLinks[0]->getReplies().empty()) {
+ StorageMessage::SP reply = _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET_REPLY);
+ CPPUNIT_ASSERT_EQUAL(
+ ReturnCode::ABORTED,
+ static_cast<const MergeBucketReply&>(*reply).getResult().getResult());
+ }
+ // NOTE: merges that have been immediately executed (i.e. not cycled)
+ // on the node should _not_ be replied to, since they're not owned
+ // by the throttler at that point in time
+}
+
+// If a node goes down and another node has a merge chained through it in
+// its queue, the original node can receive a final chain hop forwarding
+// it knows nothing about when it comes back up. If this is not handled
+// properly, it will attempt to forward this node again with a bogus
+// index. This should be implicitly handled by checking for a full node
+void
+MergeThrottlerTest::testUnseenMergeWithNodeInChain()
+{
+ // Chain already lists every node in the set, yet node 0 has no state
+ // for this merge — the "unseen" final hop.
+ std::vector<MergeBucketCommand::Node> nodes;
+ nodes.push_back(0);
+ nodes.push_back(5);
+ nodes.push_back(9);
+ std::vector<uint16_t> chain;
+ chain.push_back(0);
+ chain.push_back(5);
+ chain.push_back(9);
+ std::shared_ptr<MergeBucketCommand> cmd(
+ new MergeBucketCommand(BucketId(32, 0xdeadbeef), nodes, 1234, 1, chain));
+
+ StorageMessageAddress address("storage", lib::NodeType::STORAGE, 9);
+
+ cmd->setAddress(address);
+ _topLinks[0]->sendDown(cmd);
+
+ // First, test that we get rejected when processing merge immediately
+ // Should get a rejection in return
+ _topLinks[0]->waitForMessage(MessageType::MERGEBUCKET_REPLY, _messageWaitTime);
+ StorageMessage::SP reply = _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET_REPLY);
+ CPPUNIT_ASSERT_EQUAL(
+ ReturnCode::REJECTED,
+ dynamic_cast<const MergeBucketReply&>(*reply).getResult().getResult());
+
+ // Second, test that we get rejected before queueing up. This is to
+ // avoid a hypothetical deadlock scenario.
+ // Fill up all active merges
+ {
+
+ std::size_t maxPending(
+ _throttlers[0]->getThrottlePolicy().getMaxPendingCount());
+ for (std::size_t i = 0; i < maxPending; ++i) {
+ std::shared_ptr<MergeBucketCommand> fillCmd(
+ new MergeBucketCommand(BucketId(32, 0xf00baa00 + i), nodes, 1234));
+ _topLinks[0]->sendDown(fillCmd);
+ }
+ }
+
+ // Resend the unseen merge while the pending window is full.
+ _topLinks[0]->sendDown(cmd);
+
+ _topLinks[0]->waitForMessage(MessageType::MERGEBUCKET_REPLY, _messageWaitTime);
+ reply = _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET_REPLY);
+ CPPUNIT_ASSERT_EQUAL(
+ ReturnCode::REJECTED,
+ dynamic_cast<const MergeBucketReply&>(*reply).getResult().getResult());
+}
+
+// Test that receiving a merge carrying a newer cluster state version than
+// the queued merges causes those outdated queued merges to be bounced with
+// WRONG_DISTRIBUTION (tagged with their own, older state version).
+void
+MergeThrottlerTest::testMergeWithNewerClusterStateFlushesOutdatedQueued()
+{
+ // Fill up all active merges and then 3 queued ones with the same
+ // system state
+ std::size_t maxPending = _throttlers[0]->getThrottlePolicy().getMaxPendingCount();
+ CPPUNIT_ASSERT(maxPending < 100);
+ std::vector<api::StorageMessage::Id> ids; // remember ids to match replies below
+ for (std::size_t i = 0; i < maxPending + 3; ++i) {
+ std::vector<MergeBucketCommand::Node> nodes;
+ nodes.push_back(0);
+ nodes.push_back(1);
+ nodes.push_back(2);
+ std::shared_ptr<MergeBucketCommand> cmd(
+ new MergeBucketCommand(BucketId(32, 0xf00baa00 + i), nodes, 1234, 1));
+ ids.push_back(cmd->getMsgId());
+ _topLinks[0]->sendDown(cmd);
+ }
+
+ // Wait till we have maxPending replies and 3 queued
+ _topLinks[0]->waitForMessages(maxPending, _messageWaitTime);
+ waitUntilMergeQueueIs(*_throttlers[0], 3, _messageWaitTime);
+
+ // Send down merge with newer system state
+ {
+ std::vector<MergeBucketCommand::Node> nodes;
+ nodes.push_back(0);
+ nodes.push_back(1);
+ nodes.push_back(2);
+ std::shared_ptr<MergeBucketCommand> cmd(
+ new MergeBucketCommand(BucketId(32, 0x12345678), nodes, 1234, 2));
+ ids.push_back(cmd->getMsgId());
+ _topLinks[0]->sendDown(cmd);
+ }
+
+ // Queue should now be flushed with all messages being returned with
+ // WRONG_DISTRIBUTION
+ _topLinks[0]->waitForMessages(maxPending + 3, _messageWaitTime);
+ waitUntilMergeQueueIs(*_throttlers[0], 1, _messageWaitTime);
+
+ for (int i = 0; i < 3; ++i) {
+ StorageMessage::SP reply = _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET_REPLY);
+ CPPUNIT_ASSERT_EQUAL(
+ static_cast<MergeBucketReply&>(*reply).getResult().getResult(),
+ ReturnCode::WRONG_DISTRIBUTION);
+ CPPUNIT_ASSERT_EQUAL(1u, static_cast<MergeBucketReply&>(*reply).getClusterStateVersion());
+ CPPUNIT_ASSERT_EQUAL(ids[maxPending + i], reply->getMsgId());
+ }
+
+ // Each bounced queued merge counts as a wrongdistribution chaining failure.
+ CPPUNIT_ASSERT_EQUAL(uint64_t(3), _throttlers[0]->getMetrics().chaining.failures.wrongdistribution.getValue());
+}
+
+// Verifies that an explicit SetSystemState command (to version 3) flushes
+// queued merges tagged with an older version (2), each replied to with
+// WRONG_DISTRIBUTION carrying the stale version.
+void
+MergeThrottlerTest::testUpdatedClusterStateFlushesOutdatedQueued()
+{
+    // State is version 1. Send down several merges with state version 2.
+    std::size_t maxPending = _throttlers[0]->getThrottlePolicy().getMaxPendingCount();
+    CPPUNIT_ASSERT(maxPending < 100);
+    std::vector<api::StorageMessage::Id> ids;
+    for (std::size_t i = 0; i < maxPending + 3; ++i) {
+        std::vector<MergeBucketCommand::Node> nodes;
+        nodes.push_back(0);
+        nodes.push_back(1);
+        nodes.push_back(2);
+        std::shared_ptr<MergeBucketCommand> cmd(
+                new MergeBucketCommand(BucketId(32, 0xf00baa00 + i), nodes, 1234, 2));
+        ids.push_back(cmd->getMsgId());
+        _topLinks[0]->sendDown(cmd);
+    }
+
+    // Wait till we have maxPending replies and 3 queued
+    _topLinks[0]->waitForMessages(maxPending, _messageWaitTime);
+    waitUntilMergeQueueIs(*_throttlers[0], 3, _messageWaitTime);
+
+    // Send down new system state (also set it explicitly)
+    _servers[0]->setClusterState(lib::ClusterState("distributor:100 storage:100 version:3"));
+    std::shared_ptr<api::SetSystemStateCommand> stateCmd(
+            new api::SetSystemStateCommand(lib::ClusterState("distributor:100 storage:100 version:3")));
+    _topLinks[0]->sendDown(stateCmd);
+
+    // Queue should now be flushed with all being replied to with WRONG_DISTRIBUTION
+    waitUntilMergeQueueIs(*_throttlers[0], 0, _messageWaitTime);
+    // NOTE(review): literal 5 (seconds?) timeout here while surrounding waits
+    // use _messageWaitTime -- confirm this is intentional.
+    _topLinks[0]->waitForMessages(maxPending + 3, 5);
+
+    for (int i = 0; i < 3; ++i) {
+        StorageMessage::SP reply = _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET_REPLY);
+        CPPUNIT_ASSERT_EQUAL(
+                static_cast<MergeBucketReply&>(*reply).getResult().getResult(),
+                ReturnCode::WRONG_DISTRIBUTION);
+        // Replies carry the version the commands were sent with (2).
+        CPPUNIT_ASSERT_EQUAL(2u, static_cast<MergeBucketReply&>(*reply).getClusterStateVersion());
+        CPPUNIT_ASSERT_EQUAL(ids[maxPending + i], reply->getMsgId());
+    }
+
+    CPPUNIT_ASSERT_EQUAL(uint64_t(3), _throttlers[0]->getMetrics().chaining.failures.wrongdistribution.getValue());
+}
+
+// Verifies that a merge carrying cluster state version 0 (presumably the
+// legacy 4.2-protocol convention of not specifying a version -- confirm)
+// does not trigger a queue flush; it is simply queued like any other merge.
+void
+MergeThrottlerTest::test42MergesDoNotTriggerFlush()
+{
+    // Fill up all active merges and then 1 queued one
+    std::size_t maxPending = _throttlers[0]->getThrottlePolicy().getMaxPendingCount();
+    CPPUNIT_ASSERT(maxPending < 100);
+    for (std::size_t i = 0; i < maxPending + 1; ++i) {
+        std::vector<MergeBucketCommand::Node> nodes;
+        nodes.push_back(0);
+        nodes.push_back(1);
+        nodes.push_back(2);
+        std::shared_ptr<MergeBucketCommand> cmd(
+                new MergeBucketCommand(BucketId(32, 0xf00baa00 + i), nodes, 1234, 1));
+        _topLinks[0]->sendDown(cmd);
+    }
+
+    // Wait till we have maxPending replies and 1 queued
+    _topLinks[0]->waitForMessages(maxPending, _messageWaitTime);
+    waitUntilMergeQueueIs(*_throttlers[0], 1, _messageWaitTime);
+
+    StorageMessage::SP fwd = _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+
+    // Remove all the rest of the active merges
+    while (!_topLinks[0]->getReplies().empty()) {
+        _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+    }
+
+    // Send down a merge with a cluster state version of 0, which should
+    // be ignored and queued as usual
+    {
+        std::vector<MergeBucketCommand::Node> nodes;
+        nodes.push_back(0);
+        nodes.push_back(1);
+        nodes.push_back(2);
+        std::shared_ptr<MergeBucketCommand> cmd(
+                new MergeBucketCommand(BucketId(32, 0xbaaadbed), nodes, 1234, 0));
+        _topLinks[0]->sendDown(cmd);
+    }
+
+    waitUntilMergeQueueIs(*_throttlers[0], 2, _messageWaitTime);
+
+    // No flush happened: nothing was bounced back up.
+    CPPUNIT_ASSERT_EQUAL(std::size_t(0), _topLinks[0]->getNumCommands());
+    CPPUNIT_ASSERT_EQUAL(std::size_t(0), _topLinks[0]->getNumReplies());
+
+    CPPUNIT_ASSERT_EQUAL(uint64_t(0), _throttlers[0]->getMetrics().local.failures.wrongdistribution.getValue());
+}
+
+// Test that a merge that arrives with a state version that is less than
+// that of the node is rejected immediately with WRONG_DISTRIBUTION
+// (counted on the chaining.failures.wrongdistribution metric).
+void
+MergeThrottlerTest::testOutdatedClusterStateMergesAreRejectedOnArrival()
+{
+    _servers[0]->setClusterState(lib::ClusterState("distributor:100 storage:100 version:10"));
+
+    // Send down a merge with a cluster state version of 9, which should
+    // be rejected
+    {
+        std::vector<MergeBucketCommand::Node> nodes;
+        nodes.push_back(0);
+        nodes.push_back(1);
+        nodes.push_back(2);
+        std::shared_ptr<MergeBucketCommand> cmd(
+                new MergeBucketCommand(BucketId(32, 0xfeef00), nodes, 1234, 9));
+        _topLinks[0]->sendDown(cmd);
+    }
+
+    _topLinks[0]->waitForMessages(1, _messageWaitTime);
+
+    StorageMessage::SP reply = _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET_REPLY);
+    CPPUNIT_ASSERT_EQUAL(
+            static_cast<MergeBucketReply&>(*reply).getResult().getResult(),
+            ReturnCode::WRONG_DISTRIBUTION);
+
+    CPPUNIT_ASSERT_EQUAL(uint64_t(1), _throttlers[0]->getMetrics().chaining.failures.wrongdistribution.getValue());
+}
+
+// Test erroneous case where node receives merge where the merge does
+// not exist in the state, but it exists in the chain without the chain
+// being full. This is something that shouldn't happen, but must still
+// not crash the node. Expected outcome: the merge is REJECTED.
+void
+MergeThrottlerTest::testUnknownMergeWithSelfInChain()
+{
+    BucketId bid(32, 0xbadbed);
+
+    std::vector<MergeBucketCommand::Node> nodes;
+    nodes.push_back(0);
+    nodes.push_back(1);
+    nodes.push_back(2);
+    // Chain already contains node 0 (ourselves), yet the merge is unknown.
+    std::vector<uint16_t> chain;
+    chain.push_back(0);
+    std::shared_ptr<MergeBucketCommand> cmd(
+            new MergeBucketCommand(bid, nodes, 1234, 1, chain));
+
+    StorageMessageAddress address("storage", lib::NodeType::STORAGE, 1);
+
+    cmd->setAddress(address);
+    _topLinks[0]->sendDown(cmd);
+    _topLinks[0]->waitForMessage(MessageType::MERGEBUCKET_REPLY, _messageWaitTime);
+
+    StorageMessage::SP reply = _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET_REPLY);
+
+    CPPUNIT_ASSERT_EQUAL(
+            ReturnCode::REJECTED,
+            static_cast<MergeBucketReply&>(*reply).getResult().getResult());
+}
+
+// Verifies that when both the active merge window and the merge queue are
+// full, a further merge is immediately bounced with BUSY, counted on the
+// local (not chaining) busy-failure metric.
+void
+MergeThrottlerTest::testBusyReturnedOnFullQueue()
+{
+    std::size_t maxPending = _throttlers[0]->getThrottlePolicy().getMaxPendingCount();
+    std::size_t maxQueue = _throttlers[0]->getMaxQueueSize();
+    CPPUNIT_ASSERT(maxPending < 100);
+    for (std::size_t i = 0; i < maxPending + maxQueue; ++i) {
+        std::vector<MergeBucketCommand::Node> nodes;
+        nodes.push_back(0);
+        nodes.push_back(1);
+        nodes.push_back(2);
+        std::shared_ptr<MergeBucketCommand> cmd(
+                new MergeBucketCommand(BucketId(32, 0xf00000 + i), nodes, 1234, 1));
+        _topLinks[0]->sendDown(cmd);
+    }
+
+    // Wait till we have maxPending replies and maxQueue queued
+    _topLinks[0]->waitForMessages(maxPending, _messageWaitTime);
+    waitUntilMergeQueueIs(*_throttlers[0], maxQueue, _messageWaitTime);
+
+    // Clear all forwarded merges
+    _topLinks[0]->getRepliesOnce();
+    // Send down another merge which should be immediately busy-returned
+    {
+        std::vector<MergeBucketCommand::Node> nodes;
+        nodes.push_back(0);
+        nodes.push_back(1);
+        nodes.push_back(2);
+        std::shared_ptr<MergeBucketCommand> cmd(
+                new MergeBucketCommand(BucketId(32, 0xf000baaa), nodes, 1234, 1));
+        _topLinks[0]->sendDown(cmd);
+    }
+    _topLinks[0]->waitForMessage(MessageType::MERGEBUCKET_REPLY, _messageWaitTime);
+    StorageMessage::SP reply = _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET_REPLY);
+
+    // The bounced reply must be for the extra merge, not a queued one.
+    CPPUNIT_ASSERT_EQUAL(
+            BucketId(32, 0xf000baaa),
+            static_cast<MergeBucketReply&>(*reply).getBucketId());
+
+    CPPUNIT_ASSERT_EQUAL(
+            ReturnCode::BUSY,
+            static_cast<MergeBucketReply&>(*reply).getResult().getResult());
+
+    CPPUNIT_ASSERT_EQUAL(uint64_t(0),
+                         _throttlers[0]->getMetrics().chaining
+                         .failures.busy.getValue());
+    CPPUNIT_ASSERT_EQUAL(uint64_t(1),
+                         _throttlers[0]->getMetrics().local
+                         .failures.busy.getValue());
+}
+
+// Simulates a broken merge chain: node 1 forwards a merge to node 2, which
+// then goes down (NOT_CONNECTED) while the cycled merge also completes in
+// persistence (ABORTED). Both unwind replies must share the persistence
+// result, and the throttler's internal state must be cleaned up so a new
+// merge for the same bucket can be processed afterwards.
+void
+MergeThrottlerTest::testBrokenCycle()
+{
+    std::vector<MergeBucketCommand::Node> nodes;
+    nodes.push_back(1);
+    nodes.push_back(0);
+    nodes.push_back(2);
+    {
+        std::vector<uint16_t> chain;
+        chain.push_back(0);
+        std::shared_ptr<MergeBucketCommand> cmd(
+                new MergeBucketCommand(BucketId(32, 0xfeef00), nodes, 1234, 1, chain));
+        _topLinks[1]->sendDown(cmd);
+    }
+
+    // Node 1 should forward the merge on to node 2 (next in the chain).
+    _topLinks[1]->waitForMessage(MessageType::MERGEBUCKET, _messageWaitTime);
+    StorageMessage::SP fwd = _topLinks[1]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+    CPPUNIT_ASSERT_EQUAL(uint16_t(2), fwd->getAddress()->getIndex());
+
+    // Send cycled merge which will be executed
+    {
+        std::vector<uint16_t> chain;
+        chain.push_back(0);
+        chain.push_back(1);
+        chain.push_back(2);
+        std::shared_ptr<MergeBucketCommand> cmd(
+                new MergeBucketCommand(BucketId(32, 0xfeef00), nodes, 1234, 1, chain));
+        _topLinks[1]->sendDown(cmd);
+    }
+
+    _bottomLinks[1]->waitForMessage(MessageType::MERGEBUCKET, _messageWaitTime);
+    StorageMessage::SP cycled = _bottomLinks[1]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+
+    // Now, node 2 goes down, auto sending back a failed merge
+    std::shared_ptr<MergeBucketReply> nodeDownReply(
+            new MergeBucketReply(dynamic_cast<const MergeBucketCommand&>(*fwd)));
+    nodeDownReply->setResult(ReturnCode(ReturnCode::NOT_CONNECTED, "Node went sightseeing"));
+
+    _topLinks[1]->sendDown(nodeDownReply);
+    // Merge reply also arrives from persistence
+    std::shared_ptr<MergeBucketReply> persistenceReply(
+            new MergeBucketReply(dynamic_cast<const MergeBucketCommand&>(*cycled)));
+    persistenceReply->setResult(ReturnCode(ReturnCode::ABORTED, "Oh dear"));
+    _bottomLinks[1]->sendUp(persistenceReply);
+
+    // Should now be two replies from node 1, one to node 2 and one to node 0
+    // since we must handle broken chains
+    _topLinks[1]->waitForMessages(2, _messageWaitTime);
+    // Unwind reply shares the result of the persistence reply
+    for (int i = 0; i < 2; ++i) {
+        StorageMessage::SP reply = _topLinks[1]->getAndRemoveMessage(MessageType::MERGEBUCKET_REPLY);
+        CPPUNIT_ASSERT_EQUAL(api::ReturnCode(ReturnCode::ABORTED, "Oh dear"),
+                             static_cast<MergeBucketReply&>(*reply).getResult());
+    }
+
+    // Make sure it has been removed from the internal state so we can
+    // send new merges for the bucket
+    {
+        std::vector<uint16_t> chain;
+        chain.push_back(0);
+        std::shared_ptr<MergeBucketCommand> cmd(
+                new MergeBucketCommand(BucketId(32, 0xfeef00), nodes, 1234, 1, chain));
+        _topLinks[1]->sendDown(cmd);
+    }
+
+    // NOTE(review): literal 5 timeout; other waits use _messageWaitTime --
+    // confirm this is intentional.
+    _topLinks[1]->waitForMessage(MessageType::MERGEBUCKET, 5);
+    fwd = _topLinks[1]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+    CPPUNIT_ASSERT_EQUAL(uint16_t(2), fwd->getAddress()->getIndex());
+}
+
+// Helper: sends msg down through node 0's top link, waits for a reply of
+// expectedReplyType and asserts its result code equals expectedResultCode.
+void
+MergeThrottlerTest::sendAndExpectReply(
+        const std::shared_ptr<api::StorageMessage>& msg,
+        const api::MessageType& expectedReplyType,
+        api::ReturnCode::Result expectedResultCode)
+{
+    _topLinks[0]->sendDown(msg);
+    _topLinks[0]->waitForMessage(expectedReplyType, _messageWaitTime);
+    StorageMessage::SP reply(_topLinks[0]->getAndRemoveMessage(
+            expectedReplyType));
+    api::StorageReply& storageReply(
+            dynamic_cast<api::StorageReply&>(*reply));
+    CPPUNIT_ASSERT_EQUAL(expectedResultCode,
+                         storageReply.getResult().getResult());
+}
+
+// A GetBucketDiff for a bucket with no active merge must be bounced with
+// ABORTED and never reach the persistence layer below.
+void
+MergeThrottlerTest::testGetBucketDiffCommandNotInActiveSetIsRejected()
+{
+    document::BucketId bucket(16, 1234);
+    std::vector<api::GetBucketDiffCommand::Node> nodes;
+    std::shared_ptr<api::GetBucketDiffCommand> getDiffCmd(
+            new api::GetBucketDiffCommand(bucket, nodes, api::Timestamp(1234)));
+
+    sendAndExpectReply(getDiffCmd,
+                       api::MessageType::GETBUCKETDIFF_REPLY,
+                       api::ReturnCode::ABORTED);
+    // Nothing must have been forwarded downwards.
+    CPPUNIT_ASSERT_EQUAL(std::size_t(0), _bottomLinks[0]->getNumCommands());
+}
+
+// An ApplyBucketDiff for a bucket with no active merge must be bounced with
+// ABORTED and never reach the persistence layer below.
+void
+MergeThrottlerTest::testApplyBucketDiffCommandNotInActiveSetIsRejected()
+{
+    document::BucketId bucket(16, 1234);
+    std::vector<api::GetBucketDiffCommand::Node> nodes;
+    std::shared_ptr<api::ApplyBucketDiffCommand> applyDiffCmd(
+            new api::ApplyBucketDiffCommand(bucket, nodes, api::Timestamp(1234)));
+
+    sendAndExpectReply(applyDiffCmd,
+                       api::MessageType::APPLYBUCKETDIFF_REPLY,
+                       api::ReturnCode::ABORTED);
+    // Nothing must have been forwarded downwards.
+    CPPUNIT_ASSERT_EQUAL(std::size_t(0), _bottomLinks[0]->getNumCommands());
+}
+
+// Helper: builds a MergeBucketCommand from the builder and sends it down
+// through the top link of the merge's first node. Returns the command so
+// the caller can inspect ids etc.
+api::MergeBucketCommand::SP
+MergeThrottlerTest::sendMerge(const MergeBuilder& builder)
+{
+    api::MergeBucketCommand::SP cmd(builder.create());
+    _topLinks[builder._nodes[0]]->sendDown(cmd);
+    return cmd;
+}
+
+// An incoming cluster state with a higher version must abort active merges
+// tagged with an older version: no reply is sent until unwinding, and a
+// subsequent GetBucketDiff for the aborted merge's bucket fails with ABORTED.
+void
+MergeThrottlerTest::testNewClusterStateAbortsAllOutdatedActiveMerges()
+{
+    document::BucketId bucket(16, 6789);
+    // Window of 1 so the single merge below is guaranteed to be active.
+    _throttlers[0]->getThrottlePolicy().setMaxPendingCount(1);
+
+    // Merge will be forwarded (i.e. active).
+    sendMerge(MergeBuilder(bucket).clusterStateVersion(10));
+    _topLinks[0]->waitForMessage(MessageType::MERGEBUCKET, _messageWaitTime);
+    StorageMessage::SP fwd(_topLinks[0]->getAndRemoveMessage(
+            MessageType::MERGEBUCKET));
+
+    _topLinks[0]->sendDown(makeSystemStateCmd(
+            "version:11 distributor:100 storage:100"));
+    // Cannot send reply until we're unwinding
+    CPPUNIT_ASSERT_EQUAL(std::size_t(0), _topLinks[0]->getNumReplies());
+
+    // Trying to diff the bucket should now fail
+    {
+        std::shared_ptr<api::GetBucketDiffCommand> getDiffCmd(
+                new api::GetBucketDiffCommand(bucket, {}, api::Timestamp(123)));
+
+        sendAndExpectReply(getDiffCmd,
+                           api::MessageType::GETBUCKETDIFF_REPLY,
+                           api::ReturnCode::ABORTED);
+    }
+}
+
+// TODO test message queue aborting (use rendezvous functionality--make guard)
+
+} // namespace storage
diff --git a/storage/src/tests/storageserver/priorityconvertertest.cpp b/storage/src/tests/storageserver/priorityconvertertest.cpp
new file mode 100644
index 00000000000..ecbdcfb6b91
--- /dev/null
+++ b/storage/src/tests/storageserver/priorityconvertertest.cpp
@@ -0,0 +1,104 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/documentapi/documentapi.h>
+#include <vespa/storage/storageserver/priorityconverter.h>
+#include <tests/common/testhelper.h>
+
+namespace storage {
+
+// Test fixture for PriorityConverter: checks the mapping between
+// documentapi priority enum values and storage-internal priorities (0-255).
+struct PriorityConverterTest : public CppUnit::TestFixture
+{
+    std::unique_ptr<PriorityConverter> _converter;
+
+    // Builds a converter from the standard test config before each test.
+    void setUp() {
+        vdstestlib::DirConfig config(getStandardConfig(true));
+        _converter.reset(new PriorityConverter(config.getConfigId()));
+    }
+
+    void testNormalUsage();
+    void testLowestPriorityIsReturnedForUnknownCode();
+
+    CPPUNIT_TEST_SUITE(PriorityConverterTest);
+    CPPUNIT_TEST(testNormalUsage);
+    CPPUNIT_TEST(testLowestPriorityIsReturnedForUnknownCode);
+    CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(PriorityConverterTest);
+
+// Verifies the two conversion directions:
+//  - documentapi priority p (0..15) maps to storage priority 50 + p*10;
+//  - every storage priority byte (0..255) maps back to the documentapi
+//    bucket whose upper bound it falls under (steps of 10, >200 => LOWEST).
+void PriorityConverterTest::testNormalUsage()
+{
+    for (int p=0; p<16; ++p) {
+        CPPUNIT_ASSERT_EQUAL(
+                (uint8_t)(50+p*10),
+                _converter->toStoragePriority(
+                        static_cast<documentapi::Priority::Value>(p)));
+    }
+    for (int i=0; i<256; ++i) {
+        uint8_t p = i;
+        if (p <= 50) {
+            CPPUNIT_ASSERT_EQUAL(documentapi::Priority::PRI_HIGHEST,
+                                 _converter->toDocumentPriority(p));
+        } else if (p <= 60) {
+            CPPUNIT_ASSERT_EQUAL(documentapi::Priority::PRI_VERY_HIGH,
+                                 _converter->toDocumentPriority(p));
+        } else if (p <= 70) {
+            CPPUNIT_ASSERT_EQUAL(documentapi::Priority::PRI_HIGH_1,
+                                 _converter->toDocumentPriority(p));
+        } else if (p <= 80) {
+            CPPUNIT_ASSERT_EQUAL(documentapi::Priority::PRI_HIGH_2,
+                                 _converter->toDocumentPriority(p));
+        } else if (p <= 90) {
+            CPPUNIT_ASSERT_EQUAL(documentapi::Priority::PRI_HIGH_3,
+                                 _converter->toDocumentPriority(p));
+        } else if (p <= 100) {
+            CPPUNIT_ASSERT_EQUAL(documentapi::Priority::PRI_NORMAL_1,
+                                 _converter->toDocumentPriority(p));
+        } else if (p <= 110) {
+            CPPUNIT_ASSERT_EQUAL(documentapi::Priority::PRI_NORMAL_2,
+                                 _converter->toDocumentPriority(p));
+        } else if (p <= 120) {
+            CPPUNIT_ASSERT_EQUAL(documentapi::Priority::PRI_NORMAL_3,
+                                 _converter->toDocumentPriority(p));
+        } else if (p <= 130) {
+            CPPUNIT_ASSERT_EQUAL(documentapi::Priority::PRI_NORMAL_4,
+                                 _converter->toDocumentPriority(p));
+        } else if (p <= 140) {
+            CPPUNIT_ASSERT_EQUAL(documentapi::Priority::PRI_NORMAL_5,
+                                 _converter->toDocumentPriority(p));
+        } else if (p <= 150) {
+            CPPUNIT_ASSERT_EQUAL(documentapi::Priority::PRI_NORMAL_6,
+                                 _converter->toDocumentPriority(p));
+        } else if (p <= 160) {
+            CPPUNIT_ASSERT_EQUAL(documentapi::Priority::PRI_LOW_1,
+                                 _converter->toDocumentPriority(p));
+        } else if (p <= 170) {
+            CPPUNIT_ASSERT_EQUAL(documentapi::Priority::PRI_LOW_2,
+                                 _converter->toDocumentPriority(p));
+        } else if (p <= 180) {
+            CPPUNIT_ASSERT_EQUAL(documentapi::Priority::PRI_LOW_3,
+                                 _converter->toDocumentPriority(p));
+        } else if (p <= 190) {
+            CPPUNIT_ASSERT_EQUAL(documentapi::Priority::PRI_VERY_LOW,
+                                 _converter->toDocumentPriority(p));
+        } else if (p <= 200) {
+            CPPUNIT_ASSERT_EQUAL(documentapi::Priority::PRI_LOWEST,
+                                 _converter->toDocumentPriority(p));
+        } else {
+            CPPUNIT_ASSERT_EQUAL(documentapi::Priority::PRI_LOWEST,
+                                 _converter->toDocumentPriority(p));
+        }
+    }
+}
+
+
+// An out-of-range documentapi priority value (123 is not a valid enum
+// member) must map to the lowest storage priority, 255.
+void
+PriorityConverterTest::testLowestPriorityIsReturnedForUnknownCode()
+{
+    CPPUNIT_ASSERT_EQUAL(255,
+                         static_cast<int>(_converter->toStoragePriority(
+                                 static_cast<documentapi::Priority::Value>(123))));
+}
+
+}
diff --git a/storage/src/tests/storageserver/statemanagertest.cpp b/storage/src/tests/storageserver/statemanagertest.cpp
new file mode 100644
index 00000000000..68a35ac37d9
--- /dev/null
+++ b/storage/src/tests/storageserver/statemanagertest.cpp
@@ -0,0 +1,264 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <boost/pointer_cast.hpp>
+#include <cppunit/extensions/HelperMacros.h>
+#include <iostream>
+#include <vespa/metrics/metricmanager.h>
+#include <string>
+#include <vespa/storageapi/message/bucket.h>
+#include <vespa/storageapi/message/state.h>
+#include <vespa/vdslib/state/nodestate.h>
+#include <vespa/storage/frameworkimpl/component/storagecomponentregisterimpl.h>
+#include <vespa/storage/storageserver/statemanager.h>
+#include <vespa/storage/common/hostreporter/hostinfo.h>
+#include <tests/common/teststorageapp.h>
+#include <tests/common/testhelper.h>
+#include <tests/common/dummystoragelink.h>
+#include <vespa/vespalib/data/slime/type.h>
+
+using storage::lib::NodeState;
+using storage::lib::NodeType;
+using storage::lib::State;
+using storage::lib::ClusterState;
+
+namespace storage {
+
+// Test fixture for StateManager. Chain layout built in setUp():
+// _upper (dummy) -> _manager (owned by _upper) -> _lower (dummy).
+// _manager/_lower are raw pointers because ownership lives in the chain.
+struct StateManagerTest : public CppUnit::TestFixture {
+    std::unique_ptr<TestServiceLayerApp> _node;
+    std::unique_ptr<DummyStorageLink> _upper;
+    std::unique_ptr<metrics::MetricManager> _metricManager;
+    StateManager* _manager;
+    DummyStorageLink* _lower;
+
+    StateManagerTest();
+
+    void setUp();
+    void tearDown();
+
+    void testSystemState();
+    void testReportedNodeState();
+    void testClusterStateVersion();
+
+    CPPUNIT_TEST_SUITE(StateManagerTest);
+    CPPUNIT_TEST(testSystemState);
+    CPPUNIT_TEST(testReportedNodeState);
+    CPPUNIT_TEST(testClusterStateVersion);
+    CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(StateManagerTest);
+
+// Default-constructs the smart pointers and nulls the raw chain pointers;
+// the real wiring happens in setUp().
+StateManagerTest::StateManagerTest()
+    : _node(),
+      _upper(),
+      _manager(0),
+      _lower(0)
+{
+}
+
+// Builds the storage-link chain _upper -> StateManager -> _lower around a
+// fresh TestServiceLayerApp (1 disk, node index 2) with a fake clock.
+void
+StateManagerTest::setUp() {
+    try{
+        vdstestlib::DirConfig config(getStandardConfig(true));
+        _node.reset(new TestServiceLayerApp(DiskCount(1), NodeIndex(2)));
+        // Clock will increase 1 sec per call.
+        _node->getClock().setAbsoluteTimeInSeconds(1);
+        _metricManager.reset(new metrics::MetricManager);
+        _upper.reset(new DummyStorageLink());
+        _manager = new StateManager(_node->getComponentRegister(),
+                                    *_metricManager,
+                                    std::unique_ptr<HostInfo>(new HostInfo));
+        _lower = new DummyStorageLink();
+        // The chain takes ownership of _manager and _lower.
+        _upper->push_back(StorageLink::UP(_manager));
+        _upper->push_back(StorageLink::UP(_lower));
+        _upper->open();
+    } catch (std::exception& e) {
+        // NOTE(review): setup failures are only logged, not rethrown --
+        // a failed setUp leaves the fixture half-built; confirm intended.
+        std::cerr << "Failed to static initialize objects: " << e.what()
+                  << "\n";
+    }
+}
+
+// Asserts no stray messages remain at either end, then tears the chain
+// down. _manager/_lower are not deleted here: the chain owns them.
+void
+StateManagerTest::tearDown() {
+    CPPUNIT_ASSERT_EQUAL(size_t(0), _lower->getNumReplies());
+    CPPUNIT_ASSERT_EQUAL(size_t(0), _lower->getNumCommands());
+    CPPUNIT_ASSERT_EQUAL(size_t(0), _upper->getNumReplies());
+    CPPUNIT_ASSERT_EQUAL(size_t(0), _upper->getNumCommands());
+    _manager = 0;
+    _lower = 0;
+    _upper->close();
+    _upper->flush();
+    _upper.reset(0);
+    _node.reset(0);
+    _metricManager.reset();
+}
+
+// Pops the single reply waiting on _upper into `varname` (as a
+// StorageReply), asserting there is exactly one reply, that it really is a
+// reply, and that its result code is OK. Resets _upper as a side effect.
+#define GET_ONLY_OK_REPLY(varname) \
+{ \
+    CPPUNIT_ASSERT_EQUAL(size_t(1), _upper->getNumReplies()); \
+    CPPUNIT_ASSERT(_upper->getReply(0)->getType().isReply()); \
+    varname = std::dynamic_pointer_cast<api::StorageReply>( \
+            _upper->getReply(0)); \
+    CPPUNIT_ASSERT(varname != 0); \
+    _upper->reset(); \
+    CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::OK), \
+                         varname->getResult()); \
+}
+
+// Verifies the initial cluster/node state ("cluster:d" / "s:d") and that a
+// SetSystemState command updates both the stored cluster state and the
+// node's own derived state (here: maintenance for node 2).
+void
+StateManagerTest::testSystemState()
+{
+    std::shared_ptr<api::StorageReply> reply;
+    // Verify initial state on startup
+    ClusterState::CSP currentState = _manager->getSystemState();
+    CPPUNIT_ASSERT_EQUAL(std::string("cluster:d"),
+                         currentState->toString(false));
+
+    NodeState::CSP currentNodeState = _manager->getCurrentNodeState();
+    CPPUNIT_ASSERT_EQUAL(std::string("s:d"), currentNodeState->toString(false));
+
+    // Node index 2 is set to maintenance in this state.
+    ClusterState sendState("storage:4 .2.s:m");
+    std::shared_ptr<api::SetSystemStateCommand> cmd(
+            new api::SetSystemStateCommand(sendState));
+    _upper->sendDown(cmd);
+    GET_ONLY_OK_REPLY(reply);
+
+    currentState = _manager->getSystemState();
+    CPPUNIT_ASSERT_EQUAL(sendState, *currentState);
+
+    currentNodeState = _manager->getCurrentNodeState();
+    CPPUNIT_ASSERT_EQUAL(std::string("s:m"), currentNodeState->toString(false));
+}
+
+namespace {
+    // Records every reported-node-state transition as "<old> -> <new>\n"
+    // into ost, so tests can assert the exact sequence of change events.
+    struct MyStateListener : public StateListener {
+        const NodeStateUpdater& updater;
+        lib::NodeState current;
+        std::ostringstream ost;
+
+        MyStateListener(const NodeStateUpdater& upd)
+            : updater(upd), current(*updater.getReportedNodeState()) {}
+
+        void handleNewState()
+        {
+            ost << current << " -> ";
+            current = *updater.getReportedNodeState();
+            ost << current << "\n";
+        }
+    };
+}
+
+// Exercises the reported-node-state lifecycle: initial INITIALIZING state,
+// explicit update under the state-change lock, GetNodeState with no/wrong/
+// matching expected state (the matching case must block until the state
+// actually changes), and listener add/remove event delivery.
+void
+StateManagerTest::testReportedNodeState()
+{
+    std::shared_ptr<api::StorageReply> reply;
+    // Add a state listener to check that we get events.
+    MyStateListener stateListener(*_manager);
+    _manager->addStateListener(stateListener);
+    // Test that initial state is initializing
+    NodeState::CSP nodeState = _manager->getReportedNodeState();
+    CPPUNIT_ASSERT_EQUAL(std::string("s:i b:58 i:0 t:1"), nodeState->toString(false));
+    // Test that it works to update the state
+    {
+        NodeStateUpdater::Lock::SP lock(_manager->grabStateChangeLock());
+        NodeState ns(*_manager->getReportedNodeState());
+        ns.setState(State::UP);
+        _manager->setReportedNodeState(ns);
+    }
+    // And that we get the change both through state interface
+    nodeState = _manager->getReportedNodeState();
+    CPPUNIT_ASSERT_EQUAL(std::string("s:u b:58 t:1"),
+                         nodeState->toString(false));
+    // And get node state command (no expected state)
+    std::shared_ptr<api::GetNodeStateCommand> cmd(
+            new api::GetNodeStateCommand(lib::NodeState::UP()));
+    _upper->sendDown(cmd);
+    GET_ONLY_OK_REPLY(reply);
+    CPPUNIT_ASSERT_EQUAL(api::MessageType::GETNODESTATE_REPLY,
+                         reply->getType());
+    nodeState.reset(new NodeState(
+            dynamic_cast<api::GetNodeStateReply&>(*reply).getNodeState()));
+    CPPUNIT_ASSERT_EQUAL(std::string("s:u b:58 t:1"),
+                         nodeState->toString(false));
+    // We should also get it with wrong expected state
+    cmd.reset(new api::GetNodeStateCommand(lib::NodeState::UP(new NodeState(NodeType::STORAGE, State::INITIALIZING))));
+    _upper->sendDown(cmd);
+    GET_ONLY_OK_REPLY(reply);
+    CPPUNIT_ASSERT_EQUAL(api::MessageType::GETNODESTATE_REPLY,
+                         reply->getType());
+    nodeState.reset(new NodeState(
+            dynamic_cast<api::GetNodeStateReply&>(*reply).getNodeState()));
+    CPPUNIT_ASSERT_EQUAL(std::string("s:u b:58 t:1"),
+                         nodeState->toString(false));
+    // With correct wanted state we should not get response right away
+    cmd.reset(new api::GetNodeStateCommand(
+            lib::NodeState::UP(new NodeState("s:u b:58 t:1", &NodeType::STORAGE))));
+    _upper->sendDown(cmd);
+    CPPUNIT_ASSERT_EQUAL(size_t(0), _upper->getNumReplies());
+    // But when we update state, we get the reply
+    {
+        NodeStateUpdater::Lock::SP lock(_manager->grabStateChangeLock());
+        NodeState ns(*_manager->getReportedNodeState());
+        ns.setState(State::STOPPING);
+        ns.setDescription("Stopping node");
+        _manager->setReportedNodeState(ns);
+    }
+
+    GET_ONLY_OK_REPLY(reply);
+    CPPUNIT_ASSERT_EQUAL(api::MessageType::GETNODESTATE_REPLY,
+                         reply->getType());
+    nodeState.reset(new NodeState(
+            dynamic_cast<api::GetNodeStateReply&>(*reply).getNodeState()));
+    CPPUNIT_ASSERT_EQUAL(std::string("s:s b:58 t:1 m:Stopping\\x20node"),
+                         nodeState->toString(false));
+
+    // Removing state listener, it stops getting updates
+    _manager->removeStateListener(stateListener);
+    // Do another update which listener should not get..
+    {
+        NodeStateUpdater::Lock::SP lock(_manager->grabStateChangeLock());
+        NodeState ns(*_manager->getReportedNodeState());
+        ns.setState(State::UP);
+        _manager->setReportedNodeState(ns);
+    }
+    // Exactly two transitions were observed; the third (post-removal)
+    // update must not appear.
+    std::string expectedEvents =
+        "s:i b:58 i:0 t:1 -> s:u b:58 t:1\n"
+        "s:u b:58 t:1 -> s:s b:58 t:1 m:Stopping\\x20node\n";
+    CPPUNIT_ASSERT_EQUAL(expectedEvents, stateListener.ost.str());
+}
+
+// Bumps the cluster state version to 123 and verifies that the node info
+// JSON exposes it as an integer under "cluster-state-version".
+void
+StateManagerTest::testClusterStateVersion()
+{
+    ClusterState state(*_manager->getSystemState());
+    state.setVersion(123);
+    _manager->setClusterState(state);
+
+    std::string nodeInfoString(_manager->getNodeInfo());
+    vespalib::Slime nodeInfo;
+    vespalib::slime::JsonFormat::decode(nodeInfoString, nodeInfo);
+
+    // The symbol must exist in the slime symbol table at all...
+    vespalib::slime::Symbol lookupSymbol =
+        nodeInfo.lookup("cluster-state-version");
+    if (lookupSymbol.undefined()) {
+        CPPUNIT_FAIL("No cluster-state-version was found in the node info");
+    }
+
+    // ...and be reachable as a valid cursor from the root object.
+    auto& cursor = nodeInfo.get();
+    auto& clusterStateVersionCursor = cursor["cluster-state-version"];
+    if (!clusterStateVersionCursor.valid()) {
+        CPPUNIT_FAIL("No cluster-state-version was found in the node info");
+    }
+
+    if (clusterStateVersionCursor.type().getId() != vespalib::slime::LONG::ID) {
+        CPPUNIT_FAIL("cluster-state-version in node info is not an integer");
+    }
+
+    int version = static_cast<int>(clusterStateVersionCursor.asLong());
+    CPPUNIT_ASSERT_EQUAL(123, version);
+}
+
+} // storage
+
diff --git a/storage/src/tests/storageserver/statereportertest.cpp b/storage/src/tests/storageserver/statereportertest.cpp
new file mode 100644
index 00000000000..ef1592bce80
--- /dev/null
+++ b/storage/src/tests/storageserver/statereportertest.cpp
@@ -0,0 +1,279 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <cppunit/extensions/HelperMacros.h>
+#include <vespa/log/log.h>
+#include <vespa/storageframework/defaultimplementation/clock/fakeclock.h>
+#include <vespa/storage/persistence/filestorage/filestormanager.h>
+#include <vespa/storage/storageserver/applicationgenerationfetcher.h>
+#include <vespa/storage/storageserver/statereporter.h>
+#include <tests/common/teststorageapp.h>
+#include <tests/common/testhelper.h>
+#include <tests/common/dummystoragelink.h>
+
+LOG_SETUP(".test.statereporter");
+
+namespace storage {
+
+// Stub generation fetcher returning a fixed generation/component name so
+// the state reporter has something deterministic to report.
+// (Class name misspells "Fetcher"; kept as-is since it is an identifier.)
+class DummyApplicationGenerationFether : public ApplicationGenerationFetcher {
+public:
+    virtual int64_t getGeneration() const { return 1; }
+    virtual std::string getComponentName() const { return "component"; }
+};
+
+// Test fixture for StateReporter: a service-layer test app with a fake
+// clock, a metric manager driven by that clock, and filestor metrics
+// registered under a "vds" top-level metric set.
+struct StateReporterTest : public CppUnit::TestFixture {
+    FastOS_ThreadPool _threadPool;
+    framework::defaultimplementation::FakeClock* _clock;
+    std::unique_ptr<TestServiceLayerApp> _node;
+    std::unique_ptr<DummyStorageLink> _top;
+    DummyApplicationGenerationFether _generationFetcher;
+    std::unique_ptr<StateReporter> _stateReporter;
+    std::unique_ptr<vdstestlib::DirConfig> _config;
+    std::unique_ptr<metrics::MetricSet> _topSet;
+    std::unique_ptr<metrics::MetricManager> _metricManager;
+    std::shared_ptr<FileStorMetrics> _filestorMetrics;
+
+    StateReporterTest();
+
+    void setUp();
+    void tearDown();
+    void runLoad(uint32_t count = 1);
+
+    void testReportConfigGeneration();
+    void testReportHealth();
+    void testReportMetrics();
+
+    CPPUNIT_TEST_SUITE(StateReporterTest);
+    CPPUNIT_TEST(testReportConfigGeneration);
+    CPPUNIT_TEST(testReportHealth);
+    CPPUNIT_TEST(testReportMetrics);
+    CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(StateReporterTest);
+
+namespace {
+    // Adapts the framework clock to the metric manager's Timer interface so
+    // tests can drive snapshotting via the fake clock.
+    struct MetricClock : public metrics::MetricManager::Timer
+    {
+        framework::Clock& _clock;
+        MetricClock(framework::Clock& c) : _clock(c) {}
+        virtual time_t getTime() const
+        { return _clock.getTimeInSeconds().getTime(); }
+        virtual time_t getTimeInMilliSecs() const
+        { return _clock.getTimeInMillis().getTime(); }
+    };
+}
+
+// Sizes the thread pool stack and nulls the clock pointer; the remaining
+// members are default-constructed and wired up in setUp().
+StateReporterTest::StateReporterTest()
+    : _threadPool(256*1024),
+      _clock(0),
+      _top(),
+      _stateReporter()
+{
+}
+
+// Builds a 4-disk test node with dummy persistence, a metric manager driven
+// by the fake clock, and a StateReporter wired to both, then registers
+// filestor metrics so /state/v1/metrics has real content to report.
+void StateReporterTest::setUp() {
+    // Start from a clean VDS root directory.
+    assert(system("rm -rf vdsroot") == 0);
+    _config.reset(new vdstestlib::DirConfig(getStandardConfig(true)));
+    try {
+        _node.reset(new TestServiceLayerApp(DiskCount(4), NodeIndex(0),
+                                            _config->getConfigId()));
+        _node->setupDummyPersistence();
+        _clock = &_node->getClock();
+        _clock->setAbsoluteTimeInSeconds(1000000);
+        _top.reset(new DummyStorageLink);
+    } catch (config::InvalidConfigException& e) {
+        // NOTE(review): config failure is only printed, not rethrown --
+        // subsequent lines would then run on a half-built fixture; confirm.
+        fprintf(stderr, "%s\n", e.what());
+    }
+    _metricManager.reset(new metrics::MetricManager(
+            std::unique_ptr<metrics::MetricManager::Timer>(
+                    new MetricClock(*_clock))));
+    _topSet.reset(new metrics::MetricSet("vds", "", ""));
+    {
+        metrics::MetricLockGuard guard(_metricManager->getMetricLock());
+        _metricManager->registerMetric(guard, *_topSet);
+    }
+
+    _stateReporter.reset(new StateReporter(
+            _node->getComponentRegister(),
+            *_metricManager,
+            _generationFetcher,
+            "status"));
+
+    uint16_t diskCount = _node->getPartitions().size();
+    documentapi::LoadTypeSet::SP loadTypes(_node->getLoadTypes());
+
+    _filestorMetrics.reset(new FileStorMetrics(
+            _node->getLoadTypes()->getMetricLoadTypes()));
+    _filestorMetrics->initDiskMetrics(
+            diskCount, loadTypes->getMetricLoadTypes(), 1);
+    _topSet->registerMetric(*_filestorMetrics);
+
+    _metricManager->init(_config->getConfigId(), _node->getThreadPool());
+}
+
+// Stops the metric manager thread first, then releases everything in
+// reverse dependency order (reporter before metric sets before the node).
+void StateReporterTest::tearDown() {
+    _metricManager->stop();
+    _stateReporter.reset(0);
+    _topSet.reset(0);
+    _metricManager.reset(0);
+    _top.reset(0);
+    _node.reset(0);
+    _config.reset(0);
+    _filestorMetrics.reset();
+}
+
+// Parses jsonData into a local `slime` object, failing the test with the
+// re-encoded buffer in the message if the whole input was not consumed.
+#define PARSE_JSON(jsonData) \
+vespalib::Slime slime; \
+{ \
+    using namespace vespalib::slime; \
+    size_t parsed = JsonFormat::decode(Memory(jsonData), slime); \
+    SimpleBuffer buffer; \
+    JsonFormat::encode(slime, buffer, false); \
+    if (jsonData.size() != parsed) { \
+        std::ostringstream error; \
+        error << "Failed to parse JSON: '\n" \
+              << jsonData << "'\n:" << buffer.get().make_string() << "\n"; \
+        CPPUNIT_ASSERT_EQUAL_MSG(error.str(), jsonData.size(), parsed); \
+    } \
+}
+
+// Asserts config.<component>.generation == generation in the JSON report.
+#define ASSERT_GENERATION(jsonData, component, generation) \
+{ \
+    PARSE_JSON(jsonData); \
+    CPPUNIT_ASSERT_EQUAL( \
+            generation, \
+            slime.get()["config"][component]["generation"].asDouble()); \
+}
+
+// Asserts status.code and status.message in the JSON health report.
+#define ASSERT_NODE_STATUS(jsonData, code, message) \
+{ \
+    PARSE_JSON(jsonData); \
+    CPPUNIT_ASSERT_EQUAL( \
+            vespalib::string(code), \
+            slime.get()["status"]["code"].asString().make_string()); \
+    CPPUNIT_ASSERT_EQUAL( \
+            vespalib::string(message), \
+            slime.get()["status"]["message"].asString().make_string()); \
+}
+
+// Scans metrics.values for the filestor get/put sum counts and asserts they
+// equal the expected values; also sanity-checks that a substantial number
+// (>100) of metrics were reported.
+#define ASSERT_METRIC_GET_PUT(jsonData, expGetCount, expPutCount) \
+{ \
+    PARSE_JSON(jsonData); \
+    double getCount = -1; \
+    double putCount = -1; \
+    size_t metricCount = slime.get()["metrics"]["values"].children(); \
+    /*std::cerr << "\nmetric count=" << metricCount << "\n";*/ \
+    for (size_t j=0; j<metricCount; j++) { \
+        const vespalib::string name = slime.get()["metrics"]["values"][j]["name"] \
+                .asString().make_string(); \
+        if (name.compare("vds.filestor.alldisks.allthreads." \
+                         "get.sum.count") == 0) \
+        { \
+            getCount = slime.get()["metrics"]["values"][j]["values"]["count"] \
+                    .asDouble(); \
+        } else if (name.compare("vds.filestor.alldisks.allthreads." \
+                                "put.sum.count") == 0) \
+        { \
+            putCount = slime.get()["metrics"]["values"][j]["values"]["count"] \
+                    .asDouble(); \
+        } \
+    } \
+    CPPUNIT_ASSERT_EQUAL(expGetCount, getCount); \
+    CPPUNIT_ASSERT_EQUAL(expPutCount, putCount); \
+    CPPUNIT_ASSERT(metricCount > 100); \
+}
+
+
+// /state/v1/config must report the dummy fetcher's generation (1) for the
+// "component" entry.
+void StateReporterTest::testReportConfigGeneration() {
+    std::ostringstream ost;
+    framework::HttpUrlPath path("/state/v1/config");
+    _stateReporter->reportStatus(ost, path);
+    std::string jsonData = ost.str();
+    //std::cerr << "\nConfig: " << jsonData << "\n";
+    ASSERT_GENERATION(jsonData, "component", 1.0);
+}
+
+// /state/v1/health must report "up" only for the UP node state; every other
+// state (including RETIRED and INITIALIZING) maps to "down", with the node
+// state echoed in the message.
+void StateReporterTest::testReportHealth() {
+    const int stateCount = 7;
+    const lib::NodeState nodeStates[stateCount] = {
+        lib::NodeState(lib::NodeType::STORAGE, lib::State::UNKNOWN),
+        lib::NodeState(lib::NodeType::STORAGE, lib::State::MAINTENANCE),
+        lib::NodeState(lib::NodeType::STORAGE, lib::State::DOWN),
+        lib::NodeState(lib::NodeType::STORAGE, lib::State::STOPPING),
+        lib::NodeState(lib::NodeType::STORAGE, lib::State::INITIALIZING),
+        lib::NodeState(lib::NodeType::STORAGE, lib::State::RETIRED),
+        lib::NodeState(lib::NodeType::STORAGE, lib::State::UP)
+    };
+    // codes[i]/messages[i] are the expected report for nodeStates[i].
+    const char* codes[stateCount] = {
+        "down",
+        "down",
+        "down",
+        "down",
+        "down",
+        "down",
+        "up"
+    };
+    const char* messages[stateCount] = {
+        "Node state: Unknown",
+        "Node state: Maintenance",
+        "Node state: Down",
+        "Node state: Stopping",
+        "Node state: Initializing, init progress 0",
+        "Node state: Retired",
+        ""
+    };
+
+    framework::HttpUrlPath path("/state/v1/health");
+    for (int i=0; i<stateCount; i++) {
+        _node->getStateUpdater().setCurrentNodeState(nodeStates[i]);
+        std::ostringstream ost;
+        _stateReporter->reportStatus(ost, path);
+        std::string jsonData = ost.str();
+        //std::cerr << "\nHealth " << i << ":" << jsonData << "\n";
+        ASSERT_NODE_STATUS(jsonData, codes[i], messages[i]);
+    }
+}
+
+// Increments a get counter, advances the fake clock past a 5-minute metric
+// snapshot, then increments a put counter. The reported snapshot must show
+// get=1 and put=0 (the put happened after the snapshot), both with and
+// without an explicit consumer in the URL.
+void StateReporterTest::testReportMetrics() {
+    FileStorDiskMetrics& disk0(*_filestorMetrics->disks[0]);
+    FileStorThreadMetrics& thread0(*disk0.threads[0]);
+
+    LOG(info, "Adding to get metric");
+
+    using documentapi::LoadType;
+    thread0.get[LoadType::DEFAULT].count.inc(1);
+
+    LOG(info, "Waiting for 5 minute snapshot to be taken");
+    // Wait until active metrics have been added to 5 min snapshot and reset
+    for (uint32_t i=0; i<6; ++i) {
+        _clock->addSecondsToTime(60);
+        _metricManager->timeChangedNotification();
+        // Busy-wait until the manager thread has caught up with the clock.
+        while (
+            uint64_t(_metricManager->getLastProcessedTime())
+                < _clock->getTimeInSeconds().getTime())
+        {
+            FastOS_Thread::Sleep(1);
+        }
+    }
+    LOG(info, "5 minute snapshot should have been taken. Adding put count");
+
+    thread0.put[LoadType::DEFAULT].count.inc(1);
+
+    const int pathCount = 2;
+    const char* paths[pathCount] = {
+        "/state/v1/metrics",
+        "/state/v1/metrics?consumer=status"
+    };
+
+    for (int i=0; i<pathCount; i++) {
+        framework::HttpUrlPath path(paths[i]);
+        std::ostringstream ost;
+        _stateReporter->reportStatus(ost, path);
+        std::string jsonData = ost.str();
+        //std::cerr << "\nMetrics:" << jsonData << "\n";
+        ASSERT_METRIC_GET_PUT(jsonData, 1.0, 0.0);
+    }
+}
+} // storage
diff --git a/storage/src/tests/storageserver/testvisitormessagesession.cpp b/storage/src/tests/storageserver/testvisitormessagesession.cpp
new file mode 100644
index 00000000000..e814f6cf229
--- /dev/null
+++ b/storage/src/tests/storageserver/testvisitormessagesession.cpp
@@ -0,0 +1,78 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <tests/storageserver/testvisitormessagesession.h>
+#include <vespa/storageframework/defaultimplementation/clock/realclock.h>
+
+namespace storage {
+
+TestVisitorMessageSession::~TestVisitorMessageSession()
+{
+}
+
+TestVisitorMessageSession::TestVisitorMessageSession(VisitorThread& t,
+ Visitor& v,
+ const mbus::Error& autoReplyError,
+ bool autoReply)
+ : _autoReplyError(autoReplyError),
+ _autoReply(autoReply),
+ thread(t),
+ visitor(v),
+ pendingCount(0)
+{
+}
+
+void
+TestVisitorMessageSession::reply(mbus::Reply::UP rep) {
+ {
+ vespalib::MonitorGuard guard(_waitMonitor);
+ pendingCount--;
+ }
+ thread.handleMessageBusReply(std::move(rep), visitor);
+}
+
+mbus::Result
+TestVisitorMessageSession::send(
+ std::unique_ptr<documentapi::DocumentMessage> message)
+{
+ vespalib::MonitorGuard guard(_waitMonitor);
+ if (_autoReply) {
+ pendingCount++;
+ mbus::Reply::UP rep = message->createReply();
+ rep->setMessage(mbus::Message::UP(message.release()));
+ if (_autoReplyError.getCode() == mbus::ErrorCode::NONE) {
+ reply(std::move(rep));
+ return mbus::Result();
+ } else {
+ return mbus::Result(_autoReplyError,
+ std::unique_ptr<mbus::Message>(message.release()));
+ }
+ } else {
+ pendingCount++;
+ sentMessages.push_back(
+ vespalib::LinkedPtr<documentapi::DocumentMessage>(
+ message.release()));
+ guard.broadcast();
+ return mbus::Result();
+ }
+}
+
+void
+TestVisitorMessageSession::waitForMessages(unsigned int msgCount) {
+ framework::defaultimplementation::RealClock clock;
+ framework::MilliSecTime endTime(
+ clock.getTimeInMillis() + framework::MilliSecTime(60 * 1000));
+
+ vespalib::MonitorGuard guard(_waitMonitor);
+ while (sentMessages.size() < msgCount) {
+ if (clock.getTimeInMillis() > endTime) {
+ throw vespalib::IllegalStateException(
+ vespalib::make_string("Timed out waiting for %u messages "
+ "in test visitor session", msgCount),
+ VESPA_STRLOC);
+ }
+ guard.wait(1000);
+ }
+};
+
+}
diff --git a/storage/src/tests/storageserver/testvisitormessagesession.h b/storage/src/tests/storageserver/testvisitormessagesession.h
new file mode 100644
index 00000000000..3ae6ccafb84
--- /dev/null
+++ b/storage/src/tests/storageserver/testvisitormessagesession.h
@@ -0,0 +1,79 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <deque>
+#include <vespa/storage/visiting/visitormessagesession.h>
+#include <vespa/storage/visiting/visitorthread.h>
+#include <vespa/documentapi/messagebus/messages/documentmessage.h>
+#include <vespa/storage/storageserver/priorityconverter.h>
+
+namespace storage {
+
+class TestVisitorMessageSession : public VisitorMessageSession
+{
+private:
+ vespalib::Monitor _waitMonitor;
+ mbus::Error _autoReplyError;
+ bool _autoReply;
+
+public:
+ typedef std::unique_ptr<TestVisitorMessageSession> UP;
+
+ VisitorThread& thread;
+ Visitor& visitor;
+ uint32_t pendingCount;
+
+ ~TestVisitorMessageSession();
+
+ std::deque<vespalib::LinkedPtr<documentapi::DocumentMessage> > sentMessages;
+
+ TestVisitorMessageSession(VisitorThread& t,
+ Visitor& v,
+ const mbus::Error& autoReplyError,
+ bool autoReply);
+
+ void reply(mbus::Reply::UP rep);
+
+ uint32_t pending() { return pendingCount; }
+
+ mbus::Result send(std::unique_ptr<documentapi::DocumentMessage> message);
+
+ void waitForMessages(unsigned int msgCount);
+
+ vespalib::Monitor& getMonitor() { return _waitMonitor; }
+};
+
+struct TestVisitorMessageSessionFactory : public VisitorMessageSessionFactory
+{
+ vespalib::Lock _accessLock;
+ std::vector<TestVisitorMessageSession*> _visitorSessions;
+ mbus::Error _autoReplyError;
+ bool _createAutoReplyVisitorSessions;
+ PriorityConverter _priConverter;
+
+ TestVisitorMessageSessionFactory(vespalib::stringref configId = "")
+ : _createAutoReplyVisitorSessions(false),
+ _priConverter(configId) {}
+
+ VisitorMessageSession::UP createSession(Visitor& v, VisitorThread& vt) {
+ vespalib::LockGuard lock(_accessLock);
+ TestVisitorMessageSession::UP session(
+ new TestVisitorMessageSession(
+ vt,
+ v,
+ _autoReplyError,
+ _createAutoReplyVisitorSessions));
+ _visitorSessions.push_back(session.get());
+ return VisitorMessageSession::UP(std::move(session));
+ }
+
+ documentapi::Priority::Value toDocumentPriority(uint8_t storagePriority) const
+ {
+ return _priConverter.toDocumentPriority(storagePriority);
+ }
+
+};
+
+} // storage
+
diff --git a/storage/src/tests/storageutil/.gitignore b/storage/src/tests/storageutil/.gitignore
new file mode 100644
index 00000000000..a080232d5f3
--- /dev/null
+++ b/storage/src/tests/storageutil/.gitignore
@@ -0,0 +1,13 @@
+*.So
+*.lo
+*.o
+.*.swp
+.config.log
+.depend
+.depend.NEW
+.deps
+.libs
+Makefile
+statefile*
+testrunner
+testrunner.core
diff --git a/storage/src/tests/storageutil/CMakeLists.txt b/storage/src/tests/storageutil/CMakeLists.txt
new file mode 100644
index 00000000000..a48895352e8
--- /dev/null
+++ b/storage/src/tests/storageutil/CMakeLists.txt
@@ -0,0 +1,10 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_library(storage_teststorageutil
+ SOURCES
+ functortest.cpp
+ charttest.cpp
+ palettetest.cpp
+ DEPENDS
+ AFTER
+ storage_storageconfig
+)
diff --git a/storage/src/tests/storageutil/charttest.cpp b/storage/src/tests/storageutil/charttest.cpp
new file mode 100644
index 00000000000..d9ce3d6f1b4
--- /dev/null
+++ b/storage/src/tests/storageutil/charttest.cpp
@@ -0,0 +1,66 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/storage/storageutil/piechart.h>
+
+#include <fstream>
+#include <vespa/vdstestlib/cppunit/macros.h>
+
+namespace storage {
+
+struct PieChartTest : public CppUnit::TestFixture
+{
+ void setUp() {}
+ void tearDown() {}
+
+ void testWriteHtmlFile();
+
+ CPPUNIT_TEST_SUITE(PieChartTest);
+ CPPUNIT_TEST(testWriteHtmlFile);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(PieChartTest);
+
+namespace {
+ void printHtmlFile(const std::string& filename, const PieChart& chart) {
+ std::ofstream out(filename.c_str());
+ out << "<html>\n"
+ << " <head>\n"
+ << " ";
+ PieChart::printHtmlHeadAdditions(out, " ");
+ out << "\n <title>Pie example</title>\n"
+ << " </head>\n"
+ << " <body>\n"
+ << " ";
+ chart.printCanvas(out, 500, 400);
+ out << "\n ";
+ chart.printScript(out, " ");
+ out << "\n </body>\n"
+ << "</html>\n";
+ out.close();
+ }
+}
+
+void
+PieChartTest::testWriteHtmlFile()
+{
+ {
+ PieChart chart("mypie");
+ chart.add(10, "put");
+ chart.add(20, "get");
+ chart.add(50, "free");
+
+ printHtmlFile("piefile.html", chart);
+ }
+ {
+ PieChart chart("mypie", PieChart::SCHEME_CUSTOM);
+ chart.add(10, "put", PieChart::RED);
+ chart.add(20, "get", PieChart::GREEN);
+ chart.add(50, "free", PieChart::BLUE);
+
+ printHtmlFile("piefile-customcols.html", chart);
+ }
+}
+
+} // storage
diff --git a/storage/src/tests/storageutil/functortest.cpp b/storage/src/tests/storageutil/functortest.cpp
new file mode 100644
index 00000000000..00b9f5450cb
--- /dev/null
+++ b/storage/src/tests/storageutil/functortest.cpp
@@ -0,0 +1,55 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <cppunit/extensions/HelperMacros.h>
+#include <list>
+#include <string>
+#include <algorithm>
+#include <vespa/storage/storageutil/functor.h>
+
+class Functor_Test : public CppUnit::TestFixture {
+ CPPUNIT_TEST_SUITE(Functor_Test);
+ CPPUNIT_TEST(testReplace);
+ CPPUNIT_TEST(testDeletePointer);
+ CPPUNIT_TEST_SUITE_END();
+
+public:
+
+protected:
+ void testReplace();
+ void testDeletePointer();
+};
+
+using namespace storage;
+using namespace std;
+
+CPPUNIT_TEST_SUITE_REGISTRATION(Functor_Test);
+
+void Functor_Test::testReplace()
+{
+ string source("this.is.a.string.with.many.dots.");
+ for_each(source.begin(), source.end(), Functor::Replace<char>('.', '_'));
+ CPPUNIT_ASSERT_EQUAL(string("this_is_a_string_with_many_dots_"), source);
+}
+
+namespace {
+
+ static int instanceCounter = 0;
+
+ class TestClass {
+ public:
+ TestClass() { instanceCounter++; }
+ ~TestClass() { instanceCounter--; }
+ };
+}
+
+void Functor_Test::testDeletePointer()
+{
+ list<TestClass*> mylist;
+ mylist.push_back(new TestClass());
+ mylist.push_back(new TestClass());
+ mylist.push_back(new TestClass());
+ CPPUNIT_ASSERT_EQUAL(3, instanceCounter);
+ for_each(mylist.begin(), mylist.end(), Functor::DeletePointer());
+ CPPUNIT_ASSERT_EQUAL(0, instanceCounter);
+}
diff --git a/storage/src/tests/storageutil/palettetest.cpp b/storage/src/tests/storageutil/palettetest.cpp
new file mode 100644
index 00000000000..ffc2dd091ee
--- /dev/null
+++ b/storage/src/tests/storageutil/palettetest.cpp
@@ -0,0 +1,33 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/storage/storageutil/palette.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+
+namespace storage {
+
+struct PaletteTest : public CppUnit::TestFixture {
+ void setUp() {}
+ void tearDown() {}
+
+ void testNormalUsage();
+
+ CPPUNIT_TEST_SUITE(PaletteTest);
+ CPPUNIT_TEST(testNormalUsage);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(PaletteTest);
+
+void
+PaletteTest::testNormalUsage()
+{
+ std::ofstream out("palette.html");
+ out << "<html><body>\n";
+ Palette palette(75);
+ palette.printHtmlTablePalette(out);
+ out << "</body></html>\n";
+ out.close();
+}
+
+} // storage
diff --git a/storage/src/tests/storageutil/recordflatfiletest.cpp b/storage/src/tests/storageutil/recordflatfiletest.cpp
new file mode 100644
index 00000000000..e08dd88dc67
--- /dev/null
+++ b/storage/src/tests/storageutil/recordflatfiletest.cpp
@@ -0,0 +1,314 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <cppunit/extensions/HelperMacros.h>
+#include <iostream>
+#include <string>
+#include <vespa/storage/storageutil/recordflatfile.h>
+
+using namespace document;
+using namespace storage;
+using namespace std;
+using namespace document;
+
+class RecordFlatFile_Test : public CppUnit::TestFixture {
+ CPPUNIT_TEST_SUITE(RecordFlatFile_Test);
+ CPPUNIT_TEST(testAdd);
+ CPPUNIT_TEST(testUpdate);
+ CPPUNIT_TEST(testRemove);
+ CPPUNIT_TEST(testExists);
+ CPPUNIT_TEST(testGetRecord);
+ CPPUNIT_TEST(testClear);
+ CPPUNIT_TEST(testSimpleUsage);
+ CPPUNIT_TEST(testValid);
+ CPPUNIT_TEST_SUITE_END();
+
+ string _testFile;
+ unsigned int _chunkSize;
+
+ void setupTestFile();
+
+public:
+ void setUp();
+
+ RecordFlatFile_Test(void)
+ : _testFile(),
+ _chunkSize(0)
+ {
+ }
+
+protected:
+ void testAdd();
+ void testUpdate();
+ void testRemove();
+ void testExists();
+ void testGetRecord();
+ void testClear();
+ void testSimpleUsage();
+ void testValid();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(RecordFlatFile_Test);
+
+namespace {
+
+ const bool debug = false;
+
+ class MyRecord {
+ private:
+ unsigned int _id;
+ unsigned int _value;
+ unsigned int _valid;
+
+ public:
+ MyRecord(void)
+ : _id(0u),
+ _value(0u),
+ _valid(0u)
+ {
+ }
+ MyRecord(unsigned int id, unsigned int value, bool valid = true)
+ : _id(id), _value(value), _valid(valid ? 0 : 0xFFFFFFFF) {}
+
+ const unsigned int& getId() const { return _id; }
+ unsigned int getValue() const { return _value; }
+ void setValue(unsigned int value) { _value = value; }
+ bool isValid() const { return (_valid == 0); }
+
+ bool operator==(const MyRecord& record) const {
+ return (_id == record._id && _value == record._value);
+ }
+ };
+
+ ostream& operator<<(ostream& out, MyRecord record) {
+ out << "MyRecord(" << record.getId() << ", " << record.getValue()
+ << ")";
+ return out;
+ }
+
+ class BlockMessage {
+ private:
+ string _name;
+ static unsigned int _indent;
+
+ public:
+ BlockMessage(const string& name) : _name(name) {
+ if (debug) {
+ for (unsigned int i=0; i<_indent; i++) cout << " ";
+ cout << "Block started: " << _name << "\n" << flush;
+ }
+ _indent++;
+ }
+ ~BlockMessage() {
+ _indent--;
+ if (debug) {
+ for (unsigned int i=0; i<_indent; i++) cout << " ";
+ cout << "Block completed: " << _name << "\n" << flush;
+ }
+ }
+ };
+
+ unsigned int BlockMessage::_indent(0);
+
+}
+
+void RecordFlatFile_Test::setUp() {
+ _testFile = "recordflatfile.testfile";
+ _chunkSize = 4;
+}
+
+void RecordFlatFile_Test::setupTestFile() {
+ BlockMessage message("setupTestFile()");
+ RecordFlatFile<MyRecord, unsigned int> flatfile(_testFile, _chunkSize);
+ flatfile.clear();
+ for (unsigned int i=1; i<=8; ++i) {
+ flatfile.add(MyRecord(i, 10+i));
+ }
+ CPPUNIT_ASSERT_EQUAL(8u, flatfile.getSize());
+ for (unsigned int i=1; i<=8; ++i) {
+ CPPUNIT_ASSERT_EQUAL(MyRecord(i, 10+i), *flatfile[i-1]);
+ }
+}
+
+
+void RecordFlatFile_Test::testAdd() {
+ BlockMessage message("testAdd()");
+ setupTestFile();
+ RecordFlatFile<MyRecord, unsigned int> flatfile(_testFile, _chunkSize);
+ flatfile.add(MyRecord(9, 19));
+ CPPUNIT_ASSERT_EQUAL(9u, flatfile.getSize());
+ CPPUNIT_ASSERT_EQUAL(MyRecord(1, 11), *flatfile[0]);
+ CPPUNIT_ASSERT_EQUAL(MyRecord(2, 12), *flatfile[1]);
+ CPPUNIT_ASSERT_EQUAL(MyRecord(7, 17), *flatfile[6]);
+ CPPUNIT_ASSERT_EQUAL(MyRecord(8, 18), *flatfile[7]);
+ CPPUNIT_ASSERT_EQUAL(MyRecord(9, 19), *flatfile[8]);
+}
+
+void RecordFlatFile_Test::testUpdate() {
+ BlockMessage message("testUpdate()");
+ setupTestFile();
+ RecordFlatFile<MyRecord, unsigned int> flatfile(_testFile, _chunkSize);
+ CPPUNIT_ASSERT(!flatfile.update(MyRecord(0, 20)));
+ CPPUNIT_ASSERT(flatfile.update(MyRecord(4, 19)));
+ CPPUNIT_ASSERT_EQUAL(8u, flatfile.getSize());
+ CPPUNIT_ASSERT_EQUAL(MyRecord(1, 11), *flatfile[0]);
+ CPPUNIT_ASSERT_EQUAL(MyRecord(3, 13), *flatfile[2]);
+ CPPUNIT_ASSERT_EQUAL(MyRecord(4, 19), *flatfile[3]);
+ CPPUNIT_ASSERT_EQUAL(MyRecord(5, 15), *flatfile[4]);
+ CPPUNIT_ASSERT_EQUAL(MyRecord(8, 18), *flatfile[7]);
+}
+
+void RecordFlatFile_Test::testRemove() {
+ BlockMessage message("testRemove()");
+ setupTestFile();
+ RecordFlatFile<MyRecord, unsigned int> flatfile(_testFile, _chunkSize);
+ flatfile.remove(3);
+ CPPUNIT_ASSERT_EQUAL(7u, flatfile.getSize());
+ CPPUNIT_ASSERT_EQUAL(MyRecord(1, 11), *flatfile[0]);
+ CPPUNIT_ASSERT_EQUAL(MyRecord(2, 12), *flatfile[1]);
+ CPPUNIT_ASSERT_EQUAL(MyRecord(8, 18), *flatfile[2]);
+ CPPUNIT_ASSERT_EQUAL(MyRecord(4, 14), *flatfile[3]);
+ CPPUNIT_ASSERT_EQUAL(MyRecord(5, 15), *flatfile[4]);
+ CPPUNIT_ASSERT_EQUAL(MyRecord(6, 16), *flatfile[5]);
+ CPPUNIT_ASSERT_EQUAL(MyRecord(7, 17), *flatfile[6]);
+}
+
+void RecordFlatFile_Test::testExists() {
+ BlockMessage message("testExists()");
+ setupTestFile();
+ RecordFlatFile<MyRecord, unsigned int> flatfile(_testFile, _chunkSize);
+ CPPUNIT_ASSERT(flatfile.exists(3));
+ CPPUNIT_ASSERT(flatfile.exists(1));
+ CPPUNIT_ASSERT(!flatfile.exists(11));
+ CPPUNIT_ASSERT(flatfile.exists(6));
+ CPPUNIT_ASSERT(flatfile.exists(5));
+ CPPUNIT_ASSERT(!flatfile.exists(0));
+}
+
+void RecordFlatFile_Test::testGetRecord() {
+ BlockMessage message("testGetRecord()");
+ setupTestFile();
+ RecordFlatFile<MyRecord, unsigned int> flatfile(_testFile, _chunkSize);
+ CPPUNIT_ASSERT_EQUAL(MyRecord(4, 14), *flatfile.getRecord(4));
+ CPPUNIT_ASSERT(flatfile.getRecord(0).get() == 0);
+}
+
+void RecordFlatFile_Test::testClear() {
+ try{
+ BlockMessage message("testClear()");
+ setupTestFile();
+ RecordFlatFile<MyRecord, unsigned int> flatfile(_testFile, _chunkSize);
+ flatfile.clear();
+ struct stat filestats;
+ CPPUNIT_ASSERT(stat(_testFile.c_str(), &filestats) == -1);
+ } catch (exception& e) {
+ cerr << "Caught exception '" << e.what() << "' in testClear()" << endl;
+ throw;
+ }
+}
+
+void RecordFlatFile_Test::testSimpleUsage()
+{
+ BlockMessage message("testSimpleUsage()");
+ RecordFlatFile<MyRecord, unsigned int> flatfile("recordflatfile.testfile");
+ flatfile.clear();
+
+ CPPUNIT_ASSERT_EQUAL(false, flatfile.exists(34u));
+ CPPUNIT_ASSERT_EQUAL((MyRecord*) 0, flatfile.getRecord(23u).get());
+
+ MyRecord record1(12, 54);
+ MyRecord record2(34, 62);
+
+ flatfile.add(record1);
+ flatfile.add(record2);
+
+ CPPUNIT_ASSERT_EQUAL(true, flatfile.exists(12u));
+ CPPUNIT_ASSERT_EQUAL((MyRecord*) 0, flatfile.getRecord(23u).get());
+ unique_ptr<MyRecord> result(flatfile.getRecord(34u));
+ CPPUNIT_ASSERT(result.get() != 0);
+ CPPUNIT_ASSERT_EQUAL(62u, result->getValue());
+
+ record2.setValue(67);
+ flatfile.update(record2);
+
+ unique_ptr<MyRecord> result2(flatfile.getRecord(34u));
+ CPPUNIT_ASSERT(result2.get() != 0);
+ CPPUNIT_ASSERT_EQUAL(67u, result2->getValue());
+
+ flatfile.remove(12);
+ CPPUNIT_ASSERT_EQUAL(false, flatfile.exists(12u));
+
+ flatfile.clear();
+ CPPUNIT_ASSERT_EQUAL(false, flatfile.exists(34u));
+}
+
+void RecordFlatFile_Test::testValid()
+{
+ BlockMessage message("testValid()");
+ RecordFlatFile<MyRecord, unsigned int> flatfile("recordflatfile.testfile");
+ flatfile.clear();
+
+ MyRecord record1(12, 54, true);
+ MyRecord record2(34, 62, false);
+ MyRecord record3(15, 69, true);
+ MyRecord record4(50, 93, false);
+
+ // Test that valid entries doesn't generate errors
+ flatfile.add(record1);
+ CPPUNIT_ASSERT(!flatfile.errorsFound());
+ CPPUNIT_ASSERT_EQUAL((size_t) 0, flatfile.getErrors().size());
+
+ // Test that invalid entries do
+ flatfile.add(record2);
+ CPPUNIT_ASSERT(flatfile.errorsFound());
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, flatfile.getErrors().size());
+ string expected("Adding invalid record '34' to file "
+ "recordflatfile.testfile.");
+ CPPUNIT_ASSERT_EQUAL(expected, *flatfile.getErrors().begin());
+
+ // Checking that errors are kept if not cleared
+ flatfile.add(record3);
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, flatfile.getErrors().size());
+ CPPUNIT_ASSERT_EQUAL(expected, *flatfile.getErrors().begin());
+
+ // Checking that clearing errors work
+ flatfile.clearErrors();
+ CPPUNIT_ASSERT_EQUAL((size_t) 0, flatfile.getErrors().size());
+
+ flatfile.add(record4);
+ flatfile.clearErrors();
+
+ // Checking that entries read in get method generates warning
+ unique_ptr<MyRecord> result(flatfile.getRecord(12));
+ CPPUNIT_ASSERT_EQUAL((size_t) 0, flatfile.getErrors().size());
+ result = flatfile.getRecord(15);
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, flatfile.getErrors().size());
+ expected = "Found corrupted entry in file recordflatfile.testfile";
+ CPPUNIT_ASSERT_EQUAL(expected, *flatfile.getErrors().begin());
+ flatfile.clearErrors();
+
+ // Checking that reading invalid entries generate exception
+ try{
+ result = flatfile.getRecord(50);
+ CPPUNIT_FAIL("Expected exception");
+ } catch (IoException& e) {
+ expected = "IoException(): Entry requested '50' is corrupted in file "
+ "recordflatfile.testfile at getRecord in";
+ string actual(e.what());
+ if (actual.size() > expected.size())
+ actual = actual.substr(0, expected.size());
+ CPPUNIT_ASSERT_EQUAL(expected, actual);
+ }
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, flatfile.getErrors().size());
+ expected = "Found corrupted entry in file recordflatfile.testfile";
+ CPPUNIT_ASSERT_EQUAL(expected, *flatfile.getErrors().begin());
+ flatfile.clearErrors();
+
+ // Check that you get warning when deleting if last entry is invalid
+ flatfile.remove(12);
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, flatfile.getErrors().size());
+ expected = "Last entry in file recordflatfile.testfile is invalid";
+ CPPUNIT_ASSERT_EQUAL(expected, *flatfile.getErrors().begin());
+
+ flatfile.clear();
+}
diff --git a/storage/src/tests/subscriptions/.gitignore b/storage/src/tests/subscriptions/.gitignore
new file mode 100644
index 00000000000..04a221b8052
--- /dev/null
+++ b/storage/src/tests/subscriptions/.gitignore
@@ -0,0 +1,8 @@
+*.So
+*.lo
+.depend
+.depend.NEW
+.deps
+.libs
+Makefile
+features.h
diff --git a/storage/src/tests/systemtests/.gitignore b/storage/src/tests/systemtests/.gitignore
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/storage/src/tests/systemtests/.gitignore
diff --git a/storage/src/tests/testhelper.cpp b/storage/src/tests/testhelper.cpp
new file mode 100644
index 00000000000..c4074aa1ac6
--- /dev/null
+++ b/storage/src/tests/testhelper.cpp
@@ -0,0 +1,175 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <tests/testhelper.h>
+
+#include <vespa/log/log.h>
+#include <vespa/vespalib/io/fileutil.h>
+
+LOG_SETUP(".testhelper");
+
+namespace storage {
+
+namespace {
+ bool useNewStorageCore() {
+ if ( // Unit test directory
+ vespalib::fileExists("use_new_storage_core") ||
+ // src/cpp directory
+ vespalib::fileExists("../use_new_storage_core") ||
+ // Top build directory where storage-HEAD remains
+ vespalib::fileExists("../../../../use_new_storage_core"))
+ {
+ std::cerr << "Using new storage core for unit tests\n";
+ return true;
+ }
+ return false;
+ }
+ bool newStorageCore(useNewStorageCore());
+}
+
+void addStorageDistributionConfig(vdstestlib::DirConfig& dc)
+{
+ vdstestlib::DirConfig::Config* config;
+ config = &dc.getConfig("stor-distribution", true);
+ config->clear();
+ config->set("group[1]");
+ config->set("group[0].name", "invalid");
+ config->set("group[0].index", "invalid");
+ config->set("group[0].nodes[50]");
+ config->set("redundancy", "2");
+
+ for (uint32_t i = 0; i < 50; i++) {
+ std::ostringstream key; key << "group[0].nodes[" << i << "].index";
+ std::ostringstream val; val << i;
+ config->set(key.str(), val.str());
+ }
+}
+
+vdstestlib::DirConfig getStandardConfig(bool storagenode) {
+ std::string clusterName("storage");
+ vdstestlib::DirConfig dc;
+ vdstestlib::DirConfig::Config* config;
+ config = &dc.addConfig("fleetcontroller");
+ config->set("cluster_name", clusterName);
+ config->set("index", "0");
+ config->set("zookeeper_server", "\"\"");
+ config->set("total_distributor_count", "10");
+ config->set("total_storage_count", "10");
+ config = &dc.addConfig("upgrading");
+ config = &dc.addConfig("load-type");
+ config = &dc.addConfig("bucket");
+ config = &dc.addConfig("messagebus");
+ config = &dc.addConfig("stor-prioritymapping");
+ config = &dc.addConfig("stor-bucketdbupdater");
+ config = &dc.addConfig("stor-bucket-init");
+ config = &dc.addConfig("metricsmanager");
+ config->set("consumer[1]");
+ config->set("consumer[0].name", "\"status\"");
+ config->set("consumer[0].addedmetrics[1]");
+ config->set("consumer[0].addedmetrics[0]", "\"*\"");
+ config = &dc.addConfig("stor-communicationmanager");
+ config->set("rpcport", "0");
+ config->set("mbusport", "0");
+ config = &dc.addConfig("stor-bucketdb");
+ config->set("chunklevel", "0");
+ config = &dc.addConfig("stor-distributormanager");
+ config->set("splitcount", "1000000");
+ config->set("splitsize", "1000000");
+ config->set("joincount", "0");
+ config->set("joinsize", "0");
+ config = &dc.addConfig("stor-opslogger");
+ config = &dc.addConfig("persistence");
+ config->set("abort_operations_with_changed_bucket_ownership", "true");
+ config = &dc.addConfig("stor-filestor");
+ // Easier to see what goes wrong with only 1 thread per disk.
+ config->set("minimum_file_meta_slots", "2");
+ config->set("minimum_file_header_block_size", "368");
+ config->set("minimum_file_size", "4096");
+ config->set("threads[1]");
+ config->set("threads[0].lowestpri 255");
+ config->set("dir_spread", "4");
+ config->set("dir_levels", "0");
+ config->set("use_new_core", newStorageCore ? "true" : "false");
+ config->set("maximum_versions_of_single_document_stored", "0");
+ //config->set("enable_slotfile_cache", "false");
+ // Unit tests typically use fake low time values, so don't complain
+ // about them or compact/delete them by default. Override in tests testing that
+ // behavior
+ config->set("time_future_limit", "5");
+ config->set("time_past_limit", "2000000000");
+ config->set("keep_remove_time_period", "2000000000");
+ config->set("revert_time_period", "2000000000");
+ // Don't want test to call exit()
+ config->set("fail_disk_after_error_count", "0");
+ config = &dc.addConfig("stor-bouncer");
+ config = &dc.addConfig("stor-integritychecker");
+ config = &dc.addConfig("stor-bucketmover");
+ config = &dc.addConfig("stor-messageforwarder");
+ config = &dc.addConfig("stor-server");
+ config->set("cluster_name", clusterName);
+ config->set("enable_dead_lock_detector", "false");
+ config->set("enable_dead_lock_detector_warnings", "false");
+ config->set("max_merges_per_node", "25");
+ config->set("max_merge_queue_size", "20");
+ config->set("root_folder",
+ (storagenode ? "vdsroot" : "vdsroot.distributor"));
+ config->set("is_distributor",
+ (storagenode ? "false" : "true"));
+ config = &dc.addConfig("stor-devices");
+ config->set("root_folder",
+ (storagenode ? "vdsroot" : "vdsroot.distributor"));
+ config = &dc.addConfig("stor-status");
+ config->set("httpport", "0");
+ config = &dc.addConfig("stor-visitor");
+ config->set("defaultdocblocksize", "8192");
+ // By default, need "old" behaviour of maxconcurrent
+ config->set("maxconcurrentvisitors_fixed", "4");
+ config->set("maxconcurrentvisitors_variable", "0");
+ config = &dc.addConfig("stor-visitordispatcher");
+ addFileConfig(dc, "documenttypes", "config-doctypes.cfg");
+ addStorageDistributionConfig(dc);
+ return dc;
+}
+
+void addSlobrokConfig(vdstestlib::DirConfig& dc,
+ const mbus::Slobrok& slobrok)
+{
+ std::ostringstream ost;
+ ost << "tcp/localhost:" << slobrok.port();
+ vdstestlib::DirConfig::Config* config;
+ config = &dc.getConfig("slobroks", true);
+ config->clear();
+ config->set("slobrok[1]");
+ config->set("slobrok[0].connectionspec", ost.str());
+}
+
+void addFileConfig(vdstestlib::DirConfig& dc,
+ const std::string& configDefName,
+ const std::string& fileName)
+{
+ vdstestlib::DirConfig::Config* config;
+ config = &dc.getConfig(configDefName, true);
+ config->clear();
+ std::ifstream in(fileName.c_str());
+ std::string line;
+ while (std::getline(in, line, '\n')) {
+ std::string::size_type pos = line.find(' ');
+ if (pos == std::string::npos) {
+ config->set(line);
+ } else {
+ config->set(line.substr(0, pos), line.substr(pos + 1));
+ }
+ }
+ in.close();
+}
+
+TestName::TestName(const std::string& n)
+ : name(n)
+{
+ LOG(debug, "Starting test %s", name.c_str());
+}
+
+TestName::~TestName() {
+ LOG(debug, "Done with test %s", name.c_str());
+}
+
+} // storage
diff --git a/storage/src/tests/testhelper.h b/storage/src/tests/testhelper.h
new file mode 100644
index 00000000000..be2c3e7ec66
--- /dev/null
+++ b/storage/src/tests/testhelper.h
@@ -0,0 +1,58 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+#include <vespa/vdstestlib/cppunit/dirconfig.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+
+
+#include <fstream>
+#include <vespa/fastos/fastos.h>
+#include <vespa/messagebus/testlib/slobrok.h>
+#include <sstream>
+
+#define ASSERT_REPLY_COUNT(count, dummylink) \
+ { \
+ std::ostringstream msgost; \
+ if ((dummylink).getNumReplies() != count) { \
+ for (uint32_t ijx=0; ijx<(dummylink).getNumReplies(); ++ijx) { \
+ msgost << (dummylink).getReply(ijx)->toString(true) << "\n"; \
+ } \
+ } \
+ CPPUNIT_ASSERT_EQUAL_MSG(msgost.str(), size_t(count), \
+ (dummylink).getNumReplies()); \
+ }
+#define ASSERT_COMMAND_COUNT(count, dummylink) \
+ { \
+ std::ostringstream msgost; \
+ if ((dummylink).getNumCommands() != count) { \
+ for (uint32_t ijx=0; ijx<(dummylink).getNumCommands(); ++ijx) { \
+ msgost << (dummylink).getCommand(ijx)->toString(true) << "\n"; \
+ } \
+ } \
+ CPPUNIT_ASSERT_EQUAL_MSG(msgost.str(), size_t(count), \
+ (dummylink).getNumCommands()); \
+ }
+
+namespace storage {
+
+void addFileConfig(vdstestlib::DirConfig& dc,
+ const std::string& configDefName,
+ const std::string& fileName);
+
+
+void addStorageDistributionConfig(vdstestlib::DirConfig& dc);
+
+vdstestlib::DirConfig getStandardConfig(bool storagenode);
+
+void addSlobrokConfig(vdstestlib::DirConfig& dc,
+ const mbus::Slobrok& slobrok);
+
+// Class used to print start and end of test. Enable debug when you want to see
+// which test creates what output or where we get stuck
+struct TestName {
+ std::string name;
+ TestName(const std::string& n);
+ ~TestName();
+};
+
+} // storage
+
diff --git a/storage/src/tests/testrunner.cpp b/storage/src/tests/testrunner.cpp
new file mode 100644
index 00000000000..5d8dc8d4c1f
--- /dev/null
+++ b/storage/src/tests/testrunner.cpp
@@ -0,0 +1,15 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <iostream>
+#include <vespa/log/log.h>
+#include <vespa/vdstestlib/cppunit/cppunittestrunner.h>
+
+LOG_SETUP("storagecppunittests");
+
+int
+main(int argc, char **argv)
+{
+ vdstestlib::CppUnitTestRunner testRunner;
+ return testRunner.run(argc, argv);
+}
diff --git a/storage/src/tests/visiting/.gitignore b/storage/src/tests/visiting/.gitignore
new file mode 100644
index 00000000000..184e5d1c936
--- /dev/null
+++ b/storage/src/tests/visiting/.gitignore
@@ -0,0 +1,12 @@
+*.So
+*.lo
+*.o
+.*.swp
+.config.log
+.depend
+.depend.NEW
+.deps
+.libs
+Makefile
+testrunner
+testrunner.core
diff --git a/storage/src/tests/visiting/CMakeLists.txt b/storage/src/tests/visiting/CMakeLists.txt
new file mode 100644
index 00000000000..60e130c003c
--- /dev/null
+++ b/storage/src/tests/visiting/CMakeLists.txt
@@ -0,0 +1,11 @@
+# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_library(storage_testvisiting
+ SOURCES
+ commandqueuetest.cpp
+ visitormanagertest.cpp
+ visitortest.cpp
+ memory_bounded_trace_test.cpp
+ DEPENDS
+ AFTER
+ storage_storageconfig
+)
diff --git a/storage/src/tests/visiting/commandqueuetest.cpp b/storage/src/tests/visiting/commandqueuetest.cpp
new file mode 100644
index 00000000000..5d6da5f7ea5
--- /dev/null
+++ b/storage/src/tests/visiting/commandqueuetest.cpp
@@ -0,0 +1,223 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/fastos/fastos.h>
+#include <vespa/storageframework/defaultimplementation/clock/fakeclock.h>
+#include <vespa/storage/visiting/commandqueue.h>
+#include <vespa/storageapi/message/visitor.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+
+using vespalib::string;
+
+namespace storage {
+
+// Unit tests for CommandQueue<api::CreateVisitorCommand>: ordering (FIFO and
+// priority), timeout-based release, priority-based release and erase-via-iterator.
+struct CommandQueueTest : public CppUnit::TestFixture
+{
+ void testFIFO();
+ void testFIFOWithPriorities();
+ void testReleaseOldest();
+ void testReleaseLowestPriority();
+ void testDeleteIterator();
+
+ CPPUNIT_TEST_SUITE(CommandQueueTest);
+ CPPUNIT_TEST(testFIFO);
+ CPPUNIT_TEST(testFIFOWithPriorities);
+ CPPUNIT_TEST(testReleaseOldest);
+ CPPUNIT_TEST(testReleaseLowestPriority);
+ CPPUNIT_TEST(testDeleteIterator);
+ CPPUNIT_TEST_SUITE_END();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(CommandQueueTest);
+
+namespace {
+ // Builds a CreateVisitorCommand whose document selection encodes the given
+ // name, queue timeout and priority ("<name> t=<timeout> p=<prio>"), so the
+ // tests can identify commands after they come back out of the queue.
+ std::shared_ptr<api::CreateVisitorCommand> getCommand(
+ const vespalib::stringref & name, int timeout,
+ uint8_t priority = 0)
+ {
+ vespalib::asciistream ost;
+ ost << name << " t=" << timeout << " p=" << static_cast<unsigned int>(priority);
+ // Piggyback name in document selection
+ std::shared_ptr<api::CreateVisitorCommand> cmd(
+ new api::CreateVisitorCommand("", "", ost.str()));
+ cmd->setQueueTimeout(timeout);
+ cmd->setPriority(priority);
+ return cmd;
+ }
+
+ // Returns the identifying string smuggled into the document selection above.
+ const vespalib::string &
+ getCommandString(const std::shared_ptr<api::CreateVisitorCommand>& cmd)
+ {
+ return cmd->getDocumentSelection();
+ }
+
+}
+
+// With uniform (default) priority, commands must be released in insertion order.
+void CommandQueueTest::testFIFO() {
+ framework::defaultimplementation::FakeClock clock;
+ CommandQueue<api::CreateVisitorCommand> queue(clock);
+ CPPUNIT_ASSERT(queue.empty());
+ // Use all default priorities, meaning what comes out should be in the same order
+ // as what went in
+ queue.add(getCommand("first", 1));
+ queue.add(getCommand("second", 10));
+ queue.add(getCommand("third", 5));
+ queue.add(getCommand("fourth", 0));
+ queue.add(getCommand("fifth", 3));
+ queue.add(getCommand("sixth", 14));
+ queue.add(getCommand("seventh", 7));
+
+ CPPUNIT_ASSERT(!queue.empty());
+ std::vector<std::shared_ptr<api::CreateVisitorCommand> > commands;
+ // Drain the queue; releaseNextCommand() yields a null pointer when empty.
+ for (;;) {
+ std::shared_ptr<api::CreateVisitorCommand> cmd(
+ queue.releaseNextCommand().first);
+ if (cmd.get() == 0) break;
+ commands.push_back(cmd);
+ }
+ CPPUNIT_ASSERT_EQUAL(size_t(7), commands.size());
+ CPPUNIT_ASSERT_EQUAL(string("first t=1 p=0"), getCommandString(commands[0]));
+ CPPUNIT_ASSERT_EQUAL(string("second t=10 p=0"), getCommandString(commands[1]));
+ CPPUNIT_ASSERT_EQUAL(string("third t=5 p=0"), getCommandString(commands[2]));
+ CPPUNIT_ASSERT_EQUAL(string("fourth t=0 p=0"), getCommandString(commands[3]));
+ CPPUNIT_ASSERT_EQUAL(string("fifth t=3 p=0"), getCommandString(commands[4]));
+ CPPUNIT_ASSERT_EQUAL(string("sixth t=14 p=0"), getCommandString(commands[5]));
+ CPPUNIT_ASSERT_EQUAL(string("seventh t=7 p=0"), getCommandString(commands[6]));
+}
+
+// Mixed priorities: lower priority value is released first; commands with the
+// same priority keep FIFO order among themselves. Also checks that
+// peekLowestPriorityCommand() tracks the highest-valued entry as items are added.
+void CommandQueueTest::testFIFOWithPriorities() {
+ framework::defaultimplementation::FakeClock clock;
+ CommandQueue<api::CreateVisitorCommand> queue(clock);
+ CPPUNIT_ASSERT(queue.empty());
+
+ queue.add(getCommand("first", 1, 10));
+ CPPUNIT_ASSERT_EQUAL(string("first t=1 p=10"), getCommandString(queue.peekLowestPriorityCommand()));
+ queue.add(getCommand("second", 10, 22));
+ queue.add(getCommand("third", 5, 9));
+ CPPUNIT_ASSERT_EQUAL(string("second t=10 p=22"), getCommandString(queue.peekLowestPriorityCommand()));
+ queue.add(getCommand("fourth", 0, 22));
+ queue.add(getCommand("fifth", 3, 22));
+ CPPUNIT_ASSERT_EQUAL(string("fifth t=3 p=22"), getCommandString(queue.peekLowestPriorityCommand()));
+ queue.add(getCommand("sixth", 14, 50));
+ queue.add(getCommand("seventh", 7, 0));
+
+ CPPUNIT_ASSERT_EQUAL(string("sixth t=14 p=50"), getCommandString(queue.peekLowestPriorityCommand()));
+
+ CPPUNIT_ASSERT(!queue.empty());
+ std::vector<std::shared_ptr<api::CreateVisitorCommand> > commands;
+ // Drain; also verify that peekNextCommand() agrees with what release returns.
+ for (;;) {
+ std::shared_ptr<api::CreateVisitorCommand> cmdPeek(queue.peekNextCommand());
+ std::shared_ptr<api::CreateVisitorCommand> cmd(queue.releaseNextCommand().first);
+ if (cmd.get() == 0 || cmdPeek != cmd) break;
+ commands.push_back(cmd);
+ }
+ CPPUNIT_ASSERT_EQUAL(size_t(7), commands.size());
+ CPPUNIT_ASSERT_EQUAL(string("seventh t=7 p=0"), getCommandString(commands[0]));
+ CPPUNIT_ASSERT_EQUAL(string("third t=5 p=9"), getCommandString(commands[1]));
+ CPPUNIT_ASSERT_EQUAL(string("first t=1 p=10"), getCommandString(commands[2]));
+ CPPUNIT_ASSERT_EQUAL(string("second t=10 p=22"), getCommandString(commands[3]));
+ CPPUNIT_ASSERT_EQUAL(string("fourth t=0 p=22"), getCommandString(commands[4]));
+ CPPUNIT_ASSERT_EQUAL(string("fifth t=3 p=22"), getCommandString(commands[5]));
+ CPPUNIT_ASSERT_EQUAL(string("sixth t=14 p=50"), getCommandString(commands[6]));
+}
+
+// releaseTimedOut() must return exactly the commands whose queue timeout
+// (seconds) has expired on the fake clock, ordered by timeout, and leave the
+// rest queued.
+void CommandQueueTest::testReleaseOldest() {
+ framework::defaultimplementation::FakeClock clock(framework::defaultimplementation::FakeClock::FAKE_ABSOLUTE);
+ CommandQueue<api::CreateVisitorCommand> queue(clock);
+ CPPUNIT_ASSERT(queue.empty());
+ queue.add(getCommand("first", 10));
+ queue.add(getCommand("second", 100));
+ queue.add(getCommand("third", 1000));
+ queue.add(getCommand("fourth", 5));
+ queue.add(getCommand("fifth", 3000));
+ queue.add(getCommand("sixth", 400));
+ queue.add(getCommand("seventh", 700));
+ CPPUNIT_ASSERT_EQUAL(7u, queue.size());
+
+ typedef CommandQueue<api::CreateVisitorCommand>::CommandEntry CommandEntry;
+ // No time has passed yet, so nothing should have timed out.
+ std::list<CommandEntry> timedOut(queue.releaseTimedOut());
+ CPPUNIT_ASSERT(timedOut.empty());
+ // Advance 400 seconds; timeouts of 5, 10, 100 and 400 are now expired.
+ clock.addMilliSecondsToTime(400 * 1000);
+ timedOut = queue.releaseTimedOut();
+ CPPUNIT_ASSERT_EQUAL(size_t(4), timedOut.size());
+ std::ostringstream ost;
+ for (std::list<CommandEntry>::const_iterator it = timedOut.begin();
+ it != timedOut.end(); ++it)
+ {
+ ost << getCommandString(it->_command) << "\n";
+ }
+ CPPUNIT_ASSERT_EQUAL(std::string(
+ "fourth t=5 p=0\n"
+ "first t=10 p=0\n"
+ "second t=100 p=0\n"
+ "sixth t=400 p=0\n"), ost.str());
+ CPPUNIT_ASSERT_EQUAL(3u, queue.size());
+}
+
+// releaseLowestPriorityCommand() must drain the queue from the highest
+// priority value downwards (i.e. least urgent first); equal priorities come
+// out in reverse insertion order, as asserted below.
+void CommandQueueTest::testReleaseLowestPriority() {
+ framework::defaultimplementation::FakeClock clock;
+ CommandQueue<api::CreateVisitorCommand> queue(clock);
+ CPPUNIT_ASSERT(queue.empty());
+
+ queue.add(getCommand("first", 1, 10));
+ queue.add(getCommand("second", 10, 22));
+ queue.add(getCommand("third", 5, 9));
+ queue.add(getCommand("fourth", 0, 22));
+ queue.add(getCommand("fifth", 3, 22));
+ queue.add(getCommand("sixth", 14, 50));
+ queue.add(getCommand("seventh", 7, 0));
+ CPPUNIT_ASSERT_EQUAL(7u, queue.size());
+
+ std::vector<std::shared_ptr<api::CreateVisitorCommand> > commands;
+ // Drain; peekLowestPriorityCommand() must agree with what release returns.
+ for (;;) {
+ std::shared_ptr<api::CreateVisitorCommand> cmdPeek(queue.peekLowestPriorityCommand());
+ std::pair<std::shared_ptr<api::CreateVisitorCommand>, uint64_t> cmd(
+ queue.releaseLowestPriorityCommand());
+ if (cmd.first.get() == 0 || cmdPeek != cmd.first) break;
+ commands.push_back(cmd.first);
+ }
+ CPPUNIT_ASSERT_EQUAL(size_t(7), commands.size());
+ CPPUNIT_ASSERT_EQUAL(string("sixth t=14 p=50"), getCommandString(commands[0]));
+ CPPUNIT_ASSERT_EQUAL(string("fifth t=3 p=22"), getCommandString(commands[1]));
+ CPPUNIT_ASSERT_EQUAL(string("fourth t=0 p=22"), getCommandString(commands[2]));
+ CPPUNIT_ASSERT_EQUAL(string("second t=10 p=22"), getCommandString(commands[3]));
+ CPPUNIT_ASSERT_EQUAL(string("first t=1 p=10"), getCommandString(commands[4]));
+ CPPUNIT_ASSERT_EQUAL(string("third t=5 p=9"), getCommandString(commands[5]));
+ CPPUNIT_ASSERT_EQUAL(string("seventh t=7 p=0"), getCommandString(commands[6]));
+}
+
+// Erasing via a queue iterator must remove exactly that element ("third",
+// the one the iterator points at after two increments) while preserving the
+// order of the remaining commands.
+void CommandQueueTest::testDeleteIterator() {
+ framework::defaultimplementation::FakeClock clock;
+ CommandQueue<api::CreateVisitorCommand> queue(clock);
+ CPPUNIT_ASSERT(queue.empty());
+ queue.add(getCommand("first", 10));
+ queue.add(getCommand("second", 100));
+ queue.add(getCommand("third", 1000));
+ queue.add(getCommand("fourth", 5));
+ queue.add(getCommand("fifth", 3000));
+ queue.add(getCommand("sixth", 400));
+ queue.add(getCommand("seventh", 700));
+ CPPUNIT_ASSERT_EQUAL(7u, queue.size());
+
+ CommandQueue<api::CreateVisitorCommand>::iterator it = queue.begin();
+ ++it; ++it;
+ queue.erase(it); // removes the third inserted command
+ CPPUNIT_ASSERT_EQUAL(6u, queue.size());
+
+ std::vector<std::shared_ptr<api::CreateVisitorCommand> > cmds;
+ for (;;) {
+ std::shared_ptr<api::CreateVisitorCommand> cmd(
+ std::dynamic_pointer_cast<api::CreateVisitorCommand>(
+ queue.releaseNextCommand().first));
+ if (cmd.get() == 0) break;
+ cmds.push_back(cmd);
+ }
+ CPPUNIT_ASSERT_EQUAL(size_t(6), cmds.size());
+ CPPUNIT_ASSERT_EQUAL(string("first t=10 p=0"), getCommandString(cmds[0]));
+ CPPUNIT_ASSERT_EQUAL(string("second t=100 p=0"), getCommandString(cmds[1]));
+ CPPUNIT_ASSERT_EQUAL(string("fourth t=5 p=0"), getCommandString(cmds[2]));
+ CPPUNIT_ASSERT_EQUAL(string("fifth t=3000 p=0"), getCommandString(cmds[3]));
+ CPPUNIT_ASSERT_EQUAL(string("sixth t=400 p=0"), getCommandString(cmds[4]));
+ CPPUNIT_ASSERT_EQUAL(string("seventh t=700 p=0"), getCommandString(cmds[5]));
+}
+
+}
+
diff --git a/storage/src/tests/visiting/memory_bounded_trace_test.cpp b/storage/src/tests/visiting/memory_bounded_trace_test.cpp
new file mode 100644
index 00000000000..85eae12fc34
--- /dev/null
+++ b/storage/src/tests/visiting/memory_bounded_trace_test.cpp
@@ -0,0 +1,131 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/storage/visiting/memory_bounded_trace.h>
+
+namespace storage {
+
+// Tests for MemoryBoundedTrace: approximate memory accounting, move-out
+// semantics, the hard upper bound on accepted trace nodes, and the
+// "omitted trees" stats node appended when traces are dropped.
+class MemoryBoundedTraceTest : public CppUnit::TestFixture
+{
+ CPPUNIT_TEST_SUITE(MemoryBoundedTraceTest);
+ CPPUNIT_TEST(noMemoryReportedUsedWhenEmpty);
+ CPPUNIT_TEST(memoryUsedIsStringLengthForLeafNode);
+ CPPUNIT_TEST(memoryUsedIsAccumulatedRecursivelyForNonLeafNodes);
+ CPPUNIT_TEST(traceNodesCanBeMovedAndImplicitlyCleared);
+ CPPUNIT_TEST(movedTraceTreeIsMarkedAsStrict);
+ CPPUNIT_TEST(canNotAddMoreNodesWhenMemoryUsedExceedsUpperBound);
+ CPPUNIT_TEST(movedTreeIncludesStatsNodeWhenNodesOmitted);
+ CPPUNIT_TEST_SUITE_END();
+
+public:
+ void noMemoryReportedUsedWhenEmpty();
+ void memoryUsedIsStringLengthForLeafNode();
+ void memoryUsedIsAccumulatedRecursivelyForNonLeafNodes();
+ void traceNodesCanBeMovedAndImplicitlyCleared();
+ void movedTraceTreeIsMarkedAsStrict();
+ void canNotAddMoreNodesWhenMemoryUsedExceedsUpperBound();
+ void movedTreeIncludesStatsNodeWhenNodesOmitted();
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(MemoryBoundedTraceTest);
+
+// A freshly constructed trace with no nodes added reports zero memory used.
+void
+MemoryBoundedTraceTest::noMemoryReportedUsedWhenEmpty()
+{
+ MemoryBoundedTrace trace(100);
+ CPPUNIT_ASSERT_EQUAL(size_t(0), trace.getApproxMemoryUsed());
+}
+
+// For a single leaf node, the approximate memory used equals the length of
+// its note string ("hello world" -> 11 bytes).
+void
+MemoryBoundedTraceTest::memoryUsedIsStringLengthForLeafNode()
+{
+ MemoryBoundedTrace trace(100);
+ CPPUNIT_ASSERT(trace.add(mbus::TraceNode("hello world", 0)));
+ CPPUNIT_ASSERT_EQUAL(size_t(11), trace.getApproxMemoryUsed());
+}
+
+// For a subtree, memory used is the sum over all child note strings
+// (11 + 12 = 23 bytes here).
+void
+MemoryBoundedTraceTest::memoryUsedIsAccumulatedRecursivelyForNonLeafNodes()
+{
+ MemoryBoundedTrace trace(100);
+ mbus::TraceNode innerNode;
+ innerNode.addChild("hello world");
+ innerNode.addChild("goodbye moon");
+ CPPUNIT_ASSERT(trace.add(innerNode));
+ CPPUNIT_ASSERT_EQUAL(size_t(23), trace.getApproxMemoryUsed());
+}
+
+// moveTraceTo() transfers the accumulated nodes to the target and resets the
+// trace: memory drops to zero and a second move yields no children.
+void
+MemoryBoundedTraceTest::traceNodesCanBeMovedAndImplicitlyCleared()
+{
+ MemoryBoundedTrace trace(100);
+ CPPUNIT_ASSERT(trace.add(mbus::TraceNode("hello world", 0)));
+ mbus::TraceNode target;
+ trace.moveTraceTo(target);
+ CPPUNIT_ASSERT_EQUAL(uint32_t(1), target.getNumChildren());
+ CPPUNIT_ASSERT_EQUAL(size_t(0), trace.getApproxMemoryUsed());
+
+ // Moving again after the implicit clear must transfer nothing.
+ mbus::TraceNode emptinessCheck;
+ trace.moveTraceTo(emptinessCheck);
+ CPPUNIT_ASSERT_EQUAL(uint32_t(0), emptinessCheck.getNumChildren());
+}
+
+/**
+ * We want trace subtrees to be strictly ordered so that the message about
+ * omitted traces will remain soundly as the last ordered node. There is no
+ * particular performance reason for not having strict mode enabled to the
+ * best of my knowledge, since the internal backing data structure is an
+ * ordered vector anyhow.
+ */
+// See the rationale comment above: the moved-out subtree must be strict so
+// the "omitted traces" message stays ordered last.
+void
+MemoryBoundedTraceTest::movedTraceTreeIsMarkedAsStrict()
+{
+ MemoryBoundedTrace trace(100);
+ CPPUNIT_ASSERT(trace.add(mbus::TraceNode("hello world", 0)));
+ mbus::TraceNode target;
+ trace.moveTraceTo(target);
+ CPPUNIT_ASSERT_EQUAL(uint32_t(1), target.getNumChildren());
+ CPPUNIT_ASSERT(target.getChild(0).isStrict());
+}
+
+// Once the accounted memory exceeds the configured bound, add() returns false
+// and the rejected node is not retained (memory stays at 11 bytes).
+void
+MemoryBoundedTraceTest::canNotAddMoreNodesWhenMemoryUsedExceedsUpperBound()
+{
+ // Note: we allow one complete node tree to exceed the bounds, but as soon
+ // as the bound is exceeded no further nodes can be added.
+ MemoryBoundedTrace trace(10);
+ CPPUNIT_ASSERT(trace.add(mbus::TraceNode("hello world", 0)));
+ CPPUNIT_ASSERT_EQUAL(size_t(11), trace.getApproxMemoryUsed());
+
+ CPPUNIT_ASSERT(!trace.add(mbus::TraceNode("the quick red fox runs across "
+ "the freeway", 0)));
+ CPPUNIT_ASSERT_EQUAL(size_t(11), trace.getApproxMemoryUsed());
+
+ mbus::TraceNode target;
+ trace.moveTraceTo(target);
+ // Twice nested node (root -> added trace tree -> leaf with txt).
+ CPPUNIT_ASSERT_EQUAL(uint32_t(1), target.getNumChildren());
+ CPPUNIT_ASSERT(target.getChild(0).getNumChildren() >= 1);
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("hello world"),
+ target.getChild(0).getChild(0).getNote());
+}
+
+// When nodes were rejected for exceeding the bound, the moved-out tree must
+// carry a trailing stats node reporting how many trees/bytes were omitted.
+void
+MemoryBoundedTraceTest::movedTreeIncludesStatsNodeWhenNodesOmitted()
+{
+ MemoryBoundedTrace trace(5);
+ CPPUNIT_ASSERT(trace.add(mbus::TraceNode("abcdef", 0)));
+ CPPUNIT_ASSERT(!trace.add(mbus::TraceNode("ghijkjlmn", 0)));
+
+ mbus::TraceNode target;
+ trace.moveTraceTo(target);
+ CPPUNIT_ASSERT_EQUAL(uint32_t(1), target.getNumChildren());
+ CPPUNIT_ASSERT_EQUAL(uint32_t(2), target.getChild(0).getNumChildren());
+ vespalib::string expected("Trace too large; omitted 1 subsequent trace "
+ "trees containing a total of 9 bytes");
+ CPPUNIT_ASSERT_EQUAL(expected, target.getChild(0).getChild(1).getNote());
+}
+
+} // storage
+
diff --git a/storage/src/tests/visiting/visitormanagertest.cpp b/storage/src/tests/visiting/visitormanagertest.cpp
new file mode 100644
index 00000000000..d782abf7d54
--- /dev/null
+++ b/storage/src/tests/visiting/visitormanagertest.cpp
@@ -0,0 +1,1172 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/document/datatype/datatype.h>
+#include <vespa/document/fieldvalue/intfieldvalue.h>
+#include <vespa/document/fieldvalue/stringfieldvalue.h>
+#include <vespa/document/fieldvalue/rawfieldvalue.h>
+#include <vespa/log/log.h>
+#include <vespa/storageapi/message/datagram.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storageapi/message/visitor.h>
+#include <vector>
+#include <vespa/storage/persistence/filestorage/filestormanager.h>
+#include <vespa/storage/visiting/visitormanager.h>
+#include <vespa/storageframework/defaultimplementation/clock/realclock.h>
+#include <tests/common/teststorageapp.h>
+#include <tests/common/testhelper.h>
+#include <tests/common/dummystoragelink.h>
+#include <tests/storageserver/testvisitormessagesession.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/vdslib/container/visitorordering.h>
+#include <vespa/documentapi/messagebus/messages/multioperationmessage.h>
+#include <vespa/documentapi/messagebus/messages/putdocumentmessage.h>
+#include <vespa/documentapi/messagebus/messages/removedocumentmessage.h>
+
+
+LOG_SETUP(".visitormanagertest");
+
+namespace storage {
+namespace {
+ typedef std::vector<api::StorageMessage::SP> msg_ptr_vector;
+}
+
+// End-to-end tests for VisitorManager wired into a dummy storage chain
+// (DummyStorageLink -> VisitorManager -> FileStorManager) with a test
+// message-session factory capturing the visitor's outbound documentapi traffic.
+class VisitorManagerTest : public CppUnit::TestFixture
+{
+private:
+ CPPUNIT_TEST_SUITE(VisitorManagerTest);
+ CPPUNIT_TEST(testNormalUsage);
+ CPPUNIT_TEST(testResending);
+ CPPUNIT_TEST(testVisitEmptyBucket);
+ CPPUNIT_TEST(testMultiBucketVisit);
+ CPPUNIT_TEST(testNoBuckets);
+ CPPUNIT_TEST(testVisitPutsAndRemoves);
+ CPPUNIT_TEST(testVisitWithTimeframeAndSelection);
+ CPPUNIT_TEST(testVisitWithTimeframeAndBogusSelection);
+ CPPUNIT_TEST(testVisitorCallbacks);
+ CPPUNIT_TEST(testVisitorCleanup);
+ CPPUNIT_TEST(testAbortOnFailedVisitorInfo);
+ CPPUNIT_TEST(testAbortOnFieldPathError);
+ CPPUNIT_TEST(testVisitorQueueTimeout);
+ CPPUNIT_TEST(testVisitorProcessingTimeout);
+ CPPUNIT_TEST(testPrioritizedVisitorQueing);
+ CPPUNIT_TEST(testPrioritizedMaxConcurrentVisitors);
+ CPPUNIT_TEST(testVisitorQueingZeroQueueSize);
+ CPPUNIT_TEST(testHitCounter);
+ CPPUNIT_TEST(testStatusPage);
+ CPPUNIT_TEST_SUITE_END();
+
+ static uint32_t docCount; // number of documents seeded by initializeTest()
+ std::vector<document::Document::SP > _documents; // the seeded documents
+ std::unique_ptr<TestVisitorMessageSessionFactory> _messageSessionFactory;
+ std::unique_ptr<TestServiceLayerApp> _node;
+ std::unique_ptr<DummyStorageLink> _top; // top of the storage chain under test
+ VisitorManager* _manager; // owned by _top's chain, not by this pointer
+
+public:
+ VisitorManagerTest() : _node() {}
+
+ // Not using setUp since can't throw exception out of it.
+ void initializeTest();
+ void addSomeRemoves(bool removeAll = false);
+ void tearDown();
+ TestVisitorMessageSession& getSession(uint32_t n);
+ uint64_t verifyCreateVisitorReply(
+ api::ReturnCode::Result expectedResult,
+ int checkStatsDocsVisited = -1,
+ int checkStatsBytesVisited = -1);
+ void getMessagesAndReply(
+ int expectedCount,
+ TestVisitorMessageSession& session,
+ std::vector<document::Document::SP >& docs,
+ std::vector<document::DocumentId>& docIds,
+ api::ReturnCode::Result returnCode = api::ReturnCode::OK,
+ documentapi::Priority::Value priority = documentapi::Priority::PRI_NORMAL_4);
+ uint32_t getMatchingDocuments(std::vector<document::Document::SP >& docs);
+
+ void testNormalUsage();
+ void testResending();
+ void testVisitEmptyBucket();
+ void testMultiBucketVisit();
+ void testNoBuckets();
+ void testVisitPutsAndRemoves();
+ void testVisitWithTimeframeAndSelection();
+ void testVisitWithTimeframeAndBogusSelection();
+ void testVisitorCallbacks();
+ void testVisitorCleanup();
+ void testAbortOnFailedVisitorInfo();
+ void testAbortOnFieldPathError();
+ void testVisitorQueueTimeout();
+ void testVisitorProcessingTimeout();
+ void testPrioritizedVisitorQueing();
+ void testPrioritizedMaxConcurrentVisitors();
+ void testVisitorQueingZeroQueueSize();
+ void testHitCounter();
+ void testStatusPage();
+};
+
+// Number of documents seeded into the node by initializeTest().
+uint32_t VisitorManagerTest::docCount = 10;
+
+CPPUNIT_TEST_SUITE_REGISTRATION(VisitorManagerTest);
+
+// Builds the test fixture: configures a single visitor thread, wires up the
+// storage chain (DummyStorageLink -> VisitorManager -> FileStorManager) on a
+// dummy persistence provider, then seeds 10 buckets with docCount documents
+// so the visitor tests have data to traverse. Throws on setup failure, which
+// is why this is an explicit method rather than CppUnit's setUp().
+void
+VisitorManagerTest::initializeTest()
+{
+ LOG(debug, "Initializing test");
+ vdstestlib::DirConfig config(getStandardConfig(true));
+ config.getConfig("stor-visitor").set("visitorthreads", "1");
+
+ try {
+ _messageSessionFactory.reset(
+ new TestVisitorMessageSessionFactory(config.getConfigId()));
+ _node.reset(
+ new TestServiceLayerApp(config.getConfigId()));
+ _node->setupDummyPersistence();
+ _node->getStateUpdater().setClusterState(
+ lib::ClusterState::CSP(
+ new lib::ClusterState("storage:1 distributor:1")));
+ _top.reset(new DummyStorageLink());
+ // _manager is a borrowed pointer; the chain owns the link objects.
+ _top->push_back(std::unique_ptr<StorageLink>(_manager
+ = new VisitorManager(
+ config.getConfigId(), _node->getComponentRegister(),
+ *_messageSessionFactory)));
+ _top->push_back(std::unique_ptr<StorageLink>(new FileStorManager(
+ config.getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
+ _manager->setTimeBetweenTicks(10);
+ _top->open();
+ } catch (config::InvalidConfigException& e) {
+ fprintf(stderr, "%s\n", e.what());
+ }
+ // Adding some documents so database isn't empty
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 0);
+ std::string content(
+ "To be, or not to be: that is the question:\n"
+ "Whether 'tis nobler in the mind to suffer\n"
+ "The slings and arrows of outrageous fortune,\n"
+ "Or to take arms against a sea of troubles,\n"
+ "And by opposing end them? To die: to sleep;\n"
+ "No more; and by a sleep to say we end\n"
+ "The heart-ache and the thousand natural shocks\n"
+ "That flesh is heir to, 'tis a consummation\n"
+ "Devoutly to be wish'd. To die, to sleep;\n"
+ "To sleep: perchance to dream: ay, there's the rub;\n"
+ "For in that sleep of death what dreams may come\n"
+ "When we have shuffled off this mortal coil,\n"
+ "Must give us pause: there's the respect\n"
+ "That makes calamity of so long life;\n"
+ "For who would bear the whips and scorns of time,\n"
+ "The oppressor's wrong, the proud man's contumely,\n"
+ "The pangs of despised love, the law's delay,\n"
+ "The insolence of office and the spurns\n"
+ "That patient merit of the unworthy takes,\n"
+ "When he himself might his quietus make\n"
+ "With a bare bodkin? who would fardels bear,\n"
+ "To grunt and sweat under a weary life,\n"
+ "But that the dread of something after death,\n"
+ "The undiscover'd country from whose bourn\n"
+ "No traveller returns, puzzles the will\n"
+ "And makes us rather bear those ills we have\n"
+ "Than fly to others that we know not of?\n"
+ "Thus conscience does make cowards of us all;\n"
+ "And thus the native hue of resolution\n"
+ "Is sicklied o'er with the pale cast of thought,\n"
+ "And enterprises of great pith and moment\n"
+ "With this regard their currents turn awry,\n"
+ "And lose the name of action. - Soft you now!\n"
+ "The fair Ophelia! Nymph, in thy orisons\n"
+ "Be all my sins remember'd.\n");
+ // Create docCount documents spread over 10 userdoc locations (i % 10).
+ for (uint32_t i=0; i<docCount; ++i) {
+ std::ostringstream uri;
+ uri << "userdoc:test:" << i % 10 << ":http://www.ntnu.no/"
+ << i << ".html";
+
+ _documents.push_back(document::Document::SP(
+ _node->getTestDocMan().createDocument(content, uri.str())));
+ const document::DocumentType& type(_documents.back()->getType());
+ _documents.back()->setValue(type.getField("headerval"),
+ document::IntFieldValue(i % 4));
+ }
+ // Create the 10 buckets and register them in the bucket database on disk 0.
+ for (uint32_t i=0; i<10; ++i) {
+ document::BucketId bid(16, i);
+
+ std::shared_ptr<api::CreateBucketCommand> cmd(
+ new api::CreateBucketCommand(bid));
+ cmd->setAddress(address);
+ cmd->setSourceIndex(0);
+ _top->sendDown(cmd);
+ _top->waitForMessages(1, 60);
+ _top->reset();
+
+ StorBucketDatabase::WrappedEntry entry(
+ _node->getStorageBucketDatabase().get(bid, "",
+ StorBucketDatabase::CREATE_IF_NONEXISTING));
+ entry->disk = 0;
+ entry.write();
+ }
+ // Put every document and verify each put succeeds.
+ for (uint32_t i=0; i<docCount; ++i) {
+ document::BucketId bid(16, i);
+
+ std::shared_ptr<api::PutCommand> cmd(
+ new api::PutCommand(bid, _documents[i], i+1));
+ cmd->setAddress(address);
+ _top->sendDown(cmd);
+ _top->waitForMessages(1, 60);
+ const msg_ptr_vector replies = _top->getRepliesOnce();
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, replies.size());
+ std::shared_ptr<api::PutReply> reply(
+ std::dynamic_pointer_cast<api::PutReply>(
+ replies[0]));
+ CPPUNIT_ASSERT(reply.get());
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::OK),
+ reply->getResult());
+ }
+ LOG(debug, "Done initializing test");
+}
+
+// Sends Remove commands for either every document (removeAll) or every fourth
+// one, verifying each remove succeeds. Used to populate buckets with remove
+// entries for the visit-removes tests.
+void
+VisitorManagerTest::addSomeRemoves(bool removeAll)
+{
+ framework::defaultimplementation::FakeClock clock;
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 0);
+ for (uint32_t i=0; i<docCount; i += (removeAll ? 1 : 4)) {
+ // Add it to the database
+ document::BucketId bid(16, i % 10);
+ std::shared_ptr<api::RemoveCommand> cmd(
+ new api::RemoveCommand(
+ bid, _documents[i]->getId(), clock.getTimeInMicros().getTime() + docCount + i + 1));
+ cmd->setAddress(address);
+ _top->sendDown(cmd);
+ _top->waitForMessages(1, 60);
+ const msg_ptr_vector replies = _top->getRepliesOnce();
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, replies.size());
+ std::shared_ptr<api::RemoveReply> reply(
+ std::dynamic_pointer_cast<api::RemoveReply>(
+ replies[0]));
+ CPPUNIT_ASSERT(reply.get());
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::OK),
+ reply->getResult());
+ }
+}
+
+// Closes and flushes the storage chain, then releases all fixture state.
+// Teardown order matters: the chain (_top) must go before the node it uses.
+void
+VisitorManagerTest::tearDown()
+{
+ if (_top.get() != 0) {
+ _top->close();
+ _top->flush();
+ _top.reset(0);
+ }
+ _node.reset(0);
+ _messageSessionFactory.reset(0);
+ _manager = 0; // owned by the chain just destroyed; clear the borrowed pointer
+}
+
+// Polls (10 ms interval, 30 s deadline) until the factory has created visitor
+// session n, i.e. until the n'th visitor has actually started. Throws
+// IllegalStateException on timeout.
+TestVisitorMessageSession&
+VisitorManagerTest::getSession(uint32_t n)
+{
+ // Wait until we have started the visitor
+ const std::vector<TestVisitorMessageSession*>& sessions(
+ _messageSessionFactory->_visitorSessions);
+ framework::defaultimplementation::RealClock clock;
+ framework::MilliSecTime endTime(
+ clock.getTimeInMillis() + framework::MilliSecTime(30 * 1000));
+ while (true) {
+ {
+ // Session vector is mutated by visitor threads; guard the read.
+ vespalib::LockGuard lock(_messageSessionFactory->_accessLock);
+ if (sessions.size() > n) {
+ return *sessions[n];
+ }
+ }
+ if (clock.getTimeInMillis() > endTime) {
+ throw vespalib::IllegalStateException(
+ "Timed out waiting for visitor session", VESPA_STRLOC);
+ }
+ FastOS_Thread::Sleep(10);
+ }
+ throw std::logic_error("unreachable");
+}
+
+// Waits for expectedCount messages on the session, collecting documents from
+// put messages and ids from remove messages, verifying each message's
+// priority, and replying to each (with an error if result != OK).
+void
+VisitorManagerTest::getMessagesAndReply(
+ int expectedCount,
+ TestVisitorMessageSession& session,
+ std::vector<document::Document::SP >& docs,
+ std::vector<document::DocumentId>& docIds,
+ api::ReturnCode::Result result,
+ documentapi::Priority::Value priority)
+{
+ for (int i = 0; i < expectedCount; i++) {
+ session.waitForMessages(i + 1);
+ mbus::Reply::UP reply;
+ {
+ vespalib::MonitorGuard guard(session.getMonitor());
+
+ CPPUNIT_ASSERT_EQUAL(priority,
+ session.sentMessages[i]->getPriority());
+
+ switch (session.sentMessages[i]->getType()) {
+ case documentapi::DocumentProtocol::MESSAGE_PUTDOCUMENT:
+ docs.push_back(static_cast<documentapi::PutDocumentMessage&>(
+ *session.sentMessages[i]).getDocument());
+ break;
+ case documentapi::DocumentProtocol::MESSAGE_REMOVEDOCUMENT:
+ docIds.push_back(static_cast<documentapi::RemoveDocumentMessage&>(
+ *session.sentMessages[i]).getDocumentId());
+ break;
+ default:
+ break;
+ }
+
+ // Build the reply and hand the original message back to it.
+ reply = session.sentMessages[i]->createReply();
+ reply->swapState(*session.sentMessages[i]);
+ reply->setMessage(
+ mbus::Message::UP(session.sentMessages[i].release()));
+
+ if (result != api::ReturnCode::OK) {
+ reply->addError(mbus::Error(result, "Generic error"));
+ }
+ }
+
+ // Reply outside the monitor to avoid holding the session lock.
+ session.reply(std::move(reply));
+ }
+}
+
+// Waits for exactly one reply at the top of the chain, asserts it is a
+// CreateVisitorReply with the expected result, optionally checks the visitor
+// statistics (negative check values skip the check), and returns the reply's
+// message id.
+uint64_t
+VisitorManagerTest::verifyCreateVisitorReply(
+ api::ReturnCode::Result expectedResult,
+ int checkStatsDocsVisited,
+ int checkStatsBytesVisited)
+{
+ _top->waitForMessages(1, 60);
+ const msg_ptr_vector replies = _top->getRepliesOnce();
+ CPPUNIT_ASSERT_EQUAL(1, (int)replies.size());
+
+ std::shared_ptr<api::StorageMessage> msg(replies[0]);
+
+ CPPUNIT_ASSERT_EQUAL(api::MessageType::VISITOR_CREATE_REPLY, msg->getType());
+
+ std::shared_ptr<api::CreateVisitorReply> reply(
+ std::dynamic_pointer_cast<api::CreateVisitorReply>(msg));
+ CPPUNIT_ASSERT(reply.get());
+ CPPUNIT_ASSERT_EQUAL(expectedResult, reply->getResult().getResult());
+
+ if (checkStatsDocsVisited >= 0) {
+ CPPUNIT_ASSERT_EQUAL(checkStatsDocsVisited,
+ int(reply->getVisitorStatistics().getDocumentsVisited()));
+ }
+ if (checkStatsBytesVisited >= 0) {
+ CPPUNIT_ASSERT_EQUAL(checkStatsBytesVisited,
+ int(reply->getVisitorStatistics().getBytesVisited()));
+ }
+
+ return reply->getMsgId();
+}
+
+// Counts how many of the visited docs match a seeded document both by id and
+// by full content (O(n*m) scan; fine for the tiny test corpus).
+uint32_t
+VisitorManagerTest::getMatchingDocuments(std::vector<document::Document::SP >& docs) {
+ uint32_t equalCount = 0;
+ for (uint32_t i=0; i<docs.size(); ++i) {
+ for (uint32_t j=0; j<_documents.size(); ++j) {
+ if (docs[i]->getId() == _documents[j]->getId()
+ && *docs[i] == *_documents[j])
+
+ {
+ equalCount++;
+ }
+ }
+ }
+
+ return equalCount;
+}
+
+// Visitor::HitCounter with an ascending ordering spec from 42: orderdoc hits
+// at/after the start (42, 49, 60) count as first-pass, earlier ones (10, 21)
+// as second-pass; byte totals follow at 450 bytes per hit.
+void
+VisitorManagerTest::testHitCounter()
+{
+ document::OrderingSpecification spec(document::OrderingSpecification::ASCENDING, 42, 7, 2);
+ Visitor::HitCounter hitCounter(&spec);
+
+ hitCounter.addHit(document::DocumentId("orderdoc(7,2):mail:1234:42:foo"), 450);
+ hitCounter.addHit(document::DocumentId("orderdoc(7,2):mail:1234:49:foo"), 450);
+ hitCounter.addHit(document::DocumentId("orderdoc(7,2):mail:1234:60:foo"), 450);
+ hitCounter.addHit(document::DocumentId("orderdoc(7,2):mail:1234:10:foo"), 450);
+ hitCounter.addHit(document::DocumentId("orderdoc(7,2):mail:1234:21:foo"), 450);
+
+ CPPUNIT_ASSERT_EQUAL(3, (int)hitCounter.getFirstPassHits());
+ CPPUNIT_ASSERT_EQUAL(1350, (int)hitCounter.getFirstPassBytes());
+ CPPUNIT_ASSERT_EQUAL(2, (int)hitCounter.getSecondPassHits());
+ CPPUNIT_ASSERT_EQUAL(900, (int)hitCounter.getSecondPassBytes());
+}
+
+namespace {
+
+// Sums the serialized byte length of all documents; used to cross-check the
+// bytes-visited statistic in CreateVisitorReply.
+int getTotalSerializedSize(const std::vector<document::Document::SP>& docs)
+{
+ int total = 0;
+ for (size_t i = 0; i < docs.size(); ++i) {
+ total += int(docs[i]->serialize()->getLength());
+ }
+ return total;
+}
+
+}
+
+// Happy path: visit a single bucket, reply to the one data message, and
+// verify the CreateVisitorReply reports OK with matching doc/byte statistics
+// and that the manager holds no lingering pending-message state.
+void
+VisitorManagerTest::testNormalUsage()
+{
+ initializeTest();
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 0);
+ std::shared_ptr<api::CreateVisitorCommand> cmd(
+ new api::CreateVisitorCommand("DumpVisitor", "testvis", ""));
+ cmd->addBucketToBeVisited(document::BucketId(16, 3));
+ cmd->setAddress(address);
+ cmd->setControlDestination("foo/bar");
+ _top->sendDown(cmd);
+ std::vector<document::Document::SP > docs;
+ std::vector<document::DocumentId> docIds;
+
+ // Should receive one multioperation message (bucket 3 has one document).
+ getMessagesAndReply(1, getSession(0), docs, docIds);
+
+ // All data has been replied to, expecting to get a create visitor reply
+ verifyCreateVisitorReply(api::ReturnCode::OK,
+ int(docs.size()),
+ getTotalSerializedSize(docs));
+
+ CPPUNIT_ASSERT_EQUAL(1u, getMatchingDocuments(docs));
+ CPPUNIT_ASSERT(!_manager->hasPendingMessageState());
+}
+
+// Failure-and-retry path: fail the first data message with NOT_READY, ack the
+// ensuing visitor-info message, advance the clock, then ack the resent data
+// message — the visitor should still complete with OK.
+void
+VisitorManagerTest::testResending()
+{
+ initializeTest();
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 0);
+ std::shared_ptr<api::CreateVisitorCommand> cmd(
+ new api::CreateVisitorCommand("DumpVisitor", "testvis", ""));
+ cmd->addBucketToBeVisited(document::BucketId(16, 3));
+ cmd->setAddress(address);
+ cmd->setControlDestination("foo/bar");
+ _top->sendDown(cmd);
+ std::vector<document::Document::SP > docs;
+ std::vector<document::DocumentId> docIds;
+
+ TestVisitorMessageSession& session = getSession(0);
+ // Reject the first data message to trigger the resend logic.
+ getMessagesAndReply(1, session, docs, docIds, api::ReturnCode::NOT_READY);
+
+ {
+ // The failure should make the visitor send a VISITORINFO message; ack it.
+ session.waitForMessages(2);
+
+ documentapi::DocumentMessage* msg = session.sentMessages[1].get();
+
+ mbus::Reply::UP reply = msg->createReply();
+
+ CPPUNIT_ASSERT_EQUAL((uint32_t)documentapi::DocumentProtocol::MESSAGE_VISITORINFO,
+ session.sentMessages[1]->getType());
+ reply->swapState(*session.sentMessages[1]);
+ reply->setMessage(mbus::Message::UP(session.sentMessages[1].release()));
+ session.reply(std::move(reply));
+ }
+
+ // Let the retry backoff elapse so the data message is resent.
+ _node->getClock().addSecondsToTime(1);
+
+ {
+ session.waitForMessages(3);
+
+ documentapi::DocumentMessage* msg = session.sentMessages[2].get();
+
+ mbus::Reply::UP reply = msg->createReply();
+
+ reply->swapState(*session.sentMessages[2]);
+ reply->setMessage(mbus::Message::UP(session.sentMessages[2].release()));
+ session.reply(std::move(reply));
+ }
+
+ // All data has been replied to, expecting to get a create visitor reply
+ verifyCreateVisitorReply(api::ReturnCode::OK);
+}
+
+// Visiting a bucket whose documents have all been removed should complete
+// with OK without producing any data messages.
+void
+VisitorManagerTest::testVisitEmptyBucket()
+{
+ initializeTest();
+ addSomeRemoves(true); // remove every seeded document
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 0);
+ std::shared_ptr<api::CreateVisitorCommand> cmd(
+ new api::CreateVisitorCommand("DumpVisitor", "testvis", ""));
+ cmd->addBucketToBeVisited(document::BucketId(16, 3));
+
+ cmd->setAddress(address);
+ _top->sendDown(cmd);
+
+ // All data has been replied to, expecting to get a create visitor reply
+ verifyCreateVisitorReply(api::ReturnCode::OK);
+}
+
+// Visiting all 10 seeded buckets should deliver every seeded document:
+// one data message per bucket, and all docCount documents match.
+void
+VisitorManagerTest::testMultiBucketVisit()
+{
+ initializeTest();
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 0);
+ std::shared_ptr<api::CreateVisitorCommand> cmd(
+ new api::CreateVisitorCommand("DumpVisitor", "testvis", ""));
+ for (uint32_t i=0; i<10; ++i) {
+ cmd->addBucketToBeVisited(document::BucketId(16, i));
+ }
+ cmd->setAddress(address);
+ cmd->setDataDestination("fooclient.0");
+ _top->sendDown(cmd);
+ std::vector<document::Document::SP > docs;
+ std::vector<document::DocumentId> docIds;
+
+ // Should receive one multioperation message for each bucket
+ getMessagesAndReply(10, getSession(0), docs, docIds);
+
+ // All data has been replied to, expecting to get a create visitor reply
+ verifyCreateVisitorReply(api::ReturnCode::OK);
+
+ CPPUNIT_ASSERT_EQUAL(docCount, getMatchingDocuments(docs));
+}
+
+// A CreateVisitorCommand without any buckets must be rejected immediately
+// with ILLEGAL_PARAMETERS rather than starting a visitor.
+void
+VisitorManagerTest::testNoBuckets()
+{
+ initializeTest();
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 0);
+ std::shared_ptr<api::CreateVisitorCommand> cmd(
+ new api::CreateVisitorCommand("DumpVisitor", "testvis", ""));
+
+ cmd->setAddress(address);
+ _top->sendDown(cmd);
+
+ // Should get one reply; a CreateVisitorReply with error since no
+ // buckets were specified in the CreateVisitorCommand
+ _top->waitForMessages(1, 60);
+ const msg_ptr_vector replies = _top->getRepliesOnce();
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, replies.size());
+ std::shared_ptr<api::CreateVisitorReply> reply(
+ std::dynamic_pointer_cast<api::CreateVisitorReply>(
+ replies[0]));
+ // Verify that cast went ok => it was a CreateVisitorReply message
+ CPPUNIT_ASSERT(reply.get());
+ // Both the result code and the exact error text are part of the contract.
+ api::ReturnCode ret(api::ReturnCode::ILLEGAL_PARAMETERS,
+ "No buckets specified");
+ CPPUNIT_ASSERT_EQUAL(ret, reply->getResult());
+}
+
+// With setVisitRemoves() enabled, the visitor must deliver both live
+// documents and remove entries (document ids) for the visited buckets.
+void VisitorManagerTest::testVisitPutsAndRemoves()
+{
+ initializeTest();
+ addSomeRemoves();
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 0);
+ std::shared_ptr<api::CreateVisitorCommand> cmd(
+ new api::CreateVisitorCommand("DumpVisitor", "testvis", ""));
+ cmd->setAddress(address);
+ cmd->setVisitRemoves();
+ for (uint32_t i=0; i<10; ++i) {
+ cmd->addBucketToBeVisited(document::BucketId(16, i));
+ }
+ _top->sendDown(cmd);
+ std::vector<document::Document::SP > docs;
+ std::vector<document::DocumentId> docIds;
+
+ getMessagesAndReply(10, getSession(0), docs, docIds);
+
+ verifyCreateVisitorReply(api::ReturnCode::OK);
+
+ // (docCount + 3) / 4 is a rounded-up quarter of the corpus; this assumes
+ // addSomeRemoves() tombstones every 4th document — TODO confirm against
+ // the helper's definition.
+ CPPUNIT_ASSERT_EQUAL(
+ docCount - (docCount + 3) / 4,
+ getMatchingDocuments(docs));
+
+ // The removed quarter must surface as remove entries instead.
+ CPPUNIT_ASSERT_EQUAL(
+ (size_t) (docCount + 3) / 4,
+ docIds.size());
+}
+
+// Combines a document selection ("testdoctype1.headerval < 2") with a
+// [from, to] timestamp window and verifies that exactly the two documents
+// matching both filters are returned.
+void VisitorManagerTest::testVisitWithTimeframeAndSelection()
+{
+ initializeTest();
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 0);
+ std::shared_ptr<api::CreateVisitorCommand> cmd(
+ new api::CreateVisitorCommand("DumpVisitor", "testvis",
+ "testdoctype1.headerval < 2"));
+ cmd->setFromTime(3);
+ cmd->setToTime(8);
+ for (uint32_t i=0; i<10; ++i) {
+ cmd->addBucketToBeVisited(document::BucketId(16, i));
+ }
+ cmd->setAddress(address);
+ _top->sendDown(cmd);
+ std::vector<document::Document::SP > docs;
+ std::vector<document::DocumentId> docIds;
+
+ getMessagesAndReply(2, getSession(0), docs, docIds);
+
+ verifyCreateVisitorReply(api::ReturnCode::OK);
+
+ CPPUNIT_ASSERT_EQUAL((size_t) 2, docs.size());
+ // Compare as unordered sets: visiting order across buckets is not
+ // deterministic, only the membership is.
+ std::set<std::string> expected;
+ expected.insert("userdoc:test:4:http://www.ntnu.no/4.html");
+ expected.insert("userdoc:test:5:http://www.ntnu.no/5.html");
+ std::set<std::string> actual;
+ for (uint32_t i=0; i<docs.size(); ++i) {
+ actual.insert(docs[i]->getId().toString());
+ }
+ CPPUNIT_ASSERT_EQUAL(expected, actual);
+}
+
+// A syntactically invalid document selection must make the visitor fail
+// fast with ILLEGAL_PARAMETERS instead of visiting any buckets.
+void VisitorManagerTest::testVisitWithTimeframeAndBogusSelection()
+{
+ initializeTest();
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 0);
+ std::shared_ptr<api::CreateVisitorCommand> cmd(
+ new api::CreateVisitorCommand("DumpVisitor", "testvis",
+ "DocType(testdoctype1---///---) XXX BAD Field(headerval) < 2"));
+ cmd->setFromTime(3);
+ cmd->setToTime(8);
+ for (uint32_t i=0; i<10; ++i) {
+ cmd->addBucketToBeVisited(document::BucketId(16, i));
+ }
+ cmd->setAddress(address);
+
+ _top->sendDown(cmd);
+ // Only the rejection reply is expected; no visitor sessions are created.
+ _top->waitForMessages(1, 60);
+ const msg_ptr_vector replies = _top->getRepliesOnce();
+ CPPUNIT_ASSERT_EQUAL((size_t) 1, replies.size());
+
+ api::StorageReply* reply = dynamic_cast<api::StorageReply*>(
+ replies.front().get());
+ CPPUNIT_ASSERT(reply);
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode::ILLEGAL_PARAMETERS,
+ reply->getResult().getResult());
+}
+
+// Runs the "TestVisitor" implementation over two buckets and inspects the
+// MapVisitor messages it emits to verify the visitor lifecycle callbacks
+// (start, per-block handling, per-bucket completion, overall completion)
+// fire the expected number of times.
+void
+VisitorManagerTest::testVisitorCallbacks()
+{
+ initializeTest();
+ std::ostringstream replydata;
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 0);
+ std::shared_ptr<api::CreateVisitorCommand> cmd(
+ new api::CreateVisitorCommand("TestVisitor", "testvis", ""));
+ cmd->addBucketToBeVisited(document::BucketId(16, 3));
+ cmd->addBucketToBeVisited(document::BucketId(16, 5));
+ cmd->setAddress(address);
+ _top->sendDown(cmd);
+
+ // Wait until we have started the visitor
+ TestVisitorMessageSession& session = getSession(0);
+
+ // 6 messages total: 1 start + 2 blocks + 2 completedBucket + 1 completedVisiting.
+ for (uint32_t i = 0; i < 6; i++) {
+ session.waitForMessages(i + 1);
+ mbus::Reply::UP reply;
+ {
+ // Guard session state while we swap the sent message into a reply.
+ vespalib::MonitorGuard guard(session.getMonitor());
+
+ CPPUNIT_ASSERT_EQUAL((uint32_t)documentapi::DocumentProtocol::MESSAGE_MAPVISITOR, session.sentMessages[i]->getType());
+
+ documentapi::MapVisitorMessage* mapvisitormsg(
+ static_cast<documentapi::MapVisitorMessage*>(session.sentMessages[i].get()));
+
+ replydata << mapvisitormsg->getData().get("msg");
+
+ reply = mapvisitormsg->createReply();
+ reply->swapState(*session.sentMessages[i]);
+ reply->setMessage(mbus::Message::UP(session.sentMessages[i].release()));
+ }
+ // Reply outside the guard to avoid self-deadlock with the session.
+ session.reply(std::move(reply));
+ }
+
+ // All data has been replied to, expecting to get a create visitor reply
+ verifyCreateVisitorReply(api::ReturnCode::OK);
+
+ CPPUNIT_ASSERT_SUBSTRING_COUNT(replydata.str(), 1, "Starting visitor");
+ CPPUNIT_ASSERT_SUBSTRING_COUNT(replydata.str(), 2, "Handling block of 1 documents");
+ CPPUNIT_ASSERT_SUBSTRING_COUNT(replydata.str(), 2, "completedBucket");
+ CPPUNIT_ASSERT_SUBSTRING_COUNT(replydata.str(), 1, "completedVisiting");
+}
+
+// Exercises visitor slot accounting and cleanup: invalid visitors must fail
+// with ILLEGAL_PARAMETERS, excess visitors beyond the concurrency/queue
+// limits must be rejected BUSY, and finishing or failing a running visitor
+// must free its slot for later commands.
+void
+VisitorManagerTest::testVisitorCleanup()
+{
+ initializeTest();
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 0);
+
+ // Start a bunch of invalid visitors
+ for (uint32_t i=0; i<10; ++i) {
+ std::ostringstream ost;
+ ost << "testvis" << i;
+ std::shared_ptr<api::CreateVisitorCommand> cmd(
+ new api::CreateVisitorCommand("InvalidVisitor", ost.str(), ""));
+ cmd->addBucketToBeVisited(document::BucketId(16, 3));
+ cmd->setAddress(address);
+ cmd->setQueueTimeout(0);
+ _top->sendDown(cmd);
+ // Wait for each rejection before sending the next so replies arrive
+ // in a deterministic order.
+ _top->waitForMessages(i+1, 60);
+ }
+
+ // Start a bunch of visitors
+ for (uint32_t i=0; i<10; ++i) {
+ std::ostringstream ost;
+ ost << "testvis" << (i + 10);
+ std::shared_ptr<api::CreateVisitorCommand> cmd(
+ new api::CreateVisitorCommand("DumpVisitor", ost.str(), ""));
+ cmd->addBucketToBeVisited(document::BucketId(16, 3));
+ cmd->setAddress(address);
+ cmd->setQueueTimeout(0);
+ _top->sendDown(cmd);
+ }
+
+
+ // Should get 14 immediate replies - 10 failures and 4 busy
+ {
+ _top->waitForMessages(14, 60);
+ const msg_ptr_vector replies = _top->getRepliesOnce();
+
+ int failures = 0;
+ int busy = 0;
+
+ for (uint32_t i=0; i< 14; ++i) {
+ std::shared_ptr<api::StorageMessage> msg(replies[i]);
+ CPPUNIT_ASSERT_EQUAL(api::MessageType::VISITOR_CREATE_REPLY, msg->getType());
+ std::shared_ptr<api::CreateVisitorReply> reply(
+ std::dynamic_pointer_cast<api::CreateVisitorReply>(msg));
+ CPPUNIT_ASSERT(reply.get());
+
+ if (i < 10) {
+ if (api::ReturnCode::ILLEGAL_PARAMETERS == reply->getResult().getResult()) {
+ failures++;
+ } else {
+ // Unexpected results are logged here; the count asserts
+ // below will make the test fail.
+ std::cerr << reply->getResult() << "\n";
+ }
+ } else {
+ if (api::ReturnCode::BUSY == reply->getResult().getResult()) {
+ busy++;
+ }
+ }
+ }
+
+ CPPUNIT_ASSERT_EQUAL(10, failures);
+ CPPUNIT_ASSERT_EQUAL(4, busy);
+ }
+
+ // Finish a visitor
+ std::vector<document::Document::SP > docs;
+ std::vector<document::DocumentId> docIds;
+
+ getMessagesAndReply(1, getSession(0), docs, docIds);
+
+ // Should get a reply for the visitor.
+ verifyCreateVisitorReply(api::ReturnCode::OK);
+
+ // Fail a visitor
+ getMessagesAndReply(1, getSession(1), docs, docIds, api::ReturnCode::INTERNAL_FAILURE);
+
+ // Should get a reply for the visitor.
+ verifyCreateVisitorReply(api::ReturnCode::INTERNAL_FAILURE);
+
+ // Busy-wait until the manager has torn down the finished/failed visitors.
+ while (_manager->getActiveVisitorCount() > 2) {
+ FastOS_Thread::Sleep(10);
+ }
+
+ // Start a bunch of more visitors
+ for (uint32_t i=0; i<10; ++i) {
+ std::ostringstream ost;
+ ost << "testvis" << (i + 24);
+ std::shared_ptr<api::CreateVisitorCommand> cmd(
+ new api::CreateVisitorCommand("DumpVisitor", ost.str(), ""));
+ cmd->addBucketToBeVisited(document::BucketId(16, 3));
+ cmd->setAddress(address);
+ cmd->setQueueTimeout(0);
+ _top->sendDown(cmd);
+ }
+
+ // Should now get 8 busy.
+ _top->waitForMessages(8, 60);
+ const msg_ptr_vector replies = _top->getRepliesOnce();
+ CPPUNIT_ASSERT_EQUAL(8, (int)replies.size());
+
+ for (uint32_t i=0; i< replies.size(); ++i) {
+ std::shared_ptr<api::StorageMessage> msg(replies[i]);
+ CPPUNIT_ASSERT_EQUAL(api::MessageType::VISITOR_CREATE_REPLY, msg->getType());
+ std::shared_ptr<api::CreateVisitorReply> reply(
+ std::dynamic_pointer_cast<api::CreateVisitorReply>(msg));
+ CPPUNIT_ASSERT(reply.get());
+
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode::BUSY, reply->getResult().getResult());
+ }
+}
+
+// Replies to the visitor's VisitorInfo message with a NOT_CONNECTED error
+// and verifies that the whole visitor session is aborted: exactly one
+// CreateVisitorReply arrives and it carries a failure result.
+void
+VisitorManagerTest::testAbortOnFailedVisitorInfo()
+{
+ initializeTest();
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 0);
+
+ {
+ std::shared_ptr<api::CreateVisitorCommand> cmd(
+ new api::CreateVisitorCommand("DumpVisitor", "testvis", ""));
+ cmd->addBucketToBeVisited(document::BucketId(16, 3));
+ cmd->setAddress(address);
+ cmd->setQueueTimeout(0);
+ _top->sendDown(cmd);
+ }
+
+ uint32_t visitorRepliesReceived = 0;
+ uint32_t oki = 0;
+ uint32_t failed = 0;
+
+ std::vector<document::Document::SP > docs;
+ std::vector<document::DocumentId> docIds;
+
+ TestVisitorMessageSession& session = getSession(0);
+ // Fail the data message with NOT_READY so the visitor emits VisitorInfo.
+ getMessagesAndReply(1, session, docs, docIds, api::ReturnCode::NOT_READY);
+
+ {
+ session.waitForMessages(2);
+
+ documentapi::DocumentMessage* cmd = session.sentMessages[1].get();
+
+ mbus::Reply::UP reply = cmd->createReply();
+
+ CPPUNIT_ASSERT_EQUAL((uint32_t)documentapi::DocumentProtocol::MESSAGE_VISITORINFO, session.sentMessages[1]->getType());
+ reply->swapState(*session.sentMessages[1]);
+ reply->setMessage(mbus::Message::UP(session.sentMessages[1].release()));
+ // Failing the VisitorInfo reply itself is what must abort the visitor.
+ reply->addError(mbus::Error(api::ReturnCode::NOT_CONNECTED, "Me no ready"));
+ session.reply(std::move(reply));
+ }
+
+ _top->waitForMessages(1, 60);
+ const msg_ptr_vector replies = _top->getRepliesOnce();
+ for (uint32_t i=0; i< replies.size(); ++i) {
+ std::shared_ptr<api::StorageMessage> msg(replies[i]);
+ if (msg->getType() == api::MessageType::VISITOR_CREATE_REPLY)
+ {
+ ++visitorRepliesReceived;
+ std::shared_ptr<api::CreateVisitorReply> reply(
+ std::dynamic_pointer_cast<api::CreateVisitorReply>(msg));
+ CPPUNIT_ASSERT(reply.get());
+ if (reply->getResult().success()) {
+ // Not expected; dumped for debugging before the asserts fail.
+ ++oki;
+ std::cerr << "\n" << reply->toString(true) << "\n";
+ } else {
+ ++failed;
+ }
+ }
+ }
+
+ std::ostringstream errmsg;
+ errmsg << "oki " << oki << ", failed " << failed;
+
+ CPPUNIT_ASSERT_EQUAL_MSG(errmsg.str(), 0u, oki);
+ CPPUNIT_ASSERT_EQUAL_MSG(errmsg.str(), 1u, failed);
+}
+
+// A selection referencing a non-existent field path must abort the visitor
+// up front with ILLEGAL_PARAMETERS.
+void
+VisitorManagerTest::testAbortOnFieldPathError()
+{
+ initializeTest();
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 0);
+
+ // Use bogus field path to force error to happen
+ std::shared_ptr<api::CreateVisitorCommand> cmd(
+ new api::CreateVisitorCommand("DumpVisitor",
+ "testvis",
+ "testdoctype1.headerval{bogus} == 1234"));
+ cmd->addBucketToBeVisited(document::BucketId(16, 3));
+ cmd->setAddress(address);
+ cmd->setQueueTimeout(0);
+ _top->sendDown(cmd);
+
+ verifyCreateVisitorReply(api::ReturnCode::ILLEGAL_PARAMETERS);
+}
+
+// Holds the visitor thread's queue monitor while a command with a 1ms queue
+// timeout is enqueued, then advances the fake clock far past it; the command
+// must be bounced with BUSY ("timed out in visitor queue") without running.
+void
+VisitorManagerTest::testVisitorQueueTimeout()
+{
+ initializeTest();
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 0);
+ _manager->enforceQueueUsage();
+
+ {
+ // Holding the queue monitor prevents the thread from dequeuing the
+ // command before the clock jump makes it expire.
+ vespalib::MonitorGuard guard(_manager->getThread(0).getQueueMonitor());
+
+ std::shared_ptr<api::CreateVisitorCommand> cmd(
+ new api::CreateVisitorCommand("DumpVisitor", "testvis", ""));
+ cmd->addBucketToBeVisited(document::BucketId(16, 3));
+ cmd->setAddress(address);
+ cmd->setQueueTimeout(1);
+ cmd->setTimeout(100 * 1000 * 1000);
+ _top->sendDown(cmd);
+
+ _node->getClock().addSecondsToTime(1000);
+ }
+
+ // Don't answer any messages. Make sure we timeout anyways.
+ // NOTE(review): visitorRepliesReceived is incremented but never asserted
+ // on; it could be removed or turned into an assertion.
+ uint32_t visitorRepliesReceived = 0;
+
+ _top->waitForMessages(1, 60);
+ const msg_ptr_vector replies = _top->getRepliesOnce();
+ std::shared_ptr<api::StorageMessage> msg(replies[0]);
+
+ CPPUNIT_ASSERT_EQUAL(api::MessageType::VISITOR_CREATE_REPLY, msg->getType());
+ ++visitorRepliesReceived;
+ std::shared_ptr<api::CreateVisitorReply> reply(
+ std::dynamic_pointer_cast<api::CreateVisitorReply>(msg));
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::BUSY,
+ "Visitor timed out in visitor queue"),
+ reply->getResult());
+}
+
+// Starts a visitor with a short overall timeout, never answers its data
+// messages, advances the fake clock past the deadline, and expects the
+// visitor to be ABORTED.
+void
+VisitorManagerTest::testVisitorProcessingTimeout()
+{
+ initializeTest();
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 0);
+
+ std::shared_ptr<api::CreateVisitorCommand> cmd(
+ new api::CreateVisitorCommand("DumpVisitor", "testvis", ""));
+ cmd->addBucketToBeVisited(document::BucketId(16, 3));
+ cmd->setAddress(address);
+ cmd->setQueueTimeout(0);
+ cmd->setTimeout(100);
+ _top->sendDown(cmd);
+
+ // Wait for Put before increasing the clock
+ TestVisitorMessageSession& session = getSession(0);
+ session.waitForMessages(1);
+
+ _node->getClock().addSecondsToTime(1000);
+
+ // Don't answer any messages. Make sure we timeout anyways.
+ // NOTE(review): visitorRepliesReceived is incremented but never asserted
+ // on; same dead counter as in testVisitorQueueTimeout.
+ uint32_t visitorRepliesReceived = 0;
+
+ _top->waitForMessages(1, 60);
+ const msg_ptr_vector replies = _top->getRepliesOnce();
+ std::shared_ptr<api::StorageMessage> msg(replies[0]);
+
+ CPPUNIT_ASSERT_EQUAL(api::MessageType::VISITOR_CREATE_REPLY, msg->getType());
+ ++visitorRepliesReceived;
+ std::shared_ptr<api::CreateVisitorReply> reply(
+ std::dynamic_pointer_cast<api::CreateVisitorReply>(msg));
+ CPPUNIT_ASSERT_EQUAL(api::ReturnCode::ABORTED,
+ reply->getResult().getResult());
+}
+
+namespace {
+ // Monotonically increasing suffix so each helper call creates a visitor
+ // with a unique name (file-local state; tests run sequentially).
+ uint32_t nextVisitor = 0;
+
+ // Sends a DumpVisitor CreateVisitorCommand for bucket (16, 3) with the
+ // given queue timeout and priority, returning the message id so callers
+ // can match replies to commands.
+ api::StorageMessage::Id
+ sendCreateVisitor(uint32_t timeout, DummyStorageLink& top, uint8_t priority = 127) {
+ std::ostringstream ost;
+ ost << "testvis" << ++nextVisitor;
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 0);
+ std::shared_ptr<api::CreateVisitorCommand> cmd(
+ new api::CreateVisitorCommand("DumpVisitor", ost.str(), ""));
+ cmd->addBucketToBeVisited(document::BucketId(16, 3));
+ cmd->setAddress(address);
+ cmd->setQueueTimeout(timeout);
+ cmd->setPriority(priority);
+ top.sendDown(cmd);
+ return cmd->getMsgId();
+ }
+}
+
+// Verifies priority-ordered queueing: with 4 concurrent slots and a queue of
+// 4, a lower-priority newcomer is bounced BUSY while a higher-priority one
+// evicts the worst queued entry, and the highest-priority queued visitor is
+// the first to start when a slot frees up.
+void
+VisitorManagerTest::testPrioritizedVisitorQueing()
+{
+ // NOTE(review): 'path' is constructed but never used in this test.
+ framework::HttpUrlPath path("?verbose=true&allvisitors=true");
+ initializeTest();
+
+ _manager->setMaxConcurrentVisitors(4);
+ _manager->setMaxVisitorQueueSize(4);
+
+ api::StorageMessage::Id ids[10] = { 0 };
+
+ // First 4 should just start..
+ for (uint32_t i = 0; i < 4; ++i) {
+ ids[i] = sendCreateVisitor(i, *_top, i);
+ }
+
+ // Next ones should be queued - (Better not finish before we get here)
+ // Submit with higher priorities
+ for (uint32_t i = 0; i < 4; ++i) {
+ ids[i + 4] = sendCreateVisitor(1000, *_top, 100 - i);
+ }
+
+ // Queue is now full with a pri 100 visitor at its end
+ // Send a lower pri visitor that will be busy-returned immediately
+ ids[8] = sendCreateVisitor(1000, *_top, 130);
+
+ CPPUNIT_ASSERT_EQUAL(ids[8], verifyCreateVisitorReply(api::ReturnCode::BUSY));
+
+ // Send a higher pri visitor that will take the place of pri 100 visitor
+ ids[9] = sendCreateVisitor(1000, *_top, 60);
+
+ CPPUNIT_ASSERT_EQUAL(ids[4], verifyCreateVisitorReply(api::ReturnCode::BUSY));
+
+ // Finish the first visitor
+ std::vector<document::Document::SP > docs;
+ std::vector<document::DocumentId> docIds;
+ getMessagesAndReply(1, getSession(0), docs, docIds, api::ReturnCode::OK,
+ documentapi::Priority::PRI_HIGHEST);
+ verifyCreateVisitorReply(api::ReturnCode::OK);
+
+ // We should now start the highest priority visitor.
+ getMessagesAndReply(1, getSession(4), docs, docIds, api::ReturnCode::OK,
+ documentapi::Priority::PRI_VERY_HIGH);
+ CPPUNIT_ASSERT_EQUAL(ids[9], verifyCreateVisitorReply(api::ReturnCode::OK));
+}
+
+// Verifies the priority-dependent concurrency policy configured via
+// setMaxConcurrentVisitors(4, 4): higher-priority commands get extra
+// concurrent slots while lower-priority ones are queued or evicted, and at
+// the end exactly the expected visitor ids have completed in each wave.
+void
+VisitorManagerTest::testPrioritizedMaxConcurrentVisitors() {
+ // NOTE(review): 'path' is constructed but never used in this test.
+ framework::HttpUrlPath path("?verbose=true&allvisitors=true");
+ initializeTest();
+
+ api::StorageMessage::Id ids[17] = { 0 };
+
+ // Number of concurrent visitors is in [4, 8], depending on priority
+ // Max concurrent:
+ // [0, 1): 4
+ // [1, 64): 3
+ // [64, 128): 2
+ // [128, 192): 1
+ // [192, 256): 0
+ _manager->setMaxConcurrentVisitors(4, 4);
+ _manager->setMaxVisitorQueueSize(6);
+
+ // First 4 should just start..
+ for (uint32_t i = 0; i < 4; ++i) {
+ ids[i] = sendCreateVisitor(i, *_top, i);
+ }
+
+ // Low pri messages; get put into queue
+ for (uint32_t i = 0; i < 6; ++i) {
+ ids[i + 4] = sendCreateVisitor(1000, *_top, 203 - i);
+ }
+
+ // Higher pri message: fits happily into 1 extra concurrent slot
+ ids[10] = sendCreateVisitor(1000, *_top, 190);
+
+ // Should punch pri203 msg out of the queue -> busy
+ ids[11] = sendCreateVisitor(1000, *_top, 197);
+
+ CPPUNIT_ASSERT_EQUAL(ids[4], verifyCreateVisitorReply(api::ReturnCode::BUSY));
+
+ // No concurrency slots left for this message -> busy
+ ids[12] = sendCreateVisitor(1000, *_top, 204);
+
+ CPPUNIT_ASSERT_EQUAL(ids[12], verifyCreateVisitorReply(api::ReturnCode::BUSY));
+
+ // Gets a concurrent slot
+ ids[13] = sendCreateVisitor(1000, *_top, 80);
+
+ // Kicks pri 202 out of the queue -> busy
+ ids[14] = sendCreateVisitor(1000, *_top, 79);
+
+ CPPUNIT_ASSERT_EQUAL(ids[5], verifyCreateVisitorReply(api::ReturnCode::BUSY));
+
+ // Gets a concurrent slot
+ ids[15] = sendCreateVisitor(1000, *_top, 63);
+
+ // Very Important Visitor(tm) gets a concurrent slot
+ ids[16] = sendCreateVisitor(1000, *_top, 0);
+
+ std::vector<document::Document::SP > docs;
+ std::vector<document::DocumentId> docIds;
+
+ std::set<uint64_t> finishedVisitors;
+
+ // Verify that the correct visitors are running.
+ // Sessions 0-7 belong to the 8 currently running visitors; the expected
+ // per-session priority pins which command got which slot.
+ for (int i = 0; i < 8; i++) {
+ documentapi::Priority::Value priority =
+ documentapi::Priority::PRI_HIGHEST; // ids 0-3,16
+ if (i == 4) {
+ priority = documentapi::Priority::PRI_VERY_LOW; // ids 10
+ } else if (i == 5) {
+ priority = documentapi::Priority::PRI_HIGH_2; // ids 13
+ } else if (i == 6) {
+ priority = documentapi::Priority::PRI_HIGH_1; // ids 15
+ }
+ getMessagesAndReply(1, getSession(i), docs, docIds, api::ReturnCode::OK,
+ priority);
+ finishedVisitors.insert(verifyCreateVisitorReply(api::ReturnCode::OK));
+ }
+
+ for (int i = 0; i < 4; i++) {
+ CPPUNIT_ASSERT(finishedVisitors.find(ids[i]) != finishedVisitors.end());
+ }
+
+ CPPUNIT_ASSERT(finishedVisitors.find(ids[10]) != finishedVisitors.end());
+ CPPUNIT_ASSERT(finishedVisitors.find(ids[13]) != finishedVisitors.end());
+ CPPUNIT_ASSERT(finishedVisitors.find(ids[15]) != finishedVisitors.end());
+ CPPUNIT_ASSERT(finishedVisitors.find(ids[16]) != finishedVisitors.end());
+
+ finishedVisitors.clear();
+
+ // Second wave: the queued visitors start as the first wave's slots free up.
+ for (int i = 8; i < 14; i++) {
+ documentapi::Priority::Value priority =
+ documentapi::Priority::PRI_LOWEST; // ids 6-9,11
+ if (i == 8) {
+ priority = documentapi::Priority::PRI_HIGH_2; // ids 14
+ }
+ getMessagesAndReply(1, getSession(i), docs, docIds, api::ReturnCode::OK,
+ priority);
+ uint64_t msgId = verifyCreateVisitorReply(api::ReturnCode::OK);
+ finishedVisitors.insert(msgId);
+ }
+
+ for (int i = 6; i < 10; i++) {
+ CPPUNIT_ASSERT(finishedVisitors.find(ids[i]) != finishedVisitors.end());
+ }
+
+ CPPUNIT_ASSERT(finishedVisitors.find(ids[11]) != finishedVisitors.end());
+ CPPUNIT_ASSERT(finishedVisitors.find(ids[14]) != finishedVisitors.end());
+}
+
+// With a queue size of zero, every visitor beyond the concurrency limit must
+// be rejected BUSY immediately regardless of priority.
+void
+VisitorManagerTest::testVisitorQueingZeroQueueSize() {
+ // NOTE(review): 'path' is constructed but never used in this test.
+ framework::HttpUrlPath path("?verbose=true&allvisitors=true");
+ initializeTest();
+
+ _manager->setMaxConcurrentVisitors(4);
+ _manager->setMaxVisitorQueueSize(0);
+
+ // First 4 should just start..
+ for (uint32_t i = 0; i < 4; ++i) {
+ sendCreateVisitor(i, *_top, i);
+ }
+ // Queue size is zero, all visitors will be busy-returned
+ for (uint32_t i = 0; i < 5; ++i) {
+ sendCreateVisitor(1000, *_top, 100 - i);
+ verifyCreateVisitorReply(api::ReturnCode::BUSY);
+ }
+}
+
+// Renders the manager's verbose HTML status page with one running and one
+// queued visitor, and checks the page reports both along with per-thread
+// detail and the pending client message.
+void
+VisitorManagerTest::testStatusPage() {
+ framework::HttpUrlPath path("?verbose=true&allvisitors=true");
+ initializeTest();
+
+ _manager->setMaxConcurrentVisitors(1, 1);
+ _manager->setMaxVisitorQueueSize(6);
+ // 1 running, 1 queued
+ sendCreateVisitor(1000000, *_top, 1);
+ sendCreateVisitor(1000000, *_top, 128);
+
+ // Ensure the first visitor has actually started before reading status.
+ TestVisitorMessageSession& session = getSession(0);
+ session.waitForMessages(1);
+
+ std::ostringstream ss;
+ static_cast<framework::HtmlStatusReporter&>(*_manager).reportHtmlStatus(ss, path);
+
+ std::string str(ss.str());
+ CPPUNIT_ASSERT(str.find("Currently running visitors") != std::string::npos);
+ // Should be propagated to visitor thread
+ CPPUNIT_ASSERT(str.find("Running 1 visitors") != std::string::npos); // 1 active
+ CPPUNIT_ASSERT(str.find("waiting visitors 1") != std::string::npos); // 1 queued
+ CPPUNIT_ASSERT(str.find("Visitor thread 0") != std::string::npos);
+ CPPUNIT_ASSERT(str.find("Disconnected visitor timeout") != std::string::npos); // verbose per thread
+ CPPUNIT_ASSERT(str.find("Message #1 <b>putdocumentmessage</b>") != std::string::npos); // 1 active
+}
+
+}
diff --git a/storage/src/tests/visiting/visitortest.cpp b/storage/src/tests/visiting/visitortest.cpp
new file mode 100644
index 00000000000..aed08a676b8
--- /dev/null
+++ b/storage/src/tests/visiting/visitortest.cpp
@@ -0,0 +1,1023 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include <vespa/document/datatype/datatype.h>
+#include <vespa/document/fieldvalue/intfieldvalue.h>
+#include <vespa/document/fieldvalue/stringfieldvalue.h>
+#include <vespa/document/fieldvalue/rawfieldvalue.h>
+#include <vespa/log/log.h>
+#include <vespa/storageapi/message/datagram.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storageapi/message/visitor.h>
+#include <vespa/storage/persistence/filestorage/filestormanager.h>
+#include <vespa/storage/visiting/visitormanager.h>
+#include <tests/common/testhelper.h>
+#include <tests/common/teststorageapp.h>
+#include <tests/common/dummystoragelink.h>
+#include <tests/storageserver/testvisitormessagesession.h>
+#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/vdslib/container/visitorordering.h>
+#include <vespa/documentapi/messagebus/messages/multioperationmessage.h>
+#include <vespa/documentapi/messagebus/messages/putdocumentmessage.h>
+#include <vespa/documentapi/messagebus/messages/removedocumentmessage.h>
+#include <vector>
+#include <thread>
+#include <chrono>
+
+LOG_SETUP(".visitortest");
+
+using namespace std::chrono_literals;
+
+namespace storage {
+
+namespace {
+
+using msg_ptr_vector = std::vector<api::StorageMessage::SP>;
+
+// Builder-style bundle of per-test tunables consumed by
+// VisitorTest::initializeTest(); each setter returns *this so parameters
+// can be chained at the call site.
+struct TestParams
+{
+ TestParams& iteratorsPerBucket(uint32_t n) {
+ _iteratorsPerBucket = n;
+ return *this;
+ }
+ TestParams& maxVisitorMemoryUsage(uint32_t bytes) {
+ _maxVisitorMemoryUsage = bytes;
+ return *this;
+ }
+ TestParams& parallelBuckets(uint32_t n) {
+ _parallelBuckets = n;
+ return *this;
+ }
+ TestParams& autoReplyError(const mbus::Error& error) {
+ _autoReplyError = error;
+ return *this;
+ }
+
+ uint32_t _iteratorsPerBucket {1}; // config: iterators_per_bucket
+ uint32_t _maxVisitorMemoryUsage {UINT32_MAX}; // config: visitor_memory_usage_limit
+ uint32_t _parallelBuckets {1}; // config: defaultparalleliterators
+ mbus::Error _autoReplyError; // if set, sessions auto-reply with this error
+};
+
+}
+
+// CppUnit fixture testing a single Visitor instance end-to-end through a
+// VisitorManager wired between two DummyStorageLinks, with a
+// TestVisitorMessageSessionFactory standing in for the documentapi client.
+class VisitorTest : public CppUnit::TestFixture
+{
+private:
+ CPPUNIT_TEST_SUITE(VisitorTest);
+ CPPUNIT_TEST(testNormalUsage);
+ CPPUNIT_TEST(testFailedCreateIterator);
+ CPPUNIT_TEST(testFailedGetIter);
+ CPPUNIT_TEST(testMultipleFailedGetIter);
+ CPPUNIT_TEST(testDocumentAPIClientError);
+ CPPUNIT_TEST(testNoDocumentAPIResendingForFailedVisitor);
+ CPPUNIT_TEST(testIteratorCreatedForFailedVisitor);
+ CPPUNIT_TEST(testFailedDocumentAPISend);
+ CPPUNIT_TEST(testNoVisitorNotificationForTransientFailures);
+ CPPUNIT_TEST(testNotificationSentIfTransientErrorRetriedManyTimes);
+ CPPUNIT_TEST(testNoMbusTracingIfTraceLevelIsZero);
+ CPPUNIT_TEST(testReplyContainsTraceIfTraceLevelAboveZero);
+ CPPUNIT_TEST(testNoMoreIteratorsSentWhileMemoryUsedAboveLimit);
+ CPPUNIT_TEST(testDumpVisitorInvokesStrongReadConsistencyIteration);
+ CPPUNIT_TEST(testTestVisitorInvokesWeakReadConsistencyIteration);
+ CPPUNIT_TEST_SUITE_END();
+
+ static uint32_t docCount; // number of documents generated per test
+ std::vector<document::Document::SP > _documents; // fixture corpus
+ std::unique_ptr<TestVisitorMessageSessionFactory> _messageSessionFactory;
+ std::unique_ptr<TestServiceLayerApp> _node;
+ std::unique_ptr<DummyStorageLink> _top; // link above the manager
+ DummyStorageLink* _bottom; // owned by _top's chain
+ VisitorManager* _manager; // owned by _top's chain
+
+public:
+ VisitorTest() : _node() {}
+
+ void testNormalUsage();
+ void testFailedCreateIterator();
+ void testFailedGetIter();
+ void testMultipleFailedGetIter();
+ void testDocumentAPIClientError();
+ void testNoDocumentAPIResendingForFailedVisitor();
+ void testIteratorCreatedForFailedVisitor();
+ void testFailedDocumentAPISend();
+ void testNoVisitorNotificationForTransientFailures();
+ void testNotificationSentIfTransientErrorRetriedManyTimes();
+ void testNoMbusTracingIfTraceLevelIsZero();
+ void testReplyContainsTraceIfTraceLevelAboveZero();
+ void testNoMoreIteratorsSentWhileMemoryUsedAboveLimit();
+ void testDumpVisitorInvokesStrongReadConsistencyIteration();
+ void testTestVisitorInvokesWeakReadConsistencyIteration();
+ // TODO:
+ void testVisitMultipleBuckets() {}
+
+ // Not using setUp since can't throw exception out of it.
+ void initializeTest(const TestParams& params = TestParams());
+
+ // Options controlling which visitor library instance a test creates.
+ struct VisitorOptions {
+ std::string visitorType{"dumpvisitor"};
+
+ VisitorOptions() {}
+
+ VisitorOptions& withVisitorType(vespalib::stringref type) {
+ visitorType = type;
+ return *this;
+ }
+ };
+
+ std::shared_ptr<api::CreateVisitorCommand> makeCreateVisitor(
+ const VisitorOptions& options = VisitorOptions());
+ void tearDown();
+ bool waitUntilNoActiveVisitors();
+ TestVisitorMessageSession& getSession(uint32_t n);
+ // Returns the reply's originating message id; optional stats checks are
+ // skipped when the corresponding argument is negative.
+ uint64_t verifyCreateVisitorReply(
+ api::ReturnCode::Result expectedResult,
+ int checkStatsDocsVisited = -1,
+ int checkStatsBytesVisited = -1);
+ void getMessagesAndReply(
+ int expectedCount,
+ TestVisitorMessageSession& session,
+ std::vector<document::Document::SP >& docs,
+ std::vector<document::DocumentId>& docIds,
+ std::vector<std::string>& infoMessages,
+ api::ReturnCode::Result returnCode = api::ReturnCode::OK);
+ uint32_t getMatchingDocuments(std::vector<document::Document::SP >& docs);
+
+private:
+ void doTestVisitorInstanceHasConsistencyLevel(
+ vespalib::stringref visitorType,
+ spi::ReadConsistency expectedConsistency);
+
+ template <typename T>
+ std::vector<std::shared_ptr<T> >
+ fetchMultipleCommands(DummyStorageLink& link, size_t count);
+
+ template <typename T>
+ std::shared_ptr<T>
+ fetchSingleCommand(DummyStorageLink& link);
+
+ void sendGetIterReply(GetIterCommand& cmd,
+ const api::ReturnCode& result =
+ api::ReturnCode(api::ReturnCode::OK),
+ uint32_t maxDocuments = 0,
+ bool overrideCompleted = false);
+ void sendCreateIteratorReply(uint64_t iteratorId = 1234);
+ std::shared_ptr<api::CreateVisitorReply> doCompleteVisitingSession(
+ const std::shared_ptr<api::CreateVisitorCommand>& cmd);
+
+ void sendInitialCreateVisitorAndGetIterRound();
+
+ int64_t getFailedVisitorDestinationReplyCount() const {
+ // There's no metric manager attached to these tests, so even if the
+ // test should magically freeze here for 5+ minutes, nothing should
+ // come in and wipe our accumulated failure metrics.
+ // Only 1 visitor thread running, so we know it has the metrics.
+ const auto& metrics = _manager->getThread(0).getMetrics();
+ auto loadType = documentapi::LoadType::DEFAULT;
+ return metrics.visitorDestinationFailureReplies[loadType].getCount();
+ }
+};
+
+// Size of the document corpus initializeTest() generates for every test.
+uint32_t VisitorTest::docCount = 10;
+
+CPPUNIT_TEST_SUITE_REGISTRATION(VisitorTest);
+
+// Builds the test rig: writes visitor config from 'params', recreates the
+// vdsroot disk layout, wires DummyStorageLink -> VisitorManager ->
+// DummyStorageLink on a TestServiceLayerApp, and generates docCount test
+// documents (headerval cycles 0-3). Not setUp() because it may throw.
+void
+VisitorTest::initializeTest(const TestParams& params)
+{
+ LOG(debug, "Initializing test");
+ vdstestlib::DirConfig config(getStandardConfig(true));
+ config.getConfig("stor-visitor").set("visitorthreads", "1");
+ config.getConfig("stor-visitor").set(
+ "iterators_per_bucket",
+ std::to_string(params._iteratorsPerBucket));
+ config.getConfig("stor-visitor").set(
+ "defaultparalleliterators",
+ std::to_string(params._parallelBuckets));
+ config.getConfig("stor-visitor").set(
+ "visitor_memory_usage_limit",
+ std::to_string(params._maxVisitorMemoryUsage));
+
+ // NOTE(review): the chmod/rm return values are ignored (best-effort
+ // cleanup of a previous run), while the mkdirs below are asserted.
+ system("chmod 755 vdsroot 2>/dev/null");
+ system("rm -rf vdsroot* 2>/dev/null");
+ assert(system("mkdir -p vdsroot/disks/d0") == 0);
+ assert(system("mkdir -p vdsroot/disks/d1") == 0);
+
+ try {
+ _messageSessionFactory.reset(
+ new TestVisitorMessageSessionFactory(config.getConfigId()));
+ if (params._autoReplyError.getCode() != mbus::ErrorCode::NONE) {
+ _messageSessionFactory->_autoReplyError = params._autoReplyError;
+ _messageSessionFactory->_createAutoReplyVisitorSessions = true;
+ }
+ _node.reset(new TestServiceLayerApp(config.getConfigId()));
+ _top.reset(new DummyStorageLink());
+ // _manager is a raw observer; ownership goes to _top's link chain.
+ _top->push_back(std::unique_ptr<StorageLink>(_manager
+ = new VisitorManager(
+ config.getConfigId(),
+ _node->getComponentRegister(), *_messageSessionFactory)));
+ _bottom = new DummyStorageLink();
+ _top->push_back(std::unique_ptr<StorageLink>(_bottom));
+ _manager->setTimeBetweenTicks(10);
+ _top->open();
+ } catch (config::InvalidConfigException& e) {
+ // NOTE(review): the exception is only logged and setup continues with
+ // a partially initialized fixture — presumably later use fails loudly;
+ // consider rethrowing or failing the test here.
+ fprintf(stderr, "%s\n", e.what());
+ }
+ // Filler payload so every generated document has a non-trivial body size.
+ std::string content(
+ "To be, or not to be: that is the question:\n"
+ "Whether 'tis nobler in the mind to suffer\n"
+ "The slings and arrows of outrageous fortune,\n"
+ "Or to take arms against a sea of troubles,\n"
+ "And by opposing end them? To die: to sleep;\n"
+ "No more; and by a sleep to say we end\n"
+ "The heart-ache and the thousand natural shocks\n"
+ "That flesh is heir to, 'tis a consummation\n"
+ "Devoutly to be wish'd. To die, to sleep;\n"
+ "To sleep: perchance to dream: ay, there's the rub;\n"
+ "For in that sleep of death what dreams may come\n"
+ "When we have shuffled off this mortal coil,\n"
+ "Must give us pause: there's the respect\n"
+ "That makes calamity of so long life;\n"
+ "For who would bear the whips and scorns of time,\n"
+ "The oppressor's wrong, the proud man's contumely,\n"
+ "The pangs of despised love, the law's delay,\n"
+ "The insolence of office and the spurns\n"
+ "That patient merit of the unworthy takes,\n"
+ "When he himself might his quietus make\n"
+ "With a bare bodkin? who would fardels bear,\n"
+ "To grunt and sweat under a weary life,\n"
+ "But that the dread of something after death,\n"
+ "The undiscover'd country from whose bourn\n"
+ "No traveller returns, puzzles the will\n"
+ "And makes us rather bear those ills we have\n"
+ "Than fly to others that we know not of?\n"
+ "Thus conscience does make cowards of us all;\n"
+ "And thus the native hue of resolution\n"
+ "Is sicklied o'er with the pale cast of thought,\n"
+ "And enterprises of great pith and moment\n"
+ "With this regard their currents turn awry,\n"
+ "And lose the name of action. - Soft you now!\n"
+ "The fair Ophelia! Nymph, in thy orisons\n"
+ "Be all my sins remember'd.\n");
+ _documents.clear();
+ for (uint32_t i=0; i<docCount; ++i) {
+ std::ostringstream uri;
+ // userdoc scheme: i % 10 selects the user (and hence the bucket).
+ uri << "userdoc:test:" << i % 10 << ":http://www.ntnu.no/"
+ << i << ".html";
+
+ _documents.push_back(document::Document::SP(
+ _node->getTestDocMan().createDocument(content, uri.str())));
+ const document::DocumentType& type(_documents.back()->getType());
+ // headerval in [0, 3] lets tests select subsets of the corpus.
+ _documents.back()->setValue(type.getField("headerval"),
+ document::IntFieldValue(i % 4));
+ }
+ LOG(debug, "Done initializing test");
+}
+
+// Closes and flushes the link chain, then releases all fixture state.
+// _manager and _bottom are owned by _top, so resetting _top destroys them;
+// the raw pointers are nulled to avoid dangling access between tests.
+void
+VisitorTest::tearDown()
+{
+ if (_top.get() != 0) {
+ _top->close();
+ _top->flush();
+ _top.reset(0);
+ }
+ _node.reset(0);
+ _messageSessionFactory.reset(0);
+ _manager = 0;
+}
+
+// Polls the visitor manager until it reports zero active visitors, retrying
+// up to 1000 times with a 10 ms pause between polls (roughly 10 seconds).
+// Returns false if visitors are still active when the retries run out.
+bool
+VisitorTest::waitUntilNoActiveVisitors()
+{
+    constexpr int maxAttempts = 1000;
+    for (int attempt = 0; attempt < maxAttempts; ++attempt) {
+        if (_manager->getActiveVisitorCount() == 0) {
+            return true;
+        }
+        std::this_thread::sleep_for(10ms);
+    }
+    return false;
+}
+
+// Returns visitor message session number n, waiting up to 30 seconds of real
+// time for the visitor thread to create it. Throws IllegalStateException on
+// timeout. The session vector is read under the factory's access lock since
+// the visitor thread appends to it concurrently.
+TestVisitorMessageSession&
+VisitorTest::getSession(uint32_t n)
+{
+    // Wait until we have started the visitor
+    const std::vector<TestVisitorMessageSession*>& sessions(
+            _messageSessionFactory->_visitorSessions);
+    framework::defaultimplementation::RealClock clock;
+    framework::MilliSecTime endTime(
+            clock.getTimeInMillis() + framework::MilliSecTime(30 * 1000));
+    while (true) {
+        {
+            vespalib::LockGuard lock(_messageSessionFactory->_accessLock);
+            if (sessions.size() > n) {
+                return *sessions[n];
+            }
+        }
+        if (clock.getTimeInMillis() > endTime) {
+            throw vespalib::IllegalStateException(
+                    "Timed out waiting for visitor session", VESPA_STRLOC);
+        }
+        std::this_thread::sleep_for(10ms);
+    }
+    // Not reachable; present to satisfy compilers that cannot prove the
+    // loop above never falls through.
+    throw std::logic_error("unreachable");
+}
+
+// Drains `expectedCount` messages from the visitor's client session one at a
+// time, records each payload into docs / docIds / infoMessages depending on
+// the message type, and replies to every message. When `result` is not OK,
+// each reply carries an error with that code and the text "Generic error".
+void
+VisitorTest::getMessagesAndReply(
+        int expectedCount,
+        TestVisitorMessageSession& session,
+        std::vector<document::Document::SP >& docs,
+        std::vector<document::DocumentId>& docIds,
+        std::vector<std::string>& infoMessages,
+        api::ReturnCode::Result result)
+{
+    for (int i = 0; i < expectedCount; i++) {
+        session.waitForMessages(1);
+        mbus::Reply::UP reply;
+        {
+            vespalib::MonitorGuard guard(session.getMonitor());
+            CPPUNIT_ASSERT(!session.sentMessages.empty());
+            vespalib::LinkedPtr<documentapi::DocumentMessage> msg(
+                    session.sentMessages.front());
+            // Visitor client messages are expected to be high priority (< 16).
+            CPPUNIT_ASSERT(msg->getPriority() < 16);
+
+            // Record the payload by message type; other types are ignored.
+            switch (msg->getType()) {
+            case documentapi::DocumentProtocol::MESSAGE_PUTDOCUMENT:
+                docs.push_back(
+                        static_cast<documentapi::PutDocumentMessage&>(*msg)
+                        .getDocument());
+                break;
+            case documentapi::DocumentProtocol::MESSAGE_REMOVEDOCUMENT:
+                docIds.push_back(
+                        static_cast<documentapi::RemoveDocumentMessage&>(*msg)
+                        .getDocumentId());
+                break;
+            case documentapi::DocumentProtocol::MESSAGE_VISITORINFO:
+                infoMessages.push_back(
+                        static_cast<documentapi::VisitorInfoMessage&>(*msg)
+                        .getErrorMessage());
+                break;
+            default:
+                break;
+            }
+
+            // The reply takes over the message's state and then the message
+            // object itself (ownership released from the linked ptr below).
+            reply = msg->createReply();
+            reply->swapState(*msg);
+
+            session.sentMessages.pop_front(); // Release linked ptr ref.
+            reply->setMessage(mbus::Message::UP(msg.release()));
+
+            if (result != api::ReturnCode::OK) {
+                reply->addError(mbus::Error(result, "Generic error"));
+            }
+        }
+        // Reply after the session monitor has been released.
+        session.reply(std::move(reply));
+    }
+}
+
+// Waits for exactly one VISITOR_CREATE_REPLY on the top link and verifies
+// its result code. Negative stats arguments mean "do not check that
+// statistic". Returns the reply's message id for correlation by callers.
+uint64_t
+VisitorTest::verifyCreateVisitorReply(
+        api::ReturnCode::Result expectedResult,
+        int checkStatsDocsVisited,
+        int checkStatsBytesVisited)
+{
+    _top->waitForMessages(1, 60);
+    const msg_ptr_vector received = _top->getRepliesOnce();
+    CPPUNIT_ASSERT_EQUAL(1, static_cast<int>(received.size()));
+
+    std::shared_ptr<api::StorageMessage> storageMsg(received[0]);
+    CPPUNIT_ASSERT_EQUAL(api::MessageType::VISITOR_CREATE_REPLY,
+                         storageMsg->getType());
+
+    auto createReply(
+            std::dynamic_pointer_cast<api::CreateVisitorReply>(storageMsg));
+    CPPUNIT_ASSERT(createReply.get());
+    CPPUNIT_ASSERT_EQUAL(expectedResult, createReply->getResult().getResult());
+
+    if (checkStatsDocsVisited >= 0) {
+        CPPUNIT_ASSERT_EQUAL(
+                checkStatsDocsVisited,
+                static_cast<int>(createReply->getVisitorStatistics()
+                                 .getDocumentsVisited()));
+    }
+    if (checkStatsBytesVisited >= 0) {
+        CPPUNIT_ASSERT_EQUAL(
+                checkStatsBytesVisited,
+                static_cast<int>(createReply->getVisitorStatistics()
+                                 .getBytesVisited()));
+    }
+
+    return createReply->getMsgId();
+}
+
+// Counts how many of the supplied documents match (both content and id) one
+// of the documents generated during test initialization.
+uint32_t
+VisitorTest::getMatchingDocuments(std::vector<document::Document::SP >& docs) {
+    uint32_t equalCount = 0;
+    for (const auto& candidate : docs) {
+        for (const auto& expected : _documents) {
+            if (*candidate == *expected &&
+                candidate->getId() == expected->getId())
+            {
+                ++equalCount;
+            }
+        }
+    }
+    return equalCount;
+}
+
+// Crafts and sends a GetIterReply up from the bottom link for the given
+// GetIter command.
+//
+// If `result` denotes a failure the reply carries only that result.
+// Otherwise it is filled with cloned test documents: `maxDocuments` of them
+// when non-zero, or the full test document set when zero. The reply is
+// flagged completed when it contains the full set, or when
+// `overrideCompleted` forces it.
+void
+VisitorTest::sendGetIterReply(GetIterCommand& cmd,
+                              const api::ReturnCode& result,
+                              uint32_t maxDocuments,
+                              bool overrideCompleted)
+{
+    GetIterReply::SP reply(new GetIterReply(cmd));
+    if (result.failed()) {
+        reply->setResult(result);
+        _bottom->sendUp(reply);
+        return;
+    }
+    // Requesting exactly all documents is legal (handled by the completed
+    // branch below); only reject requests for more documents than the
+    // fixture has. Was '<', which wrongly asserted on the equal case.
+    assert(maxDocuments <= _documents.size());
+    size_t documentCount = maxDocuments != 0 ? maxDocuments : _documents.size();
+    for (size_t i = 0; i < documentCount; ++i) {
+        reply->getEntries().push_back(
+                spi::DocEntry::LP(
+                        new spi::DocEntry(
+                                spi::Timestamp(1000 + i),
+                                spi::NONE,
+                                document::Document::UP(_documents[i]->clone()))));
+    }
+    if (documentCount == _documents.size() || overrideCompleted) {
+        reply->setCompleted();
+    }
+    _bottom->sendUp(reply);
+}
+
+// Waits for exactly `count` commands of type T on the given link and returns
+// them downcast to T. Fails the test with a diagnostic listing if the number
+// of commands or the dynamic type of any command does not match.
+template <typename T>
+std::vector<std::shared_ptr<T> >
+VisitorTest::fetchMultipleCommands(DummyStorageLink& link, size_t count)
+{
+    link.waitForMessages(count, 60);
+    std::vector<api::StorageMessage::SP> msgs(link.getCommandsOnce());
+    std::vector<std::shared_ptr<T> > fetched;
+    if (msgs.size() != count) {
+        std::ostringstream oss;
+        oss << "Expected "
+            << count
+            << " messages, got "
+            << msgs.size()
+            << ":\n";
+        for (size_t i = 0; i < msgs.size(); ++i) {
+            oss << i << ": " << *msgs[i] << "\n";
+        }
+        CPPUNIT_FAIL(oss.str());
+    }
+    for (size_t i = 0; i < count; ++i) {
+        std::shared_ptr<T> ret(std::dynamic_pointer_cast<T>(msgs[i]));
+        if (!ret) {
+            std::ostringstream oss;
+            oss << "Expected message of type "
+                << typeid(T).name()
+                << ", but got "
+                // Report the offending message, not unconditionally the
+                // first one (was msgs[0], which misreported for i > 0).
+                << msgs[i]->toString();
+            CPPUNIT_FAIL(oss.str());
+        }
+        fetched.push_back(ret);
+    }
+    return fetched;
+}
+
+// Convenience wrapper: waits for exactly one command of type T on the link
+// and returns it.
+template <typename T>
+std::shared_ptr<T>
+VisitorTest::fetchSingleCommand(DummyStorageLink& link)
+{
+    return fetchMultipleCommands<T>(link, 1).front();
+}
+
+// Builds a CreateVisitorCommand for bucket (16, 3), addressed to storage
+// node 0, with the visitor type taken from `options`.
+std::shared_ptr<api::CreateVisitorCommand>
+VisitorTest::makeCreateVisitor(const VisitorOptions& options)
+{
+    api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 0);
+    auto cmd = std::make_shared<api::CreateVisitorCommand>(
+            options.visitorType, "testvis", "");
+    cmd->addBucketToBeVisited(document::BucketId(16, 3));
+    cmd->setAddress(address);
+    cmd->setMaximumPendingReplyCount(UINT32_MAX);
+    cmd->setControlDestination("foo/bar");
+    return cmd;
+}
+
+// Consumes the pending CreateIteratorCommand from the bottom link and
+// answers it with a reply carrying the given iterator id.
+void
+VisitorTest::sendCreateIteratorReply(uint64_t iteratorId)
+{
+    auto createCmd = fetchSingleCommand<CreateIteratorCommand>(*_bottom);
+    api::StorageReply::SP reply(
+            new CreateIteratorReply(*createCmd, spi::IteratorId(iteratorId)));
+    _bottom->sendUp(reply);
+}
+
+// Happy path: a CreateVisitor command triggers a high-priority
+// CreateIterator, a GetIter round delivering all test documents as Puts to
+// the client session, a DestroyIterator, and finally an OK
+// CreateVisitorReply with no failed destination replies counted.
+void
+VisitorTest::testNormalUsage()
+{
+    initializeTest();
+    std::shared_ptr<api::CreateVisitorCommand> cmd(
+            makeCreateVisitor());
+    _top->sendDown(cmd);
+
+    CreateIteratorCommand::SP createCmd(
+            fetchSingleCommand<CreateIteratorCommand>(*_bottom));
+    CPPUNIT_ASSERT_EQUAL(uint8_t(0), createCmd->getPriority()); // Highest pri
+    spi::IteratorId id(1234);
+    api::StorageReply::SP reply(
+            new CreateIteratorReply(*createCmd, id));
+    _bottom->sendUp(reply);
+
+    GetIterCommand::SP getIterCmd(
+            fetchSingleCommand<GetIterCommand>(*_bottom));
+    CPPUNIT_ASSERT_EQUAL(spi::IteratorId(1234),
+                         getIterCmd->getIteratorId());
+
+    sendGetIterReply(*getIterCmd);
+
+    std::vector<document::Document::SP> docs;
+    std::vector<document::DocumentId> docIds;
+    std::vector<std::string> infoMessages;
+    // All documents arrive as Puts: no removes, no info messages.
+    getMessagesAndReply(_documents.size(), getSession(0), docs, docIds, infoMessages);
+    CPPUNIT_ASSERT_EQUAL(size_t(0), infoMessages.size());
+    CPPUNIT_ASSERT_EQUAL(size_t(0), docIds.size());
+
+    DestroyIteratorCommand::SP destroyIterCmd(
+            fetchSingleCommand<DestroyIteratorCommand>(*_bottom));
+
+    verifyCreateVisitorReply(api::ReturnCode::OK);
+    CPPUNIT_ASSERT(waitUntilNoActiveVisitors());
+    CPPUNIT_ASSERT_EQUAL(0L, getFailedVisitorDestinationReplyCount());
+}
+
+// A failed CreateIterator (INTERNAL_FAILURE) must fail the visitor with the
+// same result code and zero documents/bytes visited.
+void
+VisitorTest::testFailedCreateIterator()
+{
+    initializeTest();
+    std::shared_ptr<api::CreateVisitorCommand> cmd(
+            makeCreateVisitor());
+    cmd->addBucketToBeVisited(document::BucketId(16, 4));
+    _top->sendDown(cmd);
+
+    CreateIteratorCommand::SP createCmd(
+            fetchSingleCommand<CreateIteratorCommand>(*_bottom));
+    spi::IteratorId id(0);
+    api::StorageReply::SP reply(
+            new CreateIteratorReply(*createCmd, id));
+    reply->setResult(api::ReturnCode(api::ReturnCode::INTERNAL_FAILURE));
+    _bottom->sendUp(reply);
+
+    verifyCreateVisitorReply(api::ReturnCode::INTERNAL_FAILURE, 0, 0);
+    CPPUNIT_ASSERT(waitUntilNoActiveVisitors());
+}
+
+// A failed GetIter (BUCKET_NOT_FOUND) must still destroy the iterator and
+// fail the visitor with the same result code and empty statistics.
+void
+VisitorTest::testFailedGetIter()
+{
+    initializeTest();
+    std::shared_ptr<api::CreateVisitorCommand> cmd(
+            makeCreateVisitor());
+    _top->sendDown(cmd);
+    sendCreateIteratorReply();
+
+    GetIterCommand::SP getIterCmd(
+            fetchSingleCommand<GetIterCommand>(*_bottom));
+    CPPUNIT_ASSERT_EQUAL(spi::IteratorId(1234),
+                         getIterCmd->getIteratorId());
+
+    sendGetIterReply(*getIterCmd,
+                     api::ReturnCode(api::ReturnCode::BUCKET_NOT_FOUND));
+
+    DestroyIteratorCommand::SP destroyIterCmd(
+            fetchSingleCommand<DestroyIteratorCommand>(*_bottom));
+
+    verifyCreateVisitorReply(api::ReturnCode::BUCKET_NOT_FOUND, 0, 0);
+    CPPUNIT_ASSERT(waitUntilNoActiveVisitors());
+}
+
+// With two iterators per bucket, the visitor must wait for ALL pending
+// GetIter replies before sending DestroyIterator, even after the first one
+// has already failed.
+void
+VisitorTest::testMultipleFailedGetIter()
+{
+    initializeTest(TestParams().iteratorsPerBucket(2));
+    std::shared_ptr<api::CreateVisitorCommand> cmd(
+            makeCreateVisitor());
+    _top->sendDown(cmd);
+    sendCreateIteratorReply();
+
+    std::vector<GetIterCommand::SP> getIterCmds(
+            fetchMultipleCommands<GetIterCommand>(*_bottom, 2));
+
+    sendGetIterReply(*getIterCmds[0],
+                     api::ReturnCode(api::ReturnCode::BUCKET_NOT_FOUND));
+
+    // Wait for an "appropriate" amount of time so that wrongful logic
+    // will send a DestroyIteratorCommand before all pending GetIters
+    // have been replied to.
+    std::this_thread::sleep_for(100ms);
+
+    // No DestroyIterator may have been sent while one GetIter is pending.
+    CPPUNIT_ASSERT_EQUAL(size_t(0), _bottom->getNumCommands());
+
+    sendGetIterReply(*getIterCmds[1],
+                     api::ReturnCode(api::ReturnCode::BUCKET_DELETED));
+
+    DestroyIteratorCommand::SP destroyIterCmd(
+            fetchSingleCommand<DestroyIteratorCommand>(*_bottom));
+
+    // The last failure code (BUCKET_DELETED) is what the reply reports.
+    verifyCreateVisitorReply(api::ReturnCode::BUCKET_DELETED, 0, 0);
+    CPPUNIT_ASSERT(waitUntilNoActiveVisitors());
+}
+
+// A critical document API error (INTERNAL_FAILURE) from the client must not
+// produce a visitor info message; visiting continues with another GetIter
+// round and the visitor ultimately fails with INTERNAL_FAILURE.
+void
+VisitorTest::testDocumentAPIClientError()
+{
+    initializeTest();
+    std::shared_ptr<api::CreateVisitorCommand> cmd(
+            makeCreateVisitor());
+    _top->sendDown(cmd);
+    sendCreateIteratorReply();
+
+    {
+        GetIterCommand::SP getIterCmd(
+                fetchSingleCommand<GetIterCommand>(*_bottom));
+        CPPUNIT_ASSERT_EQUAL(spi::IteratorId(1234),
+                             getIterCmd->getIteratorId());
+
+        sendGetIterReply(*getIterCmd, api::ReturnCode(api::ReturnCode::OK), 1);
+    }
+
+    std::vector<document::Document::SP> docs;
+    std::vector<document::DocumentId> docIds;
+    std::vector<std::string> infoMessages;
+    getMessagesAndReply(1, getSession(0), docs, docIds, infoMessages,
+                        api::ReturnCode::INTERNAL_FAILURE);
+    // INTERNAL_FAILURE is critical, so no visitor info sent
+    CPPUNIT_ASSERT_EQUAL(size_t(0), infoMessages.size());
+
+    std::this_thread::sleep_for(100ms);
+
+    {
+        GetIterCommand::SP getIterCmd(
+                fetchSingleCommand<GetIterCommand>(*_bottom));
+        CPPUNIT_ASSERT_EQUAL(spi::IteratorId(1234),
+                             getIterCmd->getIteratorId());
+
+        sendGetIterReply(*getIterCmd);
+    }
+
+    DestroyIteratorCommand::SP destroyIterCmd(
+            fetchSingleCommand<DestroyIteratorCommand>(*_bottom));
+
+    verifyCreateVisitorReply(api::ReturnCode::INTERNAL_FAILURE);
+    CPPUNIT_ASSERT(waitUntilNoActiveVisitors());
+}
+
+// Failing client replies with non-critical NOT_CONNECTED must yield exactly
+// one visitor info message; failing that too fails the whole visitor, and
+// all three failed destination replies are counted — no further resends.
+void
+VisitorTest::testNoDocumentAPIResendingForFailedVisitor()
+{
+    initializeTest();
+    std::shared_ptr<api::CreateVisitorCommand> cmd(
+            makeCreateVisitor());
+    _top->sendDown(cmd);
+    sendCreateIteratorReply();
+
+    {
+        GetIterCommand::SP getIterCmd(
+                fetchSingleCommand<GetIterCommand>(*_bottom));
+        CPPUNIT_ASSERT_EQUAL(spi::IteratorId(1234),
+                             getIterCmd->getIteratorId());
+
+        sendGetIterReply(*getIterCmd, api::ReturnCode(api::ReturnCode::OK), 2, true);
+    }
+
+    std::vector<document::Document::SP> docs;
+    std::vector<document::DocumentId> docIds;
+    std::vector<std::string> infoMessages;
+    // Use non-critical result. Visitor info message should be received
+    // after we send a NOT_CONNECTED reply. Failing this message as well
+    // should cause the entire visitor to fail.
+    getMessagesAndReply(3, getSession(0), docs, docIds, infoMessages,
+                        api::ReturnCode::NOT_CONNECTED);
+    CPPUNIT_ASSERT_EQUAL(size_t(1), infoMessages.size());
+    CPPUNIT_ASSERT_EQUAL(
+            std::string("[From content node 0] NOT_CONNECTED: Generic error"),
+            infoMessages[0]);
+
+    DestroyIteratorCommand::SP destroyIterCmd(
+            fetchSingleCommand<DestroyIteratorCommand>(*_bottom));
+
+    verifyCreateVisitorReply(api::ReturnCode::NOT_CONNECTED);
+    CPPUNIT_ASSERT(waitUntilNoActiveVisitors());
+    CPPUNIT_ASSERT_EQUAL(3L, getFailedVisitorDestinationReplyCount());
+}
+
+// With two buckets visited in parallel: when one CreateIterator fails the
+// visitor, the other, successfully created iterator must still be destroyed
+// right away rather than left dangling.
+void
+VisitorTest::testIteratorCreatedForFailedVisitor()
+{
+    initializeTest(TestParams().iteratorsPerBucket(1).parallelBuckets(2));
+    std::shared_ptr<api::CreateVisitorCommand> cmd(
+            makeCreateVisitor());
+    cmd->addBucketToBeVisited(document::BucketId(16, 4));
+    _top->sendDown(cmd);
+
+    std::vector<CreateIteratorCommand::SP> createCmds(
+            fetchMultipleCommands<CreateIteratorCommand>(*_bottom, 2));
+    {
+        // First bucket's iterator creation fails outright.
+        spi::IteratorId id(0);
+        api::StorageReply::SP reply(
+                new CreateIteratorReply(*createCmds[0], id));
+        reply->setResult(api::ReturnCode(api::ReturnCode::INTERNAL_FAILURE));
+        _bottom->sendUp(reply);
+    }
+    {
+        // Second bucket's iterator creation succeeds.
+        spi::IteratorId id(1234);
+        api::StorageReply::SP reply(
+                new CreateIteratorReply(*createCmds[1], id));
+        _bottom->sendUp(reply);
+    }
+    // Want to immediately receive destroyiterator for newly created
+    // iterator, since we cannot use it anyway when the visitor has failed.
+    DestroyIteratorCommand::SP destroyCmd(
+            fetchSingleCommand<DestroyIteratorCommand>(*_bottom));
+
+    verifyCreateVisitorReply(api::ReturnCode::INTERNAL_FAILURE, 0, 0);
+    CPPUNIT_ASSERT(waitUntilNoActiveVisitors());
+}
+
+/**
+ * Test that if a visitor fails to send a document API message outright
+ * (i.e. a case where it will never get a reply), the session is failed
+ * and the visitor terminates cleanly without counting the failed message
+ * as pending.
+ */
+void
+VisitorTest::testFailedDocumentAPISend()
+{
+    // All session sends are auto-failed with HANDSHAKE_FAILED by the factory.
+    initializeTest(TestParams().autoReplyError(
+            mbus::Error(mbus::ErrorCode::HANDSHAKE_FAILED,
+                        "abandon ship!")));
+    std::shared_ptr<api::CreateVisitorCommand> cmd(
+            makeCreateVisitor());
+    cmd->addBucketToBeVisited(document::BucketId(16, 4));
+    _top->sendDown(cmd);
+
+    sendCreateIteratorReply();
+    GetIterCommand::SP getIterCmd(
+            fetchSingleCommand<GetIterCommand>(*_bottom));
+    CPPUNIT_ASSERT_EQUAL(spi::IteratorId(1234),
+                         getIterCmd->getIteratorId());
+    sendGetIterReply(*getIterCmd,
+                     api::ReturnCode(api::ReturnCode::OK),
+                     2,
+                     true);
+
+    DestroyIteratorCommand::SP destroyIterCmd(
+            fetchSingleCommand<DestroyIteratorCommand>(*_bottom));
+
+    // The mbus error code is propagated verbatim into the visitor reply.
+    verifyCreateVisitorReply(
+            static_cast<api::ReturnCode::Result>(
+                    mbus::ErrorCode::HANDSHAKE_FAILED),
+            0,
+            0);
+    CPPUNIT_ASSERT(waitUntilNoActiveVisitors());
+    // We currently don't count failures to send in this metric; send failures
+    // indicate a message bus problem and already log a warning when they happen
+    CPPUNIT_ASSERT_EQUAL(0L, getFailedVisitorDestinationReplyCount());
+}
+
+// Starts a visitor and answers its CreateIterator plus one GetIter round
+// returning a single document with the completed flag forced on.
+void
+VisitorTest::sendInitialCreateVisitorAndGetIterRound()
+{
+    std::shared_ptr<api::CreateVisitorCommand> cmd(
+            makeCreateVisitor());
+    _top->sendDown(cmd);
+    sendCreateIteratorReply();
+
+    {
+        GetIterCommand::SP getIterCmd(
+                fetchSingleCommand<GetIterCommand>(*_bottom));
+        sendGetIterReply(*getIterCmd, api::ReturnCode(api::ReturnCode::OK),
+                         1, true);
+    }
+}
+
+// Transient client errors (BUCKET_DELETED, BUCKET_NOT_FOUND, SESSION_BUSY,
+// WRONG_DISTRIBUTION) must trigger silent resends of the Put, not visitor
+// info messages to the client.
+void
+VisitorTest::testNoVisitorNotificationForTransientFailures()
+{
+    initializeTest();
+    sendInitialCreateVisitorAndGetIterRound();
+
+    std::vector<document::Document::SP> docs;
+    std::vector<document::DocumentId> docIds;
+    std::vector<std::string> infoMessages;
+    // Have to make sure time increases in visitor thread so that resend
+    // times are reached.
+    _node->getClock().setFakeCycleMode();
+    // Should not get info message for BUCKET_DELETED, but resend of Put.
+    getMessagesAndReply(1, getSession(0), docs, docIds, infoMessages,
+                        api::ReturnCode::BUCKET_DELETED);
+    CPPUNIT_ASSERT_EQUAL(size_t(0), infoMessages.size());
+    // Should not get info message for BUCKET_NOT_FOUND, but resend of Put.
+    getMessagesAndReply(1, getSession(0), docs, docIds, infoMessages,
+                        api::ReturnCode::BUCKET_NOT_FOUND);
+    CPPUNIT_ASSERT_EQUAL(size_t(0), infoMessages.size());
+    // MessageBus error codes guaranteed to fit in return code result.
+    // Should not get info message for SESSION_BUSY, but resend of Put.
+    getMessagesAndReply(1, getSession(0), docs, docIds, infoMessages,
+                        static_cast<api::ReturnCode::Result>(
+                                mbus::ErrorCode::SESSION_BUSY));
+    CPPUNIT_ASSERT_EQUAL(size_t(0), infoMessages.size());
+    // WRONG_DISTRIBUTION should not be reported, as it will happen all the
+    // time when initiating remote migrations et al.
+    getMessagesAndReply(1, getSession(0), docs, docIds, infoMessages,
+                        api::ReturnCode::WRONG_DISTRIBUTION);
+    CPPUNIT_ASSERT_EQUAL(size_t(0), infoMessages.size());
+
+    // Complete message successfully to finish the visitor.
+    getMessagesAndReply(1, getSession(0), docs, docIds, infoMessages,
+                        api::ReturnCode::OK);
+    CPPUNIT_ASSERT_EQUAL(size_t(0), infoMessages.size());
+
+    fetchSingleCommand<DestroyIteratorCommand>(*_bottom);
+
+    verifyCreateVisitorReply(api::ReturnCode::OK);
+    CPPUNIT_ASSERT(waitUntilNoActiveVisitors());
+}
+
+// After TRANSIENT_ERROR_RETRIES_BEFORE_NOTIFY consecutive transient failures
+// on the same message, exactly one visitor info notification must be sent to
+// the client alongside the resend.
+void
+VisitorTest::testNotificationSentIfTransientErrorRetriedManyTimes()
+{
+    constexpr size_t retries(
+            Visitor::TRANSIENT_ERROR_RETRIES_BEFORE_NOTIFY);
+
+    initializeTest();
+    sendInitialCreateVisitorAndGetIterRound();
+
+    std::vector<document::Document::SP> docs;
+    std::vector<document::DocumentId> docIds;
+    std::vector<std::string> infoMessages;
+    // Have to make sure time increases in visitor thread so that resend
+    // times are reached.
+    _node->getClock().setFakeCycleMode();
+    for (size_t attempt = 0; attempt < retries; ++attempt) {
+        getMessagesAndReply(1, getSession(0), docs, docIds, infoMessages,
+                            api::ReturnCode::WRONG_DISTRIBUTION);
+        CPPUNIT_ASSERT_EQUAL(size_t(0), infoMessages.size());
+    }
+    // Should now have a client notification along for the ride.
+    // This has to be ACKed as OK or the visitor will fail.
+    getMessagesAndReply(2, getSession(0), docs, docIds, infoMessages,
+                        api::ReturnCode::OK);
+    CPPUNIT_ASSERT_EQUAL(size_t(1), infoMessages.size());
+    // TODO(vekterli) ideally we'd want to test that this happens only once
+    // per message, but this seems frustratingly complex to do currently.
+    fetchSingleCommand<DestroyIteratorCommand>(*_bottom);
+
+    verifyCreateVisitorReply(api::ReturnCode::OK);
+    CPPUNIT_ASSERT(waitUntilNoActiveVisitors());
+}
+
+// Runs a full single-document visiting session for `cmd` (iterator creation,
+// one GetIter round, one client message, iterator destruction) and returns
+// the resulting CreateVisitorReply for further inspection by the caller.
+std::shared_ptr<api::CreateVisitorReply>
+VisitorTest::doCompleteVisitingSession(
+        const std::shared_ptr<api::CreateVisitorCommand>& cmd)
+{
+    initializeTest();
+    _top->sendDown(cmd);
+    sendCreateIteratorReply();
+
+    GetIterCommand::SP getIterCmd(
+            fetchSingleCommand<GetIterCommand>(*_bottom));
+    sendGetIterReply(*getIterCmd,
+                     api::ReturnCode(api::ReturnCode::OK),
+                     1,
+                     true);
+
+    std::vector<document::Document::SP> docs;
+    std::vector<document::DocumentId> docIds;
+    std::vector<std::string> infoMessages;
+    getMessagesAndReply(1, getSession(0), docs, docIds, infoMessages);
+
+    DestroyIteratorCommand::SP destroyIterCmd(
+            fetchSingleCommand<DestroyIteratorCommand>(*_bottom));
+
+    _top->waitForMessages(1, 60);
+    const msg_ptr_vector replies = _top->getRepliesOnce();
+    CPPUNIT_ASSERT_EQUAL(size_t(1), replies.size());
+
+    std::shared_ptr<api::StorageMessage> msg(replies[0]);
+
+    CPPUNIT_ASSERT_EQUAL(api::MessageType::VISITOR_CREATE_REPLY,
+                         msg->getType());
+    return std::dynamic_pointer_cast<api::CreateVisitorReply>(msg);
+}
+
+// Trace level 0 on the CreateVisitor command must yield an empty trace tree
+// in the reply.
+void
+VisitorTest::testNoMbusTracingIfTraceLevelIsZero()
+{
+    std::shared_ptr<api::CreateVisitorCommand> cmd(makeCreateVisitor());
+    cmd->getTrace().setLevel(0);
+    auto reply = doCompleteVisitingSession(cmd);
+    CPPUNIT_ASSERT(reply->getTrace().getRoot().isEmpty());
+}
+
+// A non-zero trace level on the CreateVisitor command must produce a
+// non-empty trace tree in the reply.
+void
+VisitorTest::testReplyContainsTraceIfTraceLevelAboveZero()
+{
+    std::shared_ptr<api::CreateVisitorCommand> cmd(makeCreateVisitor());
+    cmd->getTrace().setLevel(1);
+    auto reply = doCompleteVisitingSession(cmd);
+    CPPUNIT_ASSERT(!reply->getTrace().getRoot().isEmpty());
+}
+
+// With a 1-byte visitor memory limit, a pending client message must block
+// further GetIter commands until it has been replied to; only then is the
+// next GetIter round allowed.
+void
+VisitorTest::testNoMoreIteratorsSentWhileMemoryUsedAboveLimit()
+{
+    initializeTest(TestParams().maxVisitorMemoryUsage(1)
+                   .parallelBuckets(1)
+                   .iteratorsPerBucket(1));
+    std::shared_ptr<api::CreateVisitorCommand> cmd(
+            makeCreateVisitor());
+    _top->sendDown(cmd);
+    sendCreateIteratorReply();
+
+    GetIterCommand::SP getIterCmd(
+            fetchSingleCommand<GetIterCommand>(*_bottom));
+    sendGetIterReply(*getIterCmd,
+                     api::ReturnCode(api::ReturnCode::OK),
+                     1);
+
+    // Pending Document API message towards client; memory usage should prevent
+    // visitor from sending down additional GetIter messages until the pending
+    // client message has been replied to and cleared from the internal state.
+    getSession(0).waitForMessages(1);
+    // Note that it's possible for this test to exhibit false negatives (but not
+    // false positives) since the _absence_ of a message means we don't have any
+    // kind of explicit barrier with which we can synchronize the test and the
+    // running visitor thread.
+    std::this_thread::sleep_for(100ms);
+    CPPUNIT_ASSERT_EQUAL(size_t(0), _bottom->getNumCommands());
+
+    std::vector<document::Document::SP> docs;
+    std::vector<document::DocumentId> docIds;
+    std::vector<std::string> infoMessages;
+    getMessagesAndReply(1, getSession(0), docs, docIds, infoMessages);
+
+    // 2nd round of GetIter now allowed. Send reply indicating completion.
+    getIterCmd = fetchSingleCommand<GetIterCommand>(*_bottom);
+    sendGetIterReply(*getIterCmd,
+                     api::ReturnCode(api::ReturnCode::OK),
+                     1,
+                     true);
+
+    getMessagesAndReply(1, getSession(0), docs, docIds, infoMessages);
+
+    DestroyIteratorCommand::SP destroyIterCmd(
+            fetchSingleCommand<DestroyIteratorCommand>(*_bottom));
+
+    verifyCreateVisitorReply(api::ReturnCode::OK);
+    CPPUNIT_ASSERT(waitUntilNoActiveVisitors());
+}
+
+// Starts a visitor of the given type and asserts that the resulting
+// CreateIteratorCommand carries the expected read consistency level.
+void
+VisitorTest::doTestVisitorInstanceHasConsistencyLevel(
+        vespalib::stringref visitorType,
+        spi::ReadConsistency expectedConsistency)
+{
+    initializeTest();
+    std::shared_ptr<api::CreateVisitorCommand> cmd(
+            makeCreateVisitor(VisitorOptions().withVisitorType(visitorType)));
+    _top->sendDown(cmd);
+
+    auto createCmd = fetchSingleCommand<CreateIteratorCommand>(*_bottom);
+    CPPUNIT_ASSERT_EQUAL(expectedConsistency,
+                         createCmd->getReadConsistency());
+}
+
+// The dump visitor must request STRONG read consistency for its iteration.
+void
+VisitorTest::testDumpVisitorInvokesStrongReadConsistencyIteration()
+{
+    doTestVisitorInstanceHasConsistencyLevel(
+            "dumpvisitor", spi::ReadConsistency::STRONG);
+}
+
+// NOTE: SearchVisitor cannot be tested here since it's in a separate module
+// which depends on _this_ module for compilation. Instead we let TestVisitor
+// use weak consistency, as this is just some internal stuff not used for/by
+// any external client use cases. Our primary concern is to test that each
+// visitor subclass might report its own read consistency requirement and that
+// this is carried along to the CreateIteratorCommand.
+void
+VisitorTest::testTestVisitorInvokesWeakReadConsistencyIteration()
+{
+    doTestVisitorInstanceHasConsistencyLevel(
+            "testvisitor", spi::ReadConsistency::WEAK);
+}
+
+} // namespace storage