path: root/storage
authorTor Brede Vekterli <vekterli@verizonmedia.com>2019-06-12 11:10:34 +0000
committerTor Brede Vekterli <vekterli@verizonmedia.com>2019-06-12 13:36:48 +0000
commit42962a7e289842579849c47b46ee406061397c0e (patch)
tree42ff76361ae629b48c659ec739bcd7bf5dbca61a /storage
parentf190b82bc3eb6ab18852977b4277b9993d68417f (diff)
Convert persistence tests from CppUnit to GTest
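The conversion follows the same mechanical pattern in every file touched below: the CppUnit fixture base class, suite/registration macros and setUp()/tearDown() hooks are replaced by a ::testing::Test fixture with SetUp()/TearDown() overrides and TEST_F cases, and CPPUNIT_ASSERT_*/CPPUNIT_FAIL become EXPECT_*/ASSERT_*/FAIL() with streamed messages. A minimal sketch of the before/after shape (fixture and test names are placeholders, not taken from this patch; the tests in this tree pull in gtest via <vespa/vespalib/gtest/gtest.h>):

#include <gtest/gtest.h>  // in this repo: #include <vespa/vespalib/gtest/gtest.h>

// Before (CppUnit):
//   class MyTest : public CppUnit::TestFixture {
//       CPPUNIT_TEST_SUITE(MyTest);
//       CPPUNIT_TEST(testSimple);
//       CPPUNIT_TEST_SUITE_END();
//   public:
//       void setUp() override;
//       void tearDown() override;
//       void testSimple();
//   };
//   CPPUNIT_TEST_SUITE_REGISTRATION(MyTest);

// After (GTest): no suite or registration macros; the fixture derives from
// ::testing::Test and each test method becomes a TEST_F.
struct MyTest : ::testing::Test {
    void SetUp() override    {}  // was setUp()
    void TearDown() override {}  // was tearDown()
};

TEST_F(MyTest, simple) {
    EXPECT_EQ(4, 2 + 2);  // was CPPUNIT_ASSERT_EQUAL(expected, actual)
    ASSERT_TRUE(true);    // was CPPUNIT_ASSERT(expr)
    // CPPUNIT_FAIL(msg) becomes FAIL() << msg, streaming instead of building strings
}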
Diffstat (limited to 'storage')
-rw-r--r--  storage/src/tests/CMakeLists.txt |    2
-rw-r--r--  storage/src/tests/persistence/CMakeLists.txt |   13
-rw-r--r--  storage/src/tests/persistence/common/CMakeLists.txt |    1
-rw-r--r--  storage/src/tests/persistence/common/filestortestfixture.cpp |    8
-rw-r--r--  storage/src/tests/persistence/common/filestortestfixture.h |   19
-rw-r--r--  storage/src/tests/persistence/diskmoveoperationhandlertest.cpp |   27
-rw-r--r--  storage/src/tests/persistence/filestorage/CMakeLists.txt |   11
-rw-r--r--  storage/src/tests/persistence/filestorage/deactivatebucketstest.cpp |   25
-rw-r--r--  storage/src/tests/persistence/filestorage/deletebuckettest.cpp |   26
-rw-r--r--  storage/src/tests/persistence/filestorage/filestormanagertest.cpp | 1531
-rw-r--r--  storage/src/tests/persistence/filestorage/filestormodifiedbucketstest.cpp |   51
-rw-r--r--  storage/src/tests/persistence/filestorage/mergeblockingtest.cpp |  160
-rw-r--r--  storage/src/tests/persistence/filestorage/modifiedbucketcheckertest.cpp |  118
-rw-r--r--  storage/src/tests/persistence/filestorage/operationabortingtest.cpp |  172
-rw-r--r--  storage/src/tests/persistence/filestorage/sanitycheckeddeletetest.cpp |   34
-rw-r--r--  storage/src/tests/persistence/filestorage/singlebucketjointest.cpp |   21
-rw-r--r--  storage/src/tests/persistence/mergehandlertest.cpp |  773
-rw-r--r--  storage/src/tests/persistence/persistencequeuetest.cpp |   82
-rw-r--r--  storage/src/tests/persistence/persistencetestutils.cpp |    9
-rw-r--r--  storage/src/tests/persistence/persistencetestutils.h |    7
-rw-r--r--  storage/src/tests/persistence/persistencethread_splittest.cpp |  121
-rw-r--r--  storage/src/tests/persistence/processalltest.cpp |  148
-rw-r--r--  storage/src/tests/persistence/provider_error_wrapper_test.cpp |   61
-rw-r--r--  storage/src/tests/persistence/splitbitdetectortest.cpp |  254
-rw-r--r--  storage/src/tests/persistence/testandsettest.cpp |  153
25 files changed, 1254 insertions, 2573 deletions
diff --git a/storage/src/tests/CMakeLists.txt b/storage/src/tests/CMakeLists.txt
index 0e0a46f969e..53113ea0eb1 100644
--- a/storage/src/tests/CMakeLists.txt
+++ b/storage/src/tests/CMakeLists.txt
@@ -11,8 +11,6 @@ vespa_add_executable(storage_testrunner_app TEST
storage_testcommon
storage_testhostreporter
storage_testdistributor
- storage_testpersistence
- storage_testfilestorage
)
vespa_add_test(
diff --git a/storage/src/tests/persistence/CMakeLists.txt b/storage/src/tests/persistence/CMakeLists.txt
index e71cf10962a..76361e1d419 100644
--- a/storage/src/tests/persistence/CMakeLists.txt
+++ b/storage/src/tests/persistence/CMakeLists.txt
@@ -1,6 +1,8 @@
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_library(storage_testpersistence TEST
+
+vespa_add_executable(storage_persistence_gtest_runner_app TEST
SOURCES
+ bucketownershipnotifiertest.cpp
diskmoveoperationhandlertest.cpp
mergehandlertest.cpp
persistencequeuetest.cpp
@@ -10,15 +12,6 @@ vespa_add_library(storage_testpersistence TEST
provider_error_wrapper_test.cpp
splitbitdetectortest.cpp
testandsettest.cpp
- DEPENDS
- storage
- storage_testdistributor
- storage_testpersistence_common
-)
-
-vespa_add_executable(storage_persistence_gtest_runner_app TEST
- SOURCES
- bucketownershipnotifiertest.cpp
gtest_runner.cpp
DEPENDS
storage
diff --git a/storage/src/tests/persistence/common/CMakeLists.txt b/storage/src/tests/persistence/common/CMakeLists.txt
index 7910336c141..53ec3fd7c0c 100644
--- a/storage/src/tests/persistence/common/CMakeLists.txt
+++ b/storage/src/tests/persistence/common/CMakeLists.txt
@@ -4,6 +4,7 @@ vespa_add_library(storage_testpersistence_common TEST
filestortestfixture.cpp
persistenceproviderwrapper.cpp
DEPENDS
+ gtest
persistence
storage_testcommon
)
diff --git a/storage/src/tests/persistence/common/filestortestfixture.cpp b/storage/src/tests/persistence/common/filestortestfixture.cpp
index 835b8ef1044..63b7885fc53 100644
--- a/storage/src/tests/persistence/common/filestortestfixture.cpp
+++ b/storage/src/tests/persistence/common/filestortestfixture.cpp
@@ -34,7 +34,7 @@ FileStorTestFixture::setupPersistenceThreads(uint32_t threads)
// Default provider setup which should work out of the box for most tests.
void
-FileStorTestFixture::setUp()
+FileStorTestFixture::SetUp()
{
setupPersistenceThreads(1);
_node->setPersistenceProvider(
@@ -42,7 +42,7 @@ FileStorTestFixture::setUp()
}
void
-FileStorTestFixture::tearDown()
+FileStorTestFixture::TearDown()
{
_node.reset();
}
@@ -73,10 +73,8 @@ FileStorTestFixture::bucketExistsInDb(const document::BucketId& bucket) const
FileStorTestFixture::TestFileStorComponents::TestFileStorComponents(
FileStorTestFixture& fixture,
- const char* testName,
const StorageLinkInjector& injector)
- : _testName(testName),
- _fixture(fixture),
+ : _fixture(fixture),
manager(new FileStorManager(fixture._config->getConfigId(),
fixture._node->getPartitions(),
fixture._node->getPersistenceProvider(),
diff --git a/storage/src/tests/persistence/common/filestortestfixture.h b/storage/src/tests/persistence/common/filestortestfixture.h
index c46f9de24fc..a8c32a409ec 100644
--- a/storage/src/tests/persistence/common/filestortestfixture.h
+++ b/storage/src/tests/persistence/common/filestortestfixture.h
@@ -9,10 +9,11 @@
#include <tests/common/dummystoragelink.h>
#include <tests/common/teststorageapp.h>
#include <tests/common/testhelper.h>
+#include <vespa/vespalib/gtest/gtest.h>
namespace storage {
-class FileStorTestFixture : public CppUnit::TestFixture
+class FileStorTestFixture : public ::testing::Test
{
public:
static spi::LoadType defaultLoadType;
@@ -26,8 +27,8 @@ public:
typedef uint32_t DocumentIndex;
typedef uint64_t PutTimestamp;
- void setUp() override;
- void tearDown() override;
+ void SetUp() override;
+ void TearDown() override;
void setupPersistenceThreads(uint32_t diskCount);
void createBucket(const document::BucketId& bid);
bool bucketExistsInDb(const document::BucketId& bucket) const;
@@ -53,7 +54,7 @@ public:
void
expectNoReplies(DummyStorageLink& link) {
- CPPUNIT_ASSERT_EQUAL(size_t(0), link.getNumReplies());
+ EXPECT_EQ(0, link.getNumReplies());
}
template <typename ReplyType>
@@ -65,12 +66,10 @@ public:
api::StorageReply* reply(
dynamic_cast<ReplyType*>(link.getReply(0).get()));
if (reply == 0) {
- std::ostringstream ss;
- ss << "got unexpected reply "
- << link.getReply(0)->toString(true);
- CPPUNIT_FAIL(ss.str());
+ FAIL() << "got unexpected reply "
+ << link.getReply(0)->toString(true);
}
- CPPUNIT_ASSERT_EQUAL(result, reply->getResult().getResult());
+ EXPECT_EQ(result, reply->getResult().getResult());
}
template <typename ReplyType>
@@ -89,14 +88,12 @@ public:
struct TestFileStorComponents
{
private:
- TestName _testName;
FileStorTestFixture& _fixture;
public:
DummyStorageLink top;
FileStorManager* manager;
TestFileStorComponents(FileStorTestFixture& fixture,
- const char* testName,
const StorageLinkInjector& i = NoOpStorageLinkInjector());
void sendDummyGet(const document::BucketId& bid);
diff --git a/storage/src/tests/persistence/diskmoveoperationhandlertest.cpp b/storage/src/tests/persistence/diskmoveoperationhandlertest.cpp
index def9dd6ec6e..0dd3285e5f3 100644
--- a/storage/src/tests/persistence/diskmoveoperationhandlertest.cpp
+++ b/storage/src/tests/persistence/diskmoveoperationhandlertest.cpp
@@ -1,34 +1,18 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/storage/persistence/diskmoveoperationhandler.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storage/persistence/messages.h>
#include <tests/persistence/persistencetestutils.h>
#include <vespa/document/test/make_document_bucket.h>
using document::test::makeDocumentBucket;
+using namespace ::testing;
namespace storage {
-class DiskMoveOperationHandlerTest : public PersistenceTestUtils
-{
- CPPUNIT_TEST_SUITE(DiskMoveOperationHandlerTest);
- CPPUNIT_TEST(testSimple);
- CPPUNIT_TEST_SUITE_END();
+struct DiskMoveOperationHandlerTest : PersistenceTestUtils {};
-public:
- void testSimple();
- void testTargetExists();
- void testTargetWithOverlap();
-
- void insertDocumentInBucket(uint64_t location, uint64_t timestamp, document::BucketId bucket);
-};
-
-CPPUNIT_TEST_SUITE_REGISTRATION(DiskMoveOperationHandlerTest);
-
-void
-DiskMoveOperationHandlerTest::testSimple()
-{
+TEST_F(DiskMoveOperationHandlerTest, simple) {
setupDisks(10);
// Create bucket 16, 4 on disk 3.
@@ -51,9 +35,8 @@ DiskMoveOperationHandlerTest::testSimple()
spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
diskMoveHandler.handleBucketDiskMove(move, context);
- CPPUNIT_ASSERT_EQUAL(
- std::string("BucketId(0x4000000000000004): 10,4"),
- getBucketStatus(document::BucketId(16,4)));
+ EXPECT_EQ("BucketId(0x4000000000000004): 10,4",
+ getBucketStatus(document::BucketId(16,4)));
}
}
diff --git a/storage/src/tests/persistence/filestorage/CMakeLists.txt b/storage/src/tests/persistence/filestorage/CMakeLists.txt
index 3827b6ac319..5209bcce73d 100644
--- a/storage/src/tests/persistence/filestorage/CMakeLists.txt
+++ b/storage/src/tests/persistence/filestorage/CMakeLists.txt
@@ -1,7 +1,6 @@
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-# TODO: Remove test library when all tests have been migrated to gtest.
-vespa_add_library(storage_testfilestorage TEST
+vespa_add_executable(storage_filestorage_gtest_runner_app TEST
SOURCES
deactivatebucketstest.cpp
deletebuckettest.cpp
@@ -12,14 +11,6 @@ vespa_add_library(storage_testfilestorage TEST
operationabortingtest.cpp
sanitycheckeddeletetest.cpp
singlebucketjointest.cpp
- DEPENDS
- storage
- storageapi
- storage_testpersistence_common
-)
-
-vespa_add_executable(storage_filestorage_gtest_runner_app TEST
- SOURCES
gtest_runner.cpp
DEPENDS
storage
diff --git a/storage/src/tests/persistence/filestorage/deactivatebucketstest.cpp b/storage/src/tests/persistence/filestorage/deactivatebucketstest.cpp
index f9375790ebb..18f8a235453 100644
--- a/storage/src/tests/persistence/filestorage/deactivatebucketstest.cpp
+++ b/storage/src/tests/persistence/filestorage/deactivatebucketstest.cpp
@@ -1,6 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storageapi/message/bucket.h>
#include <vespa/storageapi/message/state.h>
#include <vespa/persistence/spi/test.h>
@@ -9,35 +8,25 @@
#include <tests/persistence/common/filestortestfixture.h>
using storage::spi::test::makeSpiBucket;
+using namespace ::testing;
namespace storage {
-class DeactivateBucketsTest : public FileStorTestFixture
-{
+struct DeactivateBucketsTest : FileStorTestFixture {
bool isActive(const document::BucketId&) const;
-public:
- void bucketsInDatabaseDeactivatedWhenNodeDownInClusterState();
-
- CPPUNIT_TEST_SUITE(DeactivateBucketsTest);
- CPPUNIT_TEST(bucketsInDatabaseDeactivatedWhenNodeDownInClusterState);
- CPPUNIT_TEST_SUITE_END();
};
-CPPUNIT_TEST_SUITE_REGISTRATION(DeactivateBucketsTest);
-
bool
DeactivateBucketsTest::isActive(const document::BucketId& bucket) const
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(bucket, "foo"));
- CPPUNIT_ASSERT(entry.exist());
+ assert(entry.exist());
return entry->info.isActive();
}
-void
-DeactivateBucketsTest::bucketsInDatabaseDeactivatedWhenNodeDownInClusterState()
-{
- TestFileStorComponents c(*this, "bucketsInDatabaseDeactivatedWhenNodeDownInClusterState");
+TEST_F(DeactivateBucketsTest, buckets_in_database_deactivated_when_node_down_in_cluster_state) {
+ TestFileStorComponents c(*this);
// Must set state to up first, or down-edge case won't trigger.
std::string upState("storage:2 distributor:2");
_node->getStateUpdater().setClusterState(
@@ -55,13 +44,13 @@ DeactivateBucketsTest::bucketsInDatabaseDeactivatedWhenNodeDownInClusterState()
entry->info = serviceLayerInfo;
entry.write();
}
- CPPUNIT_ASSERT(isActive(bucket));
+ EXPECT_TRUE(isActive(bucket));
std::string downState("storage:2 .1.s:d distributor:2");
_node->getStateUpdater().setClusterState(
lib::ClusterState::CSP(new lib::ClusterState(downState)));
// Buckets should have been deactivated in content layer
- CPPUNIT_ASSERT(!isActive(bucket));
+ EXPECT_FALSE(isActive(bucket));
}
} // namespace storage
diff --git a/storage/src/tests/persistence/filestorage/deletebuckettest.cpp b/storage/src/tests/persistence/filestorage/deletebuckettest.cpp
index ec3e02e85b8..81c9525b78f 100644
--- a/storage/src/tests/persistence/filestorage/deletebuckettest.cpp
+++ b/storage/src/tests/persistence/filestorage/deletebuckettest.cpp
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/log/log.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storageapi/message/bucket.h>
#include <tests/persistence/common/persistenceproviderwrapper.h>
#include <vespa/document/test/make_document_bucket.h>
@@ -14,26 +13,15 @@ using document::test::makeDocumentBucket;
namespace storage {
-class DeleteBucketTest : public FileStorTestFixture
-{
-public:
- void testDeleteAbortsOperationsForBucket();
-
- CPPUNIT_TEST_SUITE(DeleteBucketTest);
- CPPUNIT_TEST(testDeleteAbortsOperationsForBucket);
- CPPUNIT_TEST_SUITE_END();
+struct DeleteBucketTest : FileStorTestFixture {
};
-CPPUNIT_TEST_SUITE_REGISTRATION(DeleteBucketTest);
-
-void
-DeleteBucketTest::testDeleteAbortsOperationsForBucket()
-{
- TestFileStorComponents c(*this, "testDeleteAbortsOperationsForBucket");
+TEST_F(DeleteBucketTest, delete_aborts_operations_for_bucket) {
+ TestFileStorComponents c(*this);
document::BucketId bucket(16, 1);
createBucket(bucket);
- LOG(info, "TEST STAGE: taking resume guard");
+ LOG(debug, "TEST STAGE: taking resume guard");
{
ResumeGuard rg(c.manager->getFileStorHandler().pause());
// First put may or may not be queued, since pausing might race with
@@ -51,7 +39,7 @@ DeleteBucketTest::testDeleteAbortsOperationsForBucket()
// with having to check that _at least_ 1 reply had BUCKET_DELETED. Joy!
c.top.waitForMessages(2, 60 * 2);
std::vector <api::StorageMessage::SP> msgs(c.top.getRepliesOnce());
- CPPUNIT_ASSERT_EQUAL(size_t(2), msgs.size());
+ ASSERT_EQ(2, msgs.size());
int numDeleted = 0;
for (uint32_t i = 0; i < 2; ++i) {
api::StorageReply& reply(dynamic_cast<api::StorageReply&>(*msgs[i]));
@@ -59,8 +47,8 @@ DeleteBucketTest::testDeleteAbortsOperationsForBucket()
++numDeleted;
}
}
- CPPUNIT_ASSERT(numDeleted >= 1);
- LOG(info, "TEST STAGE: done, releasing resume guard");
+ ASSERT_GE(numDeleted, 1);
+ LOG(debug, "TEST STAGE: done, releasing resume guard");
}
// Ensure we don't shut down persistence threads before DeleteBucket op has completed
c.top.waitForMessages(1, 60*2);
diff --git a/storage/src/tests/persistence/filestorage/filestormanagertest.cpp b/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
index 3888b7503e2..f6b8fc3b3f0 100644
--- a/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
+++ b/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
@@ -1,6 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <tests/common/testhelper.h>
+#include <tests/common/testhelper.h> // FIXME
#include <tests/common/dummystoragelink.h>
#include <tests/common/teststorageapp.h>
#include <tests/persistence/filestorage/forwardingmessagesender.h>
@@ -20,6 +20,7 @@
#include <vespa/persistence/spi/test.h>
#include <vespa/config/common/exceptions.h>
#include <vespa/fastos/file.h>
+#include <vespa/vespalib/gtest/gtest.h>
#include <atomic>
#include <vespa/log/log.h>
@@ -30,20 +31,21 @@ using document::Document;
using namespace storage::api;
using storage::spi::test::makeSpiBucket;
using document::test::makeDocumentBucket;
+using namespace ::testing;
#define ASSERT_SINGLE_REPLY(replytype, reply, link, time) \
-reply = 0; \
+reply = nullptr; \
try{ \
link.waitForMessages(1, time); \
- CPPUNIT_ASSERT_EQUAL((size_t)1, link.getNumReplies()); \
+ ASSERT_EQ(1, link.getNumReplies()); \
reply = dynamic_cast<replytype*>(link.getReply(0).get()); \
- if (reply == 0) { \
- CPPUNIT_FAIL("Got reply of unexpected type: " \
- + link.getReply(0)->getType().toString()); \
+ if (reply == nullptr) { \
+ FAIL() << "Got reply of unexpected type: " \
+ << link.getReply(0)->getType().toString(); \
} \
} catch (vespalib::Exception& e) { \
- reply = 0; \
- CPPUNIT_FAIL("Failed to find single reply in time"); \
+ reply = nullptr; \
+ FAIL() << "Failed to find single reply in time"; \
}
namespace storage {
@@ -56,7 +58,7 @@ struct TestFileStorComponents;
}
-struct FileStorManagerTest : public CppUnit::TestFixture {
+struct FileStorManagerTest : Test{
enum {LONG_WAITTIME=60};
unique_ptr<TestServiceLayerApp> _node;
std::unique_ptr<vdstestlib::DirConfig> config;
@@ -67,84 +69,8 @@ struct FileStorManagerTest : public CppUnit::TestFixture {
FileStorManagerTest() : _node(), _waitTime(LONG_WAITTIME) {}
- void setUp() override;
- void tearDown() override;
-
- void testPut();
- void testHeaderOnlyPut();
- void testFlush();
- void testRemapSplit();
- void testHandlerPriority();
- void testHandlerMulti();
- void testHandlerTimeout();
- void testHandlerPause();
- void testHandlerPausedMultiThread();
- void testPriority();
- void testSplit1();
- void testSplitSingleGroup();
- void testSplitEmptyTargetWithRemappedOps();
- void testNotifyOnSplitSourceOwnershipChanged();
- void testJoin();
- void testVisiting();
- void testRemoveLocation();
- void testDeleteBucket();
- void testDeleteBucketRejectOutdatedBucketInfo();
- void testDeleteBucketWithInvalidBucketInfo();
- void testNoTimestamps();
- void testEqualTimestamps();
- void testGetIter();
- void testSetBucketActiveState();
- void testNotifyOwnerDistributorOnOutdatedSetBucketState();
- void testGetBucketDiffImplicitCreateBucket();
- void testMergeBucketImplicitCreateBucket();
- void testNewlyCreatedBucketIsReady();
- void testCreateBucketSetsActiveFlagInDatabaseAndReply();
- void testStateChange();
- void testRepairNotifiesDistributorOnChange();
- void testDiskMove();
- void put_command_size_is_added_to_metric();
- void update_command_size_is_added_to_metric();
- void remove_command_size_is_added_to_metric();
- void get_command_size_is_added_to_metric();
-
- CPPUNIT_TEST_SUITE(FileStorManagerTest);
- CPPUNIT_TEST(testPut);
- CPPUNIT_TEST(testHeaderOnlyPut);
- CPPUNIT_TEST(testFlush);
- CPPUNIT_TEST(testRemapSplit);
- CPPUNIT_TEST(testHandlerPriority);
- CPPUNIT_TEST(testHandlerMulti);
- CPPUNIT_TEST(testHandlerTimeout);
- CPPUNIT_TEST(testHandlerPause);
- CPPUNIT_TEST(testHandlerPausedMultiThread);
- CPPUNIT_TEST(testPriority);
- CPPUNIT_TEST(testSplit1);
- CPPUNIT_TEST(testSplitSingleGroup);
- CPPUNIT_TEST(testSplitEmptyTargetWithRemappedOps);
- CPPUNIT_TEST(testNotifyOnSplitSourceOwnershipChanged);
- CPPUNIT_TEST(testJoin);
- CPPUNIT_TEST(testVisiting);
- CPPUNIT_TEST(testRemoveLocation);
- CPPUNIT_TEST(testDeleteBucket);
- CPPUNIT_TEST(testDeleteBucketRejectOutdatedBucketInfo);
- CPPUNIT_TEST(testDeleteBucketWithInvalidBucketInfo);
- CPPUNIT_TEST(testNoTimestamps);
- CPPUNIT_TEST(testEqualTimestamps);
- CPPUNIT_TEST(testGetIter);
- CPPUNIT_TEST(testSetBucketActiveState);
- CPPUNIT_TEST(testNotifyOwnerDistributorOnOutdatedSetBucketState);
- CPPUNIT_TEST(testGetBucketDiffImplicitCreateBucket);
- CPPUNIT_TEST(testMergeBucketImplicitCreateBucket);
- CPPUNIT_TEST(testNewlyCreatedBucketIsReady);
- CPPUNIT_TEST(testCreateBucketSetsActiveFlagInDatabaseAndReply);
- CPPUNIT_TEST(testStateChange);
- CPPUNIT_TEST(testRepairNotifiesDistributorOnChange);
- CPPUNIT_TEST(testDiskMove);
- CPPUNIT_TEST(put_command_size_is_added_to_metric);
- CPPUNIT_TEST(update_command_size_is_added_to_metric);
- CPPUNIT_TEST(remove_command_size_is_added_to_metric);
- CPPUNIT_TEST(get_command_size_is_added_to_metric);
- CPPUNIT_TEST_SUITE_END();
+ void SetUp() override;
+ void TearDown() override;
void createBucket(document::BucketId bid, uint16_t disk)
{
@@ -239,8 +165,6 @@ struct FileStorManagerTest : public CppUnit::TestFixture {
}
};
-CPPUNIT_TEST_SUITE_REGISTRATION(FileStorManagerTest);
-
std::string findFile(const std::string& path, const std::string& file) {
FastOS_DirectoryScan dirScan(path.c_str());
while (dirScan.ReadNext()) {
@@ -285,17 +209,12 @@ std::unique_ptr<DiskThread> createThread(vdstestlib::DirConfig& config,
namespace {
-struct TestFileStorComponents
-{
-private:
- TestName _testName;
-public:
+struct TestFileStorComponents {
DummyStorageLink top;
FileStorManager* manager;
- TestFileStorComponents(FileStorManagerTest& test, const char* testName)
- : _testName(testName),
- manager(new FileStorManager(test.config->getConfigId(),
+ explicit TestFileStorComponents(FileStorManagerTest& test)
+ : manager(new FileStorManager(test.config->getConfigId(),
test._node->getPartitions(),
test._node->getPersistenceProvider(),
test._node->getComponentRegister()))
@@ -308,21 +227,18 @@ public:
}
void
-FileStorManagerTest::setUp()
+FileStorManagerTest::SetUp()
{
setupDisks(1);
}
void
-FileStorManagerTest::tearDown()
+FileStorManagerTest::TearDown()
{
_node.reset(0);
}
-void
-FileStorManagerTest::testHeaderOnlyPut()
-{
- TestName testName("testHeaderOnlyPut");
+TEST_F(FileStorManagerTest, header_only_put) {
// Setting up manager
DummyStorageLink top;
FileStorManager *manager;
@@ -340,69 +256,56 @@ FileStorManagerTest::testHeaderOnlyPut()
// Putting it
{
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, 105));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, 105);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
- CPPUNIT_ASSERT_EQUAL(1, (int)reply->getBucketInfo().getDocumentCount());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
+ EXPECT_EQ(1, reply->getBucketInfo().getDocumentCount());
}
doc->setValue(doc->getField("headerval"), document::IntFieldValue(42));
// Putting it again, this time with header only
{
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, 124));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, 124);
cmd->setUpdateTimestamp(105);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode::OK, reply->getResult().getResult());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode::OK, reply->getResult().getResult());
}
// Getting it
{
- std::shared_ptr<api::GetCommand> cmd(new api::GetCommand(
- makeDocumentBucket(bid), doc->getId(), "[all]"));
+ auto cmd = std::make_shared<api::GetCommand>(makeDocumentBucket(bid), doc->getId(), "[all]");
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ ASSERT_EQ(1, top.getNumReplies());
std::shared_ptr<api::GetReply> reply2(
std::dynamic_pointer_cast<api::GetReply>(
top.getReply(0)));
top.reset();
- CPPUNIT_ASSERT(reply2.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply2->getResult());
- CPPUNIT_ASSERT_EQUAL(doc->getId().toString(),
- reply2->getDocumentId().toString());
- // Ensure partial update was done, but other things are equal
- document::FieldValue::UP value(
- reply2->getDocument()->getValue(doc->getField("headerval")));
- CPPUNIT_ASSERT(value.get());
- CPPUNIT_ASSERT_EQUAL(42, dynamic_cast<document::IntFieldValue&>(
- *value).getAsInt());
+ ASSERT_TRUE(reply2.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply2->getResult());
+ EXPECT_EQ(doc->getId().toString(), reply2->getDocumentId().toString());
+ // Ensure partial update was done, but other things are equal
+ auto value = reply2->getDocument()->getValue(doc->getField("headerval"));
+ ASSERT_TRUE(value.get());
+ EXPECT_EQ(42, dynamic_cast<document::IntFieldValue&>(*value).getAsInt());
reply2->getDocument()->remove("headerval");
doc->remove("headerval");
- CPPUNIT_ASSERT_EQUAL(*doc, *reply2->getDocument());
+ EXPECT_EQ(*doc, *reply2->getDocument());
}
}
-void
-FileStorManagerTest::testPut()
-{
- TestName testName("testPut");
+TEST_F(FileStorManagerTest, put) {
// Setting up manager
DummyStorageLink top;
FileStorManager *manager;
@@ -420,25 +323,20 @@ FileStorManagerTest::testPut()
// Putting it
{
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, 105));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, 105);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
- CPPUNIT_ASSERT_EQUAL(1, (int)reply->getBucketInfo().getDocumentCount());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
+ EXPECT_EQ(1, reply->getBucketInfo().getDocumentCount());
}
}
-void
-FileStorManagerTest::testDiskMove()
-{
+TEST_F(FileStorManagerTest, disk_move) {
setupDisks(2);
// Setting up manager
@@ -458,27 +356,24 @@ FileStorManagerTest::testDiskMove()
// Putting it
{
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, 105));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, 105);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
- CPPUNIT_ASSERT_EQUAL(1, (int)reply->getBucketInfo().getDocumentCount());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
+ EXPECT_EQ(1, reply->getBucketInfo().getDocumentCount());
}
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(bid, "foo"));
- CPPUNIT_ASSERT_EQUAL(0, (int)entry->disk);
- CPPUNIT_ASSERT_EQUAL(
+ EXPECT_EQ(0, entry->disk);
+ EXPECT_EQ(
vespalib::string(
"BucketInfo(crc 0x28cc441f, docCount 1, totDocSize 114, "
"ready true, active false)"),
@@ -486,26 +381,24 @@ FileStorManagerTest::testDiskMove()
}
{
- std::shared_ptr<BucketDiskMoveCommand> cmd(
- new BucketDiskMoveCommand(makeDocumentBucket(bid), 0, 1));
+ auto cmd = std::make_shared<BucketDiskMoveCommand>(makeDocumentBucket(bid), 0, 1);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<BucketDiskMoveReply> reply(
- std::dynamic_pointer_cast<BucketDiskMoveReply>(top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<BucketDiskMoveReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
- CPPUNIT_ASSERT_EQUAL(1, (int)reply->getBucketInfo().getDocumentCount());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
+ EXPECT_EQ(1, reply->getBucketInfo().getDocumentCount());
}
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(bid, "foo"));
- CPPUNIT_ASSERT_EQUAL(1, (int)entry->disk);
- CPPUNIT_ASSERT_EQUAL(
+ EXPECT_EQ(1, entry->disk);
+ EXPECT_EQ(
vespalib::string(
"BucketInfo(crc 0x28cc441f, docCount 1, totDocSize 114, "
"ready true, active false)"),
@@ -513,11 +406,7 @@ FileStorManagerTest::testDiskMove()
}
}
-
-void
-FileStorManagerTest::testStateChange()
-{
- TestName testName("testStateChange");
+TEST_F(FileStorManagerTest, state_change) {
// Setting up manager
DummyStorageLink top;
FileStorManager *manager;
@@ -528,17 +417,13 @@ FileStorManagerTest::testStateChange()
top.open();
setClusterState("storage:3 distributor:3");
-
- CPPUNIT_ASSERT_EQUAL(true, getDummyPersistence().getClusterState().nodeUp());
+ EXPECT_TRUE(getDummyPersistence().getClusterState().nodeUp());
setClusterState("storage:3 .0.s:d distributor:3");
-
- CPPUNIT_ASSERT_EQUAL(false, getDummyPersistence().getClusterState().nodeUp());
+ EXPECT_FALSE(getDummyPersistence().getClusterState().nodeUp());
}
-void
-FileStorManagerTest::testRepairNotifiesDistributorOnChange()
-{
+TEST_F(FileStorManagerTest, repair_notifies_distributor_on_change) {
// Setting up manager
DummyStorageLink top;
FileStorManager *manager;
@@ -555,9 +440,8 @@ FileStorManagerTest::testRepairNotifiesDistributorOnChange()
for (uint32_t i = 0; i < 3; ++i) {
document::DocumentId docId(vespalib::make_string("userdoc:ns:1:%d", i));
- Document::SP doc(new Document(*_testdoctype1, docId));
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(document::BucketId(16, 1)), doc, i + 1));
+ auto doc = std::make_shared<Document>(*_testdoctype1, docId);
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(document::BucketId(16, 1)), doc, i + 1);
cmd->setAddress(address);
top.sendDown(cmd);
}
@@ -567,13 +451,12 @@ FileStorManagerTest::testRepairNotifiesDistributorOnChange()
getDummyPersistence().simulateMaintenanceFailure();
- std::shared_ptr<RepairBucketCommand> cmd(
- new RepairBucketCommand(makeDocumentBucket(document::BucketId(16, 1)), 0));
+ auto cmd = std::make_shared<RepairBucketCommand>(makeDocumentBucket(document::BucketId(16, 1)), 0);
top.sendDown(cmd);
top.waitForMessages(2, _waitTime);
- CPPUNIT_ASSERT_EQUAL(
+ EXPECT_EQ(
std::string("NotifyBucketChangeCommand(BucketId(0x4000000000000001), "
"BucketInfo(crc 0x2625a314, docCount 2, totDocSize 154, "
"ready true, active false))"), top.getReply(0)->toString());
@@ -581,22 +464,18 @@ FileStorManagerTest::testRepairNotifiesDistributorOnChange()
top.close();
}
-
-void
-FileStorManagerTest::testFlush()
-{
- TestName testName("testFlush");
- // Setting up manager
+TEST_F(FileStorManagerTest, flush) {
+ // Setting up manager
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager = new FileStorManager(
config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
top.open();
api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
- // Creating a document to test with
+ // Creating a document to test with
document::DocumentId docId("doc:crawler:http://www.ntnu.no/");
- Document::SP doc(new Document(*_testdoctype1, docId));
+ auto doc = std::make_shared<Document>(*_testdoctype1, docId);
document::BucketId bid(4000);
static const uint32_t msgCount = 10;
@@ -604,8 +483,7 @@ FileStorManagerTest::testFlush()
// Generating many put commands
std::vector<std::shared_ptr<api::StorageCommand> > _commands;
for (uint32_t i=0; i<msgCount; ++i) {
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, i+1));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, i+1);
cmd->setAddress(address);
_commands.push_back(cmd);
}
@@ -614,13 +492,10 @@ FileStorManagerTest::testFlush()
}
top.close();
top.flush();
- CPPUNIT_ASSERT_EQUAL((size_t) msgCount, top.getNumReplies());
+ EXPECT_EQ(msgCount, top.getNumReplies());
}
-void
-FileStorManagerTest::testHandlerPriority()
-{
- TestName testName("testHandlerPriority");
+TEST_F(FileStorManagerTest, handler_priority) {
// Setup a filestorthread to test
DummyStorageLink top;
DummyStorageLink *dummyManager;
@@ -638,7 +513,7 @@ FileStorManagerTest::testHandlerPriority()
FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(), _node->getComponentRegister());
filestorHandler.setGetNextMessageTimeout(50);
uint32_t stripeId = filestorHandler.getNextStripeId(0);
- CPPUNIT_ASSERT_EQUAL(0u, stripeId);
+ ASSERT_EQ(0u, stripeId);
std::string content("Here is some content which is in all documents");
std::ostringstream uri;
@@ -657,15 +532,14 @@ FileStorManagerTest::testHandlerPriority()
filestorHandler.schedule(cmd, 0);
}
- CPPUNIT_ASSERT_EQUAL(15, (int)filestorHandler.getNextMessage(0, stripeId).second->getPriority());
- CPPUNIT_ASSERT_EQUAL(30, (int)filestorHandler.getNextMessage(0, stripeId).second->getPriority());
- CPPUNIT_ASSERT_EQUAL(45, (int)filestorHandler.getNextMessage(0, stripeId).second->getPriority());
- CPPUNIT_ASSERT_EQUAL(60, (int)filestorHandler.getNextMessage(0, stripeId).second->getPriority());
- CPPUNIT_ASSERT_EQUAL(75, (int)filestorHandler.getNextMessage(0, stripeId).second->getPriority());
+ ASSERT_EQ(15, filestorHandler.getNextMessage(0, stripeId).second->getPriority());
+ ASSERT_EQ(30, filestorHandler.getNextMessage(0, stripeId).second->getPriority());
+ ASSERT_EQ(45, filestorHandler.getNextMessage(0, stripeId).second->getPriority());
+ ASSERT_EQ(60, filestorHandler.getNextMessage(0, stripeId).second->getPriority());
+ ASSERT_EQ(75, filestorHandler.getNextMessage(0, stripeId).second->getPriority());
}
-class MessagePusherThread : public document::Runnable
-{
+class MessagePusherThread : public document::Runnable {
public:
FileStorHandler& _handler;
Document::SP _doc;
@@ -673,7 +547,7 @@ public:
std::atomic<bool> _threadDone;
MessagePusherThread(FileStorHandler& handler, Document::SP doc);
- ~MessagePusherThread();
+ ~MessagePusherThread() override;
void run() override {
while (!_done) {
@@ -690,7 +564,7 @@ public:
};
MessagePusherThread::MessagePusherThread(FileStorHandler& handler, Document::SP doc)
- : _handler(handler), _doc(doc), _done(false), _threadDone(false)
+ : _handler(handler), _doc(std::move(doc)), _done(false), _threadDone(false)
{}
MessagePusherThread::~MessagePusherThread() = default;
@@ -704,7 +578,7 @@ public:
std::atomic<bool> _failed;
std::atomic<bool> _threadDone;
- MessageFetchingThread(FileStorHandler& handler)
+ explicit MessageFetchingThread(FileStorHandler& handler)
: _threadId(handler.getNextStripeId(0)), _handler(handler), _config(0), _fetchedCount(0), _done(false),
_failed(false), _threadDone(false)
{}
@@ -729,10 +603,7 @@ public:
};
};
-void
-FileStorManagerTest::testHandlerPausedMultiThread()
-{
- TestName testName("testHandlerPausedMultiThread");
+TEST_F(FileStorManagerTest, handler_paused_multi_thread) {
// Setup a filestorthread to test
DummyStorageLink top;
DummyStorageLink *dummyManager;
@@ -767,23 +638,19 @@ FileStorManagerTest::testHandlerPausedMultiThread()
ResumeGuard guard = filestorHandler.pause();
thread._config.fetch_add(1);
uint32_t count = thread._fetchedCount;
- CPPUNIT_ASSERT_EQUAL(count, thread._fetchedCount.load());
+ ASSERT_EQ(count, thread._fetchedCount.load());
}
pushthread._done = true;
thread._done = true;
- CPPUNIT_ASSERT(!thread._failed);
+ ASSERT_FALSE(thread._failed);
while (!pushthread._threadDone || !thread._threadDone) {
FastOS_Thread::Sleep(1);
}
}
-
-void
-FileStorManagerTest::testHandlerPause()
-{
- TestName testName("testHandlerPriority");
+TEST_F(FileStorManagerTest, handler_pause) {
// Setup a filestorthread to test
DummyStorageLink top;
DummyStorageLink *dummyManager;
@@ -818,15 +685,15 @@ FileStorManagerTest::testHandlerPause()
filestorHandler.schedule(cmd, 0);
}
- CPPUNIT_ASSERT_EQUAL(15, (int)filestorHandler.getNextMessage(0, stripeId).second->getPriority());
+ ASSERT_EQ(15, filestorHandler.getNextMessage(0, stripeId).second->getPriority());
{
ResumeGuard guard = filestorHandler.pause();
(void)guard;
- CPPUNIT_ASSERT(filestorHandler.getNextMessage(0, stripeId).second.get() == NULL);
+ ASSERT_EQ(filestorHandler.getNextMessage(0, stripeId).second.get(), nullptr);
}
- CPPUNIT_ASSERT_EQUAL(30, (int)filestorHandler.getNextMessage(0, stripeId).second->getPriority());
+ ASSERT_EQ(30, filestorHandler.getNextMessage(0, stripeId).second->getPriority());
}
namespace {
@@ -842,10 +709,7 @@ uint64_t getPutTime(api::StorageMessage::SP& msg)
}
-void
-FileStorManagerTest::testRemapSplit()
-{
- TestName testName("testRemapSplit");
+TEST_F(FileStorManagerTest, remap_split) {
// Setup a filestorthread to test
DummyStorageLink top;
DummyStorageLink *dummyManager;
@@ -878,36 +742,31 @@ FileStorManagerTest::testRemapSplit()
filestorHandler.schedule(std::make_shared<api::PutCommand>(makeDocumentBucket(bucket2), doc2, i + 10), 0);
}
- CPPUNIT_ASSERT_EQUAL(std::string("BucketId(0x40000000000004d2): Put(BucketId(0x40000000000004d2), userdoc:footype:1234:bar, timestamp 1, size 108) (priority: 127)\n"
- "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 11, size 108) (priority: 127)\n"
- "BucketId(0x40000000000004d2): Put(BucketId(0x40000000000004d2), userdoc:footype:1234:bar, timestamp 2, size 108) (priority: 127)\n"
- "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 12, size 108) (priority: 127)\n"
- "BucketId(0x40000000000004d2): Put(BucketId(0x40000000000004d2), userdoc:footype:1234:bar, timestamp 3, size 108) (priority: 127)\n"
- "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 13, size 108) (priority: 127)\n"),
- filestorHandler.dumpQueue(0));
+ EXPECT_EQ("BucketId(0x40000000000004d2): Put(BucketId(0x40000000000004d2), userdoc:footype:1234:bar, timestamp 1, size 108) (priority: 127)\n"
+ "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 11, size 108) (priority: 127)\n"
+ "BucketId(0x40000000000004d2): Put(BucketId(0x40000000000004d2), userdoc:footype:1234:bar, timestamp 2, size 108) (priority: 127)\n"
+ "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 12, size 108) (priority: 127)\n"
+ "BucketId(0x40000000000004d2): Put(BucketId(0x40000000000004d2), userdoc:footype:1234:bar, timestamp 3, size 108) (priority: 127)\n"
+ "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 13, size 108) (priority: 127)\n",
+ filestorHandler.dumpQueue(0));
FileStorHandler::RemapInfo a(makeDocumentBucket(document::BucketId(17, 1234)), 0);
FileStorHandler::RemapInfo b(makeDocumentBucket(document::BucketId(17, 1234 | 1 << 16)), 0);
filestorHandler.remapQueueAfterSplit(FileStorHandler::RemapInfo(makeDocumentBucket(bucket1), 0), a, b);
- CPPUNIT_ASSERT(a.foundInQueue);
- CPPUNIT_ASSERT(!b.foundInQueue);
-
- CPPUNIT_ASSERT_EQUAL(std::string(
- "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 11, size 108) (priority: 127)\n"
- "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 12, size 108) (priority: 127)\n"
- "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 13, size 108) (priority: 127)\n"
- "BucketId(0x44000000000004d2): Put(BucketId(0x44000000000004d2), userdoc:footype:1234:bar, timestamp 1, size 108) (priority: 127)\n"
- "BucketId(0x44000000000004d2): Put(BucketId(0x44000000000004d2), userdoc:footype:1234:bar, timestamp 2, size 108) (priority: 127)\n"
- "BucketId(0x44000000000004d2): Put(BucketId(0x44000000000004d2), userdoc:footype:1234:bar, timestamp 3, size 108) (priority: 127)\n"),
- filestorHandler.dumpQueue(0));
+ ASSERT_TRUE(a.foundInQueue);
+ ASSERT_FALSE(b.foundInQueue);
+ EXPECT_EQ("BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 11, size 108) (priority: 127)\n"
+ "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 12, size 108) (priority: 127)\n"
+ "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 13, size 108) (priority: 127)\n"
+ "BucketId(0x44000000000004d2): Put(BucketId(0x44000000000004d2), userdoc:footype:1234:bar, timestamp 1, size 108) (priority: 127)\n"
+ "BucketId(0x44000000000004d2): Put(BucketId(0x44000000000004d2), userdoc:footype:1234:bar, timestamp 2, size 108) (priority: 127)\n"
+ "BucketId(0x44000000000004d2): Put(BucketId(0x44000000000004d2), userdoc:footype:1234:bar, timestamp 3, size 108) (priority: 127)\n",
+ filestorHandler.dumpQueue(0));
}
-void
-FileStorManagerTest::testHandlerMulti()
-{
- TestName testName("testHandlerMulti");
+TEST_F(FileStorManagerTest, handler_multi) {
// Setup a filestorthread to test
DummyStorageLink top;
DummyStorageLink *dummyManager;
@@ -946,29 +805,25 @@ FileStorManagerTest::testHandlerMulti()
{
FileStorHandler::LockedMessage lock = filestorHandler.getNextMessage(0, stripeId);
- CPPUNIT_ASSERT_EQUAL((uint64_t)1, getPutTime(lock.second));
+ ASSERT_EQ(1, getPutTime(lock.second));
lock = filestorHandler.getNextMessage(0, stripeId, lock);
- CPPUNIT_ASSERT_EQUAL((uint64_t)2, getPutTime(lock.second));
+ ASSERT_EQ(2, getPutTime(lock.second));
lock = filestorHandler.getNextMessage(0, stripeId, lock);
- CPPUNIT_ASSERT_EQUAL((uint64_t)3, getPutTime(lock.second));
+ ASSERT_EQ(3, getPutTime(lock.second));
}
{
FileStorHandler::LockedMessage lock = filestorHandler.getNextMessage(0, stripeId);
- CPPUNIT_ASSERT_EQUAL((uint64_t)11, getPutTime(lock.second));
+ ASSERT_EQ(11, getPutTime(lock.second));
lock = filestorHandler.getNextMessage(0, stripeId, lock);
- CPPUNIT_ASSERT_EQUAL((uint64_t)12, getPutTime(lock.second));
+ ASSERT_EQ(12, getPutTime(lock.second));
}
}
-
-void
-FileStorManagerTest::testHandlerTimeout()
-{
- TestName testName("testHandlerTimeout");
+TEST_F(FileStorManagerTest, handler_timeout) {
// Setup a filestorthread to test
DummyStorageLink top;
DummyStorageLink *dummyManager;
@@ -1018,20 +873,17 @@ FileStorManagerTest::testHandlerTimeout()
for (;;) {
auto lock = filestorHandler.getNextMessage(0, stripeId);
if (lock.first.get()) {
- CPPUNIT_ASSERT_EQUAL(uint8_t(200), lock.second->getPriority());
+ ASSERT_EQ(200, lock.second->getPriority());
break;
}
}
- CPPUNIT_ASSERT_EQUAL(size_t(1), top.getNumReplies());
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::TIMEOUT,
- static_cast<api::StorageReply&>(*top.getReply(0)).getResult().getResult());
+ ASSERT_EQ(1, top.getNumReplies());
+ EXPECT_EQ(api::ReturnCode::TIMEOUT,
+ static_cast<api::StorageReply&>(*top.getReply(0)).getResult().getResult());
}
-void
-FileStorManagerTest::testPriority()
-{
- TestName testName("testPriority");
+TEST_F(FileStorManagerTest, priority) {
// Setup a filestorthread to test
DummyStorageLink top;
DummyStorageLink *dummyManager;
@@ -1080,11 +932,8 @@ FileStorManagerTest::testPriority()
for (uint32_t i=0; i<documents.size(); ++i) {
document::BucketId bucket(16, factory.getBucketId(documents[i]->getId()).getRawId());
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bucket), documents[i], 100 + i));
- std::unique_ptr<api::StorageMessageAddress> address(
- new api::StorageMessageAddress(
- "storage", lib::NodeType::STORAGE, 3));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bucket), documents[i], 100 + i);
+ auto address = std::make_unique<api::StorageMessageAddress>("storage", lib::NodeType::STORAGE, 3);
cmd->setAddress(*address);
cmd->setPriority(i * 2);
filestorHandler.schedule(cmd, 0);
@@ -1094,35 +943,31 @@ FileStorManagerTest::testPriority()
// Wait until everything is done.
int count = 0;
- while (documents.size() != top.getNumReplies() && count < 1000) {
- FastOS_Thread::Sleep(100);
+ while (documents.size() != top.getNumReplies() && count < 10000) {
+ FastOS_Thread::Sleep(10);
count++;
}
- CPPUNIT_ASSERT(count < 1000);
+ ASSERT_LT(count, 10000);
for (uint32_t i = 0; i < documents.size(); i++) {
std::shared_ptr<api::PutReply> reply(
std::dynamic_pointer_cast<api::PutReply>(
top.getReply(i)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
}
// Verify that thread 1 gets documents over 50 pri
- CPPUNIT_ASSERT_EQUAL(uint64_t(documents.size()),
- metrics.disks[0]->threads[0]->operations.getValue()
- + metrics.disks[0]->threads[1]->operations.getValue());
+ EXPECT_EQ(documents.size(),
+ metrics.disks[0]->threads[0]->operations.getValue()
+ + metrics.disks[0]->threads[1]->operations.getValue());
// Closing file stor handler before threads are deleted, such that
// file stor threads getNextMessage calls returns.
filestorHandler.close();
}
-void
-FileStorManagerTest::testSplit1()
-{
- TestName testName("testSplit1");
- // Setup a filestorthread to test
+TEST_F(FileStorManagerTest, split1) {
+ // Setup a filestorthread to test
DummyStorageLink top;
DummyStorageLink *dummyManager;
top.push_back(std::unique_ptr<StorageLink>(
@@ -1137,7 +982,7 @@ FileStorManagerTest::testSplit1()
std::unique_ptr<DiskThread> thread(createThread(
*config, *_node, _node->getPersistenceProvider(),
filestorHandler, *metrics.disks[0]->threads[0], 0));
- // Creating documents to test with. Different gids, 2 locations.
+ // Creating documents to test with. Different gids, 2 locations.
std::vector<document::Document::SP > documents;
for (uint32_t i=0; i<20; ++i) {
std::string content("Here is some content which is in all documents");
@@ -1161,60 +1006,45 @@ FileStorManagerTest::testSplit1()
_node->getPersistenceProvider().createBucket(
makeSpiBucket(bucket), context);
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bucket), documents[i], 100 + i));
- std::unique_ptr<api::StorageMessageAddress> address(
- new api::StorageMessageAddress(
- "storage", lib::NodeType::STORAGE, 3));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bucket), documents[i], 100 + i);
+ auto address = std::make_unique<api::StorageMessageAddress>("storage", lib::NodeType::STORAGE, 3);
cmd->setAddress(*address);
cmd->setSourceIndex(0);
filestorHandler.schedule(cmd, 0);
filestorHandler.flush(true);
LOG(debug, "Got %zu replies", top.getNumReplies());
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
top.reset();
// Delete every 5th document to have delete entries in file too
if (i % 5 == 0) {
- std::shared_ptr<api::RemoveCommand> rcmd(
- new api::RemoveCommand(
- makeDocumentBucket(bucket), documents[i]->getId(), 1000000 + 100 + i));
+ auto rcmd = std::make_shared<api::RemoveCommand>(
+ makeDocumentBucket(bucket), documents[i]->getId(), 1000000 + 100 + i);
rcmd->setAddress(*address);
filestorHandler.schedule(rcmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::RemoveReply> rreply(
- std::dynamic_pointer_cast<api::RemoveReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT_MSG(top.getReply(0)->getType().toString(),
- rreply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- rreply->getResult());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto rreply = std::dynamic_pointer_cast<api::RemoveReply>(top.getReply(0));
+ ASSERT_TRUE(rreply.get()) << top.getReply(0)->getType().toString();
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), rreply->getResult());
top.reset();
}
}
// Perform a split, check that locations are split
{
- std::shared_ptr<api::SplitBucketCommand> cmd(
- new api::SplitBucketCommand(makeDocumentBucket(document::BucketId(16, 1))));
+ auto cmd = std::make_shared<api::SplitBucketCommand>(makeDocumentBucket(document::BucketId(16, 1)));
cmd->setSourceIndex(0);
filestorHandler.schedule(cmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::SplitBucketReply> reply(
- std::dynamic_pointer_cast<api::SplitBucketReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::SplitBucketReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
top.reset();
}
@@ -1222,37 +1052,30 @@ FileStorManagerTest::testSplit1()
for (uint32_t i=0; i<documents.size(); ++i) {
document::BucketId bucket(
17, i % 3 == 0 ? 0x10001 : 0x0100001);
- std::shared_ptr<api::GetCommand> cmd(
- new api::GetCommand(makeDocumentBucket(bucket), documents[i]->getId(), "[all]"));
- api::StorageMessageAddress address(
- "storage", lib::NodeType::STORAGE, 3);
+ auto cmd = std::make_shared<api::GetCommand>(
+ makeDocumentBucket(bucket), documents[i]->getId(), "[all]");
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
cmd->setAddress(address);
filestorHandler.schedule(cmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::GetReply> reply(
- std::dynamic_pointer_cast<api::GetReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(i % 5 != 0 ? true : false, reply->wasFound());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::GetReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(((i % 5) != 0), reply->wasFound());
top.reset();
}
// Keep splitting location 1 until we gidsplit
for (int i=17; i<=32; ++i) {
- std::shared_ptr<api::SplitBucketCommand> cmd(
- new api::SplitBucketCommand(
- makeDocumentBucket(document::BucketId(i, 0x0100001))));
+ auto cmd = std::make_shared<api::SplitBucketCommand>(
+ makeDocumentBucket(document::BucketId(i, 0x0100001)));
cmd->setSourceIndex(0);
filestorHandler.schedule(cmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::SplitBucketReply> reply(
- std::dynamic_pointer_cast<api::SplitBucketReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::SplitBucketReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
top.reset();
}
@@ -1265,19 +1088,16 @@ FileStorManagerTest::testSplit1()
bucket = document::BucketId(33, factory.getBucketId(
documents[i]->getId()).getRawId());
}
- std::shared_ptr<api::GetCommand> cmd(
- new api::GetCommand(makeDocumentBucket(bucket), documents[i]->getId(), "[all]"));
- api::StorageMessageAddress address(
- "storage", lib::NodeType::STORAGE, 3);
+ auto cmd = std::make_shared<api::GetCommand>(
+ makeDocumentBucket(bucket), documents[i]->getId(), "[all]");
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
cmd->setAddress(address);
filestorHandler.schedule(cmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::GetReply> reply(
- std::dynamic_pointer_cast<api::GetReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(i % 5 != 0 ? true : false, reply->wasFound());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::GetReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(((i % 5) != 0), reply->wasFound());
top.reset();
}
}
@@ -1286,11 +1106,8 @@ FileStorManagerTest::testSplit1()
filestorHandler.close();
}
-void
-FileStorManagerTest::testSplitSingleGroup()
-{
- TestName testName("testSplitSingleGroup");
- // Setup a filestorthread to test
+TEST_F(FileStorManagerTest, split_single_group) {
+ // Setup a filestorthread to test
DummyStorageLink top;
DummyStorageLink *dummyManager;
top.push_back(std::unique_ptr<StorageLink>(
@@ -1312,79 +1129,62 @@ FileStorManagerTest::testSplitSingleGroup()
std::unique_ptr<DiskThread> thread(createThread(
*config, *_node, _node->getPersistenceProvider(),
filestorHandler, *metrics.disks[0]->threads[0], 0));
- // Creating documents to test with. Different gids, 2 locations.
- std::vector<document::Document::SP > documents;
+ // Creating documents to test with. Different gids, 2 locations.
+ std::vector<document::Document::SP> documents;
for (uint32_t i=0; i<20; ++i) {
std::string content("Here is some content for all documents");
std::ostringstream uri;
uri << "userdoc:footype:" << (state ? 0x10001 : 0x0100001)
<< ":mydoc-" << i;
- Document::SP doc(createDocument(
- content, uri.str()).release());
- documents.push_back(doc);
+ documents.emplace_back(createDocument(content, uri.str()));
}
document::BucketIdFactory factory;
- // Populate bucket with the given data
+ // Populate bucket with the given data
for (uint32_t i=0; i<documents.size(); ++i) {
document::BucketId bucket(16, factory.getBucketId(
documents[i]->getId()).getRawId());
- _node->getPersistenceProvider().createBucket(
- makeSpiBucket(bucket), context);
+ _node->getPersistenceProvider().createBucket(makeSpiBucket(bucket), context);
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bucket), documents[i], 100 + i));
- api::StorageMessageAddress address(
- "storage", lib::NodeType::STORAGE, 3);
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bucket), documents[i], 100 + i);
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
cmd->setAddress(address);
filestorHandler.schedule(cmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
top.reset();
}
- // Perform a split, check that locations are split
+ // Perform a split, check that locations are split
{
- std::shared_ptr<api::SplitBucketCommand> cmd(
- new api::SplitBucketCommand(makeDocumentBucket(document::BucketId(16, 1))));
+ auto cmd = std::make_shared<api::SplitBucketCommand>(makeDocumentBucket(document::BucketId(16, 1)));
cmd->setSourceIndex(0);
filestorHandler.schedule(cmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::SplitBucketReply> reply(
- std::dynamic_pointer_cast<api::SplitBucketReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::SplitBucketReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
top.reset();
}
-
// Test that the documents are all still there
for (uint32_t i=0; i<documents.size(); ++i) {
document::BucketId bucket(17, state ? 0x10001 : 0x00001);
- std::shared_ptr<api::GetCommand> cmd(
- new api::GetCommand(makeDocumentBucket(bucket), documents[i]->getId(), "[all]"));
- api::StorageMessageAddress address(
- "storage", lib::NodeType::STORAGE, 3);
+ auto cmd = std::make_shared<api::GetCommand>
+ (makeDocumentBucket(bucket), documents[i]->getId(), "[all]");
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
cmd->setAddress(address);
filestorHandler.schedule(cmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::GetReply> reply(
- std::dynamic_pointer_cast<api::GetReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::GetReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
top.reset();
}
// Closing file stor handler before threads are deleted, such that
@@ -1415,21 +1215,16 @@ FileStorManagerTest::putDoc(DummyStorageLink& top,
cmd->setPriority(120);
filestorHandler.schedule(cmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ ASSERT_EQ(1, top.getNumReplies());
std::shared_ptr<api::PutReply> reply(
std::dynamic_pointer_cast<api::PutReply>(
top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
+ ASSERT_TRUE(reply.get());
+ ASSERT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
top.reset();
}
-void
-FileStorManagerTest::testSplitEmptyTargetWithRemappedOps()
-{
- TestName testName("testSplitEmptyTargetWithRemappedOps");
-
+TEST_F(FileStorManagerTest, split_empty_target_with_remapped_ops) {
DummyStorageLink top;
DummyStorageLink *dummyManager;
top.push_back(std::unique_ptr<StorageLink>(
@@ -1450,7 +1245,7 @@ FileStorManagerTest::testSplitEmptyTargetWithRemappedOps()
api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
for (uint32_t i=0; i<10; ++i) {
- putDoc(top, filestorHandler, source, i);
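+        // putDoc() asserts internally; ASSERT_NO_FATAL_FAILURE propagates any
+        // fatal failure from the helper out into this test body.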
+ ASSERT_NO_FATAL_FAILURE(putDoc(top, filestorHandler, source, i));
}
// Send split followed by a put that is bound for a target bucket that
@@ -1459,54 +1254,42 @@ FileStorManagerTest::testSplitEmptyTargetWithRemappedOps()
// the persistence provider deleting it internally.
// Make sure we block the operation queue until we've scheduled all
// the operations.
- std::unique_ptr<ResumeGuard> resumeGuard(
- new ResumeGuard(filestorHandler.pause()));
+ auto resumeGuard = std::make_unique<ResumeGuard>(filestorHandler.pause());
- std::shared_ptr<api::SplitBucketCommand> splitCmd(
- new api::SplitBucketCommand(makeDocumentBucket(source)));
+ auto splitCmd = std::make_shared<api::SplitBucketCommand>(makeDocumentBucket(source));
splitCmd->setPriority(120);
splitCmd->setSourceIndex(0);
document::DocumentId docId(
vespalib::make_string("userdoc:ns:%d:1234", 0x100001));
- Document::SP doc(new Document(*_testdoctype1, docId));
- std::shared_ptr<api::PutCommand> putCmd(
- new api::PutCommand(makeDocumentBucket(source), doc, 1001));
+ auto doc = std::make_shared<Document>(*_testdoctype1, docId);
+ auto putCmd = std::make_shared<api::PutCommand>(makeDocumentBucket(source), doc, 1001);
putCmd->setAddress(address);
putCmd->setPriority(120);
filestorHandler.schedule(splitCmd, 0);
filestorHandler.schedule(putCmd, 0);
- resumeGuard.reset(0); // Unpause
+ resumeGuard.reset(); // Unpause
filestorHandler.flush(true);
top.waitForMessages(2, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 2, top.getNumReplies());
+ ASSERT_EQ(2, top.getNumReplies());
{
- std::shared_ptr<api::SplitBucketReply> reply(
- std::dynamic_pointer_cast<api::SplitBucketReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
+ auto reply = std::dynamic_pointer_cast<api::SplitBucketReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
}
{
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(1)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(1));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
}
top.reset();
}
-void
-FileStorManagerTest::testNotifyOnSplitSourceOwnershipChanged()
-{
- TestName testName("testSplit1");
+TEST_F(FileStorManagerTest, notify_on_split_source_ownership_changed) {
// Setup a filestorthread to test
DummyStorageLink top;
DummyStorageLink *dummyManager;
@@ -1525,11 +1308,10 @@ FileStorManagerTest::testNotifyOnSplitSourceOwnershipChanged()
document::BucketId source(getFirstBucketNotOwnedByDistributor(0));
createBucket(source, 0);
for (uint32_t i=0; i<10; ++i) {
- putDoc(top, filestorHandler, source, i);
+ ASSERT_NO_FATAL_FAILURE(putDoc(top, filestorHandler, source, i));
}
- std::shared_ptr<api::SplitBucketCommand> splitCmd(
- new api::SplitBucketCommand(makeDocumentBucket(source)));
+ auto splitCmd = std::make_shared<api::SplitBucketCommand>(makeDocumentBucket(source));
splitCmd->setPriority(120);
splitCmd->setSourceIndex(0); // Source not owned by this distributor.
@@ -1537,25 +1319,18 @@ FileStorManagerTest::testNotifyOnSplitSourceOwnershipChanged()
filestorHandler.flush(true);
top.waitForMessages(4, _waitTime); // 3 notify cmds + split reply
- CPPUNIT_ASSERT_EQUAL(size_t(4), top.getNumReplies());
+ ASSERT_EQ(4, top.getNumReplies());
for (int i = 0; i < 3; ++i) {
- CPPUNIT_ASSERT_EQUAL(api::MessageType::NOTIFYBUCKETCHANGE,
- top.getReply(i)->getType());
+ ASSERT_EQ(api::MessageType::NOTIFYBUCKETCHANGE, top.getReply(i)->getType());
}
- std::shared_ptr<api::SplitBucketReply> reply(
- std::dynamic_pointer_cast<api::SplitBucketReply>(
- top.getReply(3)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
+ auto reply = std::dynamic_pointer_cast<api::SplitBucketReply>(top.getReply(3));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
}
-void
-FileStorManagerTest::testJoin()
-{
- TestName testName("testJoin");
- // Setup a filestorthread to test
+TEST_F(FileStorManagerTest, join) {
+ // Setup a filestorthread to test
DummyStorageLink top;
DummyStorageLink *dummyManager;
top.push_back(std::unique_ptr<StorageLink>(
@@ -1570,15 +1345,13 @@ FileStorManagerTest::testJoin()
std::unique_ptr<DiskThread> thread(createThread(
*config, *_node, _node->getPersistenceProvider(),
filestorHandler, *metrics.disks[0]->threads[0], 0));
- // Creating documents to test with. Different gids, 2 locations.
+ // Creating documents to test with. Different gids, 2 locations.
std::vector<document::Document::SP > documents;
for (uint32_t i=0; i<20; ++i) {
std::string content("Here is some content which is in all documents");
std::ostringstream uri;
-
uri << "userdoc:footype:" << (i % 3 == 0 ? 0x10001 : 0x0100001) << ":mydoc-" << i;
- Document::SP doc(createDocument(content, uri.str()).release());
- documents.push_back(doc);
+ documents.emplace_back(createDocument(content, uri.str()));
}
document::BucketIdFactory factory;
@@ -1586,7 +1359,7 @@ FileStorManagerTest::testJoin()
createBucket(document::BucketId(17, 0x10001), 0);
{
- // Populate bucket with the given data
+ // Populate bucket with the given data
for (uint32_t i=0; i<documents.size(); ++i) {
document::BucketId bucket(17, factory.getBucketId(documents[i]->getId()).getRawId());
auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bucket), documents[i], 100 + i);
@@ -1594,66 +1367,57 @@ FileStorManagerTest::testJoin()
cmd->setAddress(*address);
filestorHandler.schedule(cmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ ASSERT_EQ(1, top.getNumReplies());
auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
top.reset();
- // Delete every 5th document to have delete entries in file too
- if (i % 5 == 0) {
- auto rcmd = std::make_shared<api::RemoveCommand>(makeDocumentBucket(bucket),
- documents[i]->getId(), 1000000 + 100 + i);
+ // Delete every 5th document to have delete entries in file too
+ if ((i % 5) == 0) {
+ auto rcmd = std::make_shared<api::RemoveCommand>(
+ makeDocumentBucket(bucket), documents[i]->getId(), 1000000 + 100 + i);
rcmd->setAddress(*address);
filestorHandler.schedule(rcmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ ASSERT_EQ(1, top.getNumReplies());
auto rreply = std::dynamic_pointer_cast<api::RemoveReply>(top.getReply(0));
- CPPUNIT_ASSERT_MSG(top.getReply(0)->getType().toString(),
- rreply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- rreply->getResult());
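+                // Context streamed with operator<< replaces CPPUNIT_ASSERT_MSG; the
+                // message is shown only when the assertion fails.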
+ ASSERT_TRUE(rreply.get()) << top.getReply(0)->getType().toString();
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), rreply->getResult());
top.reset();
}
}
LOG(debug, "Starting the actual join after populating data");
- // Perform a join, check that other files are gone
+ // Perform a join, check that other files are gone
{
- std::shared_ptr<api::JoinBucketsCommand> cmd(
- new api::JoinBucketsCommand(makeDocumentBucket(document::BucketId(16, 1))));
- cmd->getSourceBuckets().push_back(document::BucketId(17, 0x00001));
- cmd->getSourceBuckets().push_back(document::BucketId(17, 0x10001));
+ auto cmd = std::make_shared<api::JoinBucketsCommand>(makeDocumentBucket(document::BucketId(16, 1)));
+ cmd->getSourceBuckets().emplace_back(document::BucketId(17, 0x00001));
+ cmd->getSourceBuckets().emplace_back(document::BucketId(17, 0x10001));
filestorHandler.schedule(cmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::JoinBucketsReply> reply(
- std::dynamic_pointer_cast<api::JoinBucketsReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::JoinBucketsReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
top.reset();
}
// Test that the documents have gotten into the file.
for (uint32_t i=0; i<documents.size(); ++i) {
document::BucketId bucket(16, 1);
- std::shared_ptr<api::GetCommand> cmd(
- new api::GetCommand(makeDocumentBucket(bucket), documents[i]->getId(), "[all]"));
- api::StorageMessageAddress address(
- "storage", lib::NodeType::STORAGE, 3);
+ auto cmd = std::make_shared<api::GetCommand>(
+ makeDocumentBucket(bucket), documents[i]->getId(), "[all]");
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
cmd->setAddress(address);
filestorHandler.schedule(cmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::GetReply> reply(
- std::dynamic_pointer_cast<api::GetReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(i % 5 != 0 ? true : false, reply->wasFound());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::GetReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(((i % 5) != 0), reply->wasFound());
top.reset();
}
}
- // Closing file stor handler before threads are deleted, such that
- // file stor threads getNextMessage calls returns.
+    // Closing file stor handler before threads are deleted, so that
+    // the file stor threads' getNextMessage calls return.
filestorHandler.close();
}
@@ -1673,30 +1437,24 @@ createIterator(DummyStorageLink& link,
spi::Selection(spi::DocumentSelection(docSel));
selection.setFromTimestamp(spi::Timestamp(fromTime.getTime()));
selection.setToTimestamp(spi::Timestamp(toTime.getTime()));
- CreateIteratorCommand::SP createIterCmd(
- new CreateIteratorCommand(makeDocumentBucket(bucket),
- selection,
- headerOnly ? "[header]" : "[all]",
- spi::NEWEST_DOCUMENT_ONLY));
+ auto createIterCmd = std::make_shared<CreateIteratorCommand>(
+ makeDocumentBucket(bucket), selection,
+ headerOnly ? "[header]" : "[all]",
+ spi::NEWEST_DOCUMENT_ONLY);
link.sendDown(createIterCmd);
link.waitForMessages(1, FileStorManagerTest::LONG_WAITTIME);
- CPPUNIT_ASSERT_EQUAL(size_t(1), link.getNumReplies());
- std::shared_ptr<CreateIteratorReply> reply(
- std::dynamic_pointer_cast<CreateIteratorReply>(
- link.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
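+    // Plain assert() rather than ASSERT_*: gtest's fatal assertion macros can only
+    // be used in functions returning void, and this helper returns an IteratorId.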
+ assert(link.getNumReplies() == 1);
+ auto reply = std::dynamic_pointer_cast<CreateIteratorReply>(link.getReply(0));
+ assert(reply.get());
link.reset();
- CPPUNIT_ASSERT(reply->getResult().success());
+ assert(reply->getResult().success());
return reply->getIteratorId();
}
}
-void
-FileStorManagerTest::testVisiting()
-{
- TestName testName("testVisiting");
- // Setting up manager
+TEST_F(FileStorManagerTest, visiting) {
+ // Setting up manager
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager = new FileStorManager(
@@ -1705,9 +1463,10 @@ FileStorManagerTest::testVisiting()
// Adding documents to two buckets which we are going to visit
// We want one bucket in one slotfile, and one bucket with a file split
uint32_t docCount = 50;
- std::vector<document::BucketId> ids(2);
- ids[0] = document::BucketId(16, 1);
- ids[1] = document::BucketId(16, 2);
+ std::vector<document::BucketId> ids = {
+ document::BucketId(16, 1),
+ document::BucketId(16, 2)
+ };
createBucket(ids[0], 0);
createBucket(ids[1], 0);
@@ -1719,8 +1478,7 @@ FileStorManagerTest::testVisiting()
uri << "userdoc:crawler:" << (i < 3 ? 1 : 2) << ":"
<< randomizer.nextUint32() << ".html";
- Document::SP doc(createDocument(
- content, uri.str()).release());
+ Document::SP doc(createDocument(content, uri.str()));
const document::DocumentType& type(doc->getType());
if (i < 30) {
doc->setValue(type.getField("hstringval"),
@@ -1729,71 +1487,61 @@ FileStorManagerTest::testVisiting()
doc->setValue(type.getField("hstringval"),
document::StringFieldValue("Jane Doe"));
}
- std::shared_ptr<api::PutCommand> cmd(new api::PutCommand(
- makeDocumentBucket(ids[i < 3 ? 0 : 1]), doc, i+1));
+ auto cmd = std::make_shared<api::PutCommand>(
+ makeDocumentBucket(ids[(i < 3) ? 0 : 1]), doc, i+1);
top.sendDown(cmd);
}
top.waitForMessages(docCount, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) docCount, top.getNumReplies());
- // Check nodestate with splitting
+ ASSERT_EQ(docCount, top.getNumReplies());
+ // Check nodestate with splitting
{
api::BucketInfo info;
for (uint32_t i=3; i<docCount; ++i) {
- std::shared_ptr<api::BucketInfoReply> reply(
- std::dynamic_pointer_cast<api::BucketInfoReply>(
- top.getReply(i)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_MESSAGE(reply->getResult().toString(),
- reply->getResult().success());
+ auto reply = std::dynamic_pointer_cast<api::BucketInfoReply>(top.getReply(i));
+ ASSERT_TRUE(reply.get());
+ ASSERT_TRUE(reply->getResult().success()) << reply->getResult().toString();
info = reply->getBucketInfo();
}
- CPPUNIT_ASSERT_EQUAL(docCount-3, info.getDocumentCount());
+ EXPECT_EQ(docCount - 3, info.getDocumentCount());
}
top.reset();
- // Visit bucket with no split, using no selection
+ // Visit bucket with no split, using no selection
{
spi::IteratorId iterId(createIterator(top, ids[0], "true"));
auto cmd = std::make_shared<GetIterCommand>(makeDocumentBucket(ids[0]), iterId, 16*1024);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL(size_t(1), top.getNumReplies());
- std::shared_ptr<GetIterReply> reply(
- std::dynamic_pointer_cast<GetIterReply>(top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
- CPPUNIT_ASSERT_EQUAL(ids[0], reply->getBucketId());
- CPPUNIT_ASSERT_EQUAL(size_t(3), reply->getEntries().size());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<GetIterReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
+ EXPECT_EQ(ids[0], reply->getBucketId());
+ EXPECT_EQ(3, reply->getEntries().size());
top.reset();
}
- // Visit bucket with split, using selection
+ // Visit bucket with split, using selection
{
uint32_t totalDocs = 0;
- spi::IteratorId iterId(
- createIterator(top,
- ids[1],
- "testdoctype1.hstringval = \"John Doe\""));
+ spi::IteratorId iterId(createIterator(top, ids[1], "testdoctype1.hstringval = \"John Doe\""));
while (true) {
auto cmd = std::make_shared<GetIterCommand>(makeDocumentBucket(ids[1]), iterId, 16*1024);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<GetIterReply> reply(
- std::dynamic_pointer_cast<GetIterReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
- CPPUNIT_ASSERT_EQUAL(ids[1], reply->getBucketId());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<GetIterReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
+ EXPECT_EQ(ids[1], reply->getBucketId());
totalDocs += reply->getEntries().size();
top.reset();
if (reply->isCompleted()) {
break;
}
}
- CPPUNIT_ASSERT_EQUAL(27u, totalDocs);
+ EXPECT_EQ(27u, totalDocs);
}
- // Visit bucket with min and max timestamps set, headers only
+ // Visit bucket with min and max timestamps set, headers only
{
document::BucketId bucket(16, 2);
spi::IteratorId iterId(
@@ -1808,37 +1556,24 @@ FileStorManagerTest::testVisiting()
auto cmd = std::make_shared<GetIterCommand>(makeDocumentBucket(ids[1]), iterId, 16*1024);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL(size_t(1), top.getNumReplies());
- std::shared_ptr<GetIterReply> reply(
- std::dynamic_pointer_cast<GetIterReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
- CPPUNIT_ASSERT_EQUAL(bucket, reply->getBucketId());
-/* Header only is a VDS-specific thing.
-
- for (size_t i = 0; i < reply->getEntries().size(); ++i) {
- CPPUNIT_ASSERT(reply->getEntries()[i]->getDocument()
- ->getBody().empty());
- }
-*/
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<GetIterReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
+ EXPECT_EQ(bucket, reply->getBucketId());
totalDocs += reply->getEntries().size();
top.reset();
if (reply->isCompleted()) {
break;
}
}
- CPPUNIT_ASSERT_EQUAL(11u, totalDocs);
+ EXPECT_EQ(11u, totalDocs);
}
}
-void
-FileStorManagerTest::testRemoveLocation()
-{
- TestName testName("testRemoveLocation");
- // Setting up manager
+TEST_F(FileStorManagerTest, remove_location) {
+ // Setting up manager
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager =
@@ -1853,55 +1588,44 @@ FileStorManagerTest::testRemoveLocation()
for (uint32_t i=0; i<=10; ++i) {
std::ostringstream docid;
docid << "userdoc:ns:" << (i << 8) << ":foo";
- Document::SP doc(createDocument(
- "some content", docid.str()).release());
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, 1000 + i));
+ Document::SP doc(createDocument("some content", docid.str()));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, 1000 + i);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
- CPPUNIT_ASSERT_EQUAL(i + 1u, reply->getBucketInfo().getDocumentCount());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
+ EXPECT_EQ(i + 1u, reply->getBucketInfo().getDocumentCount());
}
- // Issuing remove location command
+ // Issuing remove location command
{
- std::shared_ptr<api::RemoveLocationCommand> cmd(
- new api::RemoveLocationCommand("id.user % 512 == 0", makeDocumentBucket(bid)));
- //new api::RemoveLocationCommand("id.user == 1", bid));
+ auto cmd = std::make_shared<api::RemoveLocationCommand>("id.user % 512 == 0", makeDocumentBucket(bid));
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::RemoveLocationReply> reply(
- std::dynamic_pointer_cast<api::RemoveLocationReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::RemoveLocationReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
- CPPUNIT_ASSERT_EQUAL(5u, reply->getBucketInfo().getDocumentCount());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
+ EXPECT_EQ(5u, reply->getBucketInfo().getDocumentCount());
}
}
-void FileStorManagerTest::testDeleteBucket()
-{
- TestName testName("testDeleteBucket");
- // Setting up manager
+TEST_F(FileStorManagerTest, delete_bucket) {
+ // Setting up manager
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager = new FileStorManager(
config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
top.open();
- api::StorageMessageAddress address(
- "storage", lib::NodeType::STORAGE, 2);
- // Creating a document to test with
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 2);
+ // Creating a document to test with
document::DocumentId docId("userdoc:crawler:4000:http://www.ntnu.no/");
- Document::SP doc(new Document(*_testdoctype1, docId));
+ auto doc = std::make_shared<Document>(*_testdoctype1, docId);
document::BucketId bid(16, 4000);
createBucket(bid, 0);
@@ -1909,52 +1633,42 @@ void FileStorManagerTest::testDeleteBucket()
api::BucketInfo bucketInfo;
// Putting it
{
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, 105));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, 105);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
- CPPUNIT_ASSERT_EQUAL(1, (int)reply->getBucketInfo().getDocumentCount());
+ EXPECT_EQ(1, reply->getBucketInfo().getDocumentCount());
bucketInfo = reply->getBucketInfo();
top.reset();
}
// Delete bucket
{
- std::shared_ptr<api::DeleteBucketCommand> cmd(
- new api::DeleteBucketCommand(makeDocumentBucket(bid)));
+ auto cmd = std::make_shared<api::DeleteBucketCommand>(makeDocumentBucket(bid));
cmd->setAddress(address);
cmd->setBucketInfo(bucketInfo);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::DeleteBucketReply> reply(
- std::dynamic_pointer_cast<api::DeleteBucketReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::DeleteBucketReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
}
}
-void
-FileStorManagerTest::testDeleteBucketRejectOutdatedBucketInfo()
-{
- TestName testName("testDeleteBucketRejectOutdatedBucketInfo");
+TEST_F(FileStorManagerTest, delete_bucket_rejects_outdated_bucket_info) {
// Setting up manager
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager = new FileStorManager(
config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
top.open();
- api::StorageMessageAddress address(
- "storage", lib::NodeType::STORAGE, 2);
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 2);
// Creating a document to test with
document::DocumentId docId("userdoc:crawler:4000:http://www.ntnu.no/");
Document::SP doc(new Document(*_testdoctype1, docId));
@@ -1966,40 +1680,32 @@ FileStorManagerTest::testDeleteBucketRejectOutdatedBucketInfo()
// Putting it
{
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, 105));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, 105);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
- CPPUNIT_ASSERT_EQUAL(1, (int)reply->getBucketInfo().getDocumentCount());
+ EXPECT_EQ(1, reply->getBucketInfo().getDocumentCount());
bucketInfo = reply->getBucketInfo();
top.reset();
}
// Attempt to delete bucket, but with non-matching bucketinfo
{
- std::shared_ptr<api::DeleteBucketCommand> cmd(
- new api::DeleteBucketCommand(makeDocumentBucket(bid)));
+ auto cmd = std::make_shared<api::DeleteBucketCommand>(makeDocumentBucket(bid));
cmd->setBucketInfo(api::BucketInfo(0xf000baaa, 1, 123, 1, 456));
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::DeleteBucketReply> reply(
- std::dynamic_pointer_cast<api::DeleteBucketReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(
- ReturnCode::REJECTED,
- reply->getResult().getResult());
- CPPUNIT_ASSERT_EQUAL(bucketInfo, reply->getBucketInfo());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::DeleteBucketReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode::REJECTED, reply->getResult().getResult());
+ EXPECT_EQ(bucketInfo, reply->getBucketInfo());
}
}
@@ -2007,193 +1713,51 @@ FileStorManagerTest::testDeleteBucketRejectOutdatedBucketInfo()
* Test that receiving a DeleteBucketCommand with invalid
* BucketInfo deletes the bucket and does not fail the operation.
*/
-void
-FileStorManagerTest::testDeleteBucketWithInvalidBucketInfo()
-{
- TestName testName("testDeleteBucketWithInvalidBucketInfo");
+TEST_F(FileStorManagerTest, delete_bucket_with_invalid_bucket_info) {
// Setting up manager
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager = new FileStorManager(
config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
top.open();
- api::StorageMessageAddress address(
- "storage", lib::NodeType::STORAGE, 2);
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 2);
// Creating a document to test with
document::DocumentId docId("userdoc:crawler:4000:http://www.ntnu.no/");
- Document::SP doc(new Document(*_testdoctype1, docId));
+ auto doc = std::make_shared<Document>(*_testdoctype1, docId);
document::BucketId bid(16, 4000);
createBucket(bid, 0);
// Putting it
{
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, 105));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, 105);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
- CPPUNIT_ASSERT_EQUAL(1, (int)reply->getBucketInfo().getDocumentCount());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
+ EXPECT_EQ(1, reply->getBucketInfo().getDocumentCount());
top.reset();
}
// Attempt to delete bucket with invalid bucketinfo
{
- std::shared_ptr<api::DeleteBucketCommand> cmd(
- new api::DeleteBucketCommand(makeDocumentBucket(bid)));
+ auto cmd = std::make_shared<api::DeleteBucketCommand>(makeDocumentBucket(bid));
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::DeleteBucketReply> reply(
- std::dynamic_pointer_cast<api::DeleteBucketReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(
- ReturnCode::OK,
- reply->getResult().getResult());
- CPPUNIT_ASSERT_EQUAL(api::BucketInfo(), reply->getBucketInfo());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::DeleteBucketReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode::OK, reply->getResult().getResult());
+ EXPECT_EQ(api::BucketInfo(), reply->getBucketInfo());
}
}
-namespace {
-
- /**
- * Utility storage link, sending data to the given links instead of through
- * a regular chain.
- */
- struct MidLink : public StorageLink {
- StorageLink& _up;
-
- public:
- MidLink(std::unique_ptr<StorageLink> down, StorageLink& up)
- : StorageLink("MidLink"), _up(up)
- {
- push_back(std::move(down));
- }
- ~MidLink() {
- closeNextLink();
- }
-
- void print(std::ostream& out, bool, const std::string&) const override { out << "MidLink"; }
- bool onUp(const std::shared_ptr<api::StorageMessage> & msg) override {
- if (!_up.onUp(msg)) {
- _up.sendUp(msg);
- }
- return true;
- }
-
- };
-
- /**
- * Utility class, connecting two storage links below it, sending
- * messages coming up from one down the other (providing address is set
- * correctly.)
- */
- class BinaryStorageLink : public DummyStorageLink {
- vespalib::Lock _lock;
- std::set<api::StorageMessage::Id> _seen;
- MidLink _left;
- MidLink _right;
- uint16_t _leftAddr;
- uint16_t _rightAddr;
-
- public:
- BinaryStorageLink(uint16_t leftAddr, std::unique_ptr<StorageLink> left,
- uint16_t rightAddr, std::unique_ptr<StorageLink> right)
- : _left(std::move(left), *this),
- _right(std::move(right), *this),
- _leftAddr(leftAddr),
- _rightAddr(rightAddr) {}
-
- void print(std::ostream& out, bool, const std::string&) const override { out << "BinaryStorageLink"; }
-
- bool onDown(const std::shared_ptr<api::StorageMessage> & msg) override {
-// LOG(debug, "onDown Received msg: ->%s, %s %llu\n", msg->getAddress() ? msg->getAddress()->toString().c_str() : "(null)", msg->toString().c_str(), msg->getMsgId());
-
- vespalib::LockGuard lock(_lock);
- _seen.insert(msg->getMsgId());
- return sendOn(msg);
- }
-
- bool sendOn(const std::shared_ptr<api::StorageMessage> & msg) {
- if (msg->getAddress()) {
- uint16_t address = msg->getAddress()->getIndex();
- if ((address == _leftAddr && !msg->getType().isReply()) ||
- (address == _rightAddr && msg->getType().isReply()))
- {
- if (!_left.onDown(msg)) {
- _left.sendDown(msg);
- }
- } else if ((address == _rightAddr && !msg->getType().isReply()) ||
- (address == _leftAddr && msg->getType().isReply()))
- {
- if (!_right.onDown(msg)) {
- _right.sendDown(msg);
- }
- } else {
- std::ostringstream ost;
- ost << "Address " << address << " is neither " << _leftAddr
- << " or " << _rightAddr << " in message " << *msg
- << ".\n";
- CPPUNIT_FAIL(ost.str());
- }
- }
- return true;
- }
-
- bool onUp(const std::shared_ptr<api::StorageMessage> & msg) override {
- // LOG(debug, "onUp Received msg: ->%s, %s %llu\n", msg->getAddress() ? msg->getAddress()->toString().c_str() : "(null)", msg->toString().c_str(), msg->getMsgId());
-
- vespalib::LockGuard lock(_lock);
- std::set<api::StorageMessage::Id>::iterator it
- = _seen.find(msg->getMsgId());
- // If message originated from the outside
- if (it != _seen.end()) {
- LOG(debug, "Have seen this message before, storing");
-
- _seen.erase(it);
- return DummyStorageLink::onUp(msg);
- // If it originated from below, send it down again.
- } else if (msg->getType() == api::MessageType::NOTIFYBUCKETCHANGE) {
- // Just throw away notify bucket change
- return true;
- } else {
- LOG(debug, "Never seen %s, sending on!",
- msg->toString().c_str());
-
- return sendOn(msg);
- }
- }
-
- void onFlush(bool downwards) override {
- if (downwards) {
- _left.flush();
- _right.flush();
- }
- }
- void onOpen() override {
- _left.open();
- _right.open();
- }
- void onClose() override {
- _left.close();
- _right.close();
- }
- };
-}
-
-void
-FileStorManagerTest::testNoTimestamps()
-{
- TestName testName("testNoTimestamps");
- // Setting up manager
+TEST_F(FileStorManagerTest, no_timestamps) {
+ // Setting up manager
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager =
@@ -2201,7 +1765,7 @@ FileStorManagerTest::testNoTimestamps()
top.open();
api::StorageMessageAddress address(
"storage", lib::NodeType::STORAGE, 3);
- // Creating a document to test with
+ // Creating a document to test with
Document::SP doc(createDocument(
"some content", "doc:crawler:http://www.ntnu.no/").release());
document::BucketId bid(16, 4000);
@@ -2210,53 +1774,41 @@ FileStorManagerTest::testNoTimestamps()
// Putting it
{
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, 0));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, 0);
cmd->setAddress(address);
- CPPUNIT_ASSERT_EQUAL((api::Timestamp)0, cmd->getTimestamp());
+ EXPECT_EQ(api::Timestamp(0), cmd->getTimestamp());
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode::REJECTED,
- reply->getResult().getResult());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode::REJECTED, reply->getResult().getResult());
}
- // Removing it
+ // Removing it
{
- std::shared_ptr<api::RemoveCommand> cmd(
- new api::RemoveCommand(makeDocumentBucket(bid), doc->getId(), 0));
+ auto cmd = std::make_shared<api::RemoveCommand>(makeDocumentBucket(bid), doc->getId(), 0);
cmd->setAddress(address);
- CPPUNIT_ASSERT_EQUAL((api::Timestamp)0, cmd->getTimestamp());
+ EXPECT_EQ(api::Timestamp(0), cmd->getTimestamp());
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::RemoveReply> reply(
- std::dynamic_pointer_cast<api::RemoveReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::RemoveReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode::REJECTED,
- reply->getResult().getResult());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode::REJECTED, reply->getResult().getResult());
}
}
-void
-FileStorManagerTest::testEqualTimestamps()
-{
- TestName testName("testEqualTimestamps");
- // Setting up manager
+TEST_F(FileStorManagerTest, equal_timestamps) {
+ // Setting up manager
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager =
new FileStorManager(config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
top.open();
- api::StorageMessageAddress address(
- "storage", lib::NodeType::STORAGE, 3);
- // Creating a document to test with
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
+ // Creating a document to test with
document::BucketId bid(16, 4000);
createBucket(bid, 0);
@@ -2264,20 +1816,16 @@ FileStorManagerTest::testEqualTimestamps()
// Putting it
{
Document::SP doc(createDocument(
- "some content", "userdoc:crawler:4000:http://www.ntnu.no/")
- .release());
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, 100));
+ "some content", "userdoc:crawler:4000:http://www.ntnu.no/"));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, 100);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode::OK, reply->getResult().getResult());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode::OK, reply->getResult().getResult());
}
// Putting it on same timestamp again
@@ -2285,48 +1833,36 @@ FileStorManagerTest::testEqualTimestamps()
// have to accept this)
{
Document::SP doc(createDocument(
- "some content", "userdoc:crawler:4000:http://www.ntnu.no/")
- .release());
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, 100));
+ "some content", "userdoc:crawler:4000:http://www.ntnu.no/"));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, 100);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode::OK, reply->getResult().getResult());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode::OK, reply->getResult().getResult());
}
// Putting the doc with other id. Now we should fail
{
Document::SP doc(createDocument(
- "some content", "userdoc:crawler:4000:http://www.ntnu.nu/")
- .release());
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, 100));
+ "some content", "userdoc:crawler:4000:http://www.ntnu.nu/"));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, 100);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode::TIMESTAMP_EXIST,
- reply->getResult().getResult());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode::TIMESTAMP_EXIST, reply->getResult().getResult());
}
}
-void
-FileStorManagerTest::testGetIter()
-{
- TestName testName("testGetIter");
- // Setting up manager
+TEST_F(FileStorManagerTest, get_iter) {
+ // Setting up manager
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager =
@@ -2339,85 +1875,70 @@ FileStorManagerTest::testGetIter()
createBucket(bid, 0);
std::vector<Document::SP > docs;
- // Creating some documents to test with
+ // Creating some documents to test with
for (uint32_t i=0; i<10; ++i) {
std::ostringstream id;
id << "userdoc:crawler:4000:http://www.ntnu.no/" << i;
- docs.push_back(
+ docs.emplace_back(
Document::SP(
_node->getTestDocMan().createRandomDocumentAtLocation(
4000, i, 400, 400)));
}
api::BucketInfo bucketInfo;
- // Putting all docs to have something to visit
+ // Putting all docs to have something to visit
for (uint32_t i=0; i<docs.size(); ++i) {
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), docs[i], 100 + i));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), docs[i], 100 + i);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
bucketInfo = reply->getBucketInfo();
}
- // Sending a getiter request that will only visit some of the docs
+ // Sending a getiter request that will only visit some of the docs
spi::IteratorId iterId(createIterator(top, bid, ""));
{
- std::shared_ptr<GetIterCommand> cmd(
- new GetIterCommand(makeDocumentBucket(bid), iterId, 2048));
+ auto cmd = std::make_shared<GetIterCommand>(makeDocumentBucket(bid), iterId, 2048);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<GetIterReply> reply(
- std::dynamic_pointer_cast<GetIterReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<GetIterReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
- CPPUNIT_ASSERT(reply->getEntries().size() > 0);
- CPPUNIT_ASSERT(reply->getEntries().size() < docs.size());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
+ EXPECT_GT(reply->getEntries().size(), 0);
+ EXPECT_LT(reply->getEntries().size(), docs.size());
}
- // Normal case of get iter is testing through visitor tests.
- // Testing specific situation where file is deleted while visiting here
+    // The normal get iter case is tested through the visitor tests.
+    // Here we test the specific situation where the file is deleted while visiting.
{
- std::shared_ptr<api::DeleteBucketCommand> cmd(
- new api::DeleteBucketCommand(makeDocumentBucket(bid)));
+ auto cmd = std::make_shared<api::DeleteBucketCommand>(makeDocumentBucket(bid));
cmd->setBucketInfo(bucketInfo);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::DeleteBucketReply> reply(
- std::dynamic_pointer_cast<api::DeleteBucketReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::DeleteBucketReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
}
{
auto cmd = std::make_shared<GetIterCommand>(makeDocumentBucket(bid), iterId, 2048);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL(size_t(1), top.getNumReplies());
- std::shared_ptr<GetIterReply> reply(
- std::dynamic_pointer_cast<GetIterReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<GetIterReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode::BUCKET_NOT_FOUND,
- reply->getResult().getResult());
- CPPUNIT_ASSERT(reply->getEntries().empty());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode::BUCKET_NOT_FOUND, reply->getResult().getResult());
+ EXPECT_TRUE(reply->getEntries().empty());
}
}
-void
-FileStorManagerTest::testSetBucketActiveState()
-{
- TestName testName("testSetBucketActiveState");
+TEST_F(FileStorManagerTest, set_bucket_active_state) {
DummyStorageLink top;
FileStorManager* manager(
new FileStorManager(config->getConfigId(),
@@ -2433,83 +1954,70 @@ FileStorManagerTest::testSetBucketActiveState()
const uint16_t disk = 0;
createBucket(bid, disk);
- spi::dummy::DummyPersistence& provider(
- dynamic_cast<spi::dummy::DummyPersistence&>(_node->getPersistenceProvider()));
- CPPUNIT_ASSERT(!provider.isActive(makeSpiBucket(bid, spi::PartitionId(disk))));
+ auto& provider = dynamic_cast<spi::dummy::DummyPersistence&>(_node->getPersistenceProvider());
+ EXPECT_FALSE(provider.isActive(makeSpiBucket(bid, spi::PartitionId(disk))));
{
- std::shared_ptr<api::SetBucketStateCommand> cmd(
- new api::SetBucketStateCommand(
- makeDocumentBucket(bid), api::SetBucketStateCommand::ACTIVE));
+ auto cmd = std::make_shared<api::SetBucketStateCommand>(
+ makeDocumentBucket(bid), api::SetBucketStateCommand::ACTIVE);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::SetBucketStateReply> reply(
- std::dynamic_pointer_cast<api::SetBucketStateReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::SetBucketStateReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
}
- CPPUNIT_ASSERT(provider.isActive(makeSpiBucket(bid, spi::PartitionId(disk))));
+ EXPECT_TRUE(provider.isActive(makeSpiBucket(bid, spi::PartitionId(disk))));
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(
bid, "foo"));
- CPPUNIT_ASSERT(entry->info.isActive());
+ EXPECT_TRUE(entry->info.isActive());
}
// Trigger bucket info to be read back into the database
{
- std::shared_ptr<ReadBucketInfo> cmd(
- new ReadBucketInfo(makeDocumentBucket(bid)));
+ auto cmd = std::make_shared<ReadBucketInfo>(makeDocumentBucket(bid));
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<ReadBucketInfoReply> reply(
- std::dynamic_pointer_cast<ReadBucketInfoReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<ReadBucketInfoReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
+ ASSERT_TRUE(reply.get());
}
// Should not have lost active flag
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(
bid, "foo"));
- CPPUNIT_ASSERT(entry->info.isActive());
+ EXPECT_TRUE(entry->info.isActive());
}
{
- std::shared_ptr<api::SetBucketStateCommand> cmd(
- new api::SetBucketStateCommand(
- makeDocumentBucket(bid), api::SetBucketStateCommand::INACTIVE));
+ auto cmd = std::make_shared<api::SetBucketStateCommand>(
+ makeDocumentBucket(bid), api::SetBucketStateCommand::INACTIVE);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::SetBucketStateReply> reply(
- std::dynamic_pointer_cast<api::SetBucketStateReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::SetBucketStateReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
}
- CPPUNIT_ASSERT(!provider.isActive(makeSpiBucket(bid, spi::PartitionId(disk))));
+ EXPECT_FALSE(provider.isActive(makeSpiBucket(bid, spi::PartitionId(disk))));
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(
bid, "foo"));
- CPPUNIT_ASSERT(!entry->info.isActive());
+ EXPECT_FALSE(entry->info.isActive());
}
}
-void
-FileStorManagerTest::testNotifyOwnerDistributorOnOutdatedSetBucketState()
-{
- TestName testName("testNotifyOwnerDistributorOnOutdatedSetBucketState");
+TEST_F(FileStorManagerTest, notify_owner_distributor_on_outdated_set_bucket_state) {
DummyStorageLink top;
FileStorManager* manager(
new FileStorManager(config->getConfigId(),
@@ -2522,47 +2030,37 @@ FileStorManagerTest::testNotifyOwnerDistributorOnOutdatedSetBucketState()
top.open();
document::BucketId bid(getFirstBucketNotOwnedByDistributor(0));
- CPPUNIT_ASSERT(bid.getRawId() != 0);
+ ASSERT_NE(bid.getRawId(), 0);
createBucket(bid, 0);
- std::shared_ptr<api::SetBucketStateCommand> cmd(
- new api::SetBucketStateCommand(
- makeDocumentBucket(bid), api::SetBucketStateCommand::ACTIVE));
- cmd->setAddress(api::StorageMessageAddress(
- "cluster", lib::NodeType::STORAGE, 1));
+ auto cmd = std::make_shared<api::SetBucketStateCommand>(
+ makeDocumentBucket(bid), api::SetBucketStateCommand::ACTIVE);
+ cmd->setAddress(api::StorageMessageAddress("cluster", lib::NodeType::STORAGE, 1));
cmd->setSourceIndex(0);
top.sendDown(cmd);
top.waitForMessages(2, _waitTime);
- CPPUNIT_ASSERT_EQUAL(size_t(2), top.getNumReplies());
+ ASSERT_EQ(2, top.getNumReplies());
// Not necessarily deterministic order.
int idxOffset = 0;
if (top.getReply(0)->getType() != api::MessageType::NOTIFYBUCKETCHANGE) {
++idxOffset;
}
- std::shared_ptr<api::NotifyBucketChangeCommand> notifyCmd(
- std::dynamic_pointer_cast<api::NotifyBucketChangeCommand>(
- top.getReply(idxOffset)));
- std::shared_ptr<api::SetBucketStateReply> stateReply(
- std::dynamic_pointer_cast<api::SetBucketStateReply>(
- top.getReply(1 - idxOffset)));
+ auto notifyCmd = std::dynamic_pointer_cast<api::NotifyBucketChangeCommand>(top.getReply(idxOffset));
+ auto stateReply = std::dynamic_pointer_cast<api::SetBucketStateReply>(top.getReply(1 - idxOffset));
- CPPUNIT_ASSERT(stateReply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- stateReply->getResult());
+ ASSERT_TRUE(stateReply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), stateReply->getResult());
- CPPUNIT_ASSERT(notifyCmd.get());
- CPPUNIT_ASSERT_EQUAL(uint16_t(1), notifyCmd->getAddress()->getIndex());
+ ASSERT_TRUE(notifyCmd.get());
+ EXPECT_EQ(1, notifyCmd->getAddress()->getIndex());
// Not necessary for this to be set since distributor does not insert this
// info into its db, but useful for debugging purposes.
- CPPUNIT_ASSERT(notifyCmd->getBucketInfo().isActive());
+ EXPECT_TRUE(notifyCmd->getBucketInfo().isActive());
}
-void
-FileStorManagerTest::testGetBucketDiffImplicitCreateBucket()
-{
- TestName testName("testGetBucketDiffImplicitCreateBucket");
+TEST_F(FileStorManagerTest, get_bucket_diff_implicitly_creates_bucket) {
DummyStorageLink top;
FileStorManager* manager(
new FileStorManager(config->getConfigId(),
@@ -2575,34 +2073,26 @@ FileStorManagerTest::testGetBucketDiffImplicitCreateBucket()
document::BucketId bid(16, 4000);
- std::vector<api::MergeBucketCommand::Node> nodes;
- nodes.push_back(1);
- nodes.push_back(0);
+ std::vector<api::MergeBucketCommand::Node> nodes = {1, 0};
- std::shared_ptr<api::GetBucketDiffCommand> cmd(
- new api::GetBucketDiffCommand(makeDocumentBucket(bid), nodes, Timestamp(1000)));
- cmd->setAddress(api::StorageMessageAddress(
- "cluster", lib::NodeType::STORAGE, 1));
+ auto cmd = std::make_shared<api::GetBucketDiffCommand>(makeDocumentBucket(bid), nodes, Timestamp(1000));
+ cmd->setAddress(api::StorageMessageAddress("cluster", lib::NodeType::STORAGE, 1));
cmd->setSourceIndex(0);
top.sendDown(cmd);
api::GetBucketDiffReply* reply;
ASSERT_SINGLE_REPLY(api::GetBucketDiffReply, reply, top, _waitTime);
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::OK),
- reply->getResult());
+ EXPECT_EQ(api::ReturnCode(api::ReturnCode::OK), reply->getResult());
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(
bid, "foo"));
- CPPUNIT_ASSERT(entry.exist());
- CPPUNIT_ASSERT(entry->info.isReady());
+ ASSERT_TRUE(entry.exist());
+ EXPECT_TRUE(entry->info.isReady());
}
}
-void
-FileStorManagerTest::testMergeBucketImplicitCreateBucket()
-{
- TestName testName("testMergeBucketImplicitCreateBucket");
+TEST_F(FileStorManagerTest, merge_bucket_implicitly_creates_bucket) {
DummyStorageLink top;
FileStorManager* manager(
new FileStorManager(config->getConfigId(),
@@ -2615,14 +2105,10 @@ FileStorManagerTest::testMergeBucketImplicitCreateBucket()
document::BucketId bid(16, 4000);
- std::vector<api::MergeBucketCommand::Node> nodes;
- nodes.push_back(1);
- nodes.push_back(2);
+ std::vector<api::MergeBucketCommand::Node> nodes = {1, 2};
- std::shared_ptr<api::MergeBucketCommand> cmd(
- new api::MergeBucketCommand(makeDocumentBucket(bid), nodes, Timestamp(1000)));
- cmd->setAddress(api::StorageMessageAddress(
- "cluster", lib::NodeType::STORAGE, 1));
+ auto cmd = std::make_shared<api::MergeBucketCommand>(makeDocumentBucket(bid), nodes, Timestamp(1000));
+ cmd->setAddress(api::StorageMessageAddress("cluster", lib::NodeType::STORAGE, 1));
cmd->setSourceIndex(0);
top.sendDown(cmd);
@@ -2632,15 +2118,12 @@ FileStorManagerTest::testMergeBucketImplicitCreateBucket()
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(
bid, "foo"));
- CPPUNIT_ASSERT(entry.exist());
- CPPUNIT_ASSERT(entry->info.isReady());
+ ASSERT_TRUE(entry.exist());
+ EXPECT_TRUE(entry->info.isReady());
}
}
-void
-FileStorManagerTest::testNewlyCreatedBucketIsReady()
-{
- TestName testName("testNewlyCreatedBucketIsReady");
+TEST_F(FileStorManagerTest, newly_created_bucket_is_ready) {
DummyStorageLink top;
FileStorManager* manager(
new FileStorManager(config->getConfigId(),
@@ -2653,31 +2136,26 @@ FileStorManagerTest::testNewlyCreatedBucketIsReady()
document::BucketId bid(16, 4000);
- std::shared_ptr<api::CreateBucketCommand> cmd(
- new api::CreateBucketCommand(makeDocumentBucket(bid)));
- cmd->setAddress(api::StorageMessageAddress(
- "cluster", lib::NodeType::STORAGE, 1));
+ auto cmd = std::make_shared<api::CreateBucketCommand>(makeDocumentBucket(bid));
+ cmd->setAddress(api::StorageMessageAddress("cluster", lib::NodeType::STORAGE, 1));
cmd->setSourceIndex(0);
top.sendDown(cmd);
api::CreateBucketReply* reply;
ASSERT_SINGLE_REPLY(api::CreateBucketReply, reply, top, _waitTime);
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::OK),
- reply->getResult());
+ EXPECT_EQ(api::ReturnCode(api::ReturnCode::OK), reply->getResult());
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(
bid, "foo"));
- CPPUNIT_ASSERT(entry.exist());
- CPPUNIT_ASSERT(entry->info.isReady());
- CPPUNIT_ASSERT(!entry->info.isActive());
+ ASSERT_TRUE(entry.exist());
+ EXPECT_TRUE(entry->info.isReady());
+ EXPECT_FALSE(entry->info.isActive());
}
}
-void
-FileStorManagerTest::testCreateBucketSetsActiveFlagInDatabaseAndReply()
-{
- TestFileStorComponents c(*this, "testNotifyOnSplitSourceOwnershipChanged");
+TEST_F(FileStorManagerTest, create_bucket_sets_active_flag_in_database_and_reply) {
+ TestFileStorComponents c(*this);
setClusterState("storage:2 distributor:1");
document::BucketId bid(16, 4000);
@@ -2691,15 +2169,14 @@ FileStorManagerTest::testCreateBucketSetsActiveFlagInDatabaseAndReply()
api::CreateBucketReply* reply;
ASSERT_SINGLE_REPLY(api::CreateBucketReply, reply, c.top, _waitTime);
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::OK),
- reply->getResult());
+ EXPECT_EQ(api::ReturnCode(api::ReturnCode::OK), reply->getResult());
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(
bid, "foo"));
- CPPUNIT_ASSERT(entry.exist());
- CPPUNIT_ASSERT(entry->info.isReady());
- CPPUNIT_ASSERT(entry->info.isActive());
+ ASSERT_TRUE(entry.exist());
+ EXPECT_TRUE(entry->info.isReady());
+ EXPECT_TRUE(entry->info.isActive());
}
}
@@ -2710,11 +2187,11 @@ void FileStorManagerTest::assert_request_size_set(TestFileStorComponents& c, std
cmd->setAddress(address);
c.top.sendDown(cmd);
c.top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL(static_cast<int64_t>(cmd->getApproxByteSize()), metric.request_size.getLast());
+ EXPECT_EQ(static_cast<int64_t>(cmd->getApproxByteSize()), metric.request_size.getLast());
}
-void FileStorManagerTest::put_command_size_is_added_to_metric() {
- TestFileStorComponents c(*this, "put_command_size_is_added_to_metric");
+TEST_F(FileStorManagerTest, put_command_size_is_added_to_metric) {
+ TestFileStorComponents c(*this);
document::BucketId bucket(16, 4000);
createBucket(bucket, 0);
auto cmd = std::make_shared<api::PutCommand>(
@@ -2723,8 +2200,8 @@ void FileStorManagerTest::put_command_size_is_added_to_metric() {
assert_request_size_set(c, std::move(cmd), thread_metrics_of(*c.manager)->put[defaultLoadType]);
}
-void FileStorManagerTest::update_command_size_is_added_to_metric() {
- TestFileStorComponents c(*this, "update_command_size_is_added_to_metric");
+TEST_F(FileStorManagerTest, update_command_size_is_added_to_metric) {
+ TestFileStorComponents c(*this);
document::BucketId bucket(16, 4000);
createBucket(bucket, 0);
auto update = std::make_shared<document::DocumentUpdate>(
@@ -2737,8 +2214,8 @@ void FileStorManagerTest::update_command_size_is_added_to_metric() {
assert_request_size_set(c, std::move(cmd), thread_metrics_of(*c.manager)->update[defaultLoadType]);
}
-void FileStorManagerTest::remove_command_size_is_added_to_metric() {
- TestFileStorComponents c(*this, "remove_command_size_is_added_to_metric");
+TEST_F(FileStorManagerTest, remove_command_size_is_added_to_metric) {
+ TestFileStorComponents c(*this);
document::BucketId bucket(16, 4000);
createBucket(bucket, 0);
auto cmd = std::make_shared<api::RemoveCommand>(
@@ -2747,8 +2224,8 @@ void FileStorManagerTest::remove_command_size_is_added_to_metric() {
assert_request_size_set(c, std::move(cmd), thread_metrics_of(*c.manager)->remove[defaultLoadType]);
}
-void FileStorManagerTest::get_command_size_is_added_to_metric() {
- TestFileStorComponents c(*this, "get_command_size_is_added_to_metric");
+TEST_F(FileStorManagerTest, get_command_size_is_added_to_metric) {
+ TestFileStorComponents c(*this);
document::BucketId bucket(16, 4000);
createBucket(bucket, 0);
auto cmd = std::make_shared<api::GetCommand>(
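The pattern above repeats throughout the patch: the suite declaration, the CPPUNIT_TEST list and the CPPUNIT_TEST_SUITE_REGISTRATION macro are dropped, each test method becomes a TEST_F case on the fixture, and CPPUNIT_ASSERT_EQUAL maps to EXPECT_EQ or ASSERT_EQ. A minimal sketch of the target shape, using hypothetical names (SomeFixture, some_case) rather than anything from the patch:

// Illustrative sketch only; SomeFixture and some_case are hypothetical names.
#include <vespa/vespalib/gtest/gtest.h>

struct SomeFixture : ::testing::Test {
    int answer() const { return 42; }
};

// One TEST_F per former CppUnit test method; no suite macros, no registration.
TEST_F(SomeFixture, some_case) {
    EXPECT_EQ(42, answer());    // was CPPUNIT_ASSERT_EQUAL(42, answer());
    ASSERT_TRUE(answer() > 0);  // was CPPUNIT_ASSERT(answer() > 0);
}

The (expected, actual) argument order is the same in both frameworks, which is why the converted assertions carry their arguments over unchanged.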
diff --git a/storage/src/tests/persistence/filestorage/filestormodifiedbucketstest.cpp b/storage/src/tests/persistence/filestorage/filestormodifiedbucketstest.cpp
index 1fab3a8bcc1..e21dde006dc 100644
--- a/storage/src/tests/persistence/filestorage/filestormodifiedbucketstest.cpp
+++ b/storage/src/tests/persistence/filestorage/filestormodifiedbucketstest.cpp
@@ -1,6 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storageapi/message/bucket.h>
#include <vespa/storage/persistence/filestorage/modifiedbucketchecker.h>
#include <vespa/persistence/spi/test.h>
@@ -9,6 +8,7 @@
#include <tests/persistence/common/filestortestfixture.h>
using storage::spi::test::makeSpiBucket;
+using namespace ::testing;
namespace storage {
@@ -16,26 +16,14 @@ namespace storage {
* Effectively an integration test between the ModifiedBucketChecker storage
* link and the behavior of the filestor component.
*/
-class FileStorModifiedBucketsTest : public FileStorTestFixture
-{
-public:
- void modifiedBucketsSendNotifyBucketChange();
- void fileStorRepliesToRecheckBucketCommands();
-
+struct FileStorModifiedBucketsTest : FileStorTestFixture {
void modifyBuckets(uint32_t first, uint32_t count);
spi::dummy::DummyPersistence& getDummyPersistence() {
return dynamic_cast<spi::dummy::DummyPersistence&>(_node->getPersistenceProvider());
}
-
- CPPUNIT_TEST_SUITE(FileStorModifiedBucketsTest);
- CPPUNIT_TEST(modifiedBucketsSendNotifyBucketChange);
- CPPUNIT_TEST(fileStorRepliesToRecheckBucketCommands);
- CPPUNIT_TEST_SUITE_END();
};
-CPPUNIT_TEST_SUITE_REGISTRATION(FileStorModifiedBucketsTest);
-
namespace {
struct BucketCheckerInjector : FileStorTestFixture::StorageLinkInjector
@@ -48,20 +36,19 @@ struct BucketCheckerInjector : FileStorTestFixture::StorageLinkInjector
_fixture(fixture)
{}
void inject(DummyStorageLink& link) const override {
- link.push_back(std::unique_ptr<ModifiedBucketChecker>(
- new ModifiedBucketChecker(_node.getComponentRegister(),
- _node.getPersistenceProvider(),
- _fixture._config->getConfigId())));
+ link.push_back(std::make_unique<ModifiedBucketChecker>(
+ _node.getComponentRegister(),
+ _node.getPersistenceProvider(),
+ _fixture._config->getConfigId()));
}
};
void
assertIsNotifyCommandWithActiveBucket(api::StorageMessage& msg)
{
- api::NotifyBucketChangeCommand& cmd(
- dynamic_cast<api::NotifyBucketChangeCommand&>(msg));
- CPPUNIT_ASSERT(cmd.getBucketInfo().isActive());
- CPPUNIT_ASSERT_EQUAL(
+ auto& cmd = dynamic_cast<api::NotifyBucketChangeCommand&>(msg);
+ ASSERT_TRUE(cmd.getBucketInfo().isActive());
+ ASSERT_EQ(
vespalib::string("StorageMessageAddress(Storage protocol, "
"cluster storage, nodetype distributor, index 0)"),
cmd.getAddress()->toString());
@@ -83,11 +70,9 @@ FileStorModifiedBucketsTest::modifyBuckets(uint32_t first, uint32_t count)
getDummyPersistence().setModifiedBuckets(buckets);
}
-void
-FileStorModifiedBucketsTest::modifiedBucketsSendNotifyBucketChange()
-{
+TEST_F(FileStorModifiedBucketsTest, modified_buckets_send_notify_bucket_change) {
BucketCheckerInjector bcj(*_node, *this);
- TestFileStorComponents c(*this, "modifiedBucketsSendNotifyBucketChange", bcj);
+ TestFileStorComponents c(*this, bcj);
setClusterState("storage:1 distributor:1");
uint32_t numBuckets = 10;
@@ -104,21 +89,19 @@ FileStorModifiedBucketsTest::modifiedBucketsSendNotifyBucketChange()
c.top.waitForMessages(numBuckets, MSG_WAIT_TIME);
for (uint32_t i = 0; i < 10; ++i) {
- assertIsNotifyCommandWithActiveBucket(*c.top.getReply(i));
+ ASSERT_NO_FATAL_FAILURE(assertIsNotifyCommandWithActiveBucket(*c.top.getReply(i)));
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(
document::BucketId(16, i), "foo"));
- CPPUNIT_ASSERT(entry->info.isActive());
+ EXPECT_TRUE(entry->info.isActive());
}
}
-void
-FileStorModifiedBucketsTest::fileStorRepliesToRecheckBucketCommands()
-{
+TEST_F(FileStorModifiedBucketsTest, file_stor_replies_to_recheck_bucket_commands) {
BucketCheckerInjector bcj(*_node, *this);
- TestFileStorComponents c(*this, "fileStorRepliesToRecheckBucketCommands", bcj);
+ TestFileStorComponents c(*this, bcj);
setClusterState("storage:1 distributor:1");
document::BucketId bucket(16, 0);
@@ -129,7 +112,7 @@ FileStorModifiedBucketsTest::fileStorRepliesToRecheckBucketCommands()
modifyBuckets(0, 1);
c.top.waitForMessages(1, MSG_WAIT_TIME);
- assertIsNotifyCommandWithActiveBucket(*c.top.getReply(0));
+ ASSERT_NO_FATAL_FAILURE(assertIsNotifyCommandWithActiveBucket(*c.top.getReply(0)));
// If we don't reply to the recheck bucket commands, we won't trigger
// a new round of getModifiedBuckets and recheck commands.
@@ -137,7 +120,7 @@ FileStorModifiedBucketsTest::fileStorRepliesToRecheckBucketCommands()
createBucket(makeSpiBucket(document::BucketId(16, 1)));
modifyBuckets(1, 1);
c.top.waitForMessages(1, MSG_WAIT_TIME);
- assertIsNotifyCommandWithActiveBucket(*c.top.getReply(0));
+ ASSERT_NO_FATAL_FAILURE(assertIsNotifyCommandWithActiveBucket(*c.top.getReply(0)));
}
} // storage
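In GTest, a failing ASSERT_* aborts only the function it appears in. Helper functions such as assertIsNotifyCommandWithActiveBucket therefore have to be wrapped in ASSERT_NO_FATAL_FAILURE at the call site, as the converted tests above do, or the test would keep running past a failed check. A minimal sketch, with check_is_even as a hypothetical helper:

#include <vespa/vespalib/gtest/gtest.h>

// Hypothetical helper: a failing ASSERT_* here aborts only this function.
void check_is_even(int n) {
    ASSERT_EQ(0, n % 2) << n << " is not even";
}

TEST(NoFatalFailureSketch, helper_failures_abort_the_test) {
    // The wrapper propagates a fatal failure from the helper to the test,
    // so execution does not continue on top of a broken precondition.
    ASSERT_NO_FATAL_FAILURE(check_is_even(2));
}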
diff --git a/storage/src/tests/persistence/filestorage/mergeblockingtest.cpp b/storage/src/tests/persistence/filestorage/mergeblockingtest.cpp
index d4cec415937..d9582cec585 100644
--- a/storage/src/tests/persistence/filestorage/mergeblockingtest.cpp
+++ b/storage/src/tests/persistence/filestorage/mergeblockingtest.cpp
@@ -1,20 +1,18 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vector>
-#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/document/test/make_document_bucket.h>
#include <vespa/storage/persistence/messages.h>
#include <tests/persistence/common/persistenceproviderwrapper.h>
#include <vespa/persistence/dummyimpl/dummypersistence.h>
#include <tests/persistence/common/filestortestfixture.h>
-#include <vespa/document/test/make_document_bucket.h>
+#include <vector>
using document::test::makeDocumentBucket;
+using namespace ::testing;
namespace storage {
-class MergeBlockingTest : public FileStorTestFixture
-{
-public:
+struct MergeBlockingTest : public FileStorTestFixture {
void setupDisks() {
FileStorTestFixture::setupPersistenceThreads(1);
_node->setPersistenceProvider(
@@ -22,32 +20,11 @@ public:
new spi::dummy::DummyPersistence(_node->getTypeRepo(), 1)));
}
-public:
- void testRejectMergeForInconsistentInnerBucket();
- void testRejectMergeForInconsistentLeafBucket();
- void testRejectGetBucketDiffWithInconsistentBucket();
- void testRejectApplyDiffWhenBucketHasBecomeInconsistent();
- void testRejectApplyReplyWhenBucketHasBecomeInconsistent();
- void testRejectGetDiffReplyWhenBucketHasBecomeInconsistent();
- void testRejectMergeWhenLowUsedBitCount();
-
- void setUp() override;
-
- CPPUNIT_TEST_SUITE(MergeBlockingTest);
- CPPUNIT_TEST(testRejectMergeForInconsistentInnerBucket);
- CPPUNIT_TEST(testRejectMergeForInconsistentLeafBucket);
- CPPUNIT_TEST(testRejectGetBucketDiffWithInconsistentBucket);
- CPPUNIT_TEST(testRejectApplyDiffWhenBucketHasBecomeInconsistent);
- CPPUNIT_TEST(testRejectApplyReplyWhenBucketHasBecomeInconsistent);
- CPPUNIT_TEST(testRejectGetDiffReplyWhenBucketHasBecomeInconsistent);
- CPPUNIT_TEST(testRejectMergeWhenLowUsedBitCount);
- CPPUNIT_TEST_SUITE_END();
+ void SetUp() override;
};
-CPPUNIT_TEST_SUITE_REGISTRATION(MergeBlockingTest);
-
void
-MergeBlockingTest::setUp()
+MergeBlockingTest::SetUp()
{
setupDisks();
}
@@ -67,25 +44,18 @@ assignCommandMeta(api::StorageCommand& msg) {
std::vector<api::MergeBucketCommand::Node>
getNodes() {
- std::vector<api::MergeBucketCommand::Node> nodes;
- nodes.push_back(0);
- nodes.push_back(1);
- return nodes;
+ return std::vector<api::MergeBucketCommand::Node>({0, 1});
}
std::vector<api::MergeBucketCommand::Node>
getNodesWithForwarding() {
- std::vector<api::MergeBucketCommand::Node> nodes;
- nodes.push_back(0);
- nodes.push_back(1);
- nodes.push_back(2);
- return nodes;
+ return std::vector<api::MergeBucketCommand::Node>({0, 1, 2});
}
std::shared_ptr<api::MergeBucketCommand>
createMerge(const document::BucketId& bucket) {
- std::shared_ptr<api::MergeBucketCommand> cmd(
- new api::MergeBucketCommand(makeDocumentBucket(bucket), getNodes(), api::Timestamp(1000)));
+ auto cmd = std::make_shared<api::MergeBucketCommand>(
+ makeDocumentBucket(bucket), getNodes(), api::Timestamp(1000));
assignCommandMeta(*cmd);
return cmd;
}
@@ -94,8 +64,8 @@ std::shared_ptr<api::GetBucketDiffCommand>
createGetDiff(const document::BucketId& bucket,
const std::vector<api::MergeBucketCommand::Node>& nodes)
{
- std::shared_ptr<api::GetBucketDiffCommand> cmd(
- new api::GetBucketDiffCommand(makeDocumentBucket(bucket), nodes, api::Timestamp(1000)));
+ auto cmd = std::make_shared<api::GetBucketDiffCommand>(
+ makeDocumentBucket(bucket), nodes, api::Timestamp(1000));
assignCommandMeta(*cmd);
return cmd;
}
@@ -103,8 +73,7 @@ createGetDiff(const document::BucketId& bucket,
std::shared_ptr<api::ApplyBucketDiffCommand>
createApplyDiff(const document::BucketId& bucket,
const std::vector<api::MergeBucketCommand::Node>& nodes) {
- std::shared_ptr<api::ApplyBucketDiffCommand> cmd(
- new api::ApplyBucketDiffCommand(makeDocumentBucket(bucket), nodes, 1024*1024));
+ auto cmd = std::make_shared<api::ApplyBucketDiffCommand>(makeDocumentBucket(bucket), nodes, 1024*1024);
assignCommandMeta(*cmd);
return cmd;
}
@@ -115,127 +84,104 @@ const document::BucketId innerBucket2(15, 1);
}
-void
-MergeBlockingTest::testRejectMergeForInconsistentInnerBucket()
-{
- TestFileStorComponents c(*this, "testRejectMergeForInconsistentInnerBucket");
+TEST_F(MergeBlockingTest, reject_merge_for_inconsistent_inner_bucket) {
+ TestFileStorComponents c(*this);
createBucket(leafBucket);
- std::shared_ptr<api::MergeBucketCommand> cmd(createMerge(innerBucket));
+ auto cmd = createMerge(innerBucket);
c.top.sendDown(cmd);
- expectAbortedReply<api::MergeBucketReply>(c.top);
- CPPUNIT_ASSERT(!bucketExistsInDb(innerBucket));
+ ASSERT_NO_FATAL_FAILURE(expectAbortedReply<api::MergeBucketReply>(c.top));
+ EXPECT_FALSE(bucketExistsInDb(innerBucket));
}
-void
-MergeBlockingTest::testRejectMergeForInconsistentLeafBucket()
-{
- TestFileStorComponents c(*this, "testRejectMergeForInconsistentInnerBucket");
+TEST_F(MergeBlockingTest, reject_merge_for_inconsistent_leaf_bucket) {
+ TestFileStorComponents c(*this);
createBucket(innerBucket);
- std::shared_ptr<api::MergeBucketCommand> cmd(createMerge(leafBucket));
+ auto cmd = createMerge(leafBucket);
c.top.sendDown(cmd);
- expectAbortedReply<api::MergeBucketReply>(c.top);
- CPPUNIT_ASSERT(!bucketExistsInDb(leafBucket));
+ ASSERT_NO_FATAL_FAILURE(expectAbortedReply<api::MergeBucketReply>(c.top));
+ EXPECT_FALSE(bucketExistsInDb(leafBucket));
}
-void
-MergeBlockingTest::testRejectGetBucketDiffWithInconsistentBucket()
-{
- TestFileStorComponents c(*this, "testRejectGetBucketDiffWithInconsistentBucket");
- CPPUNIT_ASSERT(innerBucket.contains(leafBucket));
+TEST_F(MergeBlockingTest, reject_get_diff_with_inconsistent_bucket) {
+ TestFileStorComponents c(*this);
+ ASSERT_TRUE(innerBucket.contains(leafBucket));
createBucket(innerBucket);
- std::shared_ptr<api::GetBucketDiffCommand> cmd(createGetDiff(leafBucket, getNodes()));
+ auto cmd = createGetDiff(leafBucket, getNodes());
c.top.sendDown(cmd);
- expectAbortedReply<api::GetBucketDiffReply>(c.top);
- CPPUNIT_ASSERT(!bucketExistsInDb(leafBucket));
+ ASSERT_NO_FATAL_FAILURE(expectAbortedReply<api::GetBucketDiffReply>(c.top));
+ EXPECT_FALSE(bucketExistsInDb(leafBucket));
}
-void
-MergeBlockingTest::testRejectApplyDiffWhenBucketHasBecomeInconsistent()
-{
- TestFileStorComponents c(*this, "testRejectApplyDiffWhenBucketHasBecomeInconsistent");
+TEST_F(MergeBlockingTest, reject_apply_diff_when_bucket_has_become_inconsistent) {
+ TestFileStorComponents c(*this);
createBucket(leafBucket);
createBucket(innerBucket);
- std::shared_ptr<api::ApplyBucketDiffCommand> applyDiff(
- createApplyDiff(innerBucket, getNodes()));
+ auto applyDiff = createApplyDiff(innerBucket, getNodes());
c.top.sendDown(applyDiff);
- expectAbortedReply<api::ApplyBucketDiffReply>(c.top);
+ ASSERT_NO_FATAL_FAILURE(expectAbortedReply<api::ApplyBucketDiffReply>(c.top));
}
-void
-MergeBlockingTest::testRejectApplyReplyWhenBucketHasBecomeInconsistent()
-{
- TestFileStorComponents c(*this, "testRejectApplyReplyWhenBucketHasBecomeInconsistent");
+TEST_F(MergeBlockingTest, reject_apply_diff_reply_when_bucket_has_become_inconsistent) {
+ TestFileStorComponents c(*this);
createBucket(innerBucket);
- std::shared_ptr<api::ApplyBucketDiffCommand> applyDiff(
- createApplyDiff(innerBucket, getNodesWithForwarding()));
+ auto applyDiff = createApplyDiff(innerBucket, getNodesWithForwarding());
c.top.sendDown(applyDiff);
c.top.waitForMessages(1, MSG_WAIT_TIME);
- api::StorageMessage::SP fwdDiff(
- c.top.getAndRemoveMessage(api::MessageType::APPLYBUCKETDIFF));
- api::ApplyBucketDiffCommand& diffCmd(
- dynamic_cast<api::ApplyBucketDiffCommand&>(*fwdDiff));
+ auto fwdDiff = c.top.getAndRemoveMessage(api::MessageType::APPLYBUCKETDIFF);
+ auto& diffCmd = dynamic_cast<api::ApplyBucketDiffCommand&>(*fwdDiff);
- api::ApplyBucketDiffReply::SP diffReply(
- new api::ApplyBucketDiffReply(diffCmd));
+ auto diffReply = std::make_shared<api::ApplyBucketDiffReply>(diffCmd);
createBucket(leafBucket);
c.top.sendDown(diffReply);
- expectAbortedReply<api::ApplyBucketDiffReply>(c.top);
+ ASSERT_NO_FATAL_FAILURE(expectAbortedReply<api::ApplyBucketDiffReply>(c.top));
}
-void
-MergeBlockingTest::testRejectGetDiffReplyWhenBucketHasBecomeInconsistent()
-{
- TestFileStorComponents c(*this, "testRejectGetDiffReplyWhenBucketHasBecomeInconsistent");
+TEST_F(MergeBlockingTest, reject_get_diff_reply_when_bucket_has_become_inconsistent) {
+ TestFileStorComponents c(*this);
createBucket(innerBucket);
- std::shared_ptr<api::GetBucketDiffCommand> getDiff(
- createGetDiff(innerBucket, getNodesWithForwarding()));
+ auto getDiff = createGetDiff(innerBucket, getNodesWithForwarding());
c.top.sendDown(getDiff);
c.top.waitForMessages(1, MSG_WAIT_TIME);
- api::StorageMessage::SP fwdDiff(
- c.top.getAndRemoveMessage(api::MessageType::GETBUCKETDIFF));
- api::GetBucketDiffCommand& diffCmd(
- dynamic_cast<api::GetBucketDiffCommand&>(*fwdDiff));
+ auto fwdDiff = c.top.getAndRemoveMessage(api::MessageType::GETBUCKETDIFF);
+ auto& diffCmd = dynamic_cast<api::GetBucketDiffCommand&>(*fwdDiff);
- api::GetBucketDiffReply::SP diffReply(
- new api::GetBucketDiffReply(diffCmd));
+ auto diffReply = std::make_shared<api::GetBucketDiffReply>(diffCmd);
createBucket(innerBucket2);
c.top.sendDown(diffReply);
- expectAbortedReply<api::GetBucketDiffReply>(c.top);
+ ASSERT_NO_FATAL_FAILURE(expectAbortedReply<api::GetBucketDiffReply>(c.top));
}
/**
* Test case for buckets in ticket 6389558, comment #4.
*/
-void
-MergeBlockingTest::testRejectMergeWhenLowUsedBitCount()
-{
+TEST_F(MergeBlockingTest, reject_merge_when_low_used_bit_count) {
document::BucketId superBucket(1, 0x1);
document::BucketId subBucket(2, 0x1);
- CPPUNIT_ASSERT(superBucket.contains(subBucket));
+ ASSERT_TRUE(superBucket.contains(subBucket));
- TestFileStorComponents c(*this, "testRejectMergeWithInconsistentBucket");
+ TestFileStorComponents c(*this);
createBucket(superBucket);
- std::shared_ptr<api::MergeBucketCommand> cmd(createMerge(subBucket));
+ auto cmd = createMerge(subBucket);
c.top.sendDown(cmd);
- expectAbortedReply<api::MergeBucketReply>(c.top);
- CPPUNIT_ASSERT(!bucketExistsInDb(subBucket));
+ ASSERT_NO_FATAL_FAILURE(expectAbortedReply<api::MergeBucketReply>(c.top));
+ EXPECT_FALSE(bucketExistsInDb(subBucket));
}
} // ns storage
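CppUnit's setUp()/tearDown() hooks become SetUp()/TearDown() overrides on the ::testing::Test fixture, as MergeBlockingTest shows above for SetUp; marking them override means a misspelled hook fails to compile instead of silently never running. A minimal sketch with a hypothetical fixture:

#include <vespa/vespalib/gtest/gtest.h>

// Hypothetical fixture illustrating the renamed lifecycle hooks.
struct LifecycleSketch : ::testing::Test {
    int _resource = 0;
    void SetUp() override { _resource = 1; }     // was setUp() in CppUnit
    void TearDown() override { _resource = 0; }  // was tearDown() in CppUnit
};

TEST_F(LifecycleSketch, set_up_runs_before_each_test) {
    EXPECT_EQ(1, _resource);
}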
diff --git a/storage/src/tests/persistence/filestorage/modifiedbucketcheckertest.cpp b/storage/src/tests/persistence/filestorage/modifiedbucketcheckertest.cpp
index e6412ba9fd1..1660fed9e38 100644
--- a/storage/src/tests/persistence/filestorage/modifiedbucketcheckertest.cpp
+++ b/storage/src/tests/persistence/filestorage/modifiedbucketcheckertest.cpp
@@ -1,39 +1,25 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <tests/common/testhelper.h>
#include <tests/common/dummystoragelink.h>
#include <tests/common/teststorageapp.h>
#include <vespa/persistence/dummyimpl/dummypersistence.h>
#include <vespa/storage/persistence/filestorage/modifiedbucketchecker.h>
#include <vespa/config/common/exceptions.h>
+#include <vespa/vespalib/gtest/gtest.h>
+
+using namespace ::testing;
namespace storage {
-class ModifiedBucketCheckerTest : public CppUnit::TestFixture
-{
-public:
+struct ModifiedBucketCheckerTest : Test {
enum {
MESSAGE_WAIT_TIME = 60*2
};
- void setUp() override;
- void tearDown() override;
-
- void testModifiedBucketThreadSendsRecheckBucketCommands();
- void testDoNotCheckModifiedBucketsIfAlreadyPending();
- void testBucketCheckerOnlySwallowsRecheckBucketReplies();
- void testRecheckRequestsAreChunked();
- void testInvalidChunkSizeConfigIsRejected();
-
- CPPUNIT_TEST_SUITE(ModifiedBucketCheckerTest);
- CPPUNIT_TEST(testModifiedBucketThreadSendsRecheckBucketCommands);
- CPPUNIT_TEST(testDoNotCheckModifiedBucketsIfAlreadyPending);
- CPPUNIT_TEST(testBucketCheckerOnlySwallowsRecheckBucketReplies);
- CPPUNIT_TEST(testRecheckRequestsAreChunked);
- CPPUNIT_TEST(testInvalidChunkSizeConfigIsRejected);
- CPPUNIT_TEST_SUITE_END();
-private:
+ void SetUp() override;
+ void TearDown() override;
+
spi::dummy::DummyPersistence& getDummyPersistence() {
return static_cast<spi::dummy::DummyPersistence&>(
_node->getPersistenceProvider());
@@ -51,10 +37,8 @@ private:
std::unique_ptr<vdstestlib::DirConfig> _config;
};
-CPPUNIT_TEST_SUITE_REGISTRATION(ModifiedBucketCheckerTest);
-
void
-ModifiedBucketCheckerTest::setUp()
+ModifiedBucketCheckerTest::SetUp()
{
_config.reset(new vdstestlib::DirConfig(getStandardConfig(true)));
_node.reset(new TestServiceLayerApp(DiskCount(1), NodeIndex(0),
@@ -71,12 +55,12 @@ ModifiedBucketCheckerTest::setUp()
}
void
-ModifiedBucketCheckerTest::tearDown()
+ModifiedBucketCheckerTest::TearDown()
{
_top->close();
- _top.reset(0);
- _node.reset(0);
- _config.reset(0);
+ _top.reset();
+ _node.reset();
+ _config.reset();
}
void
@@ -95,10 +79,8 @@ ModifiedBucketCheckerTest::replyToAll(
uint32_t firstBucket)
{
for (uint32_t i = 0; i < messages.size(); ++i) {
- RecheckBucketInfoCommand& cmd(
- dynamic_cast<RecheckBucketInfoCommand&>(*messages[i]));
- CPPUNIT_ASSERT_EQUAL(document::BucketId(16, i+firstBucket),
- cmd.getBucketId());
+ auto& cmd = dynamic_cast<RecheckBucketInfoCommand&>(*messages[i]);
+ ASSERT_EQ(document::BucketId(16, i + firstBucket), cmd.getBucketId());
_bottom->sendUp(cmd.makeReply());
}
}
@@ -108,114 +90,94 @@ ModifiedBucketCheckerTest::expectCommandsAndSendReplies(
uint32_t count, uint32_t firstBucket)
{
std::vector<api::StorageMessage::SP> messages(_bottom->getCommandsOnce());
- CPPUNIT_ASSERT_EQUAL(size_t(count), messages.size());
+ ASSERT_EQ(count, messages.size());
replyToAll(messages, firstBucket);
}
-void
-ModifiedBucketCheckerTest::testModifiedBucketThreadSendsRecheckBucketCommands()
-{
+TEST_F(ModifiedBucketCheckerTest, modified_bucket_thread_sends_recheck_bucket_commands) {
_top->open(); // Multi-threaded test
modifyBuckets(3, 0);
// Should now get 3 RecheckBucketInfo commands down the dummy link.
_bottom->waitForMessages(3, MESSAGE_WAIT_TIME);
- expectCommandsAndSendReplies(3, 0);
+ ASSERT_NO_FATAL_FAILURE(expectCommandsAndSendReplies(3, 0));
// No replies should reach top link
- CPPUNIT_ASSERT_EQUAL(size_t(0), _top->getNumReplies());
+ EXPECT_EQ(0, _top->getNumReplies());
}
-void
-ModifiedBucketCheckerTest::testDoNotCheckModifiedBucketsIfAlreadyPending()
-{
+TEST_F(ModifiedBucketCheckerTest, do_not_check_modified_buckets_if_already_pending) {
_handler->setUnitTestingSingleThreadedMode();
_top->open();
modifyBuckets(3, 0);
_handler->tick();
- std::vector<api::StorageMessage::SP> messages(_bottom->getCommandsOnce());
- CPPUNIT_ASSERT_EQUAL(size_t(3), messages.size());
+ auto messages = _bottom->getCommandsOnce();
+ ASSERT_EQ(3, messages.size());
modifyBuckets(3, 3);
_handler->tick();
- expectCommandsAndSendReplies(0, 0);
+ ASSERT_NO_FATAL_FAILURE(expectCommandsAndSendReplies(0, 0));
// After replies received, tick should send new requests again.
- replyToAll(messages, 0);
+ ASSERT_NO_FATAL_FAILURE(replyToAll(messages, 0));
_handler->tick(); // global bucket space ==> nothing to do
- expectCommandsAndSendReplies(0, 0);
+ ASSERT_NO_FATAL_FAILURE(expectCommandsAndSendReplies(0, 0));
_handler->tick();
- expectCommandsAndSendReplies(3, 3);
+ ASSERT_NO_FATAL_FAILURE(expectCommandsAndSendReplies(3, 3));
}
-void
-ModifiedBucketCheckerTest::testBucketCheckerOnlySwallowsRecheckBucketReplies()
-{
+TEST_F(ModifiedBucketCheckerTest, bucket_checker_only_swallows_recheck_bucket_replies) {
_top->open();
DestroyIteratorCommand cmd(spi::IteratorId(123));
_bottom->sendUp(api::StorageMessage::SP(cmd.makeReply()));
- CPPUNIT_ASSERT_EQUAL(size_t(1), _top->getNumReplies());
+ ASSERT_EQ(1, _top->getNumReplies());
}
-void
-ModifiedBucketCheckerTest::testRecheckRequestsAreChunked()
-{
+TEST_F(ModifiedBucketCheckerTest, recheck_requests_are_chunked) {
namespace cfgns = vespa::config::content::core;
_handler->setUnitTestingSingleThreadedMode();
_top->open();
cfgns::StorServerConfigBuilder cfgBuilder;
cfgBuilder.bucketRecheckingChunkSize = 2;
- _handler->configure(std::unique_ptr<cfgns::StorServerConfig>(
- new cfgns::StorServerConfig(cfgBuilder)));
+ _handler->configure(std::make_unique<cfgns::StorServerConfig>(cfgBuilder));
modifyBuckets(5, 0);
_handler->tick();
     modifyBuckets(1, 10); // should not be checked yet
// Rechecks should now be done in 3 chunks of 2, 2 and 1 each, respectively.
- expectCommandsAndSendReplies(2, 0);
+ ASSERT_NO_FATAL_FAILURE(expectCommandsAndSendReplies(2, 0));
_handler->tick();
- expectCommandsAndSendReplies(2, 2);
+ ASSERT_NO_FATAL_FAILURE(expectCommandsAndSendReplies(2, 2));
_handler->tick();
- expectCommandsAndSendReplies(1, 4);
+ ASSERT_NO_FATAL_FAILURE(expectCommandsAndSendReplies(1, 4));
_handler->tick(); // global bucket space ==> nothing to do
- expectCommandsAndSendReplies(0, 0);
+ ASSERT_NO_FATAL_FAILURE(expectCommandsAndSendReplies(0, 0));
// New round of fetching
_handler->tick();
- expectCommandsAndSendReplies(1, 10);
+    ASSERT_NO_FATAL_FAILURE(expectCommandsAndSendReplies(1, 10));
_handler->tick(); // global bucket space ==> nothing to do
- expectCommandsAndSendReplies(0, 0);
+ ASSERT_NO_FATAL_FAILURE(expectCommandsAndSendReplies(0, 0));
// And done!
_handler->tick();
- expectCommandsAndSendReplies(0, 0);
+ ASSERT_NO_FATAL_FAILURE(expectCommandsAndSendReplies(0, 0));
_handler->tick(); // global bucket space ==> nothing to do
- expectCommandsAndSendReplies(0, 0);
+ ASSERT_NO_FATAL_FAILURE(expectCommandsAndSendReplies(0, 0));
}
-
-void
-ModifiedBucketCheckerTest::testInvalidChunkSizeConfigIsRejected()
-{
+TEST_F(ModifiedBucketCheckerTest, invalid_chunk_size_config_is_rejected) {
namespace cfgns = vespa::config::content::core;
_handler->setUnitTestingSingleThreadedMode();
_top->open();
cfgns::StorServerConfigBuilder cfgBuilder;
cfgBuilder.bucketRecheckingChunkSize = 0;
- try {
- _handler->configure(std::unique_ptr<cfgns::StorServerConfig>(
- new cfgns::StorServerConfig(cfgBuilder)));
- CPPUNIT_FAIL("Expected bad config to be rejected");
- } catch (const config::InvalidConfigException&) {
- // Happy days
- } catch (...) {
- CPPUNIT_FAIL("Got unexpected exception");
- }
+ EXPECT_THROW(_handler->configure(std::make_unique<cfgns::StorServerConfig>(cfgBuilder)),
+ config::InvalidConfigException);
}
// RecheckBucketInfoCommand handling is done in persistence threads,
// so that functionality is tested in the filestor tests.
} // ns storage
-
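The try/catch blocks that asserted a specific exception, with CPPUNIT_FAIL in the fall-through paths, collapse to a single EXPECT_THROW, as in invalid_chunk_size_config_is_rejected above. A minimal sketch with configure_chunk_size as a hypothetical stand-in for the handler's configure call:

#include <vespa/vespalib/gtest/gtest.h>
#include <stdexcept>

// Hypothetical stand-in for a configure() call that rejects bad input.
void configure_chunk_size(int chunk_size) {
    if (chunk_size == 0) {
        throw std::invalid_argument("chunk size must be non-zero");
    }
}

TEST(ExpectThrowSketch, zero_chunk_size_is_rejected) {
    // Fails if nothing is thrown, or if a different exception type is thrown.
    EXPECT_THROW(configure_chunk_size(0), std::invalid_argument);
    EXPECT_NO_THROW(configure_chunk_size(2));
}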
diff --git a/storage/src/tests/persistence/filestorage/operationabortingtest.cpp b/storage/src/tests/persistence/filestorage/operationabortingtest.cpp
index e12f48bcdea..0d43f8a9020 100644
--- a/storage/src/tests/persistence/filestorage/operationabortingtest.cpp
+++ b/storage/src/tests/persistence/filestorage/operationabortingtest.cpp
@@ -1,6 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storage/persistence/messages.h>
#include <tests/persistence/common/persistenceproviderwrapper.h>
#include <vespa/persistence/dummyimpl/dummypersistence.h>
@@ -9,11 +8,13 @@
#include <vespa/vespalib/util/barrier.h>
#include <vespa/vespalib/util/thread.h>
#include <vespa/vespalib/stllike/hash_set_insert.hpp>
+#include <vespa/vespalib/gtest/gtest.h>
#include <vespa/log/log.h>
LOG_SETUP(".operationabortingtest");
using document::test::makeDocumentBucket;
+using namespace ::testing;
namespace storage {
@@ -78,9 +79,7 @@ spi::LoadType defaultLoadType(0, "default");
}
-class OperationAbortingTest : public FileStorTestFixture
-{
-public:
+struct OperationAbortingTest : FileStorTestFixture {
spi::PersistenceProvider::UP _dummyProvider;
BlockingMockProvider* _blockingProvider;
std::unique_ptr<vespalib::Barrier> _queueBarrier;
@@ -99,32 +98,14 @@ public:
const std::vector<document::BucketId>& okReplies,
const std::vector<document::BucketId>& abortedGetDiffs);
- void doTestSpecificOperationsNotAborted(const char* testName,
- const std::vector<api::StorageMessage::SP>& msgs,
+ void doTestSpecificOperationsNotAborted(const std::vector<api::StorageMessage::SP>& msgs,
bool shouldCreateBucketInitially);
api::BucketInfo getBucketInfoFromDB(const document::BucketId&) const;
-public:
- void testAbortMessageClearsRelevantQueuedOperations();
- void testWaitForCurrentOperationCompletionForAbortedBucket();
- void testDoNotAbortCreateBucketCommands();
- void testDoNotAbortRecheckBucketCommands();
- void testDoNotAbortDeleteBucketCommands();
-
- void setUp() override;
-
- CPPUNIT_TEST_SUITE(OperationAbortingTest);
- CPPUNIT_TEST(testAbortMessageClearsRelevantQueuedOperations);
- CPPUNIT_TEST(testWaitForCurrentOperationCompletionForAbortedBucket);
- CPPUNIT_TEST(testDoNotAbortCreateBucketCommands);
- CPPUNIT_TEST(testDoNotAbortRecheckBucketCommands);
- CPPUNIT_TEST(testDoNotAbortDeleteBucketCommands);
- CPPUNIT_TEST_SUITE_END();
+ void SetUp() override;
};
-CPPUNIT_TEST_SUITE_REGISTRATION(OperationAbortingTest);
-
namespace {
template <typename T, typename Collection>
@@ -136,7 +117,7 @@ existsIn(const T& elem, const Collection& collection) {
}
void
-OperationAbortingTest::setUp()
+OperationAbortingTest::SetUp()
{
}
@@ -146,35 +127,34 @@ OperationAbortingTest::validateReplies(DummyStorageLink& link, size_t repliesTot
const std::vector<document::BucketId>& abortedGetDiffs)
{
link.waitForMessages(repliesTotal, MSG_WAIT_TIME);
- CPPUNIT_ASSERT_EQUAL(repliesTotal, link.getNumReplies());
+ ASSERT_EQ(repliesTotal, link.getNumReplies());
for (uint32_t i = 0; i < repliesTotal; ++i) {
api::StorageReply& reply(dynamic_cast<api::StorageReply&>(*link.getReply(i)));
- LOG(info, "Checking reply %s", reply.toString(true).c_str());
+ LOG(debug, "Checking reply %s", reply.toString(true).c_str());
switch (static_cast<uint32_t>(reply.getType().getId())) {
case api::MessageType::PUT_REPLY_ID:
case api::MessageType::CREATEBUCKET_REPLY_ID:
case api::MessageType::DELETEBUCKET_REPLY_ID:
case api::MessageType::GET_REPLY_ID:
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::OK, resultOf(reply));
+ ASSERT_EQ(api::ReturnCode::OK, resultOf(reply));
break;
case api::MessageType::GETBUCKETDIFF_REPLY_ID:
{
- api::GetBucketDiffReply& gr(
- static_cast<api::GetBucketDiffReply&>(reply));
+ auto& gr = static_cast<api::GetBucketDiffReply&>(reply);
if (existsIn(gr.getBucketId(), abortedGetDiffs)) {
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::ABORTED, resultOf(reply));
+ ASSERT_EQ(api::ReturnCode::ABORTED, resultOf(reply));
} else {
- CPPUNIT_ASSERT(existsIn(gr.getBucketId(), okReplies));
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::OK, resultOf(reply));
+ ASSERT_TRUE(existsIn(gr.getBucketId(), okReplies));
+ ASSERT_EQ(api::ReturnCode::OK, resultOf(reply));
}
break;
}
case api::MessageType::INTERNAL_REPLY_ID:
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::OK, resultOf(reply));
+ ASSERT_EQ(api::ReturnCode::OK, resultOf(reply));
break;
default:
- CPPUNIT_FAIL("got unknown reply type");
+ FAIL() << "got unknown reply type";
}
}
}
@@ -187,12 +167,12 @@ class ExplicitBucketSetPredicate : public AbortBucketOperationsCommand::AbortPre
bool doShouldAbort(const document::Bucket &bucket) const override;
public:
- ~ExplicitBucketSetPredicate();
+ ~ExplicitBucketSetPredicate() override;
template <typename Iterator>
ExplicitBucketSetPredicate(Iterator first, Iterator last)
: _bucketsToAbort(first, last)
- { }
+ {}
const BucketSet& getBucketsToAbort() const {
return _bucketsToAbort;
@@ -204,7 +184,7 @@ ExplicitBucketSetPredicate::doShouldAbort(const document::Bucket &bucket) const
return _bucketsToAbort.find(bucket.getBucketId()) != _bucketsToAbort.end();
}
-ExplicitBucketSetPredicate::~ExplicitBucketSetPredicate() { }
+ExplicitBucketSetPredicate::~ExplicitBucketSetPredicate() = default;
template <typename Container>
AbortBucketOperationsCommand::SP
@@ -216,18 +196,16 @@ makeAbortCmd(const Container& buckets)
}
-void
-OperationAbortingTest::testAbortMessageClearsRelevantQueuedOperations()
-{
+TEST_F(OperationAbortingTest, abort_message_clears_relevant_queued_operations) {
setupProviderAndBarriers(2);
- TestFileStorComponents c(*this, "testAbortMessageClearsRelevantQueuedOperations");
+ TestFileStorComponents c(*this);
document::BucketId bucket(16, 1);
createBucket(bucket);
- LOG(info, "Sending put to trigger thread barrier");
+ LOG(debug, "Sending put to trigger thread barrier");
c.sendPut(bucket, DocumentIndex(0), PutTimestamp(1000));
- LOG(info, "waiting for test and persistence thread to reach barriers");
+ LOG(debug, "waiting for test and persistence thread to reach barriers");
_queueBarrier->await();
- LOG(info, "barrier passed");
+ LOG(debug, "barrier passed");
/*
      * All load we send down to filestor from now on will be enqueued, as the
* persistence thread is blocked.
@@ -235,12 +213,14 @@ OperationAbortingTest::testAbortMessageClearsRelevantQueuedOperations()
* Cannot abort the bucket we're blocking the thread on since we'll
* deadlock the test if we do.
*/
- std::vector<document::BucketId> bucketsToAbort;
- bucketsToAbort.push_back(document::BucketId(16, 3));
- bucketsToAbort.push_back(document::BucketId(16, 5));
- std::vector<document::BucketId> bucketsToKeep;
- bucketsToKeep.push_back(document::BucketId(16, 2));
- bucketsToKeep.push_back(document::BucketId(16, 4));
+ std::vector<document::BucketId> bucketsToAbort = {
+ document::BucketId(16, 3),
+ document::BucketId(16, 5)
+ };
+ std::vector<document::BucketId> bucketsToKeep = {
+ document::BucketId(16, 2),
+ document::BucketId(16, 4)
+ };
for (uint32_t i = 0; i < bucketsToAbort.size(); ++i) {
createBucket(bucketsToAbort[i]);
@@ -251,17 +231,17 @@ OperationAbortingTest::testAbortMessageClearsRelevantQueuedOperations()
c.sendDummyGetDiff(bucketsToKeep[i]);
}
- AbortBucketOperationsCommand::SP abortCmd(makeAbortCmd(bucketsToAbort));
+ auto abortCmd = makeAbortCmd(bucketsToAbort);
c.top.sendDown(abortCmd);
- LOG(info, "waiting on completion barrier");
+ LOG(debug, "waiting on completion barrier");
_completionBarrier->await();
// put+abort+get replies
size_t expectedMsgs(2 + bucketsToAbort.size() + bucketsToKeep.size());
- LOG(info, "barrier passed, waiting for %zu replies", expectedMsgs);
+ LOG(debug, "barrier passed, waiting for %zu replies", expectedMsgs);
- validateReplies(c.top, expectedMsgs, bucketsToKeep, bucketsToAbort);
+ ASSERT_NO_FATAL_FAILURE(validateReplies(c.top, expectedMsgs, bucketsToKeep, bucketsToAbort));
}
namespace {
@@ -302,29 +282,27 @@ public:
* impose sufficient ordering guarantees that it never provides false positives
* as long as the tested functionality is in fact correct.
*/
-void
-OperationAbortingTest::testWaitForCurrentOperationCompletionForAbortedBucket()
-{
+TEST_F(OperationAbortingTest, wait_for_current_operation_completion_for_aborted_bucket) {
setupProviderAndBarriers(3);
- TestFileStorComponents c(*this, "testWaitForCurrentOperationCompletionForAbortedBucket");
+ TestFileStorComponents c(*this);
document::BucketId bucket(16, 1);
createBucket(bucket);
- LOG(info, "Sending put to trigger thread barrier");
+ LOG(debug, "Sending put to trigger thread barrier");
c.sendPut(bucket, DocumentIndex(0), PutTimestamp(1000));
std::vector<document::BucketId> abortSet { bucket };
- AbortBucketOperationsCommand::SP abortCmd(makeAbortCmd(abortSet));
+ auto abortCmd = makeAbortCmd(abortSet);
SendTask sendTask(abortCmd, *_queueBarrier, c.top);
vespalib::Thread thread(sendTask);
thread.start();
- LOG(info, "waiting for threads to reach barriers");
+ LOG(debug, "waiting for threads to reach barriers");
_queueBarrier->await();
- LOG(info, "barrier passed");
+ LOG(debug, "barrier passed");
- LOG(info, "waiting on completion barrier");
+ LOG(debug, "waiting on completion barrier");
_completionBarrier->await();
thread.stop();
@@ -333,31 +311,27 @@ OperationAbortingTest::testWaitForCurrentOperationCompletionForAbortedBucket()
// If waiting works, put reply shall always be ordered before the internal
// reply, as it must finish processing fully before the abort returns.
c.top.waitForMessages(2, MSG_WAIT_TIME);
- CPPUNIT_ASSERT_EQUAL(size_t(2), c.top.getNumReplies());
- CPPUNIT_ASSERT_EQUAL(api::MessageType::PUT_REPLY, c.top.getReply(0)->getType());
- CPPUNIT_ASSERT_EQUAL(api::MessageType::INTERNAL_REPLY, c.top.getReply(1)->getType());
+ ASSERT_EQ(2, c.top.getNumReplies());
+ EXPECT_EQ(api::MessageType::PUT_REPLY, c.top.getReply(0)->getType());
+ EXPECT_EQ(api::MessageType::INTERNAL_REPLY, c.top.getReply(1)->getType());
}
-void
-OperationAbortingTest::testDoNotAbortCreateBucketCommands()
-{
+TEST_F(OperationAbortingTest, do_not_abort_create_bucket_commands) {
document::BucketId bucket(16, 1);
std::vector<api::StorageMessage::SP> msgs;
- msgs.push_back(api::StorageMessage::SP(new api::CreateBucketCommand(makeDocumentBucket(bucket))));
+ msgs.emplace_back(std::make_shared<api::CreateBucketCommand>(makeDocumentBucket(bucket)));
- bool shouldCreateBucketInitially(false);
- doTestSpecificOperationsNotAborted("testDoNotAbortCreateBucketCommands", msgs, shouldCreateBucketInitially);
+ bool shouldCreateBucketInitially = false;
+ doTestSpecificOperationsNotAborted(msgs, shouldCreateBucketInitially);
}
-void
-OperationAbortingTest::testDoNotAbortRecheckBucketCommands()
-{
+TEST_F(OperationAbortingTest, do_not_abort_recheck_bucket_commands) {
document::BucketId bucket(16, 1);
std::vector<api::StorageMessage::SP> msgs;
- msgs.push_back(api::StorageMessage::SP(new RecheckBucketInfoCommand(makeDocumentBucket(bucket))));
+ msgs.emplace_back(std::make_shared<RecheckBucketInfoCommand>(makeDocumentBucket(bucket)));
- bool shouldCreateBucketInitially(true);
- doTestSpecificOperationsNotAborted("testDoNotAbortRecheckBucketCommands", msgs, shouldCreateBucketInitially);
+ bool shouldCreateBucketInitially = true;
+ doTestSpecificOperationsNotAborted(msgs, shouldCreateBucketInitially);
}
api::BucketInfo
@@ -365,29 +339,25 @@ OperationAbortingTest::getBucketInfoFromDB(const document::BucketId& id) const
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(id, "foo", StorBucketDatabase::CREATE_IF_NONEXISTING));
- CPPUNIT_ASSERT(entry.exist());
+ assert(entry.exist());
return entry->info;
}
-void
-OperationAbortingTest::testDoNotAbortDeleteBucketCommands()
-{
+TEST_F(OperationAbortingTest, do_not_abort_delete_bucket_commands) {
document::BucketId bucket(16, 1);
std::vector<api::StorageMessage::SP> msgs;
- api::DeleteBucketCommand::SP cmd(new api::DeleteBucketCommand(makeDocumentBucket(bucket)));
- msgs.push_back(cmd);
+ msgs.emplace_back(std::make_shared<api::DeleteBucketCommand>(makeDocumentBucket(bucket)));
- bool shouldCreateBucketInitially(true);
- doTestSpecificOperationsNotAborted("testDoNotAbortRecheckBucketCommands", msgs, shouldCreateBucketInitially);
+ bool shouldCreateBucketInitially = true;
+ doTestSpecificOperationsNotAborted(msgs, shouldCreateBucketInitially);
}
void
-OperationAbortingTest::doTestSpecificOperationsNotAborted(const char* testName,
- const std::vector<api::StorageMessage::SP>& msgs,
+OperationAbortingTest::doTestSpecificOperationsNotAborted(const std::vector<api::StorageMessage::SP>& msgs,
bool shouldCreateBucketInitially)
{
setupProviderAndBarriers(2);
- TestFileStorComponents c(*this, testName);
+ TestFileStorComponents c(*this);
document::BucketId bucket(16, 1);
document::BucketId blockerBucket(16, 2);
@@ -395,11 +365,11 @@ OperationAbortingTest::doTestSpecificOperationsNotAborted(const char* testName,
createBucket(bucket);
}
createBucket(blockerBucket);
- LOG(info, "Sending put to trigger thread barrier");
+ LOG(debug, "Sending put to trigger thread barrier");
c.sendPut(blockerBucket, DocumentIndex(0), PutTimestamp(1000));
- LOG(info, "waiting for test and persistence thread to reach barriers");
+ LOG(debug, "waiting for test and persistence thread to reach barriers");
_queueBarrier->await();
- LOG(info, "barrier passed");
+ LOG(debug, "barrier passed");
uint32_t expectedCreateBuckets = 0;
uint32_t expectedDeleteBuckets = 0;
@@ -413,7 +383,7 @@ OperationAbortingTest::doTestSpecificOperationsNotAborted(const char* testName,
break;
case api::MessageType::DELETEBUCKET_ID:
{
- api::DeleteBucketCommand& delCmd(dynamic_cast<api::DeleteBucketCommand&>(*msgs[i]));
+ auto& delCmd = dynamic_cast<api::DeleteBucketCommand&>(*msgs[i]);
delCmd.setBucketInfo(getBucketInfoFromDB(delCmd.getBucketId()));
}
++expectedDeleteBuckets;
@@ -424,7 +394,7 @@ OperationAbortingTest::doTestSpecificOperationsNotAborted(const char* testName,
++expectedBucketInfoInvocations;
break;
default:
- CPPUNIT_FAIL("unsupported message type");
+ FAIL() << "unsupported message type";
}
c.top.sendDown(msgs[i]);
}
@@ -433,7 +403,7 @@ OperationAbortingTest::doTestSpecificOperationsNotAborted(const char* testName,
AbortBucketOperationsCommand::SP abortCmd(makeAbortCmd(abortSet));
c.top.sendDown(abortCmd);
- LOG(info, "waiting on completion barrier");
+ LOG(debug, "waiting on completion barrier");
_completionBarrier->await();
// At this point, the recheck command is still either enqueued, is processing
@@ -443,7 +413,7 @@ OperationAbortingTest::doTestSpecificOperationsNotAborted(const char* testName,
// put+abort+get + any other creates/deletes/rechecks
size_t expectedMsgs(3 + expectedCreateBuckets + expectedDeleteBuckets + expectedRecheckReplies);
- LOG(info, "barrier passed, waiting for %zu replies", expectedMsgs);
+ LOG(debug, "barrier passed, waiting for %zu replies", expectedMsgs);
std::vector<document::BucketId> okReplies;
okReplies.push_back(bucket);
@@ -451,10 +421,10 @@ OperationAbortingTest::doTestSpecificOperationsNotAborted(const char* testName,
std::vector<document::BucketId> abortedGetDiffs;
validateReplies(c.top, expectedMsgs, okReplies, abortedGetDiffs);
- CPPUNIT_ASSERT_EQUAL(expectedBucketInfoInvocations, _blockingProvider->_bucketInfoInvocations);
- CPPUNIT_ASSERT_EQUAL(expectedCreateBuckets + (shouldCreateBucketInitially ? 2 : 1),
- _blockingProvider->_createBucketInvocations);
- CPPUNIT_ASSERT_EQUAL(expectedDeleteBuckets, _blockingProvider->_deleteBucketInvocations);
+ ASSERT_EQ(expectedBucketInfoInvocations, _blockingProvider->_bucketInfoInvocations);
+ ASSERT_EQ(expectedCreateBuckets + (shouldCreateBucketInitially ? 2 : 1),
+ _blockingProvider->_createBucketInvocations);
+ ASSERT_EQ(expectedDeleteBuckets, _blockingProvider->_deleteBucketInvocations);
}
} // storage
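The converted tests above also distinguish fatal ASSERT_* (aborts the current function; used for preconditions such as reply counts before indexing into them) from non-fatal EXPECT_* (records the failure and keeps checking), and CPPUNIT_FAIL becomes FAIL() with a streamed message. A minimal sketch with hypothetical values:

#include <vespa/vespalib/gtest/gtest.h>
#include <vector>

TEST(AssertVsExpectSketch, fatal_and_nonfatal_checks) {
    std::vector<int> replies = {1, 2};
    // Precondition: indexing below is only valid if the count matches,
    // so a failure must abort the test function (fatal ASSERT_*).
    ASSERT_EQ(2, replies.size());
    for (int reply : replies) {
        switch (reply) {
        case 1:
        case 2:
            // Independent checks: record failures and keep going (EXPECT_*).
            EXPECT_GT(reply, 0);
            break;
        default:
            FAIL() << "got unknown reply type " << reply; // was CPPUNIT_FAIL
        }
    }
}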
diff --git a/storage/src/tests/persistence/filestorage/sanitycheckeddeletetest.cpp b/storage/src/tests/persistence/filestorage/sanitycheckeddeletetest.cpp
index 961d2628052..787a63a618c 100644
--- a/storage/src/tests/persistence/filestorage/sanitycheckeddeletetest.cpp
+++ b/storage/src/tests/persistence/filestorage/sanitycheckeddeletetest.cpp
@@ -1,6 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storageapi/message/bucket.h>
#include <vespa/persistence/spi/test.h>
#include <tests/persistence/common/persistenceproviderwrapper.h>
@@ -10,26 +9,16 @@
using storage::spi::test::makeSpiBucket;
using document::test::makeDocumentBucket;
+using namespace ::testing;
namespace storage {
-class SanityCheckedDeleteTest : public FileStorTestFixture {
-public:
- void delete_bucket_fails_when_provider_out_of_sync();
- void differing_document_sizes_not_considered_out_of_sync();
-
- CPPUNIT_TEST_SUITE(SanityCheckedDeleteTest);
- CPPUNIT_TEST(delete_bucket_fails_when_provider_out_of_sync);
- CPPUNIT_TEST(differing_document_sizes_not_considered_out_of_sync);
- CPPUNIT_TEST_SUITE_END();
-
+struct SanityCheckedDeleteTest : FileStorTestFixture {
spi::BucketInfo send_put_and_get_bucket_info(TestFileStorComponents &c, const spi::Bucket &spiBucket);
};
-CPPUNIT_TEST_SUITE_REGISTRATION(SanityCheckedDeleteTest);
-
-void SanityCheckedDeleteTest::delete_bucket_fails_when_provider_out_of_sync() {
- TestFileStorComponents c(*this, "delete_bucket_fails_when_provider_out_of_sync");
+TEST_F(SanityCheckedDeleteTest, delete_bucket_fails_when_provider_out_of_sync) {
+ TestFileStorComponents c(*this);
document::BucketId bucket(8, 123);
document::BucketId syncBucket(8, 234);
spi::Bucket spiBucket(makeSpiBucket(bucket));
@@ -55,11 +44,10 @@ void SanityCheckedDeleteTest::delete_bucket_fails_when_provider_out_of_sync() {
c.top.sendDown(cmd);
c.top.waitForMessages(1, MSG_WAIT_TIME);
api::StorageMessage::SP reply(c.top.getReply(0));
- api::DeleteBucketReply& deleteReply(
- dynamic_cast<api::DeleteBucketReply&>(*reply));
+ auto& deleteReply = dynamic_cast<api::DeleteBucketReply&>(*reply);
// Reply happens in a filestor manager context and before the sanity
// check kicks in, meaning it will always be OK.
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::OK, resultOf(deleteReply));
+ ASSERT_EQ(api::ReturnCode::OK, resultOf(deleteReply));
// At this point we do not know if the scheduled delete has been
// executed; it may still be in the persistence queue.
// Send a put to another bucket to serialize the operation (guaranteed
@@ -69,8 +57,8 @@ void SanityCheckedDeleteTest::delete_bucket_fails_when_provider_out_of_sync() {
// Should still be able to get identical bucket info for bucket.
spi::BucketInfoResult infoResult(
_node->getPersistenceProvider().getBucketInfo(spiBucket));
- CPPUNIT_ASSERT_MSG(infoResult.getErrorMessage(), !infoResult.hasError());
- CPPUNIT_ASSERT(infoBefore == infoResult.getBucketInfo());
+ ASSERT_FALSE(infoResult.hasError()) << infoResult.getErrorMessage();
+ EXPECT_TRUE(infoBefore == infoResult.getBucketInfo());
}
spi::BucketInfo SanityCheckedDeleteTest::send_put_and_get_bucket_info(
@@ -83,8 +71,8 @@ spi::BucketInfo SanityCheckedDeleteTest::send_put_and_get_bucket_info(
return _node->getPersistenceProvider().getBucketInfo(spiBucket).getBucketInfo();
}
-void SanityCheckedDeleteTest::differing_document_sizes_not_considered_out_of_sync() {
- TestFileStorComponents c(*this, "differing_document_sizes_not_considered_out_of_sync");
+TEST_F(SanityCheckedDeleteTest, differing_document_sizes_not_considered_out_of_sync) {
+ TestFileStorComponents c(*this);
document::BucketId bucket(8, 123);
spi::Bucket spiBucket(makeSpiBucket(bucket));
@@ -100,7 +88,7 @@ void SanityCheckedDeleteTest::differing_document_sizes_not_considered_out_of_syn
// Bucket should now well and truly be gone. Will trigger a getBucketInfo error response.
spi::BucketInfoResult info_post_delete(
_node->getPersistenceProvider().getBucketInfo(spiBucket));
- CPPUNIT_ASSERT_MSG(info_post_delete.getErrorMessage(), info_post_delete.hasError());
+ ASSERT_TRUE(info_post_delete.hasError()) << info_post_delete.getErrorMessage();
}
} // namespace storage
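CPPUNIT_ASSERT_MSG(message, condition) takes the failure message as its first argument; the GTest form streams the message onto the assertion instead, as the out-of-sync checks above do with infoResult.getErrorMessage(). A minimal sketch with a hypothetical Result type:

#include <vespa/vespalib/gtest/gtest.h>
#include <string>

// Hypothetical result type carrying an error message for the failure case.
struct Result {
    bool ok;
    std::string error;
};

TEST(StreamedMessageSketch, message_is_attached_to_the_assertion) {
    Result r{true, ""};
    // was CPPUNIT_ASSERT_MSG(r.error, r.ok); the streamed message is only
    // evaluated and printed when the assertion fails.
    ASSERT_TRUE(r.ok) << r.error;
}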
diff --git a/storage/src/tests/persistence/filestorage/singlebucketjointest.cpp b/storage/src/tests/persistence/filestorage/singlebucketjointest.cpp
index 8e6340c930c..cda052c787a 100644
--- a/storage/src/tests/persistence/filestorage/singlebucketjointest.cpp
+++ b/storage/src/tests/persistence/filestorage/singlebucketjointest.cpp
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/log/log.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storageapi/message/bucket.h>
#include <vespa/storageapi/message/bucketsplitting.h>
#include <tests/persistence/common/persistenceproviderwrapper.h>
@@ -12,25 +11,15 @@
LOG_SETUP(".singlebucketjointest");
using document::test::makeDocumentBucket;
+using namespace ::testing;
namespace storage {
-class SingleBucketJoinTest : public FileStorTestFixture
-{
-public:
- void testPersistenceCanHandleSingleBucketJoin();
-
- CPPUNIT_TEST_SUITE(SingleBucketJoinTest);
- CPPUNIT_TEST(testPersistenceCanHandleSingleBucketJoin);
- CPPUNIT_TEST_SUITE_END();
+struct SingleBucketJoinTest : FileStorTestFixture {
};
-CPPUNIT_TEST_SUITE_REGISTRATION(SingleBucketJoinTest);
-
-void
-SingleBucketJoinTest::testPersistenceCanHandleSingleBucketJoin()
-{
- TestFileStorComponents c(*this, "testPersistenceCanHandleSingleBucketJoin");
+TEST_F(SingleBucketJoinTest, persistence_can_handle_single_bucket_join) {
+ TestFileStorComponents c(*this);
document::BucketId targetBucket(16, 1);
document::BucketId sourceBucket(17, 1);
@@ -47,7 +36,7 @@ SingleBucketJoinTest::testPersistenceCanHandleSingleBucketJoin()
c.top.sendDown(cmd);
// If single bucket join locking is not working properly, this
// will hang forever.
- expectOkReply<api::JoinBucketsReply>(c.top);
+ ASSERT_NO_FATAL_FAILURE(expectOkReply<api::JoinBucketsReply>(c.top));
}
} // namespace storage
diff --git a/storage/src/tests/persistence/mergehandlertest.cpp b/storage/src/tests/persistence/mergehandlertest.cpp
index 4378814d27b..0c291a179ae 100644
--- a/storage/src/tests/persistence/mergehandlertest.cpp
+++ b/storage/src/tests/persistence/mergehandlertest.cpp
@@ -2,23 +2,23 @@
#include <vespa/document/base/testdocman.h>
#include <vespa/storage/persistence/mergehandler.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <tests/persistence/persistencetestutils.h>
#include <tests/persistence/common/persistenceproviderwrapper.h>
#include <tests/distributor/messagesenderstub.h>
#include <vespa/document/test/make_document_bucket.h>
#include <vespa/vespalib/objects/nbostream.h>
+#include <gmock/gmock.h>
#include <cmath>
#include <vespa/log/log.h>
LOG_SETUP(".test.persistence.handler.merge");
using document::test::makeDocumentBucket;
+using namespace ::testing;
namespace storage {
-struct MergeHandlerTest : public SingleDiskPersistenceTestUtils
-{
+struct MergeHandlerTest : SingleDiskPersistenceTestUtils {
uint32_t _location; // Location used for all merge tests
document::Bucket _bucket; // Bucket used for all merge tests
uint64_t _maxTimestamp;
@@ -29,77 +29,16 @@ struct MergeHandlerTest : public SingleDiskPersistenceTestUtils
template <typename T>
std::shared_ptr<T> fetchSingleMessage();
- void setUp() override;
+ void SetUp() override;
enum ChainPos { FRONT, MIDDLE, BACK };
void setUpChain(ChainPos);
- // Test a regular merge bucket command fetching data, including
- // puts, removes, unrevertable removes & duplicates.
- void testMergeBucketCommand();
- // Test that a simplistic merge with nothing to actually merge,
- // sends get bucket diff through the entire chain of 3 nodes.
void testGetBucketDiffChain(bool midChain);
- void testGetBucketDiffMidChain() { testGetBucketDiffChain(true); }
- void testGetBucketDiffEndOfChain() { testGetBucketDiffChain(false); }
- // Test that a simplistic merge with nothing to actually merge,
- // sends apply bucket diff through the entire chain of 3 nodes.
void testApplyBucketDiffChain(bool midChain);
- void testApplyBucketDiffMidChain() { testApplyBucketDiffChain(true); }
- void testApplyBucketDiffEndOfChain() { testApplyBucketDiffChain(false); }
- // Test that a simplistic merge with one thing to actually merge,
- // sends correct commands and finish.
- void testMasterMessageFlow();
- // Test that a simplistic merge with 1 doc to actually merge,
- // sends apply bucket diff through the entire chain of 3 nodes.
- void testApplyBucketDiffChain();
- void testMergeUnrevertableRemove();
- void testChunkedApplyBucketDiff();
- void testChunkLimitPartiallyFilledDiff();
- void testMaxTimestamp();
- void testSPIFlushGuard();
- void testBucketNotFoundInDb();
- void testMergeProgressSafeGuard();
- void testSafeGuardNotInvokedWhenHasMaskChanges();
- void testEntryRemovedAfterGetBucketDiff();
-
- void testMergeBucketSPIFailures();
- void testGetBucketDiffSPIFailures();
- void testApplyBucketDiffSPIFailures();
- void testGetBucketDiffReplySPIFailures();
- void testApplyBucketDiffReplySPIFailures();
-
- void testRemoveFromDiff();
-
- void testRemovePutOnExistingTimestamp();
-
- CPPUNIT_TEST_SUITE(MergeHandlerTest);
- CPPUNIT_TEST(testMergeBucketCommand);
- CPPUNIT_TEST(testGetBucketDiffMidChain);
- CPPUNIT_TEST(testGetBucketDiffEndOfChain);
- CPPUNIT_TEST(testApplyBucketDiffMidChain);
- CPPUNIT_TEST(testApplyBucketDiffEndOfChain);
- CPPUNIT_TEST(testMasterMessageFlow);
- CPPUNIT_TEST(testMergeUnrevertableRemove);
- CPPUNIT_TEST(testChunkedApplyBucketDiff);
- CPPUNIT_TEST(testChunkLimitPartiallyFilledDiff);
- CPPUNIT_TEST(testMaxTimestamp);
- CPPUNIT_TEST(testSPIFlushGuard);
- CPPUNIT_TEST(testBucketNotFoundInDb);
- CPPUNIT_TEST(testMergeProgressSafeGuard);
- CPPUNIT_TEST(testSafeGuardNotInvokedWhenHasMaskChanges);
- CPPUNIT_TEST(testEntryRemovedAfterGetBucketDiff);
- CPPUNIT_TEST(testMergeBucketSPIFailures);
- CPPUNIT_TEST(testGetBucketDiffSPIFailures);
- CPPUNIT_TEST(testApplyBucketDiffSPIFailures);
- CPPUNIT_TEST(testGetBucketDiffReplySPIFailures);
- CPPUNIT_TEST(testApplyBucketDiffReplySPIFailures);
- CPPUNIT_TEST(testRemoveFromDiff);
- CPPUNIT_TEST(testRemovePutOnExistingTimestamp);
- CPPUNIT_TEST_SUITE_END();
// @TODO Add test to test that buildBucketInfo and mergeLists create minimal list (wrong sorting screws this up)
-private:
+
void fillDummyApplyDiff(std::vector<api::ApplyBucketDiffCommand::Entry>& diff);
std::shared_ptr<api::ApplyBucketDiffCommand> createDummyApplyDiff(
int timestampOffset,
@@ -119,7 +58,7 @@ private:
class HandlerInvoker
{
public:
- virtual ~HandlerInvoker() {}
+ virtual ~HandlerInvoker() = default;
virtual void beforeInvoke(MergeHandlerTest&, MergeHandler&, spi::Context&) {}
virtual void invoke(MergeHandlerTest&, MergeHandler&, spi::Context&) = 0;
virtual std::string afterInvoke(MergeHandlerTest&, MergeHandler&) = 0;
@@ -176,7 +115,7 @@ private:
{
public:
HandleGetBucketDiffReplyInvoker();
- ~HandleGetBucketDiffReplyInvoker();
+ ~HandleGetBucketDiffReplyInvoker() override;
void beforeInvoke(MergeHandlerTest&, MergeHandler&, spi::Context&) override;
void invoke(MergeHandlerTest&, MergeHandler&, spi::Context&) override;
std::string afterInvoke(MergeHandlerTest&, MergeHandler&) override;
@@ -200,7 +139,7 @@ private:
{
public:
HandleApplyBucketDiffReplyInvoker();
- ~HandleApplyBucketDiffReplyInvoker();
+ ~HandleApplyBucketDiffReplyInvoker() override;
void beforeInvoke(MergeHandlerTest&, MergeHandler&, spi::Context&) override;
void invoke(MergeHandlerTest&, MergeHandler&, spi::Context&) override;
std::string afterInvoke(MergeHandlerTest&, MergeHandler&) override;
@@ -217,33 +156,30 @@ private:
const ExpectedExceptionSpec& spec);
};
-CPPUNIT_TEST_SUITE_REGISTRATION(MergeHandlerTest);
-
-
-MergeHandlerTest::HandleGetBucketDiffReplyInvoker::HandleGetBucketDiffReplyInvoker() {}
-MergeHandlerTest::HandleGetBucketDiffReplyInvoker::~HandleGetBucketDiffReplyInvoker() {}
+MergeHandlerTest::HandleGetBucketDiffReplyInvoker::HandleGetBucketDiffReplyInvoker() = default;
+MergeHandlerTest::HandleGetBucketDiffReplyInvoker::~HandleGetBucketDiffReplyInvoker() = default;
MergeHandlerTest::HandleApplyBucketDiffReplyInvoker::HandleApplyBucketDiffReplyInvoker()
: _counter(0),
_stub(),
_applyCmd()
{}
-MergeHandlerTest::HandleApplyBucketDiffReplyInvoker::~HandleApplyBucketDiffReplyInvoker() {}
+MergeHandlerTest::HandleApplyBucketDiffReplyInvoker::~HandleApplyBucketDiffReplyInvoker() = default;
void
-MergeHandlerTest::setUp() {
+MergeHandlerTest::SetUp() {
_context.reset(new spi::Context(documentapi::LoadType::DEFAULT, 0, 0));
- SingleDiskPersistenceTestUtils::setUp();
+ SingleDiskPersistenceTestUtils::SetUp();
_location = 1234;
_bucket = makeDocumentBucket(document::BucketId(16, _location));
_maxTimestamp = 11501;
- LOG(info, "Creating %s in bucket database", _bucket.toString().c_str());
+ LOG(debug, "Creating %s in bucket database", _bucket.toString().c_str());
bucketdb::StorageBucketInfo bucketDBEntry;
bucketDBEntry.disk = 0;
getEnv().getBucketDatabase(_bucket.getBucketSpace()).insert(_bucket.getBucketId(), bucketDBEntry, "mergetestsetup");
- LOG(info, "Creating bucket to merge");
+ LOG(debug, "Creating bucket to merge");
createTestBucket(_bucket);
setUpChain(FRONT);
@@ -261,30 +197,28 @@ MergeHandlerTest::setUpChain(ChainPos pos) {
}
}
-void
-MergeHandlerTest::testMergeBucketCommand()
-{
+// Test a regular merge bucket command fetching data, including
+// puts, removes, unrevertable removes & duplicates.
+TEST_F(MergeHandlerTest, merge_bucket_command) {
MergeHandler handler(getPersistenceProvider(), getEnv());
- LOG(info, "Handle a merge bucket command");
+ LOG(debug, "Handle a merge bucket command");
api::MergeBucketCommand cmd(_bucket, _nodes, _maxTimestamp);
cmd.setSourceIndex(1234);
MessageTracker::UP tracker = handler.handleMergeBucket(cmd, *_context);
- LOG(info, "Check state");
- CPPUNIT_ASSERT_EQUAL(size_t(1), messageKeeper()._msgs.size());
- CPPUNIT_ASSERT_EQUAL(api::MessageType::GETBUCKETDIFF,
- messageKeeper()._msgs[0]->getType());
- api::GetBucketDiffCommand& cmd2(dynamic_cast<api::GetBucketDiffCommand&>(
- *messageKeeper()._msgs[0]));
- CPPUNIT_ASSERT_EQUAL(_nodes, cmd2.getNodes());
- std::vector<api::GetBucketDiffCommand::Entry> diff(cmd2.getDiff());
- CPPUNIT_ASSERT_EQUAL(size_t(17), diff.size());
- CPPUNIT_ASSERT_EQUAL(uint16_t(1), cmd2.getAddress()->getIndex());
- CPPUNIT_ASSERT_EQUAL(uint16_t(1234), cmd2.getSourceIndex());
+ LOG(debug, "Check state");
+ ASSERT_EQ(1, messageKeeper()._msgs.size());
+ ASSERT_EQ(api::MessageType::GETBUCKETDIFF, messageKeeper()._msgs[0]->getType());
+ auto& cmd2 = dynamic_cast<api::GetBucketDiffCommand&>(*messageKeeper()._msgs[0]);
+ EXPECT_THAT(_nodes, ContainerEq(cmd2.getNodes()));
+ auto diff = cmd2.getDiff();
+ EXPECT_EQ(17, diff.size());
+ EXPECT_EQ(1, cmd2.getAddress()->getIndex());
+ EXPECT_EQ(1234, cmd2.getSourceIndex());
tracker->generateReply(cmd);
- CPPUNIT_ASSERT(!tracker->getReply().get());
+ EXPECT_FALSE(tracker->getReply().get());
}
void
@@ -293,288 +227,137 @@ MergeHandlerTest::testGetBucketDiffChain(bool midChain)
setUpChain(midChain ? MIDDLE : BACK);
MergeHandler handler(getPersistenceProvider(), getEnv());
- LOG(info, "Verifying that get bucket diff is sent on");
+ LOG(debug, "Verifying that get bucket diff is sent on");
api::GetBucketDiffCommand cmd(_bucket, _nodes, _maxTimestamp);
MessageTracker::UP tracker1 = handler.handleGetBucketDiff(cmd, *_context);
api::StorageMessage::SP replySent = tracker1->getReply();
if (midChain) {
- LOG(info, "Check state");
- CPPUNIT_ASSERT_EQUAL(size_t(1), messageKeeper()._msgs.size());
- CPPUNIT_ASSERT_EQUAL(api::MessageType::GETBUCKETDIFF,
- messageKeeper()._msgs[0]->getType());
- api::GetBucketDiffCommand& cmd2(
- dynamic_cast<api::GetBucketDiffCommand&>(
- *messageKeeper()._msgs[0]));
- CPPUNIT_ASSERT_EQUAL(_nodes, cmd2.getNodes());
- std::vector<api::GetBucketDiffCommand::Entry> diff(cmd2.getDiff());
- CPPUNIT_ASSERT_EQUAL(size_t(17), diff.size());
- CPPUNIT_ASSERT_EQUAL(uint16_t(1), cmd2.getAddress()->getIndex());
-
- LOG(info, "Verifying that replying the diff sends on back");
- api::GetBucketDiffReply::UP reply(new api::GetBucketDiffReply(cmd2));
-
- CPPUNIT_ASSERT(!replySent.get());
+ LOG(debug, "Check state");
+ ASSERT_EQ(1, messageKeeper()._msgs.size());
+ ASSERT_EQ(api::MessageType::GETBUCKETDIFF, messageKeeper()._msgs[0]->getType());
+ auto& cmd2 = dynamic_cast<api::GetBucketDiffCommand&>(*messageKeeper()._msgs[0]);
+ EXPECT_THAT(_nodes, ContainerEq(cmd2.getNodes()));
+ auto diff = cmd2.getDiff();
+ EXPECT_EQ(17, diff.size());
+ EXPECT_EQ(1, cmd2.getAddress()->getIndex());
+
+ LOG(debug, "Verifying that replying the diff sends on back");
+ auto reply = std::make_unique<api::GetBucketDiffReply>(cmd2);
+
+ ASSERT_FALSE(replySent.get());
MessageSenderStub stub;
handler.handleGetBucketDiffReply(*reply, stub);
- CPPUNIT_ASSERT_EQUAL(1, (int)stub.replies.size());
+ ASSERT_EQ(1, stub.replies.size());
replySent = stub.replies[0];
}
- api::GetBucketDiffReply::SP reply2(
- std::dynamic_pointer_cast<api::GetBucketDiffReply>(
- replySent));
- CPPUNIT_ASSERT(reply2.get());
-
- CPPUNIT_ASSERT_EQUAL(_nodes, reply2->getNodes());
- std::vector<api::GetBucketDiffCommand::Entry> diff(reply2->getDiff());
- CPPUNIT_ASSERT_EQUAL(size_t(17), diff.size());
+ auto reply2 = std::dynamic_pointer_cast<api::GetBucketDiffReply>(replySent);
+ ASSERT_TRUE(reply2.get());
+
+ EXPECT_THAT(_nodes, ContainerEq(reply2->getNodes()));
+ auto diff = reply2->getDiff();
+ EXPECT_EQ(17, diff.size());
+}
+
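+// Run the GetBucketDiff forwarding test from both the middle and the end of the merge chain.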
+TEST_F(MergeHandlerTest, get_bucket_diff_mid_chain) {
+ testGetBucketDiffChain(true);
+}
+
+TEST_F(MergeHandlerTest, get_bucket_diff_end_of_chain) {
+ testGetBucketDiffChain(false);
}
+// Test that a simplistic merge with 1 doc to actually merge
+// sends an apply bucket diff through the entire chain of 3 nodes.
void
MergeHandlerTest::testApplyBucketDiffChain(bool midChain)
{
setUpChain(midChain ? MIDDLE : BACK);
MergeHandler handler(getPersistenceProvider(), getEnv());
- LOG(info, "Verifying that apply bucket diff is sent on");
+ LOG(debug, "Verifying that apply bucket diff is sent on");
api::ApplyBucketDiffCommand cmd(_bucket, _nodes, _maxTimestamp);
MessageTracker::UP tracker1 = handler.handleApplyBucketDiff(cmd, *_context);
api::StorageMessage::SP replySent = tracker1->getReply();
if (midChain) {
- LOG(info, "Check state");
- CPPUNIT_ASSERT_EQUAL(size_t(1), messageKeeper()._msgs.size());
- CPPUNIT_ASSERT_EQUAL(api::MessageType::APPLYBUCKETDIFF,
- messageKeeper()._msgs[0]->getType());
- api::ApplyBucketDiffCommand& cmd2(
- dynamic_cast<api::ApplyBucketDiffCommand&>(
- *messageKeeper()._msgs[0]));
- CPPUNIT_ASSERT_EQUAL(_nodes, cmd2.getNodes());
- std::vector<api::ApplyBucketDiffCommand::Entry> diff(cmd2.getDiff());
- CPPUNIT_ASSERT_EQUAL(size_t(0), diff.size());
- CPPUNIT_ASSERT_EQUAL(uint16_t(1), cmd2.getAddress()->getIndex());
-
- CPPUNIT_ASSERT(!replySent.get());
-
- LOG(info, "Verifying that replying the diff sends on back");
- api::ApplyBucketDiffReply::UP reply(
- new api::ApplyBucketDiffReply(cmd2));
+ LOG(debug, "Check state");
+ ASSERT_EQ(1, messageKeeper()._msgs.size());
+ ASSERT_EQ(api::MessageType::APPLYBUCKETDIFF, messageKeeper()._msgs[0]->getType());
+ auto& cmd2 = dynamic_cast<api::ApplyBucketDiffCommand&>(*messageKeeper()._msgs[0]);
+ EXPECT_THAT(_nodes, ContainerEq(cmd2.getNodes()));
+ auto diff = cmd2.getDiff();
+ EXPECT_EQ(0, diff.size());
+ EXPECT_EQ(1, cmd2.getAddress()->getIndex());
+
+ EXPECT_FALSE(replySent.get());
+
+ LOG(debug, "Verifying that replying the diff sends on back");
+ auto reply = std::make_unique<api::ApplyBucketDiffReply>(cmd2);
MessageSenderStub stub;
handler.handleApplyBucketDiffReply(*reply, stub);
- CPPUNIT_ASSERT_EQUAL(1, (int)stub.replies.size());
+ ASSERT_EQ(1, stub.replies.size());
replySent = stub.replies[0];
}
- api::ApplyBucketDiffReply::SP reply2(
- std::dynamic_pointer_cast<api::ApplyBucketDiffReply>(replySent));
- CPPUNIT_ASSERT(reply2.get());
+ auto reply2 = std::dynamic_pointer_cast<api::ApplyBucketDiffReply>(replySent);
+ ASSERT_TRUE(reply2.get());
- CPPUNIT_ASSERT_EQUAL(_nodes, reply2->getNodes());
- std::vector<api::ApplyBucketDiffCommand::Entry> diff(reply2->getDiff());
- CPPUNIT_ASSERT_EQUAL(size_t(0), diff.size());
+ EXPECT_THAT(_nodes, ContainerEq(reply2->getNodes()));
+ auto diff = reply2->getDiff();
+ EXPECT_EQ(0, diff.size());
}
-void
-MergeHandlerTest::testMasterMessageFlow()
-{
+TEST_F(MergeHandlerTest, apply_bucket_diff_mid_chain) {
+ testApplyBucketDiffChain(true);
+}
+
+TEST_F(MergeHandlerTest, apply_bucket_diff_end_of_chain) {
+ testApplyBucketDiffChain(false);
+}
+
+// Test that a simplistic merge with one thing to actually merge
+// sends the correct commands and finishes.
+TEST_F(MergeHandlerTest, master_message_flow) {
MergeHandler handler(getPersistenceProvider(), getEnv());
- LOG(info, "Handle a merge bucket command");
+ LOG(debug, "Handle a merge bucket command");
api::MergeBucketCommand cmd(_bucket, _nodes, _maxTimestamp);
handler.handleMergeBucket(cmd, *_context);
- LOG(info, "Check state");
- CPPUNIT_ASSERT_EQUAL(size_t(1), messageKeeper()._msgs.size());
- CPPUNIT_ASSERT_EQUAL(api::MessageType::GETBUCKETDIFF,
- messageKeeper()._msgs[0]->getType());
- api::GetBucketDiffCommand& cmd2(dynamic_cast<api::GetBucketDiffCommand&>(
- *messageKeeper()._msgs[0]));
-
- api::GetBucketDiffReply::UP reply(new api::GetBucketDiffReply(cmd2));
- // End of chain can remove entries all have. This should end up with
- // one entry master node has other node don't have
+ LOG(debug, "Check state");
+ ASSERT_EQ(1, messageKeeper()._msgs.size());
+ ASSERT_EQ(api::MessageType::GETBUCKETDIFF, messageKeeper()._msgs[0]->getType());
+ auto& cmd2 = dynamic_cast<api::GetBucketDiffCommand&>(*messageKeeper()._msgs[0]);
+
+ auto reply = std::make_unique<api::GetBucketDiffReply>(cmd2);
+    // The end of the chain can remove entries that all nodes have. This should
+    // end up with one entry that the master node has but the other node doesn't.
reply->getDiff().resize(1);
handler.handleGetBucketDiffReply(*reply, messageKeeper());
- LOG(info, "Check state");
- CPPUNIT_ASSERT_EQUAL(size_t(2), messageKeeper()._msgs.size());
- CPPUNIT_ASSERT_EQUAL(api::MessageType::APPLYBUCKETDIFF,
- messageKeeper()._msgs[1]->getType());
- api::ApplyBucketDiffCommand& cmd3(
- dynamic_cast<api::ApplyBucketDiffCommand&>(
- *messageKeeper()._msgs[1]));
- api::ApplyBucketDiffReply::UP reply2(new api::ApplyBucketDiffReply(cmd3));
- CPPUNIT_ASSERT_EQUAL(size_t(1), reply2->getDiff().size());
- reply2->getDiff()[0]._entry._hasMask |= 2;
+ LOG(debug, "Check state");
+ ASSERT_EQ(2, messageKeeper()._msgs.size());
+ ASSERT_EQ(api::MessageType::APPLYBUCKETDIFF, messageKeeper()._msgs[1]->getType());
+ auto& cmd3 = dynamic_cast<api::ApplyBucketDiffCommand&>(*messageKeeper()._msgs[1]);
+ auto reply2 = std::make_unique<api::ApplyBucketDiffReply>(cmd3);
+ ASSERT_EQ(1, reply2->getDiff().size());
+ reply2->getDiff()[0]._entry._hasMask |= 2u;
MessageSenderStub stub;
handler.handleApplyBucketDiffReply(*reply2, stub);
- CPPUNIT_ASSERT_EQUAL(1, (int)stub.replies.size());
-
- api::MergeBucketReply::SP reply3(
- std::dynamic_pointer_cast<api::MergeBucketReply>(stub.replies[0]));
- CPPUNIT_ASSERT(reply3.get());
-
- CPPUNIT_ASSERT_EQUAL(_nodes, reply3->getNodes());
- CPPUNIT_ASSERT(reply3->getResult().success());
- CPPUNIT_ASSERT(!fsHandler().isMerging(_bucket));
-}
-
-void
-MergeHandlerTest::testMergeUnrevertableRemove()
-{
-/*
- MergeHandler handler(getPersistenceProvider(), getEnv());
-
- LOG(info, "Handle a merge bucket command");
- api::MergeBucketCommand cmd(_bucket, _nodes, _maxTimestamp);
- {
- MessageTracker tracker;
- handler.handleMergeBucket(cmd, tracker);
- }
-
- LOG(info, "Check state");
- CPPUNIT_ASSERT_EQUAL(uint64_t(1), messageKeeper()._msgs.size());
- CPPUNIT_ASSERT_EQUAL(api::MessageType::GETBUCKETDIFF,
- messageKeeper()._msgs[0]->getType());
- api::GetBucketDiffCommand& cmd2(
- dynamic_cast<api::GetBucketDiffCommand&>(
- *messageKeeper()._msgs[0]));
-
- api::GetBucketDiffReply::UP reply(new api::GetBucketDiffReply(cmd2));
-
- std::vector<Timestamp> docTimestamps;
- for (int i = 0; i < 4; ++i) {
- docTimestamps.push_back(Timestamp(reply->getDiff()[i]._timestamp));
- }
- CPPUNIT_ASSERT(reply->getDiff().size() >= 4);
- reply->getDiff().resize(4);
- // Add one non-unrevertable entry for existing timestamp which
- // should not be added
- reply->getDiff()[0]._flags |= Types::DELETED;
- reply->getDiff()[0]._bodySize = 0;
- reply->getDiff()[0]._hasMask = 2;
- // Add a unrevertable entry which should be modified
- reply->getDiff()[1]._flags |= Types::DELETED | Types::DELETED_IN_PLACE;
- reply->getDiff()[1]._bodySize = 0;
- reply->getDiff()[1]._hasMask = 2;
- // Add one non-unrevertable entry that is a duplicate put
- // which should not be added or fail the merge.
- LOG(info, "duplicate put has timestamp %zu and flags %u",
- reply->getDiff()[2]._timestamp,
- reply->getDiff()[2]._flags);
- reply->getDiff()[2]._hasMask = 2;
- // Add one unrevertable entry for a timestamp that does not exist
- reply->getDiff()[3]._flags |= Types::DELETED | Types::DELETED_IN_PLACE;
- reply->getDiff()[3]._timestamp = 12345678;
- reply->getDiff()[3]._bodySize = 0;
- reply->getDiff()[3]._hasMask = 2;
- {
- MessageTracker tracker;
- handler.handleGetBucketDiffReply(*reply, tracker);
- }
-
- LOG(info, "%s", reply->toString(true).c_str());
-
- LOG(info, "Create bucket diff reply");
- CPPUNIT_ASSERT_EQUAL(uint64_t(2), messageKeeper()._msgs.size());
- CPPUNIT_ASSERT_EQUAL(api::MessageType::APPLYBUCKETDIFF,
- messageKeeper()._msgs[1]->getType());
- api::ApplyBucketDiffCommand& cmd3(
- dynamic_cast<api::ApplyBucketDiffCommand&>(
- *messageKeeper()._msgs[1]));
- api::ApplyBucketDiffReply::UP reply2(
- new api::ApplyBucketDiffReply(cmd3));
- CPPUNIT_ASSERT_EQUAL(size_t(4), reply2->getDiff().size());
-
- memfile::DataLocation headerLocs[4];
- std::vector<DocumentId> documentIds;
- // So deserialization won't fail, we need some kind of header blob
- // for each entry
-
- for (int i = 0; i < 4; ++i) {
- api::ApplyBucketDiffReply::Entry& entry = reply2->getDiff()[i];
- CPPUNIT_ASSERT_EQUAL(uint16_t(2), entry._entry._hasMask);
-
- memfile::MemFilePtr file(getMemFile(_bucket));
- const memfile::MemSlot* slot = file->getSlotAtTime(docTimestamps[i]);
- CPPUNIT_ASSERT(slot != NULL);
- LOG(info, "Processing slot %s", slot->toString().c_str());
- CPPUNIT_ASSERT(slot->hasBodyContent());
- documentIds.push_back(file->getDocumentId(*slot));
- entry._docName = documentIds.back().toString();
- headerLocs[i] = slot->getLocation(HEADER);
-
- document::Document::UP doc(file->getDocument(*slot, ALL));
- {
- vespalib::nbostream stream;
- doc->serializeHeader(stream);
- std::vector<char> buf(
- stream.peek(), stream.peek() + stream.size());
- entry._headerBlob.swap(buf);
- }
- // Put duplicate needs body blob as well
- if (i == 2) {
- vespalib::nbostream stream;
- doc->serializeBody(stream);
- std::vector<char> buf(
- stream.peek(), stream.peek() + stream.size());
- entry._bodyBlob.swap(buf);
- }
- }
-
- LOG(info, "%s", reply2->toString(true).c_str());
-
- MessageTracker tracker;
- handler.handleApplyBucketDiffReply(*reply2, tracker);
-
- CPPUNIT_ASSERT(tracker._sendReply);
- api::MergeBucketReply::SP reply3(
- std::dynamic_pointer_cast<api::MergeBucketReply>(
- tracker._reply));
- CPPUNIT_ASSERT(reply3.get());
-
- CPPUNIT_ASSERT_EQUAL(_nodes, reply3->getNodes());
- CPPUNIT_ASSERT(reply3->getResult().success());
-
- memfile::MemFilePtr file(getMemFile(_bucket));
- // Existing timestamp should not be modified by
- // non-unrevertable entry
- {
- const memfile::MemSlot* slot = file->getSlotAtTime(
- Timestamp(reply->getDiff()[0]._timestamp));
- CPPUNIT_ASSERT(slot != NULL);
- CPPUNIT_ASSERT(!slot->deleted());
- }
- // Ensure unrevertable remove for existing put was merged in OK
- {
- const memfile::MemSlot* slot = file->getSlotAtTime(
- Timestamp(reply->getDiff()[1]._timestamp));
- CPPUNIT_ASSERT(slot != NULL);
- CPPUNIT_ASSERT(slot->deleted());
- CPPUNIT_ASSERT(slot->deletedInPlace());
- CPPUNIT_ASSERT(!slot->hasBodyContent());
- // Header location should not have changed
- CPPUNIT_ASSERT_EQUAL(headerLocs[1], slot->getLocation(HEADER));
- }
+ ASSERT_EQ(1, stub.replies.size());
- // Non-existing timestamp unrevertable remove should be added as
- // entry with doc id-only header
- {
- const memfile::MemSlot* slot = file->getSlotAtTime(
- Timestamp(reply->getDiff()[3]._timestamp));
- CPPUNIT_ASSERT(slot != NULL);
- CPPUNIT_ASSERT(slot->deleted());
- CPPUNIT_ASSERT(slot->deletedInPlace());
- CPPUNIT_ASSERT(!slot->hasBodyContent());
- CPPUNIT_ASSERT_EQUAL(documentIds[3], file->getDocumentId(*slot));
- }
+ auto reply3 = std::dynamic_pointer_cast<api::MergeBucketReply>(stub.replies[0]);
+ ASSERT_TRUE(reply3.get());
-*/
+ EXPECT_THAT(_nodes, ContainerEq(reply3->getNodes()));
+ EXPECT_TRUE(reply3->getResult().success());
+ EXPECT_FALSE(fsHandler().isMerging(_bucket));
}
template <typename T>
@@ -631,9 +414,7 @@ getFilledDataSize(const std::vector<api::ApplyBucketDiffCommand::Entry>& diff)
}
-void
-MergeHandlerTest::testChunkedApplyBucketDiff()
-{
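+// Test that a large apply diff is chunked, so each forwarded ApplyBucketDiff
+// stays within the configured maximum chunk size.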
+TEST_F(MergeHandlerTest, chunked_apply_bucket_diff) {
uint32_t docSize = 1024;
uint32_t docCount = 10;
uint32_t maxChunkSize = docSize * 3;
@@ -643,14 +424,12 @@ MergeHandlerTest::testChunkedApplyBucketDiff()
MergeHandler handler(getPersistenceProvider(), getEnv(), maxChunkSize);
- LOG(info, "Handle a merge bucket command");
+ LOG(debug, "Handle a merge bucket command");
api::MergeBucketCommand cmd(_bucket, _nodes, _maxTimestamp);
handler.handleMergeBucket(cmd, *_context);
- std::shared_ptr<api::GetBucketDiffCommand> getBucketDiffCmd(
- fetchSingleMessage<api::GetBucketDiffCommand>());
- api::GetBucketDiffReply::UP getBucketDiffReply(
- new api::GetBucketDiffReply(*getBucketDiffCmd));
+ auto getBucketDiffCmd = fetchSingleMessage<api::GetBucketDiffCommand>();
+ auto getBucketDiffReply = std::make_unique<api::GetBucketDiffReply>(*getBucketDiffCmd);
handler.handleGetBucketDiffReply(*getBucketDiffReply, messageKeeper());
@@ -659,14 +438,12 @@ MergeHandlerTest::testChunkedApplyBucketDiff()
api::MergeBucketReply::SP reply;
while (seen.size() != totalDiffs) {
- std::shared_ptr<api::ApplyBucketDiffCommand> applyBucketDiffCmd(
- fetchSingleMessage<api::ApplyBucketDiffCommand>());
+ auto applyBucketDiffCmd = fetchSingleMessage<api::ApplyBucketDiffCommand>();
- LOG(info, "Test that we get chunked diffs in ApplyBucketDiff");
- std::vector<api::ApplyBucketDiffCommand::Entry>& diff(
- applyBucketDiffCmd->getDiff());
- CPPUNIT_ASSERT(getFilledCount(diff) < totalDiffs);
- CPPUNIT_ASSERT(getFilledDataSize(diff) <= maxChunkSize);
+ LOG(debug, "Test that we get chunked diffs in ApplyBucketDiff");
+ auto& diff = applyBucketDiffCmd->getDiff();
+ ASSERT_LT(getFilledCount(diff), totalDiffs);
+ ASSERT_LE(getFilledDataSize(diff), maxChunkSize);
// Include node 1 in hasmask for all diffs to indicate it's done
// Also remember the diffs we've seen thus far to ensure chunking
@@ -675,39 +452,33 @@ MergeHandlerTest::testChunkedApplyBucketDiff()
if (!diff[i].filled()) {
continue;
}
- diff[i]._entry._hasMask |= 2;
- std::pair<std::set<spi::Timestamp>::iterator, bool> inserted(
- seen.insert(spi::Timestamp(diff[i]._entry._timestamp)));
+ diff[i]._entry._hasMask |= 2u;
+ auto inserted = seen.emplace(spi::Timestamp(diff[i]._entry._timestamp));
if (!inserted.second) {
- std::ostringstream ss;
- ss << "Diff for " << diff[i]
- << " has already been seen in another ApplyBucketDiff";
- CPPUNIT_FAIL(ss.str());
+ FAIL() << "Diff for " << diff[i]
+ << " has already been seen in another ApplyBucketDiff";
}
}
- api::ApplyBucketDiffReply::UP applyBucketDiffReply(
- new api::ApplyBucketDiffReply(*applyBucketDiffCmd));
+ auto applyBucketDiffReply = std::make_unique<api::ApplyBucketDiffReply>(*applyBucketDiffCmd);
{
handler.handleApplyBucketDiffReply(*applyBucketDiffReply, messageKeeper());
- if (messageKeeper()._msgs.size()) {
- CPPUNIT_ASSERT(!reply.get());
+ if (!messageKeeper()._msgs.empty()) {
+ ASSERT_FALSE(reply.get());
reply = std::dynamic_pointer_cast<api::MergeBucketReply>(
messageKeeper()._msgs[messageKeeper()._msgs.size() - 1]);
}
}
}
- LOG(info, "Done with applying diff");
+ LOG(debug, "Done with applying diff");
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(_nodes, reply->getNodes());
- CPPUNIT_ASSERT(reply->getResult().success());
+ ASSERT_TRUE(reply.get());
+ EXPECT_THAT(_nodes, ContainerEq(reply->getNodes()));
+ EXPECT_TRUE(reply->getResult().success());
}
-void
-MergeHandlerTest::testChunkLimitPartiallyFilledDiff()
-{
+TEST_F(MergeHandlerTest, chunk_limit_partially_filled_diff) {
setUpChain(FRONT);
uint32_t docSize = 1024;
@@ -731,24 +502,20 @@ MergeHandlerTest::testChunkLimitPartiallyFilledDiff()
}
setUpChain(MIDDLE);
- std::shared_ptr<api::ApplyBucketDiffCommand> applyBucketDiffCmd(
- new api::ApplyBucketDiffCommand(_bucket, _nodes, maxChunkSize));
+ auto applyBucketDiffCmd = std::make_shared<api::ApplyBucketDiffCommand>(_bucket, _nodes, maxChunkSize);
applyBucketDiffCmd->getDiff() = applyDiff;
MergeHandler handler(
getPersistenceProvider(), getEnv(), maxChunkSize);
handler.handleApplyBucketDiff(*applyBucketDiffCmd, *_context);
- std::shared_ptr<api::ApplyBucketDiffCommand> fwdDiffCmd(
- fetchSingleMessage<api::ApplyBucketDiffCommand>());
+ auto fwdDiffCmd = fetchSingleMessage<api::ApplyBucketDiffCommand>();
// Should not fill up more than chunk size allows for
- CPPUNIT_ASSERT_EQUAL(size_t(2), getFilledCount(fwdDiffCmd->getDiff()));
- CPPUNIT_ASSERT(getFilledDataSize(fwdDiffCmd->getDiff()) <= maxChunkSize);
+ EXPECT_EQ(2, getFilledCount(fwdDiffCmd->getDiff()));
+ EXPECT_LE(getFilledDataSize(fwdDiffCmd->getDiff()), maxChunkSize);
}
-void
-MergeHandlerTest::testMaxTimestamp()
-{
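+// Test that documents stored with a timestamp above the merge command's
+// max timestamp are not included in the resulting bucket diff.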
+TEST_F(MergeHandlerTest, max_timestamp) {
doPut(1234, spi::Timestamp(_maxTimestamp + 10), 1024, 1024);
MergeHandler handler(getPersistenceProvider(), getEnv());
@@ -756,11 +523,10 @@ MergeHandlerTest::testMaxTimestamp()
api::MergeBucketCommand cmd(_bucket, _nodes, _maxTimestamp);
handler.handleMergeBucket(cmd, *_context);
- std::shared_ptr<api::GetBucketDiffCommand> getCmd(
- fetchSingleMessage<api::GetBucketDiffCommand>());
+ auto getCmd = fetchSingleMessage<api::GetBucketDiffCommand>();
- CPPUNIT_ASSERT(!getCmd->getDiff().empty());
- CPPUNIT_ASSERT(getCmd->getDiff().back()._timestamp <= _maxTimestamp);
+ ASSERT_FALSE(getCmd->getDiff().empty());
+ EXPECT_LE(getCmd->getDiff().back()._timestamp, _maxTimestamp);
}
void
@@ -819,8 +585,7 @@ MergeHandlerTest::createDummyApplyDiff(int timestampOffset,
fillDummyApplyDiff(applyDiff);
}
- std::shared_ptr<api::ApplyBucketDiffCommand> applyBucketDiffCmd(
- new api::ApplyBucketDiffCommand(_bucket, _nodes, 1024*1024));
+ auto applyBucketDiffCmd = std::make_shared<api::ApplyBucketDiffCommand>(_bucket, _nodes, 1024*1024);
applyBucketDiffCmd->getDiff() = applyDiff;
return applyBucketDiffCmd;
}
@@ -855,109 +620,86 @@ MergeHandlerTest::createDummyGetBucketDiff(int timestampOffset,
diff.push_back(e);
}
- std::shared_ptr<api::GetBucketDiffCommand> getBucketDiffCmd(
- new api::GetBucketDiffCommand(_bucket, _nodes, 1024*1024));
+ auto getBucketDiffCmd = std::make_shared<api::GetBucketDiffCommand>(_bucket, _nodes, 1024*1024);
getBucketDiffCmd->getDiff() = diff;
return getBucketDiffCmd;
}
-void
-MergeHandlerTest::testSPIFlushGuard()
-{
+TEST_F(MergeHandlerTest, spi_flush_guard) {
PersistenceProviderWrapper providerWrapper(
getPersistenceProvider());
MergeHandler handler(providerWrapper, getEnv());
providerWrapper.setResult(
- spi::Result(spi::Result::PERMANENT_ERROR,
- "who you gonna call?"));
+ spi::Result(spi::Result::PERMANENT_ERROR, "who you gonna call?"));
setUpChain(MIDDLE);
// Fail applying unrevertable remove
providerWrapper.setFailureMask(
PersistenceProviderWrapper::FAIL_REMOVE);
providerWrapper.clearOperationLog();
+
try {
handler.handleApplyBucketDiff(*createDummyApplyDiff(6000), *_context);
- CPPUNIT_FAIL("No exception thrown on failing in-place remove");
+ FAIL() << "No exception thrown on failing in-place remove";
} catch (const std::runtime_error& e) {
- CPPUNIT_ASSERT(std::string(e.what()).find("Failed remove")
- != std::string::npos);
+ EXPECT_TRUE(std::string(e.what()).find("Failed remove") != std::string::npos);
}
// Test that we always flush after applying diff locally, even when
// errors are encountered.
const std::vector<std::string>& opLog(providerWrapper.getOperationLog());
- CPPUNIT_ASSERT(!opLog.empty());
- CPPUNIT_ASSERT_EQUAL(
- std::string("flush(Bucket(0x40000000000004d2, partition 0))"),
- opLog.back());
+ ASSERT_FALSE(opLog.empty());
+ EXPECT_EQ("flush(Bucket(0x40000000000004d2, partition 0))", opLog.back());
}
-void
-MergeHandlerTest::testBucketNotFoundInDb()
-{
+TEST_F(MergeHandlerTest, bucket_not_found_in_db) {
MergeHandler handler(getPersistenceProvider(), getEnv());
// Send merge for unknown bucket
api::MergeBucketCommand cmd(makeDocumentBucket(document::BucketId(16, 6789)), _nodes, _maxTimestamp);
MessageTracker::UP tracker = handler.handleMergeBucket(cmd, *_context);
- CPPUNIT_ASSERT(tracker->getResult().isBucketDisappearance());
+ EXPECT_TRUE(tracker->getResult().isBucketDisappearance());
}
-void
-MergeHandlerTest::testMergeProgressSafeGuard()
-{
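+// Test that a merge which makes no progress (no hasMask changes in the apply
+// bucket diff reply) is failed with INTERNAL_FAILURE.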
+TEST_F(MergeHandlerTest, merge_progress_safe_guard) {
MergeHandler handler(getPersistenceProvider(), getEnv());
api::MergeBucketCommand cmd(_bucket, _nodes, _maxTimestamp);
handler.handleMergeBucket(cmd, *_context);
- std::shared_ptr<api::GetBucketDiffCommand> getBucketDiffCmd(
- fetchSingleMessage<api::GetBucketDiffCommand>());
- api::GetBucketDiffReply::UP getBucketDiffReply(
- new api::GetBucketDiffReply(*getBucketDiffCmd));
+ auto getBucketDiffCmd = fetchSingleMessage<api::GetBucketDiffCommand>();
+ auto getBucketDiffReply = std::make_unique<api::GetBucketDiffReply>(*getBucketDiffCmd);
handler.handleGetBucketDiffReply(*getBucketDiffReply, messageKeeper());
- std::shared_ptr<api::ApplyBucketDiffCommand> applyBucketDiffCmd(
- fetchSingleMessage<api::ApplyBucketDiffCommand>());
- api::ApplyBucketDiffReply::UP applyBucketDiffReply(
- new api::ApplyBucketDiffReply(*applyBucketDiffCmd));
+ auto applyBucketDiffCmd = fetchSingleMessage<api::ApplyBucketDiffCommand>();
+ auto applyBucketDiffReply = std::make_unique<api::ApplyBucketDiffReply>(*applyBucketDiffCmd);
MessageSenderStub stub;
handler.handleApplyBucketDiffReply(*applyBucketDiffReply, stub);
- CPPUNIT_ASSERT_EQUAL(1, (int)stub.replies.size());
+ ASSERT_EQ(1, stub.replies.size());
- api::MergeBucketReply::SP mergeReply(
- std::dynamic_pointer_cast<api::MergeBucketReply>(
- stub.replies[0]));
- CPPUNIT_ASSERT(mergeReply.get());
- CPPUNIT_ASSERT(mergeReply->getResult().getResult()
- == api::ReturnCode::INTERNAL_FAILURE);
+ auto mergeReply = std::dynamic_pointer_cast<api::MergeBucketReply>(stub.replies[0]);
+ ASSERT_TRUE(mergeReply.get());
+ EXPECT_EQ(mergeReply->getResult().getResult(), api::ReturnCode::INTERNAL_FAILURE);
}
-void
-MergeHandlerTest::testSafeGuardNotInvokedWhenHasMaskChanges()
-{
+TEST_F(MergeHandlerTest, safe_guard_not_invoked_when_has_mask_changes) {
MergeHandler handler(getPersistenceProvider(), getEnv());
_nodes.clear();
- _nodes.push_back(api::MergeBucketCommand::Node(0, false));
- _nodes.push_back(api::MergeBucketCommand::Node(1, false));
- _nodes.push_back(api::MergeBucketCommand::Node(2, false));
+ _nodes.emplace_back(0, false);
+ _nodes.emplace_back(1, false);
+ _nodes.emplace_back(2, false);
api::MergeBucketCommand cmd(_bucket, _nodes, _maxTimestamp);
handler.handleMergeBucket(cmd, *_context);
- std::shared_ptr<api::GetBucketDiffCommand> getBucketDiffCmd(
- fetchSingleMessage<api::GetBucketDiffCommand>());
- api::GetBucketDiffReply::UP getBucketDiffReply(
- new api::GetBucketDiffReply(*getBucketDiffCmd));
+ auto getBucketDiffCmd = fetchSingleMessage<api::GetBucketDiffCommand>();
+ auto getBucketDiffReply = std::make_unique<api::GetBucketDiffReply>(*getBucketDiffCmd);
handler.handleGetBucketDiffReply(*getBucketDiffReply, messageKeeper());
- std::shared_ptr<api::ApplyBucketDiffCommand> applyBucketDiffCmd(
- fetchSingleMessage<api::ApplyBucketDiffCommand>());
- api::ApplyBucketDiffReply::UP applyBucketDiffReply(
- new api::ApplyBucketDiffReply(*applyBucketDiffCmd));
- CPPUNIT_ASSERT(!applyBucketDiffReply->getDiff().empty());
+ auto applyBucketDiffCmd = fetchSingleMessage<api::ApplyBucketDiffCommand>();
+ auto applyBucketDiffReply = std::make_unique<api::ApplyBucketDiffReply>(*applyBucketDiffCmd);
+ ASSERT_FALSE(applyBucketDiffReply->getDiff().empty());
// Change a hasMask to indicate something changed during merging.
applyBucketDiffReply->getDiff()[0]._entry._hasMask = 0x5;
@@ -965,21 +707,15 @@ MergeHandlerTest::testSafeGuardNotInvokedWhenHasMaskChanges()
LOG(debug, "sending apply bucket diff reply");
handler.handleApplyBucketDiffReply(*applyBucketDiffReply, stub);
- CPPUNIT_ASSERT_EQUAL(1, (int)stub.commands.size());
+ ASSERT_EQ(1, stub.commands.size());
- api::ApplyBucketDiffCommand::SP applyBucketDiffCmd2(
- std::dynamic_pointer_cast<api::ApplyBucketDiffCommand>(
- stub.commands[0]));
- CPPUNIT_ASSERT(applyBucketDiffCmd2.get());
- CPPUNIT_ASSERT_EQUAL(applyBucketDiffCmd->getDiff().size(),
- applyBucketDiffCmd2->getDiff().size());
- CPPUNIT_ASSERT_EQUAL(uint16_t(0x5),
- applyBucketDiffCmd2->getDiff()[0]._entry._hasMask);
+ auto applyBucketDiffCmd2 = std::dynamic_pointer_cast<api::ApplyBucketDiffCommand>(stub.commands[0]);
+ ASSERT_TRUE(applyBucketDiffCmd2.get());
+ ASSERT_EQ(applyBucketDiffCmd->getDiff().size(), applyBucketDiffCmd2->getDiff().size());
+ EXPECT_EQ(0x5, applyBucketDiffCmd2->getDiff()[0]._entry._hasMask);
}
-void
-MergeHandlerTest::testEntryRemovedAfterGetBucketDiff()
-{
+TEST_F(MergeHandlerTest, entry_removed_after_get_bucket_diff) {
MergeHandler handler(getPersistenceProvider(), getEnv());
std::vector<api::ApplyBucketDiffCommand::Entry> applyDiff;
{
@@ -990,22 +726,18 @@ MergeHandlerTest::testEntryRemovedAfterGetBucketDiff()
applyDiff.push_back(e);
}
setUpChain(BACK);
- std::shared_ptr<api::ApplyBucketDiffCommand> applyBucketDiffCmd(
- new api::ApplyBucketDiffCommand(_bucket, _nodes, 1024*1024));
+ auto applyBucketDiffCmd = std::make_shared<api::ApplyBucketDiffCommand>(_bucket, _nodes, 1024*1024);
applyBucketDiffCmd->getDiff() = applyDiff;
- MessageTracker::UP tracker = handler.handleApplyBucketDiff(*applyBucketDiffCmd, *_context);
+ auto tracker = handler.handleApplyBucketDiff(*applyBucketDiffCmd, *_context);
- api::ApplyBucketDiffReply::SP applyBucketDiffReply(
- std::dynamic_pointer_cast<api::ApplyBucketDiffReply>(
- tracker->getReply()));
- CPPUNIT_ASSERT(applyBucketDiffReply.get());
-
- std::vector<api::ApplyBucketDiffCommand::Entry>& diff(
- applyBucketDiffReply->getDiff());
- CPPUNIT_ASSERT_EQUAL(size_t(1), diff.size());
- CPPUNIT_ASSERT(!diff[0].filled());
- CPPUNIT_ASSERT_EQUAL(uint16_t(0x0), diff[0]._entry._hasMask);
+ auto applyBucketDiffReply = std::dynamic_pointer_cast<api::ApplyBucketDiffReply>(tracker->getReply());
+ ASSERT_TRUE(applyBucketDiffReply.get());
+
+ auto& diff = applyBucketDiffReply->getDiff();
+ ASSERT_EQ(1, diff.size());
+ EXPECT_FALSE(diff[0].filled());
+ EXPECT_EQ(0x0, diff[0]._entry._hasMask);
}
std::string
@@ -1090,15 +822,11 @@ MergeHandlerTest::HandleMergeBucketInvoker::invoke(
handler.handleMergeBucket(cmd, context);
}
-void
-MergeHandlerTest::testMergeBucketSPIFailures()
-{
- PersistenceProviderWrapper providerWrapper(
- getPersistenceProvider());
+TEST_F(MergeHandlerTest, merge_bucket_spi_failures) {
+ PersistenceProviderWrapper providerWrapper(getPersistenceProvider());
MergeHandler handler(providerWrapper, getEnv());
providerWrapper.setResult(
- spi::Result(spi::Result::PERMANENT_ERROR,
- "who you gonna call?"));
+ spi::Result(spi::Result::PERMANENT_ERROR, "who you gonna call?"));
setUpChain(MIDDLE);
ExpectedExceptionSpec exceptions[] = {
@@ -1112,11 +840,7 @@ MergeHandlerTest::testMergeBucketSPIFailures()
for (ExceptionIterator it = exceptions; it != last; ++it) {
HandleMergeBucketInvoker invoker;
- CPPUNIT_ASSERT_EQUAL(std::string(),
- doTestSPIException(handler,
- providerWrapper,
- invoker,
- *it));
+ EXPECT_EQ("", doTestSPIException(handler, providerWrapper, invoker, *it));
}
}
@@ -1130,15 +854,11 @@ MergeHandlerTest::HandleGetBucketDiffInvoker::invoke(
handler.handleGetBucketDiff(cmd, context);
}
-void
-MergeHandlerTest::testGetBucketDiffSPIFailures()
-{
- PersistenceProviderWrapper providerWrapper(
- getPersistenceProvider());
+TEST_F(MergeHandlerTest, get_bucket_diff_spi_failures) {
+ PersistenceProviderWrapper providerWrapper(getPersistenceProvider());
MergeHandler handler(providerWrapper, getEnv());
providerWrapper.setResult(
- spi::Result(spi::Result::PERMANENT_ERROR,
- "who you gonna call?"));
+ spi::Result(spi::Result::PERMANENT_ERROR, "who you gonna call?"));
setUpChain(MIDDLE);
ExpectedExceptionSpec exceptions[] = {
@@ -1153,11 +873,7 @@ MergeHandlerTest::testGetBucketDiffSPIFailures()
for (ExceptionIterator it = exceptions; it != last; ++it) {
HandleGetBucketDiffInvoker invoker;
- CPPUNIT_ASSERT_EQUAL(std::string(),
- doTestSPIException(handler,
- providerWrapper,
- invoker,
- *it));
+ EXPECT_EQ("", doTestSPIException(handler, providerWrapper, invoker, *it));
}
}
@@ -1173,15 +889,11 @@ MergeHandlerTest::HandleApplyBucketDiffInvoker::invoke(
handler.handleApplyBucketDiff(*cmd, context);
}
-void
-MergeHandlerTest::testApplyBucketDiffSPIFailures()
-{
- PersistenceProviderWrapper providerWrapper(
- getPersistenceProvider());
+TEST_F(MergeHandlerTest, apply_bucket_diff_spi_failures) {
+ PersistenceProviderWrapper providerWrapper(getPersistenceProvider());
MergeHandler handler(providerWrapper, getEnv());
providerWrapper.setResult(
- spi::Result(spi::Result::PERMANENT_ERROR,
- "who you gonna call?"));
+ spi::Result(spi::Result::PERMANENT_ERROR, "who you gonna call?"));
setUpChain(MIDDLE);
ExpectedExceptionSpec exceptions[] = {
@@ -1197,15 +909,11 @@ MergeHandlerTest::testApplyBucketDiffSPIFailures()
for (ExceptionIterator it = exceptions; it != last; ++it) {
HandleApplyBucketDiffInvoker invoker;
- CPPUNIT_ASSERT_EQUAL(std::string(),
- doTestSPIException(handler,
- providerWrapper,
- invoker,
- *it));
+ EXPECT_EQ("", doTestSPIException(handler, providerWrapper, invoker, *it));
// Casual, in-place testing of bug 6752085.
// This will fail if we give NaN to the metric in question.
- CPPUNIT_ASSERT(std::isfinite(getEnv()._metrics
- .mergeAverageDataReceivedNeeded.getLast()));
+ EXPECT_TRUE(std::isfinite(getEnv()._metrics
+ .mergeAverageDataReceivedNeeded.getLast()));
}
}
@@ -1248,15 +956,11 @@ MergeHandlerTest::HandleGetBucketDiffReplyInvoker::afterInvoke(
api::ReturnCode::INTERNAL_FAILURE);
}
-void
-MergeHandlerTest::testGetBucketDiffReplySPIFailures()
-{
- PersistenceProviderWrapper providerWrapper(
- getPersistenceProvider());
+TEST_F(MergeHandlerTest, get_bucket_diff_reply_spi_failures) {
+ PersistenceProviderWrapper providerWrapper(getPersistenceProvider());
MergeHandler handler(providerWrapper, getEnv());
providerWrapper.setResult(
- spi::Result(spi::Result::PERMANENT_ERROR,
- "who you gonna call?"));
+ spi::Result(spi::Result::PERMANENT_ERROR, "who you gonna call?"));
HandleGetBucketDiffReplyInvoker invoker;
setUpChain(FRONT);
@@ -1270,11 +974,7 @@ MergeHandlerTest::testGetBucketDiffReplySPIFailures()
ExceptionIterator last = exceptions + sizeof(exceptions)/sizeof(exceptions[0]);
for (ExceptionIterator it = exceptions; it != last; ++it) {
- CPPUNIT_ASSERT_EQUAL(std::string(),
- doTestSPIException(handler,
- providerWrapper,
- invoker,
- *it));
+ EXPECT_EQ("", doTestSPIException(handler, providerWrapper, invoker, *it));
}
}
@@ -1289,23 +989,20 @@ MergeHandlerTest::HandleApplyBucketDiffReplyInvoker::beforeInvoke(
if (getChainPos() == FRONT) {
api::MergeBucketCommand cmd(test._bucket, test._nodes, test._maxTimestamp);
handler.handleMergeBucket(cmd, context);
- std::shared_ptr<api::GetBucketDiffCommand> diffCmd(
- test.fetchSingleMessage<api::GetBucketDiffCommand>());
- std::shared_ptr<api::GetBucketDiffCommand> dummyDiff(
- test.createDummyGetBucketDiff(100000 * _counter, 0x4));
+ auto diffCmd = test.fetchSingleMessage<api::GetBucketDiffCommand>();
+ auto dummyDiff = test.createDummyGetBucketDiff(100000 * _counter, 0x4);
diffCmd->getDiff() = dummyDiff->getDiff();
api::GetBucketDiffReply diffReply(*diffCmd);
handler.handleGetBucketDiffReply(diffReply, _stub);
- CPPUNIT_ASSERT_EQUAL(size_t(1), _stub.commands.size());
+ assert(_stub.commands.size() == 1);
_applyCmd = std::dynamic_pointer_cast<api::ApplyBucketDiffCommand>(
_stub.commands[0]);
} else {
// Pretend last node in chain has data and that it will be fetched when
// chain is unwinded.
- std::shared_ptr<api::ApplyBucketDiffCommand> cmd(
- test.createDummyApplyDiff(100000 * _counter, 0x4, false));
+ auto cmd = test.createDummyApplyDiff(100000 * _counter, 0x4, false);
handler.handleApplyBucketDiff(*cmd, context);
_applyCmd = test.fetchSingleMessage<api::ApplyBucketDiffCommand>();
}
@@ -1345,11 +1042,8 @@ MergeHandlerTest::HandleApplyBucketDiffReplyInvoker::afterInvoke(
}
}
-void
-MergeHandlerTest::testApplyBucketDiffReplySPIFailures()
-{
- PersistenceProviderWrapper providerWrapper(
- getPersistenceProvider());
+TEST_F(MergeHandlerTest, apply_bucket_diff_reply_spi_failures) {
+ PersistenceProviderWrapper providerWrapper(getPersistenceProvider());
HandleApplyBucketDiffReplyInvoker invoker;
for (int i = 0; i < 2; ++i) {
ChainPos pos(i == 0 ? FRONT : MIDDLE);
@@ -1357,8 +1051,7 @@ MergeHandlerTest::testApplyBucketDiffReplySPIFailures()
invoker.setChainPos(pos);
MergeHandler handler(providerWrapper, getEnv());
providerWrapper.setResult(
- spi::Result(spi::Result::PERMANENT_ERROR,
- "who you gonna call?"));
+ spi::Result(spi::Result::PERMANENT_ERROR, "who you gonna call?"));
ExpectedExceptionSpec exceptions[] = {
{ PersistenceProviderWrapper::FAIL_CREATE_ITERATOR, "create iterator" },
@@ -1372,18 +1065,12 @@ MergeHandlerTest::testApplyBucketDiffReplySPIFailures()
ExceptionIterator last = exceptions + sizeof(exceptions)/sizeof(exceptions[0]);
for (ExceptionIterator it = exceptions; it != last; ++it) {
- CPPUNIT_ASSERT_EQUAL(std::string(),
- doTestSPIException(handler,
- providerWrapper,
- invoker,
- *it));
+ EXPECT_EQ("", doTestSPIException(handler, providerWrapper, invoker, *it));
}
}
}
-void
-MergeHandlerTest::testRemoveFromDiff()
-{
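+// Exercise MergeStatus::removeFromDiff with varying entry flags and hasMasks.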
+TEST_F(MergeHandlerTest, remove_from_diff) {
framework::defaultimplementation::FakeClock clock;
MergeStatus status(clock, documentapi::LoadType::DEFAULT, 0, 0);
@@ -1408,8 +1095,8 @@ MergeHandlerTest::testRemoveFromDiff()
applyDiff[1]._entry._flags = 0x3;
applyDiff[1]._entry._hasMask = 0x7;
- CPPUNIT_ASSERT(status.removeFromDiff(applyDiff, 0x7));
- CPPUNIT_ASSERT(status.diff.empty());
+ EXPECT_TRUE(status.removeFromDiff(applyDiff, 0x7));
+ EXPECT_TRUE(status.diff.empty());
}
status.diff.insert(status.diff.end(), diff.begin(), diff.end());
@@ -1424,8 +1111,8 @@ MergeHandlerTest::testRemoveFromDiff()
applyDiff[1]._entry._flags = 0x3;
applyDiff[1]._entry._hasMask = 0x6;
- CPPUNIT_ASSERT(!status.removeFromDiff(applyDiff, 0x7));
- CPPUNIT_ASSERT_EQUAL(size_t(2), status.diff.size());
+ EXPECT_FALSE(status.removeFromDiff(applyDiff, 0x7));
+ EXPECT_EQ(2, status.diff.size());
}
status.diff.clear();
@@ -1442,14 +1129,12 @@ MergeHandlerTest::testRemoveFromDiff()
applyDiff[1]._entry._flags = 0x3;
applyDiff[1]._entry._hasMask = 0x5;
- CPPUNIT_ASSERT(status.removeFromDiff(applyDiff, 0x7));
- CPPUNIT_ASSERT_EQUAL(size_t(2), status.diff.size());
+ EXPECT_TRUE(status.removeFromDiff(applyDiff, 0x7));
+ EXPECT_EQ(2, status.diff.size());
}
}
-void
-MergeHandlerTest::testRemovePutOnExistingTimestamp()
-{
+TEST_F(MergeHandlerTest, remove_put_on_existing_timestamp) {
setUpChain(BACK);
document::TestDocMan docMan;
@@ -1469,22 +1154,20 @@ MergeHandlerTest::testRemovePutOnExistingTimestamp()
applyDiff.push_back(e);
}
- std::shared_ptr<api::ApplyBucketDiffCommand> applyBucketDiffCmd(
- new api::ApplyBucketDiffCommand(_bucket, _nodes, 1024*1024));
+ auto applyBucketDiffCmd = std::make_shared<api::ApplyBucketDiffCommand>(_bucket, _nodes, 1024*1024);
applyBucketDiffCmd->getDiff() = applyDiff;
- MessageTracker::UP tracker = handler.handleApplyBucketDiff(*applyBucketDiffCmd, *_context);
+ auto tracker = handler.handleApplyBucketDiff(*applyBucketDiffCmd, *_context);
- api::ApplyBucketDiffReply::SP applyBucketDiffReply(
+ auto applyBucketDiffReply =
std::dynamic_pointer_cast<api::ApplyBucketDiffReply>(
- tracker->getReply()));
- CPPUNIT_ASSERT(applyBucketDiffReply.get());
+ tracker->getReply());
+ ASSERT_TRUE(applyBucketDiffReply.get());
api::MergeBucketCommand cmd(_bucket, _nodes, _maxTimestamp);
handler.handleMergeBucket(cmd, *_context);
- std::shared_ptr<api::GetBucketDiffCommand> getBucketDiffCmd(
- fetchSingleMessage<api::GetBucketDiffCommand>());
+ auto getBucketDiffCmd = fetchSingleMessage<api::GetBucketDiffCommand>();
// Timestamp should now be a regular remove
bool foundTimestamp = false;
@@ -1492,14 +1175,14 @@ MergeHandlerTest::testRemovePutOnExistingTimestamp()
const api::GetBucketDiffCommand::Entry& e(
getBucketDiffCmd->getDiff()[i]);
if (e._timestamp == ts) {
- CPPUNIT_ASSERT_EQUAL(
+ EXPECT_EQ(
uint16_t(MergeHandler::IN_USE | MergeHandler::DELETED),
e._flags);
foundTimestamp = true;
break;
}
}
- CPPUNIT_ASSERT(foundTimestamp);
+ EXPECT_TRUE(foundTimestamp);
}
} // storage
diff --git a/storage/src/tests/persistence/persistencequeuetest.cpp b/storage/src/tests/persistence/persistencequeuetest.cpp
index a212e65efe8..be276dd7f9d 100644
--- a/storage/src/tests/persistence/persistencequeuetest.cpp
+++ b/storage/src/tests/persistence/persistencequeuetest.cpp
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/log/log.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storageapi/message/bucket.h>
#include <tests/persistence/common/persistenceproviderwrapper.h>
#include <vespa/persistence/dummyimpl/dummypersistence.h>
@@ -12,31 +11,16 @@
LOG_SETUP(".persistencequeuetest");
using document::test::makeDocumentBucket;
+using namespace ::testing;
namespace storage {
class PersistenceQueueTest : public FileStorTestFixture {
public:
- void testFetchNextUnlockedMessageIfBucketLocked();
- void shared_locked_operations_allow_concurrent_bucket_access();
- void exclusive_locked_operation_not_started_if_shared_op_active();
- void shared_locked_operation_not_started_if_exclusive_op_active();
- void exclusive_locked_operation_not_started_if_exclusive_op_active();
- void operation_batching_not_allowed_across_different_lock_modes();
-
std::shared_ptr<api::StorageMessage> createPut(uint64_t bucket, uint64_t docIdx);
std::shared_ptr<api::StorageMessage> createGet(uint64_t bucket) const;
- void setUp() override;
-
- CPPUNIT_TEST_SUITE(PersistenceQueueTest);
- CPPUNIT_TEST(testFetchNextUnlockedMessageIfBucketLocked);
- CPPUNIT_TEST(shared_locked_operations_allow_concurrent_bucket_access);
- CPPUNIT_TEST(exclusive_locked_operation_not_started_if_shared_op_active);
- CPPUNIT_TEST(shared_locked_operation_not_started_if_exclusive_op_active);
- CPPUNIT_TEST(exclusive_locked_operation_not_started_if_exclusive_op_active);
- CPPUNIT_TEST(operation_batching_not_allowed_across_different_lock_modes);
- CPPUNIT_TEST_SUITE_END();
+ void SetUp() override;
struct Fixture {
FileStorTestFixture& parent;
@@ -55,8 +39,6 @@ public:
static constexpr uint16_t _disk = 0;
};
-CPPUNIT_TEST_SUITE_REGISTRATION(PersistenceQueueTest);
-
PersistenceQueueTest::Fixture::Fixture(FileStorTestFixture& parent_)
: parent(parent_),
top(),
@@ -82,7 +64,7 @@ PersistenceQueueTest::Fixture::Fixture(FileStorTestFixture& parent_)
PersistenceQueueTest::Fixture::~Fixture() = default;
-void PersistenceQueueTest::setUp() {
+void PersistenceQueueTest::SetUp() {
setupPersistenceThreads(1);
_node->setPersistenceProvider(std::make_unique<spi::dummy::DummyPersistence>(_node->getTypeRepo(), 1));
}
@@ -103,7 +85,7 @@ std::shared_ptr<api::StorageMessage> PersistenceQueueTest::createGet(uint64_t bu
return cmd;
}
-void PersistenceQueueTest::testFetchNextUnlockedMessageIfBucketLocked() {
+TEST_F(PersistenceQueueTest, fetch_next_unlocked_message_if_bucket_locked) {
Fixture f(*this);
// Send 2 puts, 2 to the first bucket, 1 to the second. Calling
// getNextMessage 2 times should then return a lock on the first bucket,
@@ -114,91 +96,91 @@ void PersistenceQueueTest::testFetchNextUnlockedMessageIfBucketLocked() {
f.filestorHandler->schedule(createPut(5432, 0), _disk);
auto lock0 = f.filestorHandler->getNextMessage(_disk, f.stripeId);
- CPPUNIT_ASSERT(lock0.first.get());
- CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 1234),
- dynamic_cast<api::PutCommand&>(*lock0.second).getBucketId());
+ ASSERT_TRUE(lock0.first.get());
+ EXPECT_EQ(document::BucketId(16, 1234),
+ dynamic_cast<api::PutCommand&>(*lock0.second).getBucketId());
auto lock1 = f.filestorHandler->getNextMessage(_disk, f.stripeId);
- CPPUNIT_ASSERT(lock1.first.get());
- CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 5432),
- dynamic_cast<api::PutCommand&>(*lock1.second).getBucketId());
+ ASSERT_TRUE(lock1.first.get());
+ EXPECT_EQ(document::BucketId(16, 5432),
+ dynamic_cast<api::PutCommand&>(*lock1.second).getBucketId());
}
-void PersistenceQueueTest::shared_locked_operations_allow_concurrent_bucket_access() {
+TEST_F(PersistenceQueueTest, shared_locked_operations_allow_concurrent_bucket_access) {
Fixture f(*this);
f.filestorHandler->schedule(createGet(1234), _disk);
f.filestorHandler->schedule(createGet(1234), _disk);
auto lock0 = f.filestorHandler->getNextMessage(_disk, f.stripeId);
- CPPUNIT_ASSERT(lock0.first.get());
- CPPUNIT_ASSERT_EQUAL(api::LockingRequirements::Shared, lock0.first->lockingRequirements());
+ ASSERT_TRUE(lock0.first.get());
+ EXPECT_EQ(api::LockingRequirements::Shared, lock0.first->lockingRequirements());
// Even though we already have a lock on the bucket, Gets allow shared locking and we
// should therefore be able to get another lock.
auto lock1 = f.filestorHandler->getNextMessage(_disk, f.stripeId);
- CPPUNIT_ASSERT(lock1.first.get());
- CPPUNIT_ASSERT_EQUAL(api::LockingRequirements::Shared, lock1.first->lockingRequirements());
+ ASSERT_TRUE(lock1.first.get());
+ EXPECT_EQ(api::LockingRequirements::Shared, lock1.first->lockingRequirements());
}
-void PersistenceQueueTest::exclusive_locked_operation_not_started_if_shared_op_active() {
+TEST_F(PersistenceQueueTest, exclusive_locked_operation_not_started_if_shared_op_active) {
Fixture f(*this);
f.filestorHandler->schedule(createGet(1234), _disk);
f.filestorHandler->schedule(createPut(1234, 0), _disk);
auto lock0 = f.filestorHandler->getNextMessage(_disk, f.stripeId);
- CPPUNIT_ASSERT(lock0.first.get());
- CPPUNIT_ASSERT_EQUAL(api::LockingRequirements::Shared, lock0.first->lockingRequirements());
+ ASSERT_TRUE(lock0.first.get());
+ EXPECT_EQ(api::LockingRequirements::Shared, lock0.first->lockingRequirements());
// Expected to time out
auto lock1 = f.filestorHandler->getNextMessage(_disk, f.stripeId);
- CPPUNIT_ASSERT(!lock1.first.get());
+ ASSERT_FALSE(lock1.first.get());
}
-void PersistenceQueueTest::shared_locked_operation_not_started_if_exclusive_op_active() {
+TEST_F(PersistenceQueueTest, shared_locked_operation_not_started_if_exclusive_op_active) {
Fixture f(*this);
f.filestorHandler->schedule(createPut(1234, 0), _disk);
f.filestorHandler->schedule(createGet(1234), _disk);
auto lock0 = f.filestorHandler->getNextMessage(_disk, f.stripeId);
- CPPUNIT_ASSERT(lock0.first.get());
- CPPUNIT_ASSERT_EQUAL(api::LockingRequirements::Exclusive, lock0.first->lockingRequirements());
+ ASSERT_TRUE(lock0.first.get());
+ EXPECT_EQ(api::LockingRequirements::Exclusive, lock0.first->lockingRequirements());
// Expected to time out
auto lock1 = f.filestorHandler->getNextMessage(_disk, f.stripeId);
- CPPUNIT_ASSERT(!lock1.first.get());
+ ASSERT_FALSE(lock1.first.get());
}
-void PersistenceQueueTest::exclusive_locked_operation_not_started_if_exclusive_op_active() {
+TEST_F(PersistenceQueueTest, exclusive_locked_operation_not_started_if_exclusive_op_active) {
Fixture f(*this);
f.filestorHandler->schedule(createPut(1234, 0), _disk);
f.filestorHandler->schedule(createPut(1234, 0), _disk);
auto lock0 = f.filestorHandler->getNextMessage(_disk, f.stripeId);
- CPPUNIT_ASSERT(lock0.first.get());
- CPPUNIT_ASSERT_EQUAL(api::LockingRequirements::Exclusive, lock0.first->lockingRequirements());
+ ASSERT_TRUE(lock0.first.get());
+ EXPECT_EQ(api::LockingRequirements::Exclusive, lock0.first->lockingRequirements());
// Expected to time out
auto lock1 = f.filestorHandler->getNextMessage(_disk, f.stripeId);
- CPPUNIT_ASSERT(!lock1.first.get());
+ ASSERT_FALSE(lock1.first.get());
}
-void PersistenceQueueTest::operation_batching_not_allowed_across_different_lock_modes() {
+TEST_F(PersistenceQueueTest, operation_batching_not_allowed_across_different_lock_modes) {
Fixture f(*this);
f.filestorHandler->schedule(createPut(1234, 0), _disk);
f.filestorHandler->schedule(createGet(1234), _disk);
auto lock0 = f.filestorHandler->getNextMessage(_disk, f.stripeId);
- CPPUNIT_ASSERT(lock0.first);
- CPPUNIT_ASSERT(lock0.second);
- CPPUNIT_ASSERT_EQUAL(api::LockingRequirements::Exclusive, lock0.first->lockingRequirements());
+ ASSERT_TRUE(lock0.first);
+ ASSERT_TRUE(lock0.second);
+ EXPECT_EQ(api::LockingRequirements::Exclusive, lock0.first->lockingRequirements());
f.filestorHandler->getNextMessage(_disk, f.stripeId, lock0);
- CPPUNIT_ASSERT(!lock0.second);
+ ASSERT_FALSE(lock0.second);
}
} // namespace storage
diff --git a/storage/src/tests/persistence/persistencetestutils.cpp b/storage/src/tests/persistence/persistencetestutils.cpp
index 327deaf7e82..e32fc056413 100644
--- a/storage/src/tests/persistence/persistencetestutils.cpp
+++ b/storage/src/tests/persistence/persistencetestutils.cpp
@@ -64,13 +64,8 @@ PersistenceTestEnvironment::PersistenceTestEnvironment(DiskCount numDisks, const
}
}
-PersistenceTestUtils::PersistenceTestUtils()
-{
-}
-
-PersistenceTestUtils::~PersistenceTestUtils()
-{
-}
+PersistenceTestUtils::PersistenceTestUtils() = default;
+PersistenceTestUtils::~PersistenceTestUtils() = default;
std::string
PersistenceTestUtils::dumpBucket(const document::BucketId& bid, uint16_t disk) {
diff --git a/storage/src/tests/persistence/persistencetestutils.h b/storage/src/tests/persistence/persistencetestutils.h
index 8f883115e9d..e418765ecac 100644
--- a/storage/src/tests/persistence/persistencetestutils.h
+++ b/storage/src/tests/persistence/persistencetestutils.h
@@ -11,6 +11,7 @@
#include <vespa/persistence/spi/persistenceprovider.h>
#include <vespa/persistence/dummyimpl/dummypersistence.h>
#include <vespa/document/base/testdocman.h>
+#include <vespa/vespalib/gtest/gtest.h>
namespace storage {
@@ -34,7 +35,7 @@ struct PersistenceTestEnvironment {
std::vector<std::unique_ptr<PersistenceUtil> > _diskEnvs;
};
-class PersistenceTestUtils : public CppUnit::TestFixture {
+class PersistenceTestUtils : public testing::Test {
public:
std::unique_ptr<PersistenceTestEnvironment> _env;
@@ -50,7 +51,7 @@ public:
void setupDisks(uint32_t disks);
- void tearDown() override {
+ void TearDown() override {
_env.reset();
}
@@ -202,7 +203,7 @@ public:
class SingleDiskPersistenceTestUtils : public PersistenceTestUtils
{
public:
- void setUp() override {
+ void SetUp() override {
setupDisks(1);
}
};
diff --git a/storage/src/tests/persistence/persistencethread_splittest.cpp b/storage/src/tests/persistence/persistencethread_splittest.cpp
index 9c10b9987e0..ea7dce96e0c 100644
--- a/storage/src/tests/persistence/persistencethread_splittest.cpp
+++ b/storage/src/tests/persistence/persistencethread_splittest.cpp
@@ -1,6 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storage/persistence/persistencethread.h>
#include <vespa/storageapi/message/bucketsplitting.h>
#include <vespa/persistence/spi/test.h>
@@ -9,14 +8,14 @@
using storage::spi::test::makeSpiBucket;
using document::test::makeDocumentBucket;
+using namespace ::testing;
namespace storage {
namespace {
- spi::LoadType defaultLoadType(0, "default");
+spi::LoadType defaultLoadType(0, "default");
}
-struct PersistenceThread_SplitTest : public SingleDiskPersistenceTestUtils
-{
+struct PersistenceThreadSplitTest : public SingleDiskPersistenceTestUtils {
enum SplitCase {
TOO_MANY_DOCS_SPLIT_ONCE, // Only one split needed to divide
TOO_MANY_DOCS_SPLIT_MULTIPLE_BITS, // Multiple bits needed to divide
@@ -26,7 +25,7 @@ struct PersistenceThread_SplitTest : public SingleDiskPersistenceTestUtils
TOO_LARGE_DOCS_SPLIT_MULTIPLE_BITS,
TOO_LARGE_DOCS_SINGLE_DOC, // Cannot split single doc even if too large
TOO_LARGE_DOCS_ACTUALLY_NOT, // Other copy is too large, not this one
- // Need to split to X bits to get in line with other copy or distr.
+ // Need to split to X bits to get in line with other copy or distr.
SPLIT_TOO_LITTLE_SINGLE_SPLIT, // Split all to one target
SPLIT_TOO_LITTLE_JUST_RIGHT, // Just manage to split in two at that lvl
SPLIT_TOO_LITTLE_SPLIT_TOWARDS_ENOUGH, // Has to split shorter
@@ -35,54 +34,60 @@ struct PersistenceThread_SplitTest : public SingleDiskPersistenceTestUtils
};
void doTest(SplitCase);
+};
- void testTooManyDocsSplitOnce()
- { doTest(TOO_MANY_DOCS_SPLIT_ONCE); }
- void testTooManyDocsSplitMulti()
- { doTest(TOO_MANY_DOCS_SPLIT_MULTIPLE_BITS); }
- void testTooManyDocsActuallyNot()
- { doTest(TOO_MANY_DOCS_ACTUALLY_NOT); }
- void testTooLargeDocsSplitOnce()
- { doTest(TOO_LARGE_DOCS_SPLIT_ONCE); }
- void testTooLargeDocsSplitMulti()
- { doTest(TOO_LARGE_DOCS_SPLIT_MULTIPLE_BITS); }
- void testTooLargeDocsSingleDoc()
- { doTest(TOO_LARGE_DOCS_SINGLE_DOC); }
- void testTooLargeDocsActuallyNot()
- { doTest(TOO_LARGE_DOCS_ACTUALLY_NOT); }
- void testSplitTooLittleSingleSplit()
- { doTest(SPLIT_TOO_LITTLE_SINGLE_SPLIT); }
- void testSplitTooLittleJustRight()
- { doTest(SPLIT_TOO_LITTLE_JUST_RIGHT); }
- void testSplitTooLittleSplitTowardsEnough()
- { doTest(SPLIT_TOO_LITTLE_SPLIT_TOWARDS_ENOUGH); }
- void testInconsistentSplitHasOneBitFallbackWhen1Doc() {
- doTest(SPLIT_INCONSISTENT_1_DOC);
- }
- void testInconsistentSplitHasOneBitFallbackWhenAllDocsHaveSameGid() {
- doTest(SPLIT_INCONSISTENT_ALL_DOCS_SAME_GID);
- }
+TEST_F(PersistenceThreadSplitTest, split_single_bit_for_too_many_docs) {
+ doTest(TOO_MANY_DOCS_SPLIT_ONCE);
+}
- CPPUNIT_TEST_SUITE(PersistenceThread_SplitTest);
- CPPUNIT_TEST(testTooManyDocsSplitOnce);
- CPPUNIT_TEST(testTooManyDocsSplitMulti);
- CPPUNIT_TEST(testTooManyDocsActuallyNot);
- CPPUNIT_TEST(testTooLargeDocsSplitOnce);
- CPPUNIT_TEST(testTooLargeDocsSplitMulti);
- CPPUNIT_TEST(testTooLargeDocsSingleDoc);
- CPPUNIT_TEST(testTooLargeDocsActuallyNot);
- CPPUNIT_TEST(testSplitTooLittleSingleSplit);
- CPPUNIT_TEST(testSplitTooLittleJustRight);
- CPPUNIT_TEST(testSplitTooLittleSplitTowardsEnough);
- CPPUNIT_TEST(testInconsistentSplitHasOneBitFallbackWhen1Doc);
- CPPUNIT_TEST(testInconsistentSplitHasOneBitFallbackWhenAllDocsHaveSameGid);
- CPPUNIT_TEST_SUITE_END();
-};
+TEST_F(PersistenceThreadSplitTest, bucket_split_requires_multiple_bit_increase_for_too_many_docs) {
+ doTest(TOO_MANY_DOCS_SPLIT_MULTIPLE_BITS);
+}
-CPPUNIT_TEST_SUITE_REGISTRATION(PersistenceThread_SplitTest);
+TEST_F(PersistenceThreadSplitTest, false_positive_too_many_docs) {
+ doTest(TOO_MANY_DOCS_ACTUALLY_NOT);
+}
+
+TEST_F(PersistenceThreadSplitTest, split_single_bit_for_too_large_docs) {
+ doTest(TOO_LARGE_DOCS_SPLIT_ONCE);
+}
+
+TEST_F(PersistenceThreadSplitTest, bucket_split_requires_multiple_bit_increase_for_too_large_docs) {
+ doTest(TOO_LARGE_DOCS_SPLIT_MULTIPLE_BITS);
+}
+
+TEST_F(PersistenceThreadSplitTest, cannot_split_bucket_with_single_too_large_document) {
+ doTest(TOO_LARGE_DOCS_SINGLE_DOC);
+}
+
+TEST_F(PersistenceThreadSplitTest, false_positive_too_large_docs) {
+ doTest(TOO_LARGE_DOCS_ACTUALLY_NOT);
+}
+
+TEST_F(PersistenceThreadSplitTest, request_can_specify_minimum_split_bit_count) {
+ doTest(SPLIT_TOO_LITTLE_SINGLE_SPLIT);
+}
+
+// TODO verify that name actually matches what test does...
+TEST_F(PersistenceThreadSplitTest, can_split_into_2_targets_at_max_split_level) {
+ doTest(SPLIT_TOO_LITTLE_JUST_RIGHT);
+}
+
+// TODO verify that name actually matches what test does...
+TEST_F(PersistenceThreadSplitTest, actual_split_level_can_be_lower_than_max_level) {
+ doTest(SPLIT_TOO_LITTLE_SPLIT_TOWARDS_ENOUGH);
+}
+
+TEST_F(PersistenceThreadSplitTest, inconsistent_split_has_one_bit_fallback_when_1_doc) {
+ doTest(SPLIT_INCONSISTENT_1_DOC);
+}
+
+TEST_F(PersistenceThreadSplitTest, inconsistent_split_has_one_bit_fallback_when_all_docs_have_same_gid) {
+ doTest(SPLIT_INCONSISTENT_ALL_DOCS_SAME_GID);
+}
void
-PersistenceThread_SplitTest::doTest(SplitCase splitCase)
+PersistenceThreadSplitTest::doTest(SplitCase splitCase)
{
uint32_t maxCount = 4;
uint32_t maxSize = 1000 * 1000;
@@ -119,8 +124,8 @@ PersistenceThread_SplitTest::doTest(SplitCase splitCase)
resultSplitLevel = 3;
break;
case TOO_LARGE_DOCS_SINGLE_DOC:
- // It is possible for bucket to be inconsistent being big enough
- // to split in other copy but this copy has only 1 too big doc.
+ // It is possible for bucket to be inconsistent being big enough
+ // to split in other copy but this copy has only 1 too big doc.
docCount = 1;
docSize = 3000 * 1000;
splitLevelToDivide = 3;
@@ -176,7 +181,7 @@ PersistenceThread_SplitTest::doTest(SplitCase splitCase)
}
uint64_t location = 0;
- uint64_t splitMask = 1 << (splitLevelToDivide - 1);
+ uint64_t splitMask = 1ULL << (splitLevelToDivide - 1);
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
spi::Bucket bucket(makeSpiBucket(document::BucketId(currentSplitLevel, 1)));
@@ -201,8 +206,7 @@ PersistenceThread_SplitTest::doTest(SplitCase splitCase)
std::unique_ptr<PersistenceThread> thread(createPersistenceThread(0));
getNode().getStateUpdater().setClusterState(
- lib::ClusterState::CSP(
- new lib::ClusterState("distributor:1 storage:1")));
+ std::make_shared<lib::ClusterState>("distributor:1 storage:1"));
api::SplitBucketCommand cmd(makeDocumentBucket(document::BucketId(currentSplitLevel, 1)));
cmd.setMaxSplitBits(maxBits);
cmd.setMinSplitBits(minBits);
@@ -211,10 +215,11 @@ PersistenceThread_SplitTest::doTest(SplitCase splitCase)
cmd.setSourceIndex(0);
MessageTracker::UP result(thread->handleSplitBucket(cmd));
api::ReturnCode code(result->getResult());
- CPPUNIT_ASSERT_EQUAL(error, code);
- if (!code.success()) return;
- api::SplitBucketReply& reply(
- dynamic_cast<api::SplitBucketReply&>(*result->getReply()));
+ EXPECT_EQ(error, code);
+ if (!code.success()) {
+ return;
+ }
+ auto& reply = dynamic_cast<api::SplitBucketReply&>(*result->getReply());
std::set<std::string> expected;
for (uint32_t i=0; i<resultBuckets; ++i) {
document::BucketId b(resultSplitLevel,
@@ -230,7 +235,7 @@ PersistenceThread_SplitTest::doTest(SplitCase splitCase)
ost << b << " - " << b.getUsedBits();
actual.insert(ost.str());
}
- CPPUNIT_ASSERT_EQUAL(expected, actual);
+ EXPECT_EQ(expected, actual);
}
} // storage
diff --git a/storage/src/tests/persistence/processalltest.cpp b/storage/src/tests/persistence/processalltest.cpp
index 11754d50961..2bf7f7c3855 100644
--- a/storage/src/tests/persistence/processalltest.cpp
+++ b/storage/src/tests/persistence/processalltest.cpp
@@ -2,44 +2,20 @@
#include <vespa/document/base/testdocman.h>
#include <vespa/storage/persistence/processallhandler.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storage/persistence/messages.h>
#include <vespa/documentapi/loadtypes/loadtype.h>
#include <tests/persistence/persistencetestutils.h>
#include <vespa/document/test/make_document_bucket.h>
using document::test::makeDocumentBucket;
+using namespace ::testing;
namespace storage {
-class ProcessAllHandlerTest : public SingleDiskPersistenceTestUtils
-{
- CPPUNIT_TEST_SUITE(ProcessAllHandlerTest);
- CPPUNIT_TEST(testRemoveLocation);
- CPPUNIT_TEST(testRemoveLocationDocumentSubset);
- CPPUNIT_TEST(testRemoveLocationUnknownDocType);
- CPPUNIT_TEST(testRemoveLocationBogusSelection);
- CPPUNIT_TEST(testStat);
- CPPUNIT_TEST(testStatWithRemove);
- CPPUNIT_TEST(testStatWholeBucket);
- CPPUNIT_TEST_SUITE_END();
-
-public:
- void testRemoveLocation();
- void testRemoveLocationDocumentSubset();
- void testRemoveLocationUnknownDocType();
- void testRemoveLocationEmptySelection();
- void testRemoveLocationBogusSelection();
- void testStat();
- void testStatWithRemove();
- void testStatWholeBucket();
+class ProcessAllHandlerTest : public SingleDiskPersistenceTestUtils {
};
-CPPUNIT_TEST_SUITE_REGISTRATION(ProcessAllHandlerTest);
-
-void
-ProcessAllHandlerTest::testRemoveLocation()
-{
+TEST_F(ProcessAllHandlerTest, remove_location) {
document::BucketId bucketId(16, 4);
doPut(4, spi::Timestamp(1234));
doPut(4, spi::Timestamp(2345));
@@ -49,16 +25,12 @@ ProcessAllHandlerTest::testRemoveLocation()
spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
handler.handleRemoveLocation(removeLocation, context);
- CPPUNIT_ASSERT_EQUAL(
- std::string(
- "DocEntry(1234, 1, id:mail:testdoctype1:n=4:3619.html)\n"
- "DocEntry(2345, 1, id:mail:testdoctype1:n=4:4008.html)\n"),
- dumpBucket(bucketId));
+ EXPECT_EQ("DocEntry(1234, 1, id:mail:testdoctype1:n=4:3619.html)\n"
+ "DocEntry(2345, 1, id:mail:testdoctype1:n=4:4008.html)\n",
+ dumpBucket(bucketId));
}
-void
-ProcessAllHandlerTest::testRemoveLocationDocumentSubset()
-{
+TEST_F(ProcessAllHandlerTest, remove_location_document_subset) {
document::BucketId bucketId(16, 4);
ProcessAllHandler handler(getEnv(), getPersistenceProvider());
@@ -74,70 +46,49 @@ ProcessAllHandlerTest::testRemoveLocationDocumentSubset()
spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
handler.handleRemoveLocation(removeLocation, context);
- CPPUNIT_ASSERT_EQUAL(
- std::string("DocEntry(100, 1, id:mail:testdoctype1:n=4:3619.html)\n"
- "DocEntry(101, 0, Doc(id:mail:testdoctype1:n=4:33113.html))\n"
- "DocEntry(102, 1, id:mail:testdoctype1:n=4:62608.html)\n"
- "DocEntry(103, 0, Doc(id:mail:testdoctype1:n=4:26566.html))\n"
- "DocEntry(104, 1, id:mail:testdoctype1:n=4:56061.html)\n"
- "DocEntry(105, 0, Doc(id:mail:testdoctype1:n=4:20019.html))\n"
- "DocEntry(106, 1, id:mail:testdoctype1:n=4:49514.html)\n"
- "DocEntry(107, 0, Doc(id:mail:testdoctype1:n=4:13472.html))\n"
- "DocEntry(108, 1, id:mail:testdoctype1:n=4:42967.html)\n"
- "DocEntry(109, 0, Doc(id:mail:testdoctype1:n=4:6925.html))\n"),
- dumpBucket(bucketId));
+ EXPECT_EQ("DocEntry(100, 1, id:mail:testdoctype1:n=4:3619.html)\n"
+ "DocEntry(101, 0, Doc(id:mail:testdoctype1:n=4:33113.html))\n"
+ "DocEntry(102, 1, id:mail:testdoctype1:n=4:62608.html)\n"
+ "DocEntry(103, 0, Doc(id:mail:testdoctype1:n=4:26566.html))\n"
+ "DocEntry(104, 1, id:mail:testdoctype1:n=4:56061.html)\n"
+ "DocEntry(105, 0, Doc(id:mail:testdoctype1:n=4:20019.html))\n"
+ "DocEntry(106, 1, id:mail:testdoctype1:n=4:49514.html)\n"
+ "DocEntry(107, 0, Doc(id:mail:testdoctype1:n=4:13472.html))\n"
+ "DocEntry(108, 1, id:mail:testdoctype1:n=4:42967.html)\n"
+ "DocEntry(109, 0, Doc(id:mail:testdoctype1:n=4:6925.html))\n",
+ dumpBucket(bucketId));
}
-void
-ProcessAllHandlerTest::testRemoveLocationUnknownDocType()
-{
+TEST_F(ProcessAllHandlerTest, remove_location_throws_exception_on_unknown_doc_type) {
document::BucketId bucketId(16, 4);
doPut(4, spi::Timestamp(1234));
api::RemoveLocationCommand
removeLocation("unknowndoctype.headerval % 2 == 0", makeDocumentBucket(bucketId));
- bool gotException = false;
- try {
- ProcessAllHandler handler(getEnv(), getPersistenceProvider());
- spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
- handler.handleRemoveLocation(removeLocation, context);
- } catch (...) {
- gotException = true;
- }
- CPPUNIT_ASSERT(gotException);
+ ProcessAllHandler handler(getEnv(), getPersistenceProvider());
+ spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
+ ASSERT_THROW(handler.handleRemoveLocation(removeLocation, context), std::exception);
- CPPUNIT_ASSERT_EQUAL(
- std::string("DocEntry(1234, 0, Doc(id:mail:testdoctype1:n=4:3619.html))\n"),
- dumpBucket(bucketId));
+ EXPECT_EQ("DocEntry(1234, 0, Doc(id:mail:testdoctype1:n=4:3619.html))\n",
+ dumpBucket(bucketId));
}
-void
-ProcessAllHandlerTest::testRemoveLocationBogusSelection()
-{
+TEST_F(ProcessAllHandlerTest, remove_location_throws_exception_on_bogus_selection) {
document::BucketId bucketId(16, 4);
doPut(4, spi::Timestamp(1234));
api::RemoveLocationCommand removeLocation("id.bogus != badgers", makeDocumentBucket(bucketId));
- bool gotException = false;
- try {
- ProcessAllHandler handler(getEnv(), getPersistenceProvider());
- spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
- handler.handleRemoveLocation(removeLocation, context);
- } catch (...) {
- gotException = true;
- }
- CPPUNIT_ASSERT(gotException);
+ ProcessAllHandler handler(getEnv(), getPersistenceProvider());
+ spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
+ ASSERT_THROW(handler.handleRemoveLocation(removeLocation, context), std::exception);
- CPPUNIT_ASSERT_EQUAL(
- std::string("DocEntry(1234, 0, Doc(id:mail:testdoctype1:n=4:3619.html))\n"),
- dumpBucket(bucketId));
+ EXPECT_EQ("DocEntry(1234, 0, Doc(id:mail:testdoctype1:n=4:3619.html))\n",
+ dumpBucket(bucketId));
}
-void
-ProcessAllHandlerTest::testStat()
-{
+TEST_F(ProcessAllHandlerTest, bucket_stat_request_returns_document_metadata_matching_selection) {
document::BucketId bucketId(16, 4);
ProcessAllHandler handler(getEnv(), getPersistenceProvider());
@@ -153,10 +104,9 @@ ProcessAllHandlerTest::testStat()
spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
MessageTracker::UP tracker = handler.handleStatBucket(statBucket, context);
- CPPUNIT_ASSERT(tracker->getReply().get());
- api::StatBucketReply& reply =
- dynamic_cast<api::StatBucketReply&>(*tracker->getReply().get());
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::OK, reply.getResult().getResult());
+ ASSERT_TRUE(tracker->getReply().get());
+ auto& reply = dynamic_cast<api::StatBucketReply&>(*tracker->getReply().get());
+ EXPECT_EQ(api::ReturnCode::OK, reply.getResult().getResult());
vespalib::string expected =
"Persistence bucket BucketId(0x4000000000000004), partition 0\n"
@@ -167,12 +117,10 @@ ProcessAllHandlerTest::testStat()
" Timestamp: 108, Doc(id:mail:testdoctype1:n=4:42967.html), gid(0x04000000f19ece1668e6de48), size: 206\n";
- CPPUNIT_ASSERT_EQUAL(expected, reply.getResults());
+ EXPECT_EQ(expected, reply.getResults());
}
-void
-ProcessAllHandlerTest::testStatWithRemove()
-{
+TEST_F(ProcessAllHandlerTest, stat_bucket_request_can_return_removed_entries) {
document::BucketId bucketId(16, 4);
ProcessAllHandler handler(getEnv(), getPersistenceProvider());
@@ -191,10 +139,9 @@ ProcessAllHandlerTest::testStatWithRemove()
spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
MessageTracker::UP tracker = handler.handleStatBucket(statBucket, context);
- CPPUNIT_ASSERT(tracker->getReply().get());
- api::StatBucketReply& reply =
- dynamic_cast<api::StatBucketReply&>(*tracker->getReply().get());
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::OK, reply.getResult().getResult());
+ ASSERT_TRUE(tracker->getReply().get());
+ auto& reply = dynamic_cast<api::StatBucketReply&>(*tracker->getReply().get());
+ EXPECT_EQ(api::ReturnCode::OK, reply.getResult().getResult());
vespalib::string expected =
"Persistence bucket BucketId(0x4000000000000004), partition 0\n"
@@ -219,13 +166,11 @@ ProcessAllHandlerTest::testStatWithRemove()
" Timestamp: 208, id:mail:testdoctype1:n=4:42967.html, gid(0x04000000f19ece1668e6de48) (remove)\n"
" Timestamp: 209, id:mail:testdoctype1:n=4:6925.html, gid(0x04000000667c0b3cada830be) (remove)\n";
- CPPUNIT_ASSERT_EQUAL(expected, reply.getResults());
+ EXPECT_EQ(expected, reply.getResults());
}
-
-void
-ProcessAllHandlerTest::testStatWholeBucket()
-{
+// TODO is this test necessary? It seems not to test anything beyond the above tests.
+TEST_F(ProcessAllHandlerTest, bucket_stat_request_can_return_all_put_entries_in_bucket) {
document::BucketId bucketId(16, 4);
ProcessAllHandler handler(getEnv(), getPersistenceProvider());
@@ -240,10 +185,9 @@ ProcessAllHandlerTest::testStatWholeBucket()
spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
MessageTracker::UP tracker = handler.handleStatBucket(statBucket, context);
- CPPUNIT_ASSERT(tracker->getReply().get());
- api::StatBucketReply& reply =
- dynamic_cast<api::StatBucketReply&>(*tracker->getReply().get());
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::OK, reply.getResult().getResult());
+ ASSERT_TRUE(tracker->getReply().get());
+ auto& reply = dynamic_cast<api::StatBucketReply&>(*tracker->getReply().get());
+ EXPECT_EQ(api::ReturnCode::OK, reply.getResult().getResult());
vespalib::string expected =
"Persistence bucket BucketId(0x4000000000000004), partition 0\n"
@@ -258,7 +202,7 @@ ProcessAllHandlerTest::testStatWholeBucket()
" Timestamp: 108, Doc(id:mail:testdoctype1:n=4:42967.html), gid(0x04000000f19ece1668e6de48), size: 206\n"
" Timestamp: 109, Doc(id:mail:testdoctype1:n=4:6925.html), gid(0x04000000667c0b3cada830be), size: 130\n";
- CPPUNIT_ASSERT_EQUAL(expected, reply.getResults());
+ EXPECT_EQ(expected, reply.getResults());
}
}
diff --git a/storage/src/tests/persistence/provider_error_wrapper_test.cpp b/storage/src/tests/persistence/provider_error_wrapper_test.cpp
index b6b87b33666..36238abb238 100644
--- a/storage/src/tests/persistence/provider_error_wrapper_test.cpp
+++ b/storage/src/tests/persistence/provider_error_wrapper_test.cpp
@@ -1,6 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/persistence/spi/test.h>
#include <tests/persistence/persistencetestutils.h>
#include <tests/persistence/common/persistenceproviderwrapper.h>
@@ -9,25 +8,9 @@ using storage::spi::test::makeSpiBucket;
namespace storage {
-class ProviderErrorWrapperTest : public SingleDiskPersistenceTestUtils {
-public:
- CPPUNIT_TEST_SUITE(ProviderErrorWrapperTest);
- CPPUNIT_TEST(fatal_error_invokes_listener);
- CPPUNIT_TEST(resource_exhaustion_error_invokes_listener);
- CPPUNIT_TEST(listener_not_invoked_on_success);
- CPPUNIT_TEST(listener_not_invoked_on_regular_errors);
- CPPUNIT_TEST(multiple_listeners_can_be_registered);
- CPPUNIT_TEST_SUITE_END();
-
- void fatal_error_invokes_listener();
- void resource_exhaustion_error_invokes_listener();
- void listener_not_invoked_on_success();
- void listener_not_invoked_on_regular_errors();
- void multiple_listeners_can_be_registered();
+struct ProviderErrorWrapperTest : SingleDiskPersistenceTestUtils {
};
-CPPUNIT_TEST_SUITE_REGISTRATION(ProviderErrorWrapperTest);
-
namespace {
struct MockErrorListener : ProviderErrorListener {
@@ -70,61 +53,61 @@ struct Fixture {
void check_no_listener_invoked_for_error(MockErrorListener& listener, spi::Result::ErrorType error) {
providerWrapper.setResult(spi::Result(error, "beep boop"));
perform_spi_operation();
- CPPUNIT_ASSERT(!listener._seen_fatal_error);
- CPPUNIT_ASSERT(!listener._seen_resource_exhaustion_error);
+ EXPECT_FALSE(listener._seen_fatal_error);
+ EXPECT_FALSE(listener._seen_resource_exhaustion_error);
}
};
}
-void ProviderErrorWrapperTest::fatal_error_invokes_listener() {
+TEST_F(ProviderErrorWrapperTest, fatal_error_invokes_listener) {
Fixture f(getPersistenceProvider());
auto listener = std::make_shared<MockErrorListener>();
f.errorWrapper.register_error_listener(listener);
f.providerWrapper.setResult(spi::Result(spi::Result::FATAL_ERROR, "eject! eject!"));
- CPPUNIT_ASSERT(!listener->_seen_fatal_error);
+ EXPECT_FALSE(listener->_seen_fatal_error);
f.perform_spi_operation();
- CPPUNIT_ASSERT(!listener->_seen_resource_exhaustion_error);
- CPPUNIT_ASSERT(listener->_seen_fatal_error);
- CPPUNIT_ASSERT_EQUAL(vespalib::string("eject! eject!"), listener->_fatal_error);
+ EXPECT_FALSE(listener->_seen_resource_exhaustion_error);
+ EXPECT_TRUE(listener->_seen_fatal_error);
+ EXPECT_EQ(vespalib::string("eject! eject!"), listener->_fatal_error);
}
-void ProviderErrorWrapperTest::resource_exhaustion_error_invokes_listener() {
+TEST_F(ProviderErrorWrapperTest, resource_exhaustion_error_invokes_listener) {
Fixture f(getPersistenceProvider());
auto listener = std::make_shared<MockErrorListener>();
f.errorWrapper.register_error_listener(listener);
f.providerWrapper.setResult(spi::Result(spi::Result::RESOURCE_EXHAUSTED, "out of juice"));
- CPPUNIT_ASSERT(!listener->_seen_resource_exhaustion_error);
+ EXPECT_FALSE(listener->_seen_resource_exhaustion_error);
f.perform_spi_operation();
- CPPUNIT_ASSERT(!listener->_seen_fatal_error);
- CPPUNIT_ASSERT(listener->_seen_resource_exhaustion_error);
- CPPUNIT_ASSERT_EQUAL(vespalib::string("out of juice"), listener->_resource_exhaustion_error);
+ EXPECT_FALSE(listener->_seen_fatal_error);
+ EXPECT_TRUE(listener->_seen_resource_exhaustion_error);
+ EXPECT_EQ(vespalib::string("out of juice"), listener->_resource_exhaustion_error);
}
-void ProviderErrorWrapperTest::listener_not_invoked_on_success() {
+TEST_F(ProviderErrorWrapperTest, listener_not_invoked_on_success) {
Fixture f(getPersistenceProvider());
auto listener = std::make_shared<MockErrorListener>();
f.errorWrapper.register_error_listener(listener);
f.perform_spi_operation();
- CPPUNIT_ASSERT(!listener->_seen_fatal_error);
- CPPUNIT_ASSERT(!listener->_seen_resource_exhaustion_error);
+ EXPECT_FALSE(listener->_seen_fatal_error);
+ EXPECT_FALSE(listener->_seen_resource_exhaustion_error);
}
-void ProviderErrorWrapperTest::listener_not_invoked_on_regular_errors() {
+TEST_F(ProviderErrorWrapperTest, listener_not_invoked_on_regular_errors) {
Fixture f(getPersistenceProvider());
auto listener = std::make_shared<MockErrorListener>();
f.errorWrapper.register_error_listener(listener);
- f.check_no_listener_invoked_for_error(*listener, spi::Result::TRANSIENT_ERROR);
- f.check_no_listener_invoked_for_error(*listener, spi::Result::PERMANENT_ERROR);
+ EXPECT_NO_FATAL_FAILURE(f.check_no_listener_invoked_for_error(*listener, spi::Result::TRANSIENT_ERROR));
+ EXPECT_NO_FATAL_FAILURE(f.check_no_listener_invoked_for_error(*listener, spi::Result::PERMANENT_ERROR));
}
-void ProviderErrorWrapperTest::multiple_listeners_can_be_registered() {
+TEST_F(ProviderErrorWrapperTest, multiple_listeners_can_be_registered) {
Fixture f(getPersistenceProvider());
auto listener1 = std::make_shared<MockErrorListener>();
auto listener2 = std::make_shared<MockErrorListener>();
@@ -134,8 +117,8 @@ void ProviderErrorWrapperTest::multiple_listeners_can_be_registered() {
f.providerWrapper.setResult(spi::Result(spi::Result::RESOURCE_EXHAUSTED, "out of juice"));
f.perform_spi_operation();
- CPPUNIT_ASSERT(listener1->_seen_resource_exhaustion_error);
- CPPUNIT_ASSERT(listener2->_seen_resource_exhaustion_error);
+ EXPECT_TRUE(listener1->_seen_resource_exhaustion_error);
+ EXPECT_TRUE(listener2->_seen_resource_exhaustion_error);
}
} // ns storage
diff --git a/storage/src/tests/persistence/splitbitdetectortest.cpp b/storage/src/tests/persistence/splitbitdetectortest.cpp
index 01baa8f4e98..a2d17117886 100644
--- a/storage/src/tests/persistence/splitbitdetectortest.cpp
+++ b/storage/src/tests/persistence/splitbitdetectortest.cpp
@@ -1,6 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/storage/persistence/splitbitdetector.h>
#include <vespa/vespalib/io/fileutil.h>
@@ -9,67 +8,37 @@
#include <vespa/document/base/testdocman.h>
#include <vespa/document/bucket/bucketidfactory.h>
#include <vespa/metrics/loadmetric.h>
+#include <vespa/vespalib/gtest/gtest.h>
#include <algorithm>
using storage::spi::test::makeSpiBucket;
+using namespace ::testing;
namespace storage {
namespace {
- spi::LoadType defaultLoadType(0, "default");
+spi::LoadType defaultLoadType(0, "default");
}
-struct SplitBitDetectorTest : public CppUnit::TestFixture {
- void testSingleUser();
- void testTwoUsers();
- void testMaxBits();
- void testMaxBitsOneBelowMax();
- void testUnsplittable();
- void testUnsplittableMinCount();
- void testEmpty();
- void testZeroDocLimitFallbacksToOneBitIncreaseWith1Doc();
- void testZeroDocLimitFallbacksToOneBitIncreaseOnGidCollision();
- void findBucketCollisionIds();
-
- spi::DocEntry::UP
- generateDocEntry(uint32_t userId,
- uint32_t docNum,
- spi::Timestamp timestamp)
+struct SplitBitDetectorTest : Test {
+ document::TestDocMan testDocMan;
+ spi::dummy::DummyPersistence provider;
+ spi::Bucket bucket;
+ spi::Context context;
+
+ SplitBitDetectorTest()
+ : testDocMan(),
+ provider(testDocMan.getTypeRepoSP(), 1),
+ bucket(makeSpiBucket(document::BucketId(1, 1))),
+ context(defaultLoadType, spi::Priority(0),
+ spi::Trace::TraceLevel(0))
{
- std::ostringstream ost;
- ost << "id:storage_test:testdoctype1:n=" << userId << ":" << docNum;
- return spi::DocEntry::UP(new spi::DocEntry(
- timestamp, 0, document::DocumentId(ost.str())));
- };
-
- CPPUNIT_TEST_SUITE(SplitBitDetectorTest);
- CPPUNIT_TEST(testSingleUser);
- CPPUNIT_TEST(testTwoUsers);
- CPPUNIT_TEST(testMaxBits);
- CPPUNIT_TEST(testMaxBitsOneBelowMax);
- CPPUNIT_TEST(testUnsplittable);
- CPPUNIT_TEST(testUnsplittableMinCount);
- CPPUNIT_TEST(testEmpty);
- CPPUNIT_TEST(testZeroDocLimitFallbacksToOneBitIncreaseWith1Doc);
- CPPUNIT_TEST(testZeroDocLimitFallbacksToOneBitIncreaseOnGidCollision);
- CPPUNIT_TEST_DISABLED(findBucketCollisionIds);
- CPPUNIT_TEST_SUITE_END();
+ provider.getPartitionStates();
+ provider.createBucket(bucket, context);
+ }
};
-CPPUNIT_TEST_SUITE_REGISTRATION(SplitBitDetectorTest);
-
-void
-SplitBitDetectorTest::testTwoUsers()
-{
- document::TestDocMan testDocMan;
- spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
- provider.getPartitionStates();
- spi::Bucket bucket(makeSpiBucket(document::BucketId(1, 1)));
- spi::Context context(defaultLoadType, spi::Priority(0),
- spi::Trace::TraceLevel(0));
-
- provider.createBucket(bucket, context);
-
+TEST_F(SplitBitDetectorTest, two_users) {
std::vector<spi::DocEntry::UP> entries;
for (uint32_t i = 0; i < 5; ++i) {
document::Document::SP doc(
@@ -85,24 +54,12 @@ SplitBitDetectorTest::testTwoUsers()
SplitBitDetector::Result result(
SplitBitDetector::detectSplit(provider, bucket, 58, context));
- CPPUNIT_ASSERT_EQUAL(
- std::string("SplitTargets(2: BucketId(0x0800000000000001), "
- "BucketId(0x0800000000000003))"),
- result.toString());
+ EXPECT_EQ("SplitTargets(2: BucketId(0x0800000000000001), "
+ "BucketId(0x0800000000000003))",
+ result.toString());
}
-void
-SplitBitDetectorTest::testSingleUser()
-{
- document::TestDocMan testDocMan;
- spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
- provider.getPartitionStates();
- spi::Bucket bucket(makeSpiBucket(document::BucketId(1, 1)));
- spi::Context context(defaultLoadType, spi::Priority(0),
- spi::Trace::TraceLevel(0));
-
- provider.createBucket(bucket, context);
-
+TEST_F(SplitBitDetectorTest, single_user) {
std::vector<spi::DocEntry::UP> entries;
for (uint32_t i = 0; i < 10; ++i) {
document::Document::SP doc(
@@ -112,25 +69,14 @@ SplitBitDetectorTest::testSingleUser()
SplitBitDetector::Result result(
SplitBitDetector::detectSplit(provider, bucket, 58, context));
- CPPUNIT_ASSERT_EQUAL(
- std::string("SplitTargets(33: BucketId(0x8400000000000001), "
- "BucketId(0x8400000100000001))"),
- result.toString());
+ EXPECT_EQ("SplitTargets(33: BucketId(0x8400000000000001), "
+ "BucketId(0x8400000100000001))",
+ result.toString());
}
-void
-SplitBitDetectorTest::testMaxBits()
-{
- document::TestDocMan testDocMan;
- spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
- provider.getPartitionStates();
- spi::Bucket bucket(makeSpiBucket(document::BucketId(1, 1)));
- spi::Context context(defaultLoadType, spi::Priority(0),
- spi::Trace::TraceLevel(0));
+TEST_F(SplitBitDetectorTest, max_bits) {
int minContentSize = 1, maxContentSize = 1;
- provider.createBucket(bucket, context);
-
std::vector<spi::DocEntry::UP> entries;
for (uint32_t seed = 0; seed < 10; ++seed) {
int location = 1;
@@ -141,62 +87,39 @@ SplitBitDetectorTest::testMaxBits()
SplitBitDetector::Result result(
SplitBitDetector::detectSplit(provider, bucket, 3, context));
- CPPUNIT_ASSERT_EQUAL(
- std::string("SplitTargets(3: BucketId(0x0c00000000000001), "
- "[ BucketId(0x0c00000000000005) ])"),
- result.toString());
+ EXPECT_EQ("SplitTargets(3: BucketId(0x0c00000000000001), "
+ "[ BucketId(0x0c00000000000005) ])",
+ result.toString());
}
-void
-SplitBitDetectorTest::testMaxBitsOneBelowMax()
-{
- document::TestDocMan testDocMan;
- spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
- provider.getPartitionStates();
- spi::Bucket bucket(makeSpiBucket(document::BucketId(15, 1)));
- spi::Context context(defaultLoadType, spi::Priority(0),
- spi::Trace::TraceLevel(0));
+TEST_F(SplitBitDetectorTest, max_bits_one_below_max) {
+ spi::Bucket my_bucket(makeSpiBucket(document::BucketId(15, 1)));
int minContentSize = 1, maxContentSize = 1;
- provider.createBucket(bucket, context);
+ provider.createBucket(my_bucket, context);
std::vector<spi::DocEntry::UP> entries;
for (uint32_t seed = 0; seed < 10; ++seed) {
int location = 1 | (seed % 2 == 0 ? 0x8000 : 0);
document::Document::SP doc(testDocMan.createRandomDocumentAtLocation(
location, seed, minContentSize, maxContentSize));
- provider.put(bucket, spi::Timestamp(1000 + seed), doc, context);
+ provider.put(my_bucket, spi::Timestamp(1000 + seed), doc, context);
}
- //std::cerr << provider.dumpBucket(bucket) << "\n";
-
SplitBitDetector::Result result(
- SplitBitDetector::detectSplit(provider, bucket, 15, context));
- CPPUNIT_ASSERT_EQUAL(
- std::string("SplitTargets(error: No use in trying to split "
- "Bucket(0x3c00000000000001, partition 0) when max split"
- " bit is set to 15.)"),
- result.toString());
-
- result = SplitBitDetector::detectSplit(provider, bucket, 16, context);
- CPPUNIT_ASSERT_EQUAL(
- std::string("SplitTargets(16: BucketId(0x4000000000000001), "
- "BucketId(0x4000000000008001))"),
- result.toString());
+ SplitBitDetector::detectSplit(provider, my_bucket, 15, context));
+ EXPECT_EQ("SplitTargets(error: No use in trying to split "
+ "Bucket(0x3c00000000000001, partition 0) when max split"
+ " bit is set to 15.)",
+ result.toString());
+
+ result = SplitBitDetector::detectSplit(provider, my_bucket, 16, context);
+ EXPECT_EQ("SplitTargets(16: BucketId(0x4000000000000001), "
+ "BucketId(0x4000000000008001))",
+ result.toString());
}
-void
-SplitBitDetectorTest::testUnsplittable()
-{
- document::TestDocMan testDocMan;
- spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
- provider.getPartitionStates();
- spi::Bucket bucket(makeSpiBucket(document::BucketId(1, 1)));
- spi::Context context(defaultLoadType, spi::Priority(0),
- spi::Trace::TraceLevel(0));
-
- provider.createBucket(bucket, context);
-
+TEST_F(SplitBitDetectorTest, unsplittable) {
std::vector<spi::DocEntry::UP> entries;
for (uint32_t i = 0; i < 10; ++i) {
@@ -207,24 +130,12 @@ SplitBitDetectorTest::testUnsplittable()
SplitBitDetector::Result result(
SplitBitDetector::detectSplit(provider, bucket, 58, context, 100));
- CPPUNIT_ASSERT_EQUAL(
- std::string("SplitTargets(58: BucketId(0xe94c074f00000001), "
- "BucketId(0xeb4c074f00000001))"),
- result.toString());
+ EXPECT_EQ("SplitTargets(58: BucketId(0xe94c074f00000001), "
+ "BucketId(0xeb4c074f00000001))",
+ result.toString());
}
-void
-SplitBitDetectorTest::testUnsplittableMinCount()
-{
- document::TestDocMan testDocMan;
- spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
- provider.getPartitionStates();
- spi::Bucket bucket(makeSpiBucket(document::BucketId(1, 1)));
- spi::Context context(defaultLoadType, spi::Priority(0),
- spi::Trace::TraceLevel(0));
-
- provider.createBucket(bucket, context);
-
+TEST_F(SplitBitDetectorTest, unsplittable_min_count) {
std::vector<spi::DocEntry::UP> entries;
for (uint32_t i = 0; i < 10; ++i) {
@@ -236,66 +147,30 @@ SplitBitDetectorTest::testUnsplittableMinCount()
SplitBitDetector::Result result(
SplitBitDetector::detectSplit(provider, bucket, 58, context, 5, 0));
// Still no other choice than split out to 58 bits regardless of minCount.
- CPPUNIT_ASSERT_EQUAL(
- std::string("SplitTargets(58: BucketId(0xe94c074f00000001), "
- "BucketId(0xeb4c074f00000001))"),
- result.toString());
+ EXPECT_EQ("SplitTargets(58: BucketId(0xe94c074f00000001), "
+ "BucketId(0xeb4c074f00000001))",
+ result.toString());
}
-
-void
-SplitBitDetectorTest::testEmpty()
-{
- document::TestDocMan testDocMan;
- spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
- provider.getPartitionStates();
- spi::Bucket bucket(makeSpiBucket(document::BucketId(1, 1)));
- spi::Context context(defaultLoadType, spi::Priority(0),
- spi::Trace::TraceLevel(0));
-
- provider.createBucket(bucket, context);
-
+TEST_F(SplitBitDetectorTest, empty) {
SplitBitDetector::Result result(
SplitBitDetector::detectSplit(provider, bucket, 58, context));
- CPPUNIT_ASSERT_EQUAL(
- std::string("SplitTargets(source empty)"),
- result.toString());
+ EXPECT_EQ("SplitTargets(source empty)", result.toString());
}
-void
-SplitBitDetectorTest::testZeroDocLimitFallbacksToOneBitIncreaseWith1Doc()
-{
- document::TestDocMan testDocMan;
- spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
- provider.getPartitionStates();
- spi::Bucket bucket(makeSpiBucket(document::BucketId(1, 1)));
- spi::Context context(defaultLoadType, spi::Priority(0),
- spi::Trace::TraceLevel(0));
-
- provider.createBucket(bucket, context);
+TEST_F(SplitBitDetectorTest, zero_doc_limit_falls_back_to_one_bit_increase_with_1_doc) {
document::Document::SP doc(
testDocMan.createRandomDocumentAtLocation(1, 0, 1, 1));
provider.put(bucket, spi::Timestamp(1000), doc, context);
SplitBitDetector::Result result(
SplitBitDetector::detectSplit(provider, bucket, 58, context, 0, 0));
- CPPUNIT_ASSERT_EQUAL(
- std::string("SplitTargets(2: BucketId(0x0800000000000001), "
- "BucketId(0x0800000000000003))"),
- result.toString());
+ EXPECT_EQ("SplitTargets(2: BucketId(0x0800000000000001), "
+ "BucketId(0x0800000000000003))",
+ result.toString());
}
-void
-SplitBitDetectorTest::testZeroDocLimitFallbacksToOneBitIncreaseOnGidCollision()
-{
- document::TestDocMan testDocMan;
- spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
- provider.getPartitionStates();
- spi::Bucket bucket(makeSpiBucket(document::BucketId(1, 1)));
- spi::Context context(defaultLoadType, spi::Priority(0),
- spi::Trace::TraceLevel(0));
-
- provider.createBucket(bucket, context);
+TEST_F(SplitBitDetectorTest, zero_doc_limit_falls_back_to_one_bit_increase_on_gid_collision) {
document::Document::SP doc(
testDocMan.createRandomDocumentAtLocation(1, 0, 1, 1));
provider.put(bucket, spi::Timestamp(1000), doc, context);
@@ -303,10 +178,9 @@ SplitBitDetectorTest::testZeroDocLimitFallbacksToOneBitIncreaseOnGidCollision()
SplitBitDetector::Result result(
SplitBitDetector::detectSplit(provider, bucket, 58, context, 0, 0));
- CPPUNIT_ASSERT_EQUAL(
- std::string("SplitTargets(2: BucketId(0x0800000000000001), "
- "BucketId(0x0800000000000003))"),
- result.toString());
+ EXPECT_EQ("SplitTargets(2: BucketId(0x0800000000000001), "
+ "BucketId(0x0800000000000003))",
+ result.toString());
}
/**
@@ -314,9 +188,7 @@ SplitBitDetectorTest::testZeroDocLimitFallbacksToOneBitIncreaseOnGidCollision()
* document IDs that map to the same 58-bit bucket ID. Disabled by default since
* it costs CPU to do this and is not necessary during normal testing.
*/
-void
-SplitBitDetectorTest::findBucketCollisionIds()
-{
+TEST_F(SplitBitDetectorTest, DISABLED_find_bucket_collision_ids) {
using document::DocumentId;
using document::BucketId;
diff --git a/storage/src/tests/persistence/testandsettest.cpp b/storage/src/tests/persistence/testandsettest.cpp
index 197aa95fc22..4c4a7c9a0be 100644
--- a/storage/src/tests/persistence/testandsettest.cpp
+++ b/storage/src/tests/persistence/testandsettest.cpp
@@ -1,6 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
// @author Vegard Sjonfjell
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storage/persistence/persistencethread.h>
#include <tests/persistence/persistencetestutils.h>
#include <vespa/document/test/make_document_bucket.h>
@@ -14,14 +13,13 @@
using std::unique_ptr;
using std::shared_ptr;
-using namespace std::string_literals;
using storage::spi::test::makeSpiBucket;
using document::test::makeDocumentBucket;
+using namespace ::testing;
namespace storage {
-class TestAndSetTest : public SingleDiskPersistenceTestUtils
-{
+struct TestAndSetTest : SingleDiskPersistenceTestUtils {
static constexpr int MIN_DOCUMENT_SIZE = 0;
static constexpr int MAX_DOCUMENT_SIZE = 128;
static constexpr int RANDOM_SEED = 1234;
@@ -36,9 +34,8 @@ class TestAndSetTest : public SingleDiskPersistenceTestUtils
shared_ptr<document::Document> testDoc;
document::DocumentId testDocId;
-public:
- void setUp() override {
- SingleDiskPersistenceTestUtils::setUp();
+ void SetUp() override {
+ SingleDiskPersistenceTestUtils::SetUp();
spi::Context context(
spi::LoadType(0, "default"),
@@ -55,38 +52,11 @@ public:
testDocId = testDoc->getId();
}
- void tearDown() override {
+ void TearDown() override {
thread.reset(nullptr);
- SingleDiskPersistenceTestUtils::tearDown();
+ SingleDiskPersistenceTestUtils::TearDown();
}
- void conditional_put_not_executed_on_condition_mismatch();
- void conditional_put_executed_on_condition_match();
- void conditional_remove_not_executed_on_condition_mismatch();
- void conditional_remove_executed_on_condition_match();
- void conditional_update_not_executed_on_condition_mismatch();
- void conditional_update_executed_on_condition_match();
- void conditional_update_not_executed_when_no_document_and_no_auto_create();
- void conditional_update_executed_when_no_document_but_auto_create_is_enabled();
- void invalid_document_selection_should_fail();
- void conditional_put_to_non_existing_document_should_fail();
- void document_with_no_type_should_fail();
-
- CPPUNIT_TEST_SUITE(TestAndSetTest);
- CPPUNIT_TEST(conditional_put_not_executed_on_condition_mismatch);
- CPPUNIT_TEST(conditional_put_executed_on_condition_match);
- CPPUNIT_TEST(conditional_remove_not_executed_on_condition_mismatch);
- CPPUNIT_TEST(conditional_remove_executed_on_condition_match);
- CPPUNIT_TEST(conditional_update_not_executed_on_condition_mismatch);
- CPPUNIT_TEST(conditional_update_executed_on_condition_match);
- CPPUNIT_TEST(conditional_update_not_executed_when_no_document_and_no_auto_create);
- CPPUNIT_TEST(conditional_update_executed_when_no_document_but_auto_create_is_enabled);
- CPPUNIT_TEST(invalid_document_selection_should_fail);
- CPPUNIT_TEST(conditional_put_to_non_existing_document_should_fail);
- CPPUNIT_TEST(document_with_no_type_should_fail);
- CPPUNIT_TEST_SUITE_END();
-
-protected:
std::unique_ptr<api::UpdateCommand> conditional_update_test(
bool createIfMissing,
api::Timestamp updateTimestamp);
@@ -103,32 +73,29 @@ protected:
spi::DocumentMetaFlags removeFlag = spi::NONE);
};
-CPPUNIT_TEST_SUITE_REGISTRATION(TestAndSetTest);
-
-void TestAndSetTest::conditional_put_not_executed_on_condition_mismatch()
-{
+TEST_F(TestAndSetTest, conditional_put_not_executed_on_condition_mismatch) {
// Put document with mismatching header
api::Timestamp timestampOne = 0;
putTestDocument(false, timestampOne);
- CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
+ EXPECT_EQ(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
// Conditionally replace document, but fail due to lack of woofy dog
api::Timestamp timestampTwo = 1;
api::PutCommand putTwo(makeDocumentBucket(BUCKET_ID), testDoc, timestampTwo);
setTestCondition(putTwo);
- CPPUNIT_ASSERT(thread->handlePut(putTwo)->getResult() == api::ReturnCode::Result::TEST_AND_SET_CONDITION_FAILED);
- CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
+ ASSERT_EQ(thread->handlePut(putTwo)->getResult().getResult(),
+ api::ReturnCode::Result::TEST_AND_SET_CONDITION_FAILED);
+ EXPECT_EQ(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
}
-void TestAndSetTest::conditional_put_executed_on_condition_match()
-{
+TEST_F(TestAndSetTest, conditional_put_executed_on_condition_match) {
// Put document with matching header
api::Timestamp timestampOne = 0;
putTestDocument(true, timestampOne);
- CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
+ EXPECT_EQ(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
// Update content of document
testDoc->setValue(testDoc->getField("content"), NEW_CONTENT);
@@ -138,51 +105,50 @@ void TestAndSetTest::conditional_put_executed_on_condition_match()
api::PutCommand putTwo(makeDocumentBucket(BUCKET_ID), testDoc, timestampTwo);
setTestCondition(putTwo);
- CPPUNIT_ASSERT(thread->handlePut(putTwo)->getResult() == api::ReturnCode::Result::OK);
- CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId) +
- expectedDocEntryString(timestampTwo, testDocId),
- dumpBucket(BUCKET_ID));
+ ASSERT_EQ(thread->handlePut(putTwo)->getResult().getResult(), api::ReturnCode::Result::OK);
+ EXPECT_EQ(expectedDocEntryString(timestampOne, testDocId) +
+ expectedDocEntryString(timestampTwo, testDocId),
+ dumpBucket(BUCKET_ID));
assertTestDocumentFoundAndMatchesContent(NEW_CONTENT);
}
-void TestAndSetTest::conditional_remove_not_executed_on_condition_mismatch()
-{
+TEST_F(TestAndSetTest, conditional_remove_not_executed_on_condition_mismatch) {
// Put document with mismatching header
api::Timestamp timestampOne = 0;
putTestDocument(false, timestampOne);
- CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
+ EXPECT_EQ(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
// Conditionally remove document, fail in doing so
api::Timestamp timestampTwo = 1;
api::RemoveCommand remove(makeDocumentBucket(BUCKET_ID), testDocId, timestampTwo);
setTestCondition(remove);
- CPPUNIT_ASSERT(thread->handleRemove(remove)->getResult() == api::ReturnCode::Result::TEST_AND_SET_CONDITION_FAILED);
- CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
+ ASSERT_EQ(thread->handleRemove(remove)->getResult().getResult(),
+ api::ReturnCode::Result::TEST_AND_SET_CONDITION_FAILED);
+ EXPECT_EQ(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
// Assert that the document is still there
retrieveTestDocument();
}
-void TestAndSetTest::conditional_remove_executed_on_condition_match()
-{
+TEST_F(TestAndSetTest, conditional_remove_executed_on_condition_match) {
// Put document with matching header
api::Timestamp timestampOne = 0;
putTestDocument(true, timestampOne);
- CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
+ EXPECT_EQ(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
// Conditionally remove document, succeed in doing so
api::Timestamp timestampTwo = 1;
api::RemoveCommand remove(makeDocumentBucket(BUCKET_ID), testDocId, timestampTwo);
setTestCondition(remove);
- CPPUNIT_ASSERT(thread->handleRemove(remove)->getResult() == api::ReturnCode::Result::OK);
- CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId) +
- expectedDocEntryString(timestampTwo, testDocId, spi::REMOVE_ENTRY),
- dumpBucket(BUCKET_ID));
+ ASSERT_EQ(thread->handleRemove(remove)->getResult().getResult(), api::ReturnCode::Result::OK);
+ EXPECT_EQ(expectedDocEntryString(timestampOne, testDocId) +
+ expectedDocEntryString(timestampTwo, testDocId, spi::REMOVE_ENTRY),
+ dumpBucket(BUCKET_ID));
}
std::unique_ptr<api::UpdateCommand> TestAndSetTest::conditional_update_test(
@@ -200,66 +166,63 @@ std::unique_ptr<api::UpdateCommand> TestAndSetTest::conditional_update_test(
return updateUp;
}
-void TestAndSetTest::conditional_update_not_executed_on_condition_mismatch()
-{
+TEST_F(TestAndSetTest, conditional_update_not_executed_on_condition_mismatch) {
api::Timestamp timestampOne = 0;
api::Timestamp timestampTwo = 1;
putTestDocument(false, timestampOne);
auto updateUp = conditional_update_test(false, timestampTwo);
- CPPUNIT_ASSERT(thread->handleUpdate(*updateUp)->getResult() == api::ReturnCode::Result::TEST_AND_SET_CONDITION_FAILED);
- CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId),
- dumpBucket(BUCKET_ID));
+ ASSERT_EQ(thread->handleUpdate(*updateUp)->getResult().getResult(),
+ api::ReturnCode::Result::TEST_AND_SET_CONDITION_FAILED);
+ EXPECT_EQ(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
assertTestDocumentFoundAndMatchesContent(OLD_CONTENT);
}
-void TestAndSetTest::conditional_update_executed_on_condition_match()
-{
+TEST_F(TestAndSetTest, conditional_update_executed_on_condition_match) {
api::Timestamp timestampOne = 0;
api::Timestamp timestampTwo = 1;
putTestDocument(true, timestampOne);
auto updateUp = conditional_update_test(false, timestampTwo);
- CPPUNIT_ASSERT(thread->handleUpdate(*updateUp)->getResult() == api::ReturnCode::Result::OK);
- CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId) +
- expectedDocEntryString(timestampTwo, testDocId),
- dumpBucket(BUCKET_ID));
+ ASSERT_EQ(thread->handleUpdate(*updateUp)->getResult().getResult(), api::ReturnCode::Result::OK);
+ EXPECT_EQ(expectedDocEntryString(timestampOne, testDocId) +
+ expectedDocEntryString(timestampTwo, testDocId),
+ dumpBucket(BUCKET_ID));
assertTestDocumentFoundAndMatchesContent(NEW_CONTENT);
}
-void TestAndSetTest::conditional_update_not_executed_when_no_document_and_no_auto_create() {
+TEST_F(TestAndSetTest, conditional_update_not_executed_when_no_document_and_no_auto_create) {
api::Timestamp updateTimestamp = 200;
auto updateUp = conditional_update_test(false, updateTimestamp);
- CPPUNIT_ASSERT(thread->handleUpdate(*updateUp)->getResult() == api::ReturnCode::Result::TEST_AND_SET_CONDITION_FAILED);
- CPPUNIT_ASSERT_EQUAL(""s, dumpBucket(BUCKET_ID));
+ ASSERT_EQ(thread->handleUpdate(*updateUp)->getResult().getResult(),
+ api::ReturnCode::Result::TEST_AND_SET_CONDITION_FAILED);
+ EXPECT_EQ("", dumpBucket(BUCKET_ID));
}
-void TestAndSetTest::conditional_update_executed_when_no_document_but_auto_create_is_enabled() {
+TEST_F(TestAndSetTest, conditional_update_executed_when_no_document_but_auto_create_is_enabled) {
api::Timestamp updateTimestamp = 200;
auto updateUp = conditional_update_test(true, updateTimestamp);
- CPPUNIT_ASSERT(thread->handleUpdate(*updateUp)->getResult() == api::ReturnCode::Result::OK);
- CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(updateTimestamp, testDocId), dumpBucket(BUCKET_ID));
+ ASSERT_EQ(thread->handleUpdate(*updateUp)->getResult().getResult(), api::ReturnCode::Result::OK);
+ EXPECT_EQ(expectedDocEntryString(updateTimestamp, testDocId), dumpBucket(BUCKET_ID));
assertTestDocumentFoundAndMatchesContent(NEW_CONTENT);
}
-void TestAndSetTest::invalid_document_selection_should_fail()
-{
+TEST_F(TestAndSetTest, invalid_document_selection_should_fail) {
// Conditionally replace nonexisting document
// Fail early since document selection is invalid
api::Timestamp timestamp = 0;
api::PutCommand put(makeDocumentBucket(BUCKET_ID), testDoc, timestamp);
put.setCondition(documentapi::TestAndSetCondition("bjarne"));
- CPPUNIT_ASSERT(thread->handlePut(put)->getResult() == api::ReturnCode::Result::ILLEGAL_PARAMETERS);
- CPPUNIT_ASSERT_EQUAL(""s, dumpBucket(BUCKET_ID));
+ ASSERT_EQ(thread->handlePut(put)->getResult().getResult(), api::ReturnCode::Result::ILLEGAL_PARAMETERS);
+ EXPECT_EQ("", dumpBucket(BUCKET_ID));
}
-void TestAndSetTest::conditional_put_to_non_existing_document_should_fail()
-{
+TEST_F(TestAndSetTest, conditional_put_to_non_existing_document_should_fail) {
// Conditionally replace nonexisting document
// Fail since no document exists to match with test and set
api::Timestamp timestamp = 0;
@@ -267,12 +230,12 @@ void TestAndSetTest::conditional_put_to_non_existing_document_should_fail()
setTestCondition(put);
thread->handlePut(put);
- CPPUNIT_ASSERT(thread->handlePut(put)->getResult() == api::ReturnCode::Result::TEST_AND_SET_CONDITION_FAILED);
- CPPUNIT_ASSERT_EQUAL(""s, dumpBucket(BUCKET_ID));
+ ASSERT_EQ(thread->handlePut(put)->getResult().getResult(),
+ api::ReturnCode::Result::TEST_AND_SET_CONDITION_FAILED);
+ EXPECT_EQ("", dumpBucket(BUCKET_ID));
}
-void TestAndSetTest::document_with_no_type_should_fail()
-{
+TEST_F(TestAndSetTest, document_with_no_type_should_fail) {
// Conditionally replace nonexisting document
// Fail since no document exists to match with test and set
api::Timestamp timestamp = 0;
@@ -281,9 +244,9 @@ void TestAndSetTest::document_with_no_type_should_fail()
setTestCondition(remove);
auto code = thread->handleRemove(remove)->getResult();
- CPPUNIT_ASSERT(code == api::ReturnCode::Result::ILLEGAL_PARAMETERS);
- CPPUNIT_ASSERT(code.getMessage() == "Document id has no doctype");
- CPPUNIT_ASSERT_EQUAL(""s, dumpBucket(BUCKET_ID));
+ EXPECT_EQ(code.getResult(), api::ReturnCode::Result::ILLEGAL_PARAMETERS);
+ EXPECT_EQ(code.getMessage(), "Document id has no doctype");
+ EXPECT_EQ("", dumpBucket(BUCKET_ID));
}
document::Document::SP
@@ -306,10 +269,10 @@ document::Document::SP TestAndSetTest::retrieveTestDocument()
{
api::GetCommand get(makeDocumentBucket(BUCKET_ID), testDocId, "[all]");
auto tracker = thread->handleGet(get);
- CPPUNIT_ASSERT(tracker->getResult() == api::ReturnCode::Result::OK);
+ assert(tracker->getResult() == api::ReturnCode::Result::OK);
auto & reply = static_cast<api::GetReply &>(*tracker->getReply());
- CPPUNIT_ASSERT(reply.wasFound());
+ assert(reply.wasFound());
return reply.getDocument();
}
@@ -333,7 +296,7 @@ void TestAndSetTest::assertTestDocumentFoundAndMatchesContent(const document::Fi
auto doc = retrieveTestDocument();
auto & field = doc->getField("content");
- CPPUNIT_ASSERT_EQUAL(*doc->getValue(field), value);
+ EXPECT_EQ(*doc->getValue(field), value);
}
std::string TestAndSetTest::expectedDocEntryString(