summary refs log tree commit diff stats
path: root/storage
diff options
context:
space:
mode:
authorTor Brede Vekterli <vekterli@verizonmedia.com>2019-06-07 11:28:45 +0000
committerTor Brede Vekterli <vekterli@verizonmedia.com>2019-06-07 11:28:45 +0000
commit00919ba69a4f126e5952a00fabf58eb24d602fc4 (patch)
tree5bf2efa227e81b8bd5fdba2af61e2945f58dd1bd /storage
parentad1547f8b67fcffce21d2f499b028381d0fbd472 (diff)
Convert BucketManagerTest and InitializerTest to gtest
Still some residual vdstestlib CppUnit traces that will need cleaning up later.
Diffstat (limited to 'storage')
-rw-r--r--storage/src/tests/bucketdb/CMakeLists.txt4
-rw-r--r--storage/src/tests/bucketdb/bucketmanagertest.cpp506
-rw-r--r--storage/src/tests/bucketdb/initializertest.cpp507
-rw-r--r--storage/src/vespa/storage/common/storagelinkqueued.cpp2
4 files changed, 250 insertions, 769 deletions
diff --git a/storage/src/tests/bucketdb/CMakeLists.txt b/storage/src/tests/bucketdb/CMakeLists.txt
index a349d4b4463..cac79c77362 100644
--- a/storage/src/tests/bucketdb/CMakeLists.txt
+++ b/storage/src/tests/bucketdb/CMakeLists.txt
@@ -3,8 +3,6 @@
# TODO: Remove test library when all tests have been migrated to gtest.
vespa_add_library(storage_testbucketdb TEST
SOURCES
- bucketmanagertest.cpp
- initializertest.cpp
judyarraytest.cpp
judymultimaptest.cpp
lockablemaptest.cpp
@@ -16,7 +14,9 @@ vespa_add_library(storage_testbucketdb TEST
vespa_add_executable(storage_bucketdb_gtest_runner_app TEST
SOURCES
bucketinfotest.cpp
+ bucketmanagertest.cpp
gtest_runner.cpp
+ initializertest.cpp
DEPENDS
storage
storage_testcommon
diff --git a/storage/src/tests/bucketdb/bucketmanagertest.cpp b/storage/src/tests/bucketdb/bucketmanagertest.cpp
index 09fe310e97e..1f72347b7ed 100644
--- a/storage/src/tests/bucketdb/bucketmanagertest.cpp
+++ b/storage/src/tests/bucketdb/bucketmanagertest.cpp
@@ -1,12 +1,13 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/config/helper/configgetter.h>
-#include <cppunit/extensions/HelperMacros.h>
+#include <vespa/config/helper/configgetter.hpp>
#include <vespa/document/config/config-documenttypes.h>
#include <vespa/document/datatype/documenttype.h>
#include <vespa/document/fieldvalue/document.h>
#include <vespa/document/update/documentupdate.h>
#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/document/test/make_document_bucket.h>
+#include <vespa/document/test/make_bucket_space.h>
#include <vespa/storage/bucketdb/bucketmanager.h>
#include <vespa/storage/common/global_bucket_space_distribution_converter.h>
#include <vespa/storage/persistence/filestorage/filestormanager.h>
@@ -16,13 +17,10 @@
#include <tests/common/teststorageapp.h>
#include <tests/common/dummystoragelink.h>
#include <tests/common/testhelper.h>
-#include <vespa/document/test/make_document_bucket.h>
-#include <vespa/document/test/make_bucket_space.h>
#include <vespa/vdslib/state/random.h>
#include <vespa/vespalib/io/fileutil.h>
-#include <vespa/vespalib/testkit/testapp.h>
#include <vespa/vespalib/util/stringfmt.h>
-#include <vespa/config/helper/configgetter.hpp>
+#include <vespa/vespalib/gtest/gtest.h>
#include <future>
#include <vespa/log/log.h>
@@ -35,6 +33,7 @@ using document::DocumentType;
using document::DocumentTypeRepo;
using document::test::makeDocumentBucket;
using document::test::makeBucketSpace;
+using namespace ::testing;
namespace storage {
@@ -57,41 +56,8 @@ std::ostream& operator<<(std::ostream& out, const TestBucketInfo& info) {
class ConcurrentOperationFixture;
struct TestParams;
-struct BucketManagerTest : public CppUnit::TestFixture {
+struct BucketManagerTest : public Test {
public:
- CPPUNIT_TEST_SUITE(BucketManagerTest);
- CPPUNIT_TEST(testRequestBucketInfoWithList);
- CPPUNIT_TEST(testDistributionBitGenerationEmpty);
- CPPUNIT_TEST(testDistributionBitChangeOnCreateBucket);
- CPPUNIT_TEST(testMinUsedBitsFromComponentIsHonored);
- CPPUNIT_TEST(testRemoveLastModifiedOK);
- CPPUNIT_TEST(testRemoveLastModifiedFailed);
- CPPUNIT_TEST(testSwallowNotifyBucketChangeReply);
- CPPUNIT_TEST(testMetricsGeneration);
- CPPUNIT_TEST(metrics_are_tracked_per_bucket_space);
- CPPUNIT_TEST(testSplitReplyOrderedAfterBucketReply);
- CPPUNIT_TEST(testJoinReplyOrderedAfterBucketReply);
- CPPUNIT_TEST(testDeleteReplyOrderedAfterBucketReply);
- CPPUNIT_TEST(testOnlyEnqueueWhenProcessingRequest);
- CPPUNIT_TEST(testOrderRepliesAfterBucketSpecificRequest);
- CPPUNIT_TEST(testQueuedRepliesOnlyDispatchedWhenAllProcessingDone);
- CPPUNIT_TEST(testMutationRepliesForSplitBucketAreEnqueued);
- CPPUNIT_TEST(testMutationRepliesForDeletedBucketAreEnqueued);
- CPPUNIT_TEST(testMutationRepliesForJoinedBucketAreEnqueued);
- CPPUNIT_TEST(testConflictingPutRepliesAreEnqueued);
- CPPUNIT_TEST(testConflictingUpdateRepliesAreEnqueued);
- CPPUNIT_TEST(testRemappedMutationIsCheckedAgainstOriginalBucket);
- CPPUNIT_TEST(testBucketConflictSetIsClearedBetweenBlockingRequests);
- CPPUNIT_TEST(testConflictSetOnlyClearedAfterAllBucketRequestsDone);
- CPPUNIT_TEST(testRejectRequestWithMismatchingDistributionHash);
- CPPUNIT_TEST(testDbNotIteratedWhenAllRequestsRejected);
- CPPUNIT_TEST(fall_back_to_legacy_global_distribution_hash_on_mismatch);
-
- // FIXME(vekterli): test is not deterministic and enjoys failing
- // sporadically when running under Valgrind. See bug 5932891.
- CPPUNIT_TEST_IGNORED(testRequestBucketInfoWithState);
- CPPUNIT_TEST_SUITE_END();
-
std::unique_ptr<TestServiceLayerApp> _node;
std::unique_ptr<DummyStorageLink> _top;
BucketManager *_manager;
@@ -101,12 +67,13 @@ public:
uint32_t _emptyBuckets;
document::Document::SP _document;
+ ~BucketManagerTest();
+
void setupTestEnvironment(bool fakePersistenceLayer = true,
bool noDelete = false);
void addBucketsToDB(uint32_t count);
bool wasBlockedDueToLastModified(api::StorageMessage* msg,
uint64_t lastModified);
- bool wasBlockedDueToLastModified(api::StorageMessage::SP msg);
void insertSingleBucket(const document::BucketId& bucket,
const api::BucketInfo& info);
void waitUntilRequestsAreProcessing(size_t nRequests = 1);
@@ -127,53 +94,30 @@ public:
void assertRequestWithBadHashIsRejected(
ConcurrentOperationFixture& fixture);
+protected:
+ void update_min_used_bits() {
+ _manager->updateMinUsedBits();
+ }
+ void trigger_metric_manager_update() {
+ vespalib::Monitor l;
+ _manager->updateMetrics(BucketManager::MetricLockGuard(l));
+ }
- void testRequestBucketInfoWithState();
- void testRequestBucketInfoWithList();
- void testDistributionBitGenerationEmpty();
- void testDistributionBitChangeOnCreateBucket();
- void testMinUsedBitsFromComponentIsHonored();
-
- void testRemoveLastModifiedOK();
- void testRemoveLastModifiedFailed();
-
- void testSwallowNotifyBucketChangeReply();
- void testMetricsGeneration();
- void metrics_are_tracked_per_bucket_space();
- void testSplitReplyOrderedAfterBucketReply();
- void testJoinReplyOrderedAfterBucketReply();
- void testDeleteReplyOrderedAfterBucketReply();
- void testOnlyEnqueueWhenProcessingRequest();
- void testOrderRepliesAfterBucketSpecificRequest();
- void testQueuedRepliesOnlyDispatchedWhenAllProcessingDone();
- void testMutationRepliesForSplitBucketAreEnqueued();
- void testMutationRepliesForDeletedBucketAreEnqueued();
- void testMutationRepliesForJoinedBucketAreEnqueued();
- void testConflictingPutRepliesAreEnqueued();
- void testConflictingUpdateRepliesAreEnqueued();
- void testRemappedMutationIsCheckedAgainstOriginalBucket();
- void testBucketConflictSetIsClearedBetweenBlockingRequests();
- void testConflictSetOnlyClearedAfterAllBucketRequestsDone();
- void testRejectRequestWithMismatchingDistributionHash();
- void testDbNotIteratedWhenAllRequestsRejected();
- void fall_back_to_legacy_global_distribution_hash_on_mismatch();
+ const BucketManagerMetrics& bucket_manager_metrics() const {
+ return *_manager->_metrics;
+ }
public:
- static constexpr uint32_t DIR_SPREAD = 3;
static constexpr uint32_t MESSAGE_WAIT_TIME = 60*2;
-
- void setUp() override {
+ void SetUp() override {
_emptyBuckets = 0;
}
- void tearDown() override {
- }
-
friend class ConcurrentOperationFixture;
};
-CPPUNIT_TEST_SUITE_REGISTRATION(BucketManagerTest);
+BucketManagerTest::~BucketManagerTest() = default;
#define ASSERT_DUMMYLINK_REPLY_COUNT(link, count) \
if (link->getNumReplies() != count) { \
@@ -183,7 +127,7 @@ CPPUNIT_TEST_SUITE_REGISTRATION(BucketManagerTest);
for (uint32_t i=0; i<link->getNumReplies(); ++i) { \
ost << link->getReply(i)->getType() << "\n"; \
} \
- CPPUNIT_FAIL(ost.str()); \
+ FAIL() << ost.str(); \
}
std::string getMkDirDisk(const std::string & rootFolder, int disk) {
@@ -203,34 +147,34 @@ void BucketManagerTest::setupTestEnvironment(bool fakePersistenceLayer,
assert(system(getMkDirDisk(rootFolder, 0).c_str()) == 0);
assert(system(getMkDirDisk(rootFolder, 1).c_str()) == 0);
- std::shared_ptr<const DocumentTypeRepo> repo(new DocumentTypeRepo(
+ auto repo = std::make_shared<const DocumentTypeRepo>(
*ConfigGetter<DocumenttypesConfig>::getConfig(
- "config-doctypes", FileSpec(TEST_PATH("config-doctypes.cfg")))));
- _top.reset(new DummyStorageLink);
- _node.reset(new TestServiceLayerApp(
- DiskCount(2), NodeIndex(0), config.getConfigId()));
+ "config-doctypes", FileSpec("../config-doctypes.cfg")));
+ _top = std::make_unique<DummyStorageLink>();
+ _node = std::make_unique<TestServiceLayerApp>(
+ DiskCount(2), NodeIndex(0), config.getConfigId());
_node->setTypeRepo(repo);
_node->setupDummyPersistence();
- // Set up the 3 links
- StorageLink::UP manager(new BucketManager("", _node->getComponentRegister()));
- _manager = (BucketManager*) manager.get();
+ // Set up the 3 links
+ auto manager = std::make_unique<BucketManager>("", _node->getComponentRegister());
+ _manager = manager.get();
_top->push_back(std::move(manager));
if (fakePersistenceLayer) {
- StorageLink::UP bottom(new DummyStorageLink);
- _bottom = (DummyStorageLink*) bottom.get();
+ auto bottom = std::make_unique<DummyStorageLink>();
+ _bottom = bottom.get();
_top->push_back(std::move(bottom));
} else {
- StorageLink::UP bottom(new FileStorManager(
+ auto bottom = std::make_unique<FileStorManager>(
config.getConfigId(), _node->getPartitions(),
- _node->getPersistenceProvider(), _node->getComponentRegister()));
- _filestorManager = (FileStorManager*) bottom.get();
+ _node->getPersistenceProvider(), _node->getComponentRegister());
+ _filestorManager = bottom.get();
_top->push_back(std::move(bottom));
}
- // Generate a doc to use for testing..
+ // Generate a doc to use for testing..
const DocumentType &type(*_node->getTypeRepo()
->getDocumentType("text/html"));
- _document.reset(new document::Document(type, document::DocumentId(
- document::DocIdString("test", "ntnu"))));
+ _document = std::make_shared<document::Document>(
+ type, document::DocumentId(document::DocIdString("test", "ntnu")));
}
void BucketManagerTest::addBucketsToDB(uint32_t count)
@@ -241,7 +185,7 @@ void BucketManagerTest::addBucketsToDB(uint32_t count)
while (_bucketInfo.size() < count) {
document::BucketId id(16, randomizer.nextUint32());
id = id.stripUnused();
- if (_bucketInfo.size() == 0) {
+ if (_bucketInfo.empty()) {
id = _node->getBucketIdFactory().getBucketId(
_document->getId()).stripUnused();
}
@@ -261,15 +205,13 @@ void BucketManagerTest::addBucketsToDB(uint32_t count)
info.count = 0;
info.crc = 0;
++_emptyBuckets;
- for (std::map<document::BucketId, TestBucketInfo>::iterator it
- = _bucketInfo.begin(); it != _bucketInfo.end(); ++it)
- {
+ for (const auto& bi : _bucketInfo) {
bucketdb::StorageBucketInfo entry;
- entry.disk = it->second.partition;
- entry.setBucketInfo(api::BucketInfo(it->second.crc,
- it->second.count,
- it->second.size));
- _node->getStorageBucketDatabase().insert(it->first, entry, "foo");
+ entry.disk = bi.second.partition;
+ entry.setBucketInfo(api::BucketInfo(bi.second.crc,
+ bi.second.count,
+ bi.second.size));
+ _node->getStorageBucketDatabase().insert(bi.first, entry, "foo");
}
}
@@ -293,27 +235,25 @@ BucketManagerTest::wasBlockedDueToLastModified(api::StorageMessage* msg,
_top->sendDown(api::StorageMessage::SP(msg));
if (_top->getNumReplies() == 1) {
- CPPUNIT_ASSERT_EQUAL(0, (int)_bottom->getNumCommands());
- CPPUNIT_ASSERT(!static_cast<api::StorageReply&>(
- *_top->getReply(0)).getResult().success());
+ assert(_bottom->getNumCommands() == 0);
+ assert(!dynamic_cast<api::StorageReply&>(*_top->getReply(0)).getResult().success());
return true;
} else {
- CPPUNIT_ASSERT_EQUAL(0, (int)_top->getNumReplies());
+ assert(_top->getNumReplies() == 0);
// Check that bucket database now has the operation's timestamp as last modified.
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(id, "foo"));
- CPPUNIT_ASSERT_EQUAL(lastModified, entry->info.getLastModified());
+ assert(entry->info.getLastModified() == lastModified);
}
return false;
}
}
-void BucketManagerTest::testRemoveLastModifiedOK()
-{
- CPPUNIT_ASSERT(!wasBlockedDueToLastModified(
+TEST_F(BucketManagerTest, remove_last_modified_ok) {
+ EXPECT_FALSE(wasBlockedDueToLastModified(
new api::RemoveCommand(makeDocumentBucket(document::BucketId(16, 1)),
document::DocumentId("userdoc:m:1:foo"),
api::Timestamp(1235)),
@@ -321,45 +261,37 @@ void BucketManagerTest::testRemoveLastModifiedOK()
}
-void BucketManagerTest::testRemoveLastModifiedFailed()
-{
- CPPUNIT_ASSERT(wasBlockedDueToLastModified(
+TEST_F(BucketManagerTest, remove_last_modified_failed) {
+ EXPECT_TRUE(wasBlockedDueToLastModified(
new api::RemoveCommand(makeDocumentBucket(document::BucketId(16, 1)),
document::DocumentId("userdoc:m:1:foo"),
api::Timestamp(1233)),
1233));
}
-void BucketManagerTest::testDistributionBitGenerationEmpty()
-{
- TestName("BucketManagerTest::testDistributionBitGenerationEmpty()");
+TEST_F(BucketManagerTest, distribution_bit_generation_empty) {
setupTestEnvironment();
_manager->doneInit();
- vespalib::Monitor l;
- _manager->updateMetrics(BucketManager::MetricLockGuard(l));
- CPPUNIT_ASSERT_EQUAL(58u, _node->getStateUpdater().getReportedNodeState()->getMinUsedBits());
+ trigger_metric_manager_update();
+ EXPECT_EQ(58u, _node->getStateUpdater().getReportedNodeState()->getMinUsedBits());
}
-void BucketManagerTest::testDistributionBitChangeOnCreateBucket()
-{
- TestName("BucketManagerTest::testDistributionBitChangeOnCreateBucket()");
+TEST_F(BucketManagerTest, distribution_bit_change_on_create_bucket){
setupTestEnvironment();
addBucketsToDB(30);
_top->open();
_node->getDoneInitializeHandler().notifyDoneInitializing();
_manager->doneInit();
- _manager->updateMinUsedBits();
- CPPUNIT_ASSERT_EQUAL(16u, _node->getStateUpdater().getReportedNodeState()->getMinUsedBits());
+ update_min_used_bits();
+ EXPECT_EQ(16u, _node->getStateUpdater().getReportedNodeState()->getMinUsedBits());
std::shared_ptr<api::CreateBucketCommand> cmd(
new api::CreateBucketCommand(makeDocumentBucket(document::BucketId(4, 5678))));
_top->sendDown(cmd);
- CPPUNIT_ASSERT_EQUAL(4u, _node->getStateUpdater().getReportedNodeState()->getMinUsedBits());
+ EXPECT_EQ(4u, _node->getStateUpdater().getReportedNodeState()->getMinUsedBits());
}
-void BucketManagerTest::testMinUsedBitsFromComponentIsHonored()
-{
- TestName("BucketManagerTest::testMinUsedBitsFromComponentIsHonored()");
+TEST_F(BucketManagerTest, Min_Used_Bits_From_Component_Is_Honored) {
setupTestEnvironment();
// Let these differ in order to test state update behavior.
_node->getComponentRegister().getMinUsedBitsTracker().setMinUsedBits(10);
@@ -377,40 +309,21 @@ void BucketManagerTest::testMinUsedBitsFromComponentIsHonored()
std::shared_ptr<api::CreateBucketCommand> cmd(
new api::CreateBucketCommand(makeDocumentBucket(document::BucketId(12, 5678))));
_top->sendDown(cmd);
- CPPUNIT_ASSERT_EQUAL(13u, _node->getStateUpdater().getReportedNodeState()->getMinUsedBits());
+ EXPECT_EQ(13u, _node->getStateUpdater().getReportedNodeState()->getMinUsedBits());
}
-void BucketManagerTest::testRequestBucketInfoWithState()
-{
- TestName("BucketManagerTest::testRequestBucketInfoWithState()");
- // Test prior to building bucket cache
+// FIXME: non-deterministic test
+TEST_F(BucketManagerTest, IGNORED_request_bucket_info_with_state) {
+ // Test prior to building bucket cache
setupTestEnvironment();
addBucketsToDB(30);
- /* Currently this is just queued up
- {
- std::shared_ptr<api::RequestBucketInfoCommand> cmd(
- new api::RequestBucketInfoCommand(
- 0, lib::ClusterState("distributor:3 .2.s:d storage:1")));
- _top->sendDown(cmd);
- _top->waitForMessages(1, 5);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, _top->getNumReplies());
- std::shared_ptr<api::RequestBucketInfoReply> reply(
- std::dynamic_pointer_cast<api::RequestBucketInfoReply>(
- _top->getReply(0)));
- _top->reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::NOT_READY),
- reply->getResult());
- } */
+
std::vector<lib::ClusterState> states;
- states.push_back(lib::ClusterState("version:0"));
- states.push_back(lib::ClusterState("version:1 distributor:1 storage:1"));
- states.push_back(lib::ClusterState(
- "version:2 distributor:3 .1.s:i .2.s:d storage:4"));
- states.push_back(lib::ClusterState(
- "version:3 distributor:3 .1.s:i .2.s:d storage:4 .3.s:d"));
- states.push_back(lib::ClusterState(
- "version:4 distributor:3 .1.s:i .2.s:d storage:4"));
+ states.emplace_back("version:0");
+ states.emplace_back("version:1 distributor:1 storage:1");
+ states.emplace_back("version:2 distributor:3 .1.s:i .2.s:d storage:4");
+ states.emplace_back("version:3 distributor:3 .1.s:i .2.s:d storage:4 .3.s:d");
+ states.emplace_back("version:4 distributor:3 .1.s:i .2.s:d storage:4");
_node->setClusterState(states.back());
for (uint32_t i=0; i<states.size(); ++i) {
@@ -419,11 +332,11 @@ void BucketManagerTest::testRequestBucketInfoWithState()
_manager->onDown(cmd);
}
- // Send a request bucket info command that will be outdated and failed.
+ // Send a request bucket info command that will be outdated and failed.
std::shared_ptr<api::RequestBucketInfoCommand> cmd1(
new api::RequestBucketInfoCommand(makeBucketSpace(), 0, states[1]));
- // Send two request bucket info commands that will be processed together
- // when the bucket manager is idle, as states are equivalent
+ // Send two request bucket info commands that will be processed together
+ // when the bucket manager is idle, as states are equivalent
std::shared_ptr<api::RequestBucketInfoCommand> cmd2(
new api::RequestBucketInfoCommand(makeBucketSpace(), 0, states[2]));
std::shared_ptr<api::RequestBucketInfoCommand> cmd3(
@@ -457,104 +370,29 @@ void BucketManagerTest::testRequestBucketInfoWithState()
std::shared_ptr<api::RequestBucketInfoReply> reply3(
replies[cmd3->getMsgId()]);
_top->reset();
- CPPUNIT_ASSERT(reply1.get());
- CPPUNIT_ASSERT(reply2.get());
- CPPUNIT_ASSERT(reply3.get());
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::REJECTED,
+ ASSERT_TRUE(reply1.get());
+ ASSERT_TRUE(reply2.get());
+ ASSERT_TRUE(reply3.get());
+ EXPECT_EQ(api::ReturnCode(api::ReturnCode::REJECTED,
"Ignoring bucket info request for cluster state version 1 as "
"versions from version 2 differs from this state."),
reply1->getResult());
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::REJECTED,
+ EXPECT_EQ(api::ReturnCode(api::ReturnCode::REJECTED,
"There is already a newer bucket info request for "
"this node from distributor 0"),
reply2->getResult());
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::OK),
+ EXPECT_EQ(api::ReturnCode(api::ReturnCode::OK),
reply3->getResult());
api::RequestBucketInfoReply::Entry entry;
- CPPUNIT_ASSERT_EQUAL((size_t) 18, reply3->getBucketInfo().size());
+ ASSERT_EQ(18u, reply3->getBucketInfo().size());
entry = api::RequestBucketInfoReply::Entry(
document::BucketId(16, 0xe8c8), api::BucketInfo(0x79d04f78, 11153, 1851385240u));
- CPPUNIT_ASSERT_EQUAL(entry, reply3->getBucketInfo()[0]);
+ EXPECT_EQ(entry, reply3->getBucketInfo()[0]);
}
}
-namespace {
- struct PopenWrapper {
- FILE* _file;
- std::vector<char> _buffer;
- uint32_t _index;
- uint32_t _size;
- bool _eof;
-
- PopenWrapper(const std::string& cmd)
- : _buffer(65536, '\0'), _index(0), _size(0), _eof(false)
- {
- _file = popen(cmd.c_str(), "r");
- if (_file == 0) {
- throw vespalib::Exception("Failed to run '" + cmd
- + "' in popen: " + strerror(errno), VESPA_STRLOC);
- }
- }
-
- const char* getNextLine() {
- if (_eof && _size == 0) return 0;
- // Check if we have a newline waiting
- char* newline = strchr(&_buffer[_index], '\n');
- // If not try to get one
- if (_eof) {
- newline = &_buffer[_index + _size];
- } else if (newline == 0) {
- // If we index is passed half the buffer, reposition
- if (_index > _buffer.size() / 2) {
- memcpy(&_buffer[0], &_buffer[_index], _size);
- _index = 0;
- }
- // Verify we have space to write to
- if (_index + _size >= _buffer.size()) {
- throw vespalib::Exception("No newline could be find in "
- "half the buffer size. Wrapper not designed to "
- "handle that long lines (1)", VESPA_STRLOC);
- }
- // Fill up buffer
- size_t bytesRead = fread(&_buffer[_index + _size],
- 1, _buffer.size() - _index - _size - 1,
- _file);
- if (bytesRead == 0) {
- if (!feof(_file)) {
- throw vespalib::Exception("Failed to run fgets: "
- + std::string(strerror(errno)), VESPA_STRLOC);
- } else {
- _eof = true;
- }
- } else {
- _size += bytesRead;
- }
- newline = strchr(&_buffer[_index], '\n');
- if (newline == 0) {
- if (_eof) {
- if (_size == 0) return 0;
- } else {
- throw vespalib::Exception("No newline could be find in "
- "half the buffer size. Wrapper not designed to "
- "handle that long lines (2)", VESPA_STRLOC);
- }
- }
- }
- *newline = '\0';
- ++newline;
- const char* line = &_buffer[_index];
- uint32_t strlen = (newline - line);
- _index += strlen;
- _size -= strlen;
- return line;
- }
- };
-}
-
-void BucketManagerTest::testRequestBucketInfoWithList()
-{
- TestName("BucketManagerTest::testRequestBucketInfoWithList()");
+TEST_F(BucketManagerTest, request_bucket_info_with_list) {
setupTestEnvironment();
addBucketsToDB(30);
_top->open();
@@ -562,39 +400,26 @@ void BucketManagerTest::testRequestBucketInfoWithList()
_top->doneInit();
{
std::vector<document::BucketId> bids;
- bids.push_back(document::BucketId(16, 0xe8c8));
+ bids.emplace_back(16, 0xe8c8);
- std::shared_ptr<api::RequestBucketInfoCommand> cmd(
- new api::RequestBucketInfoCommand(makeBucketSpace(), bids));
+ auto cmd = std::make_shared<api::RequestBucketInfoCommand>(makeBucketSpace(), bids);
_top->sendDown(cmd);
_top->waitForMessages(1, 5);
ASSERT_DUMMYLINK_REPLY_COUNT(_top, 1);
- std::shared_ptr<api::RequestBucketInfoReply> reply(
- std::dynamic_pointer_cast<api::RequestBucketInfoReply>(
- _top->getReply(0)));
+ auto reply = std::dynamic_pointer_cast<api::RequestBucketInfoReply>(_top->getReply(0));
_top->reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::OK),
- reply->getResult());
- if (reply->getBucketInfo().size() > 1) {
- std::cerr << "Too many replies found\n";
- for (uint32_t i=0; i<reply->getBucketInfo().size(); ++i) {
- std::cerr << reply->getBucketInfo()[i] << "\n";
- }
- }
- CPPUNIT_ASSERT_EQUAL((size_t) 1, reply->getBucketInfo().size());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(api::ReturnCode(api::ReturnCode::OK), reply->getResult());
+ ASSERT_EQ(1u, reply->getBucketInfo().size());
api::RequestBucketInfoReply::Entry entry(
document::BucketId(16, 0xe8c8),
api::BucketInfo(0x79d04f78, 11153, 1851385240u));
- CPPUNIT_ASSERT_EQUAL(entry, reply->getBucketInfo()[0]);
+ EXPECT_EQ(entry, reply->getBucketInfo()[0]);
}
}
-void
-BucketManagerTest::testSwallowNotifyBucketChangeReply()
-{
- TestName("BucketManagerTest::testSwallowNotifyBucketChangeReply()");
+TEST_F(BucketManagerTest, swallow_notify_bucket_change_reply) {
setupTestEnvironment();
addBucketsToDB(30);
_top->open();
@@ -603,17 +428,14 @@ BucketManagerTest::testSwallowNotifyBucketChangeReply()
api::NotifyBucketChangeCommand cmd(makeDocumentBucket(document::BucketId(1, 16)),
api::BucketInfo());
- std::shared_ptr<api::NotifyBucketChangeReply> reply(
- new api::NotifyBucketChangeReply(cmd));
+ auto reply = std::make_shared<api::NotifyBucketChangeReply>(cmd);
_top->sendDown(reply);
// Should not leave the bucket manager.
- CPPUNIT_ASSERT_EQUAL(0, (int)_bottom->getNumCommands());
+ EXPECT_EQ(0u, _bottom->getNumCommands());
}
-void
-BucketManagerTest::testMetricsGeneration()
-{
+TEST_F(BucketManagerTest, metrics_generation) {
setupTestEnvironment();
_top->open();
// Add 3 buckets; 2 ready, 1 active. 300 docs total, 600 bytes total.
@@ -633,19 +455,18 @@ BucketManagerTest::testMetricsGeneration()
}
_node->getDoneInitializeHandler().notifyDoneInitializing();
_top->doneInit();
- vespalib::Monitor l;
- _manager->updateMetrics(BucketManager::MetricLockGuard(l));
-
- CPPUNIT_ASSERT_EQUAL(size_t(2), _manager->_metrics->disks.size());
- const DataStoredMetrics& m(*_manager->_metrics->disks[0]);
- CPPUNIT_ASSERT_EQUAL(int64_t(3), m.buckets.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(300), m.docs.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(600), m.bytes.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(1), m.active.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(2), m.ready.getLast());
+ trigger_metric_manager_update();
+
+ ASSERT_EQ(2u, bucket_manager_metrics().disks.size());
+ const DataStoredMetrics& m(*bucket_manager_metrics().disks[0]);
+ EXPECT_EQ(3, m.buckets.getLast());
+ EXPECT_EQ(300, m.docs.getLast());
+ EXPECT_EQ(600, m.bytes.getLast());
+ EXPECT_EQ(1, m.active.getLast());
+ EXPECT_EQ(2, m.ready.getLast());
}
-void BucketManagerTest::metrics_are_tracked_per_bucket_space() {
+TEST_F(BucketManagerTest, metrics_are_tracked_per_bucket_space) {
setupTestEnvironment();
_top->open();
auto& repo = _node->getComponentRegister().getBucketSpaceRepo();
@@ -669,25 +490,24 @@ void BucketManagerTest::metrics_are_tracked_per_bucket_space() {
}
_node->getDoneInitializeHandler().notifyDoneInitializing();
_top->doneInit();
- vespalib::Monitor l;
- _manager->updateMetrics(BucketManager::MetricLockGuard(l));
+ trigger_metric_manager_update();
- auto& spaces = _manager->_metrics->bucket_spaces;
+ auto& spaces = bucket_manager_metrics().bucket_spaces;
auto default_m = spaces.find(document::FixedBucketSpaces::default_space());
- CPPUNIT_ASSERT(default_m != spaces.end());
- CPPUNIT_ASSERT_EQUAL(int64_t(1), default_m->second->buckets_total.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(100), default_m->second->docs.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(200), default_m->second->bytes.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(0), default_m->second->active_buckets.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(1), default_m->second->ready_buckets.getLast());
+ ASSERT_TRUE(default_m != spaces.end());
+ EXPECT_EQ(1, default_m->second->buckets_total.getLast());
+ EXPECT_EQ(100, default_m->second->docs.getLast());
+ EXPECT_EQ(200, default_m->second->bytes.getLast());
+ EXPECT_EQ(0, default_m->second->active_buckets.getLast());
+ EXPECT_EQ(1, default_m->second->ready_buckets.getLast());
auto global_m = spaces.find(document::FixedBucketSpaces::global_space());
- CPPUNIT_ASSERT(global_m != spaces.end());
- CPPUNIT_ASSERT_EQUAL(int64_t(1), global_m->second->buckets_total.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(150), global_m->second->docs.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(300), global_m->second->bytes.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(1), global_m->second->active_buckets.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(0), global_m->second->ready_buckets.getLast());
+ ASSERT_TRUE(global_m != spaces.end());
+ EXPECT_EQ(1, global_m->second->buckets_total.getLast());
+ EXPECT_EQ(150, global_m->second->docs.getLast());
+ EXPECT_EQ(300, global_m->second->bytes.getLast());
+ EXPECT_EQ(1, global_m->second->active_buckets.getLast());
+ EXPECT_EQ(0, global_m->second->ready_buckets.getLast());
}
void
@@ -725,7 +545,7 @@ struct WithBuckets {
class ConcurrentOperationFixture {
public:
- ConcurrentOperationFixture(BucketManagerTest& self)
+ explicit ConcurrentOperationFixture(BucketManagerTest& self)
: _self(self),
_state("distributor:1 storage:1")
{
@@ -835,21 +655,20 @@ public:
{
const size_t nTotal = nBucketReplies + 1;
auto replies = awaitAndGetReplies(nTotal);
- CPPUNIT_ASSERT_EQUAL(nTotal, replies.size());
+ ASSERT_EQ(nTotal, replies.size());
for (size_t i = 0; i < nBucketReplies; ++i) {
- CPPUNIT_ASSERT_EQUAL(api::MessageType::REQUESTBUCKETINFO_REPLY,
- replies[i]->getType());
+ ASSERT_EQ(api::MessageType::REQUESTBUCKETINFO_REPLY, replies[i]->getType());
}
- CPPUNIT_ASSERT_EQUAL(msgType, replies[nBucketReplies]->getType());
+ ASSERT_EQ(msgType, replies[nBucketReplies]->getType());
}
void assertReplyOrdering(
const std::vector<const api::MessageType*>& replyTypes)
{
auto replies = awaitAndGetReplies(replyTypes.size());
- CPPUNIT_ASSERT_EQUAL(replyTypes.size(), replies.size());
+ ASSERT_EQ(replyTypes.size(), replies.size());
for (size_t i = 0; i < replyTypes.size(); ++i) {
- CPPUNIT_ASSERT_EQUAL(*replyTypes[i], replies[i]->getType());
+ ASSERT_EQ(*replyTypes[i], replies[i]->getType());
}
}
@@ -901,9 +720,7 @@ private:
lib::ClusterState _state;
};
-void
-BucketManagerTest::testSplitReplyOrderedAfterBucketReply()
-{
+TEST_F(BucketManagerTest, split_reply_ordered_after_bucket_reply) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucketA(17, 0);
document::BucketId bucketB(17, 1);
@@ -924,9 +741,7 @@ BucketManagerTest::testSplitReplyOrderedAfterBucketReply()
1, api::MessageType::SPLITBUCKET_REPLY);
}
-void
-BucketManagerTest::testJoinReplyOrderedAfterBucketReply()
-{
+TEST_F(BucketManagerTest, join_reply_ordered_after_bucket_reply) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucketA(17, 0);
document::BucketId bucketB(17, 1 << 16);
@@ -949,9 +764,7 @@ BucketManagerTest::testJoinReplyOrderedAfterBucketReply()
// Technically, deletes being ordered after bucket info replies won't help
// correctness since buckets are removed from the distributor DB upon _sending_
// the delete and not receiving it.
-void
-BucketManagerTest::testDeleteReplyOrderedAfterBucketReply()
-{
+TEST_F(BucketManagerTest, delete_reply_ordered_after_bucket_reply) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucketA(17, 0);
document::BucketId bucketB(17, 1);
@@ -970,9 +783,7 @@ BucketManagerTest::testDeleteReplyOrderedAfterBucketReply()
1, api::MessageType::DELETEBUCKET_REPLY);
}
-void
-BucketManagerTest::testOnlyEnqueueWhenProcessingRequest()
-{
+TEST_F(BucketManagerTest, only_enqueue_when_processing_request) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucketA(17, 0);
fixture.setUp(WithBuckets()
@@ -990,9 +801,7 @@ BucketManagerTest::testOnlyEnqueueWhenProcessingRequest()
// differently than full bucket info fetches and are not delegated to the
// worker thread. We still require that any split/joins etc are ordered after
// this reply if their reply is sent up concurrently.
-void
-BucketManagerTest::testOrderRepliesAfterBucketSpecificRequest()
-{
+TEST_F(BucketManagerTest, order_replies_after_bucket_specific_request) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucketA(17, 0);
fixture.setUp(WithBuckets()
@@ -1025,14 +834,12 @@ BucketManagerTest::testOrderRepliesAfterBucketSpecificRequest()
1, api::MessageType::SPLITBUCKET_REPLY);
}
-// Test is similar to testOrderRepliesAfterBucketSpecificRequest, but has
+// Test is similar to order_replies_after_bucket_specific_request, but has
// two concurrent bucket info request processing instances going on; one in
// the worker thread and one in the message chain itself. Since we only have
// one queue, we must wait with dispatching replies until _all_ processing
// has ceased.
-void
-BucketManagerTest::testQueuedRepliesOnlyDispatchedWhenAllProcessingDone()
-{
+TEST_F(BucketManagerTest, queued_replies_only_dispatched_when_all_processing_done) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucketA(17, 0);
fixture.setUp(WithBuckets()
@@ -1085,9 +892,9 @@ struct TestParams {
BUILDER_PARAM(std::vector<const api::MessageType*>, expectedOrdering);
};
-TestParams::TestParams() { }
+TestParams::TestParams() = default;
TestParams::TestParams(const TestParams &) = default;
-TestParams::~TestParams() {}
+TestParams::~TestParams() = default;
void
BucketManagerTest::doTestMutationOrdering(
@@ -1140,9 +947,7 @@ BucketManagerTest::doTestConflictingReplyIsEnqueued(
doTestMutationOrdering(fixture, params);
}
-void
-BucketManagerTest::testMutationRepliesForSplitBucketAreEnqueued()
-{
+TEST_F(BucketManagerTest, mutation_replies_for_split_bucket_are_enqueued) {
document::BucketId bucket(17, 0);
doTestConflictingReplyIsEnqueued(
bucket,
@@ -1150,9 +955,7 @@ BucketManagerTest::testMutationRepliesForSplitBucketAreEnqueued()
api::MessageType::SPLITBUCKET_REPLY);
}
-void
-BucketManagerTest::testMutationRepliesForDeletedBucketAreEnqueued()
-{
+TEST_F(BucketManagerTest, mutation_replies_for_deleted_bucket_are_enqueued) {
document::BucketId bucket(17, 0);
doTestConflictingReplyIsEnqueued(
bucket,
@@ -1160,9 +963,7 @@ BucketManagerTest::testMutationRepliesForDeletedBucketAreEnqueued()
api::MessageType::DELETEBUCKET_REPLY);
}
-void
-BucketManagerTest::testMutationRepliesForJoinedBucketAreEnqueued()
-{
+TEST_F(BucketManagerTest, mutation_replies_for_joined_bucket_are_enqueued) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucketA(17, 0);
document::BucketId bucketB(17, 1 << 16);
@@ -1183,9 +984,7 @@ BucketManagerTest::testMutationRepliesForJoinedBucketAreEnqueued()
doTestMutationOrdering(fixture, params);
}
-void
-BucketManagerTest::testConflictingPutRepliesAreEnqueued()
-{
+TEST_F(BucketManagerTest, conflicting_put_replies_are_enqueued) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucket(17, 0);
@@ -1200,9 +999,7 @@ BucketManagerTest::testConflictingPutRepliesAreEnqueued()
doTestMutationOrdering(fixture, params);
}
-void
-BucketManagerTest::testConflictingUpdateRepliesAreEnqueued()
-{
+TEST_F(BucketManagerTest, conflicting_update_replies_are_enqueued) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucket(17, 0);
@@ -1223,9 +1020,7 @@ BucketManagerTest::testConflictingUpdateRepliesAreEnqueued()
* resulting from the operation. We have to make sure remapped operations are
* enqueued as well.
*/
-void
-BucketManagerTest::testRemappedMutationIsCheckedAgainstOriginalBucket()
-{
+TEST_F(BucketManagerTest, remapped_mutation_is_checked_against_original_bucket) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucket(17, 0);
document::BucketId remappedToBucket(18, 0);
@@ -1263,9 +1058,7 @@ BucketManagerTest::scheduleBucketInfoRequestWithConcurrentOps(
guard.unlock();
}
-void
-BucketManagerTest::testBucketConflictSetIsClearedBetweenBlockingRequests()
-{
+TEST_F(BucketManagerTest, bucket_conflict_set_is_cleared_between_blocking_requests) {
ConcurrentOperationFixture fixture(*this);
document::BucketId firstConflictBucket(17, 0);
document::BucketId secondConflictBucket(18, 0);
@@ -1308,9 +1101,7 @@ BucketManagerTest::sendSingleBucketInfoRequest(const document::BucketId& id)
_top->sendDown(infoCmd);
}
-void
-BucketManagerTest::testConflictSetOnlyClearedAfterAllBucketRequestsDone()
-{
+TEST_F(BucketManagerTest, conflict_set_only_cleared_after_all_bucket_requests_done) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucketA(16, 0);
document::BucketId bucketB(16, 1);
@@ -1371,22 +1162,17 @@ BucketManagerTest::assertRequestWithBadHashIsRejected(
_top->sendDown(infoCmd);
auto replies = fixture.awaitAndGetReplies(1);
auto& reply = dynamic_cast<api::RequestBucketInfoReply&>(*replies[0]);
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::REJECTED,
- reply.getResult().getResult());
+ ASSERT_EQ(api::ReturnCode::REJECTED, reply.getResult().getResult());
}
-void
-BucketManagerTest::testRejectRequestWithMismatchingDistributionHash()
-{
+TEST_F(BucketManagerTest, reject_request_with_mismatching_distribution_hash) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucket(17, 0);
fixture.setUp(WithBuckets().add(bucket, api::BucketInfo(50, 100, 200)));
assertRequestWithBadHashIsRejected(fixture);
}
-void
-BucketManagerTest::testDbNotIteratedWhenAllRequestsRejected()
-{
+TEST_F(BucketManagerTest, db_not_iterated_when_all_requests_rejected) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucket(17, 0);
fixture.setUp(WithBuckets().add(bucket, api::BucketInfo(50, 100, 200)));
@@ -1405,7 +1191,7 @@ BucketManagerTest::testDbNotIteratedWhenAllRequestsRejected()
}
// TODO remove on Vespa 8 - this is a workaround for https://github.com/vespa-engine/vespa/issues/8475
-void BucketManagerTest::fall_back_to_legacy_global_distribution_hash_on_mismatch() {
+TEST_F(BucketManagerTest, fall_back_to_legacy_global_distribution_hash_on_mismatch) {
ConcurrentOperationFixture f(*this);
f.set_grouped_distribution_configs();
@@ -1416,7 +1202,7 @@ void BucketManagerTest::fall_back_to_legacy_global_distribution_hash_on_mismatch
_top->sendDown(infoCmd);
auto replies = f.awaitAndGetReplies(1);
auto& reply = dynamic_cast<api::RequestBucketInfoReply&>(*replies[0]);
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::OK, reply.getResult().getResult()); // _not_ REJECTED
+ EXPECT_EQ(api::ReturnCode::OK, reply.getResult().getResult()); // _not_ REJECTED
}
} // storage
diff --git a/storage/src/tests/bucketdb/initializertest.cpp b/storage/src/tests/bucketdb/initializertest.cpp
index 2141dbf4b53..509824d2168 100644
--- a/storage/src/tests/bucketdb/initializertest.cpp
+++ b/storage/src/tests/bucketdb/initializertest.cpp
@@ -2,33 +2,31 @@
/**
* Tests storage initialization without depending on persistence layer.
*/
-#include <vespa/storage/bucketdb/storagebucketdbinitializer.h>
-
#include <vespa/document/base/testdocman.h>
-
+#include <vespa/document/bucket/fixed_bucket_spaces.h>
+#include <vespa/storage/bucketdb/lockablemap.hpp>
+#include <vespa/storage/bucketdb/storagebucketdbinitializer.h>
#include <vespa/storage/persistence/filestorage/filestormanager.h>
#include <vespa/storageapi/message/bucket.h>
#include <vespa/storageapi/message/persistence.h>
#include <vespa/storageapi/message/state.h>
#include <tests/common/teststorageapp.h>
#include <tests/common/dummystoragelink.h>
-#include <tests/common/testhelper.h>
-#include <vespa/vdstestlib/cppunit/dirconfig.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
-#include <vespa/storage/bucketdb/lockablemap.hpp>
-#include <vespa/vdstestlib/cppunit/dirconfig.hpp>
-#include <vespa/document/bucket/fixed_bucket_spaces.h>
+#include <tests/common/testhelper.h> // TODO decouple from CppUnit
+#include <vespa/vdstestlib/cppunit/dirconfig.hpp> // TODO decouple from CppUnit
+#include <vespa/vespalib/gtest/gtest.h>
#include <vespa/log/log.h>
LOG_SETUP(".test.bucketdb.initializing");
using document::FixedBucketSpaces;
+using namespace ::testing;
namespace storage {
typedef uint16_t PartitionId;
-struct InitializerTest : public CppUnit::TestFixture {
+struct InitializerTest : public Test {
class InitParams {
vdstestlib::DirConfig config;
@@ -59,14 +57,8 @@ struct InitializerTest : public CppUnit::TestFixture {
bucketWrongDisk(false),
bucketMultipleDisks(false),
failingListRequest(false),
- failingInfoRequest(false) {}
-
- void setAllFailures() {
- bucketWrongDisk = true;
- bucketMultipleDisks = true;
- failingListRequest = true;
- failingInfoRequest = true;
- }
+ failingInfoRequest(false)
+ {}
vdstestlib::DirConfig& getConfig() {
if (!configFinalized) {
@@ -83,104 +75,45 @@ struct InitializerTest : public CppUnit::TestFixture {
document::TestDocMan _docMan;
- void testInitialization(InitParams& params);
-
- /**
- * Test that the status page can be shown during init without a deadlock
- * or crash or anything. Don't validate much output, it might change.
- */
- void testStatusPage();
-
- /** Test initializing with an empty node. */
- void testInitEmptyNode() {
- InitParams params;
- params.docsPerDisk = 0;
- testInitialization(params);
- }
- /** Test initializing with some data on single disk. */
- void testInitSingleDisk() {
- InitParams params;
- params.diskCount = DiskCount(1);
- testInitialization(params);
- }
- /** Test initializing with multiple disks. */
- void testInitMultiDisk() {
- InitParams params;
- testInitialization(params);
- }
- /** Test initializing with one of the disks being bad. */
- void testInitFailingMiddleDisk() {
- InitParams params;
- params.disksDown.insert(1);
- testInitialization(params);
- }
- /** Test initializing with last disk being bad. */
- void testInitFailingLastDisk() {
- InitParams params;
- params.disksDown.insert(params.diskCount - 1);
- testInitialization(params);
- }
- /** Test initializing with bucket on wrong disk. */
- void testInitBucketOnWrongDisk() {
- InitParams params;
- params.bucketWrongDisk = true;
- params.bucketBitsUsed = 58;
- testInitialization(params);
- }
- /** Test initializing with bucket on multiple disks. */
- void testInitBucketOnMultipleDisks() {
- InitParams params;
- params.bucketMultipleDisks = true;
- params.bucketBitsUsed = 58;
- testInitialization(params);
- }
- /** Test initializing with failing list request. */
- void testInitFailingListRequest() {
- InitParams params;
- params.failingListRequest = true;
- testInitialization(params);
- }
- void testInitFailingInfoRequest() {
- InitParams params;
- params.failingInfoRequest = true;
- testInitialization(params);
- }
- /** Test initializing with everything being wrong at once. */
- void testAllFailures() {
- InitParams params;
- params.docsPerDisk = 100;
- params.diskCount = DiskCount(10);
- params.disksDown.insert(0);
- params.disksDown.insert(2);
- params.disksDown.insert(3);
- params.disksDown.insert(9);
- params.setAllFailures();
- testInitialization(params);
- }
- void testCommandBlockingDuringInit();
-
- void testBucketProgressCalculator();
-
- void testBucketsInitializedByLoad();
-
- CPPUNIT_TEST_SUITE(InitializerTest);
- CPPUNIT_TEST(testInitEmptyNode);
- CPPUNIT_TEST(testInitSingleDisk);
- CPPUNIT_TEST(testInitMultiDisk);
- CPPUNIT_TEST(testInitFailingMiddleDisk);
- CPPUNIT_TEST(testInitFailingLastDisk);
- CPPUNIT_TEST(testInitBucketOnWrongDisk);
- //CPPUNIT_TEST(testInitBucketOnMultipleDisks);
- //CPPUNIT_TEST(testStatusPage);
- //CPPUNIT_TEST(testCommandBlockingDuringInit);
- //CPPUNIT_TEST(testAllFailures);
- CPPUNIT_TEST(testBucketProgressCalculator);
- CPPUNIT_TEST(testBucketsInitializedByLoad);
- CPPUNIT_TEST_SUITE_END();
-
+ void do_test_initialization(InitParams& params);
};
-CPPUNIT_TEST_SUITE_REGISTRATION(InitializerTest);
+/** Test initializing with an empty node. */
+TEST_F(InitializerTest, testInitEmptyNode) {
+ InitParams params;
+ params.docsPerDisk = 0;
+ do_test_initialization(params);
+}
+/** Test initializing with some data on single disk. */
+TEST_F(InitializerTest, testInitSingleDisk) {
+ InitParams params;
+ params.diskCount = DiskCount(1);
+ do_test_initialization(params);
+}
+/** Test initializing with multiple disks. */
+TEST_F(InitializerTest, testInitMultiDisk) {
+ InitParams params;
+ do_test_initialization(params);
+}
+/** Test initializing with one of the disks being bad. */
+TEST_F(InitializerTest, testInitFailingMiddleDisk) {
+ InitParams params;
+ params.disksDown.insert(1);
+ do_test_initialization(params);
+}
+/** Test initializing with last disk being bad. */
+TEST_F(InitializerTest, testInitFailingLastDisk) {
+ InitParams params;
+ params.disksDown.insert(params.diskCount - 1);
+ do_test_initialization(params);
+}
+/** Test initializing with bucket on wrong disk. */
+TEST_F(InitializerTest, testInitBucketOnWrongDisk) {
+ InitParams params;
+ params.bucketWrongDisk = true;
+ params.bucketBitsUsed = 58;
+ do_test_initialization(params);
+}
namespace {
// Data kept on buckets we're using in test.
@@ -202,7 +135,7 @@ struct BucketData {
return copy;
}
};
-// Data reciding on one disk
+// Data residing on one disk
typedef std::map<document::BucketId, BucketData> DiskData;
struct BucketInfoLogger {
std::map<PartitionId, DiskData>& map;
@@ -215,11 +148,8 @@ struct BucketInfoLogger {
{
document::BucketId bucket(
document::BucketId::keyToBucketId(revBucket));
- CPPUNIT_ASSERT(bucket.getRawId() != 0);
- CPPUNIT_ASSERT_MSG(
- "Found invalid bucket in database: " + bucket.toString()
- + " " + entry.getBucketInfo().toString(),
- entry.getBucketInfo().valid());
+ assert(bucket.getRawId() != 0);
+ assert(entry.getBucketInfo().valid());
DiskData& ddata(map[entry.disk]);
BucketData& bdata(ddata[bucket]);
bdata.info = entry.getBucketInfo();
@@ -277,10 +207,10 @@ buildBucketInfo(const document::TestDocMan& docMan,
while (params.disksDown.find(partition) != params.disksDown.end()) {
partition = (partition + 1) % params.diskCount;;
}
- LOG(info, "Putting bucket %s on wrong disk %u instead of %u",
+ LOG(debug, "Putting bucket %s on wrong disk %u instead of %u",
bid.toString().c_str(), partition, correctPart);
}
- LOG(info, "Putting bucket %s on disk %u",
+ LOG(debug, "Putting bucket %s on disk %u",
bid.toString().c_str(), partition);
BucketData& data(result[partition][bid]);
data.info.setDocumentCount(data.info.getDocumentCount() + 1);
@@ -299,84 +229,65 @@ void verifyEqual(std::map<PartitionId, DiskData>& org,
while (part1 != org.end() && part2 != existing.end()) {
if (part1->first < part2->first) {
if (!part1->second.empty()) {
- std::ostringstream ost;
- ost << "No data in partition " << part1->first << " found.";
- CPPUNIT_FAIL(ost.str());
+ FAIL() << "No data in partition " << part1->first << " found.";
}
++part1;
} else if (part1->first > part2->first) {
if (!part2->second.empty()) {
- std::ostringstream ost;
- ost << "Found data in partition " << part2->first
- << " which should not exist.";
- CPPUNIT_FAIL(ost.str());
+ FAIL() << "Found data in partition " << part2->first
+ << " which should not exist.";
}
++part2;
} else {
- DiskData::const_iterator bucket1(part1->second.begin());
- DiskData::const_iterator bucket2(part2->second.begin());
+ auto bucket1 = part1->second.begin();
+ auto bucket2 = part2->second.begin();
while (bucket1 != part1->second.end()
&& bucket2 != part2->second.end())
{
if (bucket1->first < bucket2->first) {
- std::ostringstream ost;
- ost << "No data in partition " << part1->first
- << " for bucket " << bucket1->first << " found.";
- CPPUNIT_FAIL(ost.str());
+ FAIL() << "No data in partition " << part1->first
+ << " for bucket " << bucket1->first << " found.";
} else if (bucket1->first.getId() > bucket2->first.getId())
{
- std::ostringstream ost;
- ost << "Found data in partition " << part2->first
- << " for bucket " << bucket2->first
- << " which should not exist.";
- CPPUNIT_FAIL(ost.str());
+ FAIL() << "Found data in partition " << part2->first
+ << " for bucket " << bucket2->first
+ << " which should not exist.";
} else if (!(bucket1->second.info == bucket2->second.info)) {
- std::ostringstream ost;
- ost << "Bucket " << bucket1->first << " on partition "
- << part1->first << " has bucket info "
- << bucket2->second.info << " and not "
- << bucket1->second.info << " as expected.";
- CPPUNIT_FAIL(ost.str());
+ FAIL() << "Bucket " << bucket1->first << " on partition "
+ << part1->first << " has bucket info "
+ << bucket2->second.info << " and not "
+ << bucket1->second.info << " as expected.";
}
++bucket1;
++bucket2;
++equalCount;
}
if (bucket1 != part1->second.end()) {
- std::ostringstream ost;
- ost << "No data in partition " << part1->first
- << " for bucket " << bucket1->first << " found.";
- CPPUNIT_FAIL(ost.str());
+ FAIL() << "No data in partition " << part1->first
+ << " for bucket " << bucket1->first << " found.";
}
if (bucket2 != part2->second.end()) {
- std::ostringstream ost;
- ost << "Found data in partition " << part2->first
- << " for bucket " << bucket2->first
- << " which should not exist.";
- CPPUNIT_FAIL(ost.str());
+ FAIL() << "Found data in partition " << part2->first
+ << " for bucket " << bucket2->first
+ << " which should not exist.";
}
++part1;
++part2;
}
}
if (part1 != org.end() && !part1->second.empty()) {
- std::ostringstream ost;
- ost << "No data in partition " << part1->first << " found.";
- CPPUNIT_FAIL(ost.str());
+ FAIL() << "No data in partition " << part1->first << " found.";
}
if (part2 != existing.end() && !part2->second.empty()) {
- std::ostringstream ost;
- ost << "Found data in partition " << part2->first
- << " which should not exist.";
- CPPUNIT_FAIL(ost.str());
+ FAIL() << "Found data in partition " << part2->first
+ << " which should not exist.";
}
- //std::cerr << "\n " << equalCount << " buckets were matched. ";
}
struct MessageCallback
{
public:
- virtual ~MessageCallback() {}
+ virtual ~MessageCallback() = default;
virtual void onMessage(const api::StorageMessage&) = 0;
};
@@ -413,7 +324,7 @@ struct FakePersistenceLayer : public StorageLink {
<< "it there.";
fatal(ost.str());
} else {
- DiskData::const_iterator it2(it->second.find(bucket));
+ auto it2 = it->second.find(bucket);
if (it2 == it->second.end()) {
std::ostringstream ost;
ost << "Have no data for " << bucket << " on disk " << partition
@@ -433,10 +344,9 @@ struct FakePersistenceLayer : public StorageLink {
messageCallback->onMessage(*msg);
}
if (msg->getType() == api::MessageType::INTERNAL) {
- api::InternalCommand& cmd(
- dynamic_cast<api::InternalCommand&>(*msg));
+ auto& cmd = dynamic_cast<api::InternalCommand&>(*msg);
if (cmd.getType() == ReadBucketList::ID) {
- ReadBucketList& rbl(dynamic_cast<ReadBucketList&>(cmd));
+ auto& rbl = dynamic_cast<ReadBucketList&>(cmd);
ReadBucketListReply::SP reply(new ReadBucketListReply(rbl));
std::map<PartitionId, DiskData>::const_iterator it(
data.find(rbl.getPartition()));
@@ -448,10 +358,8 @@ struct FakePersistenceLayer : public StorageLink {
fatal(ost.str());
} else {
if (cmd.getBucket().getBucketSpace() == FixedBucketSpaces::default_space()) {
- for (DiskData::const_iterator it2 = it->second.begin();
- it2 != it->second.end(); ++it2)
- {
- reply->getBuckets().push_back(it2->first);
+ for (const auto& bd : it->second) {
+ reply->getBuckets().push_back(bd.first);
}
}
}
@@ -461,7 +369,7 @@ struct FakePersistenceLayer : public StorageLink {
}
sendUp(reply);
} else if (cmd.getType() == ReadBucketInfo::ID) {
- ReadBucketInfo& rbi(dynamic_cast<ReadBucketInfo&>(cmd));
+ auto& rbi = dynamic_cast<ReadBucketInfo&>(cmd);
ReadBucketInfoReply::SP reply(new ReadBucketInfoReply(rbi));
StorBucketDatabase::WrappedEntry entry(
bucketDatabase.get(rbi.getBucketId(), "fakelayer"));
@@ -483,8 +391,7 @@ struct FakePersistenceLayer : public StorageLink {
}
sendUp(reply);
} else if (cmd.getType() == InternalBucketJoinCommand::ID) {
- InternalBucketJoinCommand& ibj(
- dynamic_cast<InternalBucketJoinCommand&>(cmd));
+ auto& ibj = dynamic_cast<InternalBucketJoinCommand&>(cmd);
InternalBucketJoinReply::SP reply(
new InternalBucketJoinReply(ibj));
StorBucketDatabase::WrappedEntry entry(
@@ -521,20 +428,14 @@ struct FakePersistenceLayer : public StorageLink {
} // end of anonymous namespace
-#define CPPUNIT_ASSERT_METRIC_SET(x) \
- CPPUNIT_ASSERT(initializer->getMetrics().x.getValue() > 0);
-
void
-InitializerTest::testInitialization(InitParams& params)
+InitializerTest::do_test_initialization(InitParams& params)
{
std::map<PartitionId, DiskData> data(buildBucketInfo(_docMan, params));
spi::PartitionStateList partitions(params.diskCount);
- for (std::set<uint32_t>::const_iterator it = params.disksDown.begin();
- it != params.disksDown.end(); ++it)
- {
- partitions[*it] = spi::PartitionState(
- spi::PartitionState::DOWN, "Set down in test");
+ for (const auto& p : params.disksDown) {
+ partitions[p] = spi::PartitionState(spi::PartitionState::DOWN, "Set down in test");
}
TestServiceLayerApp node(params.diskCount, params.nodeIndex,
params.getConfig().getConfigId());
@@ -549,233 +450,32 @@ InitializerTest::testInitialization(InitParams& params)
top.push_back(StorageLink::UP(bottom = new FakePersistenceLayer(
data, node.getStorageBucketDatabase())));
- LOG(info, "STARTING INITIALIZATION");
+ LOG(debug, "STARTING INITIALIZATION");
top.open();
- /*
- FileChanger updater(config, nodeIndex, params, orgBucketDatabase);
- if (params.bucketWrongDisk) updater.moveBucketWrongDisk();
- if (params.bucketMultipleDisks) updater.copyBucketWrongDisk();
- if (params.failingListRequest) {
- updater.removeDirPermission(6, 'r');
- updater.removeBucketsFromDBAtPath(6);
- }
- if (params.failingInfoRequest) {
- updater.removeFilePermission();
- orgBucketDatabase.erase(updater.getBucket(8));
- }
- */
-
node.waitUntilInitialized(initializer);
std::map<PartitionId, DiskData> initedBucketDatabase(
createMapFromBucketDatabase(node.getStorageBucketDatabase()));
verifyEqual(data, initedBucketDatabase);
- /*
- if (params.bucketWrongDisk) {
- CPPUNIT_ASSERT_METRIC_SET(_wrongDisk);
- }
- if (params.bucketMultipleDisks) {
- CPPUNIT_ASSERT_METRIC_SET(_joinedCount);
- }
- */
-}
-
-/*
-namespace {
- enum State { LISTING, INFO, DONE };
- void verifyStatusContent(StorageBucketDBInitializer& initializer,
- State state)
- {
- std::ostringstream ost;
- initializer.reportStatus(ost, framework::HttpUrlPath(""));
- std::string status = ost.str();
-
- if (state == LISTING) {
- CPPUNIT_ASSERT_CONTAIN("List phase completed: false", status);
- CPPUNIT_ASSERT_CONTAIN("Initialization completed: false", status);
- } else if (state == INFO) {
- CPPUNIT_ASSERT_CONTAIN("List phase completed: true", status);
- CPPUNIT_ASSERT_CONTAIN("Initialization completed: false", status);
- } else if (state == DONE) {
- CPPUNIT_ASSERT_CONTAIN("List phase completed: true", status);
- CPPUNIT_ASSERT_CONTAIN("Initialization completed: true", status);
- }
- }
-}
-
-void
-InitializerTest::testStatusPage()
-{
- // Set up surrounding system to create a single bucket for us to
- // do init on.
- vdstestlib::DirConfig config(getStandardConfig(true));
- uint16_t nodeIndex(
- config.getConfig("stor-server").getValue("node_index", 0));
- InitParams params;
- params.docsPerDisk = 1;
- params.diskCount = 1;
- std::map<document::BucketId, api::BucketInfo> orgBucketDatabase(
- buildBucketInfo(_docMan, config, nodeIndex, 1, 1, params.disksDown));
- FileChanger updater(config, nodeIndex, params, orgBucketDatabase);
-
- // Set up the initializer.
- DummyStorageServer server(config.getConfigId());
- DummyStorageLink top;
- DummyStorageLink *bottom;
- StorageBucketDBInitializer* initializer;
- top.push_back(StorageLink::UP(initializer = new StorageBucketDBInitializer(
- config.getConfigId(), server)));
- top.push_back(StorageLink::UP(bottom = new DummyStorageLink));
-
- // Grab bucket database lock for bucket to init to lock the initializer
- // in the init stage
- StorBucketDatabase::WrappedEntry entry(
- server.getStorageBucketDatabase().get(
- updater.getBucket(0), "testCommandBlocking",
- StorBucketDatabase::LOCK_IF_NONEXISTING_AND_NOT_CREATING));
- // Start the initializer
- top.open();
- bottom->waitForMessages(1, 30);
- verifyStatusContent(*initializer, LISTING);
- // Attempt to send put. Should be blocked
- // Attempt to send request bucket info. Should be blocked.
- // Attempt to send getNodeState. Should not be blocked.
-
- // Unlock bucket in bucket database so listing step can complete.
- // Await read info request being sent down.
- entry.unlock();
- bottom->waitForMessages(1, 30);
- verifyStatusContent(*initializer, INFO);
-
- ReadBucketInfo& cmd(dynamic_cast<ReadBucketInfo&>(*bottom->getCommand(0)));
- ReadBucketInfoReply::SP reply(new ReadBucketInfoReply(cmd));
- bottom->sendUp(reply);
-
- node.waitUntilInitialized(initializer);
- verifyStatusContent(*initializer, DONE);
-
}
-#define ASSERT_BLOCKED(top, bottom, blocks) \
- if (blocks) { \
- top.waitForMessages(1, 30); \
- CPPUNIT_ASSERT_EQUAL(size_t(1), top.getReplies().size()); \
- CPPUNIT_ASSERT_EQUAL(size_t(0), bottom.getCommands().size()); \
- api::StorageReply& reply(dynamic_cast<api::StorageReply&>( \
- *top.getReply(0))); \
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::ABORTED, \
- reply.getResult().getResult()); \
- top.reset(); \
- } else { \
- bottom.waitForMessages(1, 30); \
- CPPUNIT_ASSERT_EQUAL(size_t(0), top.getReplies().size()); \
- CPPUNIT_ASSERT_EQUAL(size_t(1), bottom.getCommands().size()); \
- api::StorageCommand& command(dynamic_cast<api::StorageCommand&>( \
- *bottom.getCommand(0))); \
- (void) command; \
- bottom.reset(); \
- }
-
-namespace {
- void verifyBlockingOn(DummyStorageLink& top,
- DummyStorageLink& bottom,
- bool blockEnabled)
- {
- // Attempt to send get. Should be blocked if block enabled
- {
- api::GetCommand::SP cmd(new api::GetCommand(
- document::BucketId(16, 4),
- document::DocumentId("userdoc:ns:4:test"), true));
- top.sendDown(cmd);
- ASSERT_BLOCKED(top, bottom, blockEnabled);
- }
- // Attempt to send request bucket info. Should be blocked if enabled.
- {
- api::RequestBucketInfoCommand::SP cmd(
- new api::RequestBucketInfoCommand(
- 0, lib::ClusterState("")));
- top.sendDown(cmd);
- ASSERT_BLOCKED(top, bottom, blockEnabled);
- }
- // Attempt to send getNodeState. Should not be blocked.
- {
- api::GetNodeStateCommand::SP cmd(new api::GetNodeStateCommand(
- lib::NodeState::UP(0)));
- top.sendDown(cmd);
- ASSERT_BLOCKED(top, bottom, false);
- }
- }
-}
-
-void
-InitializerTest::testCommandBlockingDuringInit()
-{
- // Set up surrounding system to create a single bucket for us to
- // do init on.
- vdstestlib::DirConfig config(getStandardConfig(true));
- uint16_t nodeIndex(
- config.getConfig("stor-server").getValue("node_index", 0));
- InitParams params;
- params.docsPerDisk = 1;
- params.diskCount = 1;
- std::map<document::BucketId, api::BucketInfo> orgBucketDatabase(
- buildBucketInfo(_docMan, config, nodeIndex, 1, 1, params.disksDown));
- FileChanger updater(config, nodeIndex, params, orgBucketDatabase);
-
- // Set up the initializer.
- DummyStorageServer server(config.getConfigId());
- DummyStorageLink top;
- DummyStorageLink *bottom;
- StorageBucketDBInitializer* initializer;
- top.push_back(StorageLink::UP(initializer = new StorageBucketDBInitializer(
- config.getConfigId(), server)));
- top.push_back(StorageLink::UP(bottom = new DummyStorageLink));
-
- // Grab bucket database lock for bucket to init to lock the initializer
- // in the init stage
- StorBucketDatabase::WrappedEntry entry(
- server.getStorageBucketDatabase().get(
- updater.getBucket(0), "testCommandBlocking",
- StorBucketDatabase::LOCK_IF_NONEXISTING_AND_NOT_CREATING));
- // Start the initializer
- top.open();
- verifyBlockingOn(top, *bottom, true);
- // Attempt to send put. Should be blocked
- // Attempt to send request bucket info. Should be blocked.
- // Attempt to send getNodeState. Should not be blocked.
-
- // Unlock bucket in bucket database so listing step can complete.
- // Await read info request being sent down.
- entry.unlock();
- bottom->waitForMessages(1, 30);
- dynamic_cast<ReadBucketInfo&>(*bottom->getCommand(0));
- CPPUNIT_ASSERT(!server.isInitialized());
- bottom->reset();
-
- // Retry - Should now not block
- verifyBlockingOn(top, *bottom, false);
-}
-*/
-
-void
-InitializerTest::testBucketProgressCalculator()
-{
+TEST_F(InitializerTest, bucket_progress_calculator) {
using document::BucketId;
StorageBucketDBInitializer::BucketProgressCalculator calc;
// We consider the given bucket as not being completed, so progress
// will be _up to_, not _including_ the bucket. This means we can never
// reach 1.0, so progress completion must be handled by other logic!
- CPPUNIT_ASSERT_EQUAL(0.0, calc.calculateProgress(BucketId(1, 0)));
- CPPUNIT_ASSERT_EQUAL(0.0, calc.calculateProgress(BucketId(32, 0)));
+ EXPECT_DOUBLE_EQ(0.0, calc.calculateProgress(BucketId(1, 0)));
+ EXPECT_DOUBLE_EQ(0.0, calc.calculateProgress(BucketId(32, 0)));
- CPPUNIT_ASSERT_EQUAL(0.5, calc.calculateProgress(BucketId(1, 1)));
+ EXPECT_DOUBLE_EQ(0.5, calc.calculateProgress(BucketId(1, 1)));
- CPPUNIT_ASSERT_EQUAL(0.25, calc.calculateProgress(BucketId(2, 2)));
- CPPUNIT_ASSERT_EQUAL(0.5, calc.calculateProgress(BucketId(2, 1)));
- CPPUNIT_ASSERT_EQUAL(0.75, calc.calculateProgress(BucketId(2, 3)));
+ EXPECT_DOUBLE_EQ(0.25, calc.calculateProgress(BucketId(2, 2)));
+ EXPECT_DOUBLE_EQ(0.5, calc.calculateProgress(BucketId(2, 1)));
+ EXPECT_DOUBLE_EQ(0.75, calc.calculateProgress(BucketId(2, 3)));
- CPPUNIT_ASSERT_EQUAL(0.875, calc.calculateProgress(BucketId(3, 7)));
+ EXPECT_DOUBLE_EQ(0.875, calc.calculateProgress(BucketId(3, 7)));
}
struct DatabaseInsertCallback : MessageCallback
@@ -809,7 +509,6 @@ struct DatabaseInsertCallback : MessageCallback
_app.getStateUpdater().getReportedNodeState());
double progress(reportedState->getInitProgress().getValue());
LOG(debug, "reported progress is now %g", progress);
- // CppUnit exceptions are swallowed...
if (progress >= 1.0) {
_errors << "progress exceeded 1.0: " << progress << "\n";
}
@@ -835,8 +534,7 @@ struct DatabaseInsertCallback : MessageCallback
}
if (msg.getType() == api::MessageType::INTERNAL) {
- const api::InternalCommand& cmd(
- dynamic_cast<const api::InternalCommand&>(msg));
+ auto& cmd = dynamic_cast<const api::InternalCommand&>(msg);
if (cmd.getType() == ReadBucketInfo::ID) {
if (cmd.getPriority() != _expectedReadBucketPriority) {
_errors << "expected ReadBucketInfo priority of "
@@ -871,9 +569,7 @@ struct DatabaseInsertCallback : MessageCallback
}
};
-void
-InitializerTest::testBucketsInitializedByLoad()
-{
+TEST_F(InitializerTest, buckets_initialized_by_load) {
InitParams params;
params.docsPerDisk = 100;
params.diskCount = DiskCount(1);
@@ -911,8 +607,8 @@ InitializerTest::testBucketsInitializedByLoad()
// has been set.
top.close();
- CPPUNIT_ASSERT(callback._invoked);
- CPPUNIT_ASSERT_EQUAL(std::string(), callback._errors.str());
+ ASSERT_TRUE(callback._invoked);
+ EXPECT_EQ(std::string(), callback._errors.str());
std::map<PartitionId, DiskData> initedBucketDatabase(
createMapFromBucketDatabase(node.getStorageBucketDatabase()));
@@ -922,11 +618,10 @@ InitializerTest::testBucketsInitializedByLoad()
node.getStateUpdater().getReportedNodeState());
double progress(reportedState->getInitProgress().getValue());
- CPPUNIT_ASSERT(progress >= 1.0);
- CPPUNIT_ASSERT(progress < 1.0001);
+ EXPECT_GE(progress, 1.0);
+ EXPECT_LT(progress, 1.0001);
- CPPUNIT_ASSERT_EQUAL(params.bucketBitsUsed,
- reportedState->getMinUsedBits());
+ EXPECT_EQ(params.bucketBitsUsed, reportedState->getMinUsedBits());
}
} // storage
diff --git a/storage/src/vespa/storage/common/storagelinkqueued.cpp b/storage/src/vespa/storage/common/storagelinkqueued.cpp
index 5b3aacd11de..036a1269958 100644
--- a/storage/src/vespa/storage/common/storagelinkqueued.cpp
+++ b/storage/src/vespa/storage/common/storagelinkqueued.cpp
@@ -65,7 +65,7 @@ void StorageLinkQueued::logError(const char* err) {
};
void StorageLinkQueued::logDebug(const char* err) {
- LOG(info, "%s", err);
+ LOG(debug, "%s", err);
};
template class StorageLinkQueued::Dispatcher<storage::api::StorageMessage>;