author      Henning Baldersheim <balder@yahoo-inc.com>   2017-10-13 11:49:50 +0200
committer   GitHub <noreply@github.com>                  2017-10-13 11:49:50 +0200
commit      025b759b5cf0146cd0cf3f44f686eeace1b28d09 (patch)
tree        2140f3e2e130c3e2d31c096c7711e53a1ad41bed
parent      eea8c949825803bc6413c42c99b2d34178a2ba6b (diff)
parent      909c17c2a8a142a57d65a9c735add7ce0a6deadf (diff)
Merge pull request #3740 from vespa-engine/balder/add-format-security
Balder/add format security
-rw-r--r--   build_settings.cmake                                                                 2
-rw-r--r--   filedistribution/src/vespa/filedistribution/distributor/filedownloader.cpp           6
-rw-r--r--   filedistribution/src/vespa/filedistribution/distributor/filedownloadermanager.cpp    4
-rw-r--r--   filedistribution/src/vespa/filedistribution/model/filedistributionmodelimpl.cpp     16
-rw-r--r--   searchcore/src/vespa/searchcore/fdispatch/search/nodemanager.cpp                     5
-rw-r--r--   storage/src/vespa/storage/distributor/bucketdbupdater.cpp                          135
-rw-r--r--   storage/src/vespa/storage/distributor/bucketdbupdater.h                             78
-rw-r--r--   vespalog/src/test/bufferedlogtest.cpp                                                5
-rw-r--r--   vespalog/src/test/bufferedlogtest.logger1.cpp                                        4
-rw-r--r--   vespalog/src/test/bufferedlogtest.logger1.h                                          2
-rw-r--r--   vespalog/src/test/bufferedlogtest.logger2.cpp                                        4
-rw-r--r--   vespalog/src/test/bufferedlogtest.logger2.h                                          2
-rw-r--r--   vespalog/src/vespa/log/bufferedlogger.h                                              8
-rw-r--r--   vespalog/src/vespa/log/log.cpp                                                      15
-rw-r--r--   vespalog/src/vespa/log/log.h                                                        44
15 files changed, 113 insertions, 217 deletions
diff --git a/build_settings.cmake b/build_settings.cmake
index 425a2eddda7..2cccea9b64f 100644
--- a/build_settings.cmake
+++ b/build_settings.cmake
@@ -21,7 +21,7 @@ set(C_WARN_OPTS "-Winline -Wuninitialized -Werror -Wall -W -Wchar-subscripts -Wc
# Warnings that are specific to C++ compilation
# Note: this is not a union of C_WARN_OPTS, since CMAKE_CXX_FLAGS already includes CMAKE_C_FLAGS, which in turn includes C_WARN_OPTS transitively
-set(CXX_SPECIFIC_WARN_OPTS "-Wsuggest-override -Wnon-virtual-dtor")
+set(CXX_SPECIFIC_WARN_OPTS "-Wsuggest-override -Wnon-virtual-dtor -Wformat-security")
# C and C++ compiler flags
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -O3 -fno-omit-frame-pointer ${C_WARN_OPTS} -fPIC ${VESPA_CXX_ABI_FLAGS} -DBOOST_DISABLE_ASSERTS ${VESPA_CPU_ARCH_FLAGS} -mtune=intel ${EXTRA_C_FLAGS}")
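
For context (not part of the patch): -Wformat-security, added here to the C++-specific warning options, makes GCC and Clang warn about calls to printf-style functions where the format string is not a string literal and no format arguments are supplied; with -Wall already present in C_WARN_OPTS, -Wformat is in effect too. A standalone, hypothetical sketch of the pattern the flag rejects, assuming a command line like g++ -Wall -Wformat-security -Werror demo.cpp:

    #include <cstdio>
    #include <string>

    int main() {
        std::string fileReference = "app.cfg%x%x";  // data may legitimately contain '%'

        // Would warn: the format string is not a literal and no arguments follow,
        // so '%x' in the data is interpreted as a conversion specifier by printf.
        // std::printf(("Reading resume data for " + fileReference).c_str());

        // Accepted: literal format string, variable data passed as an argument.
        std::printf("Reading resume data for %s\n", fileReference.c_str());
        return 0;
    }
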
diff --git a/filedistribution/src/vespa/filedistribution/distributor/filedownloader.cpp b/filedistribution/src/vespa/filedistribution/distributor/filedownloader.cpp
index 9668e09bcc8..0ab21e8cc42 100644
--- a/filedistribution/src/vespa/filedistribution/distributor/filedownloader.cpp
+++ b/filedistribution/src/vespa/filedistribution/distributor/filedownloader.cpp
@@ -285,7 +285,7 @@ FileDownloader::pathToCompletedFile(const std::string& fileReference) const {
boost::optional<FileDownloader::ResumeDataBuffer>
FileDownloader::getResumeData(const std::string& fileReference) {
- LOG(debug, ("Reading resume data for " + fileReference).c_str());
+ LOG(debug, "Reading resume data for %s ", fileReference.c_str());
try {
fs::path path = (_dbPath / fileReference).string() + resumeDataSuffix;
if (fs::exists(path)) {
@@ -294,7 +294,7 @@ FileDownloader::getResumeData(const std::string& fileReference) {
std::istream_iterator<char> iterator(file), end;
std::copy(iterator, end, std::back_inserter(result));
- LOG(debug, ("Successfully retrieved resume data for " + fileReference).c_str());
+ LOG(debug, "Successfully retrieved resume data for %s ", fileReference.c_str());
if (result.size() < 50) {
LOG(info, "Very small resume file %zu bytes.", result.size());
}
@@ -303,7 +303,7 @@ FileDownloader::getResumeData(const std::string& fileReference) {
}
} catch(...) {
//resume data is only an optimization
- LOG(info, ("Error while reading resume data for " + fileReference).c_str());
+ LOG(info, "Error while reading resume data for %s", fileReference.c_str());
}
return boost::optional<ResumeDataBuffer>();
}
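
The LOG fixes above all follow the same pattern: stop passing a concatenated string as the format string and instead pass a "%s" literal with the data as an argument. A sketch of why the compiler can now catch the old form at the call site, assuming a GCC/Clang toolchain; the helper name and the attribute placement below are illustrative, not vespalog's actual declarations:

    #include <cstdarg>
    #include <cstdio>
    #include <string>

    // format(printf, 1, 2) tells the compiler that argument 1 is a printf-style
    // format string and the variadic arguments start at position 2.
    __attribute__((format(printf, 1, 2)))
    void demo_log(const char* fmt, ...) {
        va_list args;
        va_start(args, fmt);
        std::vfprintf(stderr, fmt, args);
        va_end(args);
        std::fputc('\n', stderr);
    }

    int main() {
        std::string fileReference = "files/foo.dat";

        // demo_log(("Error while reading resume data for " + fileReference).c_str());
        //   -> warning: format not a string literal and no format arguments
        demo_log("Error while reading resume data for %s", fileReference.c_str());  // OK
        return 0;
    }
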
diff --git a/filedistribution/src/vespa/filedistribution/distributor/filedownloadermanager.cpp b/filedistribution/src/vespa/filedistribution/distributor/filedownloadermanager.cpp
index 669cc550003..1763b798f03 100644
--- a/filedistribution/src/vespa/filedistribution/distributor/filedownloadermanager.cpp
+++ b/filedistribution/src/vespa/filedistribution/distributor/filedownloadermanager.cpp
@@ -14,10 +14,10 @@ using filedistribution::Path;
namespace {
void logStartDownload(const std::set<std::string> & filesToDownload) {
std::ostringstream msg;
- msg <<"StartDownloads:" <<std::endl;
+ msg << "StartDownloads:" << std::endl;
std::copy(filesToDownload.begin(), filesToDownload.end(),
std::ostream_iterator<std::string>(msg, "\n"));
- LOG(debug, msg.str().c_str());
+ LOG(debug, "%s", msg.str().c_str());
}
} //anonymous namespace
diff --git a/filedistribution/src/vespa/filedistribution/model/filedistributionmodelimpl.cpp b/filedistribution/src/vespa/filedistribution/model/filedistributionmodelimpl.cpp
index 9ce31e0dd3a..e0338cffd52 100644
--- a/filedistribution/src/vespa/filedistribution/model/filedistributionmodelimpl.cpp
+++ b/filedistribution/src/vespa/filedistribution/model/filedistributionmodelimpl.cpp
@@ -13,11 +13,11 @@ LOG_SETUP(".filedistributionmodel");
namespace fs = boost::filesystem;
using filedistribution::ZKFileDBModel;
+using std::make_shared;
namespace {
//peer format: hostName:port
-
void
addPeerEntry(const std::string& peer,
filedistribution::FileDistributionModelImpl::PeerEntries& result) {
@@ -73,8 +73,7 @@ struct FileDistributionModelImpl::DeployedFilesChangedCallback :
std::weak_ptr<FileDistributionModelImpl> _parent;
- DeployedFilesChangedCallback(
- const std::shared_ptr<FileDistributionModelImpl> & parent)
+ DeployedFilesChangedCallback(const std::shared_ptr<FileDistributionModelImpl> & parent)
:_parent(parent)
{}
@@ -111,7 +110,7 @@ FileDistributionModelImpl::getPeers(const std::string& fileReference, size_t max
LOG(debug, "Found %zu peers for path '%s'", result.size(), path.string().c_str());
return result;
} catch(ZKNodeDoesNotExistsException&) {
- LOG(debug, ("No peer entries available for " + fileReference).c_str());
+ LOG(debug, "No peer entries available for '%s'", fileReference.c_str());
return PeerEntries();
}
}
@@ -119,8 +118,7 @@ FileDistributionModelImpl::getPeers(const std::string& fileReference, size_t max
fs::path
FileDistributionModelImpl::getPeerEntryPath(const std::string& fileReference) {
std::ostringstream entry;
- entry <<_hostName
- <<ZKFileDBModel::_peerEntrySeparator <<_port;
+ entry <<_hostName << ZKFileDBModel::_peerEntrySeparator <<_port;
return _fileDBModel.getPeersPath(fileReference) / entry.str();
}
@@ -167,8 +165,7 @@ std::set<std::string>
FileDistributionModelImpl::getFilesToDownload() {
DeployedFilesToDownload d(_zk.get());
std::vector<std::string> deployed = d.getDeployedFilesToDownload(_hostName,
- DeployedFilesChangedCallback::SP(
- new DeployedFilesChangedCallback(shared_from_this())));
+ make_shared<DeployedFilesChangedCallback>(shared_from_this()));
std::set<std::string> result(deployed.begin(), deployed.end());
@@ -187,8 +184,7 @@ FileDistributionModelImpl::updateActiveFileReferences(
std::sort(sortedFileReferences.begin(), sortedFileReferences.end());
LockGuard guard(_activeFileReferencesMutex);
- bool changed =
- sortedFileReferences != _activeFileReferences;
+ bool changed = sortedFileReferences != _activeFileReferences;
sortedFileReferences.swap(_activeFileReferences);
return changed;
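
Besides the format fix, this file switches from shared_ptr(new ...) to std::make_shared. A simplified illustration of that conversion (the Callback type here is hypothetical, not the real DeployedFilesChangedCallback):

    #include <memory>
    #include <string>
    #include <utility>

    struct Callback {
        explicit Callback(std::string owner) : _owner(std::move(owner)) {}
        std::string _owner;
    };

    int main() {
        // Old style: separate allocations for the object and the control block.
        std::shared_ptr<Callback> a(new Callback("model"));

        // Style used in the patch: single allocation and less noise at the call site.
        auto b = std::make_shared<Callback>("model");

        return (a->_owner == b->_owner) ? 0 : 1;
    }
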
diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/nodemanager.cpp b/searchcore/src/vespa/searchcore/fdispatch/search/nodemanager.cpp
index fd60b16ee70..10f70b2e518 100644
--- a/searchcore/src/vespa/searchcore/fdispatch/search/nodemanager.cpp
+++ b/searchcore/src/vespa/searchcore/fdispatch/search/nodemanager.cpp
@@ -239,10 +239,11 @@ FastS_NodeManager::SetCollDesc(FastS_DataSetCollDesc *configDesc,
break;
FastOS_Thread::Sleep(100);
};
- if (allup)
+ if (allup) {
LOG(debug, "All new engines up after %d ms", rwait);
- else
+ } else {
LOG(debug, "Some new engines still down after %d ms", rwait);
+ }
}
gencnt = SetDataSetCollection(newCollection);
diff --git a/storage/src/vespa/storage/distributor/bucketdbupdater.cpp b/storage/src/vespa/storage/distributor/bucketdbupdater.cpp
index 67942d3d447..ce7f7afc670 100644
--- a/storage/src/vespa/storage/distributor/bucketdbupdater.cpp
+++ b/storage/src/vespa/storage/distributor/bucketdbupdater.cpp
@@ -26,27 +26,23 @@ BucketDBUpdater::BucketDBUpdater(Distributor& owner, ManagedBucketSpace& bucketS
{
}
-BucketDBUpdater::~BucketDBUpdater() {}
+BucketDBUpdater::~BucketDBUpdater() = default;
void
BucketDBUpdater::flush()
{
- for (std::map<uint64_t, BucketRequest>::iterator
- i(_sentMessages.begin()), end(_sentMessages.end());
- i != end; ++i)
- {
+ for (auto & entry : _sentMessages) {
// Cannot sendDown MergeBucketReplies during flushing, since
// all lower links have been closed
- if (i->second._mergeReplyGuard.get()) {
- i->second._mergeReplyGuard->resetReply();
+ if (entry.second._mergeReplyGuard) {
+ entry.second._mergeReplyGuard->resetReply();
}
}
_sentMessages.clear();
}
void
-BucketDBUpdater::print(std::ostream& out, bool verbose,
- const std::string& indent) const
+BucketDBUpdater::print(std::ostream& out, bool verbose, const std::string& indent) const
{
(void) verbose; (void) indent;
out << "BucketDBUpdater";
@@ -55,17 +51,15 @@ BucketDBUpdater::print(std::ostream& out, bool verbose,
bool
BucketDBUpdater::hasPendingClusterState() const
{
- return _pendingClusterState.get() != nullptr;
+ return static_cast<bool>(_pendingClusterState);
}
BucketOwnership
BucketDBUpdater::checkOwnershipInPendingState(const document::BucketId& b) const
{
if (hasPendingClusterState()) {
- const lib::ClusterState& state(
- _pendingClusterState->getNewClusterState());
- const lib::Distribution& distribution(
- _pendingClusterState->getDistribution());
+ const lib::ClusterState& state(_pendingClusterState->getNewClusterState());
+ const lib::Distribution& distribution(_pendingClusterState->getDistribution());
if (!_bucketSpaceComponent.ownsBucketInState(distribution, state, b)) {
return BucketOwnership::createNotOwnedInState(state);
}
@@ -74,22 +68,6 @@ BucketDBUpdater::checkOwnershipInPendingState(const document::BucketId& b) const
}
void
-BucketDBUpdater::clearPending(uint16_t node)
-{
- for (std::map<uint64_t, BucketRequest>::iterator iter(
- _sentMessages.begin()); iter != _sentMessages.end();)
- {
- if (iter->second.targetNode == node) {
- std::map<uint64_t, BucketRequest>::iterator del = iter;
- iter++;
- _sentMessages.erase(del);
- } else {
- iter++;
- }
- }
-}
-
-void
BucketDBUpdater::sendRequestBucketInfo(
uint16_t node,
const document::BucketId& bucket,
@@ -145,9 +123,8 @@ BucketDBUpdater::removeSuperfluousBuckets(
_bucketSpaceComponent.getBucketDatabase().forEach(proc);
- for (uint32_t i = 0; i < proc.getBucketsToRemove().size(); ++i) {
- _bucketSpaceComponent.getBucketDatabase()
- .remove(proc.getBucketsToRemove()[i]);
+ for (const auto & entry :proc.getBucketsToRemove()) {
+ _bucketSpaceComponent.getBucketDatabase().remove(entry);
}
}
@@ -247,9 +224,8 @@ BucketDBUpdater::onSetSystemState(
BucketDBUpdater::MergeReplyGuard::~MergeReplyGuard()
{
- if (_reply.get()) {
- _updater.getDistributorComponent().getDistributor()
- .handleCompletedMerge(_reply);
+ if (_reply) {
+ _updater.getDistributorComponent().getDistributor().handleCompletedMerge(_reply);
}
}
@@ -293,13 +269,8 @@ BucketDBUpdater::sendAllQueuedBucketRechecks()
"via NotifyBucketChange commands",
_enqueuedRechecks.size());
- typedef std::set<EnqueuedBucketRecheck>::const_iterator const_iterator;
- for (const_iterator it(_enqueuedRechecks.begin()),
- e(_enqueuedRechecks.end()); it != e; ++it)
- {
- sendRequestBucketInfo(it->node,
- it->bucket,
- std::shared_ptr<MergeReplyGuard>());
+ for (const auto & entry :_enqueuedRechecks) {
+ sendRequestBucketInfo(entry.node, entry.bucket, std::shared_ptr<MergeReplyGuard>());
}
_enqueuedRechecks.clear();
}
@@ -382,14 +353,14 @@ BucketDBUpdater::handleSingleBucketInfoFailure(
if (req.bucket != document::BucketId(0)) {
framework::MilliSecTime sendTime(_bucketSpaceComponent.getClock());
sendTime += framework::MilliSecTime(100);
- _delayedRequests.push_back(std::make_pair(sendTime, req));
+ _delayedRequests.emplace_back(sendTime, req);
}
}
void
BucketDBUpdater::resendDelayedMessages()
{
- if (_pendingClusterState.get()) {
+ if (_pendingClusterState) {
_pendingClusterState->resendDelayedMessages();
}
if (_delayedRequests.empty()) return; // Don't fetch time if not needed
@@ -398,9 +369,7 @@ BucketDBUpdater::resendDelayedMessages()
&& currentTime >= _delayedRequests.front().first)
{
BucketRequest& req(_delayedRequests.front().second);
- sendRequestBucketInfo(req.targetNode,
- req.bucket,
- std::shared_ptr<MergeReplyGuard>());
+ sendRequestBucketInfo(req.targetNode, req.bucket, std::shared_ptr<MergeReplyGuard>());
_delayedRequests.pop_front();
}
}
@@ -408,19 +377,13 @@ BucketDBUpdater::resendDelayedMessages()
void
BucketDBUpdater::convertBucketInfoToBucketList(
const std::shared_ptr<api::RequestBucketInfoReply>& repl,
- uint16_t targetNode,
- BucketListMerger::BucketList& newList)
+ uint16_t targetNode, BucketListMerger::BucketList& newList)
{
- for (uint32_t i = 0; i < repl->getBucketInfo().size(); i++) {
- LOG(debug,
- "Received bucket information from node %u for bucket %s: %s",
- targetNode,
- repl->getBucketInfo()[i]._bucketId.toString().c_str(),
- repl->getBucketInfo()[i]._info.toString().c_str());
+ for (const auto & entry : repl->getBucketInfo()) {
+ LOG(debug, "Received bucket information from node %u for bucket %s: %s", targetNode,
+ entry._bucketId.toString().c_str(), entry._info.toString().c_str());
- newList.push_back(BucketListMerger::BucketEntry(
- repl->getBucketInfo()[i]._bucketId,
- repl->getBucketInfo()[i]._info));
+ newList.emplace_back(entry._bucketId, entry._info);
}
}
@@ -446,8 +409,7 @@ bool
BucketDBUpdater::processSingleBucketInfoReply(
const std::shared_ptr<api::RequestBucketInfoReply> & repl)
{
- std::map<uint64_t, BucketRequest>::iterator iter =
- _sentMessages.find(repl->getMsgId());
+ auto iter = _sentMessages.find(repl->getMsgId());
// Has probably been deleted for some reason earlier.
if (iter == _sentMessages.end()) {
@@ -477,36 +439,30 @@ BucketDBUpdater::addBucketInfoForNode(
{
const BucketCopy* copy(e->getNode(node));
if (copy) {
- existing.push_back(BucketListMerger::BucketEntry(
- e.getBucketId(), copy->getBucketInfo()));
+ existing.emplace_back(e.getBucketId(), copy->getBucketInfo());
}
}
void
-BucketDBUpdater::findRelatedBucketsInDatabase(
- uint16_t node,
- const document::BucketId& bucketId,
- BucketListMerger::BucketList& existing)
+BucketDBUpdater::findRelatedBucketsInDatabase(uint16_t node, const document::BucketId& bucketId,
+ BucketListMerger::BucketList& existing)
{
std::vector<BucketDatabase::Entry> entries;
_bucketSpaceComponent.getBucketDatabase().getAll(bucketId, entries);
- for (uint32_t j = 0; j < entries.size(); ++j) {
- addBucketInfoForNode(entries[j], node, existing);
+ for (const BucketDatabase::Entry & entry : entries) {
+ addBucketInfoForNode(entry, node, existing);
}
}
void
BucketDBUpdater::updateDatabase(uint16_t node, BucketListMerger& merger)
{
- for (uint32_t i = 0; i < merger.getRemovedEntries().size(); i++) {
- _bucketSpaceComponent.removeNodeFromDB(merger.getRemovedEntries()[i], node);
+ for (const document::BucketId & bucket : merger.getRemovedEntries()) {
+ _bucketSpaceComponent.removeNodeFromDB(bucket, node);
}
- for (uint32_t i = 0; i < merger.getAddedEntries().size(); i++) {
- const BucketListMerger::BucketEntry& entry(
- merger.getAddedEntries()[i]);
-
+ for (const BucketListMerger::BucketEntry& entry : merger.getAddedEntries()) {
_bucketSpaceComponent.updateBucketDatabase(
entry.first,
BucketCopy(merger.getTimestamp(), node, entry.second),
@@ -603,12 +559,11 @@ BucketDBUpdater::reportXmlStatus(vespalib::xml::XmlOutputStream& xos,
<< XmlTag("systemstate_active")
<< XmlContent(_bucketSpaceComponent.getClusterState().toString())
<< XmlEndTag();
- if (_pendingClusterState.get() != 0) {
+ if (_pendingClusterState) {
xos << *_pendingClusterState;
}
xos << XmlTag("systemstate_history");
- typedef std::list<PendingClusterState::Summary>::const_reverse_iterator HistoryIter;
- for (HistoryIter i(_history.rbegin()), e(_history.rend()); i != e; ++i) {
+ for (auto i(_history.rbegin()), e(_history.rend()); i != e; ++i) {
xos << XmlTag("change")
<< XmlAttribute("from", i->_prevClusterState)
<< XmlAttribute("to", i->_newClusterState)
@@ -617,18 +572,16 @@ BucketDBUpdater::reportXmlStatus(vespalib::xml::XmlOutputStream& xos,
}
xos << XmlEndTag()
<< XmlTag("single_bucket_requests");
- for (std::map<uint64_t, BucketRequest>::const_iterator iter
- = _sentMessages.begin(); iter != _sentMessages.end(); iter++)
+ for (const auto & entry : _sentMessages)
{
xos << XmlTag("storagenode")
- << XmlAttribute("index", iter->second.targetNode);
- if (iter->second.bucket.getRawId() == 0) {
+ << XmlAttribute("index", entry.second.targetNode);
+ if (entry.second.bucket.getRawId() == 0) {
xos << XmlAttribute("bucket", ALL);
} else {
- xos << XmlAttribute("bucket", iter->second.bucket.getId(),
- XmlAttribute::HEX);
+ xos << XmlAttribute("bucket", entry.second.bucket.getId(), XmlAttribute::HEX);
}
- xos << XmlAttribute("sendtimestamp", iter->second.timestamp)
+ xos << XmlAttribute("sendtimestamp", entry.second.timestamp)
<< XmlEndTag();
}
xos << XmlEndTag() << XmlEndTag();
@@ -642,17 +595,13 @@ BucketDBUpdater::BucketListGenerator::process(BucketDatabase::Entry& e)
const BucketCopy* copy(e->getNode(_node));
if (copy) {
- _entries.push_back(
- BucketListMerger::BucketEntry(
- bucketId,
- copy->getBucketInfo()));
+ _entries.emplace_back(bucketId, copy->getBucketInfo());
}
return true;
}
void
-BucketDBUpdater::NodeRemover::logRemove(const document::BucketId& bucketId,
- const char* msg) const
+BucketDBUpdater::NodeRemover::logRemove(const document::BucketId& bucketId, const char* msg) const
{
LOG(spam, "Removing bucket %s: %s", bucketId.toString().c_str(), msg);
LOG_BUCKET_OPERATION_NO_LOCK(bucketId, msg);
@@ -748,7 +697,7 @@ BucketDBUpdater::NodeRemover::process(BucketDatabase::Entry& e)
BucketDBUpdater::NodeRemover::~NodeRemover()
{
- if (_removedBuckets.size() > 0) {
+ if ( !_removedBuckets.empty()) {
std::ostringstream ost;
ost << "After system state change "
<< _oldState.getTextualDifference(_state) << ", we removed "
@@ -760,7 +709,7 @@ BucketDBUpdater::NodeRemover::~NodeRemover()
if (_removedBuckets.size() >= 10) {
ost << " ...";
}
- LOGBM(info, ost.str().c_str());
+ LOGBM(info, "%s", ost.str().c_str());
}
}
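
Most of the churn in this file is mechanical modernization rather than format-security work: defaulted destructors, testing smart pointers directly instead of via .get(), and range-for with emplace_back instead of index or iterator loops. A compact illustration of those idioms, using simplified stand-in types rather than the real BucketDBUpdater:

    #include <cstdint>
    #include <map>
    #include <memory>
    #include <utility>
    #include <vector>

    struct Reply { int id; };

    struct Updater {
        std::unique_ptr<Reply> pending;
        std::map<uint64_t, int> sentMessages;
        std::vector<std::pair<uint64_t, int>> entries;

        ~Updater() = default;                         // instead of an empty ~Updater() {}

        bool hasPending() const {
            return static_cast<bool>(pending);        // instead of pending.get() != nullptr
        }

        void collect() {
            for (const auto& entry : sentMessages) {  // instead of explicit iterator loops
                entries.emplace_back(entry.first, entry.second);  // construct in place
            }
        }
    };

    int main() {
        Updater u;
        u.sentMessages[1] = 42;
        u.collect();
        return u.hasPending() ? 1 : 0;
    }
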
diff --git a/storage/src/vespa/storage/distributor/bucketdbupdater.h b/storage/src/vespa/storage/distributor/bucketdbupdater.h
index 6081f782cd5..d3b2bbf86ca 100644
--- a/storage/src/vespa/storage/distributor/bucketdbupdater.h
+++ b/storage/src/vespa/storage/distributor/bucketdbupdater.h
@@ -36,16 +36,11 @@ public:
~BucketDBUpdater();
void flush();
-
BucketOwnership checkOwnershipInPendingState(const document::BucketId&) const;
-
void recheckBucketInfo(uint32_t nodeIdx, const document::BucketId& bid);
bool onSetSystemState(const std::shared_ptr<api::SetSystemStateCommand>& cmd) override;
-
- bool onRequestBucketInfoReply(
- const std::shared_ptr<api::RequestBucketInfoReply> & repl) override;
-
+ bool onRequestBucketInfoReply(const std::shared_ptr<api::RequestBucketInfoReply> & repl) override;
bool onMergeBucketReply(const std::shared_ptr<api::MergeBucketReply>& reply) override;
bool onNotifyBucketChange(const std::shared_ptr<api::NotifyBucketChangeCommand>&) override;
void resendDelayedMessages();
@@ -72,8 +67,7 @@ private:
ManagedBucketSpaceComponent _bucketSpaceComponent;
class MergeReplyGuard {
public:
- MergeReplyGuard(BucketDBUpdater& updater,
- const std::shared_ptr<api::MergeBucketReply>& reply)
+ MergeReplyGuard(BucketDBUpdater& updater, const std::shared_ptr<api::MergeBucketReply>& reply)
: _updater(updater), _reply(reply) {}
~MergeReplyGuard();
@@ -90,9 +84,7 @@ private:
BucketRequest()
: targetNode(0), bucket(0), timestamp(0) {};
- BucketRequest(uint16_t t,
- uint64_t currentTime,
- const document::BucketId& b,
+ BucketRequest(uint16_t t, uint64_t currentTime, const document::BucketId& b,
const std::shared_ptr<MergeReplyGuard>& guard)
: targetNode(t),
bucket(b),
@@ -112,8 +104,7 @@ private:
EnqueuedBucketRecheck() : node(0), bucket() {}
- EnqueuedBucketRecheck(uint16_t _node,
- const document::BucketId& _bucket)
+ EnqueuedBucketRecheck(uint16_t _node, const document::BucketId& _bucket)
: node(_node),
bucket(_bucket)
{}
@@ -130,35 +121,21 @@ private:
};
bool hasPendingClusterState() const;
-
- void clearPending(uint16_t node);
-
- bool pendingClusterStateAccepted(
- const std::shared_ptr<api::RequestBucketInfoReply>& repl);
- bool bucketOwnedAccordingToPendingState(
- const document::BucketId& bucketId) const;
- bool processSingleBucketInfoReply(
- const std::shared_ptr<api::RequestBucketInfoReply>& repl);
- void handleSingleBucketInfoFailure(
- const std::shared_ptr<api::RequestBucketInfoReply>& repl,
- const BucketRequest& req);
+ bool pendingClusterStateAccepted(const std::shared_ptr<api::RequestBucketInfoReply>& repl);
+ bool bucketOwnedAccordingToPendingState(const document::BucketId& bucketId) const;
+ bool processSingleBucketInfoReply(const std::shared_ptr<api::RequestBucketInfoReply>& repl);
+ void handleSingleBucketInfoFailure(const std::shared_ptr<api::RequestBucketInfoReply>& repl,
+ const BucketRequest& req);
bool isPendingClusterStateCompleted() const;
void processCompletedPendingClusterState();
- void mergeBucketInfoWithDatabase(
- const std::shared_ptr<api::RequestBucketInfoReply>& repl,
- const BucketRequest& req);
- void convertBucketInfoToBucketList(
- const std::shared_ptr<api::RequestBucketInfoReply>& repl,
- uint16_t targetNode,
- BucketListMerger::BucketList& newList);
- void sendRequestBucketInfo(
- uint16_t node,
- const document::BucketId& bucket,
- const std::shared_ptr<MergeReplyGuard>& mergeReply);
- void addBucketInfoForNode(
- const BucketDatabase::Entry& e,
- uint16_t node,
- BucketListMerger::BucketList& existing) const;
+ void mergeBucketInfoWithDatabase(const std::shared_ptr<api::RequestBucketInfoReply>& repl,
+ const BucketRequest& req);
+ void convertBucketInfoToBucketList(const std::shared_ptr<api::RequestBucketInfoReply>& repl,
+ uint16_t targetNode, BucketListMerger::BucketList& newList);
+ void sendRequestBucketInfo(uint16_t node, const document::BucketId& bucket,
+ const std::shared_ptr<MergeReplyGuard>& mergeReply);
+ void addBucketInfoForNode(const BucketDatabase::Entry& e, uint16_t node,
+ BucketListMerger::BucketList& existing) const;
void ensureTransitionTimerStarted();
void completeTransitionTimer();
/**
@@ -167,10 +144,8 @@ private:
* in bucketId, or that bucketId is contained in, that have copies
* on the given node.
*/
- void findRelatedBucketsInDatabase(
- uint16_t node,
- const document::BucketId& bucketId,
- BucketListMerger::BucketList& existing);
+ void findRelatedBucketsInDatabase(uint16_t node, const document::BucketId& bucketId,
+ BucketListMerger::BucketList& existing);
/**
Updates the bucket database from the information generated by the given
@@ -178,18 +153,15 @@ private:
*/
void updateDatabase(uint16_t node, BucketListMerger& merger);
- void updateState(const lib::ClusterState& oldState,
- const lib::ClusterState& newState);
+ void updateState(const lib::ClusterState& oldState, const lib::ClusterState& newState);
- void removeSuperfluousBuckets(const lib::Distribution& newDistribution,
- const lib::ClusterState& newState);
+ void removeSuperfluousBuckets(const lib::Distribution& newDistribution, const lib::ClusterState& newState);
void replyToPreviousPendingClusterStateIfAny();
void enableCurrentClusterStateInDistributor();
void addCurrentStateToClusterStateHistory();
- void enqueueRecheckUntilPendingStateEnabled(uint16_t node,
- const document::BucketId&);
+ void enqueueRecheckUntilPendingStateEnabled(uint16_t node, const document::BucketId&);
void sendAllQueuedBucketRechecks();
friend class BucketDBUpdater_Test;
@@ -198,8 +170,7 @@ private:
class BucketListGenerator
{
public:
- BucketListGenerator(uint16_t node,
- BucketListMerger::BucketList& entries)
+ BucketListGenerator(uint16_t node, BucketListMerger::BucketList& entries)
: _node(node), _entries(entries) {};
bool process(BucketDatabase::Entry&);
@@ -237,8 +208,7 @@ private:
return _removedBuckets;
}
private:
- void setCopiesInEntry(BucketDatabase::Entry& e,
- const std::vector<BucketCopy>& copies) const;
+ void setCopiesInEntry(BucketDatabase::Entry& e, const std::vector<BucketCopy>& copies) const;
void removeEmptyBucket(const document::BucketId& bucketId);
const lib::ClusterState _oldState;
diff --git a/vespalog/src/test/bufferedlogtest.cpp b/vespalog/src/test/bufferedlogtest.cpp
index 3e974057361..9b69d9d3a14 100644
--- a/vespalog/src/test/bufferedlogtest.cpp
+++ b/vespalog/src/test/bufferedlogtest.cpp
@@ -71,12 +71,11 @@ void spamLog2(uint64_t& time, int diff) {
time += diff;
std::ostringstream ost;
ost << "Message " << i;
- LOGBT(info, ost.str(), ost.str().c_str());
+ LOGBT(info, ost.str(), "%s", ost.str().c_str());
}
}
-void testThatEntriesWithHighCountIsKept(const std::string& file,
- uint64_t& timer)
+void testThatEntriesWithHighCountIsKept(const std::string& file, uint64_t& timer)
{
std::cerr << "testThatEntriesWithHighCountIsKept ...\n";
timer = 10 * 1000000 + 4;
diff --git a/vespalog/src/test/bufferedlogtest.logger1.cpp b/vespalog/src/test/bufferedlogtest.logger1.cpp
index 24b9237a71d..dde5f2edbb5 100644
--- a/vespalog/src/test/bufferedlogtest.logger1.cpp
+++ b/vespalog/src/test/bufferedlogtest.logger1.cpp
@@ -5,7 +5,7 @@
LOG_SETUP(".logger1");
-void logWithLogger1(const std::string& token, const std::string message)
+void logWithLogger1(const std::string& token, const std::string & message)
{
- LOGBT(info, token, message.c_str());
+ LOGBT(info, token, "%s", message.c_str());
}
diff --git a/vespalog/src/test/bufferedlogtest.logger1.h b/vespalog/src/test/bufferedlogtest.logger1.h
index acbd67e8c3e..1ec8f9acad4 100644
--- a/vespalog/src/test/bufferedlogtest.logger1.h
+++ b/vespalog/src/test/bufferedlogtest.logger1.h
@@ -4,5 +4,5 @@
#include <string>
-void logWithLogger1(const std::string& token, const std::string message);
+void logWithLogger1(const std::string& token, const std::string & message);
diff --git a/vespalog/src/test/bufferedlogtest.logger2.cpp b/vespalog/src/test/bufferedlogtest.logger2.cpp
index 8d6ca97130e..992eeb70481 100644
--- a/vespalog/src/test/bufferedlogtest.logger2.cpp
+++ b/vespalog/src/test/bufferedlogtest.logger2.cpp
@@ -5,7 +5,7 @@
LOG_SETUP(".logger2");
-void logWithLogger2(const std::string& token, const std::string message)
+void logWithLogger2(const std::string& token, const std::string & message)
{
- LOGBT(info, token, message.c_str());
+ LOGBT(info, token, "%s", message.c_str());
}
diff --git a/vespalog/src/test/bufferedlogtest.logger2.h b/vespalog/src/test/bufferedlogtest.logger2.h
index 398d799a2b3..cd88d8ea0be 100644
--- a/vespalog/src/test/bufferedlogtest.logger2.h
+++ b/vespalog/src/test/bufferedlogtest.logger2.h
@@ -4,5 +4,5 @@
#include <string>
-void logWithLogger2(const std::string& token, const std::string message);
+void logWithLogger2(const std::string& token, const std::string & message);
diff --git a/vespalog/src/vespa/log/bufferedlogger.h b/vespalog/src/vespa/log/bufferedlogger.h
index 64419311a17..9081ab8f9cf 100644
--- a/vespalog/src/vespa/log/bufferedlogger.h
+++ b/vespalog/src/vespa/log/bufferedlogger.h
@@ -102,7 +102,7 @@
"", __VA_ARGS__); \
} \
} \
- } while (0)
+ } while (false)
#endif
// Define LOGBM macro for logging buffered, using the message itself as a
@@ -121,7 +121,7 @@
"", __VA_ARGS__); \
} \
} \
- } while (0)
+ } while (false)
// Define LOGBP macro for logging buffered, using the call point as token.
// (File/line of macro caller)
@@ -140,7 +140,7 @@
__FILE__, __LINE__, ost123.str(), ##ARGS); \
} \
} \
- } while (0)
+ } while (false)
// Define LOGT calls for using the buffer specifically stating token
#define LOGBT(level, token, ...) \
@@ -156,7 +156,7 @@
__FILE__, __LINE__, token, __VA_ARGS__); \
} \
} \
- } while (0)
+ } while (false)
#define LOGB_FLUSH() \
ns_log::BufferedLogger::logger.flush()
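
The hunks above only change the macro terminators from while (0) to while (false); the wrapper itself is the standard trick for making a multi-statement macro behave as a single statement. A small self-contained illustration with a hypothetical SAFE_LOG macro, not vespalog's:

    #include <cstdio>

    #define SAFE_LOG(cond, msg)            \
        do {                               \
            if (cond) {                    \
                std::printf("%s\n", msg);  \
            }                              \
        } while (false)

    int main() {
        bool verbose = false;
        // Without the do/while wrapper the expansion would leave a stray ';'
        // before the 'else' and fail to compile; with it, SAFE_LOG(...) is one
        // statement and composes safely with if/else.
        if (verbose)
            SAFE_LOG(true, "verbose mode");
        else
            std::printf("quiet mode\n");
        return 0;
    }
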
diff --git a/vespalog/src/vespa/log/log.cpp b/vespalog/src/vespa/log/log.cpp
index 7d98c22957d..193f4230502 100644
--- a/vespalog/src/vespa/log/log.cpp
+++ b/vespalog/src/vespa/log/log.cpp
@@ -23,7 +23,7 @@ namespace ns_log {
uint64_t Timer::getTimestamp() const {
struct timeval tv;
- gettimeofday(&tv, NULL);
+ gettimeofday(&tv, nullptr);
uint64_t timestamp = tv.tv_sec;
timestamp *= 1000000;
timestamp += tv.tv_usec;
@@ -164,16 +164,16 @@ Logger::~Logger()
{
_numInstances--;
if ((_numInstances == 1)) {
- if (logger != NULL) {
+ if (logger != nullptr) {
logger->~Logger();
free(logger);
- logger = NULL;
+ logger = nullptr;
}
} else if (_numInstances == 0) {
delete _controlFile;
logInitialised = false;
delete _target;
- _target = NULL;
+ _target = nullptr;
}
}
@@ -390,13 +390,6 @@ Logger::doEventValue(const char *name, double value)
}
void
-Logger::doEventCollection(uint64_t collectionId, const char* name, const char* params)
-{
- doLog(event, "", 0, "collection/1 collectionId=%lu name=\"%s\" %s",
- collectionId, name, params);
-}
-
-void
Logger::doEventState(const char *name, const char *value)
{
doLog(event, "", 0, "state/1 name=\"%s\" value=\"%s\"", name, value);
diff --git a/vespalog/src/vespa/log/log.h b/vespalog/src/vespa/log/log.h
index 127baf14d55..de0b9809e35 100644
--- a/vespalog/src/vespa/log/log.h
+++ b/vespalog/src/vespa/log/log.h
@@ -39,13 +39,13 @@ do { \
if (logger.wants(ns_log::Logger::level)) { \
logger.doLog(ns_log::Logger::level, __FILE__, __LINE__, __VA_ARGS__); \
} \
-} while (0)
+} while (false)
#define VLOG(level, ...) \
do { \
if (logger.wants(level)) { \
logger.doLog(level, __FILE__, __LINE__, __VA_ARGS__); \
} \
-} while (0)
+} while (false)
#endif
// Must use placement new in the following definition, since the variable
@@ -64,7 +64,7 @@ do {
if (logger->wants(ns_log::Logger::level)) { \
logger->doLog(ns_log::Logger::level, __FILE__, __LINE__, __VA_ARGS__); \
} \
-} while (0)
+} while (false)
#define LOG_WOULD_LOG(level) logger.wants(ns_log::Logger::level)
#define LOG_WOULD_VLOG(level) logger.wants(level)
@@ -74,84 +74,77 @@ do { \
if (logger.wants(ns_log::Logger::event)) { \
logger.doEventStarting(name); \
} \
-} while (0)
+} while (false)
#define EV_STOPPING(name,why) \
do { \
if (logger.wants(ns_log::Logger::event)) { \
logger.doEventStopping(name, why); \
} \
-} while (0)
+} while (false)
#define EV_STARTED(name) \
do { \
if (logger.wants(ns_log::Logger::event)) { \
logger.doEventStarted(name); \
} \
-} while (0)
+} while (false)
#define EV_STOPPED(name,pid,exitcode) \
do { \
if (logger.wants(ns_log::Logger::event)) { \
logger.doEventStopped(name, pid, exitcode); \
} \
-} while (0)
+} while (false)
#define EV_RELOADING(name) \
do { \
if (logger.wants(ns_log::Logger::event)) { \
logger.doEventReloading(name); \
} \
-} while (0)
+} while (false)
#define EV_RELOADED(name) \
do { \
if (logger.wants(ns_log::Logger::event)) { \
logger.doEventReloaded(name); \
} \
-} while (0)
+} while (false)
#define EV_CRASH(name,pid,signal) \
do { \
if (logger.wants(ns_log::Logger::event)) { \
logger.doEventCrash(name, pid, signal); \
} \
-} while (0)
+} while (false)
#define EV_PROGRESS(name, ...) \
do { \
if (logger.wants(ns_log::Logger::event)) { \
logger.doEventProgress(name, __VA_ARGS__); \
} \
-} while (0)
+} while (false)
#define EV_COUNT(name,value) \
do { \
if (logger.wants(ns_log::Logger::event)) { \
logger.doEventCount(name, value); \
} \
-} while (0)
+} while (false)
#define EV_VALUE(name,value) \
do { \
if (logger.wants(ns_log::Logger::event)) { \
logger.doEventValue(name, value); \
} \
-} while (0)
-
-#define EV_COLLECTION(collectionId,name,params) \
-do { \
- if (logger.wants(ns_log::Logger::event)) { \
- logger.doEventCollection(collectionId, name, params); \
- } \
-} while (0)
+} while (false)
#define EV_STATE(name,value) \
do { \
if (logger.wants(ns_log::Logger::event)) { \
logger.doEventState(name, value); \
} \
-} while (0)
+} while (false)
namespace ns_log {
@@ -161,7 +154,7 @@ class ControlFile;
// XXX this is way too complicated, must be some simpler way to do this
/** Timer class used to retrieve timestamp, such that we can override in test */
struct Timer {
- virtual ~Timer() {}
+ virtual ~Timer() = default;
virtual uint64_t getTimestamp() const;
};
@@ -187,7 +180,6 @@ public:
static bool fakePid;
private:
- Logger();
Logger(const Logger &);
Logger& operator =(const Logger &);
@@ -205,7 +197,6 @@ private:
static ControlFile *_controlFile;
static void setTarget();
- void makeLockFile();
char _appendix[256];
@@ -220,8 +211,7 @@ private:
const char *fmt, va_list args);
public:
~Logger();
- explicit Logger(const char *name, const char *rcsId = 0);
- static Logger& getLogger(const char *name);
+ explicit Logger(const char *name, const char *rcsId = nullptr);
int setRcsId(const char *rcsId);
static const char *levelName(LogLevel level);
@@ -247,8 +237,6 @@ public:
void doEventProgress(const char *name, double value, double total = 0);
void doEventCount(const char *name, uint64_t value);
void doEventValue(const char *name, double value);
- void doEventCollection(uint64_t collectionId, const char *name,
- const char *params);
void doEventState(const char *name, const char *value);
// Only for unit testing