summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorHenning Baldersheim <balder@yahoo-inc.com>2020-08-14 18:48:09 +0000
committerHenning Baldersheim <balder@yahoo-inc.com>2020-08-14 18:48:09 +0000
commit729e8546d793a43ecfc94c11d5eed042d4dc74e1 (patch)
tree5923c808c8a83c8c617a949f98c30e5fce0c13cd
parentbbc768d6ef92f166717d65e67689f6aa15c98952 (diff)
Build the FieldSetRepo up front with all configured fieldsets to avoid building them for every get() call.
-rw-r--r--document/src/tests/fieldsettest.cpp17
-rw-r--r--document/src/vespa/document/datatype/documenttype.h7
-rw-r--r--document/src/vespa/document/fieldset/fieldsetrepo.cpp45
-rw-r--r--document/src/vespa/document/fieldset/fieldsetrepo.h10
-rw-r--r--storage/src/tests/persistence/persistencetestutils.cpp17
-rw-r--r--storage/src/tests/persistence/persistencetestutils.h2
-rw-r--r--storage/src/vespa/storage/common/storagecomponent.cpp22
-rw-r--r--storage/src/vespa/storage/common/storagecomponent.h20
-rw-r--r--storage/src/vespa/storage/config/distributorconfiguration.cpp2
-rw-r--r--storage/src/vespa/storage/distributor/distributor.cpp3
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp3
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp2
-rw-r--r--storage/src/vespa/storage/persistence/mergehandler.cpp138
-rw-r--r--storage/src/vespa/storage/persistence/persistencethread.cpp6
-rw-r--r--storage/src/vespa/storage/persistence/persistenceutil.cpp2
-rw-r--r--storage/src/vespa/storage/persistence/testandsethelper.cpp17
-rw-r--r--storage/src/vespa/storage/persistence/testandsethelper.h4
-rw-r--r--storage/src/vespa/storage/storageserver/communicationmanager.cpp6
-rw-r--r--storage/src/vespa/storage/visiting/visitorthread.cpp17
-rw-r--r--streamingvisitors/src/tests/searchvisitor/searchvisitor_test.cpp10
-rw-r--r--streamingvisitors/src/vespa/searchvisitor/searchvisitor.cpp2
21 files changed, 171 insertions, 181 deletions
diff --git a/document/src/tests/fieldsettest.cpp b/document/src/tests/fieldsettest.cpp
index 29581ff4549..af23e713735 100644
--- a/document/src/tests/fieldsettest.cpp
+++ b/document/src/tests/fieldsettest.cpp
@@ -31,7 +31,7 @@ TEST_F(FieldSetTest, testParsing)
(void) dynamic_cast<NoFields&>(*FieldSetRepo::parse(docRepo, NoFields::NAME));
(void) dynamic_cast<DocIdOnly&>(*FieldSetRepo::parse(docRepo, DocIdOnly::NAME));
- FieldSet::UP set = FieldSetRepo::parse(docRepo, "testdoctype1:headerval,content");
+ auto set = FieldSetRepo::parse(docRepo, "testdoctype1:headerval,content");
auto & coll = dynamic_cast<FieldCollection&>(*set);
std::ostringstream ost;
@@ -46,8 +46,8 @@ namespace {
bool checkContains(const DocumentTypeRepo& repo,
const std::string& str1, const std::string & str2) {
- FieldSet::UP set1 = FieldSetRepo::parse(repo, str1);
- FieldSet::UP set2 = FieldSetRepo::parse(repo, str2);
+ auto set1 = FieldSetRepo::parse(repo, str1);
+ auto set2 = FieldSetRepo::parse(repo, str2);
return set1->contains(*set2);
}
@@ -141,7 +141,7 @@ FieldSetTest::doCopyFields(const Document& src,
if (!dest) {
dest = &destDoc;
}
- FieldSet::UP fset = FieldSetRepo::parse(docRepo, fieldSetStr);
+ auto fset = FieldSetRepo::parse(docRepo, fieldSetStr);
FieldSet::copyFields(*dest, src, *fset);
return stringifyFields(*dest);
}
@@ -152,7 +152,7 @@ FieldSetTest::doStripFields(const Document& doc,
const std::string& fieldSetStr)
{
Document::UP copy(doc.clone());
- FieldSet::UP fset = FieldSetRepo::parse(docRepo, fieldSetStr);
+ auto fset = FieldSetRepo::parse(docRepo, fieldSetStr);
FieldSet::stripFields(*copy, *fset);
return stringifyFields(*copy);
}
@@ -198,7 +198,7 @@ FieldSetTest::doCopyDocument(const Document& src,
const DocumentTypeRepo& docRepo,
const std::string& fieldSetStr)
{
- FieldSet::UP fset = FieldSetRepo::parse(docRepo, fieldSetStr);
+ auto fset = FieldSetRepo::parse(docRepo, fieldSetStr);
Document::UP doc(FieldSet::createDocumentSubsetCopy(src, *fset));
return stringifyFields(*doc);
}
@@ -244,10 +244,9 @@ TEST_F(FieldSetTest, testSerialize)
"testdoctype1:content,hstringval"
};
- FieldSetRepo repo;
for (const char * fieldSet : fieldSets) {
- FieldSet::UP fs = FieldSetRepo::parse(docRepo, fieldSet);
- EXPECT_EQ(vespalib::string(fieldSet), repo.serialize(*fs));
+ auto fs = FieldSetRepo::parse(docRepo, fieldSet);
+ EXPECT_EQ(vespalib::string(fieldSet), FieldSetRepo::serialize(*fs));
}
}
diff --git a/document/src/vespa/document/datatype/documenttype.h b/document/src/vespa/document/datatype/documenttype.h
index ed6e9e66ab5..fae65addb48 100644
--- a/document/src/vespa/document/datatype/documenttype.h
+++ b/document/src/vespa/document/datatype/documenttype.h
@@ -61,12 +61,10 @@ public:
DocumentType();
DocumentType(vespalib::stringref name, int32_t id);
- DocumentType(vespalib::stringref name, int32_t id,
- const StructDataType& fields);
+ DocumentType(vespalib::stringref name, int32_t id, const StructDataType& fields);
explicit DocumentType(vespalib::stringref name);
- DocumentType(vespalib::stringref name,
- const StructDataType& fields);
+ DocumentType(vespalib::stringref name, const StructDataType& fields);
~DocumentType() override;
@@ -101,6 +99,7 @@ public:
DocumentType & addFieldSet(const vespalib::string & name, FieldSet::Fields fields);
const FieldSet * getFieldSet(const vespalib::string & name) const;
+ const FieldSetMap & getFieldSets() const { return _fieldSets; }
const ImportedFieldNames& imported_field_names() const noexcept {
return _imported_field_names;
diff --git a/document/src/vespa/document/fieldset/fieldsetrepo.cpp b/document/src/vespa/document/fieldset/fieldsetrepo.cpp
index 33cbf6185c4..5bde291c8dd 100644
--- a/document/src/vespa/document/fieldset/fieldsetrepo.cpp
+++ b/document/src/vespa/document/fieldset/fieldsetrepo.cpp
@@ -5,6 +5,7 @@
#include <vespa/vespalib/util/exceptions.h>
#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/vespalib/stllike/hash_map.hpp>
using vespalib::StringTokenizer;
@@ -12,27 +13,25 @@ namespace document {
namespace {
-FieldSet::UP
+FieldSet::SP
parseSpecialValues(vespalib::stringref name)
{
- FieldSet::UP fs;
if ((name.size() == 4) && (name[1] == 'i') && (name[2] == 'd') && (name[3] == ']')) {
- fs = std::make_unique<DocIdOnly>();
+ return std::make_shared<DocIdOnly>();
} else if ((name.size() == 5) && (name[1] == 'a') && (name[2] == 'l') && (name[3] == 'l') && (name[4] == ']')) {
- fs = std::make_unique<AllFields>();
+ return std::make_shared<AllFields>();
} else if ((name.size() == 6) && (name[1] == 'n') && (name[2] == 'o') && (name[3] == 'n') && (name[4] == 'e') && (name[5] == ']')) {
- fs = std::make_unique<NoFields>();
+ return std::make_shared<NoFields>();
} else if ((name.size() == 7) && (name[1] == 'd') && (name[2] == 'o') && (name[3] == 'c') && (name[4] == 'i') && (name[5] == 'd') && (name[6] == ']')) {
- fs = std::make_unique<DocIdOnly>();
+ return std::make_shared<DocIdOnly>();
} else {
throw vespalib::IllegalArgumentException(
"The only special names (enclosed in '[]') allowed are "
"id, all, none, not '" + name + "'.");
}
- return fs;
}
-FieldSet::UP
+FieldSet::SP
parseFieldCollection(const DocumentTypeRepo& repo,
vespalib::stringref docType,
vespalib::stringref fieldNames)
@@ -55,12 +54,12 @@ parseFieldCollection(const DocumentTypeRepo& repo,
builder.add(&type.getField(token));
}
}
- return std::make_unique<FieldCollection>(type, builder.build());
+ return std::make_shared<FieldCollection>(type, builder.build());
}
}
-FieldSet::UP
+FieldSet::SP
FieldSetRepo::parse(const DocumentTypeRepo& repo, vespalib::stringref str)
{
if (str[0] == '[') {
@@ -111,5 +110,31 @@ FieldSetRepo::serialize(const FieldSet& fieldSet)
}
}
+
+FieldSetRepo::FieldSetRepo(const DocumentTypeRepo& repo)
+ : _doumentTyperepo(repo),
+ _configuredFieldSets()
+{
+ repo.forEachDocumentType(*vespalib::makeClosure(this, &FieldSetRepo::configureDocumentType));
+}
+FieldSetRepo::~FieldSetRepo() = default;
+
+void
+FieldSetRepo::configureDocumentType(const DocumentType & documentType) {
+ for (const auto & entry : documentType.getFieldSets()) {
+ vespalib::string fieldSetName(documentType.getName());
+ fieldSetName.append(':').append(entry.first);
+ _configuredFieldSets[fieldSetName] = parse(_doumentTyperepo, fieldSetName);
+ }
+}
+FieldSet::SP
+FieldSetRepo::getFieldSet(vespalib::stringref fieldSetString) const {
+ auto found = _configuredFieldSets.find(fieldSetString);
+ if (found != _configuredFieldSets.end()) {
+ return found->second;
+ }
+ return parse(_doumentTyperepo, fieldSetString);
+}
+
}
diff --git a/document/src/vespa/document/fieldset/fieldsetrepo.h b/document/src/vespa/document/fieldset/fieldsetrepo.h
index bfe9c05d1ca..d213230848a 100644
--- a/document/src/vespa/document/fieldset/fieldsetrepo.h
+++ b/document/src/vespa/document/fieldset/fieldsetrepo.h
@@ -16,9 +16,17 @@ class DocumentTypeRepo;
class FieldSetRepo
{
public:
- static FieldSet::UP parse(const DocumentTypeRepo& repo, vespalib::stringref fieldSetString);
+ FieldSetRepo(const DocumentTypeRepo& repo);
+ ~FieldSetRepo();
+ FieldSet::SP getFieldSet(vespalib::stringref fieldSetString) const;
+
+ static FieldSet::SP parse(const DocumentTypeRepo& repo, vespalib::stringref fieldSetString);
static vespalib::string serialize(const FieldSet& fs);
+private:
+ void configureDocumentType(const DocumentType & documentType);
+ const DocumentTypeRepo & _doumentTyperepo;
+ vespalib::hash_map<vespalib::string, FieldSet::SP> _configuredFieldSets;
};
}
diff --git a/storage/src/tests/persistence/persistencetestutils.cpp b/storage/src/tests/persistence/persistencetestutils.cpp
index 504767e68c7..f50fbb0c8e8 100644
--- a/storage/src/tests/persistence/persistencetestutils.cpp
+++ b/storage/src/tests/persistence/persistencetestutils.cpp
@@ -231,9 +231,9 @@ PersistenceTestUtils::doGetOnDisk(
document::DocumentUpdate::SP
PersistenceTestUtils::createBodyUpdate(const document::DocumentId& docId, const document::FieldValue& updateValue)
{
- const DocumentType* docType(_env->_component.getTypeRepo()->getDocumentType("testdoctype1"));
- document::DocumentUpdate::SP update(new document::DocumentUpdate(*_env->_component.getTypeRepo(), *docType, docId));
- std::shared_ptr<document::AssignValueUpdate> assignUpdate(new document::AssignValueUpdate(updateValue));
+ const DocumentType* docType(getTypeRepo()->getDocumentType("testdoctype1"));
+ auto update = std::make_shared<document::DocumentUpdate>(*getTypeRepo(), *docType, docId);
+ auto assignUpdate = std::make_shared<document::AssignValueUpdate>(updateValue);
document::FieldUpdate fieldUpdate(docType->getField("content"));
fieldUpdate.addUpdate(*assignUpdate);
update->addUpdate(fieldUpdate);
@@ -243,9 +243,9 @@ PersistenceTestUtils::createBodyUpdate(const document::DocumentId& docId, const
document::DocumentUpdate::SP
PersistenceTestUtils::createHeaderUpdate(const document::DocumentId& docId, const document::FieldValue& updateValue)
{
- const DocumentType* docType(_env->_component.getTypeRepo()->getDocumentType("testdoctype1"));
- document::DocumentUpdate::SP update(new document::DocumentUpdate(*_env->_component.getTypeRepo(), *docType, docId));
- std::shared_ptr<document::AssignValueUpdate> assignUpdate(new document::AssignValueUpdate(updateValue));
+ const DocumentType* docType(getTypeRepo()->getDocumentType("testdoctype1"));
+ auto update = std::make_shared<document::DocumentUpdate>(*getTypeRepo(), *docType, docId);
+ auto assignUpdate = std::make_shared<document::AssignValueUpdate>(updateValue);
document::FieldUpdate fieldUpdate(docType->getField("headerval"));
fieldUpdate.addUpdate(*assignUpdate);
update->addUpdate(fieldUpdate);
@@ -253,8 +253,7 @@ PersistenceTestUtils::createHeaderUpdate(const document::DocumentId& docId, cons
}
uint16_t
-PersistenceTestUtils::getDiskFromBucketDatabaseIfUnset(const document::Bucket& bucket,
- uint16_t disk)
+PersistenceTestUtils::getDiskFromBucketDatabaseIfUnset(const document::Bucket& bucket, uint16_t disk)
{
if (disk == 0xffff) {
StorBucketDatabase::WrappedEntry entry(
@@ -342,7 +341,7 @@ PersistenceTestUtils::clearBody(document::Document& doc)
//doc->getBody().clear();
vespalib::nbostream stream;
doc.serializeHeader(stream);
- doc.deserialize(*_env->_component.getTypeRepo(), stream);
+ doc.deserialize(*getTypeRepo(), stream);
}
document::Document::UP
diff --git a/storage/src/tests/persistence/persistencetestutils.h b/storage/src/tests/persistence/persistencetestutils.h
index 6cee3b79ab8..3d25a205017 100644
--- a/storage/src/tests/persistence/persistencetestutils.h
+++ b/storage/src/tests/persistence/persistencetestutils.h
@@ -101,7 +101,7 @@ public:
FileStorHandler& fsHandler() { return *_env->_handler; }
FileStorMetrics& metrics() { return _env->_metrics; }
MessageKeeper& messageKeeper() { return _env->_messageKeeper; }
- std::shared_ptr<const document::DocumentTypeRepo> getTypeRepo() { return _env->_component.getTypeRepo(); }
+ std::shared_ptr<const document::DocumentTypeRepo> getTypeRepo() { return _env->_component.getTypeRepo()->documentTypeRepo; }
StorageComponent& getComponent() { return _env->_component; }
TestServiceLayerApp& getNode() { return _env->_node; }
diff --git a/storage/src/vespa/storage/common/storagecomponent.cpp b/storage/src/vespa/storage/common/storagecomponent.cpp
index 21a4b8eea64..f7bf038f3de 100644
--- a/storage/src/vespa/storage/common/storagecomponent.cpp
+++ b/storage/src/vespa/storage/common/storagecomponent.cpp
@@ -2,17 +2,22 @@
#include "storagecomponent.h"
#include <vespa/storage/storageserver/prioritymapper.h>
-
#include <vespa/vespalib/util/exceptions.h>
#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/vdslib/distribution/distribution.h>
+#include <vespa/document/fieldset/fieldsetrepo.h>
namespace storage {
+StorageComponent::Repos::Repos(std::shared_ptr<const document::DocumentTypeRepo> repo)
+ : documentTypeRepo(std::move(repo)),
+ fieldSetRepo(std::make_shared<document::FieldSetRepo>(*documentTypeRepo))
+{}
+
+StorageComponent::Repos::~Repos() = default;
+
// Defined in cpp file to allow unique pointers of unknown type in header.
-StorageComponent::~StorageComponent()
-{
-}
+StorageComponent::~StorageComponent() = default;
void
StorageComponent::setNodeInfo(vespalib::stringref clusterName,
@@ -26,10 +31,11 @@ StorageComponent::setNodeInfo(vespalib::stringref clusterName,
}
void
-StorageComponent::setDocumentTypeRepo(DocumentTypeRepoSP repo)
+StorageComponent::setDocumentTypeRepo(std::shared_ptr<const document::DocumentTypeRepo> docTypeRepo)
{
+ auto repo = std::make_shared<Repos>(std::move(docTypeRepo));
std::lock_guard guard(_lock);
- _docTypeRepo = repo;
+ _repos = std::move(repo);
}
void
@@ -78,7 +84,7 @@ StorageComponent::StorageComponent(StorageComponentRegister& compReg,
_clusterName(),
_nodeType(nullptr),
_index(0),
- _docTypeRepo(),
+ _repos(),
_loadTypes(),
_priorityMapper(new PriorityMapper),
_bucketIdFactory(),
@@ -120,7 +126,7 @@ StorageComponent::DocumentTypeRepoSP
StorageComponent::getTypeRepo() const
{
std::lock_guard guard(_lock);
- return _docTypeRepo;
+ return _repos;
}
StorageComponent::LoadTypeSetSP
diff --git a/storage/src/vespa/storage/common/storagecomponent.h b/storage/src/vespa/storage/common/storagecomponent.h
index 821cd43f21d..4e5d65310e1 100644
--- a/storage/src/vespa/storage/common/storagecomponent.h
+++ b/storage/src/vespa/storage/common/storagecomponent.h
@@ -42,6 +42,7 @@ namespace vespa::config::content::core::internal {
}
namespace document {
class DocumentTypeRepo;
+ class FieldSetRepo;
}
namespace documentapi {
class LoadType;
@@ -58,9 +59,15 @@ struct StorageComponentRegister;
class StorageComponent : public framework::Component {
public:
+ struct Repos {
+ explicit Repos(std::shared_ptr<const document::DocumentTypeRepo> repo);
+ ~Repos();
+ const std::shared_ptr<const document::DocumentTypeRepo> documentTypeRepo;
+ const std::shared_ptr<const document::FieldSetRepo> fieldSetRepo;
+ };
using UP = std::unique_ptr<StorageComponent>;
using PriorityConfig = vespa::config::content::core::internal::InternalStorPrioritymappingType;
- using DocumentTypeRepoSP = std::shared_ptr<const document::DocumentTypeRepo>;
+ using DocumentTypeRepoSP = std::shared_ptr<Repos>;
using LoadTypeSetSP = std::shared_ptr<documentapi::LoadTypeSet>;
using DistributionSP = std::shared_ptr<lib::Distribution>;
@@ -68,9 +75,7 @@ public:
* Node type is supposed to be set immediately, and never be updated.
* Thus it does not need to be threadsafe. Should never be used before set.
*/
- void setNodeInfo(vespalib::stringref clusterName,
- const lib::NodeType& nodeType,
- uint16_t index);
+ void setNodeInfo(vespalib::stringref clusterName, const lib::NodeType& nodeType, uint16_t index);
/**
* Node state updater is supposed to be set immediately, and never be
@@ -78,14 +83,14 @@ public:
* before set.
*/
void setNodeStateUpdater(NodeStateUpdater& updater);
- void setDocumentTypeRepo(DocumentTypeRepoSP);
+ void setDocumentTypeRepo(std::shared_ptr<const document::DocumentTypeRepo>);
void setLoadTypes(LoadTypeSetSP);
void setPriorityConfig(const PriorityConfig&);
void setBucketIdFactory(const document::BucketIdFactory&);
void setDistribution(DistributionSP);
StorageComponent(StorageComponentRegister&, vespalib::stringref name);
- virtual ~StorageComponent();
+ ~StorageComponent() override;
vespalib::string getClusterName() const { return _clusterName; }
const lib::NodeType& getNodeType() const { return *_nodeType; }
@@ -106,7 +111,8 @@ private:
vespalib::string _clusterName;
const lib::NodeType* _nodeType;
uint16_t _index;
- DocumentTypeRepoSP _docTypeRepo;
+ DocumentTypeRepoSP _repos;
+ // TODO: move loadTypes and _distribution in to _repos so lock will only taken once and only copying one shared_ptr.
LoadTypeSetSP _loadTypes;
std::unique_ptr<PriorityMapper> _priorityMapper;
document::BucketIdFactory _bucketIdFactory;
diff --git a/storage/src/vespa/storage/config/distributorconfiguration.cpp b/storage/src/vespa/storage/config/distributorconfiguration.cpp
index 0c9988421a3..aa606cdc8b9 100644
--- a/storage/src/vespa/storage/config/distributorconfiguration.cpp
+++ b/storage/src/vespa/storage/config/distributorconfiguration.cpp
@@ -70,7 +70,7 @@ DistributorConfiguration::containsTimeStatement(const std::string& documentSelec
{
TimeVisitor visitor;
try {
- document::select::Parser parser(*_component.getTypeRepo(), _component.getBucketIdFactory());
+ document::select::Parser parser(*_component.getTypeRepo()->documentTypeRepo, _component.getBucketIdFactory());
std::unique_ptr<document::select::Node> node = parser.parse(documentSelection);
node->visit(visitor);
diff --git a/storage/src/vespa/storage/distributor/distributor.cpp b/storage/src/vespa/storage/distributor/distributor.cpp
index c74d4135556..cfd8d7f1753 100644
--- a/storage/src/vespa/storage/distributor/distributor.cpp
+++ b/storage/src/vespa/storage/distributor/distributor.cpp
@@ -108,8 +108,7 @@ Distributor::Distributor(DistributorComponentRegister& compReg,
_must_send_updated_host_info(false)
{
_component.registerMetric(*_metrics);
- _component.registerMetricUpdateHook(_metricUpdateHook,
- framework::SecondTime(0));
+ _component.registerMetricUpdateHook(_metricUpdateHook, framework::SecondTime(0));
_distributorStatusDelegate.registerStatusPage();
_bucketDBStatusDelegate.registerStatusPage();
hostInfoReporterRegistrar.registerReporter(&_hostInfoReporter);
diff --git a/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp
index ca1b6f266d6..4c762cf4c23 100644
--- a/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp
@@ -38,8 +38,7 @@ RemoveLocationOperation::getBucketId(
DistributorComponent& manager,
const api::RemoveLocationCommand& cmd, document::BucketId& bid)
{
- std::shared_ptr<const document::DocumentTypeRepo> repo = manager.getTypeRepo();
- document::select::Parser parser(*repo, manager.getBucketIdFactory());
+ document::select::Parser parser(*manager.getTypeRepo()->documentTypeRepo, manager.getBucketIdFactory());
document::BucketSelector bucketSel(manager.getBucketIdFactory());
std::unique_ptr<document::BucketSelector::BucketVector> exprResult
diff --git a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
index 41f452df801..3866ee4e6f7 100644
--- a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
@@ -576,7 +576,7 @@ TwoPhaseUpdateOperation::processAndMatchTasCondition(DistributorMessageSender& s
return true; // No condition; nothing to do here.
}
- document::select::Parser parser(*_manager.getTypeRepo(), _manager.getBucketIdFactory());
+ document::select::Parser parser(*_manager.getTypeRepo()->documentTypeRepo, _manager.getBucketIdFactory());
std::unique_ptr<document::select::Node> selection;
try {
selection = parser.parse(_updateCmd->getCondition().getSelection());
diff --git a/storage/src/vespa/storage/persistence/mergehandler.cpp b/storage/src/vespa/storage/persistence/mergehandler.cpp
index b275150fe37..612d4545a8a 100644
--- a/storage/src/vespa/storage/persistence/mergehandler.cpp
+++ b/storage/src/vespa/storage/persistence/mergehandler.cpp
@@ -1,6 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
#include "mergehandler.h"
#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/vdslib/distribution/distribution.h>
@@ -14,17 +13,14 @@ LOG_SETUP(".persistence.mergehandler");
namespace storage {
-MergeHandler::MergeHandler(spi::PersistenceProvider& spi,
- PersistenceUtil& env)
+MergeHandler::MergeHandler(spi::PersistenceProvider& spi, PersistenceUtil& env)
: _spi(spi),
_env(env),
_maxChunkSize(env._config.bucketMergeChunkSize)
{
}
-MergeHandler::MergeHandler(spi::PersistenceProvider& spi,
- PersistenceUtil& env,
- uint32_t maxChunkSize)
+MergeHandler::MergeHandler(spi::PersistenceProvider& spi, PersistenceUtil& env, uint32_t maxChunkSize)
: _spi(spi),
_env(env),
_maxChunkSize(maxChunkSize)
@@ -58,9 +54,7 @@ checkResult(const spi::Result& result,
}
void
-checkResult(const spi::Result& result,
- const spi::Bucket& bucket,
- const char* op)
+checkResult(const spi::Result& result, const spi::Bucket& bucket, const char* op)
{
if (result.hasError()) {
vespalib::asciistream ss;
@@ -142,8 +136,7 @@ MergeHandler::populateMetaData(
IteratorGuard iteratorGuard(_spi, iteratorId, context);
while (true) {
- spi::IterateResult result(
- _spi.iterate(iteratorId, UINT64_MAX, context));
+ spi::IterateResult result(_spi.iterate(iteratorId, UINT64_MAX, context));
if (result.getErrorCode() != spi::Result::ErrorType::NONE) {
std::ostringstream ss;
ss << "Failed to iterate for "
@@ -300,8 +293,7 @@ namespace {
}
int
- countUnfilledEntries(
- const std::vector<api::ApplyBucketDiffCommand::Entry>& diff)
+ countUnfilledEntries(const std::vector<api::ApplyBucketDiffCommand::Entry>& diff)
{
int count = 0;
@@ -323,11 +315,9 @@ namespace {
return value;
}
- api::StorageMessageAddress createAddress(const std::string& clusterName,
- uint16_t node)
+ api::StorageMessageAddress createAddress(const std::string& clusterName, uint16_t node)
{
- return api::StorageMessageAddress(
- clusterName, lib::NodeType::STORAGE, node);
+ return api::StorageMessageAddress(clusterName, lib::NodeType::STORAGE, node);
}
void assertContainedInBucket(const document::DocumentId& docId,
@@ -370,10 +360,8 @@ MergeHandler::fetchLocalData(
alreadyFilled += e._headerBlob.size() + e._bodyBlob.size();
}
}
- uint32_t remainingSize = _maxChunkSize - std::min(_maxChunkSize,
- alreadyFilled);
- LOG(debug, "Diff of %s has already filled %u of max %u bytes, "
- "remaining size to fill is %u",
+ uint32_t remainingSize = _maxChunkSize - std::min(_maxChunkSize, alreadyFilled);
+ LOG(debug, "Diff of %s has already filled %u of max %u bytes, remaining size to fill is %u",
bucket.toString().c_str(), alreadyFilled, _maxChunkSize, remainingSize);
if (remainingSize == 0) {
LOG(debug, "Diff already at max chunk size, not fetching any local data");
@@ -424,8 +412,7 @@ MergeHandler::fetchLocalData(
{
remainingSize -= list[i]->getSize();
LOG(spam, "Added %s, remainingSize is %u",
- entries.back()->toString().c_str(),
- remainingSize);
+ entries.back()->toString().c_str(), remainingSize);
entries.push_back(std::move(list[i]));
} else {
LOG(spam, "Adding %s would exceed chunk size limit of %u; "
@@ -451,8 +438,7 @@ MergeHandler::fetchLocalData(
docEntry.toString().c_str());
std::vector<api::ApplyBucketDiffCommand::Entry>::iterator iter(
- std::lower_bound(diff.begin(),
- diff.end(),
+ std::lower_bound(diff.begin(), diff.end(),
api::Timestamp(docEntry.getTimestamp()),
DiffEntryTimestampPredicate()));
assert(iter != diff.end());
@@ -564,8 +550,8 @@ MergeHandler::applyDiffLocally(
std::vector<spi::DocEntry::UP> entries;
populateMetaData(bucket, MAX_TIMESTAMP, entries, context);
- std::shared_ptr<const document::DocumentTypeRepo> repo(_env._component.getTypeRepo());
- assert(repo.get() != nullptr);
+ std::shared_ptr<const document::DocumentTypeRepo> repo(_env._component.getTypeRepo()->documentTypeRepo);
+ assert(repo);
uint32_t existingCount = entries.size();
uint32_t i = 0, j = 0;
@@ -725,8 +711,7 @@ MergeHandler::processBucketMerge(const spi::Bucket& bucket, MergeStatus& status,
// If nothing to update, we're done.
if (status.diff.size() == 0) {
- LOG(debug, "Done with merge of %s. No more entries in diff.",
- bucket.toString().c_str());
+ LOG(debug, "Done with merge of %s. No more entries in diff.", bucket.toString().c_str());
return status.reply;
}
@@ -753,10 +738,8 @@ MergeHandler::processBucketMerge(const spi::Bucket& bucket, MergeStatus& status,
? std::numeric_limits<uint32_t>().max()
: _maxChunkSize);
- cmd.reset(new api::ApplyBucketDiffCommand(
- bucket.getBucket(), nodes, maxSize));
- cmd->setAddress(createAddress(_env._component.getClusterName(),
- nodes[1].index));
+ cmd = std::make_shared<api::ApplyBucketDiffCommand>(bucket.getBucket(), nodes, maxSize);
+ cmd->setAddress(createAddress(_env._component.getClusterName(), nodes[1].index));
findCandidates(bucket.getBucketId(),
status,
true,
@@ -796,8 +779,7 @@ MergeHandler::processBucketMerge(const spi::Bucket& bucket, MergeStatus& status,
for (std::map<uint16_t, uint32_t>::const_iterator it = counts.begin();
it != counts.end(); ++it)
{
- if (it->second >= uint32_t(
- _env._config.commonMergeChainOptimalizationMinimumSize)
+ if (it->second >= uint32_t(_env._config.commonMergeChainOptimalizationMinimumSize)
|| counts.size() == 1)
{
LOG(spam, "Sending separate apply bucket diff for path %x "
@@ -830,15 +812,11 @@ MergeHandler::processBucketMerge(const spi::Bucket& bucket, MergeStatus& status,
(_env._config.enableMergeLocalNodeChooseDocsOptimalization
? std::numeric_limits<uint32_t>().max()
: _maxChunkSize);
- cmd.reset(new api::ApplyBucketDiffCommand(
- bucket.getBucket(), nodes, maxSize));
- cmd->setAddress(
- createAddress(_env._component.getClusterName(),
- nodes[1].index));
+ cmd = std::make_shared<api::ApplyBucketDiffCommand>(bucket.getBucket(), nodes, maxSize);
+ cmd->setAddress(createAddress(_env._component.getClusterName(), nodes[1].index));
// Add all the metadata, and thus use big limit. Max
// data to fetch parameter will control amount added.
- findCandidates(bucket.getBucketId(), status, true,
- it->first, newMask, maxSize, *cmd);
+ findCandidates(bucket.getBucketId(), status, true, it->first, newMask, maxSize, *cmd);
break;
}
}
@@ -846,22 +824,17 @@ MergeHandler::processBucketMerge(const spi::Bucket& bucket, MergeStatus& status,
// If we found no group big enough to handle on its own, do a common
// merge to merge the remaining data.
- if (cmd.get() == 0) {
- cmd.reset(new api::ApplyBucketDiffCommand(bucket.getBucket(),
- status.nodeList,
- _maxChunkSize));
- cmd->setAddress(createAddress(_env._component.getClusterName(),
- status.nodeList[1].index));
- findCandidates(bucket.getBucketId(), status, false, 0, 0,
- _maxChunkSize, *cmd);
+ if ( ! cmd ) {
+ cmd = std::make_shared<api::ApplyBucketDiffCommand>(bucket.getBucket(), status.nodeList, _maxChunkSize);
+ cmd->setAddress(createAddress(_env._component.getClusterName(), status.nodeList[1].index));
+ findCandidates(bucket.getBucketId(), status, false, 0, 0, _maxChunkSize, *cmd);
}
cmd->setPriority(status.context.getPriority());
cmd->setTimeout(status.timeout);
if (applyDiffNeedLocalData(cmd->getDiff(), 0, true)) {
framework::MilliSecTimer startTime(_env._component.getClock());
fetchLocalData(bucket, cmd->getLoadType(), cmd->getDiff(), 0, context);
- _env._metrics.merge_handler_metrics.mergeDataReadLatency.addValue(
- startTime.getElapsedTimeAsDouble());
+ _env._metrics.merge_handler_metrics.mergeDataReadLatency.addValue(startTime.getElapsedTimeAsDouble());
}
status.pendingId = cmd->getMsgId();
LOG(debug, "Sending %s", cmd->toString().c_str());
@@ -876,8 +849,7 @@ public:
document::Bucket _bucket;
bool _active;
- MergeStateDeleter(FileStorHandler& handler,
- const document::Bucket& bucket)
+ MergeStateDeleter(FileStorHandler& handler, const document::Bucket& bucket)
: _handler(handler),
_bucket(bucket),
_active(true)
@@ -904,8 +876,7 @@ MergeHandler::handleMergeBucket(api::MergeBucketCommand& cmd, MessageTracker::UP
if (cmd.getNodes().size() < 2) {
LOG(debug, "Attempt to merge a single instance of a bucket");
- tracker->fail(ReturnCode::ILLEGAL_PARAMETERS,
- "Cannot merge a single copy");
+ tracker->fail(ReturnCode::ILLEGAL_PARAMETERS, "Cannot merge a single copy");
return tracker;
}
@@ -952,8 +923,7 @@ MergeHandler::handleMergeBucket(api::MergeBucketCommand& cmd, MessageTracker::UP
auto cmd2 = std::make_shared<api::GetBucketDiffCommand>(bucket.getBucket(), s->nodeList, s->maxTimestamp.getTime());
if (!buildBucketInfoList(bucket, cmd.getLoadType(), s->maxTimestamp, 0, cmd2->getDiff(), tracker->context())) {
LOG(debug, "Bucket non-existing in db. Failing merge.");
- tracker->fail(ReturnCode::BUCKET_DELETED,
- "Bucket not found in buildBucketInfo step");
+ tracker->fail(ReturnCode::BUCKET_DELETED, "Bucket not found in buildBucketInfo step");
return tracker;
}
_env._metrics.merge_handler_metrics.mergeMetadataReadLatency.addValue(s->startTime.getElapsedTimeAsDouble());
@@ -1114,8 +1084,7 @@ MergeHandler::handleGetBucketDiff(api::GetBucketDiffCommand& cmd, MessageTracker
checkResult(_spi.createBucket(bucket, tracker->context()), bucket, "create bucket");
if (_env._fileStorHandler.isMerging(bucket.getBucket())) {
- tracker->fail(ReturnCode::BUSY,
- "A merge is already running on this bucket.");
+ tracker->fail(ReturnCode::BUSY, "A merge is already running on this bucket.");
return tracker;
}
uint8_t index = findOwnIndex(cmd.getNodes(), _env._nodeIndex);
@@ -1128,16 +1097,13 @@ MergeHandler::handleGetBucketDiff(api::GetBucketDiffCommand& cmd, MessageTracker
index, local, tracker->context()))
{
LOG(debug, "Bucket non-existing in db. Failing merge.");
- tracker->fail(ReturnCode::BUCKET_DELETED,
- "Bucket not found in buildBucketInfo step");
+ tracker->fail(ReturnCode::BUCKET_DELETED, "Bucket not found in buildBucketInfo step");
return tracker;
}
if (!mergeLists(remote, local, local)) {
- LOG(error, "Diffing %s found suspect entries.",
- bucket.toString().c_str());
+ LOG(error, "Diffing %s found suspect entries.", bucket.toString().c_str());
}
- _env._metrics.merge_handler_metrics.mergeMetadataReadLatency.addValue(
- startTime.getElapsedTimeAsDouble());
+ _env._metrics.merge_handler_metrics.mergeMetadataReadLatency.addValue(startTime.getElapsedTimeAsDouble());
// If last node in merge chain, we can send reply straight away
if (index + 1u >= cmd.getNodes().size()) {
@@ -1214,24 +1180,21 @@ namespace {
bool operator()(const api::ApplyBucketDiffCommand::Entry& x,
const api::ApplyBucketDiffCommand::Entry& y)
{
- return (x._entry._timestamp
- < y._entry._timestamp);
+ return (x._entry._timestamp < y._entry._timestamp);
}
};
} // End of anonymous namespace
void
-MergeHandler::handleGetBucketDiffReply(api::GetBucketDiffReply& reply,
- MessageSender& sender)
+MergeHandler::handleGetBucketDiffReply(api::GetBucketDiffReply& reply, MessageSender& sender)
{
_env._metrics.getBucketDiffReply.inc();
spi::Bucket bucket(reply.getBucket(), spi::PartitionId(_env._partition));
LOG(debug, "GetBucketDiffReply(%s)", bucket.toString().c_str());
if (!_env._fileStorHandler.isMerging(bucket.getBucket())) {
- LOG(warning, "Got GetBucketDiffReply for %s which we have no "
- "merge state for.",
+ LOG(warning, "Got GetBucketDiffReply for %s which we have no merge state for.",
bucket.toString().c_str());
return;
}
@@ -1385,8 +1348,7 @@ MergeHandler::handleApplyBucketDiff(api::ApplyBucketDiffCommand& cmd, MessageTra
}
void
-MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply,
- MessageSender& sender)
+MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply, MessageSender& sender)
{
_env._metrics.applyBucketDiffReply.inc();
spi::Bucket bucket(reply.getBucket(), spi::PartitionId(_env._partition));
@@ -1394,8 +1356,7 @@ MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply,
LOG(debug, "%s", reply.toString().c_str());
if (!_env._fileStorHandler.isMerging(bucket.getBucket())) {
- LOG(warning, "Got ApplyBucketDiffReply for %s which we have no "
- "merge state for.",
+ LOG(warning, "Got ApplyBucketDiffReply for %s which we have no merge state for.",
bucket.toString().c_str());
return;
}
@@ -1413,25 +1374,19 @@ MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply,
api::ReturnCode returnCode = reply.getResult();
try {
if (reply.getResult().failed()) {
- LOG(debug, "Got failed apply bucket diff reply %s",
- reply.toString().c_str());
+ LOG(debug, "Got failed apply bucket diff reply %s", reply.toString().c_str());
} else {
assert(reply.getNodes().size() >= 2);
uint8_t index = findOwnIndex(reply.getNodes(), _env._nodeIndex);
if (applyDiffNeedLocalData(diff, index, false)) {
framework::MilliSecTimer startTime(_env._component.getClock());
- fetchLocalData(bucket, reply.getLoadType(), diff, index,
- s.context);
- _env._metrics.merge_handler_metrics.mergeDataReadLatency.addValue(
- startTime.getElapsedTimeAsDouble());
+ fetchLocalData(bucket, reply.getLoadType(), diff, index, s.context);
+ _env._metrics.merge_handler_metrics.mergeDataReadLatency.addValue(startTime.getElapsedTimeAsDouble());
}
if (applyDiffHasLocallyNeededData(diff, index)) {
framework::MilliSecTimer startTime(_env._component.getClock());
- api::BucketInfo info(
- applyDiffLocally(bucket, reply.getLoadType(), diff,
- index, s.context));
- _env._metrics.merge_handler_metrics.mergeDataWriteLatency.addValue(
- startTime.getElapsedTimeAsDouble());
+ api::BucketInfo info(applyDiffLocally(bucket, reply.getLoadType(), diff, index, s.context));
+ _env._metrics.merge_handler_metrics.mergeDataWriteLatency.addValue(startTime.getElapsedTimeAsDouble());
} else {
LOG(spam, "Merge(%s): Didn't need fetched data on node %u (%u)",
bucket.toString().c_str(),
@@ -1462,8 +1417,7 @@ MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply,
"Got reply indicating merge cycle did not fix any entries: %s",
reply.toString(true).c_str());
LOG(warning,
- "Merge state for which there was no progress across a "
- "full merge cycle: %s",
+ "Merge state for which there was no progress across a full merge cycle: %s",
s.toString().c_str());
}
@@ -1477,8 +1431,7 @@ MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply,
// We have sent something on and shouldn't reply now.
clearState = false;
} else {
- _env._metrics.merge_handler_metrics.mergeLatencyTotal.addValue(
- s.startTime.getElapsedTimeAsDouble());
+ _env._metrics.merge_handler_metrics.mergeLatencyTotal.addValue(s.startTime.getElapsedTimeAsDouble());
}
}
} else {
@@ -1490,8 +1443,7 @@ MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply,
} catch (std::exception& e) {
_env._fileStorHandler.clearMergeStatus(
bucket.getBucket(),
- api::ReturnCode(api::ReturnCode::INTERNAL_FAILURE,
- e.what()));
+ api::ReturnCode(api::ReturnCode::INTERNAL_FAILURE, e.what()));
throw;
}
diff --git a/storage/src/vespa/storage/persistence/persistencethread.cpp b/storage/src/vespa/storage/persistence/persistencethread.cpp
index a9b13bab0f6..2cdb6194b6d 100644
--- a/storage/src/vespa/storage/persistence/persistencethread.cpp
+++ b/storage/src/vespa/storage/persistence/persistencethread.cpp
@@ -286,14 +286,14 @@ PersistenceThread::handleGet(api::GetCommand& cmd, MessageTracker::UP tracker)
tracker->setMetric(metrics);
metrics.request_size.addValue(cmd.getApproxByteSize());
- document::FieldSet::UP fieldSet = document::FieldSetRepo::parse(*_env._component.getTypeRepo(), cmd.getFieldSet());
+ auto fieldSet = _env._component.getTypeRepo()->fieldSetRepo->getFieldSet(cmd.getFieldSet());
tracker->context().setReadConsistency(api_read_consistency_to_spi(cmd.internal_read_consistency()));
spi::GetResult result =
_spi.get(getBucket(cmd.getDocumentId(), cmd.getBucket()), *fieldSet, cmd.getDocumentId(), tracker->context());
if (tracker->checkForError(result)) {
if (!result.hasDocument() && (document::FieldSet::Type::NONE != fieldSet->getType())) {
- _env._metrics.get[cmd.getLoadType()].notFound.inc();
+ metrics.notFound.inc();
}
tracker->setReply(std::make_shared<api::GetReply>(cmd, result.getDocumentPtr(), result.getTimestamp(),
false, result.is_tombstone()));
@@ -455,7 +455,7 @@ MessageTracker::UP
PersistenceThread::handleCreateIterator(CreateIteratorCommand& cmd, MessageTracker::UP tracker)
{
tracker->setMetric(_env._metrics.createIterator);
- document::FieldSet::SP fieldSet = document::FieldSetRepo::parse(*_env._component.getTypeRepo(), cmd.getFields());
+ document::FieldSet::SP fieldSet = _env._component.getTypeRepo()->fieldSetRepo->getFieldSet(cmd.getFields());
tracker->context().setReadConsistency(cmd.getReadConsistency());
spi::CreateIteratorResult result(_spi.createIterator(
spi::Bucket(cmd.getBucket(), spi::PartitionId(_env._partition)),
diff --git a/storage/src/vespa/storage/persistence/persistenceutil.cpp b/storage/src/vespa/storage/persistence/persistenceutil.cpp
index 6605e3f6363..63ac5405fab 100644
--- a/storage/src/vespa/storage/persistence/persistenceutil.cpp
+++ b/storage/src/vespa/storage/persistence/persistenceutil.cpp
@@ -162,7 +162,7 @@ PersistenceUtil::PersistenceUtil(
_nodeIndex(_component.getIndex()),
_metrics(metrics),
_bucketFactory(_component.getBucketIdFactory()),
- _repo(_component.getTypeRepo()),
+ _repo(_component.getTypeRepo()->documentTypeRepo),
_spi(provider)
{
}
diff --git a/storage/src/vespa/storage/persistence/testandsethelper.cpp b/storage/src/vespa/storage/persistence/testandsethelper.cpp
index ed396cd522e..1dbcbcc3fc6 100644
--- a/storage/src/vespa/storage/persistence/testandsethelper.cpp
+++ b/storage/src/vespa/storage/persistence/testandsethelper.cpp
@@ -1,8 +1,8 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
// @author Vegard Sjonfjell
-#include <vespa/storage/persistence/fieldvisitor.h>
-#include <vespa/storage/persistence/testandsethelper.h>
+#include "fieldvisitor.h"
+#include "testandsethelper.h"
#include <vespa/document/select/parser.h>
#include <vespa/document/repo/documenttyperepo.h>
#include <vespa/vespalib/util/stringfmt.h>
@@ -11,19 +11,19 @@ using namespace std::string_literals;
namespace storage {
-void TestAndSetHelper::getDocumentType() {
+void TestAndSetHelper::getDocumentType(const document::DocumentTypeRepo & documentTypeRepo) {
if (!_docId.hasDocType()) {
throw TestAndSetException(api::ReturnCode(api::ReturnCode::ILLEGAL_PARAMETERS, "Document id has no doctype"));
}
- _docTypePtr = _component.getTypeRepo()->getDocumentType(_docId.getDocType());
+ _docTypePtr = documentTypeRepo.getDocumentType(_docId.getDocType());
if (_docTypePtr == nullptr) {
throw TestAndSetException(api::ReturnCode(api::ReturnCode::ILLEGAL_PARAMETERS, "Document type does not exist"));
}
}
-void TestAndSetHelper::parseDocumentSelection() {
- document::select::Parser parser(*_component.getTypeRepo(), _component.getBucketIdFactory());
+void TestAndSetHelper::parseDocumentSelection(const document::DocumentTypeRepo & documentTypeRepo) {
+ document::select::Parser parser(documentTypeRepo, _component.getBucketIdFactory());
try {
_docSelectionUp = parser.parse(_cmd.getCondition().getSelection());
@@ -49,8 +49,9 @@ TestAndSetHelper::TestAndSetHelper(PersistenceThread & thread, const api::TestAn
_docTypePtr(nullptr),
_missingDocumentImpliesMatch(missingDocumentImpliesMatch)
{
- getDocumentType();
- parseDocumentSelection();
+ auto docTypeRepo = _component.getTypeRepo()->documentTypeRepo;
+ getDocumentType(*docTypeRepo);
+ parseDocumentSelection(*docTypeRepo);
}
TestAndSetHelper::~TestAndSetHelper() = default;
diff --git a/storage/src/vespa/storage/persistence/testandsethelper.h b/storage/src/vespa/storage/persistence/testandsethelper.h
index b5fa29d0106..b528b5034f9 100644
--- a/storage/src/vespa/storage/persistence/testandsethelper.h
+++ b/storage/src/vespa/storage/persistence/testandsethelper.h
@@ -28,8 +28,8 @@ class TestAndSetHelper {
std::unique_ptr<document::select::Node> _docSelectionUp;
bool _missingDocumentImpliesMatch;
- void getDocumentType();
- void parseDocumentSelection();
+ void getDocumentType(const document::DocumentTypeRepo & documentTypeRepo);
+ void parseDocumentSelection(const document::DocumentTypeRepo & documentTypeRepo);
spi::GetResult retrieveDocument(const document::FieldSet & fieldSet, spi::Context & context);
public:
diff --git a/storage/src/vespa/storage/storageserver/communicationmanager.cpp b/storage/src/vespa/storage/storageserver/communicationmanager.cpp
index c0adb01ad47..b51394e2e64 100644
--- a/storage/src/vespa/storage/storageserver/communicationmanager.cpp
+++ b/storage/src/vespa/storage/storageserver/communicationmanager.cpp
@@ -395,10 +395,12 @@ void CommunicationManager::configure(std::unique_ptr<CommunicationManagerConfig>
// Configure messagebus here as we for legacy reasons have
// config here.
+ auto documentTypeRepo = _component.getTypeRepo()->documentTypeRepo;
+ auto loadTypes = _component.getLoadTypes();
_mbus = std::make_unique<mbus::RPCMessageBus>(
mbus::ProtocolSet()
- .add(std::make_shared<documentapi::DocumentProtocol>(*_component.getLoadTypes(), _component.getTypeRepo()))
- .add(std::make_shared<mbusprot::StorageProtocol>(_component.getTypeRepo(), *_component.getLoadTypes())),
+ .add(std::make_shared<documentapi::DocumentProtocol>(*loadTypes, documentTypeRepo))
+ .add(std::make_shared<mbusprot::StorageProtocol>(documentTypeRepo, *loadTypes)),
params,
_configUri);
diff --git a/storage/src/vespa/storage/visiting/visitorthread.cpp b/storage/src/vespa/storage/visiting/visitorthread.cpp
index c6e75735690..73f4a70d80d 100644
--- a/storage/src/vespa/storage/visiting/visitorthread.cpp
+++ b/storage/src/vespa/storage/visiting/visitorthread.cpp
@@ -31,7 +31,7 @@ VisitorThread::Event::Event(Event&& other)
{
}
-VisitorThread::Event::~Event() {}
+VisitorThread::Event::~Event() = default;
VisitorThread::Event&
VisitorThread::Event::operator= (Event&& other)
@@ -44,9 +44,7 @@ VisitorThread::Event::operator= (Event&& other)
return *this;
}
-VisitorThread::Event::Event(
- api::VisitorId visitor,
- const std::shared_ptr<api::StorageMessage>& msg)
+VisitorThread::Event::Event(api::VisitorId visitor, const std::shared_ptr<api::StorageMessage>& msg)
: _visitorId(visitor),
_message(msg),
_timer(),
@@ -54,9 +52,7 @@ VisitorThread::Event::Event(
{
}
-VisitorThread::Event::Event(
- api::VisitorId visitor,
- mbus::Reply::UP reply)
+VisitorThread::Event::Event(api::VisitorId visitor, mbus::Reply::UP reply)
: _visitorId(visitor),
_mbusReply(std::move(reply)),
_timer(),
@@ -331,7 +327,7 @@ VisitorThread::handleNonExistingVisitorCall(const Event& entry,
ReturnCode& code)
{
// Get current time. Set the time that is the oldest still recent.
- framework::SecondTime currentTime(_component.getClock().getTimeInSeconds());;
+ framework::SecondTime currentTime(_component.getClock().getTimeInSeconds());
trimRecentlyCompletedList(currentTime);
// Go through all recent visitors. Ignore request if recent
@@ -435,8 +431,7 @@ VisitorThread::onCreateVisitor(
do {
// If no buckets are specified, fail command
if (cmd->getBuckets().empty()) {
- result = ReturnCode(ReturnCode::ILLEGAL_PARAMETERS,
- "No buckets specified");
+ result = ReturnCode(ReturnCode::ILLEGAL_PARAMETERS, "No buckets specified");
LOG(warning, "CreateVisitor(%s): No buckets specified. Aborting.",
cmd->getInstanceId().c_str());
break;
@@ -480,7 +475,7 @@ VisitorThread::onCreateVisitor(
// Parse document selection
try{
if (!cmd->getDocumentSelection().empty()) {
- std::shared_ptr<const document::DocumentTypeRepo> repo(_component.getTypeRepo());
+ std::shared_ptr<const document::DocumentTypeRepo> repo(_component.getTypeRepo()->documentTypeRepo);
const document::BucketIdFactory& idFactory(_component.getBucketIdFactory());
document::select::Parser parser(*repo, idFactory);
docSelection = parser.parse(cmd->getDocumentSelection());
diff --git a/streamingvisitors/src/tests/searchvisitor/searchvisitor_test.cpp b/streamingvisitors/src/tests/searchvisitor/searchvisitor_test.cpp
index 18cd7fab2b8..216e02c5edd 100644
--- a/streamingvisitors/src/tests/searchvisitor/searchvisitor_test.cpp
+++ b/streamingvisitors/src/tests/searchvisitor/searchvisitor_test.cpp
@@ -35,7 +35,7 @@ private:
public:
SearchVisitorTest();
- ~SearchVisitorTest();
+ ~SearchVisitorTest() override;
int Main() override;
};
@@ -46,9 +46,9 @@ SearchVisitorTest::SearchVisitorTest() :
{
_componentRegister.setNodeInfo("mycluster", lib::NodeType::STORAGE, 1);
_componentRegister.setClock(_clock);
- StorageComponent::DocumentTypeRepoSP repo(new DocumentTypeRepo(readDocumenttypesConfig(TEST_PATH("cfg/documenttypes.cfg"))));
+ auto repo = std::make_shared<DocumentTypeRepo>(readDocumenttypesConfig(TEST_PATH("cfg/documenttypes.cfg")));
_componentRegister.setDocumentTypeRepo(repo);
- _component.reset(new StorageComponent(_componentRegister, "storage"));
+ _component = std::make_unique<StorageComponent>(_componentRegister, "storage");
}
SearchVisitorTest::~SearchVisitorTest() = default;
@@ -80,8 +80,8 @@ SearchVisitorTest::testCreateSearchVisitor(const vespalib::string & dir, const v
void
SearchVisitorTest::testSearchEnvironment()
{
- EXPECT_TRUE(_env.getVSMAdapter("simple") != NULL);
- EXPECT_TRUE(_env.getRankManager("simple") != NULL);
+ EXPECT_TRUE(_env.getVSMAdapter("simple") != nullptr);
+ EXPECT_TRUE(_env.getRankManager("simple") != nullptr);
}
void
diff --git a/streamingvisitors/src/vespa/searchvisitor/searchvisitor.cpp b/streamingvisitors/src/vespa/searchvisitor/searchvisitor.cpp
index 76ef0f23dd2..cc28c76acce 100644
--- a/streamingvisitors/src/vespa/searchvisitor/searchvisitor.cpp
+++ b/streamingvisitors/src/vespa/searchvisitor/searchvisitor.cpp
@@ -677,7 +677,7 @@ SearchVisitor::setupScratchDocument(const StringFieldIdTMap & fieldsInQuery)
}
// Init based on default document type and mapping from field name to field id
_docTypeMapping.init(_fieldSearchSpecMap.documentTypeMap().begin()->first,
- _fieldsUnion, *_component.getTypeRepo());
+ _fieldsUnion, *_component.getTypeRepo()->documentTypeRepo);
_docTypeMapping.prepareBaseDoc(_fieldPathMap);
}