author     Henning Baldersheim <balder@yahoo-inc.com>   2020-08-18 22:28:23 +0200
committer  GitHub <noreply@github.com>                  2020-08-18 22:28:23 +0200
commit     2f0ccbe4e07afee74513e9e89a2a037db47fd542 (patch)
tree       a6eb15dc7d566151325d54ebb0222f1d8df8c22e /storage
parent     4a229ee61e8409a73722eb2820b5ed8a3e6b8f21 (diff)
parent     623648364a8e32ff0ec811f2a8b0179bd13d5956 (diff)
Merge pull request #14051 from vespa-engine/balder/use-an-actual-fieldset-repo
Balder/use an actual fieldset repo
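In short, this merge makes StorageComponent hand out a small Repos bundle (the document::DocumentTypeRepo plus a document::FieldSetRepo built from it) instead of a bare DocumentTypeRepo, so call sites resolve field sets through the shared repo rather than re-parsing field-set strings. Below is a minimal, self-contained sketch of that pattern; the DocumentTypeRepo and FieldSetRepo types here are stand-ins for the real Vespa classes, and the authoritative signatures are the ones in the diff that follows.

    #include <memory>
    #include <mutex>

    struct DocumentTypeRepo {};                      // stand-in for document::DocumentTypeRepo
    struct FieldSetRepo {                            // stand-in for document::FieldSetRepo
        explicit FieldSetRepo(const DocumentTypeRepo&) {}
    };

    class StorageComponentSketch {
    public:
        // Both repos are created together and shared as one immutable bundle.
        struct Repos {
            explicit Repos(std::shared_ptr<const DocumentTypeRepo> repo)
                : documentTypeRepo(std::move(repo)),
                  fieldSetRepo(std::make_shared<FieldSetRepo>(*documentTypeRepo)) {}
            const std::shared_ptr<const DocumentTypeRepo> documentTypeRepo;
            const std::shared_ptr<const FieldSetRepo>     fieldSetRepo;
        };

        // A config update builds a new bundle and swaps it in under the lock.
        void setDocumentTypeRepo(std::shared_ptr<const DocumentTypeRepo> docTypeRepo) {
            auto repos = std::make_shared<Repos>(std::move(docTypeRepo));
            std::lock_guard guard(_lock);
            _repos = std::move(repos);
        }

        // Readers take the lock once, copy one shared_ptr, then use both repos freely.
        std::shared_ptr<Repos> getTypeRepo() const {
            std::lock_guard guard(_lock);
            return _repos;
        }

    private:
        mutable std::mutex     _lock;
        std::shared_ptr<Repos> _repos;
    };

    int main() {
        StorageComponentSketch component;
        component.setDocumentTypeRepo(std::make_shared<const DocumentTypeRepo>());
        auto repos = component.getTypeRepo();        // one lock, one shared_ptr copy
        return repos->fieldSetRepo ? 0 : 1;
    }

In the real code the call sites change accordingly: persistencethread.cpp now uses _env._component.getTypeRepo()->fieldSetRepo->getFieldSet(cmd.getFieldSet()) instead of document::FieldSetRepo::parse(...), selection parsers dereference getTypeRepo()->documentTypeRepo, and the SPI createIterator signature takes a shared_ptr to a FieldSet rather than a reference.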
Diffstat (limited to 'storage')
-rw-r--r--  storage/src/tests/common/teststorageapp.cpp                                            |   8
-rw-r--r--  storage/src/tests/common/teststorageapp.h                                              |   5
-rw-r--r--  storage/src/tests/persistence/common/persistenceproviderwrapper.cpp                    |   6
-rw-r--r--  storage/src/tests/persistence/common/persistenceproviderwrapper.h                      |   5
-rw-r--r--  storage/src/tests/persistence/filestorage/mergeblockingtest.cpp                        |   4
-rw-r--r--  storage/src/tests/persistence/filestorage/operationabortingtest.cpp                    |  15
-rw-r--r--  storage/src/tests/persistence/persistencetestutils.cpp                                 |  17
-rw-r--r--  storage/src/tests/persistence/persistencetestutils.h                                   |   2
-rw-r--r--  storage/src/vespa/storage/common/storagecomponent.cpp                                  |  24
-rw-r--r--  storage/src/vespa/storage/common/storagecomponent.h                                    |  21
-rw-r--r--  storage/src/vespa/storage/config/distributorconfiguration.cpp                          |   2
-rw-r--r--  storage/src/vespa/storage/distributor/distributor.cpp                                  |   3
-rw-r--r--  storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp |   3
-rw-r--r--  storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp |   2
-rw-r--r--  storage/src/vespa/storage/persistence/bucketprocessor.cpp                              |  10
-rw-r--r--  storage/src/vespa/storage/persistence/mergehandler.cpp                                 | 156
-rw-r--r--  storage/src/vespa/storage/persistence/persistencethread.cpp                            |  10
-rw-r--r--  storage/src/vespa/storage/persistence/persistenceutil.cpp                              |   2
-rw-r--r--  storage/src/vespa/storage/persistence/provider_error_wrapper.cpp                       |   4
-rw-r--r--  storage/src/vespa/storage/persistence/provider_error_wrapper.h                         |   5
-rw-r--r--  storage/src/vespa/storage/persistence/testandsethelper.cpp                             |  17
-rw-r--r--  storage/src/vespa/storage/persistence/testandsethelper.h                               |   4
-rw-r--r--  storage/src/vespa/storage/storageserver/communicationmanager.cpp                       |   6
-rw-r--r--  storage/src/vespa/storage/visiting/visitorthread.cpp                                    |  17
24 files changed, 151 insertions, 197 deletions
diff --git a/storage/src/tests/common/teststorageapp.cpp b/storage/src/tests/common/teststorageapp.cpp
index 9fcf1049e1b..1847de0e84f 100644
--- a/storage/src/tests/common/teststorageapp.cpp
+++ b/storage/src/tests/common/teststorageapp.cpp
@@ -171,19 +171,17 @@ TestServiceLayerApp::TestServiceLayerApp(DiskCount dc, NodeIndex index,
assert(dc > 0);
}
-TestServiceLayerApp::~TestServiceLayerApp() {}
+TestServiceLayerApp::~TestServiceLayerApp() = default;
void
TestServiceLayerApp::setupDummyPersistence()
{
- spi::PersistenceProvider::UP provider(new spi::dummy::DummyPersistence(
- getTypeRepo(), _compReg.getDiskCount()));
+ auto provider = std::make_unique<spi::dummy::DummyPersistence>(getTypeRepo(), _compReg.getDiskCount());
setPersistenceProvider(std::move(provider));
}
void
-TestServiceLayerApp::setPersistenceProvider(
- spi::PersistenceProvider::UP provider)
+TestServiceLayerApp::setPersistenceProvider(PersistenceProviderUP provider)
{
_partitions = provider->getPartitionStates().getList();
assert(spi::PartitionId(_compReg.getDiskCount()) == _partitions.size());
diff --git a/storage/src/tests/common/teststorageapp.h b/storage/src/tests/common/teststorageapp.h
index e567206c371..218e7352f04 100644
--- a/storage/src/tests/common/teststorageapp.h
+++ b/storage/src/tests/common/teststorageapp.h
@@ -107,8 +107,9 @@ private:
class TestServiceLayerApp : public TestStorageApp
{
+ using PersistenceProviderUP = std::unique_ptr<spi::PersistenceProvider>;
ServiceLayerComponentRegisterImpl& _compReg;
- spi::PersistenceProvider::UP _persistenceProvider;
+ PersistenceProviderUP _persistenceProvider;
spi::PartitionStateList _partitions;
public:
@@ -118,7 +119,7 @@ public:
~TestServiceLayerApp();
void setupDummyPersistence();
- void setPersistenceProvider(spi::PersistenceProvider::UP);
+ void setPersistenceProvider(PersistenceProviderUP);
ServiceLayerComponentRegisterImpl& getComponentRegister() { return _compReg; }
diff --git a/storage/src/tests/persistence/common/persistenceproviderwrapper.cpp b/storage/src/tests/persistence/common/persistenceproviderwrapper.cpp
index dd9ce6e6cba..67a1c41a9ef 100644
--- a/storage/src/tests/persistence/common/persistenceproviderwrapper.cpp
+++ b/storage/src/tests/persistence/common/persistenceproviderwrapper.cpp
@@ -147,11 +147,9 @@ PersistenceProviderWrapper::get(const spi::Bucket& bucket,
}
spi::CreateIteratorResult
-PersistenceProviderWrapper::createIterator(const spi::Bucket& bucket,
- const document::FieldSet& fields,
- const spi::Selection& sel,
+PersistenceProviderWrapper::createIterator(const spi::Bucket &bucket, FieldSetSP fields, const spi::Selection &sel,
spi::IncludedVersions versions,
- spi::Context& context)
+ spi::Context &context)
{
// TODO: proper printing of FieldSet and Selection
diff --git a/storage/src/tests/persistence/common/persistenceproviderwrapper.h b/storage/src/tests/persistence/common/persistenceproviderwrapper.h
index 21e5d8016aa..75712750d68 100644
--- a/storage/src/tests/persistence/common/persistenceproviderwrapper.h
+++ b/storage/src/tests/persistence/common/persistenceproviderwrapper.h
@@ -100,8 +100,9 @@ public:
spi::UpdateResult update(const spi::Bucket&, spi::Timestamp, spi::DocumentUpdateSP, spi::Context&) override;
spi::GetResult get(const spi::Bucket&, const document::FieldSet&, const spi::DocumentId&, spi::Context&) const override;
- spi::CreateIteratorResult createIterator(const spi::Bucket&, const document::FieldSet&, const spi::Selection&,
- spi::IncludedVersions versions, spi::Context&) override;
+ spi::CreateIteratorResult
+ createIterator(const spi::Bucket &bucket, FieldSetSP, const spi::Selection &, spi::IncludedVersions versions,
+ spi::Context &context) override;
spi::IterateResult iterate(spi::IteratorId, uint64_t maxByteSize, spi::Context&) const override;
spi::Result destroyIterator(spi::IteratorId, spi::Context&) override;
diff --git a/storage/src/tests/persistence/filestorage/mergeblockingtest.cpp b/storage/src/tests/persistence/filestorage/mergeblockingtest.cpp
index d9582cec585..c73ae7e506c 100644
--- a/storage/src/tests/persistence/filestorage/mergeblockingtest.cpp
+++ b/storage/src/tests/persistence/filestorage/mergeblockingtest.cpp
@@ -15,9 +15,7 @@ namespace storage {
struct MergeBlockingTest : public FileStorTestFixture {
void setupDisks() {
FileStorTestFixture::setupPersistenceThreads(1);
- _node->setPersistenceProvider(
- spi::PersistenceProvider::UP(
- new spi::dummy::DummyPersistence(_node->getTypeRepo(), 1)));
+ _node->setPersistenceProvider(std::make_unique<spi::dummy::DummyPersistence>(_node->getTypeRepo(), 1));
}
void SetUp() override;
diff --git a/storage/src/tests/persistence/filestorage/operationabortingtest.cpp b/storage/src/tests/persistence/filestorage/operationabortingtest.cpp
index 93c484368de..7810a595012 100644
--- a/storage/src/tests/persistence/filestorage/operationabortingtest.cpp
+++ b/storage/src/tests/persistence/filestorage/operationabortingtest.cpp
@@ -77,18 +77,19 @@ spi::LoadType defaultLoadType(0, "default");
}
struct OperationAbortingTest : FileStorTestFixture {
- spi::PersistenceProvider::UP _dummyProvider;
- BlockingMockProvider* _blockingProvider;
+ std::unique_ptr<spi::dummy::DummyPersistence> _dummyProvider;
+ BlockingMockProvider * _blockingProvider;
std::unique_ptr<vespalib::Barrier> _queueBarrier;
std::unique_ptr<vespalib::Barrier> _completionBarrier;
void setupProviderAndBarriers(uint32_t queueBarrierThreads) {
FileStorTestFixture::setupPersistenceThreads(1);
- _dummyProvider.reset(new spi::dummy::DummyPersistence(_node->getTypeRepo(), 1));
- _queueBarrier.reset(new vespalib::Barrier(queueBarrierThreads));
- _completionBarrier.reset(new vespalib::Barrier(2));
- _blockingProvider = new BlockingMockProvider(*_dummyProvider, *_queueBarrier, *_completionBarrier);
- _node->setPersistenceProvider(spi::PersistenceProvider::UP(_blockingProvider));
+ _dummyProvider = std::make_unique<spi::dummy::DummyPersistence>(_node->getTypeRepo(), 1);
+ _queueBarrier = std::make_unique<vespalib::Barrier>(queueBarrierThreads);
+ _completionBarrier = std::make_unique<vespalib::Barrier>(2);
+ auto blockingProvider = std::make_unique<BlockingMockProvider>(*_dummyProvider, *_queueBarrier, *_completionBarrier);
+ _blockingProvider = blockingProvider.get();
+ _node->setPersistenceProvider(std::move(blockingProvider));
}
void validateReplies(DummyStorageLink& link, size_t repliesTotal,
diff --git a/storage/src/tests/persistence/persistencetestutils.cpp b/storage/src/tests/persistence/persistencetestutils.cpp
index 504767e68c7..f50fbb0c8e8 100644
--- a/storage/src/tests/persistence/persistencetestutils.cpp
+++ b/storage/src/tests/persistence/persistencetestutils.cpp
@@ -231,9 +231,9 @@ PersistenceTestUtils::doGetOnDisk(
document::DocumentUpdate::SP
PersistenceTestUtils::createBodyUpdate(const document::DocumentId& docId, const document::FieldValue& updateValue)
{
- const DocumentType* docType(_env->_component.getTypeRepo()->getDocumentType("testdoctype1"));
- document::DocumentUpdate::SP update(new document::DocumentUpdate(*_env->_component.getTypeRepo(), *docType, docId));
- std::shared_ptr<document::AssignValueUpdate> assignUpdate(new document::AssignValueUpdate(updateValue));
+ const DocumentType* docType(getTypeRepo()->getDocumentType("testdoctype1"));
+ auto update = std::make_shared<document::DocumentUpdate>(*getTypeRepo(), *docType, docId);
+ auto assignUpdate = std::make_shared<document::AssignValueUpdate>(updateValue);
document::FieldUpdate fieldUpdate(docType->getField("content"));
fieldUpdate.addUpdate(*assignUpdate);
update->addUpdate(fieldUpdate);
@@ -243,9 +243,9 @@ PersistenceTestUtils::createBodyUpdate(const document::DocumentId& docId, const
document::DocumentUpdate::SP
PersistenceTestUtils::createHeaderUpdate(const document::DocumentId& docId, const document::FieldValue& updateValue)
{
- const DocumentType* docType(_env->_component.getTypeRepo()->getDocumentType("testdoctype1"));
- document::DocumentUpdate::SP update(new document::DocumentUpdate(*_env->_component.getTypeRepo(), *docType, docId));
- std::shared_ptr<document::AssignValueUpdate> assignUpdate(new document::AssignValueUpdate(updateValue));
+ const DocumentType* docType(getTypeRepo()->getDocumentType("testdoctype1"));
+ auto update = std::make_shared<document::DocumentUpdate>(*getTypeRepo(), *docType, docId);
+ auto assignUpdate = std::make_shared<document::AssignValueUpdate>(updateValue);
document::FieldUpdate fieldUpdate(docType->getField("headerval"));
fieldUpdate.addUpdate(*assignUpdate);
update->addUpdate(fieldUpdate);
@@ -253,8 +253,7 @@ PersistenceTestUtils::createHeaderUpdate(const document::DocumentId& docId, cons
}
uint16_t
-PersistenceTestUtils::getDiskFromBucketDatabaseIfUnset(const document::Bucket& bucket,
- uint16_t disk)
+PersistenceTestUtils::getDiskFromBucketDatabaseIfUnset(const document::Bucket& bucket, uint16_t disk)
{
if (disk == 0xffff) {
StorBucketDatabase::WrappedEntry entry(
@@ -342,7 +341,7 @@ PersistenceTestUtils::clearBody(document::Document& doc)
//doc->getBody().clear();
vespalib::nbostream stream;
doc.serializeHeader(stream);
- doc.deserialize(*_env->_component.getTypeRepo(), stream);
+ doc.deserialize(*getTypeRepo(), stream);
}
document::Document::UP
diff --git a/storage/src/tests/persistence/persistencetestutils.h b/storage/src/tests/persistence/persistencetestutils.h
index 6cee3b79ab8..3d25a205017 100644
--- a/storage/src/tests/persistence/persistencetestutils.h
+++ b/storage/src/tests/persistence/persistencetestutils.h
@@ -101,7 +101,7 @@ public:
FileStorHandler& fsHandler() { return *_env->_handler; }
FileStorMetrics& metrics() { return _env->_metrics; }
MessageKeeper& messageKeeper() { return _env->_messageKeeper; }
- std::shared_ptr<const document::DocumentTypeRepo> getTypeRepo() { return _env->_component.getTypeRepo(); }
+ std::shared_ptr<const document::DocumentTypeRepo> getTypeRepo() { return _env->_component.getTypeRepo()->documentTypeRepo; }
StorageComponent& getComponent() { return _env->_component; }
TestServiceLayerApp& getNode() { return _env->_node; }
diff --git a/storage/src/vespa/storage/common/storagecomponent.cpp b/storage/src/vespa/storage/common/storagecomponent.cpp
index 21a4b8eea64..3846fe3a9c0 100644
--- a/storage/src/vespa/storage/common/storagecomponent.cpp
+++ b/storage/src/vespa/storage/common/storagecomponent.cpp
@@ -2,17 +2,22 @@
#include "storagecomponent.h"
#include <vespa/storage/storageserver/prioritymapper.h>
-
#include <vespa/vespalib/util/exceptions.h>
#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/vdslib/distribution/distribution.h>
+#include <vespa/document/fieldset/fieldsetrepo.h>
namespace storage {
+StorageComponent::Repos::Repos(std::shared_ptr<const document::DocumentTypeRepo> repo)
+ : documentTypeRepo(std::move(repo)),
+ fieldSetRepo(std::make_shared<document::FieldSetRepo>(*documentTypeRepo))
+{}
+
+StorageComponent::Repos::~Repos() = default;
+
// Defined in cpp file to allow unique pointers of unknown type in header.
-StorageComponent::~StorageComponent()
-{
-}
+StorageComponent::~StorageComponent() = default;
void
StorageComponent::setNodeInfo(vespalib::stringref clusterName,
@@ -26,10 +31,11 @@ StorageComponent::setNodeInfo(vespalib::stringref clusterName,
}
void
-StorageComponent::setDocumentTypeRepo(DocumentTypeRepoSP repo)
+StorageComponent::setDocumentTypeRepo(std::shared_ptr<const document::DocumentTypeRepo> docTypeRepo)
{
+ auto repo = std::make_shared<Repos>(std::move(docTypeRepo));
std::lock_guard guard(_lock);
- _docTypeRepo = repo;
+ _repos = std::move(repo);
}
void
@@ -78,7 +84,7 @@ StorageComponent::StorageComponent(StorageComponentRegister& compReg,
_clusterName(),
_nodeType(nullptr),
_index(0),
- _docTypeRepo(),
+ _repos(),
_loadTypes(),
_priorityMapper(new PriorityMapper),
_bucketIdFactory(),
@@ -116,11 +122,11 @@ StorageComponent::getPriority(const documentapi::LoadType& lt) const
return _priorityMapper->getPriority(lt);
}
-StorageComponent::DocumentTypeRepoSP
+std::shared_ptr<StorageComponent::Repos>
StorageComponent::getTypeRepo() const
{
std::lock_guard guard(_lock);
- return _docTypeRepo;
+ return _repos;
}
StorageComponent::LoadTypeSetSP
diff --git a/storage/src/vespa/storage/common/storagecomponent.h b/storage/src/vespa/storage/common/storagecomponent.h
index 821cd43f21d..e0b1dc74d7f 100644
--- a/storage/src/vespa/storage/common/storagecomponent.h
+++ b/storage/src/vespa/storage/common/storagecomponent.h
@@ -42,6 +42,7 @@ namespace vespa::config::content::core::internal {
}
namespace document {
class DocumentTypeRepo;
+ class FieldSetRepo;
}
namespace documentapi {
class LoadType;
@@ -58,9 +59,14 @@ struct StorageComponentRegister;
class StorageComponent : public framework::Component {
public:
+ struct Repos {
+ explicit Repos(std::shared_ptr<const document::DocumentTypeRepo> repo);
+ ~Repos();
+ const std::shared_ptr<const document::DocumentTypeRepo> documentTypeRepo;
+ const std::shared_ptr<const document::FieldSetRepo> fieldSetRepo;
+ };
using UP = std::unique_ptr<StorageComponent>;
using PriorityConfig = vespa::config::content::core::internal::InternalStorPrioritymappingType;
- using DocumentTypeRepoSP = std::shared_ptr<const document::DocumentTypeRepo>;
using LoadTypeSetSP = std::shared_ptr<documentapi::LoadTypeSet>;
using DistributionSP = std::shared_ptr<lib::Distribution>;
@@ -68,9 +74,7 @@ public:
* Node type is supposed to be set immediately, and never be updated.
* Thus it does not need to be threadsafe. Should never be used before set.
*/
- void setNodeInfo(vespalib::stringref clusterName,
- const lib::NodeType& nodeType,
- uint16_t index);
+ void setNodeInfo(vespalib::stringref clusterName, const lib::NodeType& nodeType, uint16_t index);
/**
* Node state updater is supposed to be set immediately, and never be
@@ -78,14 +82,14 @@ public:
* before set.
*/
void setNodeStateUpdater(NodeStateUpdater& updater);
- void setDocumentTypeRepo(DocumentTypeRepoSP);
+ void setDocumentTypeRepo(std::shared_ptr<const document::DocumentTypeRepo>);
void setLoadTypes(LoadTypeSetSP);
void setPriorityConfig(const PriorityConfig&);
void setBucketIdFactory(const document::BucketIdFactory&);
void setDistribution(DistributionSP);
StorageComponent(StorageComponentRegister&, vespalib::stringref name);
- virtual ~StorageComponent();
+ ~StorageComponent() override;
vespalib::string getClusterName() const { return _clusterName; }
const lib::NodeType& getNodeType() const { return *_nodeType; }
@@ -94,7 +98,7 @@ public:
vespalib::string getIdentity() const;
- DocumentTypeRepoSP getTypeRepo() const;
+ std::shared_ptr<Repos> getTypeRepo() const;
LoadTypeSetSP getLoadTypes() const;
const document::BucketIdFactory& getBucketIdFactory() const
{ return _bucketIdFactory; }
@@ -106,7 +110,8 @@ private:
vespalib::string _clusterName;
const lib::NodeType* _nodeType;
uint16_t _index;
- DocumentTypeRepoSP _docTypeRepo;
+ std::shared_ptr<Repos> _repos;
+ // TODO: move loadTypes and _distribution in to _repos so lock will only taken once and only copying one shared_ptr.
LoadTypeSetSP _loadTypes;
std::unique_ptr<PriorityMapper> _priorityMapper;
document::BucketIdFactory _bucketIdFactory;
diff --git a/storage/src/vespa/storage/config/distributorconfiguration.cpp b/storage/src/vespa/storage/config/distributorconfiguration.cpp
index 0c9988421a3..aa606cdc8b9 100644
--- a/storage/src/vespa/storage/config/distributorconfiguration.cpp
+++ b/storage/src/vespa/storage/config/distributorconfiguration.cpp
@@ -70,7 +70,7 @@ DistributorConfiguration::containsTimeStatement(const std::string& documentSelec
{
TimeVisitor visitor;
try {
- document::select::Parser parser(*_component.getTypeRepo(), _component.getBucketIdFactory());
+ document::select::Parser parser(*_component.getTypeRepo()->documentTypeRepo, _component.getBucketIdFactory());
std::unique_ptr<document::select::Node> node = parser.parse(documentSelection);
node->visit(visitor);
diff --git a/storage/src/vespa/storage/distributor/distributor.cpp b/storage/src/vespa/storage/distributor/distributor.cpp
index c74d4135556..cfd8d7f1753 100644
--- a/storage/src/vespa/storage/distributor/distributor.cpp
+++ b/storage/src/vespa/storage/distributor/distributor.cpp
@@ -108,8 +108,7 @@ Distributor::Distributor(DistributorComponentRegister& compReg,
_must_send_updated_host_info(false)
{
_component.registerMetric(*_metrics);
- _component.registerMetricUpdateHook(_metricUpdateHook,
- framework::SecondTime(0));
+ _component.registerMetricUpdateHook(_metricUpdateHook, framework::SecondTime(0));
_distributorStatusDelegate.registerStatusPage();
_bucketDBStatusDelegate.registerStatusPage();
hostInfoReporterRegistrar.registerReporter(&_hostInfoReporter);
diff --git a/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp
index ca1b6f266d6..4c762cf4c23 100644
--- a/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp
@@ -38,8 +38,7 @@ RemoveLocationOperation::getBucketId(
DistributorComponent& manager,
const api::RemoveLocationCommand& cmd, document::BucketId& bid)
{
- std::shared_ptr<const document::DocumentTypeRepo> repo = manager.getTypeRepo();
- document::select::Parser parser(*repo, manager.getBucketIdFactory());
+ document::select::Parser parser(*manager.getTypeRepo()->documentTypeRepo, manager.getBucketIdFactory());
document::BucketSelector bucketSel(manager.getBucketIdFactory());
std::unique_ptr<document::BucketSelector::BucketVector> exprResult
diff --git a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
index 41f452df801..3866ee4e6f7 100644
--- a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
@@ -576,7 +576,7 @@ TwoPhaseUpdateOperation::processAndMatchTasCondition(DistributorMessageSender& s
return true; // No condition; nothing to do here.
}
- document::select::Parser parser(*_manager.getTypeRepo(), _manager.getBucketIdFactory());
+ document::select::Parser parser(*_manager.getTypeRepo()->documentTypeRepo, _manager.getBucketIdFactory());
std::unique_ptr<document::select::Node> selection;
try {
selection = parser.parse(_updateCmd->getCondition().getSelection());
diff --git a/storage/src/vespa/storage/persistence/bucketprocessor.cpp b/storage/src/vespa/storage/persistence/bucketprocessor.cpp
index c88b08612d7..ea09fcfc348 100644
--- a/storage/src/vespa/storage/persistence/bucketprocessor.cpp
+++ b/storage/src/vespa/storage/persistence/bucketprocessor.cpp
@@ -47,11 +47,11 @@ BucketProcessor::iterateAll(spi::PersistenceProvider& provider,
spi::Selection sel
= spi::Selection(spi::DocumentSelection(documentSelection));
spi::CreateIteratorResult createIterResult(provider.createIterator(
- bucket,
- document::AllFields(),
- sel,
- versions,
- context));
+ bucket,
+ std::make_shared<document::AllFields>(),
+ sel,
+ versions,
+ context));
if (createIterResult.getErrorCode() != spi::Result::ErrorType::NONE) {
vespalib::asciistream ss;
diff --git a/storage/src/vespa/storage/persistence/mergehandler.cpp b/storage/src/vespa/storage/persistence/mergehandler.cpp
index 70894858887..612d4545a8a 100644
--- a/storage/src/vespa/storage/persistence/mergehandler.cpp
+++ b/storage/src/vespa/storage/persistence/mergehandler.cpp
@@ -1,6 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
#include "mergehandler.h"
#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/vdslib/distribution/distribution.h>
@@ -14,17 +13,14 @@ LOG_SETUP(".persistence.mergehandler");
namespace storage {
-MergeHandler::MergeHandler(spi::PersistenceProvider& spi,
- PersistenceUtil& env)
+MergeHandler::MergeHandler(spi::PersistenceProvider& spi, PersistenceUtil& env)
: _spi(spi),
_env(env),
_maxChunkSize(env._config.bucketMergeChunkSize)
{
}
-MergeHandler::MergeHandler(spi::PersistenceProvider& spi,
- PersistenceUtil& env,
- uint32_t maxChunkSize)
+MergeHandler::MergeHandler(spi::PersistenceProvider& spi, PersistenceUtil& env, uint32_t maxChunkSize)
: _spi(spi),
_env(env),
_maxChunkSize(maxChunkSize)
@@ -58,9 +54,7 @@ checkResult(const spi::Result& result,
}
void
-checkResult(const spi::Result& result,
- const spi::Bucket& bucket,
- const char* op)
+checkResult(const spi::Result& result, const spi::Bucket& bucket, const char* op)
{
if (result.hasError()) {
vespalib::asciistream ss;
@@ -124,11 +118,11 @@ MergeHandler::populateMetaData(
spi::Selection sel(docSel);
sel.setToTimestamp(spi::Timestamp(maxTimestamp.getTime()));
spi::CreateIteratorResult createIterResult(_spi.createIterator(
- bucket,
- document::NoFields(),
- sel,
- spi::ALL_VERSIONS,
- context));
+ bucket,
+ std::make_shared<document::NoFields>(),
+ sel,
+ spi::ALL_VERSIONS,
+ context));
if (createIterResult.getErrorCode() != spi::Result::ErrorType::NONE) {
std::ostringstream ss;
@@ -142,8 +136,7 @@ MergeHandler::populateMetaData(
IteratorGuard iteratorGuard(_spi, iteratorId, context);
while (true) {
- spi::IterateResult result(
- _spi.iterate(iteratorId, UINT64_MAX, context));
+ spi::IterateResult result(_spi.iterate(iteratorId, UINT64_MAX, context));
if (result.getErrorCode() != spi::Result::ErrorType::NONE) {
std::ostringstream ss;
ss << "Failed to iterate for "
@@ -300,8 +293,7 @@ namespace {
}
int
- countUnfilledEntries(
- const std::vector<api::ApplyBucketDiffCommand::Entry>& diff)
+ countUnfilledEntries(const std::vector<api::ApplyBucketDiffCommand::Entry>& diff)
{
int count = 0;
@@ -323,11 +315,9 @@ namespace {
return value;
}
- api::StorageMessageAddress createAddress(const std::string& clusterName,
- uint16_t node)
+ api::StorageMessageAddress createAddress(const std::string& clusterName, uint16_t node)
{
- return api::StorageMessageAddress(
- clusterName, lib::NodeType::STORAGE, node);
+ return api::StorageMessageAddress(clusterName, lib::NodeType::STORAGE, node);
}
void assertContainedInBucket(const document::DocumentId& docId,
@@ -370,14 +360,11 @@ MergeHandler::fetchLocalData(
alreadyFilled += e._headerBlob.size() + e._bodyBlob.size();
}
}
- uint32_t remainingSize = _maxChunkSize - std::min(_maxChunkSize,
- alreadyFilled);
- LOG(debug, "Diff of %s has already filled %u of max %u bytes, "
- "remaining size to fill is %u",
+ uint32_t remainingSize = _maxChunkSize - std::min(_maxChunkSize, alreadyFilled);
+ LOG(debug, "Diff of %s has already filled %u of max %u bytes, remaining size to fill is %u",
bucket.toString().c_str(), alreadyFilled, _maxChunkSize, remainingSize);
if (remainingSize == 0) {
- LOG(debug,
- "Diff already at max chunk size, not fetching any local data");
+ LOG(debug, "Diff already at max chunk size, not fetching any local data");
return;
}
@@ -387,7 +374,7 @@ MergeHandler::fetchLocalData(
sel.setTimestampSubset(slots);
spi::CreateIteratorResult createIterResult(
_spi.createIterator(bucket,
- document::AllFields(),
+ std::make_shared<document::AllFields>(),
sel,
spi::NEWEST_DOCUMENT_OR_REMOVE,
context));
@@ -409,8 +396,7 @@ MergeHandler::fetchLocalData(
bool fetchedAllLocalData = false;
bool chunkLimitReached = false;
while (true) {
- spi::IterateResult result(
- _spi.iterate(iteratorId, remainingSize, context));
+ spi::IterateResult result(_spi.iterate(iteratorId, remainingSize, context));
if (result.getErrorCode() != spi::Result::ErrorType::NONE) {
std::ostringstream ss;
ss << "Failed to iterate for "
@@ -426,8 +412,7 @@ MergeHandler::fetchLocalData(
{
remainingSize -= list[i]->getSize();
LOG(spam, "Added %s, remainingSize is %u",
- entries.back()->toString().c_str(),
- remainingSize);
+ entries.back()->toString().c_str(), remainingSize);
entries.push_back(std::move(list[i]));
} else {
LOG(spam, "Adding %s would exceed chunk size limit of %u; "
@@ -453,8 +438,7 @@ MergeHandler::fetchLocalData(
docEntry.toString().c_str());
std::vector<api::ApplyBucketDiffCommand::Entry>::iterator iter(
- std::lower_bound(diff.begin(),
- diff.end(),
+ std::lower_bound(diff.begin(), diff.end(),
api::Timestamp(docEntry.getTimestamp()),
DiffEntryTimestampPredicate()));
assert(iter != diff.end());
@@ -566,8 +550,8 @@ MergeHandler::applyDiffLocally(
std::vector<spi::DocEntry::UP> entries;
populateMetaData(bucket, MAX_TIMESTAMP, entries, context);
- std::shared_ptr<const document::DocumentTypeRepo> repo(_env._component.getTypeRepo());
- assert(repo.get() != nullptr);
+ std::shared_ptr<const document::DocumentTypeRepo> repo(_env._component.getTypeRepo()->documentTypeRepo);
+ assert(repo);
uint32_t existingCount = entries.size();
uint32_t i = 0, j = 0;
@@ -727,8 +711,7 @@ MergeHandler::processBucketMerge(const spi::Bucket& bucket, MergeStatus& status,
// If nothing to update, we're done.
if (status.diff.size() == 0) {
- LOG(debug, "Done with merge of %s. No more entries in diff.",
- bucket.toString().c_str());
+ LOG(debug, "Done with merge of %s. No more entries in diff.", bucket.toString().c_str());
return status.reply;
}
@@ -755,10 +738,8 @@ MergeHandler::processBucketMerge(const spi::Bucket& bucket, MergeStatus& status,
? std::numeric_limits<uint32_t>().max()
: _maxChunkSize);
- cmd.reset(new api::ApplyBucketDiffCommand(
- bucket.getBucket(), nodes, maxSize));
- cmd->setAddress(createAddress(_env._component.getClusterName(),
- nodes[1].index));
+ cmd = std::make_shared<api::ApplyBucketDiffCommand>(bucket.getBucket(), nodes, maxSize);
+ cmd->setAddress(createAddress(_env._component.getClusterName(), nodes[1].index));
findCandidates(bucket.getBucketId(),
status,
true,
@@ -798,8 +779,7 @@ MergeHandler::processBucketMerge(const spi::Bucket& bucket, MergeStatus& status,
for (std::map<uint16_t, uint32_t>::const_iterator it = counts.begin();
it != counts.end(); ++it)
{
- if (it->second >= uint32_t(
- _env._config.commonMergeChainOptimalizationMinimumSize)
+ if (it->second >= uint32_t(_env._config.commonMergeChainOptimalizationMinimumSize)
|| counts.size() == 1)
{
LOG(spam, "Sending separate apply bucket diff for path %x "
@@ -832,15 +812,11 @@ MergeHandler::processBucketMerge(const spi::Bucket& bucket, MergeStatus& status,
(_env._config.enableMergeLocalNodeChooseDocsOptimalization
? std::numeric_limits<uint32_t>().max()
: _maxChunkSize);
- cmd.reset(new api::ApplyBucketDiffCommand(
- bucket.getBucket(), nodes, maxSize));
- cmd->setAddress(
- createAddress(_env._component.getClusterName(),
- nodes[1].index));
+ cmd = std::make_shared<api::ApplyBucketDiffCommand>(bucket.getBucket(), nodes, maxSize);
+ cmd->setAddress(createAddress(_env._component.getClusterName(), nodes[1].index));
// Add all the metadata, and thus use big limit. Max
// data to fetch parameter will control amount added.
- findCandidates(bucket.getBucketId(), status, true,
- it->first, newMask, maxSize, *cmd);
+ findCandidates(bucket.getBucketId(), status, true, it->first, newMask, maxSize, *cmd);
break;
}
}
@@ -848,22 +824,17 @@ MergeHandler::processBucketMerge(const spi::Bucket& bucket, MergeStatus& status,
// If we found no group big enough to handle on its own, do a common
// merge to merge the remaining data.
- if (cmd.get() == 0) {
- cmd.reset(new api::ApplyBucketDiffCommand(bucket.getBucket(),
- status.nodeList,
- _maxChunkSize));
- cmd->setAddress(createAddress(_env._component.getClusterName(),
- status.nodeList[1].index));
- findCandidates(bucket.getBucketId(), status, false, 0, 0,
- _maxChunkSize, *cmd);
+ if ( ! cmd ) {
+ cmd = std::make_shared<api::ApplyBucketDiffCommand>(bucket.getBucket(), status.nodeList, _maxChunkSize);
+ cmd->setAddress(createAddress(_env._component.getClusterName(), status.nodeList[1].index));
+ findCandidates(bucket.getBucketId(), status, false, 0, 0, _maxChunkSize, *cmd);
}
cmd->setPriority(status.context.getPriority());
cmd->setTimeout(status.timeout);
if (applyDiffNeedLocalData(cmd->getDiff(), 0, true)) {
framework::MilliSecTimer startTime(_env._component.getClock());
fetchLocalData(bucket, cmd->getLoadType(), cmd->getDiff(), 0, context);
- _env._metrics.merge_handler_metrics.mergeDataReadLatency.addValue(
- startTime.getElapsedTimeAsDouble());
+ _env._metrics.merge_handler_metrics.mergeDataReadLatency.addValue(startTime.getElapsedTimeAsDouble());
}
status.pendingId = cmd->getMsgId();
LOG(debug, "Sending %s", cmd->toString().c_str());
@@ -878,8 +849,7 @@ public:
document::Bucket _bucket;
bool _active;
- MergeStateDeleter(FileStorHandler& handler,
- const document::Bucket& bucket)
+ MergeStateDeleter(FileStorHandler& handler, const document::Bucket& bucket)
: _handler(handler),
_bucket(bucket),
_active(true)
@@ -906,8 +876,7 @@ MergeHandler::handleMergeBucket(api::MergeBucketCommand& cmd, MessageTracker::UP
if (cmd.getNodes().size() < 2) {
LOG(debug, "Attempt to merge a single instance of a bucket");
- tracker->fail(ReturnCode::ILLEGAL_PARAMETERS,
- "Cannot merge a single copy");
+ tracker->fail(ReturnCode::ILLEGAL_PARAMETERS, "Cannot merge a single copy");
return tracker;
}
@@ -954,8 +923,7 @@ MergeHandler::handleMergeBucket(api::MergeBucketCommand& cmd, MessageTracker::UP
auto cmd2 = std::make_shared<api::GetBucketDiffCommand>(bucket.getBucket(), s->nodeList, s->maxTimestamp.getTime());
if (!buildBucketInfoList(bucket, cmd.getLoadType(), s->maxTimestamp, 0, cmd2->getDiff(), tracker->context())) {
LOG(debug, "Bucket non-existing in db. Failing merge.");
- tracker->fail(ReturnCode::BUCKET_DELETED,
- "Bucket not found in buildBucketInfo step");
+ tracker->fail(ReturnCode::BUCKET_DELETED, "Bucket not found in buildBucketInfo step");
return tracker;
}
_env._metrics.merge_handler_metrics.mergeMetadataReadLatency.addValue(s->startTime.getElapsedTimeAsDouble());
@@ -1116,8 +1084,7 @@ MergeHandler::handleGetBucketDiff(api::GetBucketDiffCommand& cmd, MessageTracker
checkResult(_spi.createBucket(bucket, tracker->context()), bucket, "create bucket");
if (_env._fileStorHandler.isMerging(bucket.getBucket())) {
- tracker->fail(ReturnCode::BUSY,
- "A merge is already running on this bucket.");
+ tracker->fail(ReturnCode::BUSY, "A merge is already running on this bucket.");
return tracker;
}
uint8_t index = findOwnIndex(cmd.getNodes(), _env._nodeIndex);
@@ -1130,16 +1097,13 @@ MergeHandler::handleGetBucketDiff(api::GetBucketDiffCommand& cmd, MessageTracker
index, local, tracker->context()))
{
LOG(debug, "Bucket non-existing in db. Failing merge.");
- tracker->fail(ReturnCode::BUCKET_DELETED,
- "Bucket not found in buildBucketInfo step");
+ tracker->fail(ReturnCode::BUCKET_DELETED, "Bucket not found in buildBucketInfo step");
return tracker;
}
if (!mergeLists(remote, local, local)) {
- LOG(error, "Diffing %s found suspect entries.",
- bucket.toString().c_str());
+ LOG(error, "Diffing %s found suspect entries.", bucket.toString().c_str());
}
- _env._metrics.merge_handler_metrics.mergeMetadataReadLatency.addValue(
- startTime.getElapsedTimeAsDouble());
+ _env._metrics.merge_handler_metrics.mergeMetadataReadLatency.addValue(startTime.getElapsedTimeAsDouble());
// If last node in merge chain, we can send reply straight away
if (index + 1u >= cmd.getNodes().size()) {
@@ -1216,24 +1180,21 @@ namespace {
bool operator()(const api::ApplyBucketDiffCommand::Entry& x,
const api::ApplyBucketDiffCommand::Entry& y)
{
- return (x._entry._timestamp
- < y._entry._timestamp);
+ return (x._entry._timestamp < y._entry._timestamp);
}
};
} // End of anonymous namespace
void
-MergeHandler::handleGetBucketDiffReply(api::GetBucketDiffReply& reply,
- MessageSender& sender)
+MergeHandler::handleGetBucketDiffReply(api::GetBucketDiffReply& reply, MessageSender& sender)
{
_env._metrics.getBucketDiffReply.inc();
spi::Bucket bucket(reply.getBucket(), spi::PartitionId(_env._partition));
LOG(debug, "GetBucketDiffReply(%s)", bucket.toString().c_str());
if (!_env._fileStorHandler.isMerging(bucket.getBucket())) {
- LOG(warning, "Got GetBucketDiffReply for %s which we have no "
- "merge state for.",
+ LOG(warning, "Got GetBucketDiffReply for %s which we have no merge state for.",
bucket.toString().c_str());
return;
}
@@ -1387,8 +1348,7 @@ MergeHandler::handleApplyBucketDiff(api::ApplyBucketDiffCommand& cmd, MessageTra
}
void
-MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply,
- MessageSender& sender)
+MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply,MessageSender& sender)
{
_env._metrics.applyBucketDiffReply.inc();
spi::Bucket bucket(reply.getBucket(), spi::PartitionId(_env._partition));
@@ -1396,8 +1356,7 @@ MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply,
LOG(debug, "%s", reply.toString().c_str());
if (!_env._fileStorHandler.isMerging(bucket.getBucket())) {
- LOG(warning, "Got ApplyBucketDiffReply for %s which we have no "
- "merge state for.",
+ LOG(warning, "Got ApplyBucketDiffReply for %s which we have no merge state for.",
bucket.toString().c_str());
return;
}
@@ -1415,25 +1374,19 @@ MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply,
api::ReturnCode returnCode = reply.getResult();
try {
if (reply.getResult().failed()) {
- LOG(debug, "Got failed apply bucket diff reply %s",
- reply.toString().c_str());
+ LOG(debug, "Got failed apply bucket diff reply %s", reply.toString().c_str());
} else {
assert(reply.getNodes().size() >= 2);
uint8_t index = findOwnIndex(reply.getNodes(), _env._nodeIndex);
if (applyDiffNeedLocalData(diff, index, false)) {
framework::MilliSecTimer startTime(_env._component.getClock());
- fetchLocalData(bucket, reply.getLoadType(), diff, index,
- s.context);
- _env._metrics.merge_handler_metrics.mergeDataReadLatency.addValue(
- startTime.getElapsedTimeAsDouble());
+ fetchLocalData(bucket, reply.getLoadType(), diff, index, s.context);
+ _env._metrics.merge_handler_metrics.mergeDataReadLatency.addValue(startTime.getElapsedTimeAsDouble());
}
if (applyDiffHasLocallyNeededData(diff, index)) {
framework::MilliSecTimer startTime(_env._component.getClock());
- api::BucketInfo info(
- applyDiffLocally(bucket, reply.getLoadType(), diff,
- index, s.context));
- _env._metrics.merge_handler_metrics.mergeDataWriteLatency.addValue(
- startTime.getElapsedTimeAsDouble());
+ api::BucketInfo info(applyDiffLocally(bucket, reply.getLoadType(), diff, index, s.context));
+ _env._metrics.merge_handler_metrics.mergeDataWriteLatency.addValue(startTime.getElapsedTimeAsDouble());
} else {
LOG(spam, "Merge(%s): Didn't need fetched data on node %u (%u)",
bucket.toString().c_str(),
@@ -1464,8 +1417,7 @@ MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply,
"Got reply indicating merge cycle did not fix any entries: %s",
reply.toString(true).c_str());
LOG(warning,
- "Merge state for which there was no progress across a "
- "full merge cycle: %s",
+ "Merge state for which there was no progress across a full merge cycle: %s",
s.toString().c_str());
}
@@ -1479,8 +1431,7 @@ MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply,
// We have sent something on and shouldn't reply now.
clearState = false;
} else {
- _env._metrics.merge_handler_metrics.mergeLatencyTotal.addValue(
- s.startTime.getElapsedTimeAsDouble());
+ _env._metrics.merge_handler_metrics.mergeLatencyTotal.addValue(s.startTime.getElapsedTimeAsDouble());
}
}
} else {
@@ -1492,8 +1443,7 @@ MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply,
} catch (std::exception& e) {
_env._fileStorHandler.clearMergeStatus(
bucket.getBucket(),
- api::ReturnCode(api::ReturnCode::INTERNAL_FAILURE,
- e.what()));
+ api::ReturnCode(api::ReturnCode::INTERNAL_FAILURE, e.what()));
throw;
}
diff --git a/storage/src/vespa/storage/persistence/persistencethread.cpp b/storage/src/vespa/storage/persistence/persistencethread.cpp
index 53e455ea204..2cdb6194b6d 100644
--- a/storage/src/vespa/storage/persistence/persistencethread.cpp
+++ b/storage/src/vespa/storage/persistence/persistencethread.cpp
@@ -286,14 +286,14 @@ PersistenceThread::handleGet(api::GetCommand& cmd, MessageTracker::UP tracker)
tracker->setMetric(metrics);
metrics.request_size.addValue(cmd.getApproxByteSize());
- document::FieldSet::UP fieldSet = document::FieldSetRepo::parse(*_env._component.getTypeRepo(), cmd.getFieldSet());
+ auto fieldSet = _env._component.getTypeRepo()->fieldSetRepo->getFieldSet(cmd.getFieldSet());
tracker->context().setReadConsistency(api_read_consistency_to_spi(cmd.internal_read_consistency()));
spi::GetResult result =
_spi.get(getBucket(cmd.getDocumentId(), cmd.getBucket()), *fieldSet, cmd.getDocumentId(), tracker->context());
if (tracker->checkForError(result)) {
if (!result.hasDocument() && (document::FieldSet::Type::NONE != fieldSet->getType())) {
- _env._metrics.get[cmd.getLoadType()].notFound.inc();
+ metrics.notFound.inc();
}
tracker->setReply(std::make_shared<api::GetReply>(cmd, result.getDocumentPtr(), result.getTimestamp(),
false, result.is_tombstone()));
@@ -455,11 +455,11 @@ MessageTracker::UP
PersistenceThread::handleCreateIterator(CreateIteratorCommand& cmd, MessageTracker::UP tracker)
{
tracker->setMetric(_env._metrics.createIterator);
- document::FieldSet::UP fieldSet = document::FieldSetRepo::parse(*_env._component.getTypeRepo(), cmd.getFields());
+ document::FieldSet::SP fieldSet = _env._component.getTypeRepo()->fieldSetRepo->getFieldSet(cmd.getFields());
tracker->context().setReadConsistency(cmd.getReadConsistency());
spi::CreateIteratorResult result(_spi.createIterator(
- spi::Bucket(cmd.getBucket(), spi::PartitionId(_env._partition)),
- *fieldSet, cmd.getSelection(), cmd.getIncludedVersions(), tracker->context()));
+ spi::Bucket(cmd.getBucket(), spi::PartitionId(_env._partition)),
+ std::move(fieldSet), cmd.getSelection(), cmd.getIncludedVersions(), tracker->context()));
if (tracker->checkForError(result)) {
tracker->setReply(std::make_shared<CreateIteratorReply>(cmd, spi::IteratorId(result.getIteratorId())));
}
diff --git a/storage/src/vespa/storage/persistence/persistenceutil.cpp b/storage/src/vespa/storage/persistence/persistenceutil.cpp
index 6605e3f6363..63ac5405fab 100644
--- a/storage/src/vespa/storage/persistence/persistenceutil.cpp
+++ b/storage/src/vespa/storage/persistence/persistenceutil.cpp
@@ -162,7 +162,7 @@ PersistenceUtil::PersistenceUtil(
_nodeIndex(_component.getIndex()),
_metrics(metrics),
_bucketFactory(_component.getBucketIdFactory()),
- _repo(_component.getTypeRepo()),
+ _repo(_component.getTypeRepo()->documentTypeRepo),
_spi(provider)
{
}
diff --git a/storage/src/vespa/storage/persistence/provider_error_wrapper.cpp b/storage/src/vespa/storage/persistence/provider_error_wrapper.cpp
index a5564282d17..0884d807eda 100644
--- a/storage/src/vespa/storage/persistence/provider_error_wrapper.cpp
+++ b/storage/src/vespa/storage/persistence/provider_error_wrapper.cpp
@@ -111,8 +111,8 @@ ProviderErrorWrapper::get(const spi::Bucket& bucket, const document::FieldSet& f
}
spi::CreateIteratorResult
-ProviderErrorWrapper::createIterator(const spi::Bucket& bucket, const document::FieldSet& fieldSet,
- const spi::Selection& selection, spi::IncludedVersions versions, spi::Context& context)
+ProviderErrorWrapper::createIterator(const spi::Bucket &bucket, FieldSetSP fieldSet, const spi::Selection &selection,
+ spi::IncludedVersions versions, spi::Context &context)
{
return checkResult(_impl.createIterator(bucket, fieldSet, selection, versions, context));
}
diff --git a/storage/src/vespa/storage/persistence/provider_error_wrapper.h b/storage/src/vespa/storage/persistence/provider_error_wrapper.h
index 602877e0b02..54abf0e96fb 100644
--- a/storage/src/vespa/storage/persistence/provider_error_wrapper.h
+++ b/storage/src/vespa/storage/persistence/provider_error_wrapper.h
@@ -52,8 +52,9 @@ public:
spi::RemoveResult removeIfFound(const spi::Bucket&, spi::Timestamp, const document::DocumentId&, spi::Context&) override;
spi::UpdateResult update(const spi::Bucket&, spi::Timestamp, spi::DocumentUpdateSP, spi::Context&) override;
spi::GetResult get(const spi::Bucket&, const document::FieldSet&, const document::DocumentId&, spi::Context&) const override;
- spi::CreateIteratorResult createIterator(const spi::Bucket&, const document::FieldSet&, const spi::Selection&,
- spi::IncludedVersions versions, spi::Context&) override;
+ spi::CreateIteratorResult
+ createIterator(const spi::Bucket &bucket, FieldSetSP, const spi::Selection &, spi::IncludedVersions versions,
+ spi::Context &context) override;
spi::IterateResult iterate(spi::IteratorId, uint64_t maxByteSize, spi::Context&) const override;
spi::Result destroyIterator(spi::IteratorId, spi::Context&) override;
spi::Result createBucket(const spi::Bucket&, spi::Context&) override;
diff --git a/storage/src/vespa/storage/persistence/testandsethelper.cpp b/storage/src/vespa/storage/persistence/testandsethelper.cpp
index 9232abc5c8a..57586249817 100644
--- a/storage/src/vespa/storage/persistence/testandsethelper.cpp
+++ b/storage/src/vespa/storage/persistence/testandsethelper.cpp
@@ -1,8 +1,8 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
// @author Vegard Sjonfjell
-#include <vespa/storage/persistence/fieldvisitor.h>
-#include <vespa/storage/persistence/testandsethelper.h>
+#include "fieldvisitor.h"
+#include "testandsethelper.h"
#include <vespa/document/select/parser.h>
#include <vespa/document/repo/documenttyperepo.h>
#include <vespa/vespalib/util/stringfmt.h>
@@ -11,19 +11,19 @@ using namespace std::string_literals;
namespace storage {
-void TestAndSetHelper::getDocumentType() {
+void TestAndSetHelper::getDocumentType(const document::DocumentTypeRepo & documentTypeRepo) {
if (!_docId.hasDocType()) {
throw TestAndSetException(api::ReturnCode(api::ReturnCode::ILLEGAL_PARAMETERS, "Document id has no doctype"));
}
- _docTypePtr = _component.getTypeRepo()->getDocumentType(_docId.getDocType());
+ _docTypePtr = documentTypeRepo.getDocumentType(_docId.getDocType());
if (_docTypePtr == nullptr) {
throw TestAndSetException(api::ReturnCode(api::ReturnCode::ILLEGAL_PARAMETERS, "Document type does not exist"));
}
}
-void TestAndSetHelper::parseDocumentSelection() {
- document::select::Parser parser(*_component.getTypeRepo(), _component.getBucketIdFactory());
+void TestAndSetHelper::parseDocumentSelection(const document::DocumentTypeRepo & documentTypeRepo) {
+ document::select::Parser parser(documentTypeRepo, _component.getBucketIdFactory());
try {
_docSelectionUp = parser.parse(_cmd.getCondition().getSelection());
@@ -49,8 +49,9 @@ TestAndSetHelper::TestAndSetHelper(PersistenceThread & thread, const api::TestAn
_docTypePtr(nullptr),
_missingDocumentImpliesMatch(missingDocumentImpliesMatch)
{
- getDocumentType();
- parseDocumentSelection();
+ auto docTypeRepo = _component.getTypeRepo()->documentTypeRepo;
+ getDocumentType(*docTypeRepo);
+ parseDocumentSelection(*docTypeRepo);
}
TestAndSetHelper::~TestAndSetHelper() = default;
diff --git a/storage/src/vespa/storage/persistence/testandsethelper.h b/storage/src/vespa/storage/persistence/testandsethelper.h
index b5fa29d0106..b528b5034f9 100644
--- a/storage/src/vespa/storage/persistence/testandsethelper.h
+++ b/storage/src/vespa/storage/persistence/testandsethelper.h
@@ -28,8 +28,8 @@ class TestAndSetHelper {
std::unique_ptr<document::select::Node> _docSelectionUp;
bool _missingDocumentImpliesMatch;
- void getDocumentType();
- void parseDocumentSelection();
+ void getDocumentType(const document::DocumentTypeRepo & documentTypeRepo);
+ void parseDocumentSelection(const document::DocumentTypeRepo & documentTypeRepo);
spi::GetResult retrieveDocument(const document::FieldSet & fieldSet, spi::Context & context);
public:
diff --git a/storage/src/vespa/storage/storageserver/communicationmanager.cpp b/storage/src/vespa/storage/storageserver/communicationmanager.cpp
index c0adb01ad47..b51394e2e64 100644
--- a/storage/src/vespa/storage/storageserver/communicationmanager.cpp
+++ b/storage/src/vespa/storage/storageserver/communicationmanager.cpp
@@ -395,10 +395,12 @@ void CommunicationManager::configure(std::unique_ptr<CommunicationManagerConfig>
// Configure messagebus here as we for legacy reasons have
// config here.
+ auto documentTypeRepo = _component.getTypeRepo()->documentTypeRepo;
+ auto loadTypes = _component.getLoadTypes();
_mbus = std::make_unique<mbus::RPCMessageBus>(
mbus::ProtocolSet()
- .add(std::make_shared<documentapi::DocumentProtocol>(*_component.getLoadTypes(), _component.getTypeRepo()))
- .add(std::make_shared<mbusprot::StorageProtocol>(_component.getTypeRepo(), *_component.getLoadTypes())),
+ .add(std::make_shared<documentapi::DocumentProtocol>(*loadTypes, documentTypeRepo))
+ .add(std::make_shared<mbusprot::StorageProtocol>(documentTypeRepo, *loadTypes)),
params,
_configUri);
diff --git a/storage/src/vespa/storage/visiting/visitorthread.cpp b/storage/src/vespa/storage/visiting/visitorthread.cpp
index c6e75735690..73f4a70d80d 100644
--- a/storage/src/vespa/storage/visiting/visitorthread.cpp
+++ b/storage/src/vespa/storage/visiting/visitorthread.cpp
@@ -31,7 +31,7 @@ VisitorThread::Event::Event(Event&& other)
{
}
-VisitorThread::Event::~Event() {}
+VisitorThread::Event::~Event() = default;
VisitorThread::Event&
VisitorThread::Event::operator= (Event&& other)
@@ -44,9 +44,7 @@ VisitorThread::Event::operator= (Event&& other)
return *this;
}
-VisitorThread::Event::Event(
- api::VisitorId visitor,
- const std::shared_ptr<api::StorageMessage>& msg)
+VisitorThread::Event::Event(api::VisitorId visitor, const std::shared_ptr<api::StorageMessage>& msg)
: _visitorId(visitor),
_message(msg),
_timer(),
@@ -54,9 +52,7 @@ VisitorThread::Event::Event(
{
}
-VisitorThread::Event::Event(
- api::VisitorId visitor,
- mbus::Reply::UP reply)
+VisitorThread::Event::Event(api::VisitorId visitor, mbus::Reply::UP reply)
: _visitorId(visitor),
_mbusReply(std::move(reply)),
_timer(),
@@ -331,7 +327,7 @@ VisitorThread::handleNonExistingVisitorCall(const Event& entry,
ReturnCode& code)
{
// Get current time. Set the time that is the oldest still recent.
- framework::SecondTime currentTime(_component.getClock().getTimeInSeconds());;
+ framework::SecondTime currentTime(_component.getClock().getTimeInSeconds());
trimRecentlyCompletedList(currentTime);
// Go through all recent visitors. Ignore request if recent
@@ -435,8 +431,7 @@ VisitorThread::onCreateVisitor(
do {
// If no buckets are specified, fail command
if (cmd->getBuckets().empty()) {
- result = ReturnCode(ReturnCode::ILLEGAL_PARAMETERS,
- "No buckets specified");
+ result = ReturnCode(ReturnCode::ILLEGAL_PARAMETERS, "No buckets specified");
LOG(warning, "CreateVisitor(%s): No buckets specified. Aborting.",
cmd->getInstanceId().c_str());
break;
@@ -480,7 +475,7 @@ VisitorThread::onCreateVisitor(
// Parse document selection
try{
if (!cmd->getDocumentSelection().empty()) {
- std::shared_ptr<const document::DocumentTypeRepo> repo(_component.getTypeRepo());
+ std::shared_ptr<const document::DocumentTypeRepo> repo(_component.getTypeRepo()->documentTypeRepo);
const document::BucketIdFactory& idFactory(_component.getBucketIdFactory());
document::select::Parser parser(*repo, idFactory);
docSelection = parser.parse(cmd->getDocumentSelection());