author     Henning Baldersheim <balder@yahoo-inc.com>   2020-08-18 22:28:23 +0200
committer  GitHub <noreply@github.com>                  2020-08-18 22:28:23 +0200
commit     2f0ccbe4e07afee74513e9e89a2a037db47fd542 (patch)
tree       a6eb15dc7d566151325d54ebb0222f1d8df8c22e
parent     4a229ee61e8409a73722eb2820b5ed8a3e6b8f21 (diff)
parent     623648364a8e32ff0ec811f2a8b0179bd13d5956 (diff)
Merge pull request #14051 from vespa-engine/balder/use-an-actual-fieldset-repo
Balder/use an actual fieldset repo
-rw-r--r--  document/src/tests/fieldsettest.cpp  17
-rw-r--r--  document/src/vespa/document/base/field.h  1
-rw-r--r--  document/src/vespa/document/datatype/documenttype.h  7
-rw-r--r--  document/src/vespa/document/fieldset/fieldset.h  8
-rw-r--r--  document/src/vespa/document/fieldset/fieldsetrepo.cpp  45
-rw-r--r--  document/src/vespa/document/fieldset/fieldsetrepo.h  11
-rw-r--r--  document/src/vespa/document/fieldset/fieldsets.h  17
-rw-r--r--  persistence/src/tests/dummyimpl/dummyimpltest.cpp  4
-rw-r--r--  persistence/src/vespa/persistence/conformancetest/conformancetest.cpp  324
-rw-r--r--  persistence/src/vespa/persistence/conformancetest/conformancetest.h  21
-rw-r--r--  persistence/src/vespa/persistence/dummyimpl/dummypersistence.cpp  12
-rw-r--r--  persistence/src/vespa/persistence/dummyimpl/dummypersistence.h  9
-rw-r--r--  persistence/src/vespa/persistence/spi/persistenceprovider.h  11
-rw-r--r--  searchcore/src/apps/proton/downpersistence.cpp  3
-rw-r--r--  searchcore/src/apps/proton/downpersistence.h  5
-rw-r--r--  searchcore/src/apps/tests/persistenceconformance_test.cpp  7
-rw-r--r--  searchcore/src/tests/proton/document_iterator/document_iterator_test.cpp  60
-rw-r--r--  searchcore/src/tests/proton/persistenceengine/persistenceengine_test.cpp  14
-rw-r--r--  searchcore/src/vespa/searchcore/proton/persistenceengine/document_iterator.cpp  6
-rw-r--r--  searchcore/src/vespa/searchcore/proton/persistenceengine/document_iterator.h  4
-rw-r--r--  searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.cpp  6
-rw-r--r--  searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.h  8
-rw-r--r--  storage/src/tests/common/teststorageapp.cpp  8
-rw-r--r--  storage/src/tests/common/teststorageapp.h  5
-rw-r--r--  storage/src/tests/persistence/common/persistenceproviderwrapper.cpp  6
-rw-r--r--  storage/src/tests/persistence/common/persistenceproviderwrapper.h  5
-rw-r--r--  storage/src/tests/persistence/filestorage/mergeblockingtest.cpp  4
-rw-r--r--  storage/src/tests/persistence/filestorage/operationabortingtest.cpp  15
-rw-r--r--  storage/src/tests/persistence/persistencetestutils.cpp  17
-rw-r--r--  storage/src/tests/persistence/persistencetestutils.h  2
-rw-r--r--  storage/src/vespa/storage/common/storagecomponent.cpp  24
-rw-r--r--  storage/src/vespa/storage/common/storagecomponent.h  21
-rw-r--r--  storage/src/vespa/storage/config/distributorconfiguration.cpp  2
-rw-r--r--  storage/src/vespa/storage/distributor/distributor.cpp  3
-rw-r--r--  storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp  3
-rw-r--r--  storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp  2
-rw-r--r--  storage/src/vespa/storage/persistence/bucketprocessor.cpp  10
-rw-r--r--  storage/src/vespa/storage/persistence/mergehandler.cpp  156
-rw-r--r--  storage/src/vespa/storage/persistence/persistencethread.cpp  10
-rw-r--r--  storage/src/vespa/storage/persistence/persistenceutil.cpp  2
-rw-r--r--  storage/src/vespa/storage/persistence/provider_error_wrapper.cpp  4
-rw-r--r--  storage/src/vespa/storage/persistence/provider_error_wrapper.h  5
-rw-r--r--  storage/src/vespa/storage/persistence/testandsethelper.cpp  17
-rw-r--r--  storage/src/vespa/storage/persistence/testandsethelper.h  4
-rw-r--r--  storage/src/vespa/storage/storageserver/communicationmanager.cpp  6
-rw-r--r--  storage/src/vespa/storage/visiting/visitorthread.cpp  17
-rw-r--r--  storageserver/src/vespa/storageserver/app/dummyservicelayerprocess.cpp  4
-rw-r--r--  storageserver/src/vespa/storageserver/app/dummyservicelayerprocess.h  2
-rw-r--r--  streamingvisitors/src/tests/searchvisitor/searchvisitor_test.cpp  10
-rw-r--r--  streamingvisitors/src/vespa/searchvisitor/searchvisitor.cpp  2
50 files changed, 427 insertions, 539 deletions
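
The core of this merge is that document::FieldSetRepo stops being a purely static parser and becomes an actual repository: it is constructed from a DocumentTypeRepo, pre-parses the field sets configured on each document type into shared FieldSet objects (keyed "<doctype>:<fieldset name>"), and serves them through getFieldSet(), falling back to parse() for ad-hoc strings. Along the way FieldSet::UP gives way to FieldSet::SP in most APIs, FieldSet::clone() is removed, and PersistenceProvider::createIterator() takes the field set by shared_ptr. A minimal usage sketch of the new FieldSetRepo API, based only on the headers in this diff and assuming a Vespa source tree; how the DocumentTypeRepo is obtained is left out:

#include <vespa/document/fieldset/fieldsetrepo.h>
#include <vespa/document/repo/documenttyperepo.h>

using document::DocumentTypeRepo;
using document::FieldSet;
using document::FieldSetRepo;

void example(const DocumentTypeRepo& docTypeRepo) {
    // Built once: pre-parses every field set configured on every document type
    // into shared FieldSet instances, keyed as "<doctype>:<fieldset name>".
    FieldSetRepo fieldSets(docTypeRepo);

    // Configured names are served from the cache; anything else (like this
    // ad-hoc field list) falls back to the static parse() below.
    FieldSet::SP headerAndContent = fieldSets.getFieldSet("testdoctype1:headerval,content");

    // The static helpers remain, now returning shared_ptr.
    FieldSet::SP allFields = FieldSetRepo::parse(docTypeRepo, "[all]");
    vespalib::string roundTrip = FieldSetRepo::serialize(*headerAndContent);
    (void) allFields;
    (void) roundTrip;
}

Sharing parsed field sets is what lets clone() go away and what the new createIterator() signature further down relies on.
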
diff --git a/document/src/tests/fieldsettest.cpp b/document/src/tests/fieldsettest.cpp
index 29581ff4549..af23e713735 100644
--- a/document/src/tests/fieldsettest.cpp
+++ b/document/src/tests/fieldsettest.cpp
@@ -31,7 +31,7 @@ TEST_F(FieldSetTest, testParsing)
(void) dynamic_cast<NoFields&>(*FieldSetRepo::parse(docRepo, NoFields::NAME));
(void) dynamic_cast<DocIdOnly&>(*FieldSetRepo::parse(docRepo, DocIdOnly::NAME));
- FieldSet::UP set = FieldSetRepo::parse(docRepo, "testdoctype1:headerval,content");
+ auto set = FieldSetRepo::parse(docRepo, "testdoctype1:headerval,content");
auto & coll = dynamic_cast<FieldCollection&>(*set);
std::ostringstream ost;
@@ -46,8 +46,8 @@ namespace {
bool checkContains(const DocumentTypeRepo& repo,
const std::string& str1, const std::string & str2) {
- FieldSet::UP set1 = FieldSetRepo::parse(repo, str1);
- FieldSet::UP set2 = FieldSetRepo::parse(repo, str2);
+ auto set1 = FieldSetRepo::parse(repo, str1);
+ auto set2 = FieldSetRepo::parse(repo, str2);
return set1->contains(*set2);
}
@@ -141,7 +141,7 @@ FieldSetTest::doCopyFields(const Document& src,
if (!dest) {
dest = &destDoc;
}
- FieldSet::UP fset = FieldSetRepo::parse(docRepo, fieldSetStr);
+ auto fset = FieldSetRepo::parse(docRepo, fieldSetStr);
FieldSet::copyFields(*dest, src, *fset);
return stringifyFields(*dest);
}
@@ -152,7 +152,7 @@ FieldSetTest::doStripFields(const Document& doc,
const std::string& fieldSetStr)
{
Document::UP copy(doc.clone());
- FieldSet::UP fset = FieldSetRepo::parse(docRepo, fieldSetStr);
+ auto fset = FieldSetRepo::parse(docRepo, fieldSetStr);
FieldSet::stripFields(*copy, *fset);
return stringifyFields(*copy);
}
@@ -198,7 +198,7 @@ FieldSetTest::doCopyDocument(const Document& src,
const DocumentTypeRepo& docRepo,
const std::string& fieldSetStr)
{
- FieldSet::UP fset = FieldSetRepo::parse(docRepo, fieldSetStr);
+ auto fset = FieldSetRepo::parse(docRepo, fieldSetStr);
Document::UP doc(FieldSet::createDocumentSubsetCopy(src, *fset));
return stringifyFields(*doc);
}
@@ -244,10 +244,9 @@ TEST_F(FieldSetTest, testSerialize)
"testdoctype1:content,hstringval"
};
- FieldSetRepo repo;
for (const char * fieldSet : fieldSets) {
- FieldSet::UP fs = FieldSetRepo::parse(docRepo, fieldSet);
- EXPECT_EQ(vespalib::string(fieldSet), repo.serialize(*fs));
+ auto fs = FieldSetRepo::parse(docRepo, fieldSet);
+ EXPECT_EQ(vespalib::string(fieldSet), FieldSetRepo::serialize(*fs));
}
}
diff --git a/document/src/vespa/document/base/field.h b/document/src/vespa/document/base/field.h
index 7580b2b692f..305021ef29a 100644
--- a/document/src/vespa/document/base/field.h
+++ b/document/src/vespa/document/base/field.h
@@ -88,7 +88,6 @@ public:
*/
Field(vespalib::stringref name, const DataType &dataType);
- Field* clone() const override { return new Field(*this); }
std::unique_ptr<FieldValue> createValue() const;
// Note that only id is checked for equality.
diff --git a/document/src/vespa/document/datatype/documenttype.h b/document/src/vespa/document/datatype/documenttype.h
index ed6e9e66ab5..fae65addb48 100644
--- a/document/src/vespa/document/datatype/documenttype.h
+++ b/document/src/vespa/document/datatype/documenttype.h
@@ -61,12 +61,10 @@ public:
DocumentType();
DocumentType(vespalib::stringref name, int32_t id);
- DocumentType(vespalib::stringref name, int32_t id,
- const StructDataType& fields);
+ DocumentType(vespalib::stringref name, int32_t id, const StructDataType& fields);
explicit DocumentType(vespalib::stringref name);
- DocumentType(vespalib::stringref name,
- const StructDataType& fields);
+ DocumentType(vespalib::stringref name, const StructDataType& fields);
~DocumentType() override;
@@ -101,6 +99,7 @@ public:
DocumentType & addFieldSet(const vespalib::string & name, FieldSet::Fields fields);
const FieldSet * getFieldSet(const vespalib::string & name) const;
+ const FieldSetMap & getFieldSets() const { return _fieldSets; }
const ImportedFieldNames& imported_field_names() const noexcept {
return _imported_field_names;
diff --git a/document/src/vespa/document/fieldset/fieldset.h b/document/src/vespa/document/fieldset/fieldset.h
index 35c912c5e45..3d74659ebf5 100644
--- a/document/src/vespa/document/fieldset/fieldset.h
+++ b/document/src/vespa/document/fieldset/fieldset.h
@@ -23,7 +23,8 @@ public:
DOCID
};
- typedef std::unique_ptr<FieldSet> UP;
+ using SP = std::shared_ptr<FieldSet>;
+ using UP = std::unique_ptr<FieldSet>;
virtual ~FieldSet() = default;
@@ -39,11 +40,6 @@ public:
virtual Type getType() const = 0;
/**
- * @return Returns a copy of this object.
- */
- virtual FieldSet* clone() const = 0;
-
- /**
* Copy all fields from src into dest that are contained within the
* given field set. If any copied field pre-exists in dest, it will
* be overwritten.
diff --git a/document/src/vespa/document/fieldset/fieldsetrepo.cpp b/document/src/vespa/document/fieldset/fieldsetrepo.cpp
index 33cbf6185c4..5bde291c8dd 100644
--- a/document/src/vespa/document/fieldset/fieldsetrepo.cpp
+++ b/document/src/vespa/document/fieldset/fieldsetrepo.cpp
@@ -5,6 +5,7 @@
#include <vespa/vespalib/util/exceptions.h>
#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/vespalib/stllike/hash_map.hpp>
using vespalib::StringTokenizer;
@@ -12,27 +13,25 @@ namespace document {
namespace {
-FieldSet::UP
+FieldSet::SP
parseSpecialValues(vespalib::stringref name)
{
- FieldSet::UP fs;
if ((name.size() == 4) && (name[1] == 'i') && (name[2] == 'd') && (name[3] == ']')) {
- fs = std::make_unique<DocIdOnly>();
+ return std::make_shared<DocIdOnly>();
} else if ((name.size() == 5) && (name[1] == 'a') && (name[2] == 'l') && (name[3] == 'l') && (name[4] == ']')) {
- fs = std::make_unique<AllFields>();
+ return std::make_shared<AllFields>();
} else if ((name.size() == 6) && (name[1] == 'n') && (name[2] == 'o') && (name[3] == 'n') && (name[4] == 'e') && (name[5] == ']')) {
- fs = std::make_unique<NoFields>();
+ return std::make_shared<NoFields>();
} else if ((name.size() == 7) && (name[1] == 'd') && (name[2] == 'o') && (name[3] == 'c') && (name[4] == 'i') && (name[5] == 'd') && (name[6] == ']')) {
- fs = std::make_unique<DocIdOnly>();
+ return std::make_shared<DocIdOnly>();
} else {
throw vespalib::IllegalArgumentException(
"The only special names (enclosed in '[]') allowed are "
"id, all, none, not '" + name + "'.");
}
- return fs;
}
-FieldSet::UP
+FieldSet::SP
parseFieldCollection(const DocumentTypeRepo& repo,
vespalib::stringref docType,
vespalib::stringref fieldNames)
@@ -55,12 +54,12 @@ parseFieldCollection(const DocumentTypeRepo& repo,
builder.add(&type.getField(token));
}
}
- return std::make_unique<FieldCollection>(type, builder.build());
+ return std::make_shared<FieldCollection>(type, builder.build());
}
}
-FieldSet::UP
+FieldSet::SP
FieldSetRepo::parse(const DocumentTypeRepo& repo, vespalib::stringref str)
{
if (str[0] == '[') {
@@ -111,5 +110,31 @@ FieldSetRepo::serialize(const FieldSet& fieldSet)
}
}
+
+FieldSetRepo::FieldSetRepo(const DocumentTypeRepo& repo)
+ : _doumentTyperepo(repo),
+ _configuredFieldSets()
+{
+ repo.forEachDocumentType(*vespalib::makeClosure(this, &FieldSetRepo::configureDocumentType));
+}
+FieldSetRepo::~FieldSetRepo() = default;
+
+void
+FieldSetRepo::configureDocumentType(const DocumentType & documentType) {
+ for (const auto & entry : documentType.getFieldSets()) {
+ vespalib::string fieldSetName(documentType.getName());
+ fieldSetName.append(':').append(entry.first);
+ _configuredFieldSets[fieldSetName] = parse(_doumentTyperepo, fieldSetName);
+ }
+}
+FieldSet::SP
+FieldSetRepo::getFieldSet(vespalib::stringref fieldSetString) const {
+ auto found = _configuredFieldSets.find(fieldSetString);
+ if (found != _configuredFieldSets.end()) {
+ return found->second;
+ }
+ return parse(_doumentTyperepo, fieldSetString);
+}
+
}
diff --git a/document/src/vespa/document/fieldset/fieldsetrepo.h b/document/src/vespa/document/fieldset/fieldsetrepo.h
index a744aa572e5..d213230848a 100644
--- a/document/src/vespa/document/fieldset/fieldsetrepo.h
+++ b/document/src/vespa/document/fieldset/fieldsetrepo.h
@@ -16,10 +16,17 @@ class DocumentTypeRepo;
class FieldSetRepo
{
public:
- static FieldSet::UP parse(const DocumentTypeRepo& repo,
- vespalib::stringref fieldSetString);
+ FieldSetRepo(const DocumentTypeRepo& repo);
+ ~FieldSetRepo();
+ FieldSet::SP getFieldSet(vespalib::stringref fieldSetString) const;
+
+ static FieldSet::SP parse(const DocumentTypeRepo& repo, vespalib::stringref fieldSetString);
static vespalib::string serialize(const FieldSet& fs);
+private:
+ void configureDocumentType(const DocumentType & documentType);
+ const DocumentTypeRepo & _doumentTyperepo;
+ vespalib::hash_map<vespalib::string, FieldSet::SP> _configuredFieldSets;
};
}
diff --git a/document/src/vespa/document/fieldset/fieldsets.h b/document/src/vespa/document/fieldset/fieldsets.h
index 9537a5bdf61..e71a96e5a7e 100644
--- a/document/src/vespa/document/fieldset/fieldsets.h
+++ b/document/src/vespa/document/fieldset/fieldsets.h
@@ -13,7 +13,6 @@ public:
static constexpr const char * NAME = "[all]";
bool contains(const FieldSet&) const override { return true; }
Type getType() const override { return Type::ALL; }
- FieldSet* clone() const override { return new AllFields(); }
};
class NoFields final : public FieldSet
@@ -22,7 +21,6 @@ public:
static constexpr const char * NAME = "[none]";
bool contains(const FieldSet& f) const override { return f.getType() == Type::NONE; }
Type getType() const override { return Type::NONE; }
- FieldSet* clone() const override { return new NoFields(); }
};
class DocIdOnly final : public FieldSet
@@ -33,7 +31,6 @@ public:
return fields.getType() == Type::DOCID || fields.getType() == Type::NONE;
}
Type getType() const override { return Type::DOCID; }
- FieldSet* clone() const override { return new DocIdOnly(); }
};
class FieldCollection : public FieldSet
@@ -49,20 +46,8 @@ public:
bool contains(const FieldSet& fields) const override;
Type getType() const override { return Type::SET; }
- /**
- * @return Returns the document type the collection is associated with.
- */
- const DocumentType& getDocumentType() const {
- return _docType;
- }
-
- /**
- * Returns all the fields contained in this collection.
- */
+ const DocumentType& getDocumentType() const { return _docType; }
const Field::Set& getFields() const { return _set; }
-
- FieldSet* clone() const override { return new FieldCollection(*this); }
-
uint64_t hash() const { return _hash; }
private:
Field::Set _set;
diff --git a/persistence/src/tests/dummyimpl/dummyimpltest.cpp b/persistence/src/tests/dummyimpl/dummyimpltest.cpp
index 6cf0b53766f..7bc27266020 100644
--- a/persistence/src/tests/dummyimpl/dummyimpltest.cpp
+++ b/persistence/src/tests/dummyimpl/dummyimpltest.cpp
@@ -14,9 +14,9 @@ namespace {
struct DummyPersistenceFactory : public ConformanceTest::PersistenceFactory {
using Repo = document::DocumentTypeRepo;
- PersistenceProvider::UP
+ std::unique_ptr<PersistenceProvider>
getPersistenceImplementation(const std::shared_ptr<const Repo>& repo, const Repo::DocumenttypesConfig&) override {
- return PersistenceProvider::UP(new dummy::DummyPersistence(repo, 4));
+ return std::make_unique<dummy::DummyPersistence>(repo, 4);
}
bool supportsActiveState() const override { return true; }
diff --git a/persistence/src/vespa/persistence/conformancetest/conformancetest.cpp b/persistence/src/vespa/persistence/conformancetest/conformancetest.cpp
index 3d15d09814f..1871d8943d8 100644
--- a/persistence/src/vespa/persistence/conformancetest/conformancetest.cpp
+++ b/persistence/src/vespa/persistence/conformancetest/conformancetest.cpp
@@ -27,13 +27,15 @@ using storage::spi::test::makeSpiBucket;
namespace storage::spi {
+using PersistenceProviderUP = std::unique_ptr<PersistenceProvider>;
+
namespace {
LoadType defaultLoadType(0, "default");
-PersistenceProvider::UP getSpi(ConformanceTest::PersistenceFactory &factory,
+std::unique_ptr<PersistenceProvider> getSpi(ConformanceTest::PersistenceFactory &factory,
const document::TestDocMan &testDocMan) {
- PersistenceProvider::UP result(factory.getPersistenceImplementation(
+ PersistenceProviderUP result(factory.getPersistenceImplementation(
testDocMan.getTypeRepoSP(), *testDocMan.getTypeConfig()));
EXPECT_TRUE(!result->initialize().hasError());
EXPECT_TRUE(!result->getPartitionStates().hasError());
@@ -53,15 +55,15 @@ createIterator(PersistenceProvider& spi,
IncludedVersions versions = NEWEST_DOCUMENT_ONLY,
int fields = ALL_FIELDS)
{
- document::FieldSet::UP fieldSet;
+ document::FieldSet::SP fieldSet;
if (fields & ALL_FIELDS) {
- fieldSet.reset(new document::AllFields());
+ fieldSet = std::make_shared<document::AllFields>();
} else {
- fieldSet.reset(new document::DocIdOnly());
+ fieldSet = std::make_shared<document::DocIdOnly>();
}
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- return spi.createIterator(b, *fieldSet, sel, versions, context);
+ return spi.createIterator(b, std::move(fieldSet), sel, versions, context);
}
Selection
@@ -206,7 +208,7 @@ iterateBucket(PersistenceProvider& spi,
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
CreateIteratorResult iter = spi.createIterator(
bucket,
- document::AllFields(),
+ std::make_shared<document::AllFields>(),
sel,
versions,
context);
@@ -330,7 +332,7 @@ TEST_F(ConformanceTest, testBasics)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket bucket(makeSpiBucket(BucketId(8, 0x01)));
@@ -358,7 +360,7 @@ TEST_F(ConformanceTest, testBasics)
CreateIteratorResult iter = spi->createIterator(
bucket,
- document::AllFields(),
+ std::make_shared<document::AllFields>(),
sel,
includeRemoves
? NEWEST_DOCUMENT_OR_REMOVE : NEWEST_DOCUMENT_ONLY,
@@ -415,7 +417,7 @@ TEST_F(ConformanceTest, testListBuckets)
//TODO: enable when supported by provider in storage
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
PartitionId partId(0);
BucketId bucketId1(8, 0x01);
@@ -460,7 +462,7 @@ TEST_F(ConformanceTest, testBucketInfo)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Bucket bucket(makeSpiBucket(BucketId(8, 0x01)));
@@ -517,7 +519,7 @@ TEST_F(ConformanceTest, testOrderIndependentBucketInfo)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Bucket bucket(makeSpiBucket(BucketId(8, 0x01)));
@@ -561,7 +563,7 @@ TEST_F(ConformanceTest, testPut)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket bucket(makeSpiBucket(BucketId(8, 0x01)));
@@ -586,7 +588,7 @@ TEST_F(ConformanceTest, testPutNewDocumentVersion)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket bucket(makeSpiBucket(BucketId(8, 0x01)));
@@ -637,7 +639,7 @@ TEST_F(ConformanceTest, testPutOlderDocumentVersion)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket bucket(makeSpiBucket(BucketId(8, 0x01)));
@@ -681,7 +683,7 @@ TEST_F(ConformanceTest, testPutDuplicate)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket bucket(makeSpiBucket(BucketId(8, 0x01)));
@@ -713,7 +715,7 @@ TEST_F(ConformanceTest, testRemove)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket bucket(makeSpiBucket(BucketId(8, 0x01)));
@@ -806,7 +808,7 @@ TEST_F(ConformanceTest, testRemoveMerge)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket bucket(makeSpiBucket(BucketId(8, 0x01)));
@@ -900,7 +902,7 @@ TEST_F(ConformanceTest, testUpdate)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
@@ -999,7 +1001,7 @@ TEST_F(ConformanceTest, testGet)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
@@ -1042,7 +1044,7 @@ TEST_F(ConformanceTest, testIterateCreateIterator)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket b(makeSpiBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
@@ -1061,7 +1063,7 @@ TEST_F(ConformanceTest, testIterateWithUnknownId)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket b(makeSpiBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
@@ -1075,7 +1077,7 @@ TEST_F(ConformanceTest, testIterateDestroyIterator)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket b(makeSpiBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
@@ -1107,7 +1109,7 @@ TEST_F(ConformanceTest, testIterateAllDocs)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket b(makeSpiBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
@@ -1125,7 +1127,7 @@ TEST_F(ConformanceTest, testIterateAllDocsNewestVersionOnly)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket b(makeSpiBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
@@ -1153,7 +1155,7 @@ TEST_F(ConformanceTest, testIterateChunked)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket b(makeSpiBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
@@ -1173,7 +1175,7 @@ TEST_F(ConformanceTest, testMaxByteSize)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket b(makeSpiBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
@@ -1199,7 +1201,7 @@ TEST_F(ConformanceTest, testIterateMatchTimestampRange)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket b(makeSpiBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
@@ -1237,7 +1239,7 @@ TEST_F(ConformanceTest, testIterateExplicitTimestampSubset)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket b(makeSpiBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
@@ -1286,7 +1288,7 @@ TEST_F(ConformanceTest, testIterateRemoves)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket b(makeSpiBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
@@ -1321,8 +1323,7 @@ TEST_F(ConformanceTest, testIterateRemoves)
{
Selection sel(createSelection(""));
- CreateIteratorResult iter(
- createIterator(*spi, b, sel, NEWEST_DOCUMENT_OR_REMOVE));
+ CreateIteratorResult iter(createIterator(*spi, b, sel, NEWEST_DOCUMENT_OR_REMOVE));
std::vector<Chunk> chunks = doIterate(*spi, iter.getIteratorId(), 4096);
std::vector<DocEntry::UP> entries = getEntriesFromChunks(chunks);
@@ -1337,7 +1338,7 @@ TEST_F(ConformanceTest, testIterateMatchSelection)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket b(makeSpiBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
@@ -1351,15 +1352,11 @@ TEST_F(ConformanceTest, testIterateMatchSelection)
spi->put(b, Timestamp(1000 + i), doc, context);
if ((i % 3) == 0) {
- docsToVisit.push_back(
- DocAndTimestamp(doc, Timestamp(1000 + i)));
+ docsToVisit.push_back(DocAndTimestamp(doc, Timestamp(1000 + i)));
}
}
- CreateIteratorResult iter(
- createIterator(*spi,
- b,
- createSelection("testdoctype1.headerval % 3 == 0")));
+ CreateIteratorResult iter(createIterator(*spi, b, createSelection("testdoctype1.headerval % 3 == 0")));
std::vector<Chunk> chunks = doIterate(*spi, iter.getIteratorId(), 2048 * 1024);
verifyDocs(docsToVisit, chunks);
@@ -1371,7 +1368,7 @@ TEST_F(ConformanceTest, testIterationRequiringDocumentIdOnlyMatching)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket b(makeSpiBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
@@ -1381,13 +1378,11 @@ TEST_F(ConformanceTest, testIterationRequiringDocumentIdOnlyMatching)
// Document does not already exist, remove should create a
// remove entry for it regardless.
- EXPECT_TRUE(
- !spi->remove(b, Timestamp(2000), removedId, context).wasFound());
+ EXPECT_TRUE(!spi->remove(b, Timestamp(2000), removedId, context).wasFound());
Selection sel(createSelection("id == '" + removedId.toString() + "'"));
- CreateIteratorResult iter(
- createIterator(*spi, b, sel, NEWEST_DOCUMENT_OR_REMOVE));
+ CreateIteratorResult iter(createIterator(*spi, b, sel, NEWEST_DOCUMENT_OR_REMOVE));
EXPECT_TRUE(iter.getErrorCode() == Result::ErrorType::NONE);
std::vector<Chunk> chunks = doIterate(*spi, iter.getIteratorId(), 4096);
@@ -1403,16 +1398,14 @@ TEST_F(ConformanceTest, testIterateBadDocumentSelection)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket b(makeSpiBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
{
- CreateIteratorResult iter(
- createIterator(*spi, b, createSelection("the muppet show")));
+ CreateIteratorResult iter(createIterator(*spi, b, createSelection("the muppet show")));
if (iter.getErrorCode() == Result::ErrorType::NONE) {
- IterateResult result(
- spi->iterate(iter.getIteratorId(), 4096, context));
+ IterateResult result(spi->iterate(iter.getIteratorId(), 4096, context));
EXPECT_EQ(Result::ErrorType::NONE, result.getErrorCode());
EXPECT_EQ(size_t(0), result.getEntries().size());
EXPECT_EQ(true, result.isCompleted());
@@ -1422,14 +1415,9 @@ TEST_F(ConformanceTest, testIterateBadDocumentSelection)
}
}
{
- CreateIteratorResult iter(
- createIterator(*spi,
- b,
- createSelection(
- "unknownddoctype.something=thatthing")));
+ CreateIteratorResult iter(createIterator(*spi, b, createSelection("unknownddoctype.something=thatthing")));
if (iter.getErrorCode() == Result::ErrorType::NONE) {
- IterateResult result(spi->iterate(
- iter.getIteratorId(), 4096, context));
+ IterateResult result(spi->iterate(iter.getIteratorId(), 4096, context));
EXPECT_EQ(Result::ErrorType::NONE, result.getErrorCode());
EXPECT_EQ(size_t(0), result.getEntries().size());
EXPECT_EQ(true, result.isCompleted());
@@ -1444,7 +1432,7 @@ TEST_F(ConformanceTest, testIterateAlreadyCompleted)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket b(makeSpiBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
@@ -1468,7 +1456,7 @@ TEST_F(ConformanceTest, testIterateEmptyBucket)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket b(makeSpiBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
@@ -1488,7 +1476,7 @@ TEST_F(ConformanceTest, testDeleteBucket)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
@@ -1498,28 +1486,28 @@ TEST_F(ConformanceTest, testDeleteBucket)
spi->put(bucket, Timestamp(3), doc1, context);
spi->deleteBucket(bucket, context);
- testDeleteBucketPostCondition(spi, bucket, *doc1);
+ testDeleteBucketPostCondition(*spi, bucket, *doc1);
if (_factory->hasPersistence()) {
spi.reset();
document::TestDocMan testDocMan2;
spi = getSpi(*_factory, testDocMan2);
- testDeleteBucketPostCondition(spi, bucket, *doc1);
+ testDeleteBucketPostCondition(*spi, bucket, *doc1);
}
}
void
ConformanceTest::
-testDeleteBucketPostCondition(const PersistenceProvider::UP &spi,
+testDeleteBucketPostCondition(const PersistenceProvider &spi,
const Bucket &bucket,
const Document &doc1)
{
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
{
- GetResult result = spi->get(bucket,
- document::AllFields(),
- doc1.getId(),
- context);
+ GetResult result = spi.get(bucket,
+ document::AllFields(),
+ doc1.getId(),
+ context);
EXPECT_EQ(Result::ErrorType::NONE, result.getErrorCode());
EXPECT_EQ(Timestamp(0), result.getTimestamp());
@@ -1531,7 +1519,7 @@ TEST_F(ConformanceTest, testSplitNormalCase)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket bucketA(makeSpiBucket(BucketId(3, 0x02)));
@@ -1552,51 +1540,41 @@ TEST_F(ConformanceTest, testSplitNormalCase)
}
spi->split(bucketC, bucketA, bucketB, context);
- testSplitNormalCasePostCondition(spi, bucketA, bucketB, bucketC,
- testDocMan);
+ testSplitNormalCasePostCondition(*spi, bucketA, bucketB, bucketC, testDocMan);
if (_factory->hasPersistence()) {
spi.reset();
document::TestDocMan testDocMan2;
spi = getSpi(*_factory, testDocMan2);
- testSplitNormalCasePostCondition(spi, bucketA, bucketB, bucketC,
- testDocMan2);
+ testSplitNormalCasePostCondition(*spi, bucketA, bucketB, bucketC, testDocMan2);
}
}
void
ConformanceTest::
-testSplitNormalCasePostCondition(const PersistenceProvider::UP &spi,
+testSplitNormalCasePostCondition(const PersistenceProvider &spi,
const Bucket &bucketA,
const Bucket &bucketB,
const Bucket &bucketC,
document::TestDocMan &testDocMan)
{
- EXPECT_EQ(10, (int)spi->getBucketInfo(bucketA).getBucketInfo().
- getDocumentCount());
- EXPECT_EQ(10, (int)spi->getBucketInfo(bucketB).getBucketInfo().
- getDocumentCount());
+ EXPECT_EQ(10, (int)spi.getBucketInfo(bucketA).getBucketInfo().getDocumentCount());
+ EXPECT_EQ(10, (int)spi.getBucketInfo(bucketB).getBucketInfo().getDocumentCount());
document::AllFields fs;
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
for (uint32_t i = 0; i < 10; ++i) {
Document::UP doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
- EXPECT_TRUE(
- spi->get(bucketA, fs, doc1->getId(), context).hasDocument());
- EXPECT_TRUE(
- !spi->get(bucketC, fs, doc1->getId(), context).hasDocument());
- EXPECT_TRUE(
- !spi->get(bucketB, fs, doc1->getId(), context).hasDocument());
+ EXPECT_TRUE(spi.get(bucketA, fs, doc1->getId(), context).hasDocument());
+ EXPECT_TRUE(!spi.get(bucketC, fs, doc1->getId(), context).hasDocument());
+ EXPECT_TRUE(!spi.get(bucketB, fs, doc1->getId(), context).hasDocument());
}
for (uint32_t i = 10; i < 20; ++i) {
Document::UP doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
- EXPECT_TRUE(
- spi->get(bucketB, fs, doc1->getId(), context).hasDocument());
- EXPECT_TRUE(
- !spi->get(bucketA, fs, doc1->getId(), context).hasDocument());
- EXPECT_TRUE(
- !spi->get(bucketC, fs, doc1->getId(), context).hasDocument());
+ EXPECT_TRUE(spi.get(bucketB, fs, doc1->getId(), context).hasDocument());
+ EXPECT_TRUE(!spi.get(bucketA, fs, doc1->getId(), context).hasDocument());
+ EXPECT_TRUE(!spi.get(bucketC, fs, doc1->getId(), context).hasDocument());
}
}
@@ -1604,7 +1582,7 @@ TEST_F(ConformanceTest, testSplitTargetExists)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket bucketA(makeSpiBucket(BucketId(3, 0x02)));
@@ -1638,29 +1616,27 @@ TEST_F(ConformanceTest, testSplitTargetExists)
}
spi->split(bucketC, bucketA, bucketB, context);
- testSplitTargetExistsPostCondition(spi, bucketA, bucketB, bucketC,
- testDocMan);
+ testSplitTargetExistsPostCondition(*spi, bucketA, bucketB, bucketC,testDocMan);
if (_factory->hasPersistence()) {
spi.reset();
document::TestDocMan testDocMan2;
spi = getSpi(*_factory, testDocMan2);
- testSplitTargetExistsPostCondition(spi, bucketA, bucketB, bucketC,
- testDocMan2);
+ testSplitTargetExistsPostCondition(*spi, bucketA, bucketB, bucketC,testDocMan2);
}
}
void
ConformanceTest::
-testSplitTargetExistsPostCondition(const PersistenceProvider::UP &spi,
+testSplitTargetExistsPostCondition(const PersistenceProvider &spi,
const Bucket &bucketA,
const Bucket &bucketB,
const Bucket &bucketC,
document::TestDocMan &testDocMan)
{
- EXPECT_EQ(10, (int)spi->getBucketInfo(bucketA).getBucketInfo().
+ EXPECT_EQ(10, (int)spi.getBucketInfo(bucketA).getBucketInfo().
getDocumentCount());
- EXPECT_EQ(15, (int)spi->getBucketInfo(bucketB).getBucketInfo().
+ EXPECT_EQ(15, (int)spi.getBucketInfo(bucketB).getBucketInfo().
getDocumentCount());
document::AllFields fs;
@@ -1668,21 +1644,21 @@ testSplitTargetExistsPostCondition(const PersistenceProvider::UP &spi,
for (uint32_t i = 0; i < 10; ++i) {
Document::UP doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
EXPECT_TRUE(
- spi->get(bucketA, fs, doc1->getId(), context).hasDocument());
+ spi.get(bucketA, fs, doc1->getId(), context).hasDocument());
EXPECT_TRUE(
- !spi->get(bucketC, fs, doc1->getId(), context).hasDocument());
+ !spi.get(bucketC, fs, doc1->getId(), context).hasDocument());
EXPECT_TRUE(
- !spi->get(bucketB, fs, doc1->getId(), context).hasDocument());
+ !spi.get(bucketB, fs, doc1->getId(), context).hasDocument());
}
for (uint32_t i = 10; i < 25; ++i) {
Document::UP doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
EXPECT_TRUE(
- spi->get(bucketB, fs, doc1->getId(), context).hasDocument());
+ spi.get(bucketB, fs, doc1->getId(), context).hasDocument());
EXPECT_TRUE(
- !spi->get(bucketA, fs, doc1->getId(), context).hasDocument());
+ !spi.get(bucketA, fs, doc1->getId(), context).hasDocument());
EXPECT_TRUE(
- !spi->get(bucketC, fs, doc1->getId(), context).hasDocument());
+ !spi.get(bucketC, fs, doc1->getId(), context).hasDocument());
}
}
@@ -1690,7 +1666,7 @@ TEST_F(ConformanceTest, testSplitSingleDocumentInSource)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket target1(makeSpiBucket(BucketId(3, 0x02)));
@@ -1704,42 +1680,34 @@ TEST_F(ConformanceTest, testSplitSingleDocumentInSource)
spi->put(source, Timestamp(1), doc, context);
spi->split(source, target1, target2, context);
- testSplitSingleDocumentInSourcePostCondition(
- spi, source, target1, target2, testDocMan);
+ testSplitSingleDocumentInSourcePostCondition(*spi, source, target1, target2, testDocMan);
if (_factory->hasPersistence()) {
spi.reset();
document::TestDocMan testDocMan2;
spi = getSpi(*_factory, testDocMan2);
- testSplitSingleDocumentInSourcePostCondition(
- spi, source, target1, target2, testDocMan2);
+ testSplitSingleDocumentInSourcePostCondition(*spi, source, target1, target2, testDocMan2);
}
}
void
ConformanceTest::testSplitSingleDocumentInSourcePostCondition(
- const PersistenceProvider::UP& spi,
+ const PersistenceProvider& spi,
const Bucket& source,
const Bucket& target1,
const Bucket& target2,
document::TestDocMan& testDocMan)
{
- EXPECT_EQ(uint32_t(0),
- spi->getBucketInfo(source).getBucketInfo().
- getDocumentCount());
- EXPECT_EQ(uint32_t(0),
- spi->getBucketInfo(target1).getBucketInfo().
- getDocumentCount());
- EXPECT_EQ(uint32_t(1),
- spi->getBucketInfo(target2).getBucketInfo().
- getDocumentCount());
+ EXPECT_EQ(uint32_t(0), spi.getBucketInfo(source).getBucketInfo().getDocumentCount());
+ EXPECT_EQ(uint32_t(0), spi.getBucketInfo(target1).getBucketInfo().getDocumentCount());
+ EXPECT_EQ(uint32_t(1), spi.getBucketInfo(target2).getBucketInfo().getDocumentCount());
document::AllFields fs;
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Document::UP doc = testDocMan.createRandomDocumentAtLocation(0x06, 0);
- EXPECT_TRUE(spi->get(target2, fs, doc->getId(), context).hasDocument());
- EXPECT_TRUE(!spi->get(target1, fs, doc->getId(), context).hasDocument());
- EXPECT_TRUE(!spi->get(source, fs, doc->getId(), context).hasDocument());
+ EXPECT_TRUE(spi.get(target2, fs, doc->getId(), context).hasDocument());
+ EXPECT_TRUE(!spi.get(target1, fs, doc->getId(), context).hasDocument());
+ EXPECT_TRUE(!spi.get(source, fs, doc->getId(), context).hasDocument());
}
void
@@ -1776,21 +1744,19 @@ ConformanceTest::doTestJoinNormalCase(const Bucket& source1,
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
createAndPopulateJoinSourceBuckets(*spi, source1, source2, testDocMan);
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
spi->join(source1, source2, target, context);
- testJoinNormalCasePostCondition(spi, source1, source2, target,
- testDocMan);
+ testJoinNormalCasePostCondition(*spi, source1, source2, target, testDocMan);
if (_factory->hasPersistence()) {
spi.reset();
document::TestDocMan testDocMan2;
spi = getSpi(*_factory, testDocMan2);
- testJoinNormalCasePostCondition(spi, source1, source2, target,
- testDocMan2);
+ testJoinNormalCasePostCondition(*spi, source1, source2, target, testDocMan2);
}
}
@@ -1812,14 +1778,13 @@ TEST_F(ConformanceTest, testJoinNormalCaseWithMultipleBitsDecreased)
void
ConformanceTest::
-testJoinNormalCasePostCondition(const PersistenceProvider::UP &spi,
+testJoinNormalCasePostCondition(const PersistenceProvider &spi,
const Bucket &bucketA,
const Bucket &bucketB,
const Bucket &bucketC,
document::TestDocMan &testDocMan)
{
- EXPECT_EQ(20, (int)spi->getBucketInfo(bucketC).
- getBucketInfo().getDocumentCount());
+ EXPECT_EQ(20, (int)spi.getBucketInfo(bucketC).getBucketInfo().getDocumentCount());
document::AllFields fs;
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
@@ -1827,20 +1792,14 @@ testJoinNormalCasePostCondition(const PersistenceProvider::UP &spi,
Document::UP doc(
testDocMan.createRandomDocumentAtLocation(
bucketA.getBucketId().getId(), i));
- EXPECT_TRUE(
- spi->get(bucketC, fs, doc->getId(), context).hasDocument());
- EXPECT_TRUE(
- !spi->get(bucketA, fs, doc->getId(), context).hasDocument());
+ EXPECT_TRUE(spi.get(bucketC, fs, doc->getId(), context).hasDocument());
+ EXPECT_TRUE(!spi.get(bucketA, fs, doc->getId(), context).hasDocument());
}
for (uint32_t i = 10; i < 20; ++i) {
- Document::UP doc(
- testDocMan.createRandomDocumentAtLocation(
- bucketB.getBucketId().getId(), i));
- EXPECT_TRUE(
- spi->get(bucketC, fs, doc->getId(), context).hasDocument());
- EXPECT_TRUE(
- !spi->get(bucketB, fs, doc->getId(), context).hasDocument());
+ Document::UP doc(testDocMan.createRandomDocumentAtLocation(bucketB.getBucketId().getId(), i));
+ EXPECT_TRUE(spi.get(bucketC, fs, doc->getId(), context).hasDocument());
+ EXPECT_TRUE(!spi.get(bucketB, fs, doc->getId(), context).hasDocument());
}
}
@@ -1849,7 +1808,7 @@ TEST_F(ConformanceTest, testJoinTargetExists)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket bucketA(makeSpiBucket(BucketId(3, 0x02)));
@@ -1878,51 +1837,43 @@ TEST_F(ConformanceTest, testJoinTargetExists)
}
spi->join(bucketA, bucketB, bucketC, context);
- testJoinTargetExistsPostCondition(spi, bucketA, bucketB, bucketC,
- testDocMan);
+ testJoinTargetExistsPostCondition(*spi, bucketA, bucketB, bucketC, testDocMan);
if (_factory->hasPersistence()) {
spi.reset();
document::TestDocMan testDocMan2;
spi = getSpi(*_factory, testDocMan2);
- testJoinTargetExistsPostCondition(spi, bucketA, bucketB, bucketC,
- testDocMan2);
+ testJoinTargetExistsPostCondition(*spi, bucketA, bucketB, bucketC, testDocMan2);
}
}
void
ConformanceTest::
-testJoinTargetExistsPostCondition(const PersistenceProvider::UP &spi,
+testJoinTargetExistsPostCondition(const PersistenceProvider &spi,
const Bucket &bucketA,
const Bucket &bucketB,
const Bucket &bucketC,
document::TestDocMan &testDocMan)
{
- EXPECT_EQ(30, (int)spi->getBucketInfo(bucketC).getBucketInfo().
- getDocumentCount());
+ EXPECT_EQ(30, (int)spi.getBucketInfo(bucketC).getBucketInfo().getDocumentCount());
document::AllFields fs;
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
for (uint32_t i = 0; i < 10; ++i) {
Document::UP doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
- EXPECT_TRUE(
- spi->get(bucketC, fs, doc1->getId(), context).hasDocument());
- EXPECT_TRUE(
- !spi->get(bucketA, fs, doc1->getId(), context).hasDocument());
+ EXPECT_TRUE(spi.get(bucketC, fs, doc1->getId(), context).hasDocument());
+ EXPECT_TRUE(!spi.get(bucketA, fs, doc1->getId(), context).hasDocument());
}
for (uint32_t i = 10; i < 20; ++i) {
Document::UP doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
- EXPECT_TRUE(
- spi->get(bucketC, fs, doc1->getId(), context).hasDocument());
- EXPECT_TRUE(
- !spi->get(bucketB, fs, doc1->getId(), context).hasDocument());
+ EXPECT_TRUE(spi.get(bucketC, fs, doc1->getId(), context).hasDocument());
+ EXPECT_TRUE(!spi.get(bucketB, fs, doc1->getId(), context).hasDocument());
}
for (uint32_t i = 20; i < 30; ++i) {
Document::UP doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
- EXPECT_TRUE(
- spi->get(bucketC, fs, doc1->getId(), context).hasDocument());
+ EXPECT_TRUE(spi.get(bucketC, fs, doc1->getId(), context).hasDocument());
}
}
@@ -1937,8 +1888,7 @@ ConformanceTest::populateBucket(const Bucket& b,
assert(from <= to);
for (uint32_t i = from; i < to; ++i) {
const uint32_t location = b.getBucketId().getId();
- Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(
- location, i);
+ Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(location, i);
spi.put(b, Timestamp(i + 1), doc1, context);
}
}
@@ -1947,7 +1897,7 @@ TEST_F(ConformanceTest, testJoinOneBucket)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket bucketA(makeSpiBucket(BucketId(3, 0x02)));
@@ -1959,40 +1909,37 @@ TEST_F(ConformanceTest, testJoinOneBucket)
populateBucket(bucketA, *spi, context, 0, 10, testDocMan);
spi->join(bucketA, bucketB, bucketC, context);
- testJoinOneBucketPostCondition(spi, bucketA, bucketC, testDocMan);
+ testJoinOneBucketPostCondition(*spi, bucketA, bucketC, testDocMan);
if (_factory->hasPersistence()) {
spi.reset();
document::TestDocMan testDocMan2;
spi = getSpi(*_factory, testDocMan2);
- testJoinOneBucketPostCondition(spi, bucketA, bucketC, testDocMan2);
+ testJoinOneBucketPostCondition(*spi, bucketA, bucketC, testDocMan2);
}
}
void
ConformanceTest::
-testJoinOneBucketPostCondition(const PersistenceProvider::UP &spi,
+testJoinOneBucketPostCondition(const PersistenceProvider &spi,
const Bucket &bucketA,
const Bucket &bucketC,
document::TestDocMan &testDocMan)
{
- EXPECT_EQ(10, (int)spi->getBucketInfo(bucketC).getBucketInfo().
- getDocumentCount());
+ EXPECT_EQ(10, (int)spi.getBucketInfo(bucketC).getBucketInfo().getDocumentCount());
document::AllFields fs;
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
for (uint32_t i = 0; i < 10; ++i) {
Document::UP doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
- EXPECT_TRUE(
- spi->get(bucketC, fs, doc1->getId(), context).hasDocument());
- EXPECT_TRUE(
- !spi->get(bucketA, fs, doc1->getId(), context).hasDocument());
+ EXPECT_TRUE(spi.get(bucketC, fs, doc1->getId(), context).hasDocument());
+ EXPECT_TRUE(!spi.get(bucketA, fs, doc1->getId(), context).hasDocument());
}
}
void
ConformanceTest::
testJoinSameSourceBucketsPostCondition(
- const PersistenceProvider::UP& spi,
+ const PersistenceProvider& spi,
const Bucket& source,
const Bucket& target,
document::TestDocMan& testDocMan)
@@ -2002,25 +1949,23 @@ testJoinSameSourceBucketsPostCondition(
}
void
-ConformanceTest::doTestJoinSameSourceBuckets(const Bucket& source,
- const Bucket& target)
+ConformanceTest::doTestJoinSameSourceBuckets(const Bucket& source, const Bucket& target)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
spi->createBucket(source, context);
populateBucket(source, *spi, context, 0, 10, testDocMan);
spi->join(source, source, target, context);
- testJoinSameSourceBucketsPostCondition(spi, source, target, testDocMan);
+ testJoinSameSourceBucketsPostCondition(*spi, source, target, testDocMan);
if (_factory->hasPersistence()) {
spi.reset();
document::TestDocMan testDocMan2;
spi = getSpi(*_factory, testDocMan2);
- testJoinSameSourceBucketsPostCondition(
- spi, source, target, testDocMan2);
+ testJoinSameSourceBucketsPostCondition(*spi, source, target, testDocMan2);
}
}
@@ -2045,17 +1990,14 @@ ConformanceTest::testJoinSameSourceBucketsTargetExistsPostCondition(
const Bucket& target,
document::TestDocMan& testDocMan)
{
- EXPECT_EQ(20, (int)spi.getBucketInfo(target).getBucketInfo().
- getDocumentCount());
+ EXPECT_EQ(20, (int)spi.getBucketInfo(target).getBucketInfo().getDocumentCount());
document::AllFields fs;
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
for (uint32_t i = 0; i < 20; ++i) {
Document::UP doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
- EXPECT_TRUE(
- spi.get(target, fs, doc1->getId(), context).hasDocument());
- EXPECT_TRUE(
- !spi.get(source, fs, doc1->getId(), context).hasDocument());
+ EXPECT_TRUE(spi.get(target, fs, doc1->getId(), context).hasDocument());
+ EXPECT_TRUE(!spi.get(source, fs, doc1->getId(), context).hasDocument());
}
}
@@ -2063,7 +2005,7 @@ TEST_F(ConformanceTest, testJoinSameSourceBucketsTargetExists)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket source(makeSpiBucket(BucketId(3, 0x02)));
@@ -2082,8 +2024,7 @@ TEST_F(ConformanceTest, testJoinSameSourceBucketsTargetExists)
spi.reset();
document::TestDocMan testDocMan2;
spi = getSpi(*_factory, testDocMan2);
- testJoinSameSourceBucketsTargetExistsPostCondition(
- *spi, source, target, testDocMan2);
+ testJoinSameSourceBucketsTargetExistsPostCondition(*spi, source, target, testDocMan2);
}
}
@@ -2091,9 +2032,8 @@ TEST_F(ConformanceTest, testGetModifiedBuckets)
{
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
- EXPECT_EQ(0,
- (int)spi->getModifiedBuckets(makeBucketSpace()).getList().size());
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
+ EXPECT_EQ(0, (int)spi->getModifiedBuckets(makeBucketSpace()).getList().size());
}
TEST_F(ConformanceTest, testBucketActivation)
@@ -2104,7 +2044,7 @@ TEST_F(ConformanceTest, testBucketActivation)
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket bucket(makeSpiBucket(BucketId(8, 0x01)));
@@ -2147,7 +2087,7 @@ TEST_F(SingleDocTypeConformanceTest, testBucketActivationSplitAndJoin)
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket bucketA(makeSpiBucket(BucketId(3, 0x02)));
@@ -2225,7 +2165,7 @@ TEST_F(ConformanceTest, testRemoveEntry)
}
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Bucket bucket(makeSpiBucket(BucketId(8, 0x01)));
@@ -2294,7 +2234,7 @@ TEST_F(ConformanceTest, testBucketSpaces)
}
document::TestDocMan testDocMan;
_factory->clear();
- PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ PersistenceProviderUP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
BucketSpace bucketSpace0(makeBucketSpace("testdoctype1"));
BucketSpace bucketSpace1(makeBucketSpace("testdoctype2"));
diff --git a/persistence/src/vespa/persistence/conformancetest/conformancetest.h b/persistence/src/vespa/persistence/conformancetest/conformancetest.h
index 05c1bc87c9f..19be63cf3cb 100644
--- a/persistence/src/vespa/persistence/conformancetest/conformancetest.h
+++ b/persistence/src/vespa/persistence/conformancetest/conformancetest.h
@@ -26,12 +26,13 @@ namespace storage::spi {
class ConformanceTest : public ::testing::Test {
public:
+ using PersistenceProviderUP = std::unique_ptr<PersistenceProvider>;
struct PersistenceFactory {
typedef std::unique_ptr<PersistenceFactory> UP;
using DocumenttypesConfig = const document::internal::InternalDocumenttypesType;
- virtual ~PersistenceFactory() {}
- virtual PersistenceProvider::UP getPersistenceImplementation(
+ virtual ~PersistenceFactory() = default;
+ virtual PersistenceProviderUP getPersistenceImplementation(
const std::shared_ptr<const document::DocumentTypeRepo> &repo,
const DocumenttypesConfig &typesCfg) = 0;
@@ -74,19 +75,19 @@ protected:
document::TestDocMan& testDocMan);
void
- testDeleteBucketPostCondition(const PersistenceProvider::UP &spi,
+ testDeleteBucketPostCondition(const PersistenceProvider &spi,
const Bucket &bucket,
const Document &doc1);
void
- testSplitNormalCasePostCondition(const PersistenceProvider::UP &spi,
+ testSplitNormalCasePostCondition(const PersistenceProvider &spi,
const Bucket &bucketA,
const Bucket &bucketB,
const Bucket &bucketC,
document::TestDocMan &testDocMan);
void
- testSplitTargetExistsPostCondition(const PersistenceProvider::UP &spi,
+ testSplitTargetExistsPostCondition(const PersistenceProvider &spi,
const Bucket &bucketA,
const Bucket &bucketB,
const Bucket &bucketC,
@@ -94,7 +95,7 @@ protected:
void
testSplitSingleDocumentInSourcePostCondition(
- const PersistenceProvider::UP& spi,
+ const PersistenceProvider& spi,
const Bucket& source,
const Bucket& target1,
const Bucket& target2,
@@ -113,21 +114,21 @@ protected:
const Bucket& target);
void
- testJoinNormalCasePostCondition(const PersistenceProvider::UP &spi,
+ testJoinNormalCasePostCondition(const PersistenceProvider &spi,
const Bucket &bucketA,
const Bucket &bucketB,
const Bucket &bucketC,
document::TestDocMan &testDocMan);
void
- testJoinTargetExistsPostCondition(const PersistenceProvider::UP &spi,
+ testJoinTargetExistsPostCondition(const PersistenceProvider &spi,
const Bucket &bucketA,
const Bucket &bucketB,
const Bucket &bucketC,
document::TestDocMan &testDocMan);
void
- testJoinOneBucketPostCondition(const PersistenceProvider::UP &spi,
+ testJoinOneBucketPostCondition(const PersistenceProvider &spi,
const Bucket &bucketA,
const Bucket &bucketC,
document::TestDocMan &testDocMan);
@@ -138,7 +139,7 @@ protected:
void
testJoinSameSourceBucketsPostCondition(
- const PersistenceProvider::UP& spi,
+ const PersistenceProvider& spi,
const Bucket& source,
const Bucket& target,
document::TestDocMan& testDocMan);
diff --git a/persistence/src/vespa/persistence/dummyimpl/dummypersistence.cpp b/persistence/src/vespa/persistence/dummyimpl/dummypersistence.cpp
index 73783413061..297a3319939 100644
--- a/persistence/src/vespa/persistence/dummyimpl/dummypersistence.cpp
+++ b/persistence/src/vespa/persistence/dummyimpl/dummypersistence.cpp
@@ -547,12 +547,7 @@ DummyPersistence::get(const Bucket& b, const document::FieldSet& fieldSet, const
}
CreateIteratorResult
-DummyPersistence::createIterator(
- const Bucket& b,
- const document::FieldSet& fs,
- const Selection& s,
- IncludedVersions v,
- Context&)
+DummyPersistence::createIterator(const Bucket &b, FieldSetSP fs, const Selection &s, IncludedVersions v, Context &)
{
DUMMYPERSISTENCE_VERIFY_INITIALIZED;
LOG(debug, "createIterator(%s)", b.toString().c_str());
@@ -585,7 +580,7 @@ DummyPersistence::createIterator(
}
// Memory pointed to by 'it' should now be valid from here on out
- it->_fieldSet = std::unique_ptr<document::FieldSet>(fs.clone());
+ it->_fieldSet = std::move(fs);
const BucketContent::GidMapType& gidMap((*bc)->_gidMap);
if (s.getTimestampSubset().empty()) {
@@ -600,8 +595,7 @@ DummyPersistence::createIterator(
entry.getTimestamp() > s.getToTimestamp()) {
continue;
}
- BucketContent::GidMapType::const_iterator gidIt(
- gidMap.find(bucketEntry.gid));
+ BucketContent::GidMapType::const_iterator gidIt(gidMap.find(bucketEntry.gid));
assert(gidIt != gidMap.end());
if (entry.isRemove()) {
diff --git a/persistence/src/vespa/persistence/dummyimpl/dummypersistence.h b/persistence/src/vespa/persistence/dummyimpl/dummypersistence.h
index 94c2e1cd9a4..c3a4991a590 100644
--- a/persistence/src/vespa/persistence/dummyimpl/dummypersistence.h
+++ b/persistence/src/vespa/persistence/dummyimpl/dummypersistence.h
@@ -93,7 +93,7 @@ struct Iterator {
using UP = std::unique_ptr<Iterator>;
Bucket _bucket;
std::vector<Timestamp> _leftToIterate;
- std::unique_ptr<document::FieldSet> _fieldSet;
+ std::shared_ptr<document::FieldSet> _fieldSet;
};
class DummyPersistence;
@@ -158,11 +158,8 @@ public:
RemoveResult remove(const Bucket& b, Timestamp t, const DocumentId& did, Context&) override;
UpdateResult update(const Bucket&, Timestamp, DocumentUpdateSP, Context&) override;
- CreateIteratorResult createIterator(const Bucket&,
- const document::FieldSet& fs,
- const Selection&,
- IncludedVersions,
- Context&) override;
+ CreateIteratorResult
+ createIterator(const Bucket &bucket, FieldSetSP fs, const Selection &, IncludedVersions, Context &context) override;
IterateResult iterate(IteratorId, uint64_t maxByteSize, Context&) const override;
Result destroyIterator(IteratorId, Context&) override;
diff --git a/persistence/src/vespa/persistence/spi/persistenceprovider.h b/persistence/src/vespa/persistence/spi/persistenceprovider.h
index 2bb91c776d0..021ac6338eb 100644
--- a/persistence/src/vespa/persistence/spi/persistenceprovider.h
+++ b/persistence/src/vespa/persistence/spi/persistenceprovider.h
@@ -56,8 +56,8 @@ namespace storage::spi {
*/
struct PersistenceProvider
{
- typedef std::unique_ptr<PersistenceProvider> UP;
using BucketSpace = document::BucketSpace;
+ using FieldSetSP = std::shared_ptr<document::FieldSet>;
virtual ~PersistenceProvider();
@@ -258,12 +258,9 @@ struct PersistenceProvider
* error. Identifier must be non-zero, as zero is used internally to
* signify an invalid iterator ID.
*/
- virtual CreateIteratorResult createIterator(
- const Bucket&,
- const document::FieldSet& fieldSet,
- const Selection& selection, //TODO: Make AST
- IncludedVersions versions,
- Context&) = 0;
+ virtual CreateIteratorResult
+ createIterator(const Bucket &bucket, FieldSetSP fieldSet, const Selection &selection,
+ IncludedVersions versions, Context &context) = 0;
/**
* Iterate over a bucket's document space using a valid iterator id
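Reviewer note: with this SPI change, createIterator() receives the field set as a std::shared_ptr (FieldSetSP) by value, so an implementation can keep it alive for the lifetime of the iterator without calling clone(). A minimal usage sketch assuming the types declared in this header; provider, bucket, selection and context stand in for real objects:

    // Caller: build the field set once and share ownership with the provider.
    auto fields = std::make_shared<document::AllFields>();
    storage::spi::CreateIteratorResult result =
        provider.createIterator(bucket, fields, selection,
                                storage::spi::NEWEST_DOCUMENT_ONLY, context);

    // Implementation: retain the shared_ptr in the iterator state instead of cloning,
    // as DummyPersistence now does with it->_fieldSet = std::move(fs);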
diff --git a/searchcore/src/apps/proton/downpersistence.cpp b/searchcore/src/apps/proton/downpersistence.cpp
index aa87c383c33..999cf6696ea 100644
--- a/searchcore/src/apps/proton/downpersistence.cpp
+++ b/searchcore/src/apps/proton/downpersistence.cpp
@@ -95,8 +95,7 @@ DownPersistence::get(const Bucket&, const document::FieldSet&, const DocumentId&
}
CreateIteratorResult
-DownPersistence::createIterator(const Bucket&, const document::FieldSet&,
- const Selection&, IncludedVersions, Context&)
+DownPersistence::createIterator(const Bucket &, FieldSetSP, const Selection &, IncludedVersions, Context &)
{
return CreateIteratorResult(errorResult.getErrorCode(), errorResult.getErrorMessage());
}
diff --git a/searchcore/src/apps/proton/downpersistence.h b/searchcore/src/apps/proton/downpersistence.h
index 10e3d9c1ad7..d8b48172880 100644
--- a/searchcore/src/apps/proton/downpersistence.h
+++ b/searchcore/src/apps/proton/downpersistence.h
@@ -39,8 +39,9 @@ public:
UpdateResult update(const Bucket&, Timestamp timestamp, DocumentUpdateSP update, Context&) override;
GetResult get(const Bucket&, const document::FieldSet& fieldSet, const DocumentId& id, Context&) const override;
- CreateIteratorResult createIterator(const Bucket&, const document::FieldSet& fieldSet,
- const Selection& selection, IncludedVersions versions, Context&) override;
+ CreateIteratorResult
+ createIterator(const Bucket &bucket, FieldSetSP fieldSet, const Selection &selection, IncludedVersions versions,
+ Context &context) override;
IterateResult iterate(IteratorId id, uint64_t maxByteSize, Context&) const override;
Result destroyIterator(IteratorId id, Context&) override;
diff --git a/searchcore/src/apps/tests/persistenceconformance_test.cpp b/searchcore/src/apps/tests/persistenceconformance_test.cpp
index 217d1edcd57..44fb2770594 100644
--- a/searchcore/src/apps/tests/persistenceconformance_test.cpp
+++ b/searchcore/src/apps/tests/persistenceconformance_test.cpp
@@ -362,14 +362,11 @@ public:
~MyPersistenceFactory() override {
clear();
}
- PersistenceProvider::UP getPersistenceImplementation(const std::shared_ptr<const DocumentTypeRepo> &repo,
+ std::unique_ptr<PersistenceProvider> getPersistenceImplementation(const std::shared_ptr<const DocumentTypeRepo> &repo,
const DocumenttypesConfig &typesCfg) override {
ConfigFactory cfgFactory(repo, std::make_shared<DocumenttypesConfig>(typesCfg), _schemaFactory);
_docDbRepo = std::make_unique<DocumentDBRepo>(cfgFactory, _docDbFactory);
- PersistenceEngine::UP engine(new MyPersistenceEngine(_engineOwner,
- _writeFilter,
- std::move(_docDbRepo),
- _docType));
+ auto engine = std::make_unique<MyPersistenceEngine>(_engineOwner, _writeFilter, std::move(_docDbRepo), _docType);
assert( ! _docDbRepo); // Repo should be handed over
return engine;
}
diff --git a/searchcore/src/tests/proton/document_iterator/document_iterator_test.cpp b/searchcore/src/tests/proton/document_iterator/document_iterator_test.cpp
index 147bd9afb84..44ce55edfbd 100644
--- a/searchcore/src/tests/proton/document_iterator/document_iterator_test.cpp
+++ b/searchcore/src/tests/proton/document_iterator/document_iterator_test.cpp
@@ -447,14 +447,14 @@ TEST("require that custom retrievers work as expected") {
}
TEST("require that an empty list of retrievers can be iterated") {
- DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
+ DocumentIterator itr(bucket(5), std::make_shared<document::AllFields>(), selectAll(), newestV(), -1, false);
IterateResult res = itr.iterate(largeNum);
EXPECT_EQUAL(0u, res.getEntries().size());
EXPECT_TRUE(res.isCompleted());
}
TEST("require that a list of empty retrievers can be iterated") {
- DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
+ DocumentIterator itr(bucket(5), std::make_shared<document::AllFields>(), selectAll(), newestV(), -1, false);
itr.add(nil());
itr.add(nil());
itr.add(nil());
@@ -464,7 +464,7 @@ TEST("require that a list of empty retrievers can be iterated") {
}
TEST("require that normal documents can be iterated") {
- DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
+ DocumentIterator itr(bucket(5), std::make_shared<document::AllFields>(), selectAll(), newestV(), -1, false);
itr.add(doc("id:ns:document::1", Timestamp(2), bucket(5)));
itr.add(cat(doc("id:ns:document::2", Timestamp(3), bucket(5)),
doc("id:ns:document::3", Timestamp(4), bucket(5))));
@@ -487,12 +487,12 @@ void verifyIterateIgnoringStopSignal(DocumentIterator & itr) {
}
TEST("require that iterator stops at the end, and does not auto rewind") {
- DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
+ DocumentIterator itr(bucket(5), std::make_shared<document::AllFields>(), selectAll(), newestV(), -1, false);
TEST_DO(verifyIterateIgnoringStopSignal(itr));
}
TEST("require that iterator ignoring maxbytes stops at the end, and does not auto rewind") {
- DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, true);
+ DocumentIterator itr(bucket(5), std::make_shared<document::AllFields>(), selectAll(), newestV(), -1, true);
TEST_DO(verifyIterateIgnoringStopSignal(itr));
}
@@ -515,12 +515,12 @@ void verifyStrongReadConsistency(DocumentIterator & itr) {
}
TEST("require that default readconsistency does commit") {
- DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
+ DocumentIterator itr(bucket(5), std::make_shared<document::AllFields>(), selectAll(), newestV(), -1, false);
TEST_DO(verifyStrongReadConsistency(itr));
}
TEST("require that readconsistency::strong does commit") {
- DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false, storage::spi::ReadConsistency::STRONG);
+ DocumentIterator itr(bucket(5), std::make_shared<document::AllFields>(), selectAll(), newestV(), -1, false, storage::spi::ReadConsistency::STRONG);
TEST_DO(verifyStrongReadConsistency(itr));
}
@@ -528,7 +528,7 @@ TEST("require that docid limit is honoured") {
IDocumentRetriever::SP retriever = doc("id:ns:document::1", Timestamp(2), bucket(5));
auto & udr = dynamic_cast<UnitDR &>(*retriever);
udr.docid = 7;
- DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
+ DocumentIterator itr(bucket(5), std::make_shared<document::AllFields>(), selectAll(), newestV(), -1, false);
itr.add(retriever);
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
@@ -536,7 +536,7 @@ TEST("require that docid limit is honoured") {
TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::1")), Timestamp(2)));
udr.setDocIdLimit(7);
- DocumentIterator limited(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
+ DocumentIterator limited(bucket(5), std::make_shared<document::AllFields>(), selectAll(), newestV(), -1, false);
limited.add(retriever);
res = limited.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
@@ -544,7 +544,7 @@ TEST("require that docid limit is honoured") {
}
TEST("require that remove entries can be iterated") {
- DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
+ DocumentIterator itr(bucket(5), std::make_shared<document::AllFields>(), selectAll(), newestV(), -1, false);
itr.add(rem("id:ns:document::1", Timestamp(2), bucket(5)));
itr.add(cat(rem("id:ns:document::2", Timestamp(3), bucket(5)),
rem("id:ns:document::3", Timestamp(4), bucket(5))));
@@ -557,7 +557,7 @@ TEST("require that remove entries can be iterated") {
}
TEST("require that remove entries can be ignored") {
- DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), docV(), -1, false);
+ DocumentIterator itr(bucket(5), std::make_shared<document::AllFields>(), selectAll(), docV(), -1, false);
itr.add(rem("id:ns:document::1", Timestamp(2), bucket(5)));
itr.add(cat(doc("id:ns:document::2", Timestamp(3), bucket(5)),
rem("id:ns:document::3", Timestamp(4), bucket(5))));
@@ -568,7 +568,7 @@ TEST("require that remove entries can be ignored") {
}
TEST("require that iterating all versions returns both documents and removes") {
- DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), allV(), -1, false);
+ DocumentIterator itr(bucket(5), std::make_shared<document::AllFields>(), selectAll(), allV(), -1, false);
itr.add(rem("id:ns:document::1", Timestamp(2), bucket(5)));
itr.add(cat(doc("id:ns:document::2", Timestamp(3), bucket(5)),
rem("id:ns:document::3", Timestamp(4), bucket(5))));
@@ -581,7 +581,7 @@ TEST("require that iterating all versions returns both documents and removes") {
}
TEST("require that using an empty field set returns meta-data only") {
- DocumentIterator itr(bucket(5), document::NoFields(), selectAll(), newestV(), -1, false);
+ DocumentIterator itr(bucket(5), std::make_shared<document::NoFields>(), selectAll(), newestV(), -1, false);
itr.add(doc("id:ns:document::1", Timestamp(2), bucket(5)));
itr.add(cat(doc("id:ns:document::2", Timestamp(3), bucket(5)),
rem("id:ns:document::3", Timestamp(4), bucket(5))));
@@ -594,7 +594,7 @@ TEST("require that using an empty field set returns meta-data only") {
}
TEST("require that entries in other buckets are skipped") {
- DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
+ DocumentIterator itr(bucket(5), std::make_shared<document::AllFields>(), selectAll(), newestV(), -1, false);
itr.add(rem("id:ns:document::1", Timestamp(2), bucket(6)));
itr.add(cat(doc("id:ns:document::2", Timestamp(3), bucket(5)),
doc("id:ns:document::3", Timestamp(4), bucket(6))));
@@ -605,7 +605,7 @@ TEST("require that entries in other buckets are skipped") {
}
TEST("require that maxBytes splits iteration results") {
- DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
+ DocumentIterator itr(bucket(5), std::make_shared<document::AllFields>(), selectAll(), newestV(), -1, false);
itr.add(doc("id:ns:document::1", Timestamp(2), bucket(5)));
itr.add(cat(rem("id:ns:document::2", Timestamp(3), bucket(5)),
doc("id:ns:document::3", Timestamp(4), bucket(5))));
@@ -626,7 +626,7 @@ TEST("require that maxBytes splits iteration results") {
}
TEST("require that maxBytes splits iteration results for meta-data only iteration") {
- DocumentIterator itr(bucket(5), document::NoFields(), selectAll(), newestV(), -1, false);
+ DocumentIterator itr(bucket(5), std::make_shared<document::NoFields>(), selectAll(), newestV(), -1, false);
itr.add(doc("id:ns:document::1", Timestamp(2), bucket(5)));
itr.add(cat(rem("id:ns:document::2", Timestamp(3), bucket(5)),
doc("id:ns:document::3", Timestamp(4), bucket(5))));
@@ -646,7 +646,7 @@ TEST("require that maxBytes splits iteration results for meta-data only iteratio
}
TEST("require that at least one document is returned by visit") {
- DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
+ DocumentIterator itr(bucket(5), std::make_shared<document::AllFields>(), selectAll(), newestV(), -1, false);
itr.add(doc("id:ns:document::1", Timestamp(2), bucket(5)));
itr.add(cat(rem("id:ns:document::2", Timestamp(3), bucket(5)),
doc("id:ns:document::3", Timestamp(4), bucket(5))));
@@ -656,7 +656,7 @@ TEST("require that at least one document is returned by visit") {
}
TEST("require that documents outside the timestamp limits are ignored") {
- DocumentIterator itr(bucket(5), document::AllFields(), selectTimestampRange(100, 200), newestV(), -1, false);
+ DocumentIterator itr(bucket(5), std::make_shared<document::AllFields>(), selectTimestampRange(100, 200), newestV(), -1, false);
itr.add(doc("id:ns:document::1", Timestamp(99), bucket(5)));
itr.add(doc("id:ns:document::2", Timestamp(100), bucket(5)));
itr.add(doc("id:ns:document::3", Timestamp(200), bucket(5)));
@@ -675,7 +675,7 @@ TEST("require that documents outside the timestamp limits are ignored") {
}
TEST("require that timestamp subset returns the appropriate documents") {
- DocumentIterator itr(bucket(5), document::AllFields(), selectTimestampSet(200, 350, 400), newestV(), -1, false);
+ DocumentIterator itr(bucket(5), std::make_shared<document::AllFields>(), selectTimestampSet(200, 350, 400), newestV(), -1, false);
itr.add(doc("id:ns:document::1", Timestamp(500), bucket(5)));
itr.add(doc("id:ns:document::2", Timestamp(400), bucket(5)));
itr.add(doc("id:ns:document::3", Timestamp(300), bucket(5)));
@@ -693,7 +693,7 @@ TEST("require that timestamp subset returns the appropriate documents") {
}
TEST("require that document selection will filter results") {
- DocumentIterator itr(bucket(5), document::AllFields(), selectDocs("id=\"id:ns:document::xxx*\""), newestV(), -1, false);
+ DocumentIterator itr(bucket(5), std::make_shared<document::AllFields>(), selectDocs("id=\"id:ns:document::xxx*\""), newestV(), -1, false);
itr.add(doc("id:ns:document::xxx1", Timestamp(99), bucket(5)));
itr.add(doc("id:ns:document::yyy1", Timestamp(100), bucket(5)));
itr.add(doc("id:ns:document::xxx2", Timestamp(200), bucket(5)));
@@ -712,7 +712,7 @@ TEST("require that document selection will filter results") {
}
TEST("require that document selection handles 'field == null'") {
- DocumentIterator itr(bucket(5), document::AllFields(), selectDocs("foo.aa == null"), newestV(), -1, false);
+ DocumentIterator itr(bucket(5), std::make_shared<document::AllFields>(), selectDocs("foo.aa == null"), newestV(), -1, false);
itr.add(doc_with_null_fields("id:ns:foo::xxx1", Timestamp(99), bucket(5)));
itr.add(doc_with_null_fields("id:ns:foo::xxx2", Timestamp(100), bucket(5)));
IterateResult res = itr.iterate(largeNum);
@@ -725,7 +725,7 @@ TEST("require that document selection handles 'field == null'") {
}
TEST("require that invalid document selection returns no documents") {
- DocumentIterator itr(bucket(5), document::AllFields(), selectDocs("=="), newestV(), -1, false);
+ DocumentIterator itr(bucket(5), std::make_shared<document::AllFields>(), selectDocs("=="), newestV(), -1, false);
itr.add(doc("id:ns:document::xxx1", Timestamp(99), bucket(5)));
itr.add(doc("id:ns:document::yyy1", Timestamp(100), bucket(5)));
itr.add(doc("id:ns:document::xxx2", Timestamp(200), bucket(5)));
@@ -740,7 +740,7 @@ TEST("require that invalid document selection returns no documents") {
}
TEST("require that document selection and timestamp range works together") {
- DocumentIterator itr(bucket(5), document::AllFields(), selectDocsWithinRange("id=\"id:ns:document::xxx*\"", 100, 200), newestV(), -1, false);
+ DocumentIterator itr(bucket(5), std::make_shared<document::AllFields>(), selectDocsWithinRange("id=\"id:ns:document::xxx*\"", 100, 200), newestV(), -1, false);
itr.add(doc("id:ns:document::xxx1", Timestamp(99), bucket(5)));
itr.add(doc("id:ns:document::yyy1", Timestamp(100), bucket(5)));
itr.add(doc("id:ns:document::xxx2", Timestamp(200), bucket(5)));
@@ -757,9 +757,8 @@ TEST("require that document selection and timestamp range works together") {
}
TEST("require that fieldset limits fields returned") {
- document::FieldCollection limited(getDocType(),
- document::Field::Set::Builder().add(&getDocType().getField("header")).build());
- DocumentIterator itr(bucket(5), limited, selectAll(), newestV(), -1, false);
+ auto limited = std::make_shared<document::FieldCollection>(getDocType(), document::Field::Set::Builder().add(&getDocType().getField("header")).build());
+ DocumentIterator itr(bucket(5), std::move(limited), selectAll(), newestV(), -1, false);
itr.add(doc_with_fields("id:ns:foo::xxx1", Timestamp(1), bucket(5)));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
@@ -777,8 +776,7 @@ bool contains(const Container& c, const T& value) {
}
TEST("require that userdoc-constrained selections pre-filter on GIDs") {
- DocumentIterator itr(bucket(5), document::AllFields(),
- selectDocs("id.user=1234"), newestV(), -1, false);
+ DocumentIterator itr(bucket(5), std::make_shared<document::AllFields>(), selectDocs("id.user=1234"), newestV(), -1, false);
VisitRecordingUnitDR::VisitedLIDs visited_lids;
// Even though GID filtering is probabilistic when it comes to filtering
// user IDs that cover the 64-bit range, it's fully deterministic when the
@@ -808,7 +806,7 @@ TEST("require that userdoc-constrained selections pre-filter on GIDs") {
TEST("require that attributes are used")
{
UnitDR::reset();
- DocumentIterator itr(bucket(5), document::AllFields(), selectDocs("foo.aa == 45"), docV(), -1, false);
+ DocumentIterator itr(bucket(5), std::make_shared<document::AllFields>(), selectDocs("foo.aa == 45"), docV(), -1, false);
itr.add(doc_with_attr_fields("id:ns:foo::xx1", Timestamp(1), bucket(5),
27, 28, 27, 2.7, 2.8, "x27", "x28"));
itr.add(doc_with_attr_fields("id:ns:foo::xx2", Timestamp(2), bucket(5),
@@ -838,7 +836,7 @@ TEST("require that attributes are used")
TEST_DO(checkEntry(res, 0, expected1, Timestamp(2)));
TEST_DO(checkEntry(res, 1, expected2, Timestamp(4)));
- DocumentIterator itr2(bucket(5), document::AllFields(), selectDocs("foo.dd == 4.5"), docV(), -1, false);
+ DocumentIterator itr2(bucket(5), std::make_shared<document::AllFields>(), selectDocs("foo.dd == 4.5"), docV(), -1, false);
itr2.add(doc_with_attr_fields("id:ns:foo::xx5", Timestamp(5), bucket(5),
27, 28, 27, 2.7, 2.8, "x27", "x28"));
itr2.add(doc_with_attr_fields("id:ns:foo::xx6", Timestamp(6), bucket(5),
@@ -868,7 +866,7 @@ TEST("require that attributes are used")
TEST_DO(checkEntry(res2, 0, expected3, Timestamp(6)));
TEST_DO(checkEntry(res2, 1, expected4, Timestamp(8)));
- DocumentIterator itr3(bucket(5), document::AllFields(), selectDocs("foo.ss == \"x45\""), docV(), -1, false);
+ DocumentIterator itr3(bucket(5), std::make_shared<document::AllFields>(), selectDocs("foo.ss == \"x45\""), docV(), -1, false);
itr3.add(doc_with_attr_fields("id:ns:foo::xx9", Timestamp(9), bucket(5),
27, 28, 27, 2.7, 2.8, "x27", "x28"));
itr3.add(doc_with_attr_fields("id:ns:foo::xx10", Timestamp(10), bucket(5),
diff --git a/searchcore/src/tests/proton/persistenceengine/persistenceengine_test.cpp b/searchcore/src/tests/proton/persistenceengine/persistenceengine_test.cpp
index a31deca5d12..6351c187b45 100644
--- a/searchcore/src/tests/proton/persistenceengine/persistenceengine_test.cpp
+++ b/searchcore/src/tests/proton/persistenceengine/persistenceengine_test.cpp
@@ -693,7 +693,7 @@ TEST_F("require that createIterator does", SimpleFixture) {
storage::spi::LoadType loadType(0, "default");
Context context(loadType, storage::spi::Priority(0), storage::spi::Trace::TraceLevel(0));
CreateIteratorResult result =
- f.engine.createIterator(bucket1, document::AllFields(), selection,
+ f.engine.createIterator(bucket1, std::make_shared<document::AllFields>(), selection,
storage::spi::NEWEST_DOCUMENT_ONLY, context);
EXPECT_FALSE(result.hasError());
EXPECT_TRUE(result.getIteratorId());
@@ -707,10 +707,10 @@ TEST_F("require that iterator ids are unique", SimpleFixture) {
storage::spi::LoadType loadType(0, "default");
Context context(loadType, storage::spi::Priority(0), storage::spi::Trace::TraceLevel(0));
CreateIteratorResult result =
- f.engine.createIterator(bucket1, document::AllFields(), selection,
+ f.engine.createIterator(bucket1, std::make_shared<document::AllFields>(), selection,
storage::spi::NEWEST_DOCUMENT_ONLY, context);
CreateIteratorResult result2 =
- f.engine.createIterator(bucket1, document::AllFields(), selection,
+ f.engine.createIterator(bucket1, std::make_shared<document::AllFields>(), selection,
storage::spi::NEWEST_DOCUMENT_ONLY, context);
EXPECT_FALSE(result.hasError());
EXPECT_FALSE(result2.hasError());
@@ -727,7 +727,7 @@ TEST_F("require that iterate requires valid iterator", SimpleFixture) {
EXPECT_EQUAL("Unknown iterator with id 1", it_result.getErrorMessage());
CreateIteratorResult result =
- f.engine.createIterator(bucket1, document::AllFields(), selection,
+ f.engine.createIterator(bucket1, std::make_shared<document::AllFields>(), selection,
storage::spi::NEWEST_DOCUMENT_ONLY, context);
EXPECT_TRUE(result.getIteratorId());
@@ -743,7 +743,7 @@ TEST_F("require that iterate returns documents", SimpleFixture) {
Context context(loadType, storage::spi::Priority(0), storage::spi::Trace::TraceLevel(0));
uint64_t max_size = 1024;
CreateIteratorResult result =
- f.engine.createIterator(bucket1, document::AllFields(), selection,
+ f.engine.createIterator(bucket1, std::make_shared<document::AllFields>(), selection,
storage::spi::NEWEST_DOCUMENT_ONLY, context);
EXPECT_TRUE(result.getIteratorId());
@@ -758,7 +758,7 @@ TEST_F("require that destroyIterator prevents iteration", SimpleFixture) {
storage::spi::LoadType loadType(0, "default");
Context context(loadType, storage::spi::Priority(0), storage::spi::Trace::TraceLevel(0));
CreateIteratorResult create_result =
- f.engine.createIterator(bucket1, document::AllFields(), selection,
+ f.engine.createIterator(bucket1, std::make_shared<document::AllFields>(), selection,
storage::spi::NEWEST_DOCUMENT_ONLY, context);
EXPECT_TRUE(create_result.getIteratorId());
@@ -779,7 +779,7 @@ TEST_F("require that buckets are frozen during iterator life", SimpleFixture) {
storage::spi::LoadType loadType(0, "default");
Context context(loadType, storage::spi::Priority(0), storage::spi::Trace::TraceLevel(0));
CreateIteratorResult create_result =
- f.engine.createIterator(bucket1, document::AllFields(), selection,
+ f.engine.createIterator(bucket1, std::make_shared<document::AllFields>(), selection,
storage::spi::NEWEST_DOCUMENT_ONLY, context);
EXPECT_TRUE(f.hset.handler1.isFrozen(bucket1));
EXPECT_TRUE(f.hset.handler2.isFrozen(bucket1));
diff --git a/searchcore/src/vespa/searchcore/proton/persistenceengine/document_iterator.cpp b/searchcore/src/vespa/searchcore/proton/persistenceengine/document_iterator.cpp
index fcb9a672039..65a19a72fb9 100644
--- a/searchcore/src/vespa/searchcore/proton/persistenceengine/document_iterator.cpp
+++ b/searchcore/src/vespa/searchcore/proton/persistenceengine/document_iterator.cpp
@@ -66,7 +66,7 @@ DocumentIterator::checkMeta(const search::DocumentMetaData &meta) const
}
DocumentIterator::DocumentIterator(const storage::spi::Bucket &bucket,
- const document::FieldSet& fields,
+ document::FieldSet::SP fields,
const storage::spi::Selection &selection,
storage::spi::IncludedVersions versions,
ssize_t defaultSerializedSize,
@@ -75,10 +75,10 @@ DocumentIterator::DocumentIterator(const storage::spi::Bucket &bucket,
: _bucket(bucket),
_selection(selection),
_versions(versions),
- _fields(fields.clone()),
+ _fields(std::move(fields)),
_defaultSerializedSize((readConsistency == ReadConsistency::WEAK) ? defaultSerializedSize : -1),
_readConsistency(readConsistency),
- _metaOnly(fields.getType() == document::FieldSet::Type::NONE),
+ _metaOnly(_fields->getType() == document::FieldSet::Type::NONE),
_ignoreMaxBytes((readConsistency == ReadConsistency::WEAK) && ignoreMaxBytes),
_fetchedData(false),
_sources(),
diff --git a/searchcore/src/vespa/searchcore/proton/persistenceengine/document_iterator.h b/searchcore/src/vespa/searchcore/proton/persistenceengine/document_iterator.h
index 67242e8220f..4ae3839bfe8 100644
--- a/searchcore/src/vespa/searchcore/proton/persistenceengine/document_iterator.h
+++ b/searchcore/src/vespa/searchcore/proton/persistenceengine/document_iterator.h
@@ -19,7 +19,7 @@ private:
const storage::spi::Bucket _bucket;;
const storage::spi::Selection _selection;
const storage::spi::IncludedVersions _versions;
- const document::FieldSet::UP _fields;
+ const document::FieldSet::SP _fields;
const ssize_t _defaultSerializedSize;
const ReadConsistency _readConsistency;
const bool _metaOnly;
@@ -35,7 +35,7 @@ private:
bool isWeakRead() const { return _readConsistency == ReadConsistency::WEAK; }
public:
- DocumentIterator(const storage::spi::Bucket &bucket, const document::FieldSet& fields,
+ DocumentIterator(const storage::spi::Bucket &bucket, document::FieldSet::SP fields,
const storage::spi::Selection &selection, storage::spi::IncludedVersions versions,
ssize_t defaultSerializedSize, bool ignoreMaxBytes,
ReadConsistency readConsistency=ReadConsistency::STRONG);
diff --git a/searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.cpp b/searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.cpp
index b5166276e4a..14a56e7e0bb 100644
--- a/searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.cpp
+++ b/searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.cpp
@@ -452,13 +452,13 @@ PersistenceEngine::get(const Bucket& b, const document::FieldSet& fields, const
PersistenceEngine::CreateIteratorResult
-PersistenceEngine::createIterator(const Bucket &bucket, const document::FieldSet& fields, const Selection &selection,
- IncludedVersions versions, Context & context)
+PersistenceEngine::createIterator(const Bucket &bucket, FieldSetSP fields, const Selection &selection,
+ IncludedVersions versions, Context &context)
{
std::shared_lock<std::shared_timed_mutex> rguard(_rwMutex);
HandlerSnapshot snapshot = getHandlerSnapshot(rguard, bucket.getBucketSpace());
- auto entry = std::make_unique<IteratorEntry>(context.getReadConsistency(), bucket, fields, selection,
+ auto entry = std::make_unique<IteratorEntry>(context.getReadConsistency(), bucket, std::move(fields), selection,
versions, _defaultSerializedSize, _ignoreMaxBytes);
entry->bucket_guards.reserve(snapshot.size());
for (PersistenceHandlerSequence & handlers = snapshot.handlers(); handlers.valid(); handlers.next()) {
diff --git a/searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.h b/searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.h
index 230f8c411aa..a874d91eb20 100644
--- a/searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.h
+++ b/searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.h
@@ -45,10 +45,10 @@ private:
DocumentIterator it;
bool in_use;
std::vector<BucketGuard::UP> bucket_guards;
- IteratorEntry(storage::spi::ReadConsistency readConsistency, const Bucket &b, const document::FieldSet& f,
+ IteratorEntry(storage::spi::ReadConsistency readConsistency, const Bucket &b, FieldSetSP f,
const Selection &s, IncludedVersions v, ssize_t defaultSerializedSize, bool ignoreMaxBytes)
: handler_sequence(),
- it(b, f, s, v, defaultSerializedSize, ignoreMaxBytes, readConsistency),
+ it(b, std::move(f), s, v, defaultSerializedSize, ignoreMaxBytes, readConsistency),
in_use(false),
bucket_guards() {}
};
@@ -105,8 +105,8 @@ public:
void removeAsync(const Bucket&, Timestamp, const document::DocumentId&, Context&, OperationComplete::UP) override;
void updateAsync(const Bucket&, Timestamp, storage::spi::DocumentUpdateSP, Context&, OperationComplete::UP) override;
GetResult get(const Bucket&, const document::FieldSet&, const document::DocumentId&, Context&) const override;
- CreateIteratorResult createIterator(const Bucket&, const document::FieldSet&, const Selection&,
- IncludedVersions, Context&) override;
+ CreateIteratorResult
+ createIterator(const Bucket &bucket, FieldSetSP, const Selection &, IncludedVersions, Context &context) override;
IterateResult iterate(IteratorId, uint64_t maxByteSize, Context&) const override;
Result destroyIterator(IteratorId, Context&) override;
diff --git a/storage/src/tests/common/teststorageapp.cpp b/storage/src/tests/common/teststorageapp.cpp
index 9fcf1049e1b..1847de0e84f 100644
--- a/storage/src/tests/common/teststorageapp.cpp
+++ b/storage/src/tests/common/teststorageapp.cpp
@@ -171,19 +171,17 @@ TestServiceLayerApp::TestServiceLayerApp(DiskCount dc, NodeIndex index,
assert(dc > 0);
}
-TestServiceLayerApp::~TestServiceLayerApp() {}
+TestServiceLayerApp::~TestServiceLayerApp() = default;
void
TestServiceLayerApp::setupDummyPersistence()
{
- spi::PersistenceProvider::UP provider(new spi::dummy::DummyPersistence(
- getTypeRepo(), _compReg.getDiskCount()));
+ auto provider = std::make_unique<spi::dummy::DummyPersistence>(getTypeRepo(), _compReg.getDiskCount());
setPersistenceProvider(std::move(provider));
}
void
-TestServiceLayerApp::setPersistenceProvider(
- spi::PersistenceProvider::UP provider)
+TestServiceLayerApp::setPersistenceProvider(PersistenceProviderUP provider)
{
_partitions = provider->getPartitionStates().getList();
assert(spi::PartitionId(_compReg.getDiskCount()) == _partitions.size());
diff --git a/storage/src/tests/common/teststorageapp.h b/storage/src/tests/common/teststorageapp.h
index e567206c371..218e7352f04 100644
--- a/storage/src/tests/common/teststorageapp.h
+++ b/storage/src/tests/common/teststorageapp.h
@@ -107,8 +107,9 @@ private:
class TestServiceLayerApp : public TestStorageApp
{
+ using PersistenceProviderUP = std::unique_ptr<spi::PersistenceProvider>;
ServiceLayerComponentRegisterImpl& _compReg;
- spi::PersistenceProvider::UP _persistenceProvider;
+ PersistenceProviderUP _persistenceProvider;
spi::PartitionStateList _partitions;
public:
@@ -118,7 +119,7 @@ public:
~TestServiceLayerApp();
void setupDummyPersistence();
- void setPersistenceProvider(spi::PersistenceProvider::UP);
+ void setPersistenceProvider(PersistenceProviderUP);
ServiceLayerComponentRegisterImpl& getComponentRegister() { return _compReg; }
diff --git a/storage/src/tests/persistence/common/persistenceproviderwrapper.cpp b/storage/src/tests/persistence/common/persistenceproviderwrapper.cpp
index dd9ce6e6cba..67a1c41a9ef 100644
--- a/storage/src/tests/persistence/common/persistenceproviderwrapper.cpp
+++ b/storage/src/tests/persistence/common/persistenceproviderwrapper.cpp
@@ -147,11 +147,9 @@ PersistenceProviderWrapper::get(const spi::Bucket& bucket,
}
spi::CreateIteratorResult
-PersistenceProviderWrapper::createIterator(const spi::Bucket& bucket,
- const document::FieldSet& fields,
- const spi::Selection& sel,
+PersistenceProviderWrapper::createIterator(const spi::Bucket &bucket, FieldSetSP fields, const spi::Selection &sel,
spi::IncludedVersions versions,
- spi::Context& context)
+ spi::Context &context)
{
// TODO: proper printing of FieldSet and Selection
diff --git a/storage/src/tests/persistence/common/persistenceproviderwrapper.h b/storage/src/tests/persistence/common/persistenceproviderwrapper.h
index 21e5d8016aa..75712750d68 100644
--- a/storage/src/tests/persistence/common/persistenceproviderwrapper.h
+++ b/storage/src/tests/persistence/common/persistenceproviderwrapper.h
@@ -100,8 +100,9 @@ public:
spi::UpdateResult update(const spi::Bucket&, spi::Timestamp, spi::DocumentUpdateSP, spi::Context&) override;
spi::GetResult get(const spi::Bucket&, const document::FieldSet&, const spi::DocumentId&, spi::Context&) const override;
- spi::CreateIteratorResult createIterator(const spi::Bucket&, const document::FieldSet&, const spi::Selection&,
- spi::IncludedVersions versions, spi::Context&) override;
+ spi::CreateIteratorResult
+ createIterator(const spi::Bucket &bucket, FieldSetSP, const spi::Selection &, spi::IncludedVersions versions,
+ spi::Context &context) override;
spi::IterateResult iterate(spi::IteratorId, uint64_t maxByteSize, spi::Context&) const override;
spi::Result destroyIterator(spi::IteratorId, spi::Context&) override;
diff --git a/storage/src/tests/persistence/filestorage/mergeblockingtest.cpp b/storage/src/tests/persistence/filestorage/mergeblockingtest.cpp
index d9582cec585..c73ae7e506c 100644
--- a/storage/src/tests/persistence/filestorage/mergeblockingtest.cpp
+++ b/storage/src/tests/persistence/filestorage/mergeblockingtest.cpp
@@ -15,9 +15,7 @@ namespace storage {
struct MergeBlockingTest : public FileStorTestFixture {
void setupDisks() {
FileStorTestFixture::setupPersistenceThreads(1);
- _node->setPersistenceProvider(
- spi::PersistenceProvider::UP(
- new spi::dummy::DummyPersistence(_node->getTypeRepo(), 1)));
+ _node->setPersistenceProvider(std::make_unique<spi::dummy::DummyPersistence>(_node->getTypeRepo(), 1));
}
void SetUp() override;
diff --git a/storage/src/tests/persistence/filestorage/operationabortingtest.cpp b/storage/src/tests/persistence/filestorage/operationabortingtest.cpp
index 93c484368de..7810a595012 100644
--- a/storage/src/tests/persistence/filestorage/operationabortingtest.cpp
+++ b/storage/src/tests/persistence/filestorage/operationabortingtest.cpp
@@ -77,18 +77,19 @@ spi::LoadType defaultLoadType(0, "default");
}
struct OperationAbortingTest : FileStorTestFixture {
- spi::PersistenceProvider::UP _dummyProvider;
- BlockingMockProvider* _blockingProvider;
+ std::unique_ptr<spi::dummy::DummyPersistence> _dummyProvider;
+ BlockingMockProvider * _blockingProvider;
std::unique_ptr<vespalib::Barrier> _queueBarrier;
std::unique_ptr<vespalib::Barrier> _completionBarrier;
void setupProviderAndBarriers(uint32_t queueBarrierThreads) {
FileStorTestFixture::setupPersistenceThreads(1);
- _dummyProvider.reset(new spi::dummy::DummyPersistence(_node->getTypeRepo(), 1));
- _queueBarrier.reset(new vespalib::Barrier(queueBarrierThreads));
- _completionBarrier.reset(new vespalib::Barrier(2));
- _blockingProvider = new BlockingMockProvider(*_dummyProvider, *_queueBarrier, *_completionBarrier);
- _node->setPersistenceProvider(spi::PersistenceProvider::UP(_blockingProvider));
+ _dummyProvider = std::make_unique<spi::dummy::DummyPersistence>(_node->getTypeRepo(), 1);
+ _queueBarrier = std::make_unique<vespalib::Barrier>(queueBarrierThreads);
+ _completionBarrier = std::make_unique<vespalib::Barrier>(2);
+ auto blockingProvider = std::make_unique<BlockingMockProvider>(*_dummyProvider, *_queueBarrier, *_completionBarrier);
+ _blockingProvider = blockingProvider.get();
+ _node->setPersistenceProvider(std::move(blockingProvider));
}
void validateReplies(DummyStorageLink& link, size_t repliesTotal,
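Reviewer note: the fixture now owns the wrapped provider through a unique_ptr and only keeps a non-owning BlockingMockProvider* for assertions, so the raw pointer must be taken before ownership is moved into the node. Condensed sketch of that pattern, names as in the hunk above:

    auto blocking = std::make_unique<BlockingMockProvider>(*_dummyProvider, *_queueBarrier, *_completionBarrier);
    BlockingMockProvider *observer = blocking.get();    // capture the observer first...
    _node->setPersistenceProvider(std::move(blocking)); // ...then hand ownership to the node
    // 'blocking' is null from here on; 'observer' stays valid only as long as the node keeps the provider.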
diff --git a/storage/src/tests/persistence/persistencetestutils.cpp b/storage/src/tests/persistence/persistencetestutils.cpp
index 504767e68c7..f50fbb0c8e8 100644
--- a/storage/src/tests/persistence/persistencetestutils.cpp
+++ b/storage/src/tests/persistence/persistencetestutils.cpp
@@ -231,9 +231,9 @@ PersistenceTestUtils::doGetOnDisk(
document::DocumentUpdate::SP
PersistenceTestUtils::createBodyUpdate(const document::DocumentId& docId, const document::FieldValue& updateValue)
{
- const DocumentType* docType(_env->_component.getTypeRepo()->getDocumentType("testdoctype1"));
- document::DocumentUpdate::SP update(new document::DocumentUpdate(*_env->_component.getTypeRepo(), *docType, docId));
- std::shared_ptr<document::AssignValueUpdate> assignUpdate(new document::AssignValueUpdate(updateValue));
+ const DocumentType* docType(getTypeRepo()->getDocumentType("testdoctype1"));
+ auto update = std::make_shared<document::DocumentUpdate>(*getTypeRepo(), *docType, docId);
+ auto assignUpdate = std::make_shared<document::AssignValueUpdate>(updateValue);
document::FieldUpdate fieldUpdate(docType->getField("content"));
fieldUpdate.addUpdate(*assignUpdate);
update->addUpdate(fieldUpdate);
@@ -243,9 +243,9 @@ PersistenceTestUtils::createBodyUpdate(const document::DocumentId& docId, const
document::DocumentUpdate::SP
PersistenceTestUtils::createHeaderUpdate(const document::DocumentId& docId, const document::FieldValue& updateValue)
{
- const DocumentType* docType(_env->_component.getTypeRepo()->getDocumentType("testdoctype1"));
- document::DocumentUpdate::SP update(new document::DocumentUpdate(*_env->_component.getTypeRepo(), *docType, docId));
- std::shared_ptr<document::AssignValueUpdate> assignUpdate(new document::AssignValueUpdate(updateValue));
+ const DocumentType* docType(getTypeRepo()->getDocumentType("testdoctype1"));
+ auto update = std::make_shared<document::DocumentUpdate>(*getTypeRepo(), *docType, docId);
+ auto assignUpdate = std::make_shared<document::AssignValueUpdate>(updateValue);
document::FieldUpdate fieldUpdate(docType->getField("headerval"));
fieldUpdate.addUpdate(*assignUpdate);
update->addUpdate(fieldUpdate);
@@ -253,8 +253,7 @@ PersistenceTestUtils::createHeaderUpdate(const document::DocumentId& docId, cons
}
uint16_t
-PersistenceTestUtils::getDiskFromBucketDatabaseIfUnset(const document::Bucket& bucket,
- uint16_t disk)
+PersistenceTestUtils::getDiskFromBucketDatabaseIfUnset(const document::Bucket& bucket, uint16_t disk)
{
if (disk == 0xffff) {
StorBucketDatabase::WrappedEntry entry(
@@ -342,7 +341,7 @@ PersistenceTestUtils::clearBody(document::Document& doc)
//doc->getBody().clear();
vespalib::nbostream stream;
doc.serializeHeader(stream);
- doc.deserialize(*_env->_component.getTypeRepo(), stream);
+ doc.deserialize(*getTypeRepo(), stream);
}
document::Document::UP
diff --git a/storage/src/tests/persistence/persistencetestutils.h b/storage/src/tests/persistence/persistencetestutils.h
index 6cee3b79ab8..3d25a205017 100644
--- a/storage/src/tests/persistence/persistencetestutils.h
+++ b/storage/src/tests/persistence/persistencetestutils.h
@@ -101,7 +101,7 @@ public:
FileStorHandler& fsHandler() { return *_env->_handler; }
FileStorMetrics& metrics() { return _env->_metrics; }
MessageKeeper& messageKeeper() { return _env->_messageKeeper; }
- std::shared_ptr<const document::DocumentTypeRepo> getTypeRepo() { return _env->_component.getTypeRepo(); }
+ std::shared_ptr<const document::DocumentTypeRepo> getTypeRepo() { return _env->_component.getTypeRepo()->documentTypeRepo; }
StorageComponent& getComponent() { return _env->_component; }
TestServiceLayerApp& getNode() { return _env->_node; }
diff --git a/storage/src/vespa/storage/common/storagecomponent.cpp b/storage/src/vespa/storage/common/storagecomponent.cpp
index 21a4b8eea64..3846fe3a9c0 100644
--- a/storage/src/vespa/storage/common/storagecomponent.cpp
+++ b/storage/src/vespa/storage/common/storagecomponent.cpp
@@ -2,17 +2,22 @@
#include "storagecomponent.h"
#include <vespa/storage/storageserver/prioritymapper.h>
-
#include <vespa/vespalib/util/exceptions.h>
#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/vdslib/distribution/distribution.h>
+#include <vespa/document/fieldset/fieldsetrepo.h>
namespace storage {
+StorageComponent::Repos::Repos(std::shared_ptr<const document::DocumentTypeRepo> repo)
+ : documentTypeRepo(std::move(repo)),
+ fieldSetRepo(std::make_shared<document::FieldSetRepo>(*documentTypeRepo))
+{}
+
+StorageComponent::Repos::~Repos() = default;
+
// Defined in cpp file to allow unique pointers of unknown type in header.
-StorageComponent::~StorageComponent()
-{
-}
+StorageComponent::~StorageComponent() = default;
void
StorageComponent::setNodeInfo(vespalib::stringref clusterName,
@@ -26,10 +31,11 @@ StorageComponent::setNodeInfo(vespalib::stringref clusterName,
}
void
-StorageComponent::setDocumentTypeRepo(DocumentTypeRepoSP repo)
+StorageComponent::setDocumentTypeRepo(std::shared_ptr<const document::DocumentTypeRepo> docTypeRepo)
{
+ auto repo = std::make_shared<Repos>(std::move(docTypeRepo));
std::lock_guard guard(_lock);
- _docTypeRepo = repo;
+ _repos = std::move(repo);
}
void
@@ -78,7 +84,7 @@ StorageComponent::StorageComponent(StorageComponentRegister& compReg,
_clusterName(),
_nodeType(nullptr),
_index(0),
- _docTypeRepo(),
+ _repos(),
_loadTypes(),
_priorityMapper(new PriorityMapper),
_bucketIdFactory(),
@@ -116,11 +122,11 @@ StorageComponent::getPriority(const documentapi::LoadType& lt) const
return _priorityMapper->getPriority(lt);
}
-StorageComponent::DocumentTypeRepoSP
+std::shared_ptr<StorageComponent::Repos>
StorageComponent::getTypeRepo() const
{
std::lock_guard guard(_lock);
- return _docTypeRepo;
+ return _repos;
}
StorageComponent::LoadTypeSetSP
diff --git a/storage/src/vespa/storage/common/storagecomponent.h b/storage/src/vespa/storage/common/storagecomponent.h
index 821cd43f21d..e0b1dc74d7f 100644
--- a/storage/src/vespa/storage/common/storagecomponent.h
+++ b/storage/src/vespa/storage/common/storagecomponent.h
@@ -42,6 +42,7 @@ namespace vespa::config::content::core::internal {
}
namespace document {
class DocumentTypeRepo;
+ class FieldSetRepo;
}
namespace documentapi {
class LoadType;
@@ -58,9 +59,14 @@ struct StorageComponentRegister;
class StorageComponent : public framework::Component {
public:
+ struct Repos {
+ explicit Repos(std::shared_ptr<const document::DocumentTypeRepo> repo);
+ ~Repos();
+ const std::shared_ptr<const document::DocumentTypeRepo> documentTypeRepo;
+ const std::shared_ptr<const document::FieldSetRepo> fieldSetRepo;
+ };
using UP = std::unique_ptr<StorageComponent>;
using PriorityConfig = vespa::config::content::core::internal::InternalStorPrioritymappingType;
- using DocumentTypeRepoSP = std::shared_ptr<const document::DocumentTypeRepo>;
using LoadTypeSetSP = std::shared_ptr<documentapi::LoadTypeSet>;
using DistributionSP = std::shared_ptr<lib::Distribution>;
@@ -68,9 +74,7 @@ public:
* Node type is supposed to be set immediately, and never be updated.
* Thus it does not need to be threadsafe. Should never be used before set.
*/
- void setNodeInfo(vespalib::stringref clusterName,
- const lib::NodeType& nodeType,
- uint16_t index);
+ void setNodeInfo(vespalib::stringref clusterName, const lib::NodeType& nodeType, uint16_t index);
/**
* Node state updater is supposed to be set immediately, and never be
@@ -78,14 +82,14 @@ public:
* before set.
*/
void setNodeStateUpdater(NodeStateUpdater& updater);
- void setDocumentTypeRepo(DocumentTypeRepoSP);
+ void setDocumentTypeRepo(std::shared_ptr<const document::DocumentTypeRepo>);
void setLoadTypes(LoadTypeSetSP);
void setPriorityConfig(const PriorityConfig&);
void setBucketIdFactory(const document::BucketIdFactory&);
void setDistribution(DistributionSP);
StorageComponent(StorageComponentRegister&, vespalib::stringref name);
- virtual ~StorageComponent();
+ ~StorageComponent() override;
vespalib::string getClusterName() const { return _clusterName; }
const lib::NodeType& getNodeType() const { return *_nodeType; }
@@ -94,7 +98,7 @@ public:
vespalib::string getIdentity() const;
- DocumentTypeRepoSP getTypeRepo() const;
+ std::shared_ptr<Repos> getTypeRepo() const;
LoadTypeSetSP getLoadTypes() const;
const document::BucketIdFactory& getBucketIdFactory() const
{ return _bucketIdFactory; }
@@ -106,7 +110,8 @@ private:
vespalib::string _clusterName;
const lib::NodeType* _nodeType;
uint16_t _index;
- DocumentTypeRepoSP _docTypeRepo;
+ std::shared_ptr<Repos> _repos;
+ // TODO: Move _loadTypes and _distribution into _repos so the lock is only taken once and only one shared_ptr is copied.
LoadTypeSetSP _loadTypes;
std::unique_ptr<PriorityMapper> _priorityMapper;
document::BucketIdFactory _bucketIdFactory;
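Reviewer note: getTypeRepo() now hands out a Repos snapshot that bundles the document type repo with the FieldSetRepo derived from it, so callers take the lock once and copy a single shared_ptr. A minimal sketch of the new access pattern, assuming a StorageComponent reference named component:

    std::shared_ptr<StorageComponent::Repos> repos = component.getTypeRepo();
    const document::DocumentTypeRepo &docTypes = *repos->documentTypeRepo;
    const document::FieldSetRepo &fieldSets = *repos->fieldSetRepo;
    (void) fieldSets; // available alongside the type repo without a second lookup

    // Selection parsing, as updated elsewhere in this patch:
    document::select::Parser parser(docTypes, component.getBucketIdFactory());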
diff --git a/storage/src/vespa/storage/config/distributorconfiguration.cpp b/storage/src/vespa/storage/config/distributorconfiguration.cpp
index 0c9988421a3..aa606cdc8b9 100644
--- a/storage/src/vespa/storage/config/distributorconfiguration.cpp
+++ b/storage/src/vespa/storage/config/distributorconfiguration.cpp
@@ -70,7 +70,7 @@ DistributorConfiguration::containsTimeStatement(const std::string& documentSelec
{
TimeVisitor visitor;
try {
- document::select::Parser parser(*_component.getTypeRepo(), _component.getBucketIdFactory());
+ document::select::Parser parser(*_component.getTypeRepo()->documentTypeRepo, _component.getBucketIdFactory());
std::unique_ptr<document::select::Node> node = parser.parse(documentSelection);
node->visit(visitor);
diff --git a/storage/src/vespa/storage/distributor/distributor.cpp b/storage/src/vespa/storage/distributor/distributor.cpp
index c74d4135556..cfd8d7f1753 100644
--- a/storage/src/vespa/storage/distributor/distributor.cpp
+++ b/storage/src/vespa/storage/distributor/distributor.cpp
@@ -108,8 +108,7 @@ Distributor::Distributor(DistributorComponentRegister& compReg,
_must_send_updated_host_info(false)
{
_component.registerMetric(*_metrics);
- _component.registerMetricUpdateHook(_metricUpdateHook,
- framework::SecondTime(0));
+ _component.registerMetricUpdateHook(_metricUpdateHook, framework::SecondTime(0));
_distributorStatusDelegate.registerStatusPage();
_bucketDBStatusDelegate.registerStatusPage();
hostInfoReporterRegistrar.registerReporter(&_hostInfoReporter);
diff --git a/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp
index ca1b6f266d6..4c762cf4c23 100644
--- a/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp
@@ -38,8 +38,7 @@ RemoveLocationOperation::getBucketId(
DistributorComponent& manager,
const api::RemoveLocationCommand& cmd, document::BucketId& bid)
{
- std::shared_ptr<const document::DocumentTypeRepo> repo = manager.getTypeRepo();
- document::select::Parser parser(*repo, manager.getBucketIdFactory());
+ document::select::Parser parser(*manager.getTypeRepo()->documentTypeRepo, manager.getBucketIdFactory());
document::BucketSelector bucketSel(manager.getBucketIdFactory());
std::unique_ptr<document::BucketSelector::BucketVector> exprResult
diff --git a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
index 41f452df801..3866ee4e6f7 100644
--- a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
@@ -576,7 +576,7 @@ TwoPhaseUpdateOperation::processAndMatchTasCondition(DistributorMessageSender& s
return true; // No condition; nothing to do here.
}
- document::select::Parser parser(*_manager.getTypeRepo(), _manager.getBucketIdFactory());
+ document::select::Parser parser(*_manager.getTypeRepo()->documentTypeRepo, _manager.getBucketIdFactory());
std::unique_ptr<document::select::Node> selection;
try {
selection = parser.parse(_updateCmd->getCondition().getSelection());
diff --git a/storage/src/vespa/storage/persistence/bucketprocessor.cpp b/storage/src/vespa/storage/persistence/bucketprocessor.cpp
index c88b08612d7..ea09fcfc348 100644
--- a/storage/src/vespa/storage/persistence/bucketprocessor.cpp
+++ b/storage/src/vespa/storage/persistence/bucketprocessor.cpp
@@ -47,11 +47,11 @@ BucketProcessor::iterateAll(spi::PersistenceProvider& provider,
spi::Selection sel
= spi::Selection(spi::DocumentSelection(documentSelection));
spi::CreateIteratorResult createIterResult(provider.createIterator(
- bucket,
- document::AllFields(),
- sel,
- versions,
- context));
+ bucket,
+ std::make_shared<document::AllFields>(),
+ sel,
+ versions,
+ context));
if (createIterResult.getErrorCode() != spi::Result::ErrorType::NONE) {
vespalib::asciistream ss;
diff --git a/storage/src/vespa/storage/persistence/mergehandler.cpp b/storage/src/vespa/storage/persistence/mergehandler.cpp
index 70894858887..612d4545a8a 100644
--- a/storage/src/vespa/storage/persistence/mergehandler.cpp
+++ b/storage/src/vespa/storage/persistence/mergehandler.cpp
@@ -1,6 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
#include "mergehandler.h"
#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/vdslib/distribution/distribution.h>
@@ -14,17 +13,14 @@ LOG_SETUP(".persistence.mergehandler");
namespace storage {
-MergeHandler::MergeHandler(spi::PersistenceProvider& spi,
- PersistenceUtil& env)
+MergeHandler::MergeHandler(spi::PersistenceProvider& spi, PersistenceUtil& env)
: _spi(spi),
_env(env),
_maxChunkSize(env._config.bucketMergeChunkSize)
{
}
-MergeHandler::MergeHandler(spi::PersistenceProvider& spi,
- PersistenceUtil& env,
- uint32_t maxChunkSize)
+MergeHandler::MergeHandler(spi::PersistenceProvider& spi, PersistenceUtil& env, uint32_t maxChunkSize)
: _spi(spi),
_env(env),
_maxChunkSize(maxChunkSize)
@@ -58,9 +54,7 @@ checkResult(const spi::Result& result,
}
void
-checkResult(const spi::Result& result,
- const spi::Bucket& bucket,
- const char* op)
+checkResult(const spi::Result& result, const spi::Bucket& bucket, const char* op)
{
if (result.hasError()) {
vespalib::asciistream ss;
@@ -124,11 +118,11 @@ MergeHandler::populateMetaData(
spi::Selection sel(docSel);
sel.setToTimestamp(spi::Timestamp(maxTimestamp.getTime()));
spi::CreateIteratorResult createIterResult(_spi.createIterator(
- bucket,
- document::NoFields(),
- sel,
- spi::ALL_VERSIONS,
- context));
+ bucket,
+ std::make_shared<document::NoFields>(),
+ sel,
+ spi::ALL_VERSIONS,
+ context));
if (createIterResult.getErrorCode() != spi::Result::ErrorType::NONE) {
std::ostringstream ss;
@@ -142,8 +136,7 @@ MergeHandler::populateMetaData(
IteratorGuard iteratorGuard(_spi, iteratorId, context);
while (true) {
- spi::IterateResult result(
- _spi.iterate(iteratorId, UINT64_MAX, context));
+ spi::IterateResult result(_spi.iterate(iteratorId, UINT64_MAX, context));
if (result.getErrorCode() != spi::Result::ErrorType::NONE) {
std::ostringstream ss;
ss << "Failed to iterate for "
@@ -300,8 +293,7 @@ namespace {
}
int
- countUnfilledEntries(
- const std::vector<api::ApplyBucketDiffCommand::Entry>& diff)
+ countUnfilledEntries(const std::vector<api::ApplyBucketDiffCommand::Entry>& diff)
{
int count = 0;
@@ -323,11 +315,9 @@ namespace {
return value;
}
- api::StorageMessageAddress createAddress(const std::string& clusterName,
- uint16_t node)
+ api::StorageMessageAddress createAddress(const std::string& clusterName, uint16_t node)
{
- return api::StorageMessageAddress(
- clusterName, lib::NodeType::STORAGE, node);
+ return api::StorageMessageAddress(clusterName, lib::NodeType::STORAGE, node);
}
void assertContainedInBucket(const document::DocumentId& docId,
@@ -370,14 +360,11 @@ MergeHandler::fetchLocalData(
alreadyFilled += e._headerBlob.size() + e._bodyBlob.size();
}
}
- uint32_t remainingSize = _maxChunkSize - std::min(_maxChunkSize,
- alreadyFilled);
- LOG(debug, "Diff of %s has already filled %u of max %u bytes, "
- "remaining size to fill is %u",
+ uint32_t remainingSize = _maxChunkSize - std::min(_maxChunkSize, alreadyFilled);
+ LOG(debug, "Diff of %s has already filled %u of max %u bytes, remaining size to fill is %u",
bucket.toString().c_str(), alreadyFilled, _maxChunkSize, remainingSize);
if (remainingSize == 0) {
- LOG(debug,
- "Diff already at max chunk size, not fetching any local data");
+ LOG(debug, "Diff already at max chunk size, not fetching any local data");
return;
}
@@ -387,7 +374,7 @@ MergeHandler::fetchLocalData(
sel.setTimestampSubset(slots);
spi::CreateIteratorResult createIterResult(
_spi.createIterator(bucket,
- document::AllFields(),
+ std::make_shared<document::AllFields>(),
sel,
spi::NEWEST_DOCUMENT_OR_REMOVE,
context));
@@ -409,8 +396,7 @@ MergeHandler::fetchLocalData(
bool fetchedAllLocalData = false;
bool chunkLimitReached = false;
while (true) {
- spi::IterateResult result(
- _spi.iterate(iteratorId, remainingSize, context));
+ spi::IterateResult result(_spi.iterate(iteratorId, remainingSize, context));
if (result.getErrorCode() != spi::Result::ErrorType::NONE) {
std::ostringstream ss;
ss << "Failed to iterate for "
@@ -426,8 +412,7 @@ MergeHandler::fetchLocalData(
{
remainingSize -= list[i]->getSize();
LOG(spam, "Added %s, remainingSize is %u",
- entries.back()->toString().c_str(),
- remainingSize);
+ entries.back()->toString().c_str(), remainingSize);
entries.push_back(std::move(list[i]));
} else {
LOG(spam, "Adding %s would exceed chunk size limit of %u; "
@@ -453,8 +438,7 @@ MergeHandler::fetchLocalData(
docEntry.toString().c_str());
std::vector<api::ApplyBucketDiffCommand::Entry>::iterator iter(
- std::lower_bound(diff.begin(),
- diff.end(),
+ std::lower_bound(diff.begin(), diff.end(),
api::Timestamp(docEntry.getTimestamp()),
DiffEntryTimestampPredicate()));
assert(iter != diff.end());
@@ -566,8 +550,8 @@ MergeHandler::applyDiffLocally(
std::vector<spi::DocEntry::UP> entries;
populateMetaData(bucket, MAX_TIMESTAMP, entries, context);
- std::shared_ptr<const document::DocumentTypeRepo> repo(_env._component.getTypeRepo());
- assert(repo.get() != nullptr);
+ std::shared_ptr<const document::DocumentTypeRepo> repo(_env._component.getTypeRepo()->documentTypeRepo);
+ assert(repo);
uint32_t existingCount = entries.size();
uint32_t i = 0, j = 0;
@@ -727,8 +711,7 @@ MergeHandler::processBucketMerge(const spi::Bucket& bucket, MergeStatus& status,
// If nothing to update, we're done.
if (status.diff.size() == 0) {
- LOG(debug, "Done with merge of %s. No more entries in diff.",
- bucket.toString().c_str());
+ LOG(debug, "Done with merge of %s. No more entries in diff.", bucket.toString().c_str());
return status.reply;
}
@@ -755,10 +738,8 @@ MergeHandler::processBucketMerge(const spi::Bucket& bucket, MergeStatus& status,
? std::numeric_limits<uint32_t>().max()
: _maxChunkSize);
- cmd.reset(new api::ApplyBucketDiffCommand(
- bucket.getBucket(), nodes, maxSize));
- cmd->setAddress(createAddress(_env._component.getClusterName(),
- nodes[1].index));
+ cmd = std::make_shared<api::ApplyBucketDiffCommand>(bucket.getBucket(), nodes, maxSize);
+ cmd->setAddress(createAddress(_env._component.getClusterName(), nodes[1].index));
findCandidates(bucket.getBucketId(),
status,
true,
@@ -798,8 +779,7 @@ MergeHandler::processBucketMerge(const spi::Bucket& bucket, MergeStatus& status,
for (std::map<uint16_t, uint32_t>::const_iterator it = counts.begin();
it != counts.end(); ++it)
{
- if (it->second >= uint32_t(
- _env._config.commonMergeChainOptimalizationMinimumSize)
+ if (it->second >= uint32_t(_env._config.commonMergeChainOptimalizationMinimumSize)
|| counts.size() == 1)
{
LOG(spam, "Sending separate apply bucket diff for path %x "
@@ -832,15 +812,11 @@ MergeHandler::processBucketMerge(const spi::Bucket& bucket, MergeStatus& status,
(_env._config.enableMergeLocalNodeChooseDocsOptimalization
? std::numeric_limits<uint32_t>().max()
: _maxChunkSize);
- cmd.reset(new api::ApplyBucketDiffCommand(
- bucket.getBucket(), nodes, maxSize));
- cmd->setAddress(
- createAddress(_env._component.getClusterName(),
- nodes[1].index));
+ cmd = std::make_shared<api::ApplyBucketDiffCommand>(bucket.getBucket(), nodes, maxSize);
+ cmd->setAddress(createAddress(_env._component.getClusterName(), nodes[1].index));
// Add all the metadata, and thus use big limit. Max
// data to fetch parameter will control amount added.
- findCandidates(bucket.getBucketId(), status, true,
- it->first, newMask, maxSize, *cmd);
+ findCandidates(bucket.getBucketId(), status, true, it->first, newMask, maxSize, *cmd);
break;
}
}
@@ -848,22 +824,17 @@ MergeHandler::processBucketMerge(const spi::Bucket& bucket, MergeStatus& status,
// If we found no group big enough to handle on its own, do a common
// merge to merge the remaining data.
- if (cmd.get() == 0) {
- cmd.reset(new api::ApplyBucketDiffCommand(bucket.getBucket(),
- status.nodeList,
- _maxChunkSize));
- cmd->setAddress(createAddress(_env._component.getClusterName(),
- status.nodeList[1].index));
- findCandidates(bucket.getBucketId(), status, false, 0, 0,
- _maxChunkSize, *cmd);
+ if ( ! cmd ) {
+ cmd = std::make_shared<api::ApplyBucketDiffCommand>(bucket.getBucket(), status.nodeList, _maxChunkSize);
+ cmd->setAddress(createAddress(_env._component.getClusterName(), status.nodeList[1].index));
+ findCandidates(bucket.getBucketId(), status, false, 0, 0, _maxChunkSize, *cmd);
}
cmd->setPriority(status.context.getPriority());
cmd->setTimeout(status.timeout);
if (applyDiffNeedLocalData(cmd->getDiff(), 0, true)) {
framework::MilliSecTimer startTime(_env._component.getClock());
fetchLocalData(bucket, cmd->getLoadType(), cmd->getDiff(), 0, context);
- _env._metrics.merge_handler_metrics.mergeDataReadLatency.addValue(
- startTime.getElapsedTimeAsDouble());
+ _env._metrics.merge_handler_metrics.mergeDataReadLatency.addValue(startTime.getElapsedTimeAsDouble());
}
status.pendingId = cmd->getMsgId();
LOG(debug, "Sending %s", cmd->toString().c_str());
@@ -878,8 +849,7 @@ public:
document::Bucket _bucket;
bool _active;
- MergeStateDeleter(FileStorHandler& handler,
- const document::Bucket& bucket)
+ MergeStateDeleter(FileStorHandler& handler, const document::Bucket& bucket)
: _handler(handler),
_bucket(bucket),
_active(true)
@@ -906,8 +876,7 @@ MergeHandler::handleMergeBucket(api::MergeBucketCommand& cmd, MessageTracker::UP
if (cmd.getNodes().size() < 2) {
LOG(debug, "Attempt to merge a single instance of a bucket");
- tracker->fail(ReturnCode::ILLEGAL_PARAMETERS,
- "Cannot merge a single copy");
+ tracker->fail(ReturnCode::ILLEGAL_PARAMETERS, "Cannot merge a single copy");
return tracker;
}
@@ -954,8 +923,7 @@ MergeHandler::handleMergeBucket(api::MergeBucketCommand& cmd, MessageTracker::UP
auto cmd2 = std::make_shared<api::GetBucketDiffCommand>(bucket.getBucket(), s->nodeList, s->maxTimestamp.getTime());
if (!buildBucketInfoList(bucket, cmd.getLoadType(), s->maxTimestamp, 0, cmd2->getDiff(), tracker->context())) {
LOG(debug, "Bucket non-existing in db. Failing merge.");
- tracker->fail(ReturnCode::BUCKET_DELETED,
- "Bucket not found in buildBucketInfo step");
+ tracker->fail(ReturnCode::BUCKET_DELETED, "Bucket not found in buildBucketInfo step");
return tracker;
}
_env._metrics.merge_handler_metrics.mergeMetadataReadLatency.addValue(s->startTime.getElapsedTimeAsDouble());
@@ -1116,8 +1084,7 @@ MergeHandler::handleGetBucketDiff(api::GetBucketDiffCommand& cmd, MessageTracker
checkResult(_spi.createBucket(bucket, tracker->context()), bucket, "create bucket");
if (_env._fileStorHandler.isMerging(bucket.getBucket())) {
- tracker->fail(ReturnCode::BUSY,
- "A merge is already running on this bucket.");
+ tracker->fail(ReturnCode::BUSY, "A merge is already running on this bucket.");
return tracker;
}
uint8_t index = findOwnIndex(cmd.getNodes(), _env._nodeIndex);
@@ -1130,16 +1097,13 @@ MergeHandler::handleGetBucketDiff(api::GetBucketDiffCommand& cmd, MessageTracker
index, local, tracker->context()))
{
LOG(debug, "Bucket non-existing in db. Failing merge.");
- tracker->fail(ReturnCode::BUCKET_DELETED,
- "Bucket not found in buildBucketInfo step");
+ tracker->fail(ReturnCode::BUCKET_DELETED, "Bucket not found in buildBucketInfo step");
return tracker;
}
if (!mergeLists(remote, local, local)) {
- LOG(error, "Diffing %s found suspect entries.",
- bucket.toString().c_str());
+ LOG(error, "Diffing %s found suspect entries.", bucket.toString().c_str());
}
- _env._metrics.merge_handler_metrics.mergeMetadataReadLatency.addValue(
- startTime.getElapsedTimeAsDouble());
+ _env._metrics.merge_handler_metrics.mergeMetadataReadLatency.addValue(startTime.getElapsedTimeAsDouble());
// If last node in merge chain, we can send reply straight away
if (index + 1u >= cmd.getNodes().size()) {
@@ -1216,24 +1180,21 @@ namespace {
bool operator()(const api::ApplyBucketDiffCommand::Entry& x,
const api::ApplyBucketDiffCommand::Entry& y)
{
- return (x._entry._timestamp
- < y._entry._timestamp);
+ return (x._entry._timestamp < y._entry._timestamp);
}
};
} // End of anonymous namespace
void
-MergeHandler::handleGetBucketDiffReply(api::GetBucketDiffReply& reply,
- MessageSender& sender)
+MergeHandler::handleGetBucketDiffReply(api::GetBucketDiffReply& reply, MessageSender& sender)
{
_env._metrics.getBucketDiffReply.inc();
spi::Bucket bucket(reply.getBucket(), spi::PartitionId(_env._partition));
LOG(debug, "GetBucketDiffReply(%s)", bucket.toString().c_str());
if (!_env._fileStorHandler.isMerging(bucket.getBucket())) {
- LOG(warning, "Got GetBucketDiffReply for %s which we have no "
- "merge state for.",
+ LOG(warning, "Got GetBucketDiffReply for %s which we have no merge state for.",
bucket.toString().c_str());
return;
}
@@ -1387,8 +1348,7 @@ MergeHandler::handleApplyBucketDiff(api::ApplyBucketDiffCommand& cmd, MessageTra
}
void
-MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply,
- MessageSender& sender)
+MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply, MessageSender& sender)
{
_env._metrics.applyBucketDiffReply.inc();
spi::Bucket bucket(reply.getBucket(), spi::PartitionId(_env._partition));
@@ -1396,8 +1356,7 @@ MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply,
LOG(debug, "%s", reply.toString().c_str());
if (!_env._fileStorHandler.isMerging(bucket.getBucket())) {
- LOG(warning, "Got ApplyBucketDiffReply for %s which we have no "
- "merge state for.",
+ LOG(warning, "Got ApplyBucketDiffReply for %s which we have no merge state for.",
bucket.toString().c_str());
return;
}
@@ -1415,25 +1374,19 @@ MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply,
api::ReturnCode returnCode = reply.getResult();
try {
if (reply.getResult().failed()) {
- LOG(debug, "Got failed apply bucket diff reply %s",
- reply.toString().c_str());
+ LOG(debug, "Got failed apply bucket diff reply %s", reply.toString().c_str());
} else {
assert(reply.getNodes().size() >= 2);
uint8_t index = findOwnIndex(reply.getNodes(), _env._nodeIndex);
if (applyDiffNeedLocalData(diff, index, false)) {
framework::MilliSecTimer startTime(_env._component.getClock());
- fetchLocalData(bucket, reply.getLoadType(), diff, index,
- s.context);
- _env._metrics.merge_handler_metrics.mergeDataReadLatency.addValue(
- startTime.getElapsedTimeAsDouble());
+ fetchLocalData(bucket, reply.getLoadType(), diff, index, s.context);
+ _env._metrics.merge_handler_metrics.mergeDataReadLatency.addValue(startTime.getElapsedTimeAsDouble());
}
if (applyDiffHasLocallyNeededData(diff, index)) {
framework::MilliSecTimer startTime(_env._component.getClock());
- api::BucketInfo info(
- applyDiffLocally(bucket, reply.getLoadType(), diff,
- index, s.context));
- _env._metrics.merge_handler_metrics.mergeDataWriteLatency.addValue(
- startTime.getElapsedTimeAsDouble());
+ api::BucketInfo info(applyDiffLocally(bucket, reply.getLoadType(), diff, index, s.context));
+ _env._metrics.merge_handler_metrics.mergeDataWriteLatency.addValue(startTime.getElapsedTimeAsDouble());
} else {
LOG(spam, "Merge(%s): Didn't need fetched data on node %u (%u)",
bucket.toString().c_str(),
@@ -1464,8 +1417,7 @@ MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply,
"Got reply indicating merge cycle did not fix any entries: %s",
reply.toString(true).c_str());
LOG(warning,
- "Merge state for which there was no progress across a "
- "full merge cycle: %s",
+ "Merge state for which there was no progress across a full merge cycle: %s",
s.toString().c_str());
}
@@ -1479,8 +1431,7 @@ MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply,
// We have sent something on and shouldn't reply now.
clearState = false;
} else {
- _env._metrics.merge_handler_metrics.mergeLatencyTotal.addValue(
- s.startTime.getElapsedTimeAsDouble());
+ _env._metrics.merge_handler_metrics.mergeLatencyTotal.addValue(s.startTime.getElapsedTimeAsDouble());
}
}
} else {
@@ -1492,8 +1443,7 @@ MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply,
} catch (std::exception& e) {
_env._fileStorHandler.clearMergeStatus(
bucket.getBucket(),
- api::ReturnCode(api::ReturnCode::INTERNAL_FAILURE,
- e.what()));
+ api::ReturnCode(api::ReturnCode::INTERNAL_FAILURE, e.what()));
throw;
}
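
The mergehandler.cpp hunks above repeatedly swap cmd.reset(new api::ApplyBucketDiffCommand(...)) for std::make_shared, and the null check cmd.get() == 0 for a plain boolean test on the smart pointer. A minimal standalone sketch of that pattern, using a hypothetical Command type rather than the real api::ApplyBucketDiffCommand:

    #include <memory>
    #include <cassert>
    #include <string>

    // Hypothetical stand-in for api::ApplyBucketDiffCommand; illustrative only.
    struct Command {
        std::string bucket;
        explicit Command(std::string b) : bucket(std::move(b)) {}
    };

    int main() {
        std::shared_ptr<Command> cmd;
        // Old style: cmd.reset(new Command("bucket-1")); ... if (cmd.get() == 0)
        // New style: one allocation via make_shared, boolean test on the pointer.
        if (!cmd) {
            cmd = std::make_shared<Command>("bucket-1");
        }
        assert(cmd && cmd->bucket == "bucket-1");
        return 0;
    }

make_shared places the control block and the object in a single allocation, which is the usual reason for preferring it over reset(new ...) when shared ownership is wanted anyway.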
diff --git a/storage/src/vespa/storage/persistence/persistencethread.cpp b/storage/src/vespa/storage/persistence/persistencethread.cpp
index 53e455ea204..2cdb6194b6d 100644
--- a/storage/src/vespa/storage/persistence/persistencethread.cpp
+++ b/storage/src/vespa/storage/persistence/persistencethread.cpp
@@ -286,14 +286,14 @@ PersistenceThread::handleGet(api::GetCommand& cmd, MessageTracker::UP tracker)
tracker->setMetric(metrics);
metrics.request_size.addValue(cmd.getApproxByteSize());
- document::FieldSet::UP fieldSet = document::FieldSetRepo::parse(*_env._component.getTypeRepo(), cmd.getFieldSet());
+ auto fieldSet = _env._component.getTypeRepo()->fieldSetRepo->getFieldSet(cmd.getFieldSet());
tracker->context().setReadConsistency(api_read_consistency_to_spi(cmd.internal_read_consistency()));
spi::GetResult result =
_spi.get(getBucket(cmd.getDocumentId(), cmd.getBucket()), *fieldSet, cmd.getDocumentId(), tracker->context());
if (tracker->checkForError(result)) {
if (!result.hasDocument() && (document::FieldSet::Type::NONE != fieldSet->getType())) {
- _env._metrics.get[cmd.getLoadType()].notFound.inc();
+ metrics.notFound.inc();
}
tracker->setReply(std::make_shared<api::GetReply>(cmd, result.getDocumentPtr(), result.getTimestamp(),
false, result.is_tombstone()));
@@ -455,11 +455,11 @@ MessageTracker::UP
PersistenceThread::handleCreateIterator(CreateIteratorCommand& cmd, MessageTracker::UP tracker)
{
tracker->setMetric(_env._metrics.createIterator);
- document::FieldSet::UP fieldSet = document::FieldSetRepo::parse(*_env._component.getTypeRepo(), cmd.getFields());
+ document::FieldSet::SP fieldSet = _env._component.getTypeRepo()->fieldSetRepo->getFieldSet(cmd.getFields());
tracker->context().setReadConsistency(cmd.getReadConsistency());
spi::CreateIteratorResult result(_spi.createIterator(
- spi::Bucket(cmd.getBucket(), spi::PartitionId(_env._partition)),
- *fieldSet, cmd.getSelection(), cmd.getIncludedVersions(), tracker->context()));
+ spi::Bucket(cmd.getBucket(), spi::PartitionId(_env._partition)),
+ std::move(fieldSet), cmd.getSelection(), cmd.getIncludedVersions(), tracker->context()));
if (tracker->checkForError(result)) {
tracker->setReply(std::make_shared<CreateIteratorReply>(cmd, spi::IteratorId(result.getIteratorId())));
}
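
The handleGet and handleCreateIterator hunks above replace the static document::FieldSetRepo::parse call with a getFieldSet lookup on a repo object reached through getTypeRepo(), returning a shared pointer. A minimal sketch of what such a caching field-set repository could look like; FieldSetRepoSketch and its internals are assumptions for illustration, not the actual document::FieldSetRepo:

    #include <cassert>
    #include <map>
    #include <memory>
    #include <string>

    // Hypothetical FieldSet stand-in; the real document::FieldSet is richer.
    struct FieldSet {
        std::string spec;
        explicit FieldSet(std::string s) : spec(std::move(s)) {}
    };

    // Sketch: parse each field-set spec once, hand out shared pointers afterwards.
    class FieldSetRepoSketch {
        std::map<std::string, std::shared_ptr<const FieldSet>> _cache;
    public:
        std::shared_ptr<const FieldSet> getFieldSet(const std::string& spec) {
            auto it = _cache.find(spec);
            if (it == _cache.end()) {
                it = _cache.emplace(spec, std::make_shared<const FieldSet>(spec)).first;
            }
            return it->second;
        }
    };

    int main() {
        FieldSetRepoSketch repo;
        auto a = repo.getFieldSet("[all]");
        auto b = repo.getFieldSet("[all]");
        assert(a == b);  // the same parsed instance is reused
        return 0;
    }

Looking the field set up in a repo instead of re-parsing the spec string per request avoids repeated parsing on the hot get/iterate paths.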
diff --git a/storage/src/vespa/storage/persistence/persistenceutil.cpp b/storage/src/vespa/storage/persistence/persistenceutil.cpp
index 6605e3f6363..63ac5405fab 100644
--- a/storage/src/vespa/storage/persistence/persistenceutil.cpp
+++ b/storage/src/vespa/storage/persistence/persistenceutil.cpp
@@ -162,7 +162,7 @@ PersistenceUtil::PersistenceUtil(
_nodeIndex(_component.getIndex()),
_metrics(metrics),
_bucketFactory(_component.getBucketIdFactory()),
- _repo(_component.getTypeRepo()),
+ _repo(_component.getTypeRepo()->documentTypeRepo),
_spi(provider)
{
}
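
This hunk, like several others in the diff, dereferences getTypeRepo()->documentTypeRepo, which implies getTypeRepo() now returns a small bundle carrying both the document-type repo and the field-set repo. A minimal sketch of such a bundle under that assumption; ReposSketch and its member names are illustrative only:

    #include <memory>

    // Hypothetical stand-ins for the two repos bundled together.
    struct DocumentTypeRepo {};
    struct FieldSetRepo {};

    // Sketch: the component hands out one shared bundle instead of a bare repo.
    struct ReposSketch {
        std::shared_ptr<const DocumentTypeRepo> documentTypeRepo;
        std::shared_ptr<const FieldSetRepo>     fieldSetRepo;
    };

    int main() {
        auto repos = std::make_shared<ReposSketch>();
        repos->documentTypeRepo = std::make_shared<const DocumentTypeRepo>();
        repos->fieldSetRepo     = std::make_shared<const FieldSetRepo>();
        // Callers pick the piece they need, e.g. repos->documentTypeRepo.
        return (repos->documentTypeRepo && repos->fieldSetRepo) ? 0 : 1;
    }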
diff --git a/storage/src/vespa/storage/persistence/provider_error_wrapper.cpp b/storage/src/vespa/storage/persistence/provider_error_wrapper.cpp
index a5564282d17..0884d807eda 100644
--- a/storage/src/vespa/storage/persistence/provider_error_wrapper.cpp
+++ b/storage/src/vespa/storage/persistence/provider_error_wrapper.cpp
@@ -111,8 +111,8 @@ ProviderErrorWrapper::get(const spi::Bucket& bucket, const document::FieldSet& f
}
spi::CreateIteratorResult
-ProviderErrorWrapper::createIterator(const spi::Bucket& bucket, const document::FieldSet& fieldSet,
- const spi::Selection& selection, spi::IncludedVersions versions, spi::Context& context)
+ProviderErrorWrapper::createIterator(const spi::Bucket &bucket, FieldSetSP fieldSet, const spi::Selection &selection,
+ spi::IncludedVersions versions, spi::Context &context)
{
return checkResult(_impl.createIterator(bucket, fieldSet, selection, versions, context));
}
diff --git a/storage/src/vespa/storage/persistence/provider_error_wrapper.h b/storage/src/vespa/storage/persistence/provider_error_wrapper.h
index 602877e0b02..54abf0e96fb 100644
--- a/storage/src/vespa/storage/persistence/provider_error_wrapper.h
+++ b/storage/src/vespa/storage/persistence/provider_error_wrapper.h
@@ -52,8 +52,9 @@ public:
spi::RemoveResult removeIfFound(const spi::Bucket&, spi::Timestamp, const document::DocumentId&, spi::Context&) override;
spi::UpdateResult update(const spi::Bucket&, spi::Timestamp, spi::DocumentUpdateSP, spi::Context&) override;
spi::GetResult get(const spi::Bucket&, const document::FieldSet&, const document::DocumentId&, spi::Context&) const override;
- spi::CreateIteratorResult createIterator(const spi::Bucket&, const document::FieldSet&, const spi::Selection&,
- spi::IncludedVersions versions, spi::Context&) override;
+ spi::CreateIteratorResult
+ createIterator(const spi::Bucket &bucket, FieldSetSP, const spi::Selection &, spi::IncludedVersions versions,
+ spi::Context &context) override;
spi::IterateResult iterate(spi::IteratorId, uint64_t maxByteSize, spi::Context&) const override;
spi::Result destroyIterator(spi::IteratorId, spi::Context&) override;
spi::Result createBucket(const spi::Bucket&, spi::Context&) override;
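
The createIterator signature above changes from taking const document::FieldSet& to taking a FieldSetSP, so the iterator can share ownership of the field set instead of borrowing it from the caller. A minimal sketch of that ownership shift, with hypothetical FieldSet and Iterator types:

    #include <iostream>
    #include <memory>
    #include <string>

    // Hypothetical types to illustrate why a shared_ptr parameter helps:
    // the iterator can outlive the caller's local field set.
    struct FieldSet { std::string spec; };
    using FieldSetSP = std::shared_ptr<const FieldSet>;

    class Iterator {
        FieldSetSP _fields;  // keeps the field set alive for the iterator's lifetime
    public:
        explicit Iterator(FieldSetSP fields) : _fields(std::move(fields)) {}
        const std::string& spec() const { return _fields->spec; }
    };

    Iterator createIterator(FieldSetSP fieldSet) {
        // Old signature took const FieldSet&, forcing the caller to keep it alive;
        // shared ownership removes that constraint.
        return Iterator(std::move(fieldSet));
    }

    int main() {
        Iterator it = createIterator(std::make_shared<const FieldSet>(FieldSet{"[all]"}));
        std::cout << it.spec() << '\n';
        return 0;
    }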
diff --git a/storage/src/vespa/storage/persistence/testandsethelper.cpp b/storage/src/vespa/storage/persistence/testandsethelper.cpp
index 9232abc5c8a..57586249817 100644
--- a/storage/src/vespa/storage/persistence/testandsethelper.cpp
+++ b/storage/src/vespa/storage/persistence/testandsethelper.cpp
@@ -1,8 +1,8 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
// @author Vegard Sjonfjell
-#include <vespa/storage/persistence/fieldvisitor.h>
-#include <vespa/storage/persistence/testandsethelper.h>
+#include "fieldvisitor.h"
+#include "testandsethelper.h"
#include <vespa/document/select/parser.h>
#include <vespa/document/repo/documenttyperepo.h>
#include <vespa/vespalib/util/stringfmt.h>
@@ -11,19 +11,19 @@ using namespace std::string_literals;
namespace storage {
-void TestAndSetHelper::getDocumentType() {
+void TestAndSetHelper::getDocumentType(const document::DocumentTypeRepo & documentTypeRepo) {
if (!_docId.hasDocType()) {
throw TestAndSetException(api::ReturnCode(api::ReturnCode::ILLEGAL_PARAMETERS, "Document id has no doctype"));
}
- _docTypePtr = _component.getTypeRepo()->getDocumentType(_docId.getDocType());
+ _docTypePtr = documentTypeRepo.getDocumentType(_docId.getDocType());
if (_docTypePtr == nullptr) {
throw TestAndSetException(api::ReturnCode(api::ReturnCode::ILLEGAL_PARAMETERS, "Document type does not exist"));
}
}
-void TestAndSetHelper::parseDocumentSelection() {
- document::select::Parser parser(*_component.getTypeRepo(), _component.getBucketIdFactory());
+void TestAndSetHelper::parseDocumentSelection(const document::DocumentTypeRepo & documentTypeRepo) {
+ document::select::Parser parser(documentTypeRepo, _component.getBucketIdFactory());
try {
_docSelectionUp = parser.parse(_cmd.getCondition().getSelection());
@@ -49,8 +49,9 @@ TestAndSetHelper::TestAndSetHelper(PersistenceThread & thread, const api::TestAn
_docTypePtr(nullptr),
_missingDocumentImpliesMatch(missingDocumentImpliesMatch)
{
- getDocumentType();
- parseDocumentSelection();
+ auto docTypeRepo = _component.getTypeRepo()->documentTypeRepo;
+ getDocumentType(*docTypeRepo);
+ parseDocumentSelection(*docTypeRepo);
}
TestAndSetHelper::~TestAndSetHelper() = default;
diff --git a/storage/src/vespa/storage/persistence/testandsethelper.h b/storage/src/vespa/storage/persistence/testandsethelper.h
index b5fa29d0106..b528b5034f9 100644
--- a/storage/src/vespa/storage/persistence/testandsethelper.h
+++ b/storage/src/vespa/storage/persistence/testandsethelper.h
@@ -28,8 +28,8 @@ class TestAndSetHelper {
std::unique_ptr<document::select::Node> _docSelectionUp;
bool _missingDocumentImpliesMatch;
- void getDocumentType();
- void parseDocumentSelection();
+ void getDocumentType(const document::DocumentTypeRepo & documentTypeRepo);
+ void parseDocumentSelection(const document::DocumentTypeRepo & documentTypeRepo);
spi::GetResult retrieveDocument(const document::FieldSet & fieldSet, spi::Context & context);
public:
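
In the testandsethelper hunks above, getDocumentType and parseDocumentSelection now receive the DocumentTypeRepo as an explicit argument, resolved once in the constructor, instead of reaching through the component on every call. A minimal sketch of that dependency-passing style with hypothetical stand-in types:

    #include <stdexcept>
    #include <string>
    #include <unordered_map>

    // Hypothetical repo stand-in: maps a doctype name to a type descriptor.
    struct DocumentType { std::string name; };

    class DocumentTypeRepo {
        std::unordered_map<std::string, DocumentType> _types;
    public:
        void add(const std::string& n) { _types.emplace(n, DocumentType{n}); }
        const DocumentType* getDocumentType(const std::string& n) const {
            auto it = _types.find(n);
            return it == _types.end() ? nullptr : &it->second;
        }
    };

    // Sketch of the helper style after the change: the repo is resolved once by the
    // caller and passed by reference, rather than fetched inside each helper.
    const DocumentType& resolveDocType(const DocumentTypeRepo& repo, const std::string& docType) {
        const DocumentType* t = repo.getDocumentType(docType);
        if (t == nullptr) {
            throw std::runtime_error("Document type does not exist");
        }
        return *t;
    }

    int main() {
        DocumentTypeRepo repo;
        repo.add("music");
        return resolveDocType(repo, "music").name == "music" ? 0 : 1;
    }

Passing the repo explicitly also keeps the helpers testable without a full component wired up.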
diff --git a/storage/src/vespa/storage/storageserver/communicationmanager.cpp b/storage/src/vespa/storage/storageserver/communicationmanager.cpp
index c0adb01ad47..b51394e2e64 100644
--- a/storage/src/vespa/storage/storageserver/communicationmanager.cpp
+++ b/storage/src/vespa/storage/storageserver/communicationmanager.cpp
@@ -395,10 +395,12 @@ void CommunicationManager::configure(std::unique_ptr<CommunicationManagerConfig>
// Configure messagebus here as we for legacy reasons have
// config here.
+ auto documentTypeRepo = _component.getTypeRepo()->documentTypeRepo;
+ auto loadTypes = _component.getLoadTypes();
_mbus = std::make_unique<mbus::RPCMessageBus>(
mbus::ProtocolSet()
- .add(std::make_shared<documentapi::DocumentProtocol>(*_component.getLoadTypes(), _component.getTypeRepo()))
- .add(std::make_shared<mbusprot::StorageProtocol>(_component.getTypeRepo(), *_component.getLoadTypes())),
+ .add(std::make_shared<documentapi::DocumentProtocol>(*loadTypes, documentTypeRepo))
+ .add(std::make_shared<mbusprot::StorageProtocol>(documentTypeRepo, *loadTypes)),
params,
_configUri);
diff --git a/storage/src/vespa/storage/visiting/visitorthread.cpp b/storage/src/vespa/storage/visiting/visitorthread.cpp
index c6e75735690..73f4a70d80d 100644
--- a/storage/src/vespa/storage/visiting/visitorthread.cpp
+++ b/storage/src/vespa/storage/visiting/visitorthread.cpp
@@ -31,7 +31,7 @@ VisitorThread::Event::Event(Event&& other)
{
}
-VisitorThread::Event::~Event() {}
+VisitorThread::Event::~Event() = default;
VisitorThread::Event&
VisitorThread::Event::operator= (Event&& other)
@@ -44,9 +44,7 @@ VisitorThread::Event::operator= (Event&& other)
return *this;
}
-VisitorThread::Event::Event(
- api::VisitorId visitor,
- const std::shared_ptr<api::StorageMessage>& msg)
+VisitorThread::Event::Event(api::VisitorId visitor, const std::shared_ptr<api::StorageMessage>& msg)
: _visitorId(visitor),
_message(msg),
_timer(),
@@ -54,9 +52,7 @@ VisitorThread::Event::Event(
{
}
-VisitorThread::Event::Event(
- api::VisitorId visitor,
- mbus::Reply::UP reply)
+VisitorThread::Event::Event(api::VisitorId visitor, mbus::Reply::UP reply)
: _visitorId(visitor),
_mbusReply(std::move(reply)),
_timer(),
@@ -331,7 +327,7 @@ VisitorThread::handleNonExistingVisitorCall(const Event& entry,
ReturnCode& code)
{
// Get current time. Set the time that is the oldest still recent.
- framework::SecondTime currentTime(_component.getClock().getTimeInSeconds());;
+ framework::SecondTime currentTime(_component.getClock().getTimeInSeconds());
trimRecentlyCompletedList(currentTime);
// Go through all recent visitors. Ignore request if recent
@@ -435,8 +431,7 @@ VisitorThread::onCreateVisitor(
do {
// If no buckets are specified, fail command
if (cmd->getBuckets().empty()) {
- result = ReturnCode(ReturnCode::ILLEGAL_PARAMETERS,
- "No buckets specified");
+ result = ReturnCode(ReturnCode::ILLEGAL_PARAMETERS, "No buckets specified");
LOG(warning, "CreateVisitor(%s): No buckets specified. Aborting.",
cmd->getInstanceId().c_str());
break;
@@ -480,7 +475,7 @@ VisitorThread::onCreateVisitor(
// Parse document selection
try{
if (!cmd->getDocumentSelection().empty()) {
- std::shared_ptr<const document::DocumentTypeRepo> repo(_component.getTypeRepo());
+ std::shared_ptr<const document::DocumentTypeRepo> repo(_component.getTypeRepo()->documentTypeRepo);
const document::BucketIdFactory& idFactory(_component.getBucketIdFactory());
document::select::Parser parser(*repo, idFactory);
docSelection = parser.parse(cmd->getDocumentSelection());
diff --git a/storageserver/src/vespa/storageserver/app/dummyservicelayerprocess.cpp b/storageserver/src/vespa/storageserver/app/dummyservicelayerprocess.cpp
index b642fa9dc8e..338ae45714e 100644
--- a/storageserver/src/vespa/storageserver/app/dummyservicelayerprocess.cpp
+++ b/storageserver/src/vespa/storageserver/app/dummyservicelayerprocess.cpp
@@ -15,13 +15,13 @@ void
DummyServiceLayerProcess::shutdown()
{
ServiceLayerProcess::shutdown();
- _provider.reset(0);
+ _provider.reset();
}
void
DummyServiceLayerProcess::setupProvider()
{
- _provider.reset(new spi::dummy::DummyPersistence(getTypeRepo()));
+ _provider = std::make_unique<spi::dummy::DummyPersistence>(getTypeRepo());
}
} // storage
diff --git a/storageserver/src/vespa/storageserver/app/dummyservicelayerprocess.h b/storageserver/src/vespa/storageserver/app/dummyservicelayerprocess.h
index 6eb774a131d..dc4e3286d0e 100644
--- a/storageserver/src/vespa/storageserver/app/dummyservicelayerprocess.h
+++ b/storageserver/src/vespa/storageserver/app/dummyservicelayerprocess.h
@@ -12,7 +12,7 @@
namespace storage {
class DummyServiceLayerProcess : public ServiceLayerProcess {
- spi::dummy::DummyPersistence::UP _provider;
+ std::unique_ptr<spi::PersistenceProvider> _provider;
public:
DummyServiceLayerProcess(const config::ConfigUri & configUri);
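
The dummyservicelayerprocess hunks above change the member from spi::dummy::DummyPersistence::UP to std::unique_ptr<spi::PersistenceProvider>, holding the concrete provider through its interface, and replace reset(0) / reset(new ...) with reset() / make_unique. A minimal sketch of that pattern with hypothetical types:

    #include <iostream>
    #include <memory>

    // Hypothetical interface/implementation pair mirroring the unique_ptr-to-interface change.
    struct PersistenceProvider {
        virtual ~PersistenceProvider() = default;
        virtual const char* name() const = 0;
    };

    struct DummyPersistence : PersistenceProvider {
        const char* name() const override { return "dummy"; }
    };

    class Process {
        std::unique_ptr<PersistenceProvider> _provider;  // interface type, not the concrete class
    public:
        void setupProvider() { _provider = std::make_unique<DummyPersistence>(); }
        void shutdown() { _provider.reset(); }           // reset() rather than reset(0)
        const PersistenceProvider* provider() const { return _provider.get(); }
    };

    int main() {
        Process p;
        p.setupProvider();
        std::cout << p.provider()->name() << '\n';
        p.shutdown();
        return p.provider() == nullptr ? 0 : 1;
    }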
diff --git a/streamingvisitors/src/tests/searchvisitor/searchvisitor_test.cpp b/streamingvisitors/src/tests/searchvisitor/searchvisitor_test.cpp
index 18cd7fab2b8..216e02c5edd 100644
--- a/streamingvisitors/src/tests/searchvisitor/searchvisitor_test.cpp
+++ b/streamingvisitors/src/tests/searchvisitor/searchvisitor_test.cpp
@@ -35,7 +35,7 @@ private:
public:
SearchVisitorTest();
- ~SearchVisitorTest();
+ ~SearchVisitorTest() override;
int Main() override;
};
@@ -46,9 +46,9 @@ SearchVisitorTest::SearchVisitorTest() :
{
_componentRegister.setNodeInfo("mycluster", lib::NodeType::STORAGE, 1);
_componentRegister.setClock(_clock);
- StorageComponent::DocumentTypeRepoSP repo(new DocumentTypeRepo(readDocumenttypesConfig(TEST_PATH("cfg/documenttypes.cfg"))));
+ auto repo = std::make_shared<DocumentTypeRepo>(readDocumenttypesConfig(TEST_PATH("cfg/documenttypes.cfg")));
_componentRegister.setDocumentTypeRepo(repo);
- _component.reset(new StorageComponent(_componentRegister, "storage"));
+ _component = std::make_unique<StorageComponent>(_componentRegister, "storage");
}
SearchVisitorTest::~SearchVisitorTest() = default;
@@ -80,8 +80,8 @@ SearchVisitorTest::testCreateSearchVisitor(const vespalib::string & dir, const v
void
SearchVisitorTest::testSearchEnvironment()
{
- EXPECT_TRUE(_env.getVSMAdapter("simple") != NULL);
- EXPECT_TRUE(_env.getRankManager("simple") != NULL);
+ EXPECT_TRUE(_env.getVSMAdapter("simple") != nullptr);
+ EXPECT_TRUE(_env.getRankManager("simple") != nullptr);
}
void
diff --git a/streamingvisitors/src/vespa/searchvisitor/searchvisitor.cpp b/streamingvisitors/src/vespa/searchvisitor/searchvisitor.cpp
index 76ef0f23dd2..cc28c76acce 100644
--- a/streamingvisitors/src/vespa/searchvisitor/searchvisitor.cpp
+++ b/streamingvisitors/src/vespa/searchvisitor/searchvisitor.cpp
@@ -677,7 +677,7 @@ SearchVisitor::setupScratchDocument(const StringFieldIdTMap & fieldsInQuery)
}
// Init based on default document type and mapping from field name to field id
_docTypeMapping.init(_fieldSearchSpecMap.documentTypeMap().begin()->first,
- _fieldsUnion, *_component.getTypeRepo());
+ _fieldsUnion, *_component.getTypeRepo()->documentTypeRepo);
_docTypeMapping.prepareBaseDoc(_fieldPathMap);
}