author     Henning Baldersheim <balder@yahoo-inc.com>   2019-08-16 14:48:11 +0000
committer  Henning Baldersheim <balder@yahoo-inc.com>   2019-08-16 17:22:45 +0000
commit     224b5fbe12ecab75beee6efe24068a9ce7092110 (patch)
tree       3e8c0dd797c583b032b960488b2bf3436ccbd2b9
parent     5fb8e66dbd2d6e02a64a054e147ac7214943d563 (diff)
doc: -> id:
-rw-r--r--  documentapi/src/tests/policyfactory/policyfactory.cpp                          |   2
-rw-r--r--  persistence/src/tests/dummyimpl/dummypersistence_test.cpp                      |  22
-rw-r--r--  searchcore/src/tests/proton/attribute/attribute_test.cpp                       |  34
-rw-r--r--  searchcore/src/tests/proton/common/cachedselect_test.cpp                       |  22
-rw-r--r--  searchcore/src/tests/proton/common/selectpruner_test.cpp                       |  67
-rw-r--r--  searchcore/src/tests/proton/docsummary/docsummary.cpp                          |  59
-rw-r--r--  searchcore/src/tests/proton/docsummary/summaryfieldconverter_test.cpp          |  12
-rw-r--r--  searchcore/src/tests/proton/document_iterator/document_iterator_test.cpp       | 270
-rw-r--r--  searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp        |   8
-rw-r--r--  searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp              |  14
-rw-r--r--  searchcore/src/tests/proton/documentmetastore/documentmetastore_test.cpp       |   4
-rw-r--r--  searchcore/src/tests/proton/feed_and_search/feed_and_search.cpp                |   2
-rw-r--r--  searchcore/src/tests/proton/feedoperation/feedoperation_test.cpp               |   6
-rw-r--r--  searchcore/src/tests/proton/index/fusionrunner_test.cpp                        |   2
-rw-r--r--  searchcore/src/tests/proton/index/index_writer/index_writer_test.cpp           |   2
-rw-r--r--  searchcore/src/tests/proton/index/indexmanager_test.cpp                        |   2
-rw-r--r--  searchcore/src/tests/proton/matching/matching_test.cpp                         |  46
-rw-r--r--  searchcore/src/tests/proton/server/documentretriever_test.cpp                  |   9
-rw-r--r--  searchcore/src/tests/proton/server/feeddebugger_test.cpp                       |  10
-rw-r--r--  searchcore/src/tests/proton/server/feedstates_test.cpp                         |   2
-rw-r--r--  searchcore/src/vespa/searchcore/proton/common/feeddebugger.h                   |   2
-rw-r--r--  searchcore/src/vespa/searchcore/proton/server/documentretrieverbase.cpp        |  14
-rw-r--r--  searchlib/src/tests/diskindex/fusion/fusion_test.cpp                           |   6
-rw-r--r--  searchlib/src/tests/engine/transportserver/transportserver_test.cpp            |   4
-rw-r--r--  searchlib/src/tests/index/docbuilder/docbuilder_test.cpp                       |  16
-rw-r--r--  searchlib/src/tests/memoryindex/document_inverter/document_inverter_test.cpp   |  12
-rw-r--r--  searchlib/src/tests/memoryindex/field_index/field_index_test.cpp               |  18
-rw-r--r--  searchlib/src/tests/memoryindex/field_inverter/field_inverter_test.cpp         |  16
-rw-r--r--  searchlib/src/tests/memoryindex/memory_index/memory_index_test.cpp             |   2
-rw-r--r--  searchlib/src/tests/memoryindex/url_field_inverter/url_field_inverter_test.cpp |   8
-rw-r--r--  storage/src/tests/distributor/pendingmessagetrackertest.cpp                    |   2
-rw-r--r--  storage/src/tests/distributor/putoperationtest.cpp                             | 114
-rw-r--r--  storage/src/tests/distributor/removeoperationtest.cpp                          |  29
-rw-r--r--  storage/src/tests/persistence/filestorage/filestormanagertest.cpp              |   4
-rw-r--r--  storage/src/tests/storageserver/bouncertest.cpp                                |   4
-rw-r--r--  storage/src/tests/storageserver/communicationmanagertest.cpp                   |   6
-rw-r--r--  streamingvisitors/src/tests/hitcollector/hitcollector.cpp                      |   2
37 files changed, 404 insertions, 450 deletions
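
The patch below mechanically rewrites test document IDs from the legacy "doc" scheme to the current "id" scheme. A minimal sketch of the two formats follows; the namespace "ns" and document type "type" are placeholders in the same style the tests use, the header path is assumed, and this code is not part of the patch:

// Sketch only: the shape of the two ID schemes this commit migrates between.
#include <vespa/document/base/documentid.h>
#include <cassert>

int main() {
    // Legacy scheme being removed:  doc:<namespace>:<user-specified key>
    // Current scheme being adopted: id:<namespace>:<document-type>::<user-specified key>
    document::DocumentId newStyle("id:ns:type::some-key");
    assert(newStyle.toString() == "id:ns:type::some-key");
    return 0;
}
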
diff --git a/documentapi/src/tests/policyfactory/policyfactory.cpp b/documentapi/src/tests/policyfactory/policyfactory.cpp
index 41905183928..877ade22e2a 100644
--- a/documentapi/src/tests/policyfactory/policyfactory.cpp
+++ b/documentapi/src/tests/policyfactory/policyfactory.cpp
@@ -60,7 +60,7 @@ MyFactory::createPolicy(const string &param) const
mbus::Message::UP
createMessage()
{
- auto ret = std::make_unique<RemoveDocumentMessage>(document::DocumentId("doc:scheme:"));
+ auto ret = std::make_unique<RemoveDocumentMessage>(document::DocumentId("id:ns:type::"));
ret->getTrace().setLevel(9);
return ret;
}
diff --git a/persistence/src/tests/dummyimpl/dummypersistence_test.cpp b/persistence/src/tests/dummyimpl/dummypersistence_test.cpp
index 637b9ef512a..4ca78181fb8 100644
--- a/persistence/src/tests/dummyimpl/dummypersistence_test.cpp
+++ b/persistence/src/tests/dummyimpl/dummypersistence_test.cpp
@@ -25,9 +25,9 @@ struct Fixture {
}
Fixture() {
- insert(DocumentId("doc:test:3"), Timestamp(3), NONE);
- insert(DocumentId("doc:test:1"), Timestamp(1), NONE);
- insert(DocumentId("doc:test:2"), Timestamp(2), NONE);
+ insert(DocumentId("id:ns:type::test:3"), Timestamp(3), NONE);
+ insert(DocumentId("id:ns:type::test:1"), Timestamp(1), NONE);
+ insert(DocumentId("id:ns:type::test:2"), Timestamp(2), NONE);
}
};
@@ -35,21 +35,21 @@ TEST("require that empty BucketContent behaves") {
BucketContent content;
EXPECT_FALSE(content.hasTimestamp(Timestamp(1)));
EXPECT_FALSE(content.getEntry(Timestamp(1)).get());
- EXPECT_FALSE(content.getEntry(DocumentId("doc:test:1")).get());
+ EXPECT_FALSE(content.getEntry(DocumentId("id:ns:type::test:1")).get());
}
TEST_F("require that BucketContent can retrieve by timestamp", Fixture) {
DocEntry::SP entry = f.content.getEntry(Timestamp(1));
ASSERT_TRUE(entry.get());
ASSERT_TRUE(entry->getDocumentId());
- ASSERT_EQUAL("doc:test:1", entry->getDocumentId()->toString());
+ ASSERT_EQUAL("id:ns:type::test:1", entry->getDocumentId()->toString());
}
TEST_F("require that BucketContent can retrieve by doc id", Fixture) {
- DocEntry::SP entry = f.content.getEntry(DocumentId("doc:test:2"));
+ DocEntry::SP entry = f.content.getEntry(DocumentId("id:ns:type::test:2"));
ASSERT_TRUE(entry.get());
ASSERT_TRUE(entry->getDocumentId());
- ASSERT_EQUAL("doc:test:2", entry->getDocumentId()->toString());
+ ASSERT_EQUAL("id:ns:type::test:2", entry->getDocumentId()->toString());
}
TEST_F("require that BucketContent can check a timestamp", Fixture) {
@@ -64,13 +64,13 @@ TEST_F("require that BucketContent can provide bucket info", Fixture) {
uint32_t lastChecksum = 0;
EXPECT_NOT_EQUAL(lastChecksum, f.content.getBucketInfo().getChecksum());
lastChecksum = f.content.getBucketInfo().getChecksum();
- f.insert(DocumentId("doc:test:3"), Timestamp(4), NONE);
+ f.insert(DocumentId("id:ns:type::test:3"), Timestamp(4), NONE);
EXPECT_NOT_EQUAL(lastChecksum, f.content.getBucketInfo().getChecksum());
lastChecksum = f.content.getBucketInfo().getChecksum();
- f.insert(DocumentId("doc:test:2"), Timestamp(5), REMOVE_ENTRY);
+ f.insert(DocumentId("id:ns:type::test:2"), Timestamp(5), REMOVE_ENTRY);
EXPECT_NOT_EQUAL(lastChecksum, f.content.getBucketInfo().getChecksum());
- f.insert(DocumentId("doc:test:1"), Timestamp(6), REMOVE_ENTRY);
- f.insert(DocumentId("doc:test:3"), Timestamp(7), REMOVE_ENTRY);
+ f.insert(DocumentId("id:ns:type::test:1"), Timestamp(6), REMOVE_ENTRY);
+ f.insert(DocumentId("id:ns:type::test:3"), Timestamp(7), REMOVE_ENTRY);
EXPECT_EQUAL(0u, f.content.getBucketInfo().getChecksum());
}
diff --git a/searchcore/src/tests/proton/attribute/attribute_test.cpp b/searchcore/src/tests/proton/attribute/attribute_test.cpp
index 3734d2fe1dc..3da27cde10e 100644
--- a/searchcore/src/tests/proton/attribute/attribute_test.cpp
+++ b/searchcore/src/tests/proton/attribute/attribute_test.cpp
@@ -204,7 +204,7 @@ TEST_F("require that attribute writer handles put", Fixture)
attribute::ConstCharContent sbuf;
{ // empty document should give default values
EXPECT_EQUAL(1u, a1->getNumDocs());
- f.put(1, *idb.startDocument("doc::1").endDocument(), 1);
+ f.put(1, *idb.startDocument("id:ns:searchdocument::1").endDocument(), 1);
EXPECT_EQUAL(2u, a1->getNumDocs());
EXPECT_EQUAL(2u, a2->getNumDocs());
EXPECT_EQUAL(2u, a3->getNumDocs());
@@ -226,7 +226,7 @@ TEST_F("require that attribute writer handles put", Fixture)
EXPECT_EQUAL(strcmp("", sbuf[0]), 0);
}
{ // document with single value & multi value attribute
- Document::UP doc = idb.startDocument("doc::2").
+ Document::UP doc = idb.startDocument("id:ns:searchdocument::2").
startAttributeField("a1").addInt(10).endField().
startAttributeField("a2").startElement().addInt(20).endElement().
startElement().addInt(30).endElement().endField().endDocument();
@@ -246,7 +246,7 @@ TEST_F("require that attribute writer handles put", Fixture)
EXPECT_EQUAL(30u, ibuf[1]);
}
{ // replace existing document
- Document::UP doc = idb.startDocument("doc::2").
+ Document::UP doc = idb.startDocument("id:ns:searchdocument::2").
startAttributeField("a1").addInt(100).endField().
startAttributeField("a2").startElement().addInt(200).endElement().
startElement().addInt(300).endElement().
@@ -281,7 +281,7 @@ TEST_F("require that attribute writer handles predicate put", Fixture)
// empty document should give default values
EXPECT_EQUAL(1u, a1->getNumDocs());
- f.put(1, *idb.startDocument("doc::1").endDocument(), 1);
+ f.put(1, *idb.startDocument("id:ns:searchdocument::1").endDocument(), 1);
EXPECT_EQUAL(2u, a1->getNumDocs());
EXPECT_EQUAL(1u, a1->getStatus().getLastSyncToken());
EXPECT_EQUAL(0u, index.getZeroConstraintDocs().size());
@@ -289,7 +289,7 @@ TEST_F("require that attribute writer handles predicate put", Fixture)
// document with single value attribute
PredicateSlimeBuilder builder;
Document::UP doc =
- idb.startDocument("doc::2").startAttributeField("a1")
+ idb.startDocument("id:ns:searchdocument::2").startAttributeField("a1")
.addPredicate(builder.true_predicate().build())
.endField().endDocument();
f.put(2, *doc, 2);
@@ -301,7 +301,7 @@ TEST_F("require that attribute writer handles predicate put", Fixture)
EXPECT_FALSE(it.valid());
// replace existing document
- doc = idb.startDocument("doc::2").startAttributeField("a1")
+ doc = idb.startDocument("id:ns:searchdocument::2").startAttributeField("a1")
.addPredicate(builder.feature("foo").value("bar").build())
.endField().endDocument();
f.put(3, *doc, 2);
@@ -374,7 +374,7 @@ TEST_F("require that visibilitydelay is honoured", Fixture)
DocBuilder idb(s);
EXPECT_EQUAL(1u, a1->getNumDocs());
EXPECT_EQUAL(0u, a1->getStatus().getLastSyncToken());
- Document::UP doc = idb.startDocument("doc::1")
+ Document::UP doc = idb.startDocument("id:ns:searchdocument::1")
.startAttributeField("a1").addStr("10").endField()
.endDocument();
f.put(3, *doc, 1);
@@ -398,11 +398,11 @@ TEST_F("require that visibilitydelay is honoured", Fixture)
EXPECT_EQUAL(8u, a1->getStatus().getLastSyncToken());
verifyAttributeContent(*a1, 2, "10");
- awDelayed.put(9, *idb.startDocument("doc::1").startAttributeField("a1").addStr("11").endField().endDocument(),
+ awDelayed.put(9, *idb.startDocument("id:ns:searchdocument::1").startAttributeField("a1").addStr("11").endField().endDocument(),
2, false, emptyCallback);
- awDelayed.put(10, *idb.startDocument("doc::1").startAttributeField("a1").addStr("20").endField().endDocument(),
+ awDelayed.put(10, *idb.startDocument("id:ns:searchdocument::1").startAttributeField("a1").addStr("20").endField().endDocument(),
2, false, emptyCallback);
- awDelayed.put(11, *idb.startDocument("doc::1").startAttributeField("a1").addStr("30").endField().endDocument(),
+ awDelayed.put(11, *idb.startDocument("id:ns:searchdocument::1").startAttributeField("a1").addStr("30").endField().endDocument(),
2, false, emptyCallback);
EXPECT_EQUAL(8u, a1->getStatus().getLastSyncToken());
verifyAttributeContent(*a1, 2, "10");
@@ -422,7 +422,7 @@ TEST_F("require that attribute writer handles predicate remove", Fixture)
DocBuilder idb(s);
PredicateSlimeBuilder builder;
Document::UP doc =
- idb.startDocument("doc::1").startAttributeField("a1")
+ idb.startDocument("id:ns:searchdocument::1").startAttributeField("a1")
.addPredicate(builder.true_predicate().build())
.endField().endDocument();
f.put(1, *doc, 1);
@@ -447,7 +447,7 @@ TEST_F("require that attribute writer handles update", Fixture)
schema.addAttributeField(Schema::AttributeField("a2", schema::DataType::INT32, CollectionType::SINGLE));
DocBuilder idb(schema);
const document::DocumentType &dt(idb.getDocumentType());
- DocumentUpdate upd(*idb.getDocumentTypeRepo(), dt, DocumentId("doc::1"));
+ DocumentUpdate upd(*idb.getDocumentTypeRepo(), dt, DocumentId("id:ns:searchdocument::1"));
upd.addUpdate(FieldUpdate(upd.getType().getField("a1"))
.addUpdate(ArithmeticValueUpdate(ArithmeticValueUpdate::Add, 5)));
upd.addUpdate(FieldUpdate(upd.getType().getField("a2"))
@@ -484,14 +484,14 @@ TEST_F("require that attribute writer handles predicate update", Fixture)
DocBuilder idb(schema);
PredicateSlimeBuilder builder;
Document::UP doc =
- idb.startDocument("doc::1").startAttributeField("a1")
+ idb.startDocument("id:ns:searchdocument::1").startAttributeField("a1")
.addPredicate(builder.true_predicate().build())
.endField().endDocument();
f.put(1, *doc, 1);
EXPECT_EQUAL(2u, a1->getNumDocs());
const document::DocumentType &dt(idb.getDocumentType());
- DocumentUpdate upd(*idb.getDocumentTypeRepo(), dt, DocumentId("doc::1"));
+ DocumentUpdate upd(*idb.getDocumentTypeRepo(), dt, DocumentId("id:ns:searchdocument::1"));
PredicateFieldValue new_value(builder.feature("foo").value("bar").build());
upd.addUpdate(FieldUpdate(upd.getType().getField("a1"))
.addUpdate(AssignValueUpdate(new_value)));
@@ -633,7 +633,7 @@ createTensorSchema() {
Document::UP
createTensorPutDoc(DocBuilder &builder, const Tensor &tensor) {
- return builder.startDocument("doc::1").
+ return builder.startDocument("id:ns:searchdocument::1").
startAttributeField("a1").
addTensor(tensor.clone()).endField().endDocument();
}
@@ -678,7 +678,7 @@ TEST_F("require that attribute writer handles tensor assign update", Fixture)
EXPECT_TRUE(tensor->equals(*tensor2));
const document::DocumentType &dt(builder.getDocumentType());
- DocumentUpdate upd(*builder.getDocumentTypeRepo(), dt, DocumentId("doc::1"));
+ DocumentUpdate upd(*builder.getDocumentTypeRepo(), dt, DocumentId("id:ns:searchdocument::1"));
auto new_tensor = make_tensor(TensorSpec("tensor(x{},y{})")
.add({{"x", "8"}, {"y", "9"}}, 11));
TensorDataType xySparseTensorDataType(vespalib::eval::ValueType::from_spec("tensor(x{},y{})"));
@@ -728,7 +728,7 @@ putAttributes(Fixture &f, std::vector<uint32_t> expExecuteHistory)
EXPECT_EQUAL(1u, a1->getNumDocs());
EXPECT_EQUAL(1u, a2->getNumDocs());
EXPECT_EQUAL(1u, a3->getNumDocs());
- f.put(1, *idb.startDocument("doc::1").
+ f.put(1, *idb.startDocument("id:ns:searchdocument::1").
startAttributeField("a1").addInt(10).endField().
startAttributeField("a2").addInt(15).endField().
startAttributeField("a3").addInt(20).endField().
diff --git a/searchcore/src/tests/proton/common/cachedselect_test.cpp b/searchcore/src/tests/proton/common/cachedselect_test.cpp
index dcba8fda1c6..df414439bce 100644
--- a/searchcore/src/tests/proton/common/cachedselect_test.cpp
+++ b/searchcore/src/tests/proton/common/cachedselect_test.cpp
@@ -466,10 +466,10 @@ TEST_F("Test that basic select works", TestFixture)
{
MyDB &db(*f._db);
- db.addDoc(1u, "doc:test:1", "hello", "null", 45, 37);
- db.addDoc(2u, "doc:test:2", "gotcha", "foo", 3, 25);
- db.addDoc(3u, "doc:test:3", "gotcha", "foo", noIntVal, noIntVal);
- db.addDoc(4u, "doc:test:4", "null", "foo", noIntVal, noIntVal);
+ db.addDoc(1u, "id:ns:test::1", "hello", "null", 45, 37);
+ db.addDoc(2u, "id:ns:test::2", "gotcha", "foo", 3, 25);
+ db.addDoc(3u, "id:ns:test::3", "gotcha", "foo", noIntVal, noIntVal);
+ db.addDoc(4u, "id:ns:test::4", "null", "foo", noIntVal, noIntVal);
CachedSelect::SP cs;
@@ -566,9 +566,9 @@ struct PreDocSelectFixture : public TestFixture {
PreDocSelectFixture()
: TestFixture()
{
- db().addDoc(1u, "doc:test:1", "foo", "null", 3, 5);
- db().addDoc(2u, "doc:test:1", "bar", "null", 3, 5);
- db().addDoc(3u, "doc:test:2", "foo", "null", 7, 5);
+ db().addDoc(1u, "id:ns:test::1", "foo", "null", 3, 5);
+ db().addDoc(2u, "id:ns:test::1", "bar", "null", 3, 5);
+ db().addDoc(3u, "id:ns:test::2", "foo", "null", 7, 5);
}
};
@@ -602,10 +602,10 @@ TEST_F("Test performance when using attributes", TestFixture)
{
MyDB &db(*f._db);
- db.addDoc(1u, "doc:test:1", "hello", "null", 45, 37);
- db.addDoc(2u, "doc:test:2", "gotcha", "foo", 3, 25);
- db.addDoc(3u, "doc:test:3", "gotcha", "foo", noIntVal, noIntVal);
- db.addDoc(4u, "doc:test:4", "null", "foo", noIntVal, noIntVal);
+ db.addDoc(1u, "id:ns:test::1", "hello", "null", 45, 37);
+ db.addDoc(2u, "id:ns:test::2", "gotcha", "foo", 3, 25);
+ db.addDoc(3u, "id:ns:test::3", "gotcha", "foo", noIntVal, noIntVal);
+ db.addDoc(4u, "id:ns:test::4", "null", "foo", noIntVal, noIntVal);
CachedSelect::SP cs;
cs = f.testParse("test.aa < 45", "test");
diff --git a/searchcore/src/tests/proton/common/selectpruner_test.cpp b/searchcore/src/tests/proton/common/selectpruner_test.cpp
index a7feb865d96..5b1fa3ed4bf 100644
--- a/searchcore/src/tests/proton/common/selectpruner_test.cpp
+++ b/searchcore/src/tests/proton/common/selectpruner_test.cpp
@@ -36,8 +36,7 @@ using search::AttributeFactory;
typedef Node::UP NodeUP;
-namespace
-{
+namespace {
const int32_t doc_type_id = 787121340;
const string type_name = "test";
@@ -57,9 +56,6 @@ const string invalid_name("test_2.ac > 3999");
const string invalid2_name("test_2.ac > 4999");
const string empty("");
-const document::DocumentId docId("doc:test:1");
-
-
std::unique_ptr<const DocumentTypeRepo>
makeDocTypeRepo()
{
@@ -135,23 +131,12 @@ public:
bool _hasDocuments;
TestFixture();
-
~TestFixture();
- void
- testParse(const string &selection);
-
- void
- testParseFail(const string &selection);
-
- void
- testPrune(const string &selection,
- const string &exp);
-
- void
- testPrune(const string &selection,
- const string &exp,
- const string &docTypeName);
+ void testParse(const string &selection);
+ void testParseFail(const string &selection);
+ void testPrune(const string &selection, const string &exp);
+ void testPrune(const string &selection, const string &exp, const string &docTypeName);
};
@@ -169,28 +154,22 @@ TestFixture::TestFixture()
}
-TestFixture::~TestFixture()
-{
-}
+TestFixture::~TestFixture() = default;
void
TestFixture::testParse(const string &selection)
{
const DocumentTypeRepo &repo(*_repoUP);
- document::select::Parser parser(repo,
- document::BucketIdFactory());
+ document::select::Parser parser(repo,document::BucketIdFactory());
NodeUP select;
try {
- LOG(info,
- "Trying to parse '%s'",
- selection.c_str());
+ LOG(info, "Trying to parse '%s'", selection.c_str());
select = parser.parse(selection);
} catch (document::select::ParsingFailedException &e) {
- LOG(info,
- "Parse failed: %s", e.what());
+ LOG(info, "Parse failed: %s", e.what());
select.reset(0);
}
ASSERT_TRUE(select.get() != NULL);
@@ -201,20 +180,15 @@ void
TestFixture::testParseFail(const string &selection)
{
const DocumentTypeRepo &repo(*_repoUP);
- document::select::Parser parser(repo,
- document::BucketIdFactory());
+ document::select::Parser parser(repo,document::BucketIdFactory());
NodeUP select;
try {
- LOG(info,
- "Trying to parse '%s'",
- selection.c_str());
+ LOG(info, "Trying to parse '%s'", selection.c_str());
select = parser.parse(selection);
} catch (document::select::ParsingFailedException &e) {
- LOG(info,
- "Parse failed: %s",
- e.getMessage().c_str());
+ LOG(info, "Parse failed: %s", e.getMessage().c_str());
select.reset(0);
}
ASSERT_TRUE(select.get() == NULL);
@@ -222,25 +196,18 @@ TestFixture::testParseFail(const string &selection)
void
-TestFixture::testPrune(const string &selection,
- const string &exp,
- const string &docTypeName)
+TestFixture::testPrune(const string &selection, const string &exp, const string &docTypeName)
{
const DocumentTypeRepo &repo(*_repoUP);
- document::select::Parser parser(repo,
- document::BucketIdFactory());
+ document::select::Parser parser(repo,document::BucketIdFactory());
NodeUP select;
try {
- LOG(info,
- "Trying to parse '%s' with docType=%s",
- selection.c_str(),
- docTypeName.c_str());
+ LOG(info, "Trying to parse '%s' with docType=%s", selection.c_str(), docTypeName.c_str());
select = parser.parse(selection);
} catch (document::select::ParsingFailedException &e) {
- LOG(info,
- "Parse failed: %s", e.what());
+ LOG(info, "Parse failed: %s", e.what());
select.reset(0);
}
ASSERT_TRUE(select.get() != NULL);
@@ -249,7 +216,7 @@ TestFixture::testPrune(const string &selection,
LOG(info, "ParseTree: '%s'", os.str().c_str());
const DocumentType *docType = repo.getDocumentType(docTypeName);
ASSERT_TRUE(docType != NULL);
- Document::UP emptyDoc(new Document(*docType, docId));
+ Document::UP emptyDoc(new Document(*docType, document::DocumentId("id:ns:" + docTypeName + "::1")));
emptyDoc->setRepo(repo);
SelectPruner pruner(docTypeName, &_amgr, *emptyDoc, repo, _hasFields, _hasDocuments);
pruner.process(*select);
diff --git a/searchcore/src/tests/proton/docsummary/docsummary.cpp b/searchcore/src/tests/proton/docsummary/docsummary.cpp
index e8152161faa..0a9f3127844 100644
--- a/searchcore/src/tests/proton/docsummary/docsummary.cpp
+++ b/searchcore/src/tests/proton/docsummary/docsummary.cpp
@@ -429,7 +429,7 @@ Test::requireThatAdapterHandlesAllFieldTypes()
s.addSummaryField(Schema::SummaryField("l", schema::DataType::STRING));
BuildContext bc(s);
- bc._bld.startDocument("doc::0");
+ bc._bld.startDocument("id:ns:searchdocument::0");
bc._bld.startSummaryField("a").addInt(255).endField();
bc._bld.startSummaryField("b").addInt(32767).endField();
bc._bld.startSummaryField("c").addInt(2147483647).endField();
@@ -478,12 +478,12 @@ Test::requireThatAdapterHandlesMultipleDocuments()
s.addSummaryField(Schema::SummaryField("a", schema::DataType::INT32));
BuildContext bc(s);
- bc._bld.startDocument("doc::0").
+ bc._bld.startDocument("id:ns:searchdocument::0").
startSummaryField("a").
addInt(1000).
endField();
bc.endDocument(0);
- bc._bld.startDocument("doc::1").
+ bc._bld.startDocument("id:ns:searchdocument::1").
startSummaryField("a").
addInt(2000).endField();
bc.endDocument(1);
@@ -519,7 +519,7 @@ Test::requireThatAdapterHandlesDocumentIdField()
Schema s;
s.addSummaryField(Schema::SummaryField("documentid", schema::DataType::STRING));
BuildContext bc(s);
- bc._bld.startDocument("doc::0").
+ bc._bld.startDocument("id:ns:searchdocument::0").
startSummaryField("documentid").
addStr("foo").
endField();
@@ -528,16 +528,16 @@ Test::requireThatAdapterHandlesDocumentIdField()
bc.createFieldCacheRepo(getResultConfig())->getFieldCache("class4"),
getMarkupFields());
GeneralResultPtr res = getResult(dsa, 0);
- EXPECT_EQUAL("doc::0", std::string(res->GetEntry("documentid")->_stringval,
+ EXPECT_EQUAL("id:ns:searchdocument::0", std::string(res->GetEntry("documentid")->_stringval,
res->GetEntry("documentid")->_stringlen));
}
-GlobalId gid1 = DocumentId("doc::1").getGlobalId(); // lid 1
-GlobalId gid2 = DocumentId("doc::2").getGlobalId(); // lid 2
-GlobalId gid3 = DocumentId("doc::3").getGlobalId(); // lid 3
-GlobalId gid4 = DocumentId("doc::4").getGlobalId(); // lid 4
-GlobalId gid9 = DocumentId("doc::9").getGlobalId(); // not existing
+GlobalId gid1 = DocumentId("id:ns:searchdocument::1").getGlobalId(); // lid 1
+GlobalId gid2 = DocumentId("id:ns:searchdocument::2").getGlobalId(); // lid 2
+GlobalId gid3 = DocumentId("id:ns:searchdocument::3").getGlobalId(); // lid 3
+GlobalId gid4 = DocumentId("id:ns:searchdocument::4").getGlobalId(); // lid 4
+GlobalId gid9 = DocumentId("id:ns:searchdocument::9").getGlobalId(); // not existing
void
Test::requireThatDocsumRequestIsProcessed()
@@ -547,31 +547,31 @@ Test::requireThatDocsumRequestIsProcessed()
BuildContext bc(s);
DBContext dc(bc._repo, getDocTypeName());
- dc.put(*bc._bld.startDocument("doc::1").
+ dc.put(*bc._bld.startDocument("id:ns:searchdocument::1").
startSummaryField("a").
addInt(10).
endField().
endDocument(),
1);
- dc.put(*bc._bld.startDocument("doc::2").
+ dc.put(*bc._bld.startDocument("id:ns:searchdocument::2").
startSummaryField("a").
addInt(20).
endField().
endDocument(),
2);
- dc.put(*bc._bld.startDocument("doc::3").
+ dc.put(*bc._bld.startDocument("id:ns:searchdocument::3").
startSummaryField("a").
addInt(30).
endField().
endDocument(),
3);
- dc.put(*bc._bld.startDocument("doc::4").
+ dc.put(*bc._bld.startDocument("id:ns:searchdocument::4").
startSummaryField("a").
addInt(40).
endField().
endDocument(),
4);
- dc.put(*bc._bld.startDocument("doc::5").
+ dc.put(*bc._bld.startDocument("id:ns:searchdocument::5").
startSummaryField("a").
addInt(50).
endField().
@@ -607,7 +607,7 @@ Test::requireThatRewritersAreUsed()
BuildContext bc(s);
DBContext dc(bc._repo, getDocTypeName());
- dc.put(*bc._bld.startDocument("doc::1").
+ dc.put(*bc._bld.startDocument("id:ns:searchdocument::1").
startSummaryField("aa").
addInt(10).
endField().
@@ -634,7 +634,7 @@ Test::requireThatSummariesTimeout()
BuildContext bc(s);
DBContext dc(bc._repo, getDocTypeName());
- dc.put(*bc._bld.startDocument("doc::1").
+ dc.put(*bc._bld.startDocument("id:ns:searchdocument::1").
startSummaryField("aa").
addInt(10).
endField().
@@ -686,10 +686,10 @@ Test::requireThatAttributesAreUsed()
BuildContext bc(s);
DBContext dc(bc._repo, getDocTypeName());
- dc.put(*bc._bld.startDocument("doc::1").
+ dc.put(*bc._bld.startDocument("id:ns:searchdocument::1").
endDocument(),
1); // empty doc
- dc.put(*bc._bld.startDocument("doc::2").
+ dc.put(*bc._bld.startDocument("id:ns:searchdocument::2").
startAttributeField("ba").
addInt(10).
endField().
@@ -753,7 +753,7 @@ Test::requireThatAttributesAreUsed()
endField().
endDocument(),
2);
- dc.put(*bc._bld.startDocument("doc::3").
+ dc.put(*bc._bld.startDocument("id:ns:searchdocument::3").
endDocument(),
3); // empty doc
@@ -818,7 +818,7 @@ Test::requireThatSummaryAdapterHandlesPutAndRemove()
s.addSummaryField(Schema::SummaryField("f1", schema::DataType::STRING, CollectionType::SINGLE));
BuildContext bc(s);
DBContext dc(bc._repo, getDocTypeName());
- Document::UP exp = bc._bld.startDocument("doc::1").
+ Document::UP exp = bc._bld.startDocument("id:ns:searchdocument::1").
startSummaryField("f1").
addStr("foo").
endField().
@@ -854,7 +854,7 @@ Test::requireThatAnnotationsAreUsed()
s.addSummaryField(Schema::SummaryField("dynamicstring", schema::DataType::STRING, CollectionType::SINGLE));
BuildContext bc(s);
DBContext dc(bc._repo, getDocTypeName());
- Document::UP exp = bc._bld.startDocument("doc::0").
+ Document::UP exp = bc._bld.startDocument("id:ns:searchdocument::0").
startIndexField("g").
addStr("foo").
addStr("bar").
@@ -908,7 +908,7 @@ Test::requireThatUrisAreUsed()
s.addSummaryField(Schema::SummaryField("uriwset", schema::DataType::STRING, CollectionType::WEIGHTEDSET));
BuildContext bc(s);
DBContext dc(bc._repo, getDocTypeName());
- Document::UP exp = bc._bld.startDocument("doc::0").
+ Document::UP exp = bc._bld.startDocument("id:ns:searchdocument::0").
startIndexField("urisingle").
startSubField("all").
addUrlTokenizedString("http://www.example.com:81/fluke?ab=2#4").
@@ -1074,7 +1074,7 @@ Test::requireThatPositionsAreUsed()
BuildContext bc(s);
DBContext dc(bc._repo, getDocTypeName());
- Document::UP exp = bc._bld.startDocument("doc::1").
+ Document::UP exp = bc._bld.startDocument("id:ns:searchdocument::1").
startAttributeField("sp2").
addPosition(1002, 1003).
endField().
@@ -1146,7 +1146,7 @@ Test::requireThatRawFieldsWorks()
BuildContext bc(s);
DBContext dc(bc._repo, getDocTypeName());
- Document::UP exp = bc._bld.startDocument("doc::0").
+ Document::UP exp = bc._bld.startDocument("id:ns:searchdocument::0").
startSummaryField("i").
addRaw(raw1s.c_str(), raw1s.size()).
endField().
@@ -1178,8 +1178,7 @@ Test::requireThatRawFieldsWorks()
bc.createFieldCacheRepo(getResultConfig())->getFieldCache("class0"),
getMarkupFields());
- ASSERT_TRUE(assertString(raw1s,
- "i", dsa, 1));
+ ASSERT_TRUE(assertString(raw1s, "i", dsa, 1));
GeneralResultPtr res = getResult(dsa, 1);
{
@@ -1237,14 +1236,12 @@ Test::Test()
continue;
// Assume just one argument: source field that must contain markup
_markupFields.insert(markupField);
- LOG(info,
- "Field %s has markup",
- markupField.c_str());
+ LOG(info, "Field %s has markup", markupField.c_str());
}
}
}
-Test::~Test() {}
+Test::~Test() = default;
int
Test::Main()
diff --git a/searchcore/src/tests/proton/docsummary/summaryfieldconverter_test.cpp b/searchcore/src/tests/proton/docsummary/summaryfieldconverter_test.cpp
index cc6eef14fd6..b295926c64a 100644
--- a/searchcore/src/tests/proton/docsummary/summaryfieldconverter_test.cpp
+++ b/searchcore/src/tests/proton/docsummary/summaryfieldconverter_test.cpp
@@ -350,7 +350,7 @@ StringFieldValue Test::makeAnnotatedChineseString() {
}
Document Test::makeDocument() {
- Document doc(getDocType(), DocumentId("doc:scheme:"));
+ Document doc(getDocType(), DocumentId("id:ns:indexingdocument::"));
doc.setRepo(*_documentRepo);
doc.setValue("string", makeAnnotatedString());
@@ -667,7 +667,7 @@ Test::requireThatPredicateIsPrinted()
Cursor &arr = obj.setArray(Predicate::SET);
arr.addString("bar");
- Document doc(getDocType(), DocumentId("doc:scheme:"));
+ Document doc(getDocType(), DocumentId("id:ns:indexingdocument::"));
doc.setRepo(*_documentRepo);
doc.setValue("predicate", PredicateFieldValue(std::move(input)));
@@ -687,7 +687,7 @@ Test::requireThatTensorIsNotConverted()
TensorFieldValue tensorFieldValue(tensorDataType);
tensorFieldValue = make_tensor(TensorSpec("tensor(x{},y{})")
.add({{"x", "4"}, {"y", "5"}}, 7));
- Document doc(getDocType(), DocumentId("doc:scheme:"));
+ Document doc(getDocType(), DocumentId("id:ns:indexingdocument::"));
doc.setRepo(*_documentRepo);
doc.setValue("tensor", tensorFieldValue);
@@ -712,7 +712,7 @@ const ReferenceDataType& Test::getAsRefType(const string& name) const {
}
void Test::requireThatNonEmptyReferenceIsConvertedToStringWithId() {
- Document doc(getDocType(), DocumentId("doc:scheme:"));
+ Document doc(getDocType(), DocumentId("id:ns:indexingdocument::"));
doc.setRepo(*_documentRepo);
doc.setValue("ref", ReferenceFieldValue(
getAsRefType("Reference<target_dummy_document>"),
@@ -723,7 +723,7 @@ void Test::requireThatNonEmptyReferenceIsConvertedToStringWithId() {
}
void Test::requireThatEmptyReferenceIsConvertedToEmptyString() {
- Document doc(getDocType(), DocumentId("doc:scheme:"));
+ Document doc(getDocType(), DocumentId("id:ns:indexingdocument::"));
doc.setRepo(*_documentRepo);
doc.setValue("ref", ReferenceFieldValue(
getAsRefType("Reference<target_dummy_document>")));
@@ -735,7 +735,7 @@ void Test::requireThatEmptyReferenceIsConvertedToEmptyString() {
// Own test for this to ensure that SlimeFiller code path is executed,
// as this only triggers for composite field types.
void Test::requireThatReferenceInCompositeTypeEmitsSlimeData() {
- Document doc(getDocType(), DocumentId("doc:scheme:"));
+ Document doc(getDocType(), DocumentId("id:ns:indexingdocument::"));
doc.setRepo(*_documentRepo);
StructFieldValue sfv(getDataType("indexingdocument.header.nested"));
diff --git a/searchcore/src/tests/proton/document_iterator/document_iterator_test.cpp b/searchcore/src/tests/proton/document_iterator/document_iterator_test.cpp
index ad5ac55c5e9..9342ddd4b8a 100644
--- a/searchcore/src/tests/proton/document_iterator/document_iterator_test.cpp
+++ b/searchcore/src/tests/proton/document_iterator/document_iterator_test.cpp
@@ -416,17 +416,17 @@ void checkEntry(const IterateResult &res, size_t idx, const Document &doc, const
TEST("require that custom retrievers work as expected") {
IDocumentRetriever::SP dr =
- cat(cat(doc("doc:foo:1", Timestamp(2), bucket(5)),
- rem("doc:foo:2", Timestamp(3), bucket(5))),
- cat(doc("doc:foo:3", Timestamp(7), bucket(6)),
+ cat(cat(doc("id:ns:document::1", Timestamp(2), bucket(5)),
+ rem("id:ns:document::2", Timestamp(3), bucket(5))),
+ cat(doc("id:ns:document::3", Timestamp(7), bucket(6)),
nil()));
- EXPECT_FALSE(dr->getDocumentMetaData(DocumentId("doc:foo:bogus")).valid());
+ EXPECT_FALSE(dr->getDocumentMetaData(DocumentId("id:ns:document::bogus")).valid());
EXPECT_TRUE(dr->getDocument(1).get() == 0);
EXPECT_TRUE(dr->getDocument(2).get() == 0);
EXPECT_TRUE(dr->getDocument(3).get() != 0);
- TEST_DO(checkDoc(*dr, "doc:foo:1", 2, 5, false));
- TEST_DO(checkDoc(*dr, "doc:foo:2", 3, 5, true));
- TEST_DO(checkDoc(*dr, "doc:foo:3", 7, 6, false));
+ TEST_DO(checkDoc(*dr, "id:ns:document::1", 2, 5, false));
+ TEST_DO(checkDoc(*dr, "id:ns:document::2", 3, 5, true));
+ TEST_DO(checkDoc(*dr, "id:ns:document::3", 7, 6, false));
DocumentMetaData::Vector b5;
DocumentMetaData::Vector b6;
dr->getBucketMetaData(bucket(5), b5);
@@ -456,19 +456,19 @@ TEST("require that a list of empty retrievers can be iterated") {
TEST("require that normal documents can be iterated") {
DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
- itr.add(doc("doc:foo:1", Timestamp(2), bucket(5)));
- itr.add(cat(doc("doc:foo:2", Timestamp(3), bucket(5)),
- doc("doc:foo:3", Timestamp(4), bucket(5))));
+ itr.add(doc("id:ns:document::1", Timestamp(2), bucket(5)));
+ itr.add(cat(doc("id:ns:document::2", Timestamp(3), bucket(5)),
+ doc("id:ns:document::3", Timestamp(4), bucket(5))));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(3u, res.getEntries().size());
- TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:1")), Timestamp(2)));
- TEST_DO(checkEntry(res, 1, Document(*DataType::DOCUMENT, DocumentId("doc:foo:2")), Timestamp(3)));
- TEST_DO(checkEntry(res, 2, Document(*DataType::DOCUMENT, DocumentId("doc:foo:3")), Timestamp(4)));
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::1")), Timestamp(2)));
+ TEST_DO(checkEntry(res, 1, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::2")), Timestamp(3)));
+ TEST_DO(checkEntry(res, 2, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::3")), Timestamp(4)));
}
void verifyIterateIgnoringStopSignal(DocumentIterator & itr) {
- itr.add(doc("doc:foo:1", Timestamp(2), bucket(5)));
+ itr.add(doc("id:ns:document::1", Timestamp(2), bucket(5)));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(1u, res.getEntries().size());
@@ -488,14 +488,14 @@ TEST("require that iterator ignoring maxbytes stops at the end, and does not aut
}
void verifyReadConsistency(DocumentIterator & itr, Committer & committer) {
- IDocumentRetriever::SP retriever = doc("doc:foo:1", Timestamp(2), bucket(5));
+ IDocumentRetriever::SP retriever = doc("id:ns:document::1", Timestamp(2), bucket(5));
IDocumentRetriever::SP commitAndWaitRetriever(new CommitAndWaitDocumentRetriever(retriever, committer));
itr.add(commitAndWaitRetriever);
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(1u, res.getEntries().size());
- TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:1")), Timestamp(2)));
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::1")), Timestamp(2)));
EXPECT_EQUAL(0u, committer._commitCount);
}
@@ -516,7 +516,7 @@ TEST("require that readconsistency::strong does commit") {
}
TEST("require that docid limit is honoured") {
- IDocumentRetriever::SP retriever = doc("doc:foo:1", Timestamp(2), bucket(5));
+ IDocumentRetriever::SP retriever = doc("id:ns:document::1", Timestamp(2), bucket(5));
UnitDR & udr = dynamic_cast<UnitDR &>(*retriever);
udr.docid = 7;
DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
@@ -524,7 +524,7 @@ TEST("require that docid limit is honoured") {
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(1u, res.getEntries().size());
- TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:1")), Timestamp(2)));
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::1")), Timestamp(2)));
udr.setDocIdLimit(7);
DocumentIterator limited(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
@@ -536,46 +536,46 @@ TEST("require that docid limit is honoured") {
TEST("require that remove entries can be iterated") {
DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
- itr.add(rem("doc:foo:1", Timestamp(2), bucket(5)));
- itr.add(cat(rem("doc:foo:2", Timestamp(3), bucket(5)),
- rem("doc:foo:3", Timestamp(4), bucket(5))));
+ itr.add(rem("id:ns:document::1", Timestamp(2), bucket(5)));
+ itr.add(cat(rem("id:ns:document::2", Timestamp(3), bucket(5)),
+ rem("id:ns:document::3", Timestamp(4), bucket(5))));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(3u, res.getEntries().size());
- TEST_DO(checkEntry(res, 0, DocumentId("doc:foo:1"), Timestamp(2)));
- TEST_DO(checkEntry(res, 1, DocumentId("doc:foo:2"), Timestamp(3)));
- TEST_DO(checkEntry(res, 2, DocumentId("doc:foo:3"), Timestamp(4)));
+ TEST_DO(checkEntry(res, 0, DocumentId("id:ns:document::1"), Timestamp(2)));
+ TEST_DO(checkEntry(res, 1, DocumentId("id:ns:document::2"), Timestamp(3)));
+ TEST_DO(checkEntry(res, 2, DocumentId("id:ns:document::3"), Timestamp(4)));
}
TEST("require that remove entries can be ignored") {
DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), docV(), -1, false);
- itr.add(rem("doc:foo:1", Timestamp(2), bucket(5)));
- itr.add(cat(doc("doc:foo:2", Timestamp(3), bucket(5)),
- rem("doc:foo:3", Timestamp(4), bucket(5))));
+ itr.add(rem("id:ns:document::1", Timestamp(2), bucket(5)));
+ itr.add(cat(doc("id:ns:document::2", Timestamp(3), bucket(5)),
+ rem("id:ns:document::3", Timestamp(4), bucket(5))));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(1u, res.getEntries().size());
- TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:2")), Timestamp(3)));
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::2")), Timestamp(3)));
}
TEST("require that iterating all versions returns both documents and removes") {
DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), allV(), -1, false);
- itr.add(rem("doc:foo:1", Timestamp(2), bucket(5)));
- itr.add(cat(doc("doc:foo:2", Timestamp(3), bucket(5)),
- rem("doc:foo:3", Timestamp(4), bucket(5))));
+ itr.add(rem("id:ns:document::1", Timestamp(2), bucket(5)));
+ itr.add(cat(doc("id:ns:document::2", Timestamp(3), bucket(5)),
+ rem("id:ns:document::3", Timestamp(4), bucket(5))));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(3u, res.getEntries().size());
- TEST_DO(checkEntry(res, 0, DocumentId("doc:foo:1"), Timestamp(2)));
- TEST_DO(checkEntry(res, 1, Document(*DataType::DOCUMENT, DocumentId("doc:foo:2")), Timestamp(3)));
- TEST_DO(checkEntry(res, 2, DocumentId("doc:foo:3"), Timestamp(4)));
+ TEST_DO(checkEntry(res, 0, DocumentId("id:ns:document::1"), Timestamp(2)));
+ TEST_DO(checkEntry(res, 1, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::2")), Timestamp(3)));
+ TEST_DO(checkEntry(res, 2, DocumentId("id:ns:document::3"), Timestamp(4)));
}
TEST("require that using an empty field set returns meta-data only") {
DocumentIterator itr(bucket(5), document::NoFields(), selectAll(), newestV(), -1, false);
- itr.add(doc("doc:foo:1", Timestamp(2), bucket(5)));
- itr.add(cat(doc("doc:foo:2", Timestamp(3), bucket(5)),
- rem("doc:foo:3", Timestamp(4), bucket(5))));
+ itr.add(doc("id:ns:document::1", Timestamp(2), bucket(5)));
+ itr.add(cat(doc("id:ns:document::2", Timestamp(3), bucket(5)),
+ rem("id:ns:document::3", Timestamp(4), bucket(5))));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(3u, res.getEntries().size());
@@ -586,30 +586,30 @@ TEST("require that using an empty field set returns meta-data only") {
TEST("require that entries in other buckets are skipped") {
DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
- itr.add(rem("doc:foo:1", Timestamp(2), bucket(6)));
- itr.add(cat(doc("doc:foo:2", Timestamp(3), bucket(5)),
- doc("doc:foo:3", Timestamp(4), bucket(6))));
+ itr.add(rem("id:ns:document::1", Timestamp(2), bucket(6)));
+ itr.add(cat(doc("id:ns:document::2", Timestamp(3), bucket(5)),
+ doc("id:ns:document::3", Timestamp(4), bucket(6))));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(1u, res.getEntries().size());
- TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:2")), Timestamp(3)));
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::2")), Timestamp(3)));
}
TEST("require that maxBytes splits iteration results") {
DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
- itr.add(doc("doc:foo:1", Timestamp(2), bucket(5)));
- itr.add(cat(rem("doc:foo:2", Timestamp(3), bucket(5)),
- doc("doc:foo:3", Timestamp(4), bucket(5))));
- IterateResult res1 = itr.iterate(getSize(Document(*DataType::DOCUMENT, DocumentId("doc:foo:1"))) +
- getSize(DocumentId("doc:foo:2")));
+ itr.add(doc("id:ns:document::1", Timestamp(2), bucket(5)));
+ itr.add(cat(rem("id:ns:document::2", Timestamp(3), bucket(5)),
+ doc("id:ns:document::3", Timestamp(4), bucket(5))));
+ IterateResult res1 = itr.iterate(getSize(Document(*DataType::DOCUMENT, DocumentId("id:ns:document::1"))) +
+ getSize(DocumentId("id:ns:document::2")));
EXPECT_TRUE(!res1.isCompleted());
EXPECT_EQUAL(2u, res1.getEntries().size());
- TEST_DO(checkEntry(res1, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:1")), Timestamp(2)));
- TEST_DO(checkEntry(res1, 1, DocumentId("doc:foo:2"), Timestamp(3)));
+ TEST_DO(checkEntry(res1, 0, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::1")), Timestamp(2)));
+ TEST_DO(checkEntry(res1, 1, DocumentId("id:ns:document::2"), Timestamp(3)));
IterateResult res2 = itr.iterate(largeNum);
EXPECT_TRUE(res2.isCompleted());
- TEST_DO(checkEntry(res2, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:3")), Timestamp(4)));
+ TEST_DO(checkEntry(res2, 0, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::3")), Timestamp(4)));
IterateResult res3 = itr.iterate(largeNum);
EXPECT_TRUE(res3.isCompleted());
@@ -618,9 +618,9 @@ TEST("require that maxBytes splits iteration results") {
TEST("require that maxBytes splits iteration results for meta-data only iteration") {
DocumentIterator itr(bucket(5), document::NoFields(), selectAll(), newestV(), -1, false);
- itr.add(doc("doc:foo:1", Timestamp(2), bucket(5)));
- itr.add(cat(rem("doc:foo:2", Timestamp(3), bucket(5)),
- doc("doc:foo:3", Timestamp(4), bucket(5))));
+ itr.add(doc("id:ns:document::1", Timestamp(2), bucket(5)));
+ itr.add(cat(rem("id:ns:document::2", Timestamp(3), bucket(5)),
+ doc("id:ns:document::3", Timestamp(4), bucket(5))));
IterateResult res1 = itr.iterate(getSize() + getSize());
EXPECT_TRUE(!res1.isCompleted());
EXPECT_EQUAL(2u, res1.getEntries().size());
@@ -638,122 +638,122 @@ TEST("require that maxBytes splits iteration results for meta-data only iteratio
TEST("require that at least one document is returned by visit") {
DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
- itr.add(doc("doc:foo:1", Timestamp(2), bucket(5)));
- itr.add(cat(rem("doc:foo:2", Timestamp(3), bucket(5)),
- doc("doc:foo:3", Timestamp(4), bucket(5))));
+ itr.add(doc("id:ns:document::1", Timestamp(2), bucket(5)));
+ itr.add(cat(rem("id:ns:document::2", Timestamp(3), bucket(5)),
+ doc("id:ns:document::3", Timestamp(4), bucket(5))));
IterateResult res1 = itr.iterate(0);
EXPECT_TRUE(1u <= res1.getEntries().size());
- TEST_DO(checkEntry(res1, 0, Document(*DataType::DOCUMENT,DocumentId("doc:foo:1")), Timestamp(2)));
+ TEST_DO(checkEntry(res1, 0, Document(*DataType::DOCUMENT,DocumentId("id:ns:document::1")), Timestamp(2)));
}
TEST("require that documents outside the timestamp limits are ignored") {
DocumentIterator itr(bucket(5), document::AllFields(), selectTimestampRange(100, 200), newestV(), -1, false);
- itr.add(doc("doc:foo:1", Timestamp(99), bucket(5)));
- itr.add(doc("doc:foo:2", Timestamp(100), bucket(5)));
- itr.add(doc("doc:foo:3", Timestamp(200), bucket(5)));
- itr.add(doc("doc:foo:4", Timestamp(201), bucket(5)));
- itr.add(rem("doc:foo:5", Timestamp(99), bucket(5)));
- itr.add(rem("doc:foo:6", Timestamp(100), bucket(5)));
- itr.add(rem("doc:foo:7", Timestamp(200), bucket(5)));
- itr.add(rem("doc:foo:8", Timestamp(201), bucket(5)));
+ itr.add(doc("id:ns:document::1", Timestamp(99), bucket(5)));
+ itr.add(doc("id:ns:document::2", Timestamp(100), bucket(5)));
+ itr.add(doc("id:ns:document::3", Timestamp(200), bucket(5)));
+ itr.add(doc("id:ns:document::4", Timestamp(201), bucket(5)));
+ itr.add(rem("id:ns:document::5", Timestamp(99), bucket(5)));
+ itr.add(rem("id:ns:document::6", Timestamp(100), bucket(5)));
+ itr.add(rem("id:ns:document::7", Timestamp(200), bucket(5)));
+ itr.add(rem("id:ns:document::8", Timestamp(201), bucket(5)));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(4u, res.getEntries().size());
- TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:2")), Timestamp(100)));
- TEST_DO(checkEntry(res, 1, Document(*DataType::DOCUMENT, DocumentId("doc:foo:3")), Timestamp(200)));
- TEST_DO(checkEntry(res, 2, DocumentId("doc:foo:6"), Timestamp(100)));
- TEST_DO(checkEntry(res, 3, DocumentId("doc:foo:7"), Timestamp(200)));
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::2")), Timestamp(100)));
+ TEST_DO(checkEntry(res, 1, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::3")), Timestamp(200)));
+ TEST_DO(checkEntry(res, 2, DocumentId("id:ns:document::6"), Timestamp(100)));
+ TEST_DO(checkEntry(res, 3, DocumentId("id:ns:document::7"), Timestamp(200)));
}
TEST("require that timestamp subset returns the appropriate documents") {
DocumentIterator itr(bucket(5), document::AllFields(), selectTimestampSet(200, 350, 400), newestV(), -1, false);
- itr.add(doc("doc:foo:1", Timestamp(500), bucket(5)));
- itr.add(doc("doc:foo:2", Timestamp(400), bucket(5)));
- itr.add(doc("doc:foo:3", Timestamp(300), bucket(5)));
- itr.add(doc("doc:foo:4", Timestamp(200), bucket(5)));
- itr.add(rem("doc:foo:5", Timestamp(250), bucket(5)));
- itr.add(rem("doc:foo:6", Timestamp(350), bucket(5)));
- itr.add(rem("doc:foo:7", Timestamp(450), bucket(5)));
- itr.add(rem("doc:foo:8", Timestamp(550), bucket(5)));
+ itr.add(doc("id:ns:document::1", Timestamp(500), bucket(5)));
+ itr.add(doc("id:ns:document::2", Timestamp(400), bucket(5)));
+ itr.add(doc("id:ns:document::3", Timestamp(300), bucket(5)));
+ itr.add(doc("id:ns:document::4", Timestamp(200), bucket(5)));
+ itr.add(rem("id:ns:document::5", Timestamp(250), bucket(5)));
+ itr.add(rem("id:ns:document::6", Timestamp(350), bucket(5)));
+ itr.add(rem("id:ns:document::7", Timestamp(450), bucket(5)));
+ itr.add(rem("id:ns:document::8", Timestamp(550), bucket(5)));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(3u, res.getEntries().size());
- TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:2")), Timestamp(400)));
- TEST_DO(checkEntry(res, 1, Document(*DataType::DOCUMENT, DocumentId("doc:foo:4")), Timestamp(200)));
- TEST_DO(checkEntry(res, 2, DocumentId("doc:foo:6"), Timestamp(350)));
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::2")), Timestamp(400)));
+ TEST_DO(checkEntry(res, 1, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::4")), Timestamp(200)));
+ TEST_DO(checkEntry(res, 2, DocumentId("id:ns:document::6"), Timestamp(350)));
}
TEST("require that document selection will filter results") {
- DocumentIterator itr(bucket(5), document::AllFields(), selectDocs("id=\"doc:foo:xxx*\""), newestV(), -1, false);
- itr.add(doc("doc:foo:xxx1", Timestamp(99), bucket(5)));
- itr.add(doc("doc:foo:yyy1", Timestamp(100), bucket(5)));
- itr.add(doc("doc:foo:xxx2", Timestamp(200), bucket(5)));
- itr.add(doc("doc:foo:yyy2", Timestamp(201), bucket(5)));
- itr.add(rem("doc:foo:xxx3", Timestamp(99), bucket(5)));
- itr.add(rem("doc:foo:yyy3", Timestamp(100), bucket(5)));
- itr.add(rem("doc:foo:xxx4", Timestamp(200), bucket(5)));
- itr.add(rem("doc:foo:yyy4", Timestamp(201), bucket(5)));
+ DocumentIterator itr(bucket(5), document::AllFields(), selectDocs("id=\"id:ns:document::xxx*\""), newestV(), -1, false);
+ itr.add(doc("id:ns:document::xxx1", Timestamp(99), bucket(5)));
+ itr.add(doc("id:ns:document::yyy1", Timestamp(100), bucket(5)));
+ itr.add(doc("id:ns:document::xxx2", Timestamp(200), bucket(5)));
+ itr.add(doc("id:ns:document::yyy2", Timestamp(201), bucket(5)));
+ itr.add(rem("id:ns:document::xxx3", Timestamp(99), bucket(5)));
+ itr.add(rem("id:ns:document::yyy3", Timestamp(100), bucket(5)));
+ itr.add(rem("id:ns:document::xxx4", Timestamp(200), bucket(5)));
+ itr.add(rem("id:ns:document::yyy4", Timestamp(201), bucket(5)));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(4u, res.getEntries().size());
- TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:xxx1")), Timestamp(99)));
- TEST_DO(checkEntry(res, 1, Document(*DataType::DOCUMENT, DocumentId("doc:foo:xxx2")), Timestamp(200)));
- TEST_DO(checkEntry(res, 2, DocumentId("doc:foo:xxx3"), Timestamp(99)));
- TEST_DO(checkEntry(res, 3, DocumentId("doc:foo:xxx4"), Timestamp(200)));
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::xxx1")), Timestamp(99)));
+ TEST_DO(checkEntry(res, 1, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::xxx2")), Timestamp(200)));
+ TEST_DO(checkEntry(res, 2, DocumentId("id:ns:document::xxx3"), Timestamp(99)));
+ TEST_DO(checkEntry(res, 3, DocumentId("id:ns:document::xxx4"), Timestamp(200)));
}
TEST("require that document selection handles 'field == null'") {
DocumentIterator itr(bucket(5), document::AllFields(), selectDocs("foo.aa == null"), newestV(), -1, false);
- itr.add(doc_with_null_fields("doc:foo:xxx1", Timestamp(99), bucket(5)));
- itr.add(doc_with_null_fields("doc:foo:xxx2", Timestamp(100), bucket(5)));
+ itr.add(doc_with_null_fields("id:ns:foo::xxx1", Timestamp(99), bucket(5)));
+ itr.add(doc_with_null_fields("id:ns:foo::xxx2", Timestamp(100), bucket(5)));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
ASSERT_EQUAL(2u, res.getEntries().size());
- Document expected1(getAttrDocType(), DocumentId("doc:foo:xxx1"));
+ Document expected1(getAttrDocType(), DocumentId("id:ns:foo::xxx1"));
TEST_DO(checkEntry(res, 0, expected1, Timestamp(99)));
- Document expected2(getAttrDocType(), DocumentId("doc:foo:xxx2"));
+ Document expected2(getAttrDocType(), DocumentId("id:ns:foo::xxx2"));
TEST_DO(checkEntry(res, 1, expected2, Timestamp(100)));
}
TEST("require that invalid document selection returns no documents") {
DocumentIterator itr(bucket(5), document::AllFields(), selectDocs("=="), newestV(), -1, false);
- itr.add(doc("doc:foo:xxx1", Timestamp(99), bucket(5)));
- itr.add(doc("doc:foo:yyy1", Timestamp(100), bucket(5)));
- itr.add(doc("doc:foo:xxx2", Timestamp(200), bucket(5)));
- itr.add(doc("doc:foo:yyy2", Timestamp(201), bucket(5)));
- itr.add(rem("doc:foo:xxx3", Timestamp(99), bucket(5)));
- itr.add(rem("doc:foo:yyy3", Timestamp(100), bucket(5)));
- itr.add(rem("doc:foo:xxx4", Timestamp(200), bucket(5)));
- itr.add(rem("doc:foo:yyy4", Timestamp(201), bucket(5)));
+ itr.add(doc("id:ns:document::xxx1", Timestamp(99), bucket(5)));
+ itr.add(doc("id:ns:document::yyy1", Timestamp(100), bucket(5)));
+ itr.add(doc("id:ns:document::xxx2", Timestamp(200), bucket(5)));
+ itr.add(doc("id:ns:document::yyy2", Timestamp(201), bucket(5)));
+ itr.add(rem("id:ns:document::xxx3", Timestamp(99), bucket(5)));
+ itr.add(rem("id:ns:document::yyy3", Timestamp(100), bucket(5)));
+ itr.add(rem("id:ns:document::xxx4", Timestamp(200), bucket(5)));
+ itr.add(rem("id:ns:document::yyy4", Timestamp(201), bucket(5)));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(0u, res.getEntries().size());
}
TEST("require that document selection and timestamp range works together") {
- DocumentIterator itr(bucket(5), document::AllFields(), selectDocsWithinRange("id=\"doc:foo:xxx*\"", 100, 200), newestV(), -1, false);
- itr.add(doc("doc:foo:xxx1", Timestamp(99), bucket(5)));
- itr.add(doc("doc:foo:yyy1", Timestamp(100), bucket(5)));
- itr.add(doc("doc:foo:xxx2", Timestamp(200), bucket(5)));
- itr.add(doc("doc:foo:yyy2", Timestamp(201), bucket(5)));
- itr.add(rem("doc:foo:xxx3", Timestamp(99), bucket(5)));
- itr.add(rem("doc:foo:yyy3", Timestamp(100), bucket(5)));
- itr.add(rem("doc:foo:xxx4", Timestamp(200), bucket(5)));
- itr.add(rem("doc:foo:yyy4", Timestamp(201), bucket(5)));
+ DocumentIterator itr(bucket(5), document::AllFields(), selectDocsWithinRange("id=\"id:ns:document::xxx*\"", 100, 200), newestV(), -1, false);
+ itr.add(doc("id:ns:document::xxx1", Timestamp(99), bucket(5)));
+ itr.add(doc("id:ns:document::yyy1", Timestamp(100), bucket(5)));
+ itr.add(doc("id:ns:document::xxx2", Timestamp(200), bucket(5)));
+ itr.add(doc("id:ns:document::yyy2", Timestamp(201), bucket(5)));
+ itr.add(rem("id:ns:document::xxx3", Timestamp(99), bucket(5)));
+ itr.add(rem("id:ns:document::yyy3", Timestamp(100), bucket(5)));
+ itr.add(rem("id:ns:document::xxx4", Timestamp(200), bucket(5)));
+ itr.add(rem("id:ns:document::yyy4", Timestamp(201), bucket(5)));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(2u, res.getEntries().size());
- TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:xxx2")), Timestamp(200)));
- TEST_DO(checkEntry(res, 1, DocumentId("doc:foo:xxx4"), Timestamp(200)));
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::xxx2")), Timestamp(200)));
+ TEST_DO(checkEntry(res, 1, DocumentId("id:ns:document::xxx4"), Timestamp(200)));
}
TEST("require that fieldset limits fields returned") {
DocumentIterator itr(bucket(5), document::HeaderFields(), selectAll(), newestV(), -1, false);
- itr.add(doc_with_fields("doc:foo:xxx1", Timestamp(1), bucket(5)));
+ itr.add(doc_with_fields("id:ns:foo::xxx1", Timestamp(1), bucket(5)));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(1u, res.getEntries().size());
- Document expected(getDocType(), DocumentId("doc:foo:xxx1"));
+ Document expected(getDocType(), DocumentId("id:ns:foo::xxx1"));
expected.set("header", "foo");
TEST_DO(checkEntry(res, 0, expected, Timestamp(1)));
}
@@ -798,26 +798,26 @@ TEST("require that attributes are used")
{
UnitDR::reset();
DocumentIterator itr(bucket(5), document::AllFields(), selectDocs("foo.aa == 45"), docV(), -1, false);
- itr.add(doc_with_attr_fields("doc:foo:xx1", Timestamp(1), bucket(5),
+ itr.add(doc_with_attr_fields("id:ns:foo::xx1", Timestamp(1), bucket(5),
27, 28, 27, 2.7, 2.8, "x27", "x28"));
- itr.add(doc_with_attr_fields("doc:foo:xx2", Timestamp(2), bucket(5),
+ itr.add(doc_with_attr_fields("id:ns:foo::xx2", Timestamp(2), bucket(5),
27, 28, 45, 2.7, 4.5, "x27", "x45"));
- itr.add(doc_with_attr_fields("doc:foo:xx3", Timestamp(3), bucket(5),
+ itr.add(doc_with_attr_fields("id:ns:foo::xx3", Timestamp(3), bucket(5),
45, 46, 27, 4.5, 2.7, "x45", "x27"));
- itr.add(doc_with_attr_fields("doc:foo:xx4", Timestamp(4), bucket(5),
+ itr.add(doc_with_attr_fields("id:ns:foo::xx4", Timestamp(4), bucket(5),
45, 46, 45, 4.5, 4.5, "x45", "x45"));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(2u, res.getEntries().size());
- Document expected1(getAttrDocType(), DocumentId("doc:foo:xx2"));
+ Document expected1(getAttrDocType(), DocumentId("id:ns:foo::xx2"));
expected1.set("header", "foo");
expected1.set("body", "bar");
expected1.set("aa", 27);
expected1.set("ab", 28);
expected1.set("dd", 2.7);
expected1.set("ss", "x27");
- Document expected2(getAttrDocType(), DocumentId("doc:foo:xx4"));
+ Document expected2(getAttrDocType(), DocumentId("id:ns:foo::xx4"));
expected2.set("header", "foo");
expected2.set("body", "bar");
expected2.set("aa", 45);
@@ -828,26 +828,26 @@ TEST("require that attributes are used")
TEST_DO(checkEntry(res, 1, expected2, Timestamp(4)));
DocumentIterator itr2(bucket(5), document::AllFields(), selectDocs("foo.dd == 4.5"), docV(), -1, false);
- itr2.add(doc_with_attr_fields("doc:foo:xx5", Timestamp(5), bucket(5),
+ itr2.add(doc_with_attr_fields("id:ns:foo::xx5", Timestamp(5), bucket(5),
27, 28, 27, 2.7, 2.8, "x27", "x28"));
- itr2.add(doc_with_attr_fields("doc:foo:xx6", Timestamp(6), bucket(5),
+ itr2.add(doc_with_attr_fields("id:ns:foo::xx6", Timestamp(6), bucket(5),
27, 28, 45, 2.7, 4.5, "x27", "x45"));
- itr2.add(doc_with_attr_fields("doc:foo:xx7", Timestamp(7), bucket(5),
+ itr2.add(doc_with_attr_fields("id:ns:foo::xx7", Timestamp(7), bucket(5),
45, 46, 27, 4.5, 2.7, "x45", "x27"));
- itr2.add(doc_with_attr_fields("doc:foo:xx8", Timestamp(8), bucket(5),
+ itr2.add(doc_with_attr_fields("id:ns:foo::xx8", Timestamp(8), bucket(5),
45, 46, 45, 4.5, 4.5, "x45", "x45"));
IterateResult res2 = itr2.iterate(largeNum);
EXPECT_TRUE(res2.isCompleted());
EXPECT_EQUAL(2u, res2.getEntries().size());
- Document expected3(getAttrDocType(), DocumentId("doc:foo:xx6"));
+ Document expected3(getAttrDocType(), DocumentId("id:ns:foo::xx6"));
expected3.set("header", "foo");
expected3.set("body", "bar");
expected3.set("aa", 27);
expected3.set("ab", 28);
expected3.set("dd", 2.7);
expected3.set("ss", "x27");
- Document expected4(getAttrDocType(), DocumentId("doc:foo:xx8"));
+ Document expected4(getAttrDocType(), DocumentId("id:ns:foo::xx8"));
expected4.set("header", "foo");
expected4.set("body", "bar");
expected4.set("aa", 45);
@@ -858,26 +858,26 @@ TEST("require that attributes are used")
TEST_DO(checkEntry(res2, 1, expected4, Timestamp(8)));
DocumentIterator itr3(bucket(5), document::AllFields(), selectDocs("foo.ss == \"x45\""), docV(), -1, false);
- itr3.add(doc_with_attr_fields("doc:foo:xx9", Timestamp(9), bucket(5),
+ itr3.add(doc_with_attr_fields("id:ns:foo::xx9", Timestamp(9), bucket(5),
27, 28, 27, 2.7, 2.8, "x27", "x28"));
- itr3.add(doc_with_attr_fields("doc:foo:xx10", Timestamp(10), bucket(5),
+ itr3.add(doc_with_attr_fields("id:ns:foo::xx10", Timestamp(10), bucket(5),
27, 28, 45, 2.7, 4.5, "x27", "x45"));
- itr3.add(doc_with_attr_fields("doc:foo:xx11", Timestamp(11), bucket(5),
+ itr3.add(doc_with_attr_fields("id:ns:foo::xx11", Timestamp(11), bucket(5),
45, 46, 27, 4.5, 2.7, "x45", "x27"));
- itr3.add(doc_with_attr_fields("doc:foo:xx12", Timestamp(12), bucket(5),
+ itr3.add(doc_with_attr_fields("id:ns:foo::xx12", Timestamp(12), bucket(5),
45, 46, 45, 4.5, 4.5, "x45", "x45"));
IterateResult res3 = itr3.iterate(largeNum);
EXPECT_TRUE(res3.isCompleted());
EXPECT_EQUAL(2u, res3.getEntries().size());
- Document expected5(getAttrDocType(), DocumentId("doc:foo:xx10"));
+ Document expected5(getAttrDocType(), DocumentId("id:ns:foo::xx10"));
expected5.set("header", "foo");
expected5.set("body", "bar");
expected5.set("aa", 27);
expected5.set("ab", 28);
expected5.set("dd", 2.7);
expected5.set("ss", "x27");
- Document expected6(getAttrDocType(), DocumentId("doc:foo:xx12"));
+ Document expected6(getAttrDocType(), DocumentId("id:ns:foo::xx12"));
expected6.set("header", "foo");
expected6.set("body", "bar");
expected6.set("aa", 45);
diff --git a/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp b/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp
index 4b3b68a85ea..f99668a13f8 100644
--- a/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp
@@ -537,7 +537,7 @@ TEST_F("require that heartBeat calls FeedView's heartBeat",
TEST_F("require that outdated remove is ignored", FeedHandlerFixture)
{
- DocumentContext doc_context("doc:test:foo", *f.schema.builder);
+ DocumentContext doc_context("id:ns:searchdocument::foo", *f.schema.builder);
FeedOperation::UP op(new RemoveOperation(doc_context.bucketId, Timestamp(10), doc_context.doc->getId()));
static_cast<DocumentOperation &>(*op).setPrevDbDocumentId(DbDocumentId(4));
static_cast<DocumentOperation &>(*op).setPrevTimestamp(Timestamp(10000));
@@ -549,7 +549,7 @@ TEST_F("require that outdated remove is ignored", FeedHandlerFixture)
TEST_F("require that outdated put is ignored", FeedHandlerFixture)
{
- DocumentContext doc_context("doc:test:foo", *f.schema.builder);
+ DocumentContext doc_context("id:ns:searchdocument::foo", *f.schema.builder);
FeedOperation::UP op(new PutOperation(doc_context.bucketId,
Timestamp(10), doc_context.doc));
static_cast<DocumentOperation &>(*op).setPrevTimestamp(Timestamp(10000));
@@ -570,7 +570,7 @@ addLidToRemove(RemoveDocumentsOperation &op)
TEST_F("require that handleMove calls FeedView", FeedHandlerFixture)
{
- DocumentContext doc_context("doc:test:foo", *f.schema.builder);
+ DocumentContext doc_context("id:ns:searchdocument::foo", *f.schema.builder);
MoveOperation op(doc_context.bucketId, Timestamp(2), doc_context.doc, DbDocumentId(0, 2), 1);
op.setDbDocumentId(DbDocumentId(1, 2));
f.runAsMaster([&]() { f.handler.handleMove(op, IDestructorCallback::SP()); });
@@ -806,7 +806,7 @@ TEST_F("require that tensor update with wrong tensor type fails", FeedHandlerFix
TEST_F("require that put with different document type repo is ok", FeedHandlerFixture)
{
TwoFieldsSchemaContext schema;
- DocumentContext doc_context("doc:test:foo", *schema.builder);
+ DocumentContext doc_context("id:ns:searchdocument::foo", *schema.builder);
auto op = std::make_unique<PutOperation>(doc_context.bucketId,
Timestamp(10), doc_context.doc);
FeedTokenContext token_context;
diff --git a/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp b/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp
index b39b70572e0..144f4ca4ff7 100644
--- a/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp
@@ -577,7 +577,7 @@ struct FixtureBase
}
DocumentContext doc1(uint64_t timestamp = 10) {
- return doc("doc:test:1", timestamp);
+ return doc("id:ns:searchdocument::1", timestamp);
}
void performPut(FeedToken token, PutOperation &op) {
@@ -661,7 +661,7 @@ struct FixtureBase
uint32_t id = first + i;
uint64_t ts = tsfirst + i;
vespalib::asciistream os;
- os << "doc:test:" << id;
+ os << "id:ns:searchdocument::" << id;
docs.push_back(doc(os.str(), ts));
}
return docs;
@@ -822,7 +822,7 @@ TEST_F("require that put() calls attribute adapter", SearchableFeedViewFixture)
f.putAndWait(dc);
EXPECT_EQUAL(1u, f.maw._putSerial);
- EXPECT_EQUAL(DocumentId("doc:test:1"), f.maw._putDocId);
+ EXPECT_EQUAL(DocumentId("id:ns:searchdocument::1"), f.maw._putDocId);
EXPECT_EQUAL(1u, f.maw._putLid);
EXPECT_EQUAL(2u, f._docIdLimit.get());
}
@@ -861,7 +861,7 @@ TEST_F("require that update() calls attribute adapter", SearchableFeedViewFixtur
f.putAndWait(dc1);
f.updateAndWait(dc2);
- assertAttributeUpdate(2u, DocumentId("doc:test:1"), 1u, f.maw);
+ assertAttributeUpdate(2u, DocumentId("id:ns:searchdocument::1"), 1u, f.maw);
}
TEST_F("require that remove() updates document meta store with bucket info",
@@ -1064,7 +1064,7 @@ void putDocumentAndUpdate(Fixture &f, const vespalib::string &fieldName)
f.putAndWait(dc1);
EXPECT_EQUAL(1u, f.msa._store._lastSyncToken);
- DocumentContext dc2("doc:test:1", 20, f.getBuilder());
+ DocumentContext dc2("id:ns:searchdocument::1", 20, f.getBuilder());
dc2.addFieldUpdate(f.getBuilder(), fieldName);
f.updateAndWait(dc2);
}
@@ -1076,7 +1076,7 @@ void requireThatUpdateOnlyUpdatesAttributeAndNotDocumentStore(Fixture &f,
putDocumentAndUpdate(f, fieldName);
EXPECT_EQUAL(1u, f.msa._store._lastSyncToken); // document store not updated
- assertAttributeUpdate(2u, DocumentId("doc:test:1"), 1, f.maw);
+ assertAttributeUpdate(2u, DocumentId("id:ns:searchdocument::1"), 1, f.maw);
}
template <typename Fixture>
@@ -1086,7 +1086,7 @@ void requireThatUpdateUpdatesAttributeAndDocumentStore(Fixture &f,
putDocumentAndUpdate(f, fieldName);
EXPECT_EQUAL(2u, f.msa._store._lastSyncToken); // document store updated
- assertAttributeUpdate(2u, DocumentId("doc:test:1"), 1, f.maw);
+ assertAttributeUpdate(2u, DocumentId("id:ns:searchdocument::1"), 1, f.maw);
}
TEST_F("require that update() to fast-access attribute only updates attribute and not document store",
diff --git a/searchcore/src/tests/proton/documentmetastore/documentmetastore_test.cpp b/searchcore/src/tests/proton/documentmetastore/documentmetastore_test.cpp
index f6f0c2b0806..2fc6cc87631 100644
--- a/searchcore/src/tests/proton/documentmetastore/documentmetastore_test.cpp
+++ b/searchcore/src/tests/proton/documentmetastore/documentmetastore_test.cpp
@@ -545,14 +545,14 @@ TEST(DocumentMetaStoreTest, lid_and_gid_space_is_reused)
GlobalId
createGid(uint32_t lid)
{
- DocumentId docId(vespalib::make_string("doc:id:%u", lid));
+ DocumentId docId(vespalib::make_string("id:ns:testdoc::%u", lid));
return docId.getGlobalId();
}
GlobalId
createGid(uint32_t userId, uint32_t lid)
{
- DocumentId docId(vespalib::make_string("id:id:testdoc:n=%u:%u", userId, lid));
+ DocumentId docId(vespalib::make_string("id:ns:testdoc:n=%u:%u", userId, lid));
return docId.getGlobalId();
}
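
The second createGid() overload keeps an n=<number> key/value pair, which pins the document's location to the given user id so that ids sharing a userId are co-located in the same bucket subtree, while the first overload now only varies the user-specified suffix. A small sketch mirroring both helpers (names reused for illustration only; stringfmt.h is the same header the diff adds elsewhere, documentid.h path assumed):

    #include <vespa/document/base/documentid.h>  // assumed header location
    #include <vespa/vespalib/util/stringfmt.h>
    #include <cstdint>

    document::GlobalId gidForLid(uint32_t lid) {
        document::DocumentId docId(vespalib::make_string("id:ns:testdoc::%u", lid));
        return docId.getGlobalId();
    }

    document::GlobalId gidForUserAndLid(uint32_t userId, uint32_t lid) {
        // n=<number> fixes the location bits, so documents with the same userId
        // land in the same bucket subtree.
        document::DocumentId docId(vespalib::make_string("id:ns:testdoc:n=%u:%u", userId, lid));
        return docId.getGlobalId();
    }
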
diff --git a/searchcore/src/tests/proton/feed_and_search/feed_and_search.cpp b/searchcore/src/tests/proton/feed_and_search/feed_and_search.cpp
index 23a87415f7f..4580865b3a4 100644
--- a/searchcore/src/tests/proton/feed_and_search/feed_and_search.cpp
+++ b/searchcore/src/tests/proton/feed_and_search/feed_and_search.cpp
@@ -105,7 +105,7 @@ Schema getSchema() {
Document::UP buildDocument(DocBuilder & doc_builder, int id,
const string &word) {
ostringstream ost;
- ost << "doc::" << id;
+ ost << "id:ns:searchdocument::" << id;
doc_builder.startDocument(ost.str());
doc_builder.startIndexField(field_name)
.addStr(noise).addStr(word).endField();
diff --git a/searchcore/src/tests/proton/feedoperation/feedoperation_test.cpp b/searchcore/src/tests/proton/feedoperation/feedoperation_test.cpp
index 6a9dc42b56d..5a3ed4b7274 100644
--- a/searchcore/src/tests/proton/feedoperation/feedoperation_test.cpp
+++ b/searchcore/src/tests/proton/feedoperation/feedoperation_test.cpp
@@ -145,7 +145,7 @@ TEST("require that toString() on derived classes are meaningful")
uint32_t sub_db_id = 1;
MyStreamHandler stream_handler;
DocumentIdT doc_id_limit = 15;
- DocumentId doc_id("doc:foo:bar");
+ DocumentId doc_id("id:ns:foo:::bar");
DocumentUpdate::SP update(new DocumentUpdate(repo, *DataType::DOCUMENT, doc_id));
EXPECT_EQUAL("DeleteBucket(BucketId(0x0000000000000000), serialNum=0)",
@@ -196,7 +196,7 @@ TEST("require that toString() on derived classes are meaningful")
EXPECT_EQUAL("Remove(null::, BucketId(0x0000000000000000), timestamp=0, dbdId=(subDbId=0, lid=0), "
"prevDbdId=(subDbId=0, lid=0), prevMarkedAsRemoved=false, prevTimestamp=0, serialNum=0)",
RemoveOperation().toString());
- EXPECT_EQUAL("Remove(doc:foo:bar, BucketId(0x000000000000002a), timestamp=10, dbdId=(subDbId=0, lid=0), "
+ EXPECT_EQUAL("Remove(id:ns:foo:::bar, BucketId(0x000000000000002a), timestamp=10, dbdId=(subDbId=0, lid=0), "
"prevDbdId=(subDbId=0, lid=0), prevMarkedAsRemoved=false, prevTimestamp=0, serialNum=0)",
RemoveOperation(bucket_id1, timestamp, doc_id).toString());
@@ -214,7 +214,7 @@ TEST("require that toString() on derived classes are meaningful")
EXPECT_EQUAL("Update(NULL, BucketId(0x0000000000000000), timestamp=0, dbdId=(subDbId=0, lid=0), "
"prevDbdId=(subDbId=0, lid=0), prevMarkedAsRemoved=false, prevTimestamp=0, serialNum=0)",
UpdateOperation().toString());
- EXPECT_EQUAL("Update(doc:foo:bar, BucketId(0x000000000000002a), timestamp=10, dbdId=(subDbId=0, lid=0), "
+ EXPECT_EQUAL("Update(id:ns:foo:::bar, BucketId(0x000000000000002a), timestamp=10, dbdId=(subDbId=0, lid=0), "
"prevDbdId=(subDbId=0, lid=0), prevMarkedAsRemoved=false, prevTimestamp=0, serialNum=0)",
UpdateOperation(bucket_id1, timestamp, update).toString());
diff --git a/searchcore/src/tests/proton/index/fusionrunner_test.cpp b/searchcore/src/tests/proton/index/fusionrunner_test.cpp
index e6cdbf8d6cb..49b452aec2e 100644
--- a/searchcore/src/tests/proton/index/fusionrunner_test.cpp
+++ b/searchcore/src/tests/proton/index/fusionrunner_test.cpp
@@ -143,7 +143,7 @@ void Test::tearDown() {
Document::UP buildDocument(DocBuilder & doc_builder, int id, const string &word) {
vespalib::asciistream ost;
- ost << "doc::" << id;
+ ost << "id:ns:searchdocument::" << id;
doc_builder.startDocument(ost.str());
doc_builder.startIndexField(field_name).addStr(word).endField();
return doc_builder.endDocument();
diff --git a/searchcore/src/tests/proton/index/index_writer/index_writer_test.cpp b/searchcore/src/tests/proton/index/index_writer/index_writer_test.cpp
index d92ac0dcdc2..73919a7c628 100644
--- a/searchcore/src/tests/proton/index/index_writer/index_writer_test.cpp
+++ b/searchcore/src/tests/proton/index/index_writer/index_writer_test.cpp
@@ -89,7 +89,7 @@ struct Fixture
{
}
Document::UP createDoc(uint32_t lid) {
- builder.startDocument(vespalib::make_string("doc:test:%u", lid));
+ builder.startDocument(vespalib::make_string("id:ns:searchdocument::%u", lid));
return builder.endDocument();
}
void put(SerialNum serialNum, const search::DocumentIdT lid) {
diff --git a/searchcore/src/tests/proton/index/indexmanager_test.cpp b/searchcore/src/tests/proton/index/indexmanager_test.cpp
index d92cc62c5a1..80b1f9f0560 100644
--- a/searchcore/src/tests/proton/index/indexmanager_test.cpp
+++ b/searchcore/src/tests/proton/index/indexmanager_test.cpp
@@ -89,7 +89,7 @@ void removeTestData() {
Document::UP buildDocument(DocBuilder &doc_builder, int id,
const string &word) {
vespalib::asciistream ost;
- ost << "doc::" << id;
+ ost << "id:ns:searchdocument::" << id;
doc_builder.startDocument(ost.str());
doc_builder.startIndexField(field_name).addStr(word).endField();
return doc_builder.endDocument();
diff --git a/searchcore/src/tests/proton/matching/matching_test.cpp b/searchcore/src/tests/proton/matching/matching_test.cpp
index e46ed997d0f..3f68b54aca2 100644
--- a/searchcore/src/tests/proton/matching/matching_test.cpp
+++ b/searchcore/src/tests/proton/matching/matching_test.cpp
@@ -192,7 +192,7 @@ struct MyWorld {
// metaStore
for (uint32_t i = 0; i < NUM_DOCS; ++i) {
- document::DocumentId docId(vespalib::make_string("doc::%u", i));
+ document::DocumentId docId(vespalib::make_string("id:ns:searchdocument::%u", i));
const document::GlobalId &gid = docId.getGlobalId();
document::BucketId bucketId(BucketFactory::getBucketId(docId));
uint32_t docSize = 1;
@@ -455,11 +455,11 @@ TEST("require that ranking is performed (multi-threaded)") {
EXPECT_EQUAL(9u, world.matchingStats.docsRanked());
EXPECT_EQUAL(0u, world.matchingStats.docsReRanked());
ASSERT_TRUE(reply->hits.size() == 9u);
- EXPECT_EQUAL(document::DocumentId("doc::900").getGlobalId(), reply->hits[0].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::900").getGlobalId(), reply->hits[0].gid);
EXPECT_EQUAL(900.0, reply->hits[0].metric);
- EXPECT_EQUAL(document::DocumentId("doc::800").getGlobalId(), reply->hits[1].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::800").getGlobalId(), reply->hits[1].gid);
EXPECT_EQUAL(800.0, reply->hits[1].metric);
- EXPECT_EQUAL(document::DocumentId("doc::700").getGlobalId(), reply->hits[2].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::700").getGlobalId(), reply->hits[2].gid);
EXPECT_EQUAL(700.0, reply->hits[2].metric);
EXPECT_GREATER(world.matchingStats.matchTimeAvg(), 0.0000001);
EXPECT_EQUAL(0.0, world.matchingStats.rerankTimeAvg());
@@ -478,15 +478,15 @@ TEST("require that re-ranking is performed (multi-threaded)") {
EXPECT_EQUAL(9u, world.matchingStats.docsRanked());
EXPECT_EQUAL(3u, world.matchingStats.docsReRanked());
ASSERT_TRUE(reply->hits.size() == 9u);
- EXPECT_EQUAL(document::DocumentId("doc::900").getGlobalId(), reply->hits[0].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::900").getGlobalId(), reply->hits[0].gid);
EXPECT_EQUAL(1800.0, reply->hits[0].metric);
- EXPECT_EQUAL(document::DocumentId("doc::800").getGlobalId(), reply->hits[1].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::800").getGlobalId(), reply->hits[1].gid);
EXPECT_EQUAL(1600.0, reply->hits[1].metric);
- EXPECT_EQUAL(document::DocumentId("doc::700").getGlobalId(), reply->hits[2].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::700").getGlobalId(), reply->hits[2].gid);
EXPECT_EQUAL(1400.0, reply->hits[2].metric);
- EXPECT_EQUAL(document::DocumentId("doc::600").getGlobalId(), reply->hits[3].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::600").getGlobalId(), reply->hits[3].gid);
EXPECT_EQUAL(600.0, reply->hits[3].metric);
- EXPECT_EQUAL(document::DocumentId("doc::500").getGlobalId(), reply->hits[4].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::500").getGlobalId(), reply->hits[4].gid);
EXPECT_EQUAL(500.0, reply->hits[4].metric);
EXPECT_GREATER(world.matchingStats.matchTimeAvg(), 0.0000001);
EXPECT_GREATER(world.matchingStats.rerankTimeAvg(), 0.0000001);
@@ -532,15 +532,15 @@ TEST("require that re-ranking is diverse with diversity = 1/1") {
EXPECT_EQUAL(9u, world.matchingStats.docsRanked());
EXPECT_EQUAL(3u, world.matchingStats.docsReRanked());
ASSERT_TRUE(reply->hits.size() == 9u);
- EXPECT_EQUAL(document::DocumentId("doc::900").getGlobalId(), reply->hits[0].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::900").getGlobalId(), reply->hits[0].gid);
EXPECT_EQUAL(1800.0, reply->hits[0].metric);
- EXPECT_EQUAL(document::DocumentId("doc::800").getGlobalId(), reply->hits[1].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::800").getGlobalId(), reply->hits[1].gid);
EXPECT_EQUAL(1600.0, reply->hits[1].metric);
- EXPECT_EQUAL(document::DocumentId("doc::700").getGlobalId(), reply->hits[2].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::700").getGlobalId(), reply->hits[2].gid);
EXPECT_EQUAL(1400.0, reply->hits[2].metric);
- EXPECT_EQUAL(document::DocumentId("doc::600").getGlobalId(), reply->hits[3].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::600").getGlobalId(), reply->hits[3].gid);
EXPECT_EQUAL(600.0, reply->hits[3].metric);
- EXPECT_EQUAL(document::DocumentId("doc::500").getGlobalId(), reply->hits[4].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::500").getGlobalId(), reply->hits[4].gid);
EXPECT_EQUAL(500.0, reply->hits[4].metric);
}
@@ -559,16 +559,16 @@ TEST("require that re-ranking is diverse with diversity = 1/10") {
EXPECT_EQUAL(9u, world.matchingStats.docsRanked());
EXPECT_EQUAL(1u, world.matchingStats.docsReRanked());
ASSERT_TRUE(reply->hits.size() == 9u);
- EXPECT_EQUAL(document::DocumentId("doc::900").getGlobalId(), reply->hits[0].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::900").getGlobalId(), reply->hits[0].gid);
EXPECT_EQUAL(1800.0, reply->hits[0].metric);
//TODO This is of course incorrect until the selectBest method sees everything.
- EXPECT_EQUAL(document::DocumentId("doc::800").getGlobalId(), reply->hits[1].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::800").getGlobalId(), reply->hits[1].gid);
EXPECT_EQUAL(800.0, reply->hits[1].metric);
- EXPECT_EQUAL(document::DocumentId("doc::700").getGlobalId(), reply->hits[2].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::700").getGlobalId(), reply->hits[2].gid);
EXPECT_EQUAL(700.0, reply->hits[2].metric);
- EXPECT_EQUAL(document::DocumentId("doc::600").getGlobalId(), reply->hits[3].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::600").getGlobalId(), reply->hits[3].gid);
EXPECT_EQUAL(600.0, reply->hits[3].metric);
- EXPECT_EQUAL(document::DocumentId("doc::500").getGlobalId(), reply->hits[4].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::500").getGlobalId(), reply->hits[4].gid);
EXPECT_EQUAL(500.0, reply->hits[4].metric);
}
@@ -585,11 +585,11 @@ TEST("require that sortspec can be used (multi-threaded)") {
}
SearchReply::UP reply = world.performSearch(request, threads);
ASSERT_EQUAL(9u, reply->hits.size());
- EXPECT_EQUAL(document::DocumentId("doc::100").getGlobalId(), reply->hits[0].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::100").getGlobalId(), reply->hits[0].gid);
EXPECT_EQUAL(zero_rank_value, reply->hits[0].metric);
- EXPECT_EQUAL(document::DocumentId("doc::200").getGlobalId(), reply->hits[1].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::200").getGlobalId(), reply->hits[1].gid);
EXPECT_EQUAL(zero_rank_value, reply->hits[1].metric);
- EXPECT_EQUAL(document::DocumentId("doc::300").getGlobalId(), reply->hits[2].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::300").getGlobalId(), reply->hits[2].gid);
EXPECT_EQUAL(zero_rank_value, reply->hits[2].metric);
EXPECT_EQUAL(drop_sort_data, reply->sortIndex.empty());
EXPECT_EQUAL(drop_sort_data, reply->sortData.empty());
@@ -911,7 +911,7 @@ TEST("require that same element search works (note that this does not test/use t
SearchRequest::SP request = world.createSameElementRequest("foo", "bar");
SearchReply::UP reply = world.performSearch(request, 1);
ASSERT_EQUAL(1u, reply->hits.size());
- EXPECT_EQUAL(document::DocumentId("doc::20").getGlobalId(), reply->hits[0].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::20").getGlobalId(), reply->hits[0].gid);
}
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/server/documentretriever_test.cpp b/searchcore/src/tests/proton/server/documentretriever_test.cpp
index d3fbaebcffb..d5e40592b12 100644
--- a/searchcore/src/tests/proton/server/documentretriever_test.cpp
+++ b/searchcore/src/tests/proton/server/documentretriever_test.cpp
@@ -22,7 +22,6 @@
#include <vespa/eval/tensor/tensor.h>
#include <vespa/eval/tensor/test/test_utils.h>
#include <vespa/persistence/spi/bucket.h>
-#include <vespa/persistence/spi/result.h>
#include <vespa/persistence/spi/test.h>
#include <vespa/searchcommon/common/schema.h>
#include <vespa/searchcore/proton/documentmetastore/documentmetastorecontext.h>
@@ -121,7 +120,7 @@ const char dyn_wset_field_i[] = "dynamic int wset field";
const char dyn_wset_field_d[] = "dynamic double wset field";
const char dyn_wset_field_s[] = "dynamic string wset field";
const char dyn_wset_field_n[] = "dynamic null wset field";
-const DocumentId doc_id("doc:test:1");
+const DocumentId doc_id("id:ns:type_name::1");
const int32_t static_value = 4;
const int32_t dyn_value_i = 17;
const double dyn_value_d = 42.42;
@@ -144,8 +143,7 @@ struct MyDocumentStore : proton::test::DummyDocumentStore {
~MyDocumentStore() override;
- virtual Document::UP read(DocumentIdT lid,
- const DocumentTypeRepo &r) const override {
+ Document::UP read(DocumentIdT lid, const DocumentTypeRepo &r) const override {
if (lid == 0) {
return Document::UP();
}
@@ -489,8 +487,7 @@ TEST_F("require that position fields are regenerated from zcurves", Fixture) {
EXPECT_EQUAL(-123096000, static_cast<IntFieldValue&>(*x).getValue());
EXPECT_EQUAL(49401000, static_cast<IntFieldValue&>(*y).getValue());
- checkFieldValue<LongFieldValue>(doc->getValue(zcurve_field),
- dynamic_zcurve_value);
+ checkFieldValue<LongFieldValue>(doc->getValue(zcurve_field), dynamic_zcurve_value);
}
TEST_F("require that non-existing lid returns null pointer", Fixture) {
diff --git a/searchcore/src/tests/proton/server/feeddebugger_test.cpp b/searchcore/src/tests/proton/server/feeddebugger_test.cpp
index c54e13f4840..b5bd1cfafa8 100644
--- a/searchcore/src/tests/proton/server/feeddebugger_test.cpp
+++ b/searchcore/src/tests/proton/server/feeddebugger_test.cpp
@@ -65,18 +65,18 @@ TEST("require that setting an environment variable turns on docid-specific"
" debugging.") {
EnvSaver save_lid_env(lid_env_name);
EnvSaver save_docid_env(docid_env_name);
- setenv(docid_env_name, "doc:test:foo,doc:test:bar,doc:test:baz", true);
+ setenv(docid_env_name, "id:ns:type::test:foo,id:ns:type::test:bar,id:ns:type::test:baz", true);
FeedDebugger debugger;
EXPECT_TRUE(debugger.isDebugging());
EXPECT_EQUAL(ns_log::Logger::info,
- debugger.getDebugLevel(1, DocumentId("doc:test:foo")));
+ debugger.getDebugLevel(1, DocumentId("id:ns:type::test:foo")));
EXPECT_EQUAL(ns_log::Logger::info,
- debugger.getDebugLevel(1, DocumentId("doc:test:bar")));
+ debugger.getDebugLevel(1, DocumentId("id:ns:type::test:bar")));
EXPECT_EQUAL(ns_log::Logger::info,
- debugger.getDebugLevel(1, DocumentId("doc:test:baz")));
+ debugger.getDebugLevel(1, DocumentId("id:ns:type::test:baz")));
EXPECT_EQUAL(ns_log::Logger::spam,
- debugger.getDebugLevel(1, DocumentId("doc:test:qux")));
+ debugger.getDebugLevel(1, DocumentId("id:ns:type::test:qux")));
}
} // namespace
diff --git a/searchcore/src/tests/proton/server/feedstates_test.cpp b/searchcore/src/tests/proton/server/feedstates_test.cpp
index f206ffc9b17..96096c0401f 100644
--- a/searchcore/src/tests/proton/server/feedstates_test.cpp
+++ b/searchcore/src/tests/proton/server/feedstates_test.cpp
@@ -100,7 +100,7 @@ struct RemoveOperationContext
};
RemoveOperationContext::RemoveOperationContext(search::SerialNum serial)
- : doc_id("doc:foo:bar"),
+ : doc_id("id:ns:doctypename::bar"),
op(BucketFactory::getBucketId(doc_id), Timestamp(10), doc_id),
str(), packet()
{
diff --git a/searchcore/src/vespa/searchcore/proton/common/feeddebugger.h b/searchcore/src/vespa/searchcore/proton/common/feeddebugger.h
index 3b02c0f2b76..5c582157174 100644
--- a/searchcore/src/vespa/searchcore/proton/common/feeddebugger.h
+++ b/searchcore/src/vespa/searchcore/proton/common/feeddebugger.h
@@ -27,7 +27,7 @@ private:
ns_log::Logger::LogLevel getDebugDebuggerInternal(uint32_t lid, const document::DocumentId * docid) const;
bool _enableDebugging;
std::vector<uint32_t> _debugLidList; // List of lids to dump when feeding/replaying log.
- std::vector<document::DocumentId> _debugDocIdList; // List of docids("doc:bla:blu" to dump when feeding/replaying log.
+ std::vector<document::DocumentId> _debugDocIdList; // List of docids("id:ns:doctype::xyz" to dump when feeding/replaying log.
};
} // namespace proton
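
The docid list fed through the environment variable in the feeddebugger test above is now a comma-separated set of id-scheme strings. A hypothetical helper (not part of FeedDebugger's API) sketching how such a list could be split into the DocumentId objects that _debugDocIdList holds:

    #include <vespa/document/base/documentid.h>  // assumed header location
    #include <sstream>
    #include <string>
    #include <vector>

    std::vector<document::DocumentId> parseDocIdList(const std::string &value) {
        std::vector<document::DocumentId> ids;
        std::istringstream in(value);
        std::string token;
        while (std::getline(in, token, ',')) {
            if (!token.empty()) {
                ids.emplace_back(token.c_str());  // e.g. "id:ns:type::test:foo"
            }
        }
        return ids;
    }
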
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentretrieverbase.cpp b/searchcore/src/vespa/searchcore/proton/server/documentretrieverbase.cpp
index d06319ae7f9..65a4f7e7c4a 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentretrieverbase.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/documentretrieverbase.cpp
@@ -4,16 +4,11 @@
#include <vespa/document/repo/documenttyperepo.h>
#include <vespa/document/datatype/documenttype.h>
#include <vespa/vespalib/stllike/lrucache_map.hpp>
+#include <vespa/vespalib/util/stringfmt.h>
using document::DocumentId;
using document::GlobalId;
-namespace {
-
-const DocumentId docId("doc:test:1");
-
-}
-
namespace proton {
DocumentRetrieverBase::DocumentRetrieverBase(
@@ -30,13 +25,12 @@ DocumentRetrieverBase::DocumentRetrieverBase(
_emptyDoc(),
_hasFields(hasFields)
{
- const document::DocumentType *
- docType(_repo.getDocumentType(_docTypeName.getName()));
- _emptyDoc.reset(new document::Document(*docType, docId));
+ const document::DocumentType * docType(_repo.getDocumentType(_docTypeName.getName()));
+ _emptyDoc.reset(new document::Document(*docType, DocumentId("id:empty:" + _docTypeName.getName() + "::empty")));
_emptyDoc->setRepo(_repo);
}
-DocumentRetrieverBase::~DocumentRetrieverBase() { }
+DocumentRetrieverBase::~DocumentRetrieverBase() = default;
const document::DocumentTypeRepo &
DocumentRetrieverBase::getDocumentTypeRepo() const {
diff --git a/searchlib/src/tests/diskindex/fusion/fusion_test.cpp b/searchlib/src/tests/diskindex/fusion/fusion_test.cpp
index b77df846ebb..1825c00ceda 100644
--- a/searchlib/src/tests/diskindex/fusion/fusion_test.cpp
+++ b/searchlib/src/tests/diskindex/fusion/fusion_test.cpp
@@ -100,7 +100,7 @@ toString(FieldPositionsIterator posItr, bool hasElements = false, bool hasWeight
std::unique_ptr<Document>
make_doc10(DocBuilder &b)
{
- b.startDocument("doc::10");
+ b.startDocument("id:ns:searchdocument::10");
b.startIndexField("f0").
addStr("a").addStr("b").addStr("c").addStr("d").
addStr("e").addStr("f").addStr("z").
@@ -325,7 +325,7 @@ FusionTest::requireThatFusionIsWorking(const vespalib::string &prefix, bool dire
myPushDocument(inv);
pushThreads.sync();
- b.startDocument("doc::11").
+ b.startDocument("id:ns:searchdocument::11").
startIndexField("f3").
startElement(-27).addStr("zz").endElement().
endField();
@@ -335,7 +335,7 @@ FusionTest::requireThatFusionIsWorking(const vespalib::string &prefix, bool dire
myPushDocument(inv);
pushThreads.sync();
- b.startDocument("doc::12").
+ b.startDocument("id:ns:searchdocument::12").
startIndexField("f3").
startElement(0).addStr("zz0").endElement().
endField();
diff --git a/searchlib/src/tests/engine/transportserver/transportserver_test.cpp b/searchlib/src/tests/engine/transportserver/transportserver_test.cpp
index a15e80da0a4..baa581c65f9 100644
--- a/searchlib/src/tests/engine/transportserver/transportserver_test.cpp
+++ b/searchlib/src/tests/engine/transportserver/transportserver_test.cpp
@@ -64,7 +64,7 @@ SyncServer::getDocsums(DocsumRequest::Source request, DocsumClient &)
LOG(info, "responding to docsum request...");
ret.docsums.resize(1);
ret.docsums[0].setData("data", strlen("data"));
- ret.docsums[0].gid = DocumentId(vespalib::make_string("doc::100")).getGlobalId();
+ ret.docsums[0].gid = DocumentId(vespalib::make_string("id:ns:type::100")).getGlobalId();
return reply;
}
@@ -145,7 +145,7 @@ TEST("transportserver") {
ASSERT_TRUE(p != 0);
ASSERT_TRUE(p->GetPCODE() == PCODE_DOCSUM);
FS4Packet_DOCSUM *r = (FS4Packet_DOCSUM*)p;
- EXPECT_EQUAL(r->getGid(), DocumentId("doc::100").getGlobalId());
+ EXPECT_EQUAL(r->getGid(), DocumentId("id:ns:type::100").getGlobalId());
p->Free();
p = q.DequeuePacket(60000, &ctx);
ASSERT_TRUE(p != 0);
diff --git a/searchlib/src/tests/index/docbuilder/docbuilder_test.cpp b/searchlib/src/tests/index/docbuilder/docbuilder_test.cpp
index 019c7096877..f880510647d 100644
--- a/searchlib/src/tests/index/docbuilder/docbuilder_test.cpp
+++ b/searchlib/src/tests/index/docbuilder/docbuilder_test.cpp
@@ -77,11 +77,11 @@ Test::testBuilder()
std::string xml;
{ // empty
- doc = b.startDocument("doc::0").endDocument();
+ doc = b.startDocument("id:ns:searchdocument::0").endDocument();
xml = doc->toXml("");
boost::split(lines, xml, boost::is_any_of("\n"));
itr = lines.begin();
- EXPECT_EQUAL("<document documenttype=\"searchdocument\" documentid=\"doc::0\"/>", *itr++);
+ EXPECT_EQUAL("<document documenttype=\"searchdocument\" documentid=\"id:ns:searchdocument::0\"/>", *itr++);
EXPECT_EQUAL("", *itr++);
EXPECT_TRUE(itr == lines.end());
}
@@ -105,7 +105,7 @@ Test::testBuilder()
&binaryBlob[0] + binaryBlob.size());
raw1w1 += std::string(&binaryBlob[0],
&binaryBlob[0] + binaryBlob.size());
- b.startDocument("doc::1");
+ b.startDocument("id:ns:searchdocument::1");
b.startIndexField("ia").addStr("foo").addStr("bar").addStr("baz").addTermAnnotation("altbaz").endField();
b.startIndexField("ib").startElement().addStr("foo").endElement().
startElement(1).addStr("bar").addStr("baz").endElement().endField();
@@ -289,7 +289,7 @@ Test::testBuilder()
xml = doc->toXml("");
boost::split(lines, xml, boost::is_any_of("\n"));
itr = lines.begin();
- EXPECT_EQUAL("<document documenttype=\"searchdocument\" documentid=\"doc::1\">", *itr++);
+ EXPECT_EQUAL("<document documenttype=\"searchdocument\" documentid=\"id:ns:searchdocument::1\">", *itr++);
EXPECT_EQUAL("<sj>", *itr++);
EXPECT_EQUAL(empty +"<item weight=\"46\" binaryencoding=\"base64\">" +
vespalib::Base64::encode(raw1w1) +
@@ -425,7 +425,7 @@ Test::testBuilder()
#endif
}
{ // create one more to see that everything is cleared
- b.startDocument("doc::2");
+ b.startDocument("id:ns:searchdocument::2");
b.startIndexField("ia").addStr("yes").endField();
b.startAttributeField("aa").addInt(20).endField();
b.startSummaryField("sa").addInt(10).endField();
@@ -433,7 +433,7 @@ Test::testBuilder()
xml = doc->toXml("");
boost::split(lines, xml, boost::is_any_of("\n"));
itr = lines.begin();
- EXPECT_EQUAL("<document documenttype=\"searchdocument\" documentid=\"doc::2\">", *itr++);
+ EXPECT_EQUAL("<document documenttype=\"searchdocument\" documentid=\"id:ns:searchdocument::2\">", *itr++);
EXPECT_EQUAL("<sa>10</sa>", *itr++);
EXPECT_EQUAL("<aa>20</aa>", *itr++);
EXPECT_EQUAL("<ia>yes</ia>", *itr++);
@@ -441,7 +441,7 @@ Test::testBuilder()
EXPECT_TRUE(itr == lines.end());
}
{ // create field with cjk chars
- b.startDocument("doc::3");
+ b.startDocument("id:ns:searchdocument::3");
b.startIndexField("ia").
addStr("我就是那个").
setAutoSpace(false).
@@ -452,7 +452,7 @@ Test::testBuilder()
xml = doc->toXml("");
boost::split(lines, xml, boost::is_any_of("\n"));
itr = lines.begin();
- EXPECT_EQUAL("<document documenttype=\"searchdocument\" documentid=\"doc::3\">", *itr++);
+ EXPECT_EQUAL("<document documenttype=\"searchdocument\" documentid=\"id:ns:searchdocument::3\">", *itr++);
EXPECT_EQUAL("<ia>我就是那个大灰狼</ia>", *itr++);
EXPECT_EQUAL("</document>", *itr++);
EXPECT_TRUE(itr == lines.end());
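
Throughout the DocBuilder-based tests, only the id string passed to startDocument() changes; the builder chain itself is untouched. A minimal sketch of the pattern, assuming a DocBuilder whose schema defines an index field "f0" as in the inverter tests below (header paths assumed):

    #include <vespa/searchlib/index/docbuilder.h>  // assumed header location
    #include <vespa/vespalib/util/stringfmt.h>
    #include <cstdint>

    document::Document::UP
    makeDoc(search::index::DocBuilder &b, uint32_t id)
    {
        b.startDocument(vespalib::make_string("id:ns:searchdocument::%u", id));
        b.startIndexField("f0").addStr("a").addStr("b").endField();
        return b.endDocument();
    }
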
diff --git a/searchlib/src/tests/memoryindex/document_inverter/document_inverter_test.cpp b/searchlib/src/tests/memoryindex/document_inverter/document_inverter_test.cpp
index 38862dfe94b..3f798df3c05 100644
--- a/searchlib/src/tests/memoryindex/document_inverter/document_inverter_test.cpp
+++ b/searchlib/src/tests/memoryindex/document_inverter/document_inverter_test.cpp
@@ -28,7 +28,7 @@ namespace {
Document::UP
makeDoc10(DocBuilder &b)
{
- b.startDocument("doc::10");
+ b.startDocument("id:ns:searchdocument::10");
b.startIndexField("f0").
addStr("a").addStr("b").addStr("c").addStr("d").
endField();
@@ -38,7 +38,7 @@ makeDoc10(DocBuilder &b)
Document::UP
makeDoc11(DocBuilder &b)
{
- b.startDocument("doc::11");
+ b.startDocument("id:ns:searchdocument::11");
b.startIndexField("f0").
addStr("a").addStr("b").addStr("e").addStr("f").
endField();
@@ -51,7 +51,7 @@ makeDoc11(DocBuilder &b)
Document::UP
makeDoc12(DocBuilder &b)
{
- b.startDocument("doc::12");
+ b.startDocument("id:ns:searchdocument::12");
b.startIndexField("f0").
addStr("h").addStr("doc12").
endField();
@@ -61,7 +61,7 @@ makeDoc12(DocBuilder &b)
Document::UP
makeDoc13(DocBuilder &b)
{
- b.startDocument("doc::13");
+ b.startDocument("id:ns:searchdocument::13");
b.startIndexField("f0").
addStr("i").addStr("doc13").
endField();
@@ -71,7 +71,7 @@ makeDoc13(DocBuilder &b)
Document::UP
makeDoc14(DocBuilder &b)
{
- b.startDocument("doc::14");
+ b.startDocument("id:ns:searchdocument::14");
b.startIndexField("f0").
addStr("j").addStr("doc14").
endField();
@@ -81,7 +81,7 @@ makeDoc14(DocBuilder &b)
Document::UP
makeDoc15(DocBuilder &b)
{
- b.startDocument("doc::15");
+ b.startDocument("id:ns:searchdocument::15");
return b.endDocument();
}
diff --git a/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp b/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp
index ac1735e0549..512e1bd2051 100644
--- a/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp
+++ b/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp
@@ -937,7 +937,7 @@ TEST_F(BasicInverterTest, require_that_inversion_is_working)
{
Document::UP doc;
- _b.startDocument("doc::10");
+ _b.startDocument("id:ns:searchdocument::10");
_b.startIndexField("f0").
addStr("a").addStr("b").addStr("c").addStr("d").
endField();
@@ -947,7 +947,7 @@ TEST_F(BasicInverterTest, require_that_inversion_is_working)
myPushDocument(_inv);
_pushThreads.sync();
- _b.startDocument("doc::20");
+ _b.startDocument("id:ns:searchdocument::20");
_b.startIndexField("f0").
addStr("a").addStr("a").addStr("b").addStr("c").addStr("d").
endField();
@@ -957,7 +957,7 @@ TEST_F(BasicInverterTest, require_that_inversion_is_working)
myPushDocument(_inv);
_pushThreads.sync();
- _b.startDocument("doc::30");
+ _b.startDocument("id:ns:searchdocument::30");
_b.startIndexField("f0").
addStr("a").addStr("b").addStr("c").addStr("d").
addStr("e").addStr("f").
@@ -988,7 +988,7 @@ TEST_F(BasicInverterTest, require_that_inversion_is_working)
myPushDocument(_inv);
_pushThreads.sync();
- _b.startDocument("doc::40");
+ _b.startDocument("id:ns:searchdocument::40");
_b.startIndexField("f0").
addStr("a").addStr("a").addStr("b").addStr("c").addStr("a").
addStr("e").addStr("f").
@@ -999,7 +999,7 @@ TEST_F(BasicInverterTest, require_that_inversion_is_working)
myPushDocument(_inv);
_pushThreads.sync();
- _b.startDocument("doc::999");
+ _b.startDocument("id:ns:searchdocument::999");
_b.startIndexField("f0").
addStr("this").addStr("is").addStr("_a_").addStr("test").
addStr("for").addStr("insertion").addStr("speed").addStr("with").
@@ -1137,7 +1137,7 @@ TEST_F(BasicInverterTest, require_that_inverter_handles_remove_via_document_remo
{
Document::UP doc;
- _b.startDocument("doc::1");
+ _b.startDocument("id:ns:searchdocument::1");
_b.startIndexField("f0").addStr("a").addStr("b").endField();
_b.startIndexField("f1").addStr("a").addStr("c").endField();
Document::UP doc1 = _b.endDocument();
@@ -1146,7 +1146,7 @@ TEST_F(BasicInverterTest, require_that_inverter_handles_remove_via_document_remo
myPushDocument(_inv);
_pushThreads.sync();
- _b.startDocument("doc::2");
+ _b.startDocument("id:ns:searchdocument::2");
_b.startIndexField("f0").addStr("b").addStr("c").endField();
Document::UP doc2 = _b.endDocument();
_inv.invertDocument(2, *doc2.get());
@@ -1189,7 +1189,7 @@ TEST_F(UriInverterTest, require_that_uri_indexing_is_working)
{
Document::UP doc;
- _b.startDocument("doc::10");
+ _b.startDocument("id:ns:searchdocument::10");
_b.startIndexField("iu").
startSubField("all").
addUrlTokenizedString("http://www.example.com:81/fluke?ab=2#4").
@@ -1378,7 +1378,7 @@ TEST_F(CjkInverterTest, require_that_cjk_indexing_is_working)
{
Document::UP doc;
- _b.startDocument("doc::10");
+ _b.startDocument("id:ns:searchdocument::10");
_b.startIndexField("f0").
addStr("我就是那个").
setAutoSpace(false).
diff --git a/searchlib/src/tests/memoryindex/field_inverter/field_inverter_test.cpp b/searchlib/src/tests/memoryindex/field_inverter/field_inverter_test.cpp
index 72a8f6ed239..d3a286b3c1b 100644
--- a/searchlib/src/tests/memoryindex/field_inverter/field_inverter_test.cpp
+++ b/searchlib/src/tests/memoryindex/field_inverter/field_inverter_test.cpp
@@ -26,7 +26,7 @@ namespace {
Document::UP
makeDoc10(DocBuilder &b)
{
- b.startDocument("doc::10");
+ b.startDocument("id:ns:searchdocument::10");
b.startIndexField("f0").
addStr("a").addStr("b").addStr("c").addStr("d").
endField();
@@ -36,7 +36,7 @@ makeDoc10(DocBuilder &b)
Document::UP
makeDoc11(DocBuilder &b)
{
- b.startDocument("doc::11");
+ b.startDocument("id:ns:searchdocument::11");
b.startIndexField("f0").
addStr("a").addStr("b").addStr("e").addStr("f").
endField();
@@ -49,7 +49,7 @@ makeDoc11(DocBuilder &b)
Document::UP
makeDoc12(DocBuilder &b)
{
- b.startDocument("doc::12");
+ b.startDocument("id:ns:searchdocument::12");
b.startIndexField("f0").
addStr("h").addStr("doc12").
endField();
@@ -59,7 +59,7 @@ makeDoc12(DocBuilder &b)
Document::UP
makeDoc13(DocBuilder &b)
{
- b.startDocument("doc::13");
+ b.startDocument("id:ns:searchdocument::13");
b.startIndexField("f0").
addStr("i").addStr("doc13").
endField();
@@ -69,7 +69,7 @@ makeDoc13(DocBuilder &b)
Document::UP
makeDoc14(DocBuilder &b)
{
- b.startDocument("doc::14");
+ b.startDocument("id:ns:searchdocument::14");
b.startIndexField("f0").
addStr("j").addStr("doc14").
endField();
@@ -79,14 +79,14 @@ makeDoc14(DocBuilder &b)
Document::UP
makeDoc15(DocBuilder &b)
{
- b.startDocument("doc::15");
+ b.startDocument("id:ns:searchdocument::15");
return b.endDocument();
}
Document::UP
makeDoc16(DocBuilder &b)
{
- b.startDocument("doc::16");
+ b.startDocument("id:ns:searchdocument::16");
b.startIndexField("f0").addStr("foo").addStr("bar").addStr("baz").
addTermAnnotation("altbaz").addStr("y").addTermAnnotation("alty").
addStr("z").endField();
@@ -96,7 +96,7 @@ makeDoc16(DocBuilder &b)
Document::UP
makeDoc17(DocBuilder &b)
{
- b.startDocument("doc::17");
+ b.startDocument("id:ns:searchdocument::17");
b.startIndexField("f1").addStr("foo0").addStr("bar0").endField();
b.startIndexField("f2").startElement(1).addStr("foo").addStr("bar").endElement().startElement(1).addStr("bar").endElement().endField();
b.startIndexField("f3").startElement(3).addStr("foo2").addStr("bar2").endElement().startElement(4).addStr("bar2").endElement().endField();
diff --git a/searchlib/src/tests/memoryindex/memory_index/memory_index_test.cpp b/searchlib/src/tests/memoryindex/memory_index/memory_index_test.cpp
index dd4bb2cef7f..a320c4a0641 100644
--- a/searchlib/src/tests/memoryindex/memory_index/memory_index_test.cpp
+++ b/searchlib/src/tests/memoryindex/memory_index/memory_index_test.cpp
@@ -81,7 +81,7 @@ struct Index {
}
Index &doc(uint32_t id) {
docid = id;
- builder.startDocument(vespalib::make_string("doc::%u", id));
+ builder.startDocument(vespalib::make_string("id:ns:searchdocument::%u", id));
return *this;
}
Index &field(const std::string &name) {
diff --git a/searchlib/src/tests/memoryindex/url_field_inverter/url_field_inverter_test.cpp b/searchlib/src/tests/memoryindex/url_field_inverter/url_field_inverter_test.cpp
index 2151a44a66d..86c58c11c09 100644
--- a/searchlib/src/tests/memoryindex/url_field_inverter/url_field_inverter_test.cpp
+++ b/searchlib/src/tests/memoryindex/url_field_inverter/url_field_inverter_test.cpp
@@ -27,7 +27,7 @@ const vespalib::string url = "url";
Document::UP
makeDoc10Single(DocBuilder &b)
{
- b.startDocument("doc::10");
+ b.startDocument("id:ns:searchdocument::10");
b.startIndexField("url").
startSubField("all").
addUrlTokenizedString("http://www.example.com:81/fluke?ab=2#4").
@@ -58,7 +58,7 @@ makeDoc10Single(DocBuilder &b)
Document::UP
makeDoc10Array(DocBuilder &b)
{
- b.startDocument("doc::10");
+ b.startDocument("id:ns:searchdocument::10");
b.startIndexField("url").
startElement(1).
startSubField("all").
@@ -114,7 +114,7 @@ makeDoc10Array(DocBuilder &b)
Document::UP
makeDoc10WeightedSet(DocBuilder &b)
{
- b.startDocument("doc::10");
+ b.startDocument("id:ns:searchdocument::10");
b.startIndexField("url").
startElement(4).
startSubField("all").
@@ -170,7 +170,7 @@ makeDoc10WeightedSet(DocBuilder &b)
Document::UP
makeDoc10Empty(DocBuilder &b)
{
- b.startDocument("doc::10");
+ b.startDocument("id:ns:searchdocument::10");
return b.endDocument();
}
diff --git a/storage/src/tests/distributor/pendingmessagetrackertest.cpp b/storage/src/tests/distributor/pendingmessagetrackertest.cpp
index a4a883d7059..e1bca1a1890 100644
--- a/storage/src/tests/distributor/pendingmessagetrackertest.cpp
+++ b/storage/src/tests/distributor/pendingmessagetrackertest.cpp
@@ -173,7 +173,7 @@ TEST_F(PendingMessageTrackerTest, simple) {
std::ostringstream ost;
tracker.reportStatus(ost, framework::HttpUrlPath("/pendingmessages?order=bucket"));
- EXPECT_THAT(ost.str(), Not(HasSubstr("doc:")));
+ EXPECT_THAT(ost.str(), Not(HasSubstr("id:")));
}
}
diff --git a/storage/src/tests/distributor/putoperationtest.cpp b/storage/src/tests/distributor/putoperationtest.cpp
index 99c6ec3d71e..d882d17841e 100644
--- a/storage/src/tests/distributor/putoperationtest.cpp
+++ b/storage/src/tests/distributor/putoperationtest.cpp
@@ -85,7 +85,7 @@ public:
}
Document::SP createDummyDocument(const char* ns, const char* id) const {
- return std::make_shared<Document>(doc_type(), DocumentId(DocIdString(ns, id)));
+ return std::make_shared<Document>(doc_type(), DocumentId(vespalib::make_string("id:%s:testdoctype1::%s", ns, id)));
}
std::shared_ptr<api::PutCommand> createPut(Document::SP doc) const {
@@ -97,7 +97,7 @@ PutOperationTest::~PutOperationTest() = default;
document::BucketId
PutOperationTest::createAndSendSampleDocument(uint32_t timeout) {
- auto doc = std::make_shared<Document>(doc_type(), DocumentId(DocIdString("test", "test")));
+ auto doc = std::make_shared<Document>(doc_type(), DocumentId("id:test:testdoctype1::"));
document::BucketId id = getExternalOperationHandler().getBucketId(doc->getId());
addIdealNodes(id);
@@ -123,13 +123,13 @@ TEST_F(PutOperationTest, simple) {
setupDistributor(1, 1, "storage:1 distributor:1");
createAndSendSampleDocument(180);
- ASSERT_EQ("Put(BucketId(0x4000000000008b13), "
- "doc:test:test, timestamp 100, size 36) => 0",
+ ASSERT_EQ("Put(BucketId(0x4000000000001dd4), "
+ "id:test:testdoctype1::, timestamp 100, size 45) => 0",
_sender.getCommands(true, true));
sendReply();
- ASSERT_EQ("PutReply(doc:test:test, BucketId(0x0000000000000000), "
+ ASSERT_EQ("PutReply(id:test:testdoctype1::, BucketId(0x0000000000000000), "
"timestamp 100) ReturnCode(NONE)",
_sender.getLastReply());
}
@@ -141,7 +141,7 @@ TEST_F(PutOperationTest, bucket_database_gets_special_entry_when_CreateBucket_se
sendPut(createPut(doc));
// Database updated before CreateBucket is sent
- ASSERT_EQ("BucketId(0x4000000000008b13) : "
+ ASSERT_EQ("BucketId(0x4000000000008f09) : "
"node(idx=0,crc=0x1,docs=0/0,bytes=0/0,trusted=true,active=true,ready=false)",
dumpBucket(getExternalOperationHandler().getBucketId(doc->getId())));
@@ -153,16 +153,16 @@ TEST_F(PutOperationTest, send_inline_split_before_put_if_bucket_too_large) {
getConfig().setSplitCount(1024);
getConfig().setSplitSize(1000000);
- addNodesToBucketDB(document::BucketId(0x4000000000002a52), "0=10000/10000/10000/t");
+ addNodesToBucketDB(document::BucketId(0x4000000000000593), "0=10000/10000/10000/t");
sendPut(createPut(createDummyDocument("test", "uri")));
- ASSERT_EQ("SplitBucketCommand(BucketId(0x4000000000002a52)Max doc count: "
+ ASSERT_EQ("SplitBucketCommand(BucketId(0x4000000000000593)Max doc count: "
"1024, Max total doc size: 1000000) Reasons to start: "
"[Splitting bucket because its maximum size (10000 b, 10000 docs, 10000 meta, 10000 b total) is "
"higher than the configured limit of (1000000, 1024)] => 0,"
- "Put(BucketId(0x4000000000002a52), doc:test:uri, timestamp 100, "
- "size 35) => 0",
+ "Put(BucketId(0x4000000000000593), id:test:testdoctype1::uri, timestamp 100, "
+ "size 48) => 0",
_sender.getCommands(true, true));
}
@@ -171,12 +171,12 @@ TEST_F(PutOperationTest, do_not_send_inline_split_if_not_configured) {
getConfig().setSplitCount(1024);
getConfig().setDoInlineSplit(false);
- addNodesToBucketDB(document::BucketId(0x4000000000002a52), "0=10000/10000/10000/t");
+ addNodesToBucketDB(document::BucketId(0x4000000000000593), "0=10000/10000/10000/t");
sendPut(createPut(createDummyDocument("test", "uri")));
- ASSERT_EQ("Put(BucketId(0x4000000000002a52), doc:test:uri, timestamp 100, "
- "size 35) => 0",
+ ASSERT_EQ("Put(BucketId(0x4000000000000593), id:test:testdoctype1::uri, timestamp 100, "
+ "size 48) => 0",
_sender.getCommands(true, true));
}
@@ -184,22 +184,22 @@ TEST_F(PutOperationTest, node_removed_on_reply) {
setupDistributor(2, 2, "storage:2 distributor:1");
createAndSendSampleDocument(180);
- ASSERT_EQ("Put(BucketId(0x4000000000008b13), "
- "doc:test:test, timestamp 100, size 36) => 1,"
- "Put(BucketId(0x4000000000008b13), "
- "doc:test:test, timestamp 100, size 36) => 0",
+ ASSERT_EQ("Put(BucketId(0x4000000000001dd4), "
+ "id:test:testdoctype1::, timestamp 100, size 45) => 0,"
+ "Put(BucketId(0x4000000000001dd4), "
+ "id:test:testdoctype1::, timestamp 100, size 45) => 1",
_sender.getCommands(true, true));
- getExternalOperationHandler().removeNodeFromDB(makeDocumentBucket(document::BucketId(16, 0x8b13)), 0);
+ getExternalOperationHandler().removeNodeFromDB(makeDocumentBucket(document::BucketId(16, 0x1dd4)), 0);
sendReply(0);
sendReply(1);
- ASSERT_EQ("PutReply(doc:test:test, BucketId(0x0000000000000000), "
+ ASSERT_EQ("PutReply(id:test:testdoctype1::, BucketId(0x0000000000000000), "
"timestamp 100) ReturnCode(BUCKET_DELETED, "
- "Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000008b13)) was deleted from nodes [0] "
+ "Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000001dd4)) was deleted from nodes [0] "
"after message was sent but before it was done. "
- "Sent to [1,0])",
+ "Sent to [0,1])",
_sender.getLastReply());
}
@@ -210,7 +210,7 @@ TEST_F(PutOperationTest, storage_failed) {
sendReply(-1, api::ReturnCode::INTERNAL_FAILURE);
- ASSERT_EQ("PutReply(doc:test:test, BucketId(0x0000000000000000), "
+ ASSERT_EQ("PutReply(id:test:testdoctype1::, BucketId(0x0000000000000000), "
"timestamp 100) ReturnCode(INTERNAL_FAILURE)",
_sender.getLastReply(true));
}
@@ -221,22 +221,22 @@ TEST_F(PutOperationTest, multiple_copies) {
Document::SP doc(createDummyDocument("test", "test"));
sendPut(createPut(doc));
- ASSERT_EQ("Create bucket => 3,Create bucket => 1,"
- "Create bucket => 0,Put => 3,Put => 1,Put => 0",
+ ASSERT_EQ("Create bucket => 3,Create bucket => 2,"
+ "Create bucket => 1,Put => 3,Put => 2,Put => 1",
_sender.getCommands(true));
for (uint32_t i = 0; i < 6; i++) {
sendReply(i);
}
- ASSERT_EQ("PutReply(doc:test:test, BucketId(0x0000000000000000), "
+ ASSERT_EQ("PutReply(id:test:testdoctype1::test, BucketId(0x0000000000000000), "
"timestamp 100) ReturnCode(NONE)",
_sender.getLastReply(true));
- ASSERT_EQ("BucketId(0x4000000000008b13) : "
+ ASSERT_EQ("BucketId(0x4000000000008f09) : "
"node(idx=3,crc=0x1,docs=2/4,bytes=3/5,trusted=true,active=false,ready=false), "
- "node(idx=1,crc=0x1,docs=2/4,bytes=3/5,trusted=true,active=false,ready=false), "
- "node(idx=0,crc=0x1,docs=2/4,bytes=3/5,trusted=true,active=false,ready=false)",
+ "node(idx=2,crc=0x1,docs=2/4,bytes=3/5,trusted=true,active=false,ready=false), "
+ "node(idx=1,crc=0x1,docs=2/4,bytes=3/5,trusted=true,active=false,ready=false)",
dumpBucket(getExternalOperationHandler().getBucketId(doc->getId())));
}
@@ -245,8 +245,8 @@ TEST_F(PutOperationTest, multiple_copies_early_return_primary_required) {
sendPut(createPut(createDummyDocument("test", "test")));
- ASSERT_EQ("Create bucket => 3,Create bucket => 1,"
- "Create bucket => 0,Put => 3,Put => 1,Put => 0",
+ ASSERT_EQ("Create bucket => 3,Create bucket => 2,"
+ "Create bucket => 1,Put => 3,Put => 2,Put => 1",
_sender.getCommands(true));
// Reply to 2 CreateBucket, including primary
@@ -258,7 +258,7 @@ TEST_F(PutOperationTest, multiple_copies_early_return_primary_required) {
sendReply(3 + i);
}
- ASSERT_EQ("PutReply(doc:test:test, BucketId(0x0000000000000000), "
+ ASSERT_EQ("PutReply(id:test:testdoctype1::test, BucketId(0x0000000000000000), "
"timestamp 100) ReturnCode(NONE)",
_sender.getLastReply());
}
@@ -268,8 +268,8 @@ TEST_F(PutOperationTest, multiple_copies_early_return_primary_not_required) {
sendPut(createPut(createDummyDocument("test", "test")));
- ASSERT_EQ("Create bucket => 3,Create bucket => 1,"
- "Create bucket => 0,Put => 3,Put => 1,Put => 0",
+ ASSERT_EQ("Create bucket => 3,Create bucket => 2,"
+ "Create bucket => 1,Put => 3,Put => 2,Put => 1",
_sender.getCommands(true));
// Reply only to 2 nodes (but not the primary)
@@ -280,7 +280,7 @@ TEST_F(PutOperationTest, multiple_copies_early_return_primary_not_required) {
sendReply(3 + i); // Put
}
- ASSERT_EQ("PutReply(doc:test:test, BucketId(0x0000000000000000), "
+ ASSERT_EQ("PutReply(id:test:testdoctype1::test, BucketId(0x0000000000000000), "
"timestamp 100) ReturnCode(NONE)",
_sender.getLastReply());
}
@@ -290,8 +290,8 @@ TEST_F(PutOperationTest, multiple_copies_early_return_primary_required_not_done)
sendPut(createPut(createDummyDocument("test", "test")));
- ASSERT_EQ("Create bucket => 3,Create bucket => 1,"
- "Create bucket => 0,Put => 3,Put => 1,Put => 0",
+ ASSERT_EQ("Create bucket => 3,Create bucket => 2,"
+ "Create bucket => 1,Put => 3,Put => 2,Put => 1",
_sender.getCommands(true));
// Reply only to 2 nodes (but not the primary)
@@ -309,8 +309,8 @@ TEST_F(PutOperationTest, do_not_revert_on_failure_after_early_return) {
sendPut(createPut(createDummyDocument("test", "test")));
- ASSERT_EQ("Create bucket => 3,Create bucket => 1,"
- "Create bucket => 0,Put => 3,Put => 1,Put => 0",
+ ASSERT_EQ("Create bucket => 3,Create bucket => 2,"
+ "Create bucket => 1,Put => 3,Put => 2,Put => 1",
_sender.getCommands(true));
for (uint32_t i = 0; i < 3; i++) {
@@ -320,14 +320,14 @@ TEST_F(PutOperationTest, do_not_revert_on_failure_after_early_return) {
sendReply(3 + i); // Put
}
- ASSERT_EQ("PutReply(doc:test:test, BucketId(0x0000000000000000), "
+ ASSERT_EQ("PutReply(id:test:testdoctype1::test, BucketId(0x0000000000000000), "
"timestamp 100) ReturnCode(NONE)",
_sender.getLastReply());
sendReply(5, api::ReturnCode::INTERNAL_FAILURE);
// Should not be any revert commands sent
- ASSERT_EQ("Create bucket => 3,Create bucket => 1,"
- "Create bucket => 0,Put => 3,Put => 1,Put => 0",
+ ASSERT_EQ("Create bucket => 3,Create bucket => 2,"
+ "Create bucket => 1,Put => 3,Put => 2,Put => 1",
_sender.getCommands(true));
}
@@ -336,7 +336,7 @@ TEST_F(PutOperationTest, revert_successful_copies_when_one_fails) {
createAndSendSampleDocument(180);
- ASSERT_EQ("Put => 3,Put => 1,Put => 0", _sender.getCommands(true));
+ ASSERT_EQ("Put => 0,Put => 2,Put => 1", _sender.getCommands(true));
for (uint32_t i = 0; i < 2; i++) {
sendReply(i);
@@ -344,12 +344,12 @@ TEST_F(PutOperationTest, revert_successful_copies_when_one_fails) {
sendReply(2, api::ReturnCode::INTERNAL_FAILURE);
- ASSERT_EQ("PutReply(doc:test:test, "
+ ASSERT_EQ("PutReply(id:test:testdoctype1::, "
"BucketId(0x0000000000000000), timestamp 100) "
"ReturnCode(INTERNAL_FAILURE)",
_sender.getLastReply(true));
- ASSERT_EQ("Revert => 3,Revert => 1", _sender.getCommands(true, false, 3));
+ ASSERT_EQ("Revert => 0,Revert => 2", _sender.getCommands(true, false, 3));
}
TEST_F(PutOperationTest, no_revert_if_revert_disabled) {
@@ -361,7 +361,7 @@ TEST_F(PutOperationTest, no_revert_if_revert_disabled) {
createAndSendSampleDocument(180);
- ASSERT_EQ("Put => 3,Put => 1,Put => 0", _sender.getCommands(true));
+ ASSERT_EQ("Put => 0,Put => 2,Put => 1", _sender.getCommands(true));
for (uint32_t i = 0; i < 2; i++) {
sendReply(i);
@@ -369,7 +369,7 @@ TEST_F(PutOperationTest, no_revert_if_revert_disabled) {
sendReply(2, api::ReturnCode::INTERNAL_FAILURE);
- ASSERT_EQ("PutReply(doc:test:test, "
+ ASSERT_EQ("PutReply(id:test:testdoctype1::, "
"BucketId(0x0000000000000000), timestamp 100) "
"ReturnCode(INTERNAL_FAILURE)",
_sender.getLastReply(true));
@@ -405,7 +405,7 @@ TEST_F(PutOperationTest, do_not_send_CreateBucket_if_already_pending) {
TEST_F(PutOperationTest, no_storage_nodes) {
setupDistributor(2, 1, "storage:0 distributor:1");
createAndSendSampleDocument(180);
- ASSERT_EQ("PutReply(doc:test:test, BucketId(0x0000000000000000), "
+ ASSERT_EQ("PutReply(id:test:testdoctype1::, BucketId(0x0000000000000000), "
"timestamp 100) ReturnCode(NOT_CONNECTED, "
"Can't store document: No storage nodes available)",
_sender.getLastReply(true));
@@ -492,10 +492,10 @@ PutOperationTest::getNodes(const std::string& infoString) {
TEST_F(PutOperationTest, target_nodes) {
setupDistributor(2, 6, "storage:6 distributor:1");
- // Ideal state of bucket is 1,3.
- ASSERT_EQ("target( 1 3 ) create( 1 3 )", getNodes(""));
- ASSERT_EQ("target( 1 3 ) create( 3 )", getNodes("1-1-true"));
- ASSERT_EQ("target( 1 3 ) create( 3 )", getNodes("1-1-false"));
+ // Ideal state of bucket is 1,2.
+ ASSERT_EQ("target( 1 2 ) create( 1 2 )", getNodes(""));
+ ASSERT_EQ("target( 1 2 ) create( 2 )", getNodes("1-1-true"));
+ ASSERT_EQ("target( 1 2 ) create( 2 )", getNodes("1-1-false"));
ASSERT_EQ("target( 3 4 5 ) create( )", getNodes("3-1-true,4-1-true,5-1-true"));
ASSERT_EQ("target( 3 4 ) create( )", getNodes("3-2-true,4-2-true,5-1-false"));
ASSERT_EQ("target( 1 3 4 ) create( )", getNodes("3-2-true,4-2-true,1-1-false"));
@@ -513,7 +513,7 @@ TEST_F(PutOperationTest, replica_not_resurrected_in_db_when_node_down_in_active_
sendPut(createPut(doc));
- ASSERT_EQ("Put => 1,Put => 0,Put => 2", _sender.getCommands(true));
+ ASSERT_EQ("Put => 1,Put => 2,Put => 0", _sender.getCommands(true));
enableDistributorClusterState("distributor:1 storage:3 .1.s:d .2.s:m");
addNodesToBucketDB(bId, "0=1/2/3/t"); // This will actually remove node #1.
@@ -522,8 +522,8 @@ TEST_F(PutOperationTest, replica_not_resurrected_in_db_when_node_down_in_active_
sendReply(1, api::ReturnCode::OK, api::BucketInfo(5, 6, 7));
sendReply(2, api::ReturnCode::OK, api::BucketInfo(7, 8, 9));
- ASSERT_EQ("BucketId(0x4000000000002a52) : "
- "node(idx=0,crc=0x5,docs=6/6,bytes=7/7,trusted=true,active=false,ready=false)",
+ ASSERT_EQ("BucketId(0x4000000000000593) : "
+ "node(idx=0,crc=0x7,docs=8/8,bytes=9/9,trusted=true,active=false,ready=false)",
dumpBucket(getExternalOperationHandler().getBucketId(doc->getId())));
}
@@ -535,7 +535,7 @@ TEST_F(PutOperationTest, replica_not_resurrected_in_db_when_node_down_in_pending
addNodesToBucketDB(bucket, "0=1/2/3/t,1=1/2/3/t,2=1/2/3/t");
sendPut(createPut(doc));
- ASSERT_EQ("Put => 1,Put => 0,Put => 2", _sender.getCommands(true));
+ ASSERT_EQ("Put => 1,Put => 2,Put => 0", _sender.getCommands(true));
// Trigger a pending (but not completed) cluster state transition where content
// node 0 is down. This will prune its replica from the DB. We assume that the
// downed node managed to send off a reply to the Put before it went down, and
@@ -555,7 +555,7 @@ TEST_F(PutOperationTest, replica_not_resurrected_in_db_when_node_down_in_pending
sendReply(1, api::ReturnCode::OK, api::BucketInfo(6, 7, 8));
sendReply(2, api::ReturnCode::OK, api::BucketInfo(9, 8, 7));
- ASSERT_EQ("BucketId(0x4000000000002a52) : "
+ ASSERT_EQ("BucketId(0x4000000000000593) : "
"node(idx=1,crc=0x5,docs=6/6,bytes=7/7,trusted=true,active=false,ready=false)",
dumpBucket(bucket));
}
@@ -574,7 +574,7 @@ TEST_F(PutOperationTest, put_is_failed_with_busy_if_target_down_in_pending_state
sendPut(createPut(doc));
EXPECT_EQ("", _sender.getCommands(true));
- EXPECT_EQ("PutReply(doc:test:test, BucketId(0x0000000000000000), "
+ EXPECT_EQ("PutReply(id:test:testdoctype1::test, BucketId(0x0000000000000000), "
"timestamp 100) ReturnCode(BUSY, "
"One or more target content nodes are unavailable in the pending cluster state)",
_sender.getLastReply(true));
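
The expected bucket ids and node orderings in these distributor tests change because the new id strings hash to different GlobalIds, and both the bucket (e.g. 0x4000000000008b13 becoming 0x4000000000001dd4 for the sample document) and its ideal content nodes are derived from that hash. A small sketch of the underlying difference, assuming the legacy doc scheme is still parseable at this revision, as the removed lines show (header path assumed):

    #include <vespa/document/base/documentid.h>  // assumed header location
    #include <cassert>

    int main() {
        document::DocumentId legacy("doc:test:test");            // old sample id
        document::DocumentId current("id:test:testdoctype1::");  // new sample id
        // Different id strings give different GlobalIds, hence the different
        // buckets and ideal-node orderings in the expectations above.
        assert(!(legacy.getGlobalId() == current.getGlobalId()));
        return 0;
    }
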
diff --git a/storage/src/tests/distributor/removeoperationtest.cpp b/storage/src/tests/distributor/removeoperationtest.cpp
index bae2395bfa7..c3fcda30bf5 100644
--- a/storage/src/tests/distributor/removeoperationtest.cpp
+++ b/storage/src/tests/distributor/removeoperationtest.cpp
@@ -22,7 +22,7 @@ struct RemoveOperationTest : Test, DistributorTestUtil {
void SetUp() override {
createLinks();
- docId = document::DocumentId(document::DocIdString("test", "uri"));
+ docId = document::DocumentId("id:test:test::uri");
bucketId = getExternalOperationHandler().getBucketId(docId);
enableDistributorClusterState("distributor:1 storage:4");
};
@@ -57,8 +57,7 @@ struct RemoveOperationTest : Test, DistributorTestUtil {
std::unique_ptr<api::StorageReply> reply(removec->makeReply());
auto* removeR = static_cast<api::RemoveReply*>(reply.get());
removeR->setOldTimestamp(oldTimestamp);
- callback.onReceive(_sender,
- std::shared_ptr<api::StorageReply>(reply.release()));
+ callback.onReceive(_sender, std::shared_ptr<api::StorageReply>(reply.release()));
}
void sendRemove() {
@@ -71,13 +70,13 @@ TEST_F(RemoveOperationTest, simple) {
sendRemove();
- ASSERT_EQ("Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ ASSERT_EQ("Remove(BucketId(0x4000000000000593), id:test:test::uri, "
"timestamp 100) => 1",
_sender.getLastCommand());
replyToMessage(*op, -1, 34);
- ASSERT_EQ("RemoveReply(BucketId(0x0000000000000000), doc:test:uri, "
+ ASSERT_EQ("RemoveReply(BucketId(0x0000000000000000), id:test:test::uri, "
"timestamp 100, removed doc from 34) ReturnCode(NONE)",
_sender.getLastReply());
}
@@ -87,13 +86,13 @@ TEST_F(RemoveOperationTest, not_found) {
sendRemove();
- ASSERT_EQ("Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ ASSERT_EQ("Remove(BucketId(0x4000000000000593), id:test:test::uri, "
"timestamp 100) => 1",
_sender.getLastCommand());
replyToMessage(*op, -1, 0);
- ASSERT_EQ("RemoveReply(BucketId(0x0000000000000000), doc:test:uri, "
+ ASSERT_EQ("RemoveReply(BucketId(0x0000000000000000), id:test:test::uri, "
"timestamp 100, not found) ReturnCode(NONE)",
_sender.getLastReply());
}
@@ -103,13 +102,13 @@ TEST_F(RemoveOperationTest, storage_failure) {
sendRemove();
- ASSERT_EQ("Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ ASSERT_EQ("Remove(BucketId(0x4000000000000593), id:test:test::uri, "
"timestamp 100) => 1",
_sender.getLastCommand());
sendReply(*op, -1, api::ReturnCode::INTERNAL_FAILURE);
- ASSERT_EQ("RemoveReply(BucketId(0x0000000000000000), doc:test:uri, "
+ ASSERT_EQ("RemoveReply(BucketId(0x0000000000000000), id:test:test::uri, "
"timestamp 100, not found) ReturnCode(INTERNAL_FAILURE)",
_sender.getLastReply());
}
@@ -118,7 +117,7 @@ TEST_F(RemoveOperationTest, not_in_db) {
sendRemove();
ASSERT_EQ("RemoveReply(BucketId(0x0000000000000000), "
- "doc:test:uri, timestamp 100, not found) ReturnCode(NONE)",
+ "id:test:test::uri, timestamp 100, not found) ReturnCode(NONE)",
_sender.getLastReply());
}
@@ -127,11 +126,11 @@ TEST_F(RemoveOperationTest, multiple_copies) {
sendRemove();
- ASSERT_EQ("Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ ASSERT_EQ("Remove(BucketId(0x4000000000000593), id:test:test::uri, "
"timestamp 100) => 1,"
- "Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ "Remove(BucketId(0x4000000000000593), id:test:test::uri, "
"timestamp 100) => 2,"
- "Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ "Remove(BucketId(0x4000000000000593), id:test:test::uri, "
"timestamp 100) => 3",
_sender.getCommands(true, true));
@@ -140,7 +139,7 @@ TEST_F(RemoveOperationTest, multiple_copies) {
replyToMessage(*op, 2, 75);
ASSERT_EQ("RemoveReply(BucketId(0x0000000000000000), "
- "doc:test:uri, timestamp 100, removed doc from 75) ReturnCode(NONE)",
+ "id:test:test::uri, timestamp 100, removed doc from 75) ReturnCode(NONE)",
_sender.getLastReply());
}
@@ -149,7 +148,7 @@ TEST_F(RemoveOperationTest, can_send_remove_when_all_replica_nodes_retired) {
addNodesToBucketDB(bucketId, "0=123");
sendRemove();
- ASSERT_EQ("Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ ASSERT_EQ("Remove(BucketId(0x4000000000000593), id:test:test::uri, "
"timestamp 100) => 0",
_sender.getLastCommand());
}
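
In these Remove hunks the expected bucket changes from BucketId(0x4000000000002a52) to BucketId(0x4000000000000593) only because the document id does: the fixture computes the bucket from the id (getExternalOperationHandler().getBucketId(docId) in SetUp), so a different id string hashes to a different bucket. Below is a minimal sketch of that dependency outside the fixture, assuming the document library's DocumentId/BucketIdFactory as used elsewhere in the storage code; the header paths and the exact used-bit count are assumptions, and the assertions above use a 16-bit split of the derived value.

```cpp
// Sketch only: the bucket is a pure function of the document id text, which is
// why changing doc:test:uri to id:test:test::uri changes the expected BucketId.
// Assumes document::DocumentId and document::BucketIdFactory; header paths are
// assumptions.
#include <vespa/document/base/documentid.h>
#include <vespa/document/bucket/bucketid.h>
#include <vespa/document/bucket/bucketidfactory.h>
#include <iostream>

int main() {
    document::BucketIdFactory factory;
    document::DocumentId docId("id:test:test::uri");         // the new id from SetUp
    document::BucketId bucket = factory.getBucketId(docId);  // derived from the id
    std::cout << bucket.toString() << '\n';                  // BucketId(0x...) formatting
}
```
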
diff --git a/storage/src/tests/persistence/filestorage/filestormanagertest.cpp b/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
index 2a3e72b48b7..44cb92071a1 100644
--- a/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
+++ b/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
@@ -474,7 +474,7 @@ TEST_F(FileStorManagerTest, flush) {
api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
// Creating a document to test with
- document::DocumentId docId("doc:crawler:http://www.ntnu.no/");
+ document::DocumentId docId("id:ns:testdoctype1::crawler:http://www.ntnu.no/");
auto doc = std::make_shared<Document>(*_testdoctype1, docId);
document::BucketId bid(4000);
@@ -1767,7 +1767,7 @@ TEST_F(FileStorManagerTest, no_timestamps) {
"storage", lib::NodeType::STORAGE, 3);
// Creating a document to test with
Document::SP doc(createDocument(
- "some content", "doc:crawler:http://www.ntnu.no/").release());
+ "some content", "id:ns:testdoctype1::crawler:http://www.ntnu.no/").release());
document::BucketId bid(16, 4000);
createBucket(bid, 0);
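
The FileStorManager hunks show two things about the new ids: the user-specified part is free-form, so in `id:ns:testdoctype1::crawler:http://www.ntnu.no/` everything after the empty key/value group is the local id and the URL's ':' and '/' need no escaping; and the id now names a concrete document type ("testdoctype1") matching the DocumentType the test builds the Document with. A hedged sketch mirroring those lines (the header paths and exact Document constructor overload are assumptions beyond what the diff shows):

```cpp
// Sketch mirroring the updated FileStorManagerTest lines. Assumes the document
// library headers (paths are assumptions) and a DocumentType instance registered
// like the fixture's "testdoctype1".
#include <vespa/document/base/documentid.h>
#include <vespa/document/fieldvalue/document.h>
#include <memory>

std::shared_ptr<document::Document>
makeCrawlerDoc(const document::DocumentType& testDocType1) {
    // The two consecutive colons are the empty key/value group; everything after
    // them is the user-specified part, so a URL is a valid local id.
    document::DocumentId docId("id:ns:testdoctype1::crawler:http://www.ntnu.no/");
    return std::make_shared<document::Document>(testDocType1, docId);
}
```
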
diff --git a/storage/src/tests/storageserver/bouncertest.cpp b/storage/src/tests/storageserver/bouncertest.cpp
index 35b752fedfd..c19d8814af4 100644
--- a/storage/src/tests/storageserver/bouncertest.cpp
+++ b/storage/src/tests/storageserver/bouncertest.cpp
@@ -100,7 +100,7 @@ BouncerTest::createDummyFeedMessage(api::Timestamp timestamp,
{
auto cmd = std::make_shared<api::RemoveCommand>(
makeDocumentBucket(document::BucketId(0)),
- document::DocumentId("doc:foo:bar"),
+ document::DocumentId("id:ns:foo::bar"),
timestamp);
cmd->setPriority(priority);
return cmd;
@@ -112,7 +112,7 @@ BouncerTest::createDummyFeedMessage(api::Timestamp timestamp,
{
auto cmd = std::make_shared<api::RemoveCommand>(
document::Bucket(bucketSpace, document::BucketId(0)),
- document::DocumentId("doc:foo:bar"),
+ document::DocumentId("id:ns:foo::bar"),
timestamp);
cmd->setPriority(Priority(0));
return cmd;
diff --git a/storage/src/tests/storageserver/communicationmanagertest.cpp b/storage/src/tests/storageserver/communicationmanagertest.cpp
index b970e56343e..caee6e6ab91 100644
--- a/storage/src/tests/storageserver/communicationmanagertest.cpp
+++ b/storage/src/tests/storageserver/communicationmanagertest.cpp
@@ -32,7 +32,7 @@ struct CommunicationManagerTest : Test {
api::StorageMessage::Priority priority)
{
auto cmd = std::make_shared<api::GetCommand>(makeDocumentBucket(document::BucketId(0)),
- document::DocumentId("doc::mydoc"),
+ document::DocumentId("id:ns:mytype::mydoc"),
"[all]");
cmd->setAddress(api::StorageMessageAddress("storage", lib::NodeType::STORAGE, 1));
cmd->setPriority(priority);
@@ -69,13 +69,13 @@ TEST_F(CommunicationManagerTest, simple) {
// Send a message through from distributor to storage
auto cmd = std::make_shared<api::GetCommand>(
- makeDocumentBucket(document::BucketId(0)), document::DocumentId("doc::mydoc"), "[all]");
+ makeDocumentBucket(document::BucketId(0)), document::DocumentId("id:ns:mytype::mydoc"), "[all]");
cmd->setAddress(api::StorageMessageAddress("storage", lib::NodeType::STORAGE, 1));
distributorLink->sendUp(cmd);
storageLink->waitForMessages(1, MESSAGE_WAIT_TIME_SEC);
ASSERT_GT(storageLink->getNumCommands(), 0);
auto cmd2 = std::dynamic_pointer_cast<api::StorageCommand>(storageLink->getCommand(0));
- EXPECT_EQ("doc::mydoc", dynamic_cast<api::GetCommand&>(*cmd2).getDocumentId().toString());
+ EXPECT_EQ("id:ns:mytype::mydoc", dynamic_cast<api::GetCommand&>(*cmd2).getDocumentId().toString());
// Reply to the message
std::shared_ptr<api::StorageReply> reply(cmd2->makeReply().release());
storageLink->sendUp(reply);
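
The CommunicationManager hunks rely on the id text round-tripping unchanged: the GetCommand is built from `id:ns:mytype::mydoc` and, after passing through the chain, `getDocumentId().toString()` is expected to return exactly that string (the old assertion did the same with `doc::mydoc`, which had an empty namespace). A small stand-alone sketch of that round-trip, assuming DocumentId::toString() returns the canonical id text as the assertion does; the header path is an assumption.

```cpp
// Round-trip sketch: the updated EXPECT_EQ assumes the id text survives
// DocumentId construction verbatim.
#include <vespa/document/base/documentid.h>
#include <cassert>

int main() {
    document::DocumentId id("id:ns:mytype::mydoc");
    // Matches the string the updated CommunicationManagerTest expects back
    // from GetCommand::getDocumentId().toString().
    assert(id.toString() == "id:ns:mytype::mydoc");
}
```
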
diff --git a/streamingvisitors/src/tests/hitcollector/hitcollector.cpp b/streamingvisitors/src/tests/hitcollector/hitcollector.cpp
index 9650834d0f1..30e6b8a7adb 100644
--- a/streamingvisitors/src/tests/hitcollector/hitcollector.cpp
+++ b/streamingvisitors/src/tests/hitcollector/hitcollector.cpp
@@ -76,7 +76,7 @@ HitCollectorTest::assertHit(SearchResult::RankType expRank, uint32_t expDocId, u
void
HitCollectorTest::addHit(HitCollector &hc, uint32_t docId, double score, const char *sortData, size_t sortDataSize)
{
- document::Document::UP doc(new document::Document(_docType, DocumentId("doc::")));
+ document::Document::UP doc(new document::Document(_docType, DocumentId("id:ns:testdoc::")));
StorageDocument::UP sdoc(new StorageDocument(std::move(doc), SharedFieldPathMap(), 0));
ASSERT_TRUE(sdoc->valid());
MatchData md(MatchData::params());