summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--config-application-package/src/main/java/com/yahoo/config/model/application/provider/StaticConfigDefinitionRepo.java4
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/InstanceResolver.java3
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomConfigPayloadBuilder.java4
-rw-r--r--config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java16
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/builder/UserConfigBuilderTest.java10
-rwxr-xr-xconfig/src/main/java/com/yahoo/vespa/config/ConfigKey.java9
-rw-r--r--config/src/main/java/com/yahoo/vespa/config/util/ConfigUtils.java4
-rw-r--r--config/src/test/java/com/yahoo/vespa/config/ConfigKeyTest.java10
-rw-r--r--configgen/src/main/java/com/yahoo/config/codegen/CNode.java6
-rw-r--r--configgen/src/main/java/com/yahoo/config/codegen/JavaClassBuilder.java17
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/resource/MeteringClient.java17
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/resource/MeteringInfo.java41
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/resource/ResourceAllocation.java2
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/resource/ResourceSnapshot.java14
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/resource/ResourceSnapshotConsumer.java16
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/stubs/MockMeteringClient.java37
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/stubs/MockResourceSnapshotConsumer.java29
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java13
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java6
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java12
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java63
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java4
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainerTest.java4
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ControllerContainerTest.java2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application1-metering.json6
-rw-r--r--documentapi/src/tests/messagebus/messagebus_test.cpp10
-rw-r--r--documentapi/src/tests/policyfactory/policyfactory.cpp2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java5
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java10
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java99
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java16
-rw-r--r--persistence/src/tests/dummyimpl/dummypersistence_test.cpp22
-rw-r--r--searchcore/src/tests/proton/attribute/attribute_test.cpp34
-rw-r--r--searchcore/src/tests/proton/common/cachedselect_test.cpp22
-rw-r--r--searchcore/src/tests/proton/common/selectpruner_test.cpp67
-rw-r--r--searchcore/src/tests/proton/docsummary/docsummary.cpp59
-rw-r--r--searchcore/src/tests/proton/docsummary/summaryfieldconverter_test.cpp12
-rw-r--r--searchcore/src/tests/proton/document_iterator/document_iterator_test.cpp270
-rw-r--r--searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp8
-rw-r--r--searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp14
-rw-r--r--searchcore/src/tests/proton/documentmetastore/documentmetastore_test.cpp4
-rw-r--r--searchcore/src/tests/proton/feed_and_search/feed_and_search.cpp2
-rw-r--r--searchcore/src/tests/proton/feedoperation/feedoperation_test.cpp6
-rw-r--r--searchcore/src/tests/proton/index/fusionrunner_test.cpp2
-rw-r--r--searchcore/src/tests/proton/index/index_writer/index_writer_test.cpp2
-rw-r--r--searchcore/src/tests/proton/index/indexmanager_test.cpp2
-rw-r--r--searchcore/src/tests/proton/matching/matching_test.cpp46
-rw-r--r--searchcore/src/tests/proton/persistenceengine/persistenceengine_test.cpp35
-rw-r--r--searchcore/src/tests/proton/server/documentretriever_test.cpp9
-rw-r--r--searchcore/src/tests/proton/server/feeddebugger_test.cpp10
-rw-r--r--searchcore/src/tests/proton/server/feedstates_test.cpp2
-rw-r--r--searchcore/src/vespa/searchcore/proton/common/feeddebugger.h2
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentretrieverbase.cpp14
-rw-r--r--searchlib/src/tests/diskindex/fusion/fusion_test.cpp6
-rw-r--r--searchlib/src/tests/engine/transportserver/transportserver_test.cpp4
-rw-r--r--searchlib/src/tests/index/docbuilder/docbuilder_test.cpp16
-rw-r--r--searchlib/src/tests/memoryindex/document_inverter/document_inverter_test.cpp12
-rw-r--r--searchlib/src/tests/memoryindex/field_index/field_index_test.cpp18
-rw-r--r--searchlib/src/tests/memoryindex/field_inverter/field_inverter_test.cpp16
-rw-r--r--searchlib/src/tests/memoryindex/memory_index/memory_index_test.cpp2
-rw-r--r--searchlib/src/tests/memoryindex/url_field_inverter/url_field_inverter_test.cpp8
-rwxr-xr-xsecurity-tools/src/main/sh/vespa-curl-wrapper69
-rw-r--r--staging_vespalib/src/tests/state_server/state_server_test.cpp6
-rw-r--r--storage/src/tests/bucketdb/bucketmanagertest.cpp6
-rw-r--r--storage/src/tests/distributor/getoperationtest.cpp54
-rw-r--r--storage/src/tests/distributor/pendingmessagetrackertest.cpp2
-rw-r--r--storage/src/tests/distributor/putoperationtest.cpp114
-rw-r--r--storage/src/tests/distributor/removeoperationtest.cpp29
-rw-r--r--storage/src/tests/distributor/twophaseupdateoperationtest.cpp129
-rw-r--r--storage/src/tests/distributor/updateoperationtest.cpp10
-rw-r--r--storage/src/tests/frameworkimpl/status/statustest.cpp18
-rw-r--r--storage/src/tests/persistence/filestorage/filestormanagertest.cpp4
-rw-r--r--storage/src/tests/persistence/testandsettest.cpp14
-rw-r--r--storage/src/tests/storageserver/bouncertest.cpp4
-rw-r--r--storage/src/tests/storageserver/communicationmanagertest.cpp6
-rw-r--r--storage/src/tests/storageserver/documentapiconvertertest.cpp3
-rw-r--r--streamingvisitors/src/tests/hitcollector/hitcollector.cpp2
-rw-r--r--vespalib/src/tests/datastore/unique_store/unique_store_test.cpp234
-rw-r--r--vespalib/src/tests/portal/portal_test.cpp6
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store.h7
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store.hpp4
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store_allocator.h1
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.cpp7
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.h2
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp3
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store_string_comparator.h51
-rw-r--r--vespalib/src/vespa/vespalib/portal/http_connection.cpp21
87 files changed, 1113 insertions, 910 deletions
diff --git a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/StaticConfigDefinitionRepo.java b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/StaticConfigDefinitionRepo.java
index 58318958dfb..d9253d6105b 100644
--- a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/StaticConfigDefinitionRepo.java
+++ b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/StaticConfigDefinitionRepo.java
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.model.application.provider;
-import com.yahoo.config.codegen.CNode;
import com.yahoo.config.model.api.ConfigDefinitionRepo;
import com.yahoo.io.IOUtils;
import com.yahoo.log.LogLevel;
@@ -16,7 +15,6 @@ import java.io.IOException;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;
-import java.util.stream.Collectors;
/**
* A global pool of all config definitions that this server knows about. These objects can be shared
@@ -48,8 +46,6 @@ public class StaticConfigDefinitionRepo implements ConfigDefinitionRepo {
private void addConfigDefinition(File def) {
try {
ConfigDefinitionKey key = ConfigUtils.createConfigDefinitionKeyFromDefFile(def);
- if (key.getNamespace().isEmpty())
- key = new ConfigDefinitionKey(key.getName(), CNode.DEFAULT_NAMESPACE);
addConfigDefinition(key, def);
} catch (IOException e) {
log.log(LogLevel.WARNING, "Exception adding config definition " + def, e);
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/InstanceResolver.java b/config-model/src/main/java/com/yahoo/vespa/model/InstanceResolver.java
index f90b399d305..a8404b076d4 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/InstanceResolver.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/InstanceResolver.java
@@ -135,8 +135,7 @@ class InstanceResolver {
}
static String packageName(ConfigDefinitionKey cKey, PackagePrefix packagePrefix) {
- String prefix = packagePrefix.value;
- return prefix + (cKey.getNamespace().isEmpty() ? CNode.DEFAULT_NAMESPACE : cKey.getNamespace());
+ return packagePrefix.value + cKey.getNamespace();
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomConfigPayloadBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomConfigPayloadBuilder.java
index d07f6c4e6fd..31231857aae 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomConfigPayloadBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomConfigPayloadBuilder.java
@@ -86,11 +86,15 @@ public class DomConfigPayloadBuilder {
}
private static boolean validName(String name) {
+ if (name == null) return false;
+
Matcher m = namePattern.matcher(name);
return m.matches();
}
private static boolean validNamespace(String namespace) {
+ if (namespace == null) return false;
+
Matcher m = namespacePattern.matcher(namespace);
return m.matches();
}
diff --git a/config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java b/config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java
index ae8e8fe7de5..ec9d631dec5 100644
--- a/config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java
+++ b/config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java
@@ -5,9 +5,10 @@ import com.google.common.io.Files;
import com.yahoo.config.ConfigInstance;
import com.yahoo.config.application.api.ApplicationMetaData;
import com.yahoo.config.application.api.UnparsedConfigDefinition;
-import com.yahoo.config.codegen.CNode;
import com.yahoo.config.application.api.ApplicationPackage;
-import com.yahoo.config.model.application.provider.*;
+import com.yahoo.config.model.application.provider.Bundle;
+import com.yahoo.config.model.application.provider.DeployData;
+import com.yahoo.config.model.application.provider.FilesApplicationPackage;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.path.Path;
import com.yahoo.document.DataType;
@@ -338,19 +339,16 @@ public class ApplicationDeployTest {
DeployState deployState = new DeployState.Builder().applicationPackage(app).build();
- ConfigDefinition def = deployState.getConfigDefinition(new ConfigDefinitionKey("foo", CNode.DEFAULT_NAMESPACE)).get();
- assertThat(def.getNamespace(), is(CNode.DEFAULT_NAMESPACE));
-
- def = deployState.getConfigDefinition(new ConfigDefinitionKey("baz", "xyzzy")).get();
+ ConfigDefinition def = deployState.getConfigDefinition(new ConfigDefinitionKey("baz", "xyzzy")).get();
assertThat(def.getNamespace(), is("xyzzy"));
def = deployState.getConfigDefinition(new ConfigDefinitionKey("foo", "qux")).get();
assertThat(def.getNamespace(), is("qux"));
// A config def without version in filename and version in file header
- def = deployState.getConfigDefinition(new ConfigDefinitionKey("xyzzy", CNode.DEFAULT_NAMESPACE)).get();
- assertThat(def.getNamespace(), is(CNode.DEFAULT_NAMESPACE));
- assertThat(def.getName(), is("xyzzy"));
+ def = deployState.getConfigDefinition(new ConfigDefinitionKey("bar", "xyzzy")).get();
+ assertThat(def.getNamespace(), is("xyzzy"));
+ assertThat(def.getName(), is("bar"));
}
@Test(expected=IllegalArgumentException.class)
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/builder/UserConfigBuilderTest.java b/config-model/src/test/java/com/yahoo/vespa/model/builder/UserConfigBuilderTest.java
index 6bfd55ca5de..bbed96d3251 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/builder/UserConfigBuilderTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/builder/UserConfigBuilderTest.java
@@ -1,13 +1,13 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.builder;
-import com.yahoo.test.ArraytypesConfig;
import com.yahoo.config.ConfigInstance;
import com.yahoo.config.model.application.provider.BaseDeployLogger;
+import com.yahoo.config.model.builder.xml.XmlHelper;
import com.yahoo.config.model.deploy.ConfigDefinitionStore;
-import com.yahoo.test.SimpletypesConfig;
import com.yahoo.config.model.producer.UserConfigRepo;
-import com.yahoo.config.model.builder.xml.XmlHelper;
+import com.yahoo.test.ArraytypesConfig;
+import com.yahoo.test.SimpletypesConfig;
import com.yahoo.vespa.config.ConfigDefinitionKey;
import com.yahoo.vespa.config.ConfigPayload;
import com.yahoo.vespa.config.ConfigPayloadBuilder;
@@ -22,7 +22,9 @@ import java.io.StringReader;
import java.util.Optional;
import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThat;
/**
* @author Ulf Lilleengen
diff --git a/config/src/main/java/com/yahoo/vespa/config/ConfigKey.java b/config/src/main/java/com/yahoo/vespa/config/ConfigKey.java
index 930b74ea804..a1d069da284 100755
--- a/config/src/main/java/com/yahoo/vespa/config/ConfigKey.java
+++ b/config/src/main/java/com/yahoo/vespa/config/ConfigKey.java
@@ -3,7 +3,6 @@ package com.yahoo.vespa.config;
import com.yahoo.config.ConfigInstance;
import com.yahoo.config.ConfigurationRuntimeException;
-import com.yahoo.config.codegen.CNode;
import edu.umd.cs.findbugs.annotations.NonNull;
import edu.umd.cs.findbugs.annotations.Nullable;
@@ -48,11 +47,13 @@ public class ConfigKey<CONFIGCLASS extends ConfigInstance> implements Comparable
}
public ConfigKey(String name, String configIdString, String namespace, String defMd5, Class<CONFIGCLASS> clazz) {
- if (name == null)
- throw new ConfigurationRuntimeException("Config name must be non-null!");
+ if (name == null || name.isEmpty())
+ throw new ConfigurationRuntimeException("Config name cannot be null or empty!");
+ if (namespace == null || namespace.isEmpty())
+ throw new ConfigurationRuntimeException("Config namespace cannot be null or empty!");
this.name = name;
this.configId = (configIdString == null) ? "" : configIdString;
- this.namespace = (namespace == null) ? CNode.DEFAULT_NAMESPACE : namespace;
+ this.namespace = namespace;
this.md5 = (defMd5 == null) ? "" : defMd5;
this.configClass = clazz;
}
diff --git a/config/src/main/java/com/yahoo/vespa/config/util/ConfigUtils.java b/config/src/main/java/com/yahoo/vespa/config/util/ConfigUtils.java
index 56ede8897ed..d7653572773 100644
--- a/config/src/main/java/com/yahoo/vespa/config/util/ConfigUtils.java
+++ b/config/src/main/java/com/yahoo/vespa/config/util/ConfigUtils.java
@@ -2,7 +2,6 @@
package com.yahoo.vespa.config.util;
import com.yahoo.collections.Tuple2;
-import com.yahoo.config.codegen.CNode;
import com.yahoo.io.HexDump;
import com.yahoo.io.IOUtils;
import com.yahoo.net.HostName;
@@ -302,9 +301,6 @@ public class ConfigUtils {
*/
static ConfigDefinitionKey createConfigDefinitionKeyFromDefContent(String name, byte[] content) {
String namespace = ConfigUtils.getDefNamespace(new StringReader(Utf8.toString(content)));
- if (namespace.isEmpty()) {
- namespace = CNode.DEFAULT_NAMESPACE;
- }
return new ConfigDefinitionKey(name, namespace);
}
diff --git a/config/src/test/java/com/yahoo/vespa/config/ConfigKeyTest.java b/config/src/test/java/com/yahoo/vespa/config/ConfigKeyTest.java
index 427014316cf..452d0d78897 100644
--- a/config/src/test/java/com/yahoo/vespa/config/ConfigKeyTest.java
+++ b/config/src/test/java/com/yahoo/vespa/config/ConfigKeyTest.java
@@ -7,10 +7,10 @@ import java.util.List;
import com.yahoo.foo.AppConfig;
import com.yahoo.config.ConfigurationRuntimeException;
-import com.yahoo.config.codegen.CNode;
import org.junit.Test;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
/**
*
@@ -27,7 +27,6 @@ public class ConfigKeyTest {
assertEquals(key1, key2);
ConfigKey<?> key3 = new ConfigKey<>("foo", "a/b/c/d", namespace);
- assertFalse(key1.equals(key3));
assertNotEquals(key1, key3);
assertEquals("a/b/c", new ConfigKey<>("foo", "a/b/c", namespace).getConfigId());
@@ -67,15 +66,10 @@ public class ConfigKeyTest {
// Tests namespace and equals with combinations of namespace.
@Test
public void testNamespace() {
- ConfigKey<?> noNamespace = new ConfigKey<>("name", "id", null);
ConfigKey<?> namespaceFoo = new ConfigKey<>("name", "id", "foo");
ConfigKey<?> namespaceBar = new ConfigKey<>("name", "id", "bar");
- assertEquals(noNamespace, noNamespace);
assertEquals(namespaceFoo, namespaceFoo);
- assertNotEquals(noNamespace, namespaceFoo);
- assertNotEquals(namespaceFoo, noNamespace);
assertNotEquals(namespaceFoo, namespaceBar);
- assertEquals(noNamespace.getNamespace(), CNode.DEFAULT_NAMESPACE);
assertEquals(namespaceBar.getNamespace(), "bar");
}
diff --git a/configgen/src/main/java/com/yahoo/config/codegen/CNode.java b/configgen/src/main/java/com/yahoo/config/codegen/CNode.java
index ea88e115530..1c1fb5f5bce 100644
--- a/configgen/src/main/java/com/yahoo/config/codegen/CNode.java
+++ b/configgen/src/main/java/com/yahoo/config/codegen/CNode.java
@@ -3,17 +3,13 @@ package com.yahoo.config.codegen;
import java.util.StringTokenizer;
-import static com.yahoo.config.codegen.DefParser.DEFAULT_PACKAGE_PREFIX;
-
/**
- * Abstract superclass for all nodes representing a definition file.
+ * Abstract superclass for all nodes representing a config definition.
*
* @author gjoranv
*/
public abstract class CNode {
- public static final String DEFAULT_NAMESPACE = "config";
-
// TODO: replace by "type" enum
public final boolean isArray;
public final boolean isMap;
diff --git a/configgen/src/main/java/com/yahoo/config/codegen/JavaClassBuilder.java b/configgen/src/main/java/com/yahoo/config/codegen/JavaClassBuilder.java
index 75149d7a50e..40c903ee929 100644
--- a/configgen/src/main/java/com/yahoo/config/codegen/JavaClassBuilder.java
+++ b/configgen/src/main/java/com/yahoo/config/codegen/JavaClassBuilder.java
@@ -23,11 +23,10 @@ import static com.yahoo.config.codegen.DefParser.DEFAULT_PACKAGE_PREFIX;
*/
public class JavaClassBuilder implements ClassBuilder {
- public static final String INDENTATION = " ";
+ static final String INDENTATION = " ";
private final InnerCNode root;
private final NormalizedDefinition nd;
- private final String packagePrefix;
private final String javaPackage;
private final String className;
private final File destDir;
@@ -35,7 +34,7 @@ public class JavaClassBuilder implements ClassBuilder {
public JavaClassBuilder(InnerCNode root, NormalizedDefinition nd, File destDir, String rawPackagePrefix) {
this.root = root;
this.nd = nd;
- this.packagePrefix = (rawPackagePrefix != null) ? rawPackagePrefix : DEFAULT_PACKAGE_PREFIX;
+ String packagePrefix = (rawPackagePrefix != null) ? rawPackagePrefix : DEFAULT_PACKAGE_PREFIX;
this.javaPackage = (root.getPackage() != null) ? root.getPackage() : packagePrefix + root.getNamespace();
this.className = createClassName(root.getName());
this.destDir = destDir;
@@ -74,15 +73,7 @@ public class JavaClassBuilder implements ClassBuilder {
"import java.io.File;\n" + //
"import java.nio.file.Path;\n" + //
"import edu.umd.cs.findbugs.annotations.NonNull;\n" + //
- getImportFrameworkClasses(root.getNamespace());
- }
-
- private String getImportFrameworkClasses(String namespace) {
- if (CNode.DEFAULT_NAMESPACE.equals(namespace) == false) {
- return "import " + packagePrefix + CNode.DEFAULT_NAMESPACE + ".*;";
- } else {
- return "";
- }
+ "import com.yahoo.config.*;";
}
// TODO: remove the extra comment line " *" if root.getCommentBlock is empty
@@ -96,7 +87,7 @@ public class JavaClassBuilder implements ClassBuilder {
" public final static String CONFIG_DEF_MD5 = \"" + root.getMd5() + "\";\n" + //
" public final static String CONFIG_DEF_NAME = \"" + root.getName() + "\";\n" + //
" public final static String CONFIG_DEF_NAMESPACE = \"" + root.getNamespace() + "\";\n" + //
- " public final static String CONFIG_DEF_VERSION = \"" + root.getVersion() + "\";\n" + // TODO: Remove on Vespa 8
+ " public final static String CONFIG_DEF_VERSION = \"" + root.getVersion() + "\";\n" + // TODO: Remove in Vespa 8
" public final static String[] CONFIG_DEF_SCHEMA = {\n" + //
"" + indentCode(INDENTATION + INDENTATION, getDefSchema()) + "\n" + //
" };\n" + //
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/resource/MeteringClient.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/resource/MeteringClient.java
new file mode 100644
index 00000000000..ab2a8425897
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/resource/MeteringClient.java
@@ -0,0 +1,17 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.api.integration.resource;
+
+import java.util.List;
+
+/**
+ * Consumes and retrieves snapshots of resourses allocated per application.
+ *
+ * @author olaa
+ */
+public interface MeteringClient {
+
+ void consume(List<ResourceSnapshot> resources);
+
+ MeteringInfo getResourceSnapshots(String tenantName, String applicationName);
+
+}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/resource/MeteringInfo.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/resource/MeteringInfo.java
new file mode 100644
index 00000000000..8709315a83c
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/resource/MeteringInfo.java
@@ -0,0 +1,41 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.api.integration.resource;
+
+import com.yahoo.config.provision.ApplicationId;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * @author olaa
+ */
+public class MeteringInfo {
+
+ private final ResourceAllocation thisMonth;
+ private final ResourceAllocation lastMonth;
+ private final ResourceAllocation currentSnapshot;
+ Map<ApplicationId, List<ResourceSnapshot>> snapshotHistory;
+
+ public MeteringInfo(ResourceAllocation thisMonth, ResourceAllocation lastMonth, ResourceAllocation currentSnapshot, Map<ApplicationId, List<ResourceSnapshot>> snapshotHistory) {
+ this.thisMonth = thisMonth;
+ this.lastMonth = lastMonth;
+ this.currentSnapshot = currentSnapshot;
+ this.snapshotHistory = snapshotHistory;
+ }
+
+ public ResourceAllocation getThisMonth() {
+ return thisMonth;
+ }
+
+ public ResourceAllocation getLastMonth() {
+ return lastMonth;
+ }
+
+ public ResourceAllocation getCurrentSnapshot() {
+ return currentSnapshot;
+ }
+
+ public Map<ApplicationId, List<ResourceSnapshot>> getSnapshotHistory() {
+ return snapshotHistory;
+ }
+}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/resource/ResourceAllocation.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/resource/ResourceAllocation.java
index 3b86b7b8219..a6f47e34170 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/resource/ResourceAllocation.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/resource/ResourceAllocation.java
@@ -16,7 +16,7 @@ public class ResourceAllocation {
private final double memoryGb;
private final double diskGb;
- private ResourceAllocation(double cpuCores, double memoryGb, double diskGb) {
+ public ResourceAllocation(double cpuCores, double memoryGb, double diskGb) {
this.cpuCores = cpuCores;
this.memoryGb = memoryGb;
this.diskGb = diskGb;
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/resource/ResourceSnapshot.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/resource/ResourceSnapshot.java
index e3a2781142a..bd4d31e53ae 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/resource/ResourceSnapshot.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/resource/ResourceSnapshot.java
@@ -15,16 +15,12 @@ import java.util.stream.Collectors;
public class ResourceSnapshot {
private final ApplicationId applicationId;
- private final double cpuCores;
- private final double memoryGb;
- private final double diskGb;
+ private final ResourceAllocation resourceAllocation;
private final Instant timestamp;
public ResourceSnapshot(ApplicationId applicationId, double cpuCores, double memoryGb, double diskGb, Instant timestamp) {
this.applicationId = applicationId;
- this.cpuCores = cpuCores;
- this.memoryGb = memoryGb;
- this.diskGb = diskGb;
+ this.resourceAllocation = new ResourceAllocation(cpuCores, memoryGb, diskGb);
this.timestamp = timestamp;
}
@@ -49,15 +45,15 @@ public class ResourceSnapshot {
}
public double getCpuCores() {
- return cpuCores;
+ return resourceAllocation.getCpuCores();
}
public double getMemoryGb() {
- return memoryGb;
+ return resourceAllocation.getMemoryGb();
}
public double getDiskGb() {
- return diskGb;
+ return resourceAllocation.getDiskGb();
}
public Instant getTimestamp() {
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/resource/ResourceSnapshotConsumer.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/resource/ResourceSnapshotConsumer.java
deleted file mode 100644
index bb6830770e2..00000000000
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/resource/ResourceSnapshotConsumer.java
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.api.integration.resource;
-
-import java.util.List;
-
-/**
- * Consumes a snapshot of resourses allocated/used per application.
- *
- * @author olaa
- */
-public interface ResourceSnapshotConsumer {
-
- public void consume(List<ResourceSnapshot> resources);
-
- public List<ResourceSnapshot> getResourceSnapshots(String tenantName, String applicationName);
-}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/stubs/MockMeteringClient.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/stubs/MockMeteringClient.java
new file mode 100644
index 00000000000..ae3c4a579a2
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/stubs/MockMeteringClient.java
@@ -0,0 +1,37 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.api.integration.stubs;
+
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.vespa.hosted.controller.api.integration.resource.MeteringInfo;
+import com.yahoo.vespa.hosted.controller.api.integration.resource.ResourceAllocation;
+import com.yahoo.vespa.hosted.controller.api.integration.resource.ResourceSnapshot;
+import com.yahoo.vespa.hosted.controller.api.integration.resource.MeteringClient;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * @author olaa
+ */
+public class MockMeteringClient implements MeteringClient {
+
+ private List<ResourceSnapshot> resources = new ArrayList<>();
+
+ @Override
+ public void consume(List<ResourceSnapshot> resources){
+ this.resources = resources;
+ }
+
+ @Override
+ public MeteringInfo getResourceSnapshots(String tenantName, String applicationName) {
+ ResourceAllocation emptyAllocation = new ResourceAllocation(0, 0, 0);
+ ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
+ Map<ApplicationId, List<ResourceSnapshot>> snapshotHistory = Map.of(applicationId, new ArrayList<>());
+ return new MeteringInfo(emptyAllocation, emptyAllocation, emptyAllocation, snapshotHistory);
+ }
+
+ public List<ResourceSnapshot> consumedResources() {
+ return this.resources;
+ }
+}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/stubs/MockResourceSnapshotConsumer.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/stubs/MockResourceSnapshotConsumer.java
deleted file mode 100644
index 2cd3aef1903..00000000000
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/stubs/MockResourceSnapshotConsumer.java
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.api.integration.stubs;
-
-import com.yahoo.vespa.hosted.controller.api.integration.resource.ResourceSnapshot;
-import com.yahoo.vespa.hosted.controller.api.integration.resource.ResourceSnapshotConsumer;
-
-import java.util.List;
-
-/**
- * @author olaa
- */
-public class MockResourceSnapshotConsumer implements ResourceSnapshotConsumer {
-
- private List<ResourceSnapshot> resources;
-
- @Override
- public void consume(List<ResourceSnapshot> resources){
- this.resources = resources;
- }
-
- @Override
- public List<ResourceSnapshot> getResourceSnapshots(String tenantName, String applicationName) {
- throw new UnsupportedOperationException();
- }
-
- public List<ResourceSnapshot> consumedResources() {
- return this.resources;
- }
-}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java
index 621c25df914..f642ee61ec4 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java
@@ -21,6 +21,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.ArtifactRepo
import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterCloud;
import com.yahoo.vespa.hosted.controller.api.integration.maven.MavenRepository;
import com.yahoo.vespa.hosted.controller.api.integration.organization.Mailer;
+import com.yahoo.vespa.hosted.controller.api.integration.resource.MeteringClient;
import com.yahoo.vespa.hosted.controller.api.integration.routing.RoutingGenerator;
import com.yahoo.vespa.hosted.controller.api.integration.user.Roles;
import com.yahoo.vespa.hosted.controller.api.integration.zone.ZoneRegistry;
@@ -83,6 +84,7 @@ public class Controller extends AbstractComponent {
private final NameServiceForwarder nameServiceForwarder;
private final ApplicationCertificateProvider applicationCertificateProvider;
private final MavenRepository mavenRepository;
+ private final MeteringClient meteringClient;
/**
* Creates a controller
@@ -96,12 +98,12 @@ public class Controller extends AbstractComponent {
AccessControl accessControl,
ArtifactRepository artifactRepository, ApplicationStore applicationStore, TesterCloud testerCloud,
BuildService buildService, RunDataStore runDataStore, Mailer mailer, FlagSource flagSource,
- MavenRepository mavenRepository, ApplicationCertificateProvider applicationCertificateProvider) {
+ MavenRepository mavenRepository, ApplicationCertificateProvider applicationCertificateProvider, MeteringClient meteringClient) {
this(curator, rotationsConfig, zoneRegistry,
configServer, metricsService, routingGenerator,
Clock.systemUTC(), accessControl, artifactRepository, applicationStore, testerCloud,
buildService, runDataStore, com.yahoo.net.HostName::getLocalhost, mailer, flagSource,
- mavenRepository, applicationCertificateProvider);
+ mavenRepository, applicationCertificateProvider, meteringClient);
}
public Controller(CuratorDb curator, RotationsConfig rotationsConfig,
@@ -111,7 +113,7 @@ public class Controller extends AbstractComponent {
AccessControl accessControl,
ArtifactRepository artifactRepository, ApplicationStore applicationStore, TesterCloud testerCloud,
BuildService buildService, RunDataStore runDataStore, Supplier<String> hostnameSupplier,
- Mailer mailer, FlagSource flagSource, MavenRepository mavenRepository, ApplicationCertificateProvider applicationCertificateProvider) {
+ Mailer mailer, FlagSource flagSource, MavenRepository mavenRepository, ApplicationCertificateProvider applicationCertificateProvider, MeteringClient meteringClient) {
this.hostnameSupplier = Objects.requireNonNull(hostnameSupplier, "HostnameSupplier cannot be null");
this.curator = Objects.requireNonNull(curator, "Curator cannot be null");
@@ -124,6 +126,7 @@ public class Controller extends AbstractComponent {
this.nameServiceForwarder = new NameServiceForwarder(curator);
this.applicationCertificateProvider = Objects.requireNonNull(applicationCertificateProvider);
this.mavenRepository = Objects.requireNonNull(mavenRepository, "MavenRepository cannot be null");
+ this.meteringClient = meteringClient;
jobController = new JobController(this, runDataStore, Objects.requireNonNull(testerCloud));
applicationController = new ApplicationController(this, curator, accessControl,
@@ -297,6 +300,10 @@ public class Controller extends AbstractComponent {
return applicationCertificateProvider;
}
+ public MeteringClient meteringClient() {
+ return meteringClient;
+ }
+
/** Returns all other roles the given tenant role implies. */
public Set<Role> impliedRoles(TenantRole role) {
return Stream.concat(Roles.tenantRoles(role.tenant()).stream(),
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java
index 0a894a1031f..4cadefc35b5 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java
@@ -9,7 +9,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.aws.AwsEventFetcher;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.NodeRepository;
import com.yahoo.vespa.hosted.controller.api.integration.dns.NameService;
import com.yahoo.vespa.hosted.controller.api.integration.organization.*;
-import com.yahoo.vespa.hosted.controller.api.integration.resource.ResourceSnapshotConsumer;
+import com.yahoo.vespa.hosted.controller.api.integration.resource.MeteringClient;
import com.yahoo.vespa.hosted.controller.authority.config.ApiAuthorityConfig;
import com.yahoo.vespa.hosted.controller.maintenance.config.MaintainerConfig;
import com.yahoo.vespa.hosted.controller.persistence.CuratorDb;
@@ -62,7 +62,7 @@ public class ControllerMaintenance extends AbstractComponent {
NameService nameService, NodeRepository nodeRepository,
ContactRetriever contactRetriever,
CostReportConsumer reportConsumer,
- ResourceSnapshotConsumer resourceSnapshotConsumer,
+ MeteringClient meteringClient,
Billing billing,
SelfHostedCostConfig selfHostedCostConfig,
IssueHandler issueHandler,
@@ -86,7 +86,7 @@ public class ControllerMaintenance extends AbstractComponent {
osVersionStatusUpdater = new OsVersionStatusUpdater(controller, maintenanceInterval, jobControl);
contactInformationMaintainer = new ContactInformationMaintainer(controller, Duration.ofHours(12), jobControl, contactRetriever);
costReportMaintainer = new CostReportMaintainer(controller, Duration.ofHours(2), reportConsumer, jobControl, nodeRepository, Clock.systemUTC(), selfHostedCostConfig);
- resourceMeterMaintainer = new ResourceMeterMaintainer(controller, Duration.ofMinutes(60), jobControl, nodeRepository, Clock.systemUTC(), metric, resourceSnapshotConsumer);
+ resourceMeterMaintainer = new ResourceMeterMaintainer(controller, Duration.ofMinutes(60), jobControl, nodeRepository, Clock.systemUTC(), metric, meteringClient);
nameServiceDispatcher = new NameServiceDispatcher(controller, Duration.ofSeconds(10), jobControl, nameService);
billingMaintainer = new BillingMaintainer(controller, Duration.ofDays(3), jobControl, billing);
awsEventReporterMaintainer = new AwsEventReporterMaintainer(controller, Duration.ofDays(1), jobControl, issueHandler, awsEventFetcher);
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java
index 41460cd9fdc..c63dd28c13d 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java
@@ -11,7 +11,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.noderepository.NodeOwne
import com.yahoo.vespa.hosted.controller.api.integration.noderepository.NodeRepositoryNode;
import com.yahoo.vespa.hosted.controller.api.integration.noderepository.NodeState;
import com.yahoo.vespa.hosted.controller.api.integration.resource.ResourceSnapshot;
-import com.yahoo.vespa.hosted.controller.api.integration.resource.ResourceSnapshotConsumer;
+import com.yahoo.vespa.hosted.controller.api.integration.resource.MeteringClient;
import java.time.Clock;
import java.time.Duration;
@@ -20,7 +20,7 @@ import java.util.List;
import java.util.stream.Collectors;
/**
- * Creates a ResourceSnapshot per application, which is then passed on to a ResourceSnapshotConsumer
+ * Creates a ResourceSnapshot per application, which is then passed on to a MeteringClient
*
* @author olaa
*/
@@ -29,7 +29,7 @@ public class ResourceMeterMaintainer extends Maintainer {
private final Clock clock;
private final Metric metric;
private final NodeRepository nodeRepository;
- private final ResourceSnapshotConsumer resourceSnapshotConsumer;
+ private final MeteringClient meteringClient;
private static final String METERING_LAST_REPORTED = "metering_last_reported";
private static final String METERING_TOTAL_REPORTED = "metering_total_reported";
@@ -41,12 +41,12 @@ public class ResourceMeterMaintainer extends Maintainer {
NodeRepository nodeRepository,
Clock clock,
Metric metric,
- ResourceSnapshotConsumer resourceSnapshotConsumer) {
+ MeteringClient meteringClient) {
super(controller, interval, jobControl, null, SystemName.all());
this.clock = clock;
this.nodeRepository = nodeRepository;
this.metric = metric;
- this.resourceSnapshotConsumer = resourceSnapshotConsumer;
+ this.meteringClient = meteringClient;
}
@Override
@@ -54,7 +54,7 @@ public class ResourceMeterMaintainer extends Maintainer {
List<NodeRepositoryNode> nodes = getNodes();
List<ResourceSnapshot> resourceSnapshots = getResourceSnapshots(nodes);
- resourceSnapshotConsumer.consume(resourceSnapshots);
+ meteringClient.consume(resourceSnapshots);
metric.set(METERING_LAST_REPORTED, clock.millis() / 1000, metric.createContext(Collections.emptyMap()));
metric.set(METERING_TOTAL_REPORTED, resourceSnapshots.stream()
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
index 12e536aeefe..4772637ecfd 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
@@ -45,6 +45,9 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationV
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.SourceRevision;
+import com.yahoo.vespa.hosted.controller.api.integration.resource.MeteringInfo;
+import com.yahoo.vespa.hosted.controller.api.integration.resource.ResourceAllocation;
+import com.yahoo.vespa.hosted.controller.api.integration.resource.ResourceSnapshot;
import com.yahoo.vespa.hosted.controller.api.integration.routing.RoutingEndpoint;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Change;
@@ -778,37 +781,61 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
Slime slime = new Slime();
Cursor root = slime.setObject();
+ MeteringInfo meteringInfo = controller.meteringClient().getResourceSnapshots(tenant, application);
+
+ ResourceAllocation currentSnapshot = meteringInfo.getCurrentSnapshot();
Cursor currentRate = root.setObject("currentrate");
- currentRate.setDouble("cpu", 0);
- currentRate.setDouble("mem", 0);
- currentRate.setDouble("disk", 0);
+ currentRate.setDouble("cpu", currentSnapshot.getCpuCores());
+ currentRate.setDouble("mem", currentSnapshot.getMemoryGb());
+ currentRate.setDouble("disk", currentSnapshot.getDiskGb());
+ ResourceAllocation thisMonth = meteringInfo.getThisMonth();
Cursor thismonth = root.setObject("thismonth");
- thismonth.setDouble("cpu", 0);
- thismonth.setDouble("mem", 0);
- thismonth.setDouble("disk", 0);
+ thismonth.setDouble("cpu", thisMonth.getCpuCores());
+ thismonth.setDouble("mem", thisMonth.getMemoryGb());
+ thismonth.setDouble("disk", thisMonth.getDiskGb());
+ ResourceAllocation lastMonth = meteringInfo.getLastMonth();
Cursor lastmonth = root.setObject("lastmonth");
- lastmonth.setDouble("cpu", 0);
- lastmonth.setDouble("mem", 0);
- lastmonth.setDouble("disk", 0);
+ lastmonth.setDouble("cpu", lastMonth.getCpuCores());
+ lastmonth.setDouble("mem", lastMonth.getMemoryGb());
+ lastmonth.setDouble("disk", lastMonth.getDiskGb());
+
+ Map<ApplicationId, List<ResourceSnapshot>> history = meteringInfo.getSnapshotHistory();
Cursor details = root.setObject("details");
Cursor detailsCpu = details.setObject("cpu");
- Cursor detailsCpuDummyApp = detailsCpu.setObject("dummy");
- Cursor detailsCpuDummyData = detailsCpuDummyApp.setArray("data");
+ Cursor detailsMem = details.setObject("mem");
+ Cursor detailsDisk = details.setObject("disk");
- // The data array should be filled with objects like: { unixms: <number>, valur: <number }
+ history.entrySet().stream()
+ .forEach(entry -> {
+ String instanceName = entry.getKey().instance().value();
+ Cursor detailsCpuApp = detailsCpu.setObject(instanceName);
+ Cursor detailsMemApp = detailsMem.setObject(instanceName);
+ Cursor detailsDiskApp = detailsDisk.setObject(instanceName);
+ Cursor detailsCpuData = detailsCpuApp.setArray("data");
+ Cursor detailsMemData = detailsMemApp.setArray("data");
+ Cursor detailsDiskData = detailsDiskApp.setArray("data");
+ entry.getValue().stream()
+ .forEach(resourceSnapshot -> {
- Cursor detailsMem = details.setObject("mem");
- Cursor detailsMemDummyApp = detailsMem.setObject("dummy");
- Cursor detailsMemDummyData = detailsMemDummyApp.setArray("data");
+ Cursor cpu = detailsCpuData.addObject();
+ cpu.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
+ cpu.setDouble("value", resourceSnapshot.getCpuCores());
- Cursor detailsDisk = details.setObject("disk");
- Cursor detailsDiskDummyApp = detailsDisk.setObject("dummy");
- Cursor detailsDiskDummyData = detailsDiskDummyApp.setArray("data");
+                        Cursor mem = detailsMemData.addObject();
+                        mem.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
+                        mem.setDouble("value", resourceSnapshot.getMemoryGb());
+
+                        Cursor disk = detailsDiskData.addObject();
+                        disk.setLong("unixms", resourceSnapshot.getTimestamp().toEpochMilli());
+                        disk.setDouble("value", resourceSnapshot.getDiskGb());
+ });
+
+ });
return new SlimeJsonResponse(slime);
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java
index d178bf08592..7c5442edbde 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java
@@ -31,6 +31,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.routing.RoutingGenerato
import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockBuildService;
import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockMailer;
import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockMavenRepository;
+import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockMeteringClient;
import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockRunDataStore;
import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockTesterCloud;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
@@ -347,7 +348,8 @@ public final class ControllerTester {
new MockMailer(),
new InMemoryFlagSource(),
new MockMavenRepository(),
- new ApplicationCertificateMock());
+ new ApplicationCertificateMock(),
+ new MockMeteringClient());
// Calculate initial versions
controller.updateVersionStatus(VersionStatus.compute(controller));
return controller;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainerTest.java
index ad4511e7f11..76568ef17e0 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainerTest.java
@@ -4,7 +4,7 @@ package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.vespa.hosted.controller.api.integration.resource.ResourceSnapshot;
-import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockResourceSnapshotConsumer;
+import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockMeteringClient;
import com.yahoo.vespa.hosted.controller.integration.NodeRepositoryClientMock;
import com.yahoo.vespa.hosted.controller.integration.MetricsMock;
import com.yahoo.vespa.hosted.controller.integration.ZoneApiMock;
@@ -22,7 +22,7 @@ public class ResourceMeterMaintainerTest {
private final double DELTA = Double.MIN_VALUE;
private NodeRepositoryClientMock nodeRepository = new NodeRepositoryClientMock();
- private MockResourceSnapshotConsumer snapshotConsumer = new MockResourceSnapshotConsumer();
+ private MockMeteringClient snapshotConsumer = new MockMeteringClient();
private MetricsMock metrics = new MetricsMock();
@Test
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ControllerContainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ControllerContainerTest.java
index 1a838f41220..fe31200dc93 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ControllerContainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ControllerContainerTest.java
@@ -74,7 +74,7 @@ public class ControllerContainerTest {
" <component id='com.yahoo.vespa.hosted.controller.api.integration.organization.MockIssueHandler'/>\n" +
" <component id='com.yahoo.vespa.hosted.controller.api.integration.aws.MockAwsEventFetcher' />\n" +
" <component id='com.yahoo.vespa.hosted.controller.api.integration.organization.MockBilling'/>\n" +
- " <component id='com.yahoo.vespa.hosted.controller.api.integration.stubs.MockResourceSnapshotConsumer'/>\n" +
+ " <component id='com.yahoo.vespa.hosted.controller.api.integration.stubs.MockMeteringClient'/>\n" +
" <component id='com.yahoo.vespa.hosted.controller.integration.ConfigServerMock'/>\n" +
" <component id='com.yahoo.vespa.hosted.controller.integration.NodeRepositoryClientMock'/>\n" +
" <component id='com.yahoo.vespa.hosted.controller.integration.ZoneRegistryMock'/>\n" +
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application1-metering.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application1-metering.json
index 63e1c1ebbd1..92d45c624fa 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application1-metering.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application1-metering.json
@@ -16,17 +16,17 @@
},
"details": {
"cpu": {
- "dummy": {
+ "default": {
"data": []
}
},
"mem": {
- "dummy": {
+ "default": {
"data": []
}
},
"disk": {
- "dummy": {
+ "default": {
"data": []
}
}
diff --git a/documentapi/src/tests/messagebus/messagebus_test.cpp b/documentapi/src/tests/messagebus/messagebus_test.cpp
index 05275870a2e..3406db966f0 100644
--- a/documentapi/src/tests/messagebus/messagebus_test.cpp
+++ b/documentapi/src/tests/messagebus/messagebus_test.cpp
@@ -51,8 +51,8 @@ Test::Main()
TEST_DONE();
}
-Test::Test() {}
-Test::~Test() {}
+Test::Test() = default;
+Test::~Test() = default;
void Test::testMessage() {
const document::DataType *testdoc_type = _repo->getDocumentType("testdoc");
@@ -61,8 +61,7 @@ void Test::testMessage() {
UpdateDocumentMessage upd1(
document::DocumentUpdate::SP(
new document::DocumentUpdate(*_repo, *testdoc_type,
- document::DocumentId(document::DocIdString(
- "testdoc", "testme1")))));
+ document::DocumentId("id:ns:testdoc::testme1"))));
EXPECT_TRUE(upd1.getType() == DocumentProtocol::MESSAGE_UPDATEDOCUMENT);
EXPECT_TRUE(upd1.getProtocol() == "document");
@@ -82,8 +81,7 @@ void Test::testMessage() {
UpdateDocumentMessage upd2(
document::DocumentUpdate::SP(
new document::DocumentUpdate(*_repo, *testdoc_type,
- document::DocumentId(document::DocIdString(
- "testdoc", "testme2")))));
+ document::DocumentId("id:ns:testdoc::testme2"))));
EXPECT_TRUE(!(upd1.getDocumentUpdate().getId() == upd2.getDocumentUpdate().getId()));
DocumentMessage& msg2 = static_cast<DocumentMessage&>(upd2);
diff --git a/documentapi/src/tests/policyfactory/policyfactory.cpp b/documentapi/src/tests/policyfactory/policyfactory.cpp
index 41905183928..877ade22e2a 100644
--- a/documentapi/src/tests/policyfactory/policyfactory.cpp
+++ b/documentapi/src/tests/policyfactory/policyfactory.cpp
@@ -60,7 +60,7 @@ MyFactory::createPolicy(const string &param) const
mbus::Message::UP
createMessage()
{
- auto ret = std::make_unique<RemoveDocumentMessage>(document::DocumentId("doc:scheme:"));
+ auto ret = std::make_unique<RemoveDocumentMessage>(document::DocumentId("id:ns:type::"));
ret->getTrace().setLevel(9);
return ret;
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java
index 7d7f8d479fe..c9e440e787b 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java
@@ -3,7 +3,6 @@ package com.yahoo.vespa.hosted.provision.maintenance;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.log.LogLevel;
-import com.yahoo.vespa.curator.Lock;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancer;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancer.State;
@@ -49,7 +48,7 @@ public class LoadBalancerExpirer extends Maintainer {
}
private void expireReserved() {
- try (Lock lock = db.lockLoadBalancers()) {
+ try (var lock = db.lockLoadBalancers()) {
var now = nodeRepository().clock().instant();
var expirationTime = now.minus(reservedExpiry);
var expired = nodeRepository().loadBalancers()
@@ -62,7 +61,7 @@ public class LoadBalancerExpirer extends Maintainer {
private void removeInactive() {
List<LoadBalancerId> failed = new ArrayList<>();
Exception lastException = null;
- try (Lock lock = db.lockLoadBalancers()) {
+ try (var lock = db.lockLoadBalancers()) {
var now = nodeRepository().clock().instant();
var expirationTime = now.minus(inactiveExpiry);
var expired = nodeRepository().loadBalancers()
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
index 1e83c2c9176..725a3426eae 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
@@ -42,7 +42,7 @@ class Activator {
public void activate(ApplicationId application, Collection<HostSpec> hosts, NestedTransaction transaction) {
try (Mutex lock = nodeRepository.lock(application)) {
activateNodes(application, hosts, transaction, lock);
- activateLoadBalancers(application, hosts, lock);
+ activateLoadBalancers(application, hosts, transaction, lock);
}
}
@@ -91,17 +91,17 @@ class Activator {
}
/** Activate load balancers */
- private void activateLoadBalancers(ApplicationId application, Collection<HostSpec> hosts,
+ private void activateLoadBalancers(ApplicationId application, Collection<HostSpec> hosts, NestedTransaction transaction,
@SuppressWarnings("unused") Mutex applicationLock) {
- loadBalancerProvisioner.ifPresent(provisioner -> provisioner.activate(application, clustersOf(hosts)));
+ loadBalancerProvisioner.ifPresent(provisioner -> provisioner.activate(application, clustersOf(hosts), applicationLock, transaction));
}
- private static List<ClusterSpec> clustersOf(Collection<HostSpec> hosts) {
+ private static Set<ClusterSpec> clustersOf(Collection<HostSpec> hosts) {
return hosts.stream()
.map(HostSpec::membership)
.flatMap(Optional::stream)
.map(ClusterMembership::cluster)
- .collect(Collectors.toUnmodifiableList());
+ .collect(Collectors.toUnmodifiableSet());
}
private static void validateParentHosts(ApplicationId application, NodeList nodes, List<Node> potentialChildren) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
index ea30fba9798..693fa254ac3 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
@@ -5,6 +5,7 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.NodeType;
+import com.yahoo.config.provision.exception.LoadBalancerServiceException;
import com.yahoo.log.LogLevel;
import com.yahoo.transaction.Mutex;
import com.yahoo.transaction.NestedTransaction;
@@ -14,16 +15,18 @@ import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancer;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancerId;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancerInstance;
-import com.yahoo.config.provision.exception.LoadBalancerServiceException;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancerService;
import com.yahoo.vespa.hosted.provision.lb.Real;
import com.yahoo.vespa.hosted.provision.node.IP;
import com.yahoo.vespa.hosted.provision.persistence.CuratorDatabaseClient;
+import java.util.ArrayList;
+import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.function.Function;
import java.util.logging.Logger;
import java.util.stream.Collectors;
@@ -70,7 +73,9 @@ public class LoadBalancerProvisioner {
public void prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
if (requestedNodes.type() != NodeType.tenant) return; // Nothing to provision for this node type
if (cluster.type() != ClusterSpec.Type.container) return; // Nothing to provision for this cluster type
- provision(application, cluster.id(), false);
+ try (var loadBalancersLock = db.lockLoadBalancers()) {
+ provision(application, cluster.id(), false, loadBalancersLock);
+ }
}
/**
@@ -79,12 +84,21 @@ public class LoadBalancerProvisioner {
* If a load balancer for the cluster already exists, it will be reconfigured based on the currently allocated
* nodes and the load balancer itself will be moved to {@link LoadBalancer.State#active}.
*
+ * Load balancers for clusters that are no longer in given clusters are deactivated.
+ *
* Calling this when no load balancer has been prepared for given cluster is a no-op.
*/
- public void activate(ApplicationId application, List<ClusterSpec> clusters) {
- for (var clusterId : containerClusterIdsOf(clusters)) {
- // Provision again to ensure that load balancer instance re-configured with correct nodes
- provision(application, clusterId, true);
+ public void activate(ApplicationId application, Set<ClusterSpec> clusters,
+ @SuppressWarnings("unused") Mutex applicationLock, NestedTransaction transaction) {
+ try (var loadBalancersLock = db.lockLoadBalancers()) {
+ var containerClusters = containerClusterOf(clusters);
+ for (var clusterId : containerClusters) {
+ // Provision again to ensure that load balancer instance is re-configured with correct nodes
+ provision(application, clusterId, true, loadBalancersLock);
+ }
+ // Deactivate any surplus load balancers, i.e. load balancers for clusters that have been removed
+ var surplusLoadBalancers = surplusLoadBalancersOf(application, containerClusters);
+ deactivate(surplusLoadBalancers, transaction);
}
}
@@ -93,38 +107,57 @@ public class LoadBalancerProvisioner {
* load balancer(s).
*/
public void deactivate(ApplicationId application, NestedTransaction transaction) {
- try (Mutex applicationLock = nodeRepository.lock(application)) {
+ try (var applicationLock = nodeRepository.lock(application)) {
try (Mutex loadBalancersLock = db.lockLoadBalancers()) {
- var now = nodeRepository.clock().instant();
- var deactivatedLoadBalancers = nodeRepository.loadBalancers().owner(application).asList().stream()
- .map(lb -> lb.with(LoadBalancer.State.inactive, now))
- .collect(Collectors.toList());
- db.writeLoadBalancers(deactivatedLoadBalancers, transaction);
+ deactivate(nodeRepository.loadBalancers().owner(application).asList(), transaction);
}
}
}
+ /** Returns load balancers of given application that are no longer referenced by wantedClusters */
+ private List<LoadBalancer> surplusLoadBalancersOf(ApplicationId application, Set<ClusterSpec.Id> activeClusters) {
+ var activeLoadBalancersByCluster = nodeRepository.loadBalancers()
+ .owner(application)
+ .in(LoadBalancer.State.active)
+ .asList()
+ .stream()
+ .collect(Collectors.toMap(lb -> lb.id().cluster(),
+ Function.identity()));
+ var surplus = new ArrayList<LoadBalancer>();
+ for (var kv : activeLoadBalancersByCluster.entrySet()) {
+ if (activeClusters.contains(kv.getKey())) continue;
+ surplus.add(kv.getValue());
+ }
+ return Collections.unmodifiableList(surplus);
+ }
+
+ private void deactivate(List<LoadBalancer> loadBalancers, NestedTransaction transaction) {
+ var now = nodeRepository.clock().instant();
+ var deactivatedLoadBalancers = loadBalancers.stream()
+ .map(lb -> lb.with(LoadBalancer.State.inactive, now))
+ .collect(Collectors.toList());
+ db.writeLoadBalancers(deactivatedLoadBalancers, transaction);
+ }
+
+
/** Idempotently provision a load balancer for given application and cluster */
- private void provision(ApplicationId application, ClusterSpec.Id clusterId, boolean activate) {
- try (var applicationLock = nodeRepository.lock(application)) {
- try (var loadBalancersLock = db.lockLoadBalancers()) {
- var id = new LoadBalancerId(application, clusterId);
- var now = nodeRepository.clock().instant();
- var loadBalancer = db.readLoadBalancer(id);
- if (loadBalancer.isEmpty() && activate) return; // Nothing to activate as this load balancer was never prepared
-
- var force = loadBalancer.isPresent() && loadBalancer.get().state() != LoadBalancer.State.active;
- var instance = create(application, clusterId, allocatedContainers(application, clusterId), force);
- LoadBalancer newLoadBalancer;
- if (loadBalancer.isEmpty()) {
- newLoadBalancer = new LoadBalancer(id, instance, LoadBalancer.State.reserved, now);
- } else {
- var newState = activate ? LoadBalancer.State.active : loadBalancer.get().state();
- newLoadBalancer = loadBalancer.get().with(instance).with(newState, now);
- }
- db.writeLoadBalancer(newLoadBalancer);
- }
+ private void provision(ApplicationId application, ClusterSpec.Id clusterId, boolean activate,
+ @SuppressWarnings("unused") Mutex loadBalancersLock) {
+ var id = new LoadBalancerId(application, clusterId);
+ var now = nodeRepository.clock().instant();
+ var loadBalancer = db.readLoadBalancer(id);
+ if (loadBalancer.isEmpty() && activate) return; // Nothing to activate as this load balancer was never prepared
+
+ var force = loadBalancer.isPresent() && loadBalancer.get().state() != LoadBalancer.State.active;
+ var instance = create(application, clusterId, allocatedContainers(application, clusterId), force);
+ LoadBalancer newLoadBalancer;
+ if (loadBalancer.isEmpty()) {
+ newLoadBalancer = new LoadBalancer(id, instance, LoadBalancer.State.reserved, now);
+ } else {
+ var newState = activate ? LoadBalancer.State.active : loadBalancer.get().state();
+ newLoadBalancer = loadBalancer.get().with(instance).with(newState, now);
}
+ db.writeLoadBalancer(newLoadBalancer);
}
private LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, List<Node> nodes, boolean force) {
@@ -171,11 +204,11 @@ public class LoadBalancerProvisioner {
return reachable;
}
- private static List<ClusterSpec.Id> containerClusterIdsOf(List<ClusterSpec> clusters) {
+ private static Set<ClusterSpec.Id> containerClusterOf(Set<ClusterSpec> clusters) {
return clusters.stream()
.filter(c -> c.type() == ClusterSpec.Type.container)
.map(ClusterSpec::id)
- .collect(Collectors.toUnmodifiableList());
+ .collect(Collectors.toUnmodifiableSet());
}
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
index 77273f98f76..a26461c4501 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
@@ -116,6 +116,22 @@ public class LoadBalancerProvisionerTest {
.collect(Collectors.toList());
assertEquals(activeContainers, reals);
+ // Cluster removal deactivates relevant load balancer
+ tester.activate(app1, prepare(app1, clusterRequest(ClusterSpec.Type.container, containerCluster1)));
+ assertEquals(2, lbApp1.get().size());
+ assertEquals("Deactivated load balancer for cluster " + containerCluster2, LoadBalancer.State.inactive,
+ lbApp1.get().stream()
+ .filter(lb -> lb.id().cluster().equals(containerCluster2))
+ .map(LoadBalancer::state)
+ .findFirst()
+ .get());
+ assertEquals("Load balancer for cluster " + containerCluster1 + " remains active", LoadBalancer.State.active,
+ lbApp1.get().stream()
+ .filter(lb -> lb.id().cluster().equals(containerCluster1))
+ .map(LoadBalancer::state)
+ .findFirst()
+ .get());
+
// Application is removed, nodes and load balancer are deactivated
NestedTransaction removeTransaction = new NestedTransaction();
tester.provisioner().remove(removeTransaction, app1);
diff --git a/persistence/src/tests/dummyimpl/dummypersistence_test.cpp b/persistence/src/tests/dummyimpl/dummypersistence_test.cpp
index 637b9ef512a..4ca78181fb8 100644
--- a/persistence/src/tests/dummyimpl/dummypersistence_test.cpp
+++ b/persistence/src/tests/dummyimpl/dummypersistence_test.cpp
@@ -25,9 +25,9 @@ struct Fixture {
}
Fixture() {
- insert(DocumentId("doc:test:3"), Timestamp(3), NONE);
- insert(DocumentId("doc:test:1"), Timestamp(1), NONE);
- insert(DocumentId("doc:test:2"), Timestamp(2), NONE);
+ insert(DocumentId("id:ns:type::test:3"), Timestamp(3), NONE);
+ insert(DocumentId("id:ns:type::test:1"), Timestamp(1), NONE);
+ insert(DocumentId("id:ns:type::test:2"), Timestamp(2), NONE);
}
};
@@ -35,21 +35,21 @@ TEST("require that empty BucketContent behaves") {
BucketContent content;
EXPECT_FALSE(content.hasTimestamp(Timestamp(1)));
EXPECT_FALSE(content.getEntry(Timestamp(1)).get());
- EXPECT_FALSE(content.getEntry(DocumentId("doc:test:1")).get());
+ EXPECT_FALSE(content.getEntry(DocumentId("id:ns:type::test:1")).get());
}
TEST_F("require that BucketContent can retrieve by timestamp", Fixture) {
DocEntry::SP entry = f.content.getEntry(Timestamp(1));
ASSERT_TRUE(entry.get());
ASSERT_TRUE(entry->getDocumentId());
- ASSERT_EQUAL("doc:test:1", entry->getDocumentId()->toString());
+ ASSERT_EQUAL("id:ns:type::test:1", entry->getDocumentId()->toString());
}
TEST_F("require that BucketContent can retrieve by doc id", Fixture) {
- DocEntry::SP entry = f.content.getEntry(DocumentId("doc:test:2"));
+ DocEntry::SP entry = f.content.getEntry(DocumentId("id:ns:type::test:2"));
ASSERT_TRUE(entry.get());
ASSERT_TRUE(entry->getDocumentId());
- ASSERT_EQUAL("doc:test:2", entry->getDocumentId()->toString());
+ ASSERT_EQUAL("id:ns:type::test:2", entry->getDocumentId()->toString());
}
TEST_F("require that BucketContent can check a timestamp", Fixture) {
@@ -64,13 +64,13 @@ TEST_F("require that BucketContent can provide bucket info", Fixture) {
uint32_t lastChecksum = 0;
EXPECT_NOT_EQUAL(lastChecksum, f.content.getBucketInfo().getChecksum());
lastChecksum = f.content.getBucketInfo().getChecksum();
- f.insert(DocumentId("doc:test:3"), Timestamp(4), NONE);
+ f.insert(DocumentId("id:ns:type::test:3"), Timestamp(4), NONE);
EXPECT_NOT_EQUAL(lastChecksum, f.content.getBucketInfo().getChecksum());
lastChecksum = f.content.getBucketInfo().getChecksum();
- f.insert(DocumentId("doc:test:2"), Timestamp(5), REMOVE_ENTRY);
+ f.insert(DocumentId("id:ns:type::test:2"), Timestamp(5), REMOVE_ENTRY);
EXPECT_NOT_EQUAL(lastChecksum, f.content.getBucketInfo().getChecksum());
- f.insert(DocumentId("doc:test:1"), Timestamp(6), REMOVE_ENTRY);
- f.insert(DocumentId("doc:test:3"), Timestamp(7), REMOVE_ENTRY);
+ f.insert(DocumentId("id:ns:type::test:1"), Timestamp(6), REMOVE_ENTRY);
+ f.insert(DocumentId("id:ns:type::test:3"), Timestamp(7), REMOVE_ENTRY);
EXPECT_EQUAL(0u, f.content.getBucketInfo().getChecksum());
}
diff --git a/searchcore/src/tests/proton/attribute/attribute_test.cpp b/searchcore/src/tests/proton/attribute/attribute_test.cpp
index 3734d2fe1dc..3da27cde10e 100644
--- a/searchcore/src/tests/proton/attribute/attribute_test.cpp
+++ b/searchcore/src/tests/proton/attribute/attribute_test.cpp
@@ -204,7 +204,7 @@ TEST_F("require that attribute writer handles put", Fixture)
attribute::ConstCharContent sbuf;
{ // empty document should give default values
EXPECT_EQUAL(1u, a1->getNumDocs());
- f.put(1, *idb.startDocument("doc::1").endDocument(), 1);
+ f.put(1, *idb.startDocument("id:ns:searchdocument::1").endDocument(), 1);
EXPECT_EQUAL(2u, a1->getNumDocs());
EXPECT_EQUAL(2u, a2->getNumDocs());
EXPECT_EQUAL(2u, a3->getNumDocs());
@@ -226,7 +226,7 @@ TEST_F("require that attribute writer handles put", Fixture)
EXPECT_EQUAL(strcmp("", sbuf[0]), 0);
}
{ // document with single value & multi value attribute
- Document::UP doc = idb.startDocument("doc::2").
+ Document::UP doc = idb.startDocument("id:ns:searchdocument::2").
startAttributeField("a1").addInt(10).endField().
startAttributeField("a2").startElement().addInt(20).endElement().
startElement().addInt(30).endElement().endField().endDocument();
@@ -246,7 +246,7 @@ TEST_F("require that attribute writer handles put", Fixture)
EXPECT_EQUAL(30u, ibuf[1]);
}
{ // replace existing document
- Document::UP doc = idb.startDocument("doc::2").
+ Document::UP doc = idb.startDocument("id:ns:searchdocument::2").
startAttributeField("a1").addInt(100).endField().
startAttributeField("a2").startElement().addInt(200).endElement().
startElement().addInt(300).endElement().
@@ -281,7 +281,7 @@ TEST_F("require that attribute writer handles predicate put", Fixture)
// empty document should give default values
EXPECT_EQUAL(1u, a1->getNumDocs());
- f.put(1, *idb.startDocument("doc::1").endDocument(), 1);
+ f.put(1, *idb.startDocument("id:ns:searchdocument::1").endDocument(), 1);
EXPECT_EQUAL(2u, a1->getNumDocs());
EXPECT_EQUAL(1u, a1->getStatus().getLastSyncToken());
EXPECT_EQUAL(0u, index.getZeroConstraintDocs().size());
@@ -289,7 +289,7 @@ TEST_F("require that attribute writer handles predicate put", Fixture)
// document with single value attribute
PredicateSlimeBuilder builder;
Document::UP doc =
- idb.startDocument("doc::2").startAttributeField("a1")
+ idb.startDocument("id:ns:searchdocument::2").startAttributeField("a1")
.addPredicate(builder.true_predicate().build())
.endField().endDocument();
f.put(2, *doc, 2);
@@ -301,7 +301,7 @@ TEST_F("require that attribute writer handles predicate put", Fixture)
EXPECT_FALSE(it.valid());
// replace existing document
- doc = idb.startDocument("doc::2").startAttributeField("a1")
+ doc = idb.startDocument("id:ns:searchdocument::2").startAttributeField("a1")
.addPredicate(builder.feature("foo").value("bar").build())
.endField().endDocument();
f.put(3, *doc, 2);
@@ -374,7 +374,7 @@ TEST_F("require that visibilitydelay is honoured", Fixture)
DocBuilder idb(s);
EXPECT_EQUAL(1u, a1->getNumDocs());
EXPECT_EQUAL(0u, a1->getStatus().getLastSyncToken());
- Document::UP doc = idb.startDocument("doc::1")
+ Document::UP doc = idb.startDocument("id:ns:searchdocument::1")
.startAttributeField("a1").addStr("10").endField()
.endDocument();
f.put(3, *doc, 1);
@@ -398,11 +398,11 @@ TEST_F("require that visibilitydelay is honoured", Fixture)
EXPECT_EQUAL(8u, a1->getStatus().getLastSyncToken());
verifyAttributeContent(*a1, 2, "10");
- awDelayed.put(9, *idb.startDocument("doc::1").startAttributeField("a1").addStr("11").endField().endDocument(),
+ awDelayed.put(9, *idb.startDocument("id:ns:searchdocument::1").startAttributeField("a1").addStr("11").endField().endDocument(),
2, false, emptyCallback);
- awDelayed.put(10, *idb.startDocument("doc::1").startAttributeField("a1").addStr("20").endField().endDocument(),
+ awDelayed.put(10, *idb.startDocument("id:ns:searchdocument::1").startAttributeField("a1").addStr("20").endField().endDocument(),
2, false, emptyCallback);
- awDelayed.put(11, *idb.startDocument("doc::1").startAttributeField("a1").addStr("30").endField().endDocument(),
+ awDelayed.put(11, *idb.startDocument("id:ns:searchdocument::1").startAttributeField("a1").addStr("30").endField().endDocument(),
2, false, emptyCallback);
EXPECT_EQUAL(8u, a1->getStatus().getLastSyncToken());
verifyAttributeContent(*a1, 2, "10");
@@ -422,7 +422,7 @@ TEST_F("require that attribute writer handles predicate remove", Fixture)
DocBuilder idb(s);
PredicateSlimeBuilder builder;
Document::UP doc =
- idb.startDocument("doc::1").startAttributeField("a1")
+ idb.startDocument("id:ns:searchdocument::1").startAttributeField("a1")
.addPredicate(builder.true_predicate().build())
.endField().endDocument();
f.put(1, *doc, 1);
@@ -447,7 +447,7 @@ TEST_F("require that attribute writer handles update", Fixture)
schema.addAttributeField(Schema::AttributeField("a2", schema::DataType::INT32, CollectionType::SINGLE));
DocBuilder idb(schema);
const document::DocumentType &dt(idb.getDocumentType());
- DocumentUpdate upd(*idb.getDocumentTypeRepo(), dt, DocumentId("doc::1"));
+ DocumentUpdate upd(*idb.getDocumentTypeRepo(), dt, DocumentId("id:ns:searchdocument::1"));
upd.addUpdate(FieldUpdate(upd.getType().getField("a1"))
.addUpdate(ArithmeticValueUpdate(ArithmeticValueUpdate::Add, 5)));
upd.addUpdate(FieldUpdate(upd.getType().getField("a2"))
@@ -484,14 +484,14 @@ TEST_F("require that attribute writer handles predicate update", Fixture)
DocBuilder idb(schema);
PredicateSlimeBuilder builder;
Document::UP doc =
- idb.startDocument("doc::1").startAttributeField("a1")
+ idb.startDocument("id:ns:searchdocument::1").startAttributeField("a1")
.addPredicate(builder.true_predicate().build())
.endField().endDocument();
f.put(1, *doc, 1);
EXPECT_EQUAL(2u, a1->getNumDocs());
const document::DocumentType &dt(idb.getDocumentType());
- DocumentUpdate upd(*idb.getDocumentTypeRepo(), dt, DocumentId("doc::1"));
+ DocumentUpdate upd(*idb.getDocumentTypeRepo(), dt, DocumentId("id:ns:searchdocument::1"));
PredicateFieldValue new_value(builder.feature("foo").value("bar").build());
upd.addUpdate(FieldUpdate(upd.getType().getField("a1"))
.addUpdate(AssignValueUpdate(new_value)));
@@ -633,7 +633,7 @@ createTensorSchema() {
Document::UP
createTensorPutDoc(DocBuilder &builder, const Tensor &tensor) {
- return builder.startDocument("doc::1").
+ return builder.startDocument("id:ns:searchdocument::1").
startAttributeField("a1").
addTensor(tensor.clone()).endField().endDocument();
}
@@ -678,7 +678,7 @@ TEST_F("require that attribute writer handles tensor assign update", Fixture)
EXPECT_TRUE(tensor->equals(*tensor2));
const document::DocumentType &dt(builder.getDocumentType());
- DocumentUpdate upd(*builder.getDocumentTypeRepo(), dt, DocumentId("doc::1"));
+ DocumentUpdate upd(*builder.getDocumentTypeRepo(), dt, DocumentId("id:ns:searchdocument::1"));
auto new_tensor = make_tensor(TensorSpec("tensor(x{},y{})")
.add({{"x", "8"}, {"y", "9"}}, 11));
TensorDataType xySparseTensorDataType(vespalib::eval::ValueType::from_spec("tensor(x{},y{})"));
@@ -728,7 +728,7 @@ putAttributes(Fixture &f, std::vector<uint32_t> expExecuteHistory)
EXPECT_EQUAL(1u, a1->getNumDocs());
EXPECT_EQUAL(1u, a2->getNumDocs());
EXPECT_EQUAL(1u, a3->getNumDocs());
- f.put(1, *idb.startDocument("doc::1").
+ f.put(1, *idb.startDocument("id:ns:searchdocument::1").
startAttributeField("a1").addInt(10).endField().
startAttributeField("a2").addInt(15).endField().
startAttributeField("a3").addInt(20).endField().
diff --git a/searchcore/src/tests/proton/common/cachedselect_test.cpp b/searchcore/src/tests/proton/common/cachedselect_test.cpp
index dcba8fda1c6..df414439bce 100644
--- a/searchcore/src/tests/proton/common/cachedselect_test.cpp
+++ b/searchcore/src/tests/proton/common/cachedselect_test.cpp
@@ -466,10 +466,10 @@ TEST_F("Test that basic select works", TestFixture)
{
MyDB &db(*f._db);
- db.addDoc(1u, "doc:test:1", "hello", "null", 45, 37);
- db.addDoc(2u, "doc:test:2", "gotcha", "foo", 3, 25);
- db.addDoc(3u, "doc:test:3", "gotcha", "foo", noIntVal, noIntVal);
- db.addDoc(4u, "doc:test:4", "null", "foo", noIntVal, noIntVal);
+ db.addDoc(1u, "id:ns:test::1", "hello", "null", 45, 37);
+ db.addDoc(2u, "id:ns:test::2", "gotcha", "foo", 3, 25);
+ db.addDoc(3u, "id:ns:test::3", "gotcha", "foo", noIntVal, noIntVal);
+ db.addDoc(4u, "id:ns:test::4", "null", "foo", noIntVal, noIntVal);
CachedSelect::SP cs;
@@ -566,9 +566,9 @@ struct PreDocSelectFixture : public TestFixture {
PreDocSelectFixture()
: TestFixture()
{
- db().addDoc(1u, "doc:test:1", "foo", "null", 3, 5);
- db().addDoc(2u, "doc:test:1", "bar", "null", 3, 5);
- db().addDoc(3u, "doc:test:2", "foo", "null", 7, 5);
+ db().addDoc(1u, "id:ns:test::1", "foo", "null", 3, 5);
+ db().addDoc(2u, "id:ns:test::1", "bar", "null", 3, 5);
+ db().addDoc(3u, "id:ns:test::2", "foo", "null", 7, 5);
}
};
@@ -602,10 +602,10 @@ TEST_F("Test performance when using attributes", TestFixture)
{
MyDB &db(*f._db);
- db.addDoc(1u, "doc:test:1", "hello", "null", 45, 37);
- db.addDoc(2u, "doc:test:2", "gotcha", "foo", 3, 25);
- db.addDoc(3u, "doc:test:3", "gotcha", "foo", noIntVal, noIntVal);
- db.addDoc(4u, "doc:test:4", "null", "foo", noIntVal, noIntVal);
+ db.addDoc(1u, "id:ns:test::1", "hello", "null", 45, 37);
+ db.addDoc(2u, "id:ns:test::2", "gotcha", "foo", 3, 25);
+ db.addDoc(3u, "id:ns:test::3", "gotcha", "foo", noIntVal, noIntVal);
+ db.addDoc(4u, "id:ns:test::4", "null", "foo", noIntVal, noIntVal);
CachedSelect::SP cs;
cs = f.testParse("test.aa < 45", "test");
diff --git a/searchcore/src/tests/proton/common/selectpruner_test.cpp b/searchcore/src/tests/proton/common/selectpruner_test.cpp
index a7feb865d96..5b1fa3ed4bf 100644
--- a/searchcore/src/tests/proton/common/selectpruner_test.cpp
+++ b/searchcore/src/tests/proton/common/selectpruner_test.cpp
@@ -36,8 +36,7 @@ using search::AttributeFactory;
typedef Node::UP NodeUP;
-namespace
-{
+namespace {
const int32_t doc_type_id = 787121340;
const string type_name = "test";
@@ -57,9 +56,6 @@ const string invalid_name("test_2.ac > 3999");
const string invalid2_name("test_2.ac > 4999");
const string empty("");
-const document::DocumentId docId("doc:test:1");
-
-
std::unique_ptr<const DocumentTypeRepo>
makeDocTypeRepo()
{
@@ -135,23 +131,12 @@ public:
bool _hasDocuments;
TestFixture();
-
~TestFixture();
- void
- testParse(const string &selection);
-
- void
- testParseFail(const string &selection);
-
- void
- testPrune(const string &selection,
- const string &exp);
-
- void
- testPrune(const string &selection,
- const string &exp,
- const string &docTypeName);
+ void testParse(const string &selection);
+ void testParseFail(const string &selection);
+ void testPrune(const string &selection, const string &exp);
+ void testPrune(const string &selection, const string &exp, const string &docTypeName);
};
@@ -169,28 +154,22 @@ TestFixture::TestFixture()
}
-TestFixture::~TestFixture()
-{
-}
+TestFixture::~TestFixture() = default;
void
TestFixture::testParse(const string &selection)
{
const DocumentTypeRepo &repo(*_repoUP);
- document::select::Parser parser(repo,
- document::BucketIdFactory());
+ document::select::Parser parser(repo,document::BucketIdFactory());
NodeUP select;
try {
- LOG(info,
- "Trying to parse '%s'",
- selection.c_str());
+ LOG(info, "Trying to parse '%s'", selection.c_str());
select = parser.parse(selection);
} catch (document::select::ParsingFailedException &e) {
- LOG(info,
- "Parse failed: %s", e.what());
+ LOG(info, "Parse failed: %s", e.what());
select.reset(0);
}
ASSERT_TRUE(select.get() != NULL);
@@ -201,20 +180,15 @@ void
TestFixture::testParseFail(const string &selection)
{
const DocumentTypeRepo &repo(*_repoUP);
- document::select::Parser parser(repo,
- document::BucketIdFactory());
+ document::select::Parser parser(repo,document::BucketIdFactory());
NodeUP select;
try {
- LOG(info,
- "Trying to parse '%s'",
- selection.c_str());
+ LOG(info, "Trying to parse '%s'", selection.c_str());
select = parser.parse(selection);
} catch (document::select::ParsingFailedException &e) {
- LOG(info,
- "Parse failed: %s",
- e.getMessage().c_str());
+ LOG(info, "Parse failed: %s", e.getMessage().c_str());
select.reset(0);
}
ASSERT_TRUE(select.get() == NULL);
@@ -222,25 +196,18 @@ TestFixture::testParseFail(const string &selection)
void
-TestFixture::testPrune(const string &selection,
- const string &exp,
- const string &docTypeName)
+TestFixture::testPrune(const string &selection, const string &exp, const string &docTypeName)
{
const DocumentTypeRepo &repo(*_repoUP);
- document::select::Parser parser(repo,
- document::BucketIdFactory());
+ document::select::Parser parser(repo,document::BucketIdFactory());
NodeUP select;
try {
- LOG(info,
- "Trying to parse '%s' with docType=%s",
- selection.c_str(),
- docTypeName.c_str());
+ LOG(info, "Trying to parse '%s' with docType=%s", selection.c_str(), docTypeName.c_str());
select = parser.parse(selection);
} catch (document::select::ParsingFailedException &e) {
- LOG(info,
- "Parse failed: %s", e.what());
+ LOG(info, "Parse failed: %s", e.what());
select.reset(0);
}
ASSERT_TRUE(select.get() != NULL);
@@ -249,7 +216,7 @@ TestFixture::testPrune(const string &selection,
LOG(info, "ParseTree: '%s'", os.str().c_str());
const DocumentType *docType = repo.getDocumentType(docTypeName);
ASSERT_TRUE(docType != NULL);
- Document::UP emptyDoc(new Document(*docType, docId));
+ Document::UP emptyDoc(new Document(*docType, document::DocumentId("id:ns:" + docTypeName + "::1")));
emptyDoc->setRepo(repo);
SelectPruner pruner(docTypeName, &_amgr, *emptyDoc, repo, _hasFields, _hasDocuments);
pruner.process(*select);
diff --git a/searchcore/src/tests/proton/docsummary/docsummary.cpp b/searchcore/src/tests/proton/docsummary/docsummary.cpp
index e8152161faa..0a9f3127844 100644
--- a/searchcore/src/tests/proton/docsummary/docsummary.cpp
+++ b/searchcore/src/tests/proton/docsummary/docsummary.cpp
@@ -429,7 +429,7 @@ Test::requireThatAdapterHandlesAllFieldTypes()
s.addSummaryField(Schema::SummaryField("l", schema::DataType::STRING));
BuildContext bc(s);
- bc._bld.startDocument("doc::0");
+ bc._bld.startDocument("id:ns:searchdocument::0");
bc._bld.startSummaryField("a").addInt(255).endField();
bc._bld.startSummaryField("b").addInt(32767).endField();
bc._bld.startSummaryField("c").addInt(2147483647).endField();
@@ -478,12 +478,12 @@ Test::requireThatAdapterHandlesMultipleDocuments()
s.addSummaryField(Schema::SummaryField("a", schema::DataType::INT32));
BuildContext bc(s);
- bc._bld.startDocument("doc::0").
+ bc._bld.startDocument("id:ns:searchdocument::0").
startSummaryField("a").
addInt(1000).
endField();
bc.endDocument(0);
- bc._bld.startDocument("doc::1").
+ bc._bld.startDocument("id:ns:searchdocument::1").
startSummaryField("a").
addInt(2000).endField();
bc.endDocument(1);
@@ -519,7 +519,7 @@ Test::requireThatAdapterHandlesDocumentIdField()
Schema s;
s.addSummaryField(Schema::SummaryField("documentid", schema::DataType::STRING));
BuildContext bc(s);
- bc._bld.startDocument("doc::0").
+ bc._bld.startDocument("id:ns:searchdocument::0").
startSummaryField("documentid").
addStr("foo").
endField();
@@ -528,16 +528,16 @@ Test::requireThatAdapterHandlesDocumentIdField()
bc.createFieldCacheRepo(getResultConfig())->getFieldCache("class4"),
getMarkupFields());
GeneralResultPtr res = getResult(dsa, 0);
- EXPECT_EQUAL("doc::0", std::string(res->GetEntry("documentid")->_stringval,
+ EXPECT_EQUAL("id:ns:searchdocument::0", std::string(res->GetEntry("documentid")->_stringval,
res->GetEntry("documentid")->_stringlen));
}
-GlobalId gid1 = DocumentId("doc::1").getGlobalId(); // lid 1
-GlobalId gid2 = DocumentId("doc::2").getGlobalId(); // lid 2
-GlobalId gid3 = DocumentId("doc::3").getGlobalId(); // lid 3
-GlobalId gid4 = DocumentId("doc::4").getGlobalId(); // lid 4
-GlobalId gid9 = DocumentId("doc::9").getGlobalId(); // not existing
+GlobalId gid1 = DocumentId("id:ns:searchdocument::1").getGlobalId(); // lid 1
+GlobalId gid2 = DocumentId("id:ns:searchdocument::2").getGlobalId(); // lid 2
+GlobalId gid3 = DocumentId("id:ns:searchdocument::3").getGlobalId(); // lid 3
+GlobalId gid4 = DocumentId("id:ns:searchdocument::4").getGlobalId(); // lid 4
+GlobalId gid9 = DocumentId("id:ns:searchdocument::9").getGlobalId(); // not existing
void
Test::requireThatDocsumRequestIsProcessed()
@@ -547,31 +547,31 @@ Test::requireThatDocsumRequestIsProcessed()
BuildContext bc(s);
DBContext dc(bc._repo, getDocTypeName());
- dc.put(*bc._bld.startDocument("doc::1").
+ dc.put(*bc._bld.startDocument("id:ns:searchdocument::1").
startSummaryField("a").
addInt(10).
endField().
endDocument(),
1);
- dc.put(*bc._bld.startDocument("doc::2").
+ dc.put(*bc._bld.startDocument("id:ns:searchdocument::2").
startSummaryField("a").
addInt(20).
endField().
endDocument(),
2);
- dc.put(*bc._bld.startDocument("doc::3").
+ dc.put(*bc._bld.startDocument("id:ns:searchdocument::3").
startSummaryField("a").
addInt(30).
endField().
endDocument(),
3);
- dc.put(*bc._bld.startDocument("doc::4").
+ dc.put(*bc._bld.startDocument("id:ns:searchdocument::4").
startSummaryField("a").
addInt(40).
endField().
endDocument(),
4);
- dc.put(*bc._bld.startDocument("doc::5").
+ dc.put(*bc._bld.startDocument("id:ns:searchdocument::5").
startSummaryField("a").
addInt(50).
endField().
@@ -607,7 +607,7 @@ Test::requireThatRewritersAreUsed()
BuildContext bc(s);
DBContext dc(bc._repo, getDocTypeName());
- dc.put(*bc._bld.startDocument("doc::1").
+ dc.put(*bc._bld.startDocument("id:ns:searchdocument::1").
startSummaryField("aa").
addInt(10).
endField().
@@ -634,7 +634,7 @@ Test::requireThatSummariesTimeout()
BuildContext bc(s);
DBContext dc(bc._repo, getDocTypeName());
- dc.put(*bc._bld.startDocument("doc::1").
+ dc.put(*bc._bld.startDocument("id:ns:searchdocument::1").
startSummaryField("aa").
addInt(10).
endField().
@@ -686,10 +686,10 @@ Test::requireThatAttributesAreUsed()
BuildContext bc(s);
DBContext dc(bc._repo, getDocTypeName());
- dc.put(*bc._bld.startDocument("doc::1").
+ dc.put(*bc._bld.startDocument("id:ns:searchdocument::1").
endDocument(),
1); // empty doc
- dc.put(*bc._bld.startDocument("doc::2").
+ dc.put(*bc._bld.startDocument("id:ns:searchdocument::2").
startAttributeField("ba").
addInt(10).
endField().
@@ -753,7 +753,7 @@ Test::requireThatAttributesAreUsed()
endField().
endDocument(),
2);
- dc.put(*bc._bld.startDocument("doc::3").
+ dc.put(*bc._bld.startDocument("id:ns:searchdocument::3").
endDocument(),
3); // empty doc
@@ -818,7 +818,7 @@ Test::requireThatSummaryAdapterHandlesPutAndRemove()
s.addSummaryField(Schema::SummaryField("f1", schema::DataType::STRING, CollectionType::SINGLE));
BuildContext bc(s);
DBContext dc(bc._repo, getDocTypeName());
- Document::UP exp = bc._bld.startDocument("doc::1").
+ Document::UP exp = bc._bld.startDocument("id:ns:searchdocument::1").
startSummaryField("f1").
addStr("foo").
endField().
@@ -854,7 +854,7 @@ Test::requireThatAnnotationsAreUsed()
s.addSummaryField(Schema::SummaryField("dynamicstring", schema::DataType::STRING, CollectionType::SINGLE));
BuildContext bc(s);
DBContext dc(bc._repo, getDocTypeName());
- Document::UP exp = bc._bld.startDocument("doc::0").
+ Document::UP exp = bc._bld.startDocument("id:ns:searchdocument::0").
startIndexField("g").
addStr("foo").
addStr("bar").
@@ -908,7 +908,7 @@ Test::requireThatUrisAreUsed()
s.addSummaryField(Schema::SummaryField("uriwset", schema::DataType::STRING, CollectionType::WEIGHTEDSET));
BuildContext bc(s);
DBContext dc(bc._repo, getDocTypeName());
- Document::UP exp = bc._bld.startDocument("doc::0").
+ Document::UP exp = bc._bld.startDocument("id:ns:searchdocument::0").
startIndexField("urisingle").
startSubField("all").
addUrlTokenizedString("http://www.example.com:81/fluke?ab=2#4").
@@ -1074,7 +1074,7 @@ Test::requireThatPositionsAreUsed()
BuildContext bc(s);
DBContext dc(bc._repo, getDocTypeName());
- Document::UP exp = bc._bld.startDocument("doc::1").
+ Document::UP exp = bc._bld.startDocument("id:ns:searchdocument::1").
startAttributeField("sp2").
addPosition(1002, 1003).
endField().
@@ -1146,7 +1146,7 @@ Test::requireThatRawFieldsWorks()
BuildContext bc(s);
DBContext dc(bc._repo, getDocTypeName());
- Document::UP exp = bc._bld.startDocument("doc::0").
+ Document::UP exp = bc._bld.startDocument("id:ns:searchdocument::0").
startSummaryField("i").
addRaw(raw1s.c_str(), raw1s.size()).
endField().
@@ -1178,8 +1178,7 @@ Test::requireThatRawFieldsWorks()
bc.createFieldCacheRepo(getResultConfig())->getFieldCache("class0"),
getMarkupFields());
- ASSERT_TRUE(assertString(raw1s,
- "i", dsa, 1));
+ ASSERT_TRUE(assertString(raw1s, "i", dsa, 1));
GeneralResultPtr res = getResult(dsa, 1);
{
@@ -1237,14 +1236,12 @@ Test::Test()
continue;
// Assume just one argument: source field that must contain markup
_markupFields.insert(markupField);
- LOG(info,
- "Field %s has markup",
- markupField.c_str());
+ LOG(info, "Field %s has markup", markupField.c_str());
}
}
}
-Test::~Test() {}
+Test::~Test() = default;
int
Test::Main()
diff --git a/searchcore/src/tests/proton/docsummary/summaryfieldconverter_test.cpp b/searchcore/src/tests/proton/docsummary/summaryfieldconverter_test.cpp
index cc6eef14fd6..b295926c64a 100644
--- a/searchcore/src/tests/proton/docsummary/summaryfieldconverter_test.cpp
+++ b/searchcore/src/tests/proton/docsummary/summaryfieldconverter_test.cpp
@@ -350,7 +350,7 @@ StringFieldValue Test::makeAnnotatedChineseString() {
}
Document Test::makeDocument() {
- Document doc(getDocType(), DocumentId("doc:scheme:"));
+ Document doc(getDocType(), DocumentId("id:ns:indexingdocument::"));
doc.setRepo(*_documentRepo);
doc.setValue("string", makeAnnotatedString());
@@ -667,7 +667,7 @@ Test::requireThatPredicateIsPrinted()
Cursor &arr = obj.setArray(Predicate::SET);
arr.addString("bar");
- Document doc(getDocType(), DocumentId("doc:scheme:"));
+ Document doc(getDocType(), DocumentId("id:ns:indexingdocument::"));
doc.setRepo(*_documentRepo);
doc.setValue("predicate", PredicateFieldValue(std::move(input)));
@@ -687,7 +687,7 @@ Test::requireThatTensorIsNotConverted()
TensorFieldValue tensorFieldValue(tensorDataType);
tensorFieldValue = make_tensor(TensorSpec("tensor(x{},y{})")
.add({{"x", "4"}, {"y", "5"}}, 7));
- Document doc(getDocType(), DocumentId("doc:scheme:"));
+ Document doc(getDocType(), DocumentId("id:ns:indexingdocument::"));
doc.setRepo(*_documentRepo);
doc.setValue("tensor", tensorFieldValue);
@@ -712,7 +712,7 @@ const ReferenceDataType& Test::getAsRefType(const string& name) const {
}
void Test::requireThatNonEmptyReferenceIsConvertedToStringWithId() {
- Document doc(getDocType(), DocumentId("doc:scheme:"));
+ Document doc(getDocType(), DocumentId("id:ns:indexingdocument::"));
doc.setRepo(*_documentRepo);
doc.setValue("ref", ReferenceFieldValue(
getAsRefType("Reference<target_dummy_document>"),
@@ -723,7 +723,7 @@ void Test::requireThatNonEmptyReferenceIsConvertedToStringWithId() {
}
void Test::requireThatEmptyReferenceIsConvertedToEmptyString() {
- Document doc(getDocType(), DocumentId("doc:scheme:"));
+ Document doc(getDocType(), DocumentId("id:ns:indexingdocument::"));
doc.setRepo(*_documentRepo);
doc.setValue("ref", ReferenceFieldValue(
getAsRefType("Reference<target_dummy_document>")));
@@ -735,7 +735,7 @@ void Test::requireThatEmptyReferenceIsConvertedToEmptyString() {
// Own test for this to ensure that SlimeFiller code path is executed,
// as this only triggers for composite field types.
void Test::requireThatReferenceInCompositeTypeEmitsSlimeData() {
- Document doc(getDocType(), DocumentId("doc:scheme:"));
+ Document doc(getDocType(), DocumentId("id:ns:indexingdocument::"));
doc.setRepo(*_documentRepo);
StructFieldValue sfv(getDataType("indexingdocument.header.nested"));
diff --git a/searchcore/src/tests/proton/document_iterator/document_iterator_test.cpp b/searchcore/src/tests/proton/document_iterator/document_iterator_test.cpp
index ad5ac55c5e9..9342ddd4b8a 100644
--- a/searchcore/src/tests/proton/document_iterator/document_iterator_test.cpp
+++ b/searchcore/src/tests/proton/document_iterator/document_iterator_test.cpp
@@ -416,17 +416,17 @@ void checkEntry(const IterateResult &res, size_t idx, const Document &doc, const
TEST("require that custom retrievers work as expected") {
IDocumentRetriever::SP dr =
- cat(cat(doc("doc:foo:1", Timestamp(2), bucket(5)),
- rem("doc:foo:2", Timestamp(3), bucket(5))),
- cat(doc("doc:foo:3", Timestamp(7), bucket(6)),
+ cat(cat(doc("id:ns:document::1", Timestamp(2), bucket(5)),
+ rem("id:ns:document::2", Timestamp(3), bucket(5))),
+ cat(doc("id:ns:document::3", Timestamp(7), bucket(6)),
nil()));
- EXPECT_FALSE(dr->getDocumentMetaData(DocumentId("doc:foo:bogus")).valid());
+ EXPECT_FALSE(dr->getDocumentMetaData(DocumentId("id:ns:document::bogus")).valid());
EXPECT_TRUE(dr->getDocument(1).get() == 0);
EXPECT_TRUE(dr->getDocument(2).get() == 0);
EXPECT_TRUE(dr->getDocument(3).get() != 0);
- TEST_DO(checkDoc(*dr, "doc:foo:1", 2, 5, false));
- TEST_DO(checkDoc(*dr, "doc:foo:2", 3, 5, true));
- TEST_DO(checkDoc(*dr, "doc:foo:3", 7, 6, false));
+ TEST_DO(checkDoc(*dr, "id:ns:document::1", 2, 5, false));
+ TEST_DO(checkDoc(*dr, "id:ns:document::2", 3, 5, true));
+ TEST_DO(checkDoc(*dr, "id:ns:document::3", 7, 6, false));
DocumentMetaData::Vector b5;
DocumentMetaData::Vector b6;
dr->getBucketMetaData(bucket(5), b5);
@@ -456,19 +456,19 @@ TEST("require that a list of empty retrievers can be iterated") {
TEST("require that normal documents can be iterated") {
DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
- itr.add(doc("doc:foo:1", Timestamp(2), bucket(5)));
- itr.add(cat(doc("doc:foo:2", Timestamp(3), bucket(5)),
- doc("doc:foo:3", Timestamp(4), bucket(5))));
+ itr.add(doc("id:ns:document::1", Timestamp(2), bucket(5)));
+ itr.add(cat(doc("id:ns:document::2", Timestamp(3), bucket(5)),
+ doc("id:ns:document::3", Timestamp(4), bucket(5))));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(3u, res.getEntries().size());
- TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:1")), Timestamp(2)));
- TEST_DO(checkEntry(res, 1, Document(*DataType::DOCUMENT, DocumentId("doc:foo:2")), Timestamp(3)));
- TEST_DO(checkEntry(res, 2, Document(*DataType::DOCUMENT, DocumentId("doc:foo:3")), Timestamp(4)));
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::1")), Timestamp(2)));
+ TEST_DO(checkEntry(res, 1, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::2")), Timestamp(3)));
+ TEST_DO(checkEntry(res, 2, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::3")), Timestamp(4)));
}
void verifyIterateIgnoringStopSignal(DocumentIterator & itr) {
- itr.add(doc("doc:foo:1", Timestamp(2), bucket(5)));
+ itr.add(doc("id:ns:document::1", Timestamp(2), bucket(5)));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(1u, res.getEntries().size());
@@ -488,14 +488,14 @@ TEST("require that iterator ignoring maxbytes stops at the end, and does not aut
}
void verifyReadConsistency(DocumentIterator & itr, Committer & committer) {
- IDocumentRetriever::SP retriever = doc("doc:foo:1", Timestamp(2), bucket(5));
+ IDocumentRetriever::SP retriever = doc("id:ns:document::1", Timestamp(2), bucket(5));
IDocumentRetriever::SP commitAndWaitRetriever(new CommitAndWaitDocumentRetriever(retriever, committer));
itr.add(commitAndWaitRetriever);
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(1u, res.getEntries().size());
- TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:1")), Timestamp(2)));
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::1")), Timestamp(2)));
EXPECT_EQUAL(0u, committer._commitCount);
}
@@ -516,7 +516,7 @@ TEST("require that readconsistency::strong does commit") {
}
TEST("require that docid limit is honoured") {
- IDocumentRetriever::SP retriever = doc("doc:foo:1", Timestamp(2), bucket(5));
+ IDocumentRetriever::SP retriever = doc("id:ns:document::1", Timestamp(2), bucket(5));
UnitDR & udr = dynamic_cast<UnitDR &>(*retriever);
udr.docid = 7;
DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
@@ -524,7 +524,7 @@ TEST("require that docid limit is honoured") {
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(1u, res.getEntries().size());
- TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:1")), Timestamp(2)));
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::1")), Timestamp(2)));
udr.setDocIdLimit(7);
DocumentIterator limited(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
@@ -536,46 +536,46 @@ TEST("require that docid limit is honoured") {
TEST("require that remove entries can be iterated") {
DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
- itr.add(rem("doc:foo:1", Timestamp(2), bucket(5)));
- itr.add(cat(rem("doc:foo:2", Timestamp(3), bucket(5)),
- rem("doc:foo:3", Timestamp(4), bucket(5))));
+ itr.add(rem("id:ns:document::1", Timestamp(2), bucket(5)));
+ itr.add(cat(rem("id:ns:document::2", Timestamp(3), bucket(5)),
+ rem("id:ns:document::3", Timestamp(4), bucket(5))));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(3u, res.getEntries().size());
- TEST_DO(checkEntry(res, 0, DocumentId("doc:foo:1"), Timestamp(2)));
- TEST_DO(checkEntry(res, 1, DocumentId("doc:foo:2"), Timestamp(3)));
- TEST_DO(checkEntry(res, 2, DocumentId("doc:foo:3"), Timestamp(4)));
+ TEST_DO(checkEntry(res, 0, DocumentId("id:ns:document::1"), Timestamp(2)));
+ TEST_DO(checkEntry(res, 1, DocumentId("id:ns:document::2"), Timestamp(3)));
+ TEST_DO(checkEntry(res, 2, DocumentId("id:ns:document::3"), Timestamp(4)));
}
TEST("require that remove entries can be ignored") {
DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), docV(), -1, false);
- itr.add(rem("doc:foo:1", Timestamp(2), bucket(5)));
- itr.add(cat(doc("doc:foo:2", Timestamp(3), bucket(5)),
- rem("doc:foo:3", Timestamp(4), bucket(5))));
+ itr.add(rem("id:ns:document::1", Timestamp(2), bucket(5)));
+ itr.add(cat(doc("id:ns:document::2", Timestamp(3), bucket(5)),
+ rem("id:ns:document::3", Timestamp(4), bucket(5))));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(1u, res.getEntries().size());
- TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:2")), Timestamp(3)));
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::2")), Timestamp(3)));
}
TEST("require that iterating all versions returns both documents and removes") {
DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), allV(), -1, false);
- itr.add(rem("doc:foo:1", Timestamp(2), bucket(5)));
- itr.add(cat(doc("doc:foo:2", Timestamp(3), bucket(5)),
- rem("doc:foo:3", Timestamp(4), bucket(5))));
+ itr.add(rem("id:ns:document::1", Timestamp(2), bucket(5)));
+ itr.add(cat(doc("id:ns:document::2", Timestamp(3), bucket(5)),
+ rem("id:ns:document::3", Timestamp(4), bucket(5))));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(3u, res.getEntries().size());
- TEST_DO(checkEntry(res, 0, DocumentId("doc:foo:1"), Timestamp(2)));
- TEST_DO(checkEntry(res, 1, Document(*DataType::DOCUMENT, DocumentId("doc:foo:2")), Timestamp(3)));
- TEST_DO(checkEntry(res, 2, DocumentId("doc:foo:3"), Timestamp(4)));
+ TEST_DO(checkEntry(res, 0, DocumentId("id:ns:document::1"), Timestamp(2)));
+ TEST_DO(checkEntry(res, 1, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::2")), Timestamp(3)));
+ TEST_DO(checkEntry(res, 2, DocumentId("id:ns:document::3"), Timestamp(4)));
}
TEST("require that using an empty field set returns meta-data only") {
DocumentIterator itr(bucket(5), document::NoFields(), selectAll(), newestV(), -1, false);
- itr.add(doc("doc:foo:1", Timestamp(2), bucket(5)));
- itr.add(cat(doc("doc:foo:2", Timestamp(3), bucket(5)),
- rem("doc:foo:3", Timestamp(4), bucket(5))));
+ itr.add(doc("id:ns:document::1", Timestamp(2), bucket(5)));
+ itr.add(cat(doc("id:ns:document::2", Timestamp(3), bucket(5)),
+ rem("id:ns:document::3", Timestamp(4), bucket(5))));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(3u, res.getEntries().size());
@@ -586,30 +586,30 @@ TEST("require that using an empty field set returns meta-data only") {
TEST("require that entries in other buckets are skipped") {
DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
- itr.add(rem("doc:foo:1", Timestamp(2), bucket(6)));
- itr.add(cat(doc("doc:foo:2", Timestamp(3), bucket(5)),
- doc("doc:foo:3", Timestamp(4), bucket(6))));
+ itr.add(rem("id:ns:document::1", Timestamp(2), bucket(6)));
+ itr.add(cat(doc("id:ns:document::2", Timestamp(3), bucket(5)),
+ doc("id:ns:document::3", Timestamp(4), bucket(6))));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(1u, res.getEntries().size());
- TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:2")), Timestamp(3)));
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::2")), Timestamp(3)));
}
TEST("require that maxBytes splits iteration results") {
DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
- itr.add(doc("doc:foo:1", Timestamp(2), bucket(5)));
- itr.add(cat(rem("doc:foo:2", Timestamp(3), bucket(5)),
- doc("doc:foo:3", Timestamp(4), bucket(5))));
- IterateResult res1 = itr.iterate(getSize(Document(*DataType::DOCUMENT, DocumentId("doc:foo:1"))) +
- getSize(DocumentId("doc:foo:2")));
+ itr.add(doc("id:ns:document::1", Timestamp(2), bucket(5)));
+ itr.add(cat(rem("id:ns:document::2", Timestamp(3), bucket(5)),
+ doc("id:ns:document::3", Timestamp(4), bucket(5))));
+ IterateResult res1 = itr.iterate(getSize(Document(*DataType::DOCUMENT, DocumentId("id:ns:document::1"))) +
+ getSize(DocumentId("id:ns:document::2")));
EXPECT_TRUE(!res1.isCompleted());
EXPECT_EQUAL(2u, res1.getEntries().size());
- TEST_DO(checkEntry(res1, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:1")), Timestamp(2)));
- TEST_DO(checkEntry(res1, 1, DocumentId("doc:foo:2"), Timestamp(3)));
+ TEST_DO(checkEntry(res1, 0, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::1")), Timestamp(2)));
+ TEST_DO(checkEntry(res1, 1, DocumentId("id:ns:document::2"), Timestamp(3)));
IterateResult res2 = itr.iterate(largeNum);
EXPECT_TRUE(res2.isCompleted());
- TEST_DO(checkEntry(res2, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:3")), Timestamp(4)));
+ TEST_DO(checkEntry(res2, 0, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::3")), Timestamp(4)));
IterateResult res3 = itr.iterate(largeNum);
EXPECT_TRUE(res3.isCompleted());
@@ -618,9 +618,9 @@ TEST("require that maxBytes splits iteration results") {
TEST("require that maxBytes splits iteration results for meta-data only iteration") {
DocumentIterator itr(bucket(5), document::NoFields(), selectAll(), newestV(), -1, false);
- itr.add(doc("doc:foo:1", Timestamp(2), bucket(5)));
- itr.add(cat(rem("doc:foo:2", Timestamp(3), bucket(5)),
- doc("doc:foo:3", Timestamp(4), bucket(5))));
+ itr.add(doc("id:ns:document::1", Timestamp(2), bucket(5)));
+ itr.add(cat(rem("id:ns:document::2", Timestamp(3), bucket(5)),
+ doc("id:ns:document::3", Timestamp(4), bucket(5))));
IterateResult res1 = itr.iterate(getSize() + getSize());
EXPECT_TRUE(!res1.isCompleted());
EXPECT_EQUAL(2u, res1.getEntries().size());
@@ -638,122 +638,122 @@ TEST("require that maxBytes splits iteration results for meta-data only iteratio
TEST("require that at least one document is returned by visit") {
DocumentIterator itr(bucket(5), document::AllFields(), selectAll(), newestV(), -1, false);
- itr.add(doc("doc:foo:1", Timestamp(2), bucket(5)));
- itr.add(cat(rem("doc:foo:2", Timestamp(3), bucket(5)),
- doc("doc:foo:3", Timestamp(4), bucket(5))));
+ itr.add(doc("id:ns:document::1", Timestamp(2), bucket(5)));
+ itr.add(cat(rem("id:ns:document::2", Timestamp(3), bucket(5)),
+ doc("id:ns:document::3", Timestamp(4), bucket(5))));
IterateResult res1 = itr.iterate(0);
EXPECT_TRUE(1u <= res1.getEntries().size());
- TEST_DO(checkEntry(res1, 0, Document(*DataType::DOCUMENT,DocumentId("doc:foo:1")), Timestamp(2)));
+ TEST_DO(checkEntry(res1, 0, Document(*DataType::DOCUMENT,DocumentId("id:ns:document::1")), Timestamp(2)));
}
TEST("require that documents outside the timestamp limits are ignored") {
DocumentIterator itr(bucket(5), document::AllFields(), selectTimestampRange(100, 200), newestV(), -1, false);
- itr.add(doc("doc:foo:1", Timestamp(99), bucket(5)));
- itr.add(doc("doc:foo:2", Timestamp(100), bucket(5)));
- itr.add(doc("doc:foo:3", Timestamp(200), bucket(5)));
- itr.add(doc("doc:foo:4", Timestamp(201), bucket(5)));
- itr.add(rem("doc:foo:5", Timestamp(99), bucket(5)));
- itr.add(rem("doc:foo:6", Timestamp(100), bucket(5)));
- itr.add(rem("doc:foo:7", Timestamp(200), bucket(5)));
- itr.add(rem("doc:foo:8", Timestamp(201), bucket(5)));
+ itr.add(doc("id:ns:document::1", Timestamp(99), bucket(5)));
+ itr.add(doc("id:ns:document::2", Timestamp(100), bucket(5)));
+ itr.add(doc("id:ns:document::3", Timestamp(200), bucket(5)));
+ itr.add(doc("id:ns:document::4", Timestamp(201), bucket(5)));
+ itr.add(rem("id:ns:document::5", Timestamp(99), bucket(5)));
+ itr.add(rem("id:ns:document::6", Timestamp(100), bucket(5)));
+ itr.add(rem("id:ns:document::7", Timestamp(200), bucket(5)));
+ itr.add(rem("id:ns:document::8", Timestamp(201), bucket(5)));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(4u, res.getEntries().size());
- TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:2")), Timestamp(100)));
- TEST_DO(checkEntry(res, 1, Document(*DataType::DOCUMENT, DocumentId("doc:foo:3")), Timestamp(200)));
- TEST_DO(checkEntry(res, 2, DocumentId("doc:foo:6"), Timestamp(100)));
- TEST_DO(checkEntry(res, 3, DocumentId("doc:foo:7"), Timestamp(200)));
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::2")), Timestamp(100)));
+ TEST_DO(checkEntry(res, 1, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::3")), Timestamp(200)));
+ TEST_DO(checkEntry(res, 2, DocumentId("id:ns:document::6"), Timestamp(100)));
+ TEST_DO(checkEntry(res, 3, DocumentId("id:ns:document::7"), Timestamp(200)));
}
TEST("require that timestamp subset returns the appropriate documents") {
DocumentIterator itr(bucket(5), document::AllFields(), selectTimestampSet(200, 350, 400), newestV(), -1, false);
- itr.add(doc("doc:foo:1", Timestamp(500), bucket(5)));
- itr.add(doc("doc:foo:2", Timestamp(400), bucket(5)));
- itr.add(doc("doc:foo:3", Timestamp(300), bucket(5)));
- itr.add(doc("doc:foo:4", Timestamp(200), bucket(5)));
- itr.add(rem("doc:foo:5", Timestamp(250), bucket(5)));
- itr.add(rem("doc:foo:6", Timestamp(350), bucket(5)));
- itr.add(rem("doc:foo:7", Timestamp(450), bucket(5)));
- itr.add(rem("doc:foo:8", Timestamp(550), bucket(5)));
+ itr.add(doc("id:ns:document::1", Timestamp(500), bucket(5)));
+ itr.add(doc("id:ns:document::2", Timestamp(400), bucket(5)));
+ itr.add(doc("id:ns:document::3", Timestamp(300), bucket(5)));
+ itr.add(doc("id:ns:document::4", Timestamp(200), bucket(5)));
+ itr.add(rem("id:ns:document::5", Timestamp(250), bucket(5)));
+ itr.add(rem("id:ns:document::6", Timestamp(350), bucket(5)));
+ itr.add(rem("id:ns:document::7", Timestamp(450), bucket(5)));
+ itr.add(rem("id:ns:document::8", Timestamp(550), bucket(5)));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(3u, res.getEntries().size());
- TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:2")), Timestamp(400)));
- TEST_DO(checkEntry(res, 1, Document(*DataType::DOCUMENT, DocumentId("doc:foo:4")), Timestamp(200)));
- TEST_DO(checkEntry(res, 2, DocumentId("doc:foo:6"), Timestamp(350)));
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::2")), Timestamp(400)));
+ TEST_DO(checkEntry(res, 1, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::4")), Timestamp(200)));
+ TEST_DO(checkEntry(res, 2, DocumentId("id:ns:document::6"), Timestamp(350)));
}
TEST("require that document selection will filter results") {
- DocumentIterator itr(bucket(5), document::AllFields(), selectDocs("id=\"doc:foo:xxx*\""), newestV(), -1, false);
- itr.add(doc("doc:foo:xxx1", Timestamp(99), bucket(5)));
- itr.add(doc("doc:foo:yyy1", Timestamp(100), bucket(5)));
- itr.add(doc("doc:foo:xxx2", Timestamp(200), bucket(5)));
- itr.add(doc("doc:foo:yyy2", Timestamp(201), bucket(5)));
- itr.add(rem("doc:foo:xxx3", Timestamp(99), bucket(5)));
- itr.add(rem("doc:foo:yyy3", Timestamp(100), bucket(5)));
- itr.add(rem("doc:foo:xxx4", Timestamp(200), bucket(5)));
- itr.add(rem("doc:foo:yyy4", Timestamp(201), bucket(5)));
+ DocumentIterator itr(bucket(5), document::AllFields(), selectDocs("id=\"id:ns:document::xxx*\""), newestV(), -1, false);
+ itr.add(doc("id:ns:document::xxx1", Timestamp(99), bucket(5)));
+ itr.add(doc("id:ns:document::yyy1", Timestamp(100), bucket(5)));
+ itr.add(doc("id:ns:document::xxx2", Timestamp(200), bucket(5)));
+ itr.add(doc("id:ns:document::yyy2", Timestamp(201), bucket(5)));
+ itr.add(rem("id:ns:document::xxx3", Timestamp(99), bucket(5)));
+ itr.add(rem("id:ns:document::yyy3", Timestamp(100), bucket(5)));
+ itr.add(rem("id:ns:document::xxx4", Timestamp(200), bucket(5)));
+ itr.add(rem("id:ns:document::yyy4", Timestamp(201), bucket(5)));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(4u, res.getEntries().size());
- TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:xxx1")), Timestamp(99)));
- TEST_DO(checkEntry(res, 1, Document(*DataType::DOCUMENT, DocumentId("doc:foo:xxx2")), Timestamp(200)));
- TEST_DO(checkEntry(res, 2, DocumentId("doc:foo:xxx3"), Timestamp(99)));
- TEST_DO(checkEntry(res, 3, DocumentId("doc:foo:xxx4"), Timestamp(200)));
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::xxx1")), Timestamp(99)));
+ TEST_DO(checkEntry(res, 1, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::xxx2")), Timestamp(200)));
+ TEST_DO(checkEntry(res, 2, DocumentId("id:ns:document::xxx3"), Timestamp(99)));
+ TEST_DO(checkEntry(res, 3, DocumentId("id:ns:document::xxx4"), Timestamp(200)));
}
TEST("require that document selection handles 'field == null'") {
DocumentIterator itr(bucket(5), document::AllFields(), selectDocs("foo.aa == null"), newestV(), -1, false);
- itr.add(doc_with_null_fields("doc:foo:xxx1", Timestamp(99), bucket(5)));
- itr.add(doc_with_null_fields("doc:foo:xxx2", Timestamp(100), bucket(5)));
+ itr.add(doc_with_null_fields("id:ns:foo::xxx1", Timestamp(99), bucket(5)));
+ itr.add(doc_with_null_fields("id:ns:foo::xxx2", Timestamp(100), bucket(5)));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
ASSERT_EQUAL(2u, res.getEntries().size());
- Document expected1(getAttrDocType(), DocumentId("doc:foo:xxx1"));
+ Document expected1(getAttrDocType(), DocumentId("id:ns:foo::xxx1"));
TEST_DO(checkEntry(res, 0, expected1, Timestamp(99)));
- Document expected2(getAttrDocType(), DocumentId("doc:foo:xxx2"));
+ Document expected2(getAttrDocType(), DocumentId("id:ns:foo::xxx2"));
TEST_DO(checkEntry(res, 1, expected2, Timestamp(100)));
}
TEST("require that invalid document selection returns no documents") {
DocumentIterator itr(bucket(5), document::AllFields(), selectDocs("=="), newestV(), -1, false);
- itr.add(doc("doc:foo:xxx1", Timestamp(99), bucket(5)));
- itr.add(doc("doc:foo:yyy1", Timestamp(100), bucket(5)));
- itr.add(doc("doc:foo:xxx2", Timestamp(200), bucket(5)));
- itr.add(doc("doc:foo:yyy2", Timestamp(201), bucket(5)));
- itr.add(rem("doc:foo:xxx3", Timestamp(99), bucket(5)));
- itr.add(rem("doc:foo:yyy3", Timestamp(100), bucket(5)));
- itr.add(rem("doc:foo:xxx4", Timestamp(200), bucket(5)));
- itr.add(rem("doc:foo:yyy4", Timestamp(201), bucket(5)));
+ itr.add(doc("id:ns:document::xxx1", Timestamp(99), bucket(5)));
+ itr.add(doc("id:ns:document::yyy1", Timestamp(100), bucket(5)));
+ itr.add(doc("id:ns:document::xxx2", Timestamp(200), bucket(5)));
+ itr.add(doc("id:ns:document::yyy2", Timestamp(201), bucket(5)));
+ itr.add(rem("id:ns:document::xxx3", Timestamp(99), bucket(5)));
+ itr.add(rem("id:ns:document::yyy3", Timestamp(100), bucket(5)));
+ itr.add(rem("id:ns:document::xxx4", Timestamp(200), bucket(5)));
+ itr.add(rem("id:ns:document::yyy4", Timestamp(201), bucket(5)));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(0u, res.getEntries().size());
}
TEST("require that document selection and timestamp range works together") {
- DocumentIterator itr(bucket(5), document::AllFields(), selectDocsWithinRange("id=\"doc:foo:xxx*\"", 100, 200), newestV(), -1, false);
- itr.add(doc("doc:foo:xxx1", Timestamp(99), bucket(5)));
- itr.add(doc("doc:foo:yyy1", Timestamp(100), bucket(5)));
- itr.add(doc("doc:foo:xxx2", Timestamp(200), bucket(5)));
- itr.add(doc("doc:foo:yyy2", Timestamp(201), bucket(5)));
- itr.add(rem("doc:foo:xxx3", Timestamp(99), bucket(5)));
- itr.add(rem("doc:foo:yyy3", Timestamp(100), bucket(5)));
- itr.add(rem("doc:foo:xxx4", Timestamp(200), bucket(5)));
- itr.add(rem("doc:foo:yyy4", Timestamp(201), bucket(5)));
+ DocumentIterator itr(bucket(5), document::AllFields(), selectDocsWithinRange("id=\"id:ns:document::xxx*\"", 100, 200), newestV(), -1, false);
+ itr.add(doc("id:ns:document::xxx1", Timestamp(99), bucket(5)));
+ itr.add(doc("id:ns:document::yyy1", Timestamp(100), bucket(5)));
+ itr.add(doc("id:ns:document::xxx2", Timestamp(200), bucket(5)));
+ itr.add(doc("id:ns:document::yyy2", Timestamp(201), bucket(5)));
+ itr.add(rem("id:ns:document::xxx3", Timestamp(99), bucket(5)));
+ itr.add(rem("id:ns:document::yyy3", Timestamp(100), bucket(5)));
+ itr.add(rem("id:ns:document::xxx4", Timestamp(200), bucket(5)));
+ itr.add(rem("id:ns:document::yyy4", Timestamp(201), bucket(5)));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(2u, res.getEntries().size());
- TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("doc:foo:xxx2")), Timestamp(200)));
- TEST_DO(checkEntry(res, 1, DocumentId("doc:foo:xxx4"), Timestamp(200)));
+ TEST_DO(checkEntry(res, 0, Document(*DataType::DOCUMENT, DocumentId("id:ns:document::xxx2")), Timestamp(200)));
+ TEST_DO(checkEntry(res, 1, DocumentId("id:ns:document::xxx4"), Timestamp(200)));
}
TEST("require that fieldset limits fields returned") {
DocumentIterator itr(bucket(5), document::HeaderFields(), selectAll(), newestV(), -1, false);
- itr.add(doc_with_fields("doc:foo:xxx1", Timestamp(1), bucket(5)));
+ itr.add(doc_with_fields("id:ns:foo::xxx1", Timestamp(1), bucket(5)));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(1u, res.getEntries().size());
- Document expected(getDocType(), DocumentId("doc:foo:xxx1"));
+ Document expected(getDocType(), DocumentId("id:ns:foo::xxx1"));
expected.set("header", "foo");
TEST_DO(checkEntry(res, 0, expected, Timestamp(1)));
}
@@ -798,26 +798,26 @@ TEST("require that attributes are used")
{
UnitDR::reset();
DocumentIterator itr(bucket(5), document::AllFields(), selectDocs("foo.aa == 45"), docV(), -1, false);
- itr.add(doc_with_attr_fields("doc:foo:xx1", Timestamp(1), bucket(5),
+ itr.add(doc_with_attr_fields("id:ns:foo::xx1", Timestamp(1), bucket(5),
27, 28, 27, 2.7, 2.8, "x27", "x28"));
- itr.add(doc_with_attr_fields("doc:foo:xx2", Timestamp(2), bucket(5),
+ itr.add(doc_with_attr_fields("id:ns:foo::xx2", Timestamp(2), bucket(5),
27, 28, 45, 2.7, 4.5, "x27", "x45"));
- itr.add(doc_with_attr_fields("doc:foo:xx3", Timestamp(3), bucket(5),
+ itr.add(doc_with_attr_fields("id:ns:foo::xx3", Timestamp(3), bucket(5),
45, 46, 27, 4.5, 2.7, "x45", "x27"));
- itr.add(doc_with_attr_fields("doc:foo:xx4", Timestamp(4), bucket(5),
+ itr.add(doc_with_attr_fields("id:ns:foo::xx4", Timestamp(4), bucket(5),
45, 46, 45, 4.5, 4.5, "x45", "x45"));
IterateResult res = itr.iterate(largeNum);
EXPECT_TRUE(res.isCompleted());
EXPECT_EQUAL(2u, res.getEntries().size());
- Document expected1(getAttrDocType(), DocumentId("doc:foo:xx2"));
+ Document expected1(getAttrDocType(), DocumentId("id:ns:foo::xx2"));
expected1.set("header", "foo");
expected1.set("body", "bar");
expected1.set("aa", 27);
expected1.set("ab", 28);
expected1.set("dd", 2.7);
expected1.set("ss", "x27");
- Document expected2(getAttrDocType(), DocumentId("doc:foo:xx4"));
+ Document expected2(getAttrDocType(), DocumentId("id:ns:foo::xx4"));
expected2.set("header", "foo");
expected2.set("body", "bar");
expected2.set("aa", 45);
@@ -828,26 +828,26 @@ TEST("require that attributes are used")
TEST_DO(checkEntry(res, 1, expected2, Timestamp(4)));
DocumentIterator itr2(bucket(5), document::AllFields(), selectDocs("foo.dd == 4.5"), docV(), -1, false);
- itr2.add(doc_with_attr_fields("doc:foo:xx5", Timestamp(5), bucket(5),
+ itr2.add(doc_with_attr_fields("id:ns:foo::xx5", Timestamp(5), bucket(5),
27, 28, 27, 2.7, 2.8, "x27", "x28"));
- itr2.add(doc_with_attr_fields("doc:foo:xx6", Timestamp(6), bucket(5),
+ itr2.add(doc_with_attr_fields("id:ns:foo::xx6", Timestamp(6), bucket(5),
27, 28, 45, 2.7, 4.5, "x27", "x45"));
- itr2.add(doc_with_attr_fields("doc:foo:xx7", Timestamp(7), bucket(5),
+ itr2.add(doc_with_attr_fields("id:ns:foo::xx7", Timestamp(7), bucket(5),
45, 46, 27, 4.5, 2.7, "x45", "x27"));
- itr2.add(doc_with_attr_fields("doc:foo:xx8", Timestamp(8), bucket(5),
+ itr2.add(doc_with_attr_fields("id:ns:foo::xx8", Timestamp(8), bucket(5),
45, 46, 45, 4.5, 4.5, "x45", "x45"));
IterateResult res2 = itr2.iterate(largeNum);
EXPECT_TRUE(res2.isCompleted());
EXPECT_EQUAL(2u, res2.getEntries().size());
- Document expected3(getAttrDocType(), DocumentId("doc:foo:xx6"));
+ Document expected3(getAttrDocType(), DocumentId("id:ns:foo::xx6"));
expected3.set("header", "foo");
expected3.set("body", "bar");
expected3.set("aa", 27);
expected3.set("ab", 28);
expected3.set("dd", 2.7);
expected3.set("ss", "x27");
- Document expected4(getAttrDocType(), DocumentId("doc:foo:xx8"));
+ Document expected4(getAttrDocType(), DocumentId("id:ns:foo::xx8"));
expected4.set("header", "foo");
expected4.set("body", "bar");
expected4.set("aa", 45);
@@ -858,26 +858,26 @@ TEST("require that attributes are used")
TEST_DO(checkEntry(res2, 1, expected4, Timestamp(8)));
DocumentIterator itr3(bucket(5), document::AllFields(), selectDocs("foo.ss == \"x45\""), docV(), -1, false);
- itr3.add(doc_with_attr_fields("doc:foo:xx9", Timestamp(9), bucket(5),
+ itr3.add(doc_with_attr_fields("id:ns:foo::xx9", Timestamp(9), bucket(5),
27, 28, 27, 2.7, 2.8, "x27", "x28"));
- itr3.add(doc_with_attr_fields("doc:foo:xx10", Timestamp(10), bucket(5),
+ itr3.add(doc_with_attr_fields("id:ns:foo::xx10", Timestamp(10), bucket(5),
27, 28, 45, 2.7, 4.5, "x27", "x45"));
- itr3.add(doc_with_attr_fields("doc:foo:xx11", Timestamp(11), bucket(5),
+ itr3.add(doc_with_attr_fields("id:ns:foo::xx11", Timestamp(11), bucket(5),
45, 46, 27, 4.5, 2.7, "x45", "x27"));
- itr3.add(doc_with_attr_fields("doc:foo:xx12", Timestamp(12), bucket(5),
+ itr3.add(doc_with_attr_fields("id:ns:foo::xx12", Timestamp(12), bucket(5),
45, 46, 45, 4.5, 4.5, "x45", "x45"));
IterateResult res3 = itr3.iterate(largeNum);
EXPECT_TRUE(res3.isCompleted());
EXPECT_EQUAL(2u, res3.getEntries().size());
- Document expected5(getAttrDocType(), DocumentId("doc:foo:xx10"));
+ Document expected5(getAttrDocType(), DocumentId("id:ns:foo::xx10"));
expected5.set("header", "foo");
expected5.set("body", "bar");
expected5.set("aa", 27);
expected5.set("ab", 28);
expected5.set("dd", 2.7);
expected5.set("ss", "x27");
- Document expected6(getAttrDocType(), DocumentId("doc:foo:xx12"));
+ Document expected6(getAttrDocType(), DocumentId("id:ns:foo::xx12"));
expected6.set("header", "foo");
expected6.set("body", "bar");
expected6.set("aa", 45);
diff --git a/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp b/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp
index 4b3b68a85ea..f99668a13f8 100644
--- a/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp
@@ -537,7 +537,7 @@ TEST_F("require that heartBeat calls FeedView's heartBeat",
TEST_F("require that outdated remove is ignored", FeedHandlerFixture)
{
- DocumentContext doc_context("doc:test:foo", *f.schema.builder);
+ DocumentContext doc_context("id:ns:searchdocument::foo", *f.schema.builder);
FeedOperation::UP op(new RemoveOperation(doc_context.bucketId, Timestamp(10), doc_context.doc->getId()));
static_cast<DocumentOperation &>(*op).setPrevDbDocumentId(DbDocumentId(4));
static_cast<DocumentOperation &>(*op).setPrevTimestamp(Timestamp(10000));
@@ -549,7 +549,7 @@ TEST_F("require that outdated remove is ignored", FeedHandlerFixture)
TEST_F("require that outdated put is ignored", FeedHandlerFixture)
{
- DocumentContext doc_context("doc:test:foo", *f.schema.builder);
+ DocumentContext doc_context("id:ns:searchdocument::foo", *f.schema.builder);
FeedOperation::UP op(new PutOperation(doc_context.bucketId,
Timestamp(10), doc_context.doc));
static_cast<DocumentOperation &>(*op).setPrevTimestamp(Timestamp(10000));
@@ -570,7 +570,7 @@ addLidToRemove(RemoveDocumentsOperation &op)
TEST_F("require that handleMove calls FeedView", FeedHandlerFixture)
{
- DocumentContext doc_context("doc:test:foo", *f.schema.builder);
+ DocumentContext doc_context("id:ns:searchdocument::foo", *f.schema.builder);
MoveOperation op(doc_context.bucketId, Timestamp(2), doc_context.doc, DbDocumentId(0, 2), 1);
op.setDbDocumentId(DbDocumentId(1, 2));
f.runAsMaster([&]() { f.handler.handleMove(op, IDestructorCallback::SP()); });
@@ -806,7 +806,7 @@ TEST_F("require that tensor update with wrong tensor type fails", FeedHandlerFix
TEST_F("require that put with different document type repo is ok", FeedHandlerFixture)
{
TwoFieldsSchemaContext schema;
- DocumentContext doc_context("doc:test:foo", *schema.builder);
+ DocumentContext doc_context("id:ns:searchdocument::foo", *schema.builder);
auto op = std::make_unique<PutOperation>(doc_context.bucketId,
Timestamp(10), doc_context.doc);
FeedTokenContext token_context;
diff --git a/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp b/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp
index b39b70572e0..144f4ca4ff7 100644
--- a/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp
@@ -577,7 +577,7 @@ struct FixtureBase
}
DocumentContext doc1(uint64_t timestamp = 10) {
- return doc("doc:test:1", timestamp);
+ return doc("id:ns:searchdocument::1", timestamp);
}
void performPut(FeedToken token, PutOperation &op) {
@@ -661,7 +661,7 @@ struct FixtureBase
uint32_t id = first + i;
uint64_t ts = tsfirst + i;
vespalib::asciistream os;
- os << "doc:test:" << id;
+ os << "id:ns:searchdocument::" << id;
docs.push_back(doc(os.str(), ts));
}
return docs;
@@ -822,7 +822,7 @@ TEST_F("require that put() calls attribute adapter", SearchableFeedViewFixture)
f.putAndWait(dc);
EXPECT_EQUAL(1u, f.maw._putSerial);
- EXPECT_EQUAL(DocumentId("doc:test:1"), f.maw._putDocId);
+ EXPECT_EQUAL(DocumentId("id:ns:searchdocument::1"), f.maw._putDocId);
EXPECT_EQUAL(1u, f.maw._putLid);
EXPECT_EQUAL(2u, f._docIdLimit.get());
}
@@ -861,7 +861,7 @@ TEST_F("require that update() calls attribute adapter", SearchableFeedViewFixtur
f.putAndWait(dc1);
f.updateAndWait(dc2);
- assertAttributeUpdate(2u, DocumentId("doc:test:1"), 1u, f.maw);
+ assertAttributeUpdate(2u, DocumentId("id:ns:searchdocument::1"), 1u, f.maw);
}
TEST_F("require that remove() updates document meta store with bucket info",
@@ -1064,7 +1064,7 @@ void putDocumentAndUpdate(Fixture &f, const vespalib::string &fieldName)
f.putAndWait(dc1);
EXPECT_EQUAL(1u, f.msa._store._lastSyncToken);
- DocumentContext dc2("doc:test:1", 20, f.getBuilder());
+ DocumentContext dc2("id:ns:searchdocument::1", 20, f.getBuilder());
dc2.addFieldUpdate(f.getBuilder(), fieldName);
f.updateAndWait(dc2);
}
@@ -1076,7 +1076,7 @@ void requireThatUpdateOnlyUpdatesAttributeAndNotDocumentStore(Fixture &f,
putDocumentAndUpdate(f, fieldName);
EXPECT_EQUAL(1u, f.msa._store._lastSyncToken); // document store not updated
- assertAttributeUpdate(2u, DocumentId("doc:test:1"), 1, f.maw);
+ assertAttributeUpdate(2u, DocumentId("id:ns:searchdocument::1"), 1, f.maw);
}
template <typename Fixture>
@@ -1086,7 +1086,7 @@ void requireThatUpdateUpdatesAttributeAndDocumentStore(Fixture &f,
putDocumentAndUpdate(f, fieldName);
EXPECT_EQUAL(2u, f.msa._store._lastSyncToken); // document store updated
- assertAttributeUpdate(2u, DocumentId("doc:test:1"), 1, f.maw);
+ assertAttributeUpdate(2u, DocumentId("id:ns:searchdocument::1"), 1, f.maw);
}
TEST_F("require that update() to fast-access attribute only updates attribute and not document store",
diff --git a/searchcore/src/tests/proton/documentmetastore/documentmetastore_test.cpp b/searchcore/src/tests/proton/documentmetastore/documentmetastore_test.cpp
index f6f0c2b0806..2fc6cc87631 100644
--- a/searchcore/src/tests/proton/documentmetastore/documentmetastore_test.cpp
+++ b/searchcore/src/tests/proton/documentmetastore/documentmetastore_test.cpp
@@ -545,14 +545,14 @@ TEST(DocumentMetaStoreTest, lid_and_gid_space_is_reused)
GlobalId
createGid(uint32_t lid)
{
- DocumentId docId(vespalib::make_string("doc:id:%u", lid));
+ DocumentId docId(vespalib::make_string("id:ns:testdoc::%u", lid));
return docId.getGlobalId();
}
GlobalId
createGid(uint32_t userId, uint32_t lid)
{
- DocumentId docId(vespalib::make_string("id:id:testdoc:n=%u:%u", userId, lid));
+ DocumentId docId(vespalib::make_string("id:ns:testdoc:n=%u:%u", userId, lid));
return docId.getGlobalId();
}
diff --git a/searchcore/src/tests/proton/feed_and_search/feed_and_search.cpp b/searchcore/src/tests/proton/feed_and_search/feed_and_search.cpp
index 23a87415f7f..4580865b3a4 100644
--- a/searchcore/src/tests/proton/feed_and_search/feed_and_search.cpp
+++ b/searchcore/src/tests/proton/feed_and_search/feed_and_search.cpp
@@ -105,7 +105,7 @@ Schema getSchema() {
Document::UP buildDocument(DocBuilder & doc_builder, int id,
const string &word) {
ostringstream ost;
- ost << "doc::" << id;
+ ost << "id:ns:searchdocument::" << id;
doc_builder.startDocument(ost.str());
doc_builder.startIndexField(field_name)
.addStr(noise).addStr(word).endField();
diff --git a/searchcore/src/tests/proton/feedoperation/feedoperation_test.cpp b/searchcore/src/tests/proton/feedoperation/feedoperation_test.cpp
index 6a9dc42b56d..5a3ed4b7274 100644
--- a/searchcore/src/tests/proton/feedoperation/feedoperation_test.cpp
+++ b/searchcore/src/tests/proton/feedoperation/feedoperation_test.cpp
@@ -145,7 +145,7 @@ TEST("require that toString() on derived classes are meaningful")
uint32_t sub_db_id = 1;
MyStreamHandler stream_handler;
DocumentIdT doc_id_limit = 15;
- DocumentId doc_id("doc:foo:bar");
+ DocumentId doc_id("id:ns:foo:::bar");
DocumentUpdate::SP update(new DocumentUpdate(repo, *DataType::DOCUMENT, doc_id));
EXPECT_EQUAL("DeleteBucket(BucketId(0x0000000000000000), serialNum=0)",
@@ -196,7 +196,7 @@ TEST("require that toString() on derived classes are meaningful")
EXPECT_EQUAL("Remove(null::, BucketId(0x0000000000000000), timestamp=0, dbdId=(subDbId=0, lid=0), "
"prevDbdId=(subDbId=0, lid=0), prevMarkedAsRemoved=false, prevTimestamp=0, serialNum=0)",
RemoveOperation().toString());
- EXPECT_EQUAL("Remove(doc:foo:bar, BucketId(0x000000000000002a), timestamp=10, dbdId=(subDbId=0, lid=0), "
+ EXPECT_EQUAL("Remove(id:ns:foo:::bar, BucketId(0x000000000000002a), timestamp=10, dbdId=(subDbId=0, lid=0), "
"prevDbdId=(subDbId=0, lid=0), prevMarkedAsRemoved=false, prevTimestamp=0, serialNum=0)",
RemoveOperation(bucket_id1, timestamp, doc_id).toString());
@@ -214,7 +214,7 @@ TEST("require that toString() on derived classes are meaningful")
EXPECT_EQUAL("Update(NULL, BucketId(0x0000000000000000), timestamp=0, dbdId=(subDbId=0, lid=0), "
"prevDbdId=(subDbId=0, lid=0), prevMarkedAsRemoved=false, prevTimestamp=0, serialNum=0)",
UpdateOperation().toString());
- EXPECT_EQUAL("Update(doc:foo:bar, BucketId(0x000000000000002a), timestamp=10, dbdId=(subDbId=0, lid=0), "
+ EXPECT_EQUAL("Update(id:ns:foo:::bar, BucketId(0x000000000000002a), timestamp=10, dbdId=(subDbId=0, lid=0), "
"prevDbdId=(subDbId=0, lid=0), prevMarkedAsRemoved=false, prevTimestamp=0, serialNum=0)",
UpdateOperation(bucket_id1, timestamp, update).toString());
diff --git a/searchcore/src/tests/proton/index/fusionrunner_test.cpp b/searchcore/src/tests/proton/index/fusionrunner_test.cpp
index e6cdbf8d6cb..49b452aec2e 100644
--- a/searchcore/src/tests/proton/index/fusionrunner_test.cpp
+++ b/searchcore/src/tests/proton/index/fusionrunner_test.cpp
@@ -143,7 +143,7 @@ void Test::tearDown() {
Document::UP buildDocument(DocBuilder & doc_builder, int id, const string &word) {
vespalib::asciistream ost;
- ost << "doc::" << id;
+ ost << "id:ns:searchdocument::" << id;
doc_builder.startDocument(ost.str());
doc_builder.startIndexField(field_name).addStr(word).endField();
return doc_builder.endDocument();
diff --git a/searchcore/src/tests/proton/index/index_writer/index_writer_test.cpp b/searchcore/src/tests/proton/index/index_writer/index_writer_test.cpp
index d92ac0dcdc2..73919a7c628 100644
--- a/searchcore/src/tests/proton/index/index_writer/index_writer_test.cpp
+++ b/searchcore/src/tests/proton/index/index_writer/index_writer_test.cpp
@@ -89,7 +89,7 @@ struct Fixture
{
}
Document::UP createDoc(uint32_t lid) {
- builder.startDocument(vespalib::make_string("doc:test:%u", lid));
+ builder.startDocument(vespalib::make_string("id:ns:searchdocument::%u", lid));
return builder.endDocument();
}
void put(SerialNum serialNum, const search::DocumentIdT lid) {
diff --git a/searchcore/src/tests/proton/index/indexmanager_test.cpp b/searchcore/src/tests/proton/index/indexmanager_test.cpp
index d92cc62c5a1..80b1f9f0560 100644
--- a/searchcore/src/tests/proton/index/indexmanager_test.cpp
+++ b/searchcore/src/tests/proton/index/indexmanager_test.cpp
@@ -89,7 +89,7 @@ void removeTestData() {
Document::UP buildDocument(DocBuilder &doc_builder, int id,
const string &word) {
vespalib::asciistream ost;
- ost << "doc::" << id;
+ ost << "id:ns:searchdocument::" << id;
doc_builder.startDocument(ost.str());
doc_builder.startIndexField(field_name).addStr(word).endField();
return doc_builder.endDocument();
diff --git a/searchcore/src/tests/proton/matching/matching_test.cpp b/searchcore/src/tests/proton/matching/matching_test.cpp
index e46ed997d0f..3f68b54aca2 100644
--- a/searchcore/src/tests/proton/matching/matching_test.cpp
+++ b/searchcore/src/tests/proton/matching/matching_test.cpp
@@ -192,7 +192,7 @@ struct MyWorld {
// metaStore
for (uint32_t i = 0; i < NUM_DOCS; ++i) {
- document::DocumentId docId(vespalib::make_string("doc::%u", i));
+ document::DocumentId docId(vespalib::make_string("id:ns:searchdocument::%u", i));
const document::GlobalId &gid = docId.getGlobalId();
document::BucketId bucketId(BucketFactory::getBucketId(docId));
uint32_t docSize = 1;
@@ -455,11 +455,11 @@ TEST("require that ranking is performed (multi-threaded)") {
EXPECT_EQUAL(9u, world.matchingStats.docsRanked());
EXPECT_EQUAL(0u, world.matchingStats.docsReRanked());
ASSERT_TRUE(reply->hits.size() == 9u);
- EXPECT_EQUAL(document::DocumentId("doc::900").getGlobalId(), reply->hits[0].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::900").getGlobalId(), reply->hits[0].gid);
EXPECT_EQUAL(900.0, reply->hits[0].metric);
- EXPECT_EQUAL(document::DocumentId("doc::800").getGlobalId(), reply->hits[1].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::800").getGlobalId(), reply->hits[1].gid);
EXPECT_EQUAL(800.0, reply->hits[1].metric);
- EXPECT_EQUAL(document::DocumentId("doc::700").getGlobalId(), reply->hits[2].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::700").getGlobalId(), reply->hits[2].gid);
EXPECT_EQUAL(700.0, reply->hits[2].metric);
EXPECT_GREATER(world.matchingStats.matchTimeAvg(), 0.0000001);
EXPECT_EQUAL(0.0, world.matchingStats.rerankTimeAvg());
@@ -478,15 +478,15 @@ TEST("require that re-ranking is performed (multi-threaded)") {
EXPECT_EQUAL(9u, world.matchingStats.docsRanked());
EXPECT_EQUAL(3u, world.matchingStats.docsReRanked());
ASSERT_TRUE(reply->hits.size() == 9u);
- EXPECT_EQUAL(document::DocumentId("doc::900").getGlobalId(), reply->hits[0].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::900").getGlobalId(), reply->hits[0].gid);
EXPECT_EQUAL(1800.0, reply->hits[0].metric);
- EXPECT_EQUAL(document::DocumentId("doc::800").getGlobalId(), reply->hits[1].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::800").getGlobalId(), reply->hits[1].gid);
EXPECT_EQUAL(1600.0, reply->hits[1].metric);
- EXPECT_EQUAL(document::DocumentId("doc::700").getGlobalId(), reply->hits[2].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::700").getGlobalId(), reply->hits[2].gid);
EXPECT_EQUAL(1400.0, reply->hits[2].metric);
- EXPECT_EQUAL(document::DocumentId("doc::600").getGlobalId(), reply->hits[3].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::600").getGlobalId(), reply->hits[3].gid);
EXPECT_EQUAL(600.0, reply->hits[3].metric);
- EXPECT_EQUAL(document::DocumentId("doc::500").getGlobalId(), reply->hits[4].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::500").getGlobalId(), reply->hits[4].gid);
EXPECT_EQUAL(500.0, reply->hits[4].metric);
EXPECT_GREATER(world.matchingStats.matchTimeAvg(), 0.0000001);
EXPECT_GREATER(world.matchingStats.rerankTimeAvg(), 0.0000001);
@@ -532,15 +532,15 @@ TEST("require that re-ranking is diverse with diversity = 1/1") {
EXPECT_EQUAL(9u, world.matchingStats.docsRanked());
EXPECT_EQUAL(3u, world.matchingStats.docsReRanked());
ASSERT_TRUE(reply->hits.size() == 9u);
- EXPECT_EQUAL(document::DocumentId("doc::900").getGlobalId(), reply->hits[0].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::900").getGlobalId(), reply->hits[0].gid);
EXPECT_EQUAL(1800.0, reply->hits[0].metric);
- EXPECT_EQUAL(document::DocumentId("doc::800").getGlobalId(), reply->hits[1].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::800").getGlobalId(), reply->hits[1].gid);
EXPECT_EQUAL(1600.0, reply->hits[1].metric);
- EXPECT_EQUAL(document::DocumentId("doc::700").getGlobalId(), reply->hits[2].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::700").getGlobalId(), reply->hits[2].gid);
EXPECT_EQUAL(1400.0, reply->hits[2].metric);
- EXPECT_EQUAL(document::DocumentId("doc::600").getGlobalId(), reply->hits[3].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::600").getGlobalId(), reply->hits[3].gid);
EXPECT_EQUAL(600.0, reply->hits[3].metric);
- EXPECT_EQUAL(document::DocumentId("doc::500").getGlobalId(), reply->hits[4].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::500").getGlobalId(), reply->hits[4].gid);
EXPECT_EQUAL(500.0, reply->hits[4].metric);
}
@@ -559,16 +559,16 @@ TEST("require that re-ranking is diverse with diversity = 1/10") {
EXPECT_EQUAL(9u, world.matchingStats.docsRanked());
EXPECT_EQUAL(1u, world.matchingStats.docsReRanked());
ASSERT_TRUE(reply->hits.size() == 9u);
- EXPECT_EQUAL(document::DocumentId("doc::900").getGlobalId(), reply->hits[0].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::900").getGlobalId(), reply->hits[0].gid);
EXPECT_EQUAL(1800.0, reply->hits[0].metric);
//TODO This is of course incorrect until the selectBest method sees everything.
- EXPECT_EQUAL(document::DocumentId("doc::800").getGlobalId(), reply->hits[1].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::800").getGlobalId(), reply->hits[1].gid);
EXPECT_EQUAL(800.0, reply->hits[1].metric);
- EXPECT_EQUAL(document::DocumentId("doc::700").getGlobalId(), reply->hits[2].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::700").getGlobalId(), reply->hits[2].gid);
EXPECT_EQUAL(700.0, reply->hits[2].metric);
- EXPECT_EQUAL(document::DocumentId("doc::600").getGlobalId(), reply->hits[3].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::600").getGlobalId(), reply->hits[3].gid);
EXPECT_EQUAL(600.0, reply->hits[3].metric);
- EXPECT_EQUAL(document::DocumentId("doc::500").getGlobalId(), reply->hits[4].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::500").getGlobalId(), reply->hits[4].gid);
EXPECT_EQUAL(500.0, reply->hits[4].metric);
}
@@ -585,11 +585,11 @@ TEST("require that sortspec can be used (multi-threaded)") {
}
SearchReply::UP reply = world.performSearch(request, threads);
ASSERT_EQUAL(9u, reply->hits.size());
- EXPECT_EQUAL(document::DocumentId("doc::100").getGlobalId(), reply->hits[0].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::100").getGlobalId(), reply->hits[0].gid);
EXPECT_EQUAL(zero_rank_value, reply->hits[0].metric);
- EXPECT_EQUAL(document::DocumentId("doc::200").getGlobalId(), reply->hits[1].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::200").getGlobalId(), reply->hits[1].gid);
EXPECT_EQUAL(zero_rank_value, reply->hits[1].metric);
- EXPECT_EQUAL(document::DocumentId("doc::300").getGlobalId(), reply->hits[2].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::300").getGlobalId(), reply->hits[2].gid);
EXPECT_EQUAL(zero_rank_value, reply->hits[2].metric);
EXPECT_EQUAL(drop_sort_data, reply->sortIndex.empty());
EXPECT_EQUAL(drop_sort_data, reply->sortData.empty());
@@ -911,7 +911,7 @@ TEST("require that same element search works (note that this does not test/use t
SearchRequest::SP request = world.createSameElementRequest("foo", "bar");
SearchReply::UP reply = world.performSearch(request, 1);
ASSERT_EQUAL(1u, reply->hits.size());
- EXPECT_EQUAL(document::DocumentId("doc::20").getGlobalId(), reply->hits[0].gid);
+ EXPECT_EQUAL(document::DocumentId("id:ns:searchdocument::20").getGlobalId(), reply->hits[0].gid);
}
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/persistenceengine/persistenceengine_test.cpp b/searchcore/src/tests/proton/persistenceengine/persistenceengine_test.cpp
index cdc0e8656d8..569b36a425d 100644
--- a/searchcore/src/tests/proton/persistenceengine/persistenceengine_test.cpp
+++ b/searchcore/src/tests/proton/persistenceengine/persistenceengine_test.cpp
@@ -326,15 +326,12 @@ DocumentId docId0;
DocumentId docId1("id:type1:type1::1");
DocumentId docId2("id:type2:type2::1");
DocumentId docId3("id:type3:type3::1");
-DocumentId old_docId("doc:old:id-scheme");
Document::SP doc1(createDoc(type1, docId1));
Document::SP doc2(createDoc(type2, docId2));
Document::SP doc3(createDoc(type3, docId3));
-Document::SP old_doc(createDoc(type1, old_docId));
document::DocumentUpdate::SP upd1(createUpd(type1, docId1));
document::DocumentUpdate::SP upd2(createUpd(type2, docId2));
document::DocumentUpdate::SP upd3(createUpd(type3, docId3));
-document::DocumentUpdate::SP old_upd(createUpd(type1, old_docId));
document::DocumentUpdate::SP bad_id_upd(createUpd(type1, docId2));
PartitionId partId(0);
BucketId bckId1(1);
@@ -474,14 +471,6 @@ TEST_F("require that puts are routed to handler", SimpleFixture)
}
-TEST_F("require that puts with old id scheme are rejected", SimpleFixture) {
- storage::spi::LoadType loadType(0, "default");
- Context context(loadType, storage::spi::Priority(0), storage::spi::Trace::TraceLevel(0));
- EXPECT_EQUAL(Result(Result::ErrorType::PERMANENT_ERROR, "Old id scheme not supported in elastic mode (doc:old:id-scheme)"),
- f.engine.put(bucket1, tstamp1, old_doc, context));
-}
-
-
TEST_F("require that put is rejected if resource limit is reached", SimpleFixture)
{
f._writeFilter._acceptWriteOperation = false;
@@ -491,8 +480,8 @@ TEST_F("require that put is rejected if resource limit is reached", SimpleFixtur
Context context(loadType, storage::spi::Priority(0), storage::spi::Trace::TraceLevel(0));
EXPECT_EQUAL(
Result(Result::ErrorType::RESOURCE_EXHAUSTED,
- "Put operation rejected for document 'doc:old:id-scheme': 'Disk is full'"),
- f.engine.put(bucket1, tstamp1, old_doc, context));
+ "Put operation rejected for document 'id:type3:type3::1': 'Disk is full'"),
+ f.engine.put(bucket1, tstamp1, doc3, context));
}
@@ -516,16 +505,6 @@ TEST_F("require that updates are routed to handler", SimpleFixture)
f.engine.update(bucket1, tstamp1, upd3, context));
}
-
-TEST_F("require that updates with old id scheme are rejected", SimpleFixture)
-{
- storage::spi::LoadType loadType(0, "default");
- Context context(loadType, storage::spi::Priority(0), storage::spi::Trace::TraceLevel(0));
-
- EXPECT_EQUAL(UpdateResult(Result::ErrorType::PERMANENT_ERROR, "Old id scheme not supported in elastic mode (doc:old:id-scheme)"),
- f.engine.update(bucket1, tstamp1, old_upd, context));
-}
-
TEST_F("require that updates with bad ids are rejected", SimpleFixture)
{
storage::spi::LoadType loadType(0, "default");
@@ -584,16 +563,6 @@ TEST_F("require that removes are routed to handlers", SimpleFixture)
EXPECT_FALSE(rr.hasError());
}
-
-TEST_F("require that removes with old id scheme are rejected", SimpleFixture)
-{
- storage::spi::LoadType loadType(0, "default");
- Context context(loadType, storage::spi::Priority(0), storage::spi::Trace::TraceLevel(0));
-
- EXPECT_EQUAL(RemoveResult(Result::ErrorType::PERMANENT_ERROR, "Old id scheme not supported in elastic mode (doc:old:id-scheme)"),
- f.engine.remove(bucket1, tstamp1, old_docId, context));
-}
-
TEST_F("require that remove is NOT rejected if resource limit is reached", SimpleFixture)
{
f._writeFilter._acceptWriteOperation = false;
diff --git a/searchcore/src/tests/proton/server/documentretriever_test.cpp b/searchcore/src/tests/proton/server/documentretriever_test.cpp
index d3fbaebcffb..d5e40592b12 100644
--- a/searchcore/src/tests/proton/server/documentretriever_test.cpp
+++ b/searchcore/src/tests/proton/server/documentretriever_test.cpp
@@ -22,7 +22,6 @@
#include <vespa/eval/tensor/tensor.h>
#include <vespa/eval/tensor/test/test_utils.h>
#include <vespa/persistence/spi/bucket.h>
-#include <vespa/persistence/spi/result.h>
#include <vespa/persistence/spi/test.h>
#include <vespa/searchcommon/common/schema.h>
#include <vespa/searchcore/proton/documentmetastore/documentmetastorecontext.h>
@@ -121,7 +120,7 @@ const char dyn_wset_field_i[] = "dynamic int wset field";
const char dyn_wset_field_d[] = "dynamic double wset field";
const char dyn_wset_field_s[] = "dynamic string wset field";
const char dyn_wset_field_n[] = "dynamic null wset field";
-const DocumentId doc_id("doc:test:1");
+const DocumentId doc_id("id:ns:type_name::1");
const int32_t static_value = 4;
const int32_t dyn_value_i = 17;
const double dyn_value_d = 42.42;
@@ -144,8 +143,7 @@ struct MyDocumentStore : proton::test::DummyDocumentStore {
~MyDocumentStore() override;
- virtual Document::UP read(DocumentIdT lid,
- const DocumentTypeRepo &r) const override {
+ Document::UP read(DocumentIdT lid, const DocumentTypeRepo &r) const override {
if (lid == 0) {
return Document::UP();
}
@@ -489,8 +487,7 @@ TEST_F("require that position fields are regenerated from zcurves", Fixture) {
EXPECT_EQUAL(-123096000, static_cast<IntFieldValue&>(*x).getValue());
EXPECT_EQUAL(49401000, static_cast<IntFieldValue&>(*y).getValue());
- checkFieldValue<LongFieldValue>(doc->getValue(zcurve_field),
- dynamic_zcurve_value);
+ checkFieldValue<LongFieldValue>(doc->getValue(zcurve_field), dynamic_zcurve_value);
}
TEST_F("require that non-existing lid returns null pointer", Fixture) {
diff --git a/searchcore/src/tests/proton/server/feeddebugger_test.cpp b/searchcore/src/tests/proton/server/feeddebugger_test.cpp
index c54e13f4840..b5bd1cfafa8 100644
--- a/searchcore/src/tests/proton/server/feeddebugger_test.cpp
+++ b/searchcore/src/tests/proton/server/feeddebugger_test.cpp
@@ -65,18 +65,18 @@ TEST("require that setting an environment variable turns on docid-specific"
" debugging.") {
EnvSaver save_lid_env(lid_env_name);
EnvSaver save_docid_env(docid_env_name);
- setenv(docid_env_name, "doc:test:foo,doc:test:bar,doc:test:baz", true);
+ setenv(docid_env_name, "id:ns:type::test:foo,id:ns:type::test:bar,id:ns:type::test:baz", true);
FeedDebugger debugger;
EXPECT_TRUE(debugger.isDebugging());
EXPECT_EQUAL(ns_log::Logger::info,
- debugger.getDebugLevel(1, DocumentId("doc:test:foo")));
+ debugger.getDebugLevel(1, DocumentId("id:ns:type::test:foo")));
EXPECT_EQUAL(ns_log::Logger::info,
- debugger.getDebugLevel(1, DocumentId("doc:test:bar")));
+ debugger.getDebugLevel(1, DocumentId("id:ns:type::test:bar")));
EXPECT_EQUAL(ns_log::Logger::info,
- debugger.getDebugLevel(1, DocumentId("doc:test:baz")));
+ debugger.getDebugLevel(1, DocumentId("id:ns:type::test:baz")));
EXPECT_EQUAL(ns_log::Logger::spam,
- debugger.getDebugLevel(1, DocumentId("doc:test:qux")));
+ debugger.getDebugLevel(1, DocumentId("id:ns:type::test:qux")));
}
} // namespace
diff --git a/searchcore/src/tests/proton/server/feedstates_test.cpp b/searchcore/src/tests/proton/server/feedstates_test.cpp
index f206ffc9b17..96096c0401f 100644
--- a/searchcore/src/tests/proton/server/feedstates_test.cpp
+++ b/searchcore/src/tests/proton/server/feedstates_test.cpp
@@ -100,7 +100,7 @@ struct RemoveOperationContext
};
RemoveOperationContext::RemoveOperationContext(search::SerialNum serial)
- : doc_id("doc:foo:bar"),
+ : doc_id("id:ns:doctypename::bar"),
op(BucketFactory::getBucketId(doc_id), Timestamp(10), doc_id),
str(), packet()
{
diff --git a/searchcore/src/vespa/searchcore/proton/common/feeddebugger.h b/searchcore/src/vespa/searchcore/proton/common/feeddebugger.h
index 3b02c0f2b76..5c582157174 100644
--- a/searchcore/src/vespa/searchcore/proton/common/feeddebugger.h
+++ b/searchcore/src/vespa/searchcore/proton/common/feeddebugger.h
@@ -27,7 +27,7 @@ private:
ns_log::Logger::LogLevel getDebugDebuggerInternal(uint32_t lid, const document::DocumentId * docid) const;
bool _enableDebugging;
std::vector<uint32_t> _debugLidList; // List of lids to dump when feeding/replaying log.
- std::vector<document::DocumentId> _debugDocIdList; // List of docids("doc:bla:blu" to dump when feeding/replaying log.
+ std::vector<document::DocumentId> _debugDocIdList; // List of docids ("id:ns:doctype::xyz") to dump when feeding/replaying log.
};
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentretrieverbase.cpp b/searchcore/src/vespa/searchcore/proton/server/documentretrieverbase.cpp
index d06319ae7f9..65a4f7e7c4a 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentretrieverbase.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/documentretrieverbase.cpp
@@ -4,16 +4,11 @@
#include <vespa/document/repo/documenttyperepo.h>
#include <vespa/document/datatype/documenttype.h>
#include <vespa/vespalib/stllike/lrucache_map.hpp>
+#include <vespa/vespalib/util/stringfmt.h>
using document::DocumentId;
using document::GlobalId;
-namespace {
-
-const DocumentId docId("doc:test:1");
-
-}
-
namespace proton {
DocumentRetrieverBase::DocumentRetrieverBase(
@@ -30,13 +25,12 @@ DocumentRetrieverBase::DocumentRetrieverBase(
_emptyDoc(),
_hasFields(hasFields)
{
- const document::DocumentType *
- docType(_repo.getDocumentType(_docTypeName.getName()));
- _emptyDoc.reset(new document::Document(*docType, docId));
+ const document::DocumentType * docType(_repo.getDocumentType(_docTypeName.getName()));
+ _emptyDoc.reset(new document::Document(*docType, DocumentId("id:empty:" + _docTypeName.getName() + "::empty")));
_emptyDoc->setRepo(_repo);
}
-DocumentRetrieverBase::~DocumentRetrieverBase() { }
+DocumentRetrieverBase::~DocumentRetrieverBase() = default;
const document::DocumentTypeRepo &
DocumentRetrieverBase::getDocumentTypeRepo() const {
diff --git a/searchlib/src/tests/diskindex/fusion/fusion_test.cpp b/searchlib/src/tests/diskindex/fusion/fusion_test.cpp
index b77df846ebb..1825c00ceda 100644
--- a/searchlib/src/tests/diskindex/fusion/fusion_test.cpp
+++ b/searchlib/src/tests/diskindex/fusion/fusion_test.cpp
@@ -100,7 +100,7 @@ toString(FieldPositionsIterator posItr, bool hasElements = false, bool hasWeight
std::unique_ptr<Document>
make_doc10(DocBuilder &b)
{
- b.startDocument("doc::10");
+ b.startDocument("id:ns:searchdocument::10");
b.startIndexField("f0").
addStr("a").addStr("b").addStr("c").addStr("d").
addStr("e").addStr("f").addStr("z").
@@ -325,7 +325,7 @@ FusionTest::requireThatFusionIsWorking(const vespalib::string &prefix, bool dire
myPushDocument(inv);
pushThreads.sync();
- b.startDocument("doc::11").
+ b.startDocument("id:ns:searchdocument::11").
startIndexField("f3").
startElement(-27).addStr("zz").endElement().
endField();
@@ -335,7 +335,7 @@ FusionTest::requireThatFusionIsWorking(const vespalib::string &prefix, bool dire
myPushDocument(inv);
pushThreads.sync();
- b.startDocument("doc::12").
+ b.startDocument("id:ns:searchdocument::12").
startIndexField("f3").
startElement(0).addStr("zz0").endElement().
endField();
diff --git a/searchlib/src/tests/engine/transportserver/transportserver_test.cpp b/searchlib/src/tests/engine/transportserver/transportserver_test.cpp
index a15e80da0a4..baa581c65f9 100644
--- a/searchlib/src/tests/engine/transportserver/transportserver_test.cpp
+++ b/searchlib/src/tests/engine/transportserver/transportserver_test.cpp
@@ -64,7 +64,7 @@ SyncServer::getDocsums(DocsumRequest::Source request, DocsumClient &)
LOG(info, "responding to docsum request...");
ret.docsums.resize(1);
ret.docsums[0].setData("data", strlen("data"));
- ret.docsums[0].gid = DocumentId(vespalib::make_string("doc::100")).getGlobalId();
+ ret.docsums[0].gid = DocumentId(vespalib::make_string("id:ns:type::100")).getGlobalId();
return reply;
}
@@ -145,7 +145,7 @@ TEST("transportserver") {
ASSERT_TRUE(p != 0);
ASSERT_TRUE(p->GetPCODE() == PCODE_DOCSUM);
FS4Packet_DOCSUM *r = (FS4Packet_DOCSUM*)p;
- EXPECT_EQUAL(r->getGid(), DocumentId("doc::100").getGlobalId());
+ EXPECT_EQUAL(r->getGid(), DocumentId("id:ns:type::100").getGlobalId());
p->Free();
p = q.DequeuePacket(60000, &ctx);
ASSERT_TRUE(p != 0);
diff --git a/searchlib/src/tests/index/docbuilder/docbuilder_test.cpp b/searchlib/src/tests/index/docbuilder/docbuilder_test.cpp
index 019c7096877..f880510647d 100644
--- a/searchlib/src/tests/index/docbuilder/docbuilder_test.cpp
+++ b/searchlib/src/tests/index/docbuilder/docbuilder_test.cpp
@@ -77,11 +77,11 @@ Test::testBuilder()
std::string xml;
{ // empty
- doc = b.startDocument("doc::0").endDocument();
+ doc = b.startDocument("id:ns:searchdocument::0").endDocument();
xml = doc->toXml("");
boost::split(lines, xml, boost::is_any_of("\n"));
itr = lines.begin();
- EXPECT_EQUAL("<document documenttype=\"searchdocument\" documentid=\"doc::0\"/>", *itr++);
+ EXPECT_EQUAL("<document documenttype=\"searchdocument\" documentid=\"id:ns:searchdocument::0\"/>", *itr++);
EXPECT_EQUAL("", *itr++);
EXPECT_TRUE(itr == lines.end());
}
@@ -105,7 +105,7 @@ Test::testBuilder()
&binaryBlob[0] + binaryBlob.size());
raw1w1 += std::string(&binaryBlob[0],
&binaryBlob[0] + binaryBlob.size());
- b.startDocument("doc::1");
+ b.startDocument("id:ns:searchdocument::1");
b.startIndexField("ia").addStr("foo").addStr("bar").addStr("baz").addTermAnnotation("altbaz").endField();
b.startIndexField("ib").startElement().addStr("foo").endElement().
startElement(1).addStr("bar").addStr("baz").endElement().endField();
@@ -289,7 +289,7 @@ Test::testBuilder()
xml = doc->toXml("");
boost::split(lines, xml, boost::is_any_of("\n"));
itr = lines.begin();
- EXPECT_EQUAL("<document documenttype=\"searchdocument\" documentid=\"doc::1\">", *itr++);
+ EXPECT_EQUAL("<document documenttype=\"searchdocument\" documentid=\"id:ns:searchdocument::1\">", *itr++);
EXPECT_EQUAL("<sj>", *itr++);
EXPECT_EQUAL(empty +"<item weight=\"46\" binaryencoding=\"base64\">" +
vespalib::Base64::encode(raw1w1) +
@@ -425,7 +425,7 @@ Test::testBuilder()
#endif
}
{ // create one more to see that everything is cleared
- b.startDocument("doc::2");
+ b.startDocument("id:ns:searchdocument::2");
b.startIndexField("ia").addStr("yes").endField();
b.startAttributeField("aa").addInt(20).endField();
b.startSummaryField("sa").addInt(10).endField();
@@ -433,7 +433,7 @@ Test::testBuilder()
xml = doc->toXml("");
boost::split(lines, xml, boost::is_any_of("\n"));
itr = lines.begin();
- EXPECT_EQUAL("<document documenttype=\"searchdocument\" documentid=\"doc::2\">", *itr++);
+ EXPECT_EQUAL("<document documenttype=\"searchdocument\" documentid=\"id:ns:searchdocument::2\">", *itr++);
EXPECT_EQUAL("<sa>10</sa>", *itr++);
EXPECT_EQUAL("<aa>20</aa>", *itr++);
EXPECT_EQUAL("<ia>yes</ia>", *itr++);
@@ -441,7 +441,7 @@ Test::testBuilder()
EXPECT_TRUE(itr == lines.end());
}
{ // create field with cjk chars
- b.startDocument("doc::3");
+ b.startDocument("id:ns:searchdocument::3");
b.startIndexField("ia").
addStr("我就是那个").
setAutoSpace(false).
@@ -452,7 +452,7 @@ Test::testBuilder()
xml = doc->toXml("");
boost::split(lines, xml, boost::is_any_of("\n"));
itr = lines.begin();
- EXPECT_EQUAL("<document documenttype=\"searchdocument\" documentid=\"doc::3\">", *itr++);
+ EXPECT_EQUAL("<document documenttype=\"searchdocument\" documentid=\"id:ns:searchdocument::3\">", *itr++);
EXPECT_EQUAL("<ia>我就是那个大灰狼</ia>", *itr++);
EXPECT_EQUAL("</document>", *itr++);
EXPECT_TRUE(itr == lines.end());
diff --git a/searchlib/src/tests/memoryindex/document_inverter/document_inverter_test.cpp b/searchlib/src/tests/memoryindex/document_inverter/document_inverter_test.cpp
index 38862dfe94b..3f798df3c05 100644
--- a/searchlib/src/tests/memoryindex/document_inverter/document_inverter_test.cpp
+++ b/searchlib/src/tests/memoryindex/document_inverter/document_inverter_test.cpp
@@ -28,7 +28,7 @@ namespace {
Document::UP
makeDoc10(DocBuilder &b)
{
- b.startDocument("doc::10");
+ b.startDocument("id:ns:searchdocument::10");
b.startIndexField("f0").
addStr("a").addStr("b").addStr("c").addStr("d").
endField();
@@ -38,7 +38,7 @@ makeDoc10(DocBuilder &b)
Document::UP
makeDoc11(DocBuilder &b)
{
- b.startDocument("doc::11");
+ b.startDocument("id:ns:searchdocument::11");
b.startIndexField("f0").
addStr("a").addStr("b").addStr("e").addStr("f").
endField();
@@ -51,7 +51,7 @@ makeDoc11(DocBuilder &b)
Document::UP
makeDoc12(DocBuilder &b)
{
- b.startDocument("doc::12");
+ b.startDocument("id:ns:searchdocument::12");
b.startIndexField("f0").
addStr("h").addStr("doc12").
endField();
@@ -61,7 +61,7 @@ makeDoc12(DocBuilder &b)
Document::UP
makeDoc13(DocBuilder &b)
{
- b.startDocument("doc::13");
+ b.startDocument("id:ns:searchdocument::13");
b.startIndexField("f0").
addStr("i").addStr("doc13").
endField();
@@ -71,7 +71,7 @@ makeDoc13(DocBuilder &b)
Document::UP
makeDoc14(DocBuilder &b)
{
- b.startDocument("doc::14");
+ b.startDocument("id:ns:searchdocument::14");
b.startIndexField("f0").
addStr("j").addStr("doc14").
endField();
@@ -81,7 +81,7 @@ makeDoc14(DocBuilder &b)
Document::UP
makeDoc15(DocBuilder &b)
{
- b.startDocument("doc::15");
+ b.startDocument("id:ns:searchdocument::15");
return b.endDocument();
}
diff --git a/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp b/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp
index ac1735e0549..512e1bd2051 100644
--- a/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp
+++ b/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp
@@ -937,7 +937,7 @@ TEST_F(BasicInverterTest, require_that_inversion_is_working)
{
Document::UP doc;
- _b.startDocument("doc::10");
+ _b.startDocument("id:ns:searchdocument::10");
_b.startIndexField("f0").
addStr("a").addStr("b").addStr("c").addStr("d").
endField();
@@ -947,7 +947,7 @@ TEST_F(BasicInverterTest, require_that_inversion_is_working)
myPushDocument(_inv);
_pushThreads.sync();
- _b.startDocument("doc::20");
+ _b.startDocument("id:ns:searchdocument::20");
_b.startIndexField("f0").
addStr("a").addStr("a").addStr("b").addStr("c").addStr("d").
endField();
@@ -957,7 +957,7 @@ TEST_F(BasicInverterTest, require_that_inversion_is_working)
myPushDocument(_inv);
_pushThreads.sync();
- _b.startDocument("doc::30");
+ _b.startDocument("id:ns:searchdocument::30");
_b.startIndexField("f0").
addStr("a").addStr("b").addStr("c").addStr("d").
addStr("e").addStr("f").
@@ -988,7 +988,7 @@ TEST_F(BasicInverterTest, require_that_inversion_is_working)
myPushDocument(_inv);
_pushThreads.sync();
- _b.startDocument("doc::40");
+ _b.startDocument("id:ns:searchdocument::40");
_b.startIndexField("f0").
addStr("a").addStr("a").addStr("b").addStr("c").addStr("a").
addStr("e").addStr("f").
@@ -999,7 +999,7 @@ TEST_F(BasicInverterTest, require_that_inversion_is_working)
myPushDocument(_inv);
_pushThreads.sync();
- _b.startDocument("doc::999");
+ _b.startDocument("id:ns:searchdocument::999");
_b.startIndexField("f0").
addStr("this").addStr("is").addStr("_a_").addStr("test").
addStr("for").addStr("insertion").addStr("speed").addStr("with").
@@ -1137,7 +1137,7 @@ TEST_F(BasicInverterTest, require_that_inverter_handles_remove_via_document_remo
{
Document::UP doc;
- _b.startDocument("doc::1");
+ _b.startDocument("id:ns:searchdocument::1");
_b.startIndexField("f0").addStr("a").addStr("b").endField();
_b.startIndexField("f1").addStr("a").addStr("c").endField();
Document::UP doc1 = _b.endDocument();
@@ -1146,7 +1146,7 @@ TEST_F(BasicInverterTest, require_that_inverter_handles_remove_via_document_remo
myPushDocument(_inv);
_pushThreads.sync();
- _b.startDocument("doc::2");
+ _b.startDocument("id:ns:searchdocument::2");
_b.startIndexField("f0").addStr("b").addStr("c").endField();
Document::UP doc2 = _b.endDocument();
_inv.invertDocument(2, *doc2.get());
@@ -1189,7 +1189,7 @@ TEST_F(UriInverterTest, require_that_uri_indexing_is_working)
{
Document::UP doc;
- _b.startDocument("doc::10");
+ _b.startDocument("id:ns:searchdocument::10");
_b.startIndexField("iu").
startSubField("all").
addUrlTokenizedString("http://www.example.com:81/fluke?ab=2#4").
@@ -1378,7 +1378,7 @@ TEST_F(CjkInverterTest, require_that_cjk_indexing_is_working)
{
Document::UP doc;
- _b.startDocument("doc::10");
+ _b.startDocument("id:ns:searchdocument::10");
_b.startIndexField("f0").
addStr("我就是那个").
setAutoSpace(false).
diff --git a/searchlib/src/tests/memoryindex/field_inverter/field_inverter_test.cpp b/searchlib/src/tests/memoryindex/field_inverter/field_inverter_test.cpp
index 72a8f6ed239..d3a286b3c1b 100644
--- a/searchlib/src/tests/memoryindex/field_inverter/field_inverter_test.cpp
+++ b/searchlib/src/tests/memoryindex/field_inverter/field_inverter_test.cpp
@@ -26,7 +26,7 @@ namespace {
Document::UP
makeDoc10(DocBuilder &b)
{
- b.startDocument("doc::10");
+ b.startDocument("id:ns:searchdocument::10");
b.startIndexField("f0").
addStr("a").addStr("b").addStr("c").addStr("d").
endField();
@@ -36,7 +36,7 @@ makeDoc10(DocBuilder &b)
Document::UP
makeDoc11(DocBuilder &b)
{
- b.startDocument("doc::11");
+ b.startDocument("id:ns:searchdocument::11");
b.startIndexField("f0").
addStr("a").addStr("b").addStr("e").addStr("f").
endField();
@@ -49,7 +49,7 @@ makeDoc11(DocBuilder &b)
Document::UP
makeDoc12(DocBuilder &b)
{
- b.startDocument("doc::12");
+ b.startDocument("id:ns:searchdocument::12");
b.startIndexField("f0").
addStr("h").addStr("doc12").
endField();
@@ -59,7 +59,7 @@ makeDoc12(DocBuilder &b)
Document::UP
makeDoc13(DocBuilder &b)
{
- b.startDocument("doc::13");
+ b.startDocument("id:ns:searchdocument::13");
b.startIndexField("f0").
addStr("i").addStr("doc13").
endField();
@@ -69,7 +69,7 @@ makeDoc13(DocBuilder &b)
Document::UP
makeDoc14(DocBuilder &b)
{
- b.startDocument("doc::14");
+ b.startDocument("id:ns:searchdocument::14");
b.startIndexField("f0").
addStr("j").addStr("doc14").
endField();
@@ -79,14 +79,14 @@ makeDoc14(DocBuilder &b)
Document::UP
makeDoc15(DocBuilder &b)
{
- b.startDocument("doc::15");
+ b.startDocument("id:ns:searchdocument::15");
return b.endDocument();
}
Document::UP
makeDoc16(DocBuilder &b)
{
- b.startDocument("doc::16");
+ b.startDocument("id:ns:searchdocument::16");
b.startIndexField("f0").addStr("foo").addStr("bar").addStr("baz").
addTermAnnotation("altbaz").addStr("y").addTermAnnotation("alty").
addStr("z").endField();
@@ -96,7 +96,7 @@ makeDoc16(DocBuilder &b)
Document::UP
makeDoc17(DocBuilder &b)
{
- b.startDocument("doc::17");
+ b.startDocument("id:ns:searchdocument::17");
b.startIndexField("f1").addStr("foo0").addStr("bar0").endField();
b.startIndexField("f2").startElement(1).addStr("foo").addStr("bar").endElement().startElement(1).addStr("bar").endElement().endField();
b.startIndexField("f3").startElement(3).addStr("foo2").addStr("bar2").endElement().startElement(4).addStr("bar2").endElement().endField();
diff --git a/searchlib/src/tests/memoryindex/memory_index/memory_index_test.cpp b/searchlib/src/tests/memoryindex/memory_index/memory_index_test.cpp
index dd4bb2cef7f..a320c4a0641 100644
--- a/searchlib/src/tests/memoryindex/memory_index/memory_index_test.cpp
+++ b/searchlib/src/tests/memoryindex/memory_index/memory_index_test.cpp
@@ -81,7 +81,7 @@ struct Index {
}
Index &doc(uint32_t id) {
docid = id;
- builder.startDocument(vespalib::make_string("doc::%u", id));
+ builder.startDocument(vespalib::make_string("id:ns:searchdocument::%u", id));
return *this;
}
Index &field(const std::string &name) {
diff --git a/searchlib/src/tests/memoryindex/url_field_inverter/url_field_inverter_test.cpp b/searchlib/src/tests/memoryindex/url_field_inverter/url_field_inverter_test.cpp
index 2151a44a66d..86c58c11c09 100644
--- a/searchlib/src/tests/memoryindex/url_field_inverter/url_field_inverter_test.cpp
+++ b/searchlib/src/tests/memoryindex/url_field_inverter/url_field_inverter_test.cpp
@@ -27,7 +27,7 @@ const vespalib::string url = "url";
Document::UP
makeDoc10Single(DocBuilder &b)
{
- b.startDocument("doc::10");
+ b.startDocument("id:ns:searchdocument::10");
b.startIndexField("url").
startSubField("all").
addUrlTokenizedString("http://www.example.com:81/fluke?ab=2#4").
@@ -58,7 +58,7 @@ makeDoc10Single(DocBuilder &b)
Document::UP
makeDoc10Array(DocBuilder &b)
{
- b.startDocument("doc::10");
+ b.startDocument("id:ns:searchdocument::10");
b.startIndexField("url").
startElement(1).
startSubField("all").
@@ -114,7 +114,7 @@ makeDoc10Array(DocBuilder &b)
Document::UP
makeDoc10WeightedSet(DocBuilder &b)
{
- b.startDocument("doc::10");
+ b.startDocument("id:ns:searchdocument::10");
b.startIndexField("url").
startElement(4).
startSubField("all").
@@ -170,7 +170,7 @@ makeDoc10WeightedSet(DocBuilder &b)
Document::UP
makeDoc10Empty(DocBuilder &b)
{
- b.startDocument("doc::10");
+ b.startDocument("id:ns:searchdocument::10");
return b.endDocument();
}
diff --git a/security-tools/src/main/sh/vespa-curl-wrapper b/security-tools/src/main/sh/vespa-curl-wrapper
index da1465a07bc..4c616b0fc4e 100755
--- a/security-tools/src/main/sh/vespa-curl-wrapper
+++ b/security-tools/src/main/sh/vespa-curl-wrapper
@@ -4,6 +4,75 @@
# Uses vespa-security-env to call curl with paths to credentials.
# This script should be installed in libexec only. It is not public api.
+# BEGIN environment bootstrap section
+# Do not edit between here and END as this section should stay identical in all scripts
+
+findpath () {
+ myname=${0}
+ mypath=${myname%/*}
+ myname=${myname##*/}
+ if [ "$mypath" ] && [ -d "$mypath" ]; then
+ return
+ fi
+ mypath=$(pwd)
+ if [ -f "${mypath}/${myname}" ]; then
+ return
+ fi
+ echo "FATAL: Could not figure out the path where $myname lives from $0"
+ exit 1
+}
+
+COMMON_ENV=libexec/vespa/common-env.sh
+
+source_common_env () {
+ if [ "$VESPA_HOME" ] && [ -d "$VESPA_HOME" ]; then
+ export VESPA_HOME
+ common_env=$VESPA_HOME/$COMMON_ENV
+ if [ -f "$common_env" ]; then
+ . $common_env
+ return
+ fi
+ fi
+ return 1
+}
+
+findroot () {
+ source_common_env && return
+ if [ "$VESPA_HOME" ]; then
+ echo "FATAL: bad VESPA_HOME value '$VESPA_HOME'"
+ exit 1
+ fi
+ if [ "$ROOT" ] && [ -d "$ROOT" ]; then
+ VESPA_HOME="$ROOT"
+ source_common_env && return
+ fi
+ findpath
+ while [ "$mypath" ]; do
+ VESPA_HOME=${mypath}
+ source_common_env && return
+ mypath=${mypath%/*}
+ done
+ echo "FATAL: missing VESPA_HOME environment variable"
+ echo "Could not locate $COMMON_ENV anywhere"
+ exit 1
+}
+
+findhost () {
+ if [ "${VESPA_HOSTNAME}" = "" ]; then
+ VESPA_HOSTNAME=$(vespa-detect-hostname || hostname -f || hostname || echo "localhost") || exit 1
+ fi
+ validate="${VESPA_HOME}/bin/vespa-validate-hostname"
+ if [ -f "$validate" ]; then
+ "$validate" "${VESPA_HOSTNAME}" || exit 1
+ fi
+ export VESPA_HOSTNAME
+}
+
+findroot
+findhost
+
+# END environment bootstrap section
+
set -e
eval $($VESPA_HOME/bin/vespa-security-env)
diff --git a/staging_vespalib/src/tests/state_server/state_server_test.cpp b/staging_vespalib/src/tests/state_server/state_server_test.cpp
index 6c7397a1719..e61d3d216cd 100644
--- a/staging_vespalib/src/tests/state_server/state_server_test.cpp
+++ b/staging_vespalib/src/tests/state_server/state_server_test.cpp
@@ -87,6 +87,12 @@ TEST_FF("require that non-empty known url returns expected headers", DummyHandle
"Connection: close\r\n"
"Content-Type: application/json\r\n"
"Content-Length: 5\r\n"
+ "X-XSS-Protection: 1; mode=block\r\n"
+ "X-Frame-Options: DENY\r\n"
+ "Content-Security-Policy: default-src 'none'\r\n"
+ "X-Content-Type-Options: nosniff\r\n"
+ "Cache-Control: no-store\r\n"
+ "Pragma: no-cache\r\n"
"\r\n"
"[123]");
std::string actual = getFull(f2.port(), my_path);
diff --git a/storage/src/tests/bucketdb/bucketmanagertest.cpp b/storage/src/tests/bucketdb/bucketmanagertest.cpp
index 11f5ec70014..ad46d494d11 100644
--- a/storage/src/tests/bucketdb/bucketmanagertest.cpp
+++ b/storage/src/tests/bucketdb/bucketmanagertest.cpp
@@ -171,10 +171,8 @@ void BucketManagerTest::setupTestEnvironment(bool fakePersistenceLayer,
_top->push_back(std::move(bottom));
}
// Generate a doc to use for testing..
- const DocumentType &type(*_node->getTypeRepo()
- ->getDocumentType("text/html"));
- _document = std::make_shared<document::Document>(
- type, document::DocumentId(document::DocIdString("test", "ntnu")));
+ const DocumentType &type(*_node->getTypeRepo()->getDocumentType("text/html"));
+ _document = std::make_shared<document::Document>(type, document::DocumentId("id:ns:text/html::ntnu"));
}
void BucketManagerTest::addBucketsToDB(uint32_t count)
diff --git a/storage/src/tests/distributor/getoperationtest.cpp b/storage/src/tests/distributor/getoperationtest.cpp
index 4b67cf1963d..7c308e152db 100644
--- a/storage/src/tests/distributor/getoperationtest.cpp
+++ b/storage/src/tests/distributor/getoperationtest.cpp
@@ -40,7 +40,7 @@ struct GetOperationTest : Test, DistributorTestUtil {
FileSpec("../config-doctypes.cfg"))));
createLinks();
- docId = document::DocumentId(document::DocIdString("test", "uri"));
+ docId = document::DocumentId("id:ns:text/html::uri");
bucketId = getExternalOperationHandler().getBucketId(docId);
};
@@ -133,7 +133,7 @@ TEST_F(GetOperationTest, simple) {
ASSERT_NO_FATAL_FAILURE(replyWithDocument());
- EXPECT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ EXPECT_EQ("GetReply(BucketId(0x0000000000000000), id:ns:text/html::uri, "
"timestamp 100) ReturnCode(NONE)",
_sender.getLastReply());
}
@@ -149,7 +149,7 @@ TEST_F(GetOperationTest, ask_trusted_node_if_bucket_is_inconsistent) {
ASSERT_NO_FATAL_FAILURE(replyWithDocument());
- EXPECT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ EXPECT_EQ("GetReply(BucketId(0x0000000000000000), id:ns:text/html::uri, "
"timestamp 100) ReturnCode(NONE)",
_sender.getLastReply());
}
@@ -166,7 +166,7 @@ TEST_F(GetOperationTest, ask_all_nodes_if_bucket_is_inconsistent) {
ASSERT_NO_FATAL_FAILURE(sendReply(0, api::ReturnCode::OK, "newauthor", 2));
ASSERT_NO_FATAL_FAILURE(sendReply(1, api::ReturnCode::OK, "oldauthor", 1));
- ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), id:ns:text/html::uri, "
"timestamp 2) ReturnCode(NONE)",
_sender.getLastReply());
@@ -185,7 +185,7 @@ TEST_F(GetOperationTest, send_to_all_invalid_copies) {
ASSERT_NO_FATAL_FAILURE(sendReply(0, api::ReturnCode::OK, "newauthor", 2));
ASSERT_NO_FATAL_FAILURE(sendReply(1, api::ReturnCode::OK, "oldauthor", 1));
- ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), id:ns:text/html::uri, "
"timestamp 2) ReturnCode(NONE)",
_sender.getLastReply());
@@ -207,7 +207,7 @@ TEST_F(GetOperationTest, send_to_all_invalid_nodes_when_inconsistent) {
ASSERT_NO_FATAL_FAILURE(sendReply(2, api::ReturnCode::OK, "oldauthor", 1));
ASSERT_NO_FATAL_FAILURE(sendReply(3, api::ReturnCode::OK, "oldauthor", 1));
- ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), id:ns:text/html::uri, "
"timestamp 2) ReturnCode(NONE)",
_sender.getLastReply());
@@ -217,8 +217,8 @@ TEST_F(GetOperationTest, send_to_all_invalid_nodes_when_inconsistent) {
TEST_F(GetOperationTest, inconsistent_split) {
setClusterState("distributor:1 storage:4");
- addNodesToBucketDB(document::BucketId(16, 0x2a52), "0=100");
- addNodesToBucketDB(document::BucketId(17, 0x2a52), "1=200");
+ addNodesToBucketDB(document::BucketId(16, 0x0593), "0=100");
+ addNodesToBucketDB(document::BucketId(17, 0x10593), "1=200");
sendGet();
@@ -227,7 +227,7 @@ TEST_F(GetOperationTest, inconsistent_split) {
ASSERT_NO_FATAL_FAILURE(sendReply(0, api::ReturnCode::OK, "newauthor", 2));
ASSERT_NO_FATAL_FAILURE(sendReply(1, api::ReturnCode::OK, "oldauthor", 1));
- ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), id:ns:text/html::uri, "
"timestamp 2) ReturnCode(NONE)",
_sender.getLastReply());
@@ -246,7 +246,7 @@ TEST_F(GetOperationTest, multi_inconsistent_bucket_not_found) {
ASSERT_NO_FATAL_FAILURE(sendReply(0, api::ReturnCode::OK, "newauthor", 2));
ASSERT_NO_FATAL_FAILURE(sendReply(1, api::ReturnCode::OK, "", 0));
- ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), id:ns:text/html::uri, "
"timestamp 2) ReturnCode(NONE)",
_sender.getLastReply());
}
@@ -265,7 +265,7 @@ TEST_F(GetOperationTest, multi_inconsistent_bucket_not_found_deleted) {
// at timestamp 3.
ASSERT_NO_FATAL_FAILURE(sendReply(1, api::ReturnCode::OK, "", 3));
- ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), id:ns:text/html::uri, "
"timestamp 3) ReturnCode(NONE)",
_sender.getLastReply());
}
@@ -282,7 +282,7 @@ TEST_F(GetOperationTest, multi_inconsistent_bucket) {
ASSERT_NO_FATAL_FAILURE(sendReply(0, api::ReturnCode::OK, "newauthor", 2));
ASSERT_NO_FATAL_FAILURE(sendReply(1, api::ReturnCode::OK, "oldauthor", 1));
- ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), id:ns:text/html::uri, "
"timestamp 2) ReturnCode(NONE)",
_sender.getLastReply());
@@ -301,12 +301,12 @@ TEST_F(GetOperationTest, multi_inconsistent_bucket_fail) {
ASSERT_NO_FATAL_FAILURE(sendReply(0, api::ReturnCode::OK, "newauthor", 1));
ASSERT_NO_FATAL_FAILURE(sendReply(1, api::ReturnCode::DISK_FAILURE, "", 0));
- ASSERT_EQ("Get(BucketId(0x4000000000002a52), doc:test:uri) => 3",
+ ASSERT_EQ("Get(BucketId(0x4000000000000593), id:ns:text/html::uri) => 3",
_sender.getLastCommand());
ASSERT_NO_FATAL_FAILURE(replyWithDocument());
- ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), id:ns:text/html::uri, "
"timestamp 100) ReturnCode(NONE)",
_sender.getLastReply());
}
@@ -316,7 +316,7 @@ TEST_F(GetOperationTest, return_not_found_when_bucket_not_in_db) {
sendGet();
- ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), id:ns:text/html::uri, "
"timestamp 0) ReturnCode(NONE)",
_sender.getLastReply());
}
@@ -328,12 +328,12 @@ TEST_F(GetOperationTest, not_found) {
sendGet();
- ASSERT_EQ("Get(BucketId(0x4000000000002a52), doc:test:uri) => 0",
+ ASSERT_EQ("Get(BucketId(0x4000000000000593), id:ns:text/html::uri) => 0",
_sender.getLastCommand());
ASSERT_NO_FATAL_FAILURE(replyWithNotFound());
- ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), id:ns:text/html::uri, "
"timestamp 0) ReturnCode(NONE)",
_sender.getLastReply());
@@ -350,17 +350,17 @@ TEST_F(GetOperationTest, resend_on_storage_failure) {
sendGet();
- ASSERT_EQ("Get(BucketId(0x4000000000002a52), doc:test:uri) => 1",
+ ASSERT_EQ("Get(BucketId(0x4000000000000593), id:ns:text/html::uri) => 1",
_sender.getLastCommand());
ASSERT_NO_FATAL_FAILURE(replyWithFailure());
- ASSERT_EQ("Get(BucketId(0x4000000000002a52), doc:test:uri) => 2",
+ ASSERT_EQ("Get(BucketId(0x4000000000000593), id:ns:text/html::uri) => 2",
_sender.getLastCommand());
ASSERT_NO_FATAL_FAILURE(replyWithDocument());
- ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), id:ns:text/html::uri, "
"timestamp 100) ReturnCode(NONE)",
_sender.getLastReply());
}
@@ -374,17 +374,17 @@ TEST_F(GetOperationTest, resend_on_storage_failure_all_fail) {
sendGet();
- ASSERT_EQ("Get(BucketId(0x4000000000002a52), doc:test:uri) => 1",
+ ASSERT_EQ("Get(BucketId(0x4000000000000593), id:ns:text/html::uri) => 1",
_sender.getLastCommand());
ASSERT_NO_FATAL_FAILURE(replyWithFailure());
- ASSERT_EQ("Get(BucketId(0x4000000000002a52), doc:test:uri) => 2",
+ ASSERT_EQ("Get(BucketId(0x4000000000000593), id:ns:text/html::uri) => 2",
_sender.getLastCommand());
ASSERT_NO_FATAL_FAILURE(replyWithFailure());
- ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), id:ns:text/html::uri, "
"timestamp 0) ReturnCode(IO_FAILURE)",
_sender.getLastReply());
}
@@ -397,12 +397,12 @@ TEST_F(GetOperationTest, send_to_ideal_copy_if_bucket_in_sync) {
sendGet();
// Should always send to node 1 (follow bucket db order)
- ASSERT_EQ("Get(BucketId(0x4000000000002a52), doc:test:uri) => 1",
+ ASSERT_EQ("Get(BucketId(0x4000000000000593), id:ns:text/html::uri) => 1",
_sender.getLastCommand());
ASSERT_NO_FATAL_FAILURE(replyWithDocument());
- ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), id:ns:text/html::uri, "
"timestamp 100) ReturnCode(NONE)",
_sender.getLastReply());
}
@@ -412,7 +412,7 @@ TEST_F(GetOperationTest, multiple_copies_with_failure_on_local_node) {
// Node 0 is local copy to distributor 0 and will be preferred when
// sending initially.
- addNodesToBucketDB(document::BucketId(16, 0x2a52), "2=100,0=100");
+ addNodesToBucketDB(document::BucketId(16, 0x0593), "2=100,0=100");
sendGet();
@@ -427,7 +427,7 @@ TEST_F(GetOperationTest, multiple_copies_with_failure_on_local_node) {
ASSERT_NO_FATAL_FAILURE(sendReply(1, api::ReturnCode::OK, "newestauthor", 3));
- ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), id:ns:text/html::uri, "
"timestamp 3) ReturnCode(NONE)",
_sender.getLastReply());
diff --git a/storage/src/tests/distributor/pendingmessagetrackertest.cpp b/storage/src/tests/distributor/pendingmessagetrackertest.cpp
index a4a883d7059..e1bca1a1890 100644
--- a/storage/src/tests/distributor/pendingmessagetrackertest.cpp
+++ b/storage/src/tests/distributor/pendingmessagetrackertest.cpp
@@ -173,7 +173,7 @@ TEST_F(PendingMessageTrackerTest, simple) {
std::ostringstream ost;
tracker.reportStatus(ost, framework::HttpUrlPath("/pendingmessages?order=bucket"));
- EXPECT_THAT(ost.str(), Not(HasSubstr("doc:")));
+ EXPECT_THAT(ost.str(), Not(HasSubstr("id:")));
}
}
diff --git a/storage/src/tests/distributor/putoperationtest.cpp b/storage/src/tests/distributor/putoperationtest.cpp
index 99c6ec3d71e..d882d17841e 100644
--- a/storage/src/tests/distributor/putoperationtest.cpp
+++ b/storage/src/tests/distributor/putoperationtest.cpp
@@ -85,7 +85,7 @@ public:
}
Document::SP createDummyDocument(const char* ns, const char* id) const {
- return std::make_shared<Document>(doc_type(), DocumentId(DocIdString(ns, id)));
+ return std::make_shared<Document>(doc_type(), DocumentId(vespalib::make_string("id:%s:testdoctype1::%s", ns, id)));
}
std::shared_ptr<api::PutCommand> createPut(Document::SP doc) const {
@@ -97,7 +97,7 @@ PutOperationTest::~PutOperationTest() = default;
document::BucketId
PutOperationTest::createAndSendSampleDocument(uint32_t timeout) {
- auto doc = std::make_shared<Document>(doc_type(), DocumentId(DocIdString("test", "test")));
+ auto doc = std::make_shared<Document>(doc_type(), DocumentId("id:test:testdoctype1::"));
document::BucketId id = getExternalOperationHandler().getBucketId(doc->getId());
addIdealNodes(id);
@@ -123,13 +123,13 @@ TEST_F(PutOperationTest, simple) {
setupDistributor(1, 1, "storage:1 distributor:1");
createAndSendSampleDocument(180);
- ASSERT_EQ("Put(BucketId(0x4000000000008b13), "
- "doc:test:test, timestamp 100, size 36) => 0",
+ ASSERT_EQ("Put(BucketId(0x4000000000001dd4), "
+ "id:test:testdoctype1::, timestamp 100, size 45) => 0",
_sender.getCommands(true, true));
sendReply();
- ASSERT_EQ("PutReply(doc:test:test, BucketId(0x0000000000000000), "
+ ASSERT_EQ("PutReply(id:test:testdoctype1::, BucketId(0x0000000000000000), "
"timestamp 100) ReturnCode(NONE)",
_sender.getLastReply());
}
@@ -141,7 +141,7 @@ TEST_F(PutOperationTest, bucket_database_gets_special_entry_when_CreateBucket_se
sendPut(createPut(doc));
// Database updated before CreateBucket is sent
- ASSERT_EQ("BucketId(0x4000000000008b13) : "
+ ASSERT_EQ("BucketId(0x4000000000008f09) : "
"node(idx=0,crc=0x1,docs=0/0,bytes=0/0,trusted=true,active=true,ready=false)",
dumpBucket(getExternalOperationHandler().getBucketId(doc->getId())));
@@ -153,16 +153,16 @@ TEST_F(PutOperationTest, send_inline_split_before_put_if_bucket_too_large) {
getConfig().setSplitCount(1024);
getConfig().setSplitSize(1000000);
- addNodesToBucketDB(document::BucketId(0x4000000000002a52), "0=10000/10000/10000/t");
+ addNodesToBucketDB(document::BucketId(0x4000000000000593), "0=10000/10000/10000/t");
sendPut(createPut(createDummyDocument("test", "uri")));
- ASSERT_EQ("SplitBucketCommand(BucketId(0x4000000000002a52)Max doc count: "
+ ASSERT_EQ("SplitBucketCommand(BucketId(0x4000000000000593)Max doc count: "
"1024, Max total doc size: 1000000) Reasons to start: "
"[Splitting bucket because its maximum size (10000 b, 10000 docs, 10000 meta, 10000 b total) is "
"higher than the configured limit of (1000000, 1024)] => 0,"
- "Put(BucketId(0x4000000000002a52), doc:test:uri, timestamp 100, "
- "size 35) => 0",
+ "Put(BucketId(0x4000000000000593), id:test:testdoctype1::uri, timestamp 100, "
+ "size 48) => 0",
_sender.getCommands(true, true));
}
@@ -171,12 +171,12 @@ TEST_F(PutOperationTest, do_not_send_inline_split_if_not_configured) {
getConfig().setSplitCount(1024);
getConfig().setDoInlineSplit(false);
- addNodesToBucketDB(document::BucketId(0x4000000000002a52), "0=10000/10000/10000/t");
+ addNodesToBucketDB(document::BucketId(0x4000000000000593), "0=10000/10000/10000/t");
sendPut(createPut(createDummyDocument("test", "uri")));
- ASSERT_EQ("Put(BucketId(0x4000000000002a52), doc:test:uri, timestamp 100, "
- "size 35) => 0",
+ ASSERT_EQ("Put(BucketId(0x4000000000000593), id:test:testdoctype1::uri, timestamp 100, "
+ "size 48) => 0",
_sender.getCommands(true, true));
}
@@ -184,22 +184,22 @@ TEST_F(PutOperationTest, node_removed_on_reply) {
setupDistributor(2, 2, "storage:2 distributor:1");
createAndSendSampleDocument(180);
- ASSERT_EQ("Put(BucketId(0x4000000000008b13), "
- "doc:test:test, timestamp 100, size 36) => 1,"
- "Put(BucketId(0x4000000000008b13), "
- "doc:test:test, timestamp 100, size 36) => 0",
+ ASSERT_EQ("Put(BucketId(0x4000000000001dd4), "
+ "id:test:testdoctype1::, timestamp 100, size 45) => 0,"
+ "Put(BucketId(0x4000000000001dd4), "
+ "id:test:testdoctype1::, timestamp 100, size 45) => 1",
_sender.getCommands(true, true));
- getExternalOperationHandler().removeNodeFromDB(makeDocumentBucket(document::BucketId(16, 0x8b13)), 0);
+ getExternalOperationHandler().removeNodeFromDB(makeDocumentBucket(document::BucketId(16, 0x1dd4)), 0);
sendReply(0);
sendReply(1);
- ASSERT_EQ("PutReply(doc:test:test, BucketId(0x0000000000000000), "
+ ASSERT_EQ("PutReply(id:test:testdoctype1::, BucketId(0x0000000000000000), "
"timestamp 100) ReturnCode(BUCKET_DELETED, "
- "Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000008b13)) was deleted from nodes [0] "
+ "Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000001dd4)) was deleted from nodes [0] "
"after message was sent but before it was done. "
- "Sent to [1,0])",
+ "Sent to [0,1])",
_sender.getLastReply());
}
@@ -210,7 +210,7 @@ TEST_F(PutOperationTest, storage_failed) {
sendReply(-1, api::ReturnCode::INTERNAL_FAILURE);
- ASSERT_EQ("PutReply(doc:test:test, BucketId(0x0000000000000000), "
+ ASSERT_EQ("PutReply(id:test:testdoctype1::, BucketId(0x0000000000000000), "
"timestamp 100) ReturnCode(INTERNAL_FAILURE)",
_sender.getLastReply(true));
}
@@ -221,22 +221,22 @@ TEST_F(PutOperationTest, multiple_copies) {
Document::SP doc(createDummyDocument("test", "test"));
sendPut(createPut(doc));
- ASSERT_EQ("Create bucket => 3,Create bucket => 1,"
- "Create bucket => 0,Put => 3,Put => 1,Put => 0",
+ ASSERT_EQ("Create bucket => 3,Create bucket => 2,"
+ "Create bucket => 1,Put => 3,Put => 2,Put => 1",
_sender.getCommands(true));
for (uint32_t i = 0; i < 6; i++) {
sendReply(i);
}
- ASSERT_EQ("PutReply(doc:test:test, BucketId(0x0000000000000000), "
+ ASSERT_EQ("PutReply(id:test:testdoctype1::test, BucketId(0x0000000000000000), "
"timestamp 100) ReturnCode(NONE)",
_sender.getLastReply(true));
- ASSERT_EQ("BucketId(0x4000000000008b13) : "
+ ASSERT_EQ("BucketId(0x4000000000008f09) : "
"node(idx=3,crc=0x1,docs=2/4,bytes=3/5,trusted=true,active=false,ready=false), "
- "node(idx=1,crc=0x1,docs=2/4,bytes=3/5,trusted=true,active=false,ready=false), "
- "node(idx=0,crc=0x1,docs=2/4,bytes=3/5,trusted=true,active=false,ready=false)",
+ "node(idx=2,crc=0x1,docs=2/4,bytes=3/5,trusted=true,active=false,ready=false), "
+ "node(idx=1,crc=0x1,docs=2/4,bytes=3/5,trusted=true,active=false,ready=false)",
dumpBucket(getExternalOperationHandler().getBucketId(doc->getId())));
}
@@ -245,8 +245,8 @@ TEST_F(PutOperationTest, multiple_copies_early_return_primary_required) {
sendPut(createPut(createDummyDocument("test", "test")));
- ASSERT_EQ("Create bucket => 3,Create bucket => 1,"
- "Create bucket => 0,Put => 3,Put => 1,Put => 0",
+ ASSERT_EQ("Create bucket => 3,Create bucket => 2,"
+ "Create bucket => 1,Put => 3,Put => 2,Put => 1",
_sender.getCommands(true));
// Reply to 2 CreateBucket, including primary
@@ -258,7 +258,7 @@ TEST_F(PutOperationTest, multiple_copies_early_return_primary_required) {
sendReply(3 + i);
}
- ASSERT_EQ("PutReply(doc:test:test, BucketId(0x0000000000000000), "
+ ASSERT_EQ("PutReply(id:test:testdoctype1::test, BucketId(0x0000000000000000), "
"timestamp 100) ReturnCode(NONE)",
_sender.getLastReply());
}
@@ -268,8 +268,8 @@ TEST_F(PutOperationTest, multiple_copies_early_return_primary_not_required) {
sendPut(createPut(createDummyDocument("test", "test")));
- ASSERT_EQ("Create bucket => 3,Create bucket => 1,"
- "Create bucket => 0,Put => 3,Put => 1,Put => 0",
+ ASSERT_EQ("Create bucket => 3,Create bucket => 2,"
+ "Create bucket => 1,Put => 3,Put => 2,Put => 1",
_sender.getCommands(true));
// Reply only to 2 nodes (but not the primary)
@@ -280,7 +280,7 @@ TEST_F(PutOperationTest, multiple_copies_early_return_primary_not_required) {
sendReply(3 + i); // Put
}
- ASSERT_EQ("PutReply(doc:test:test, BucketId(0x0000000000000000), "
+ ASSERT_EQ("PutReply(id:test:testdoctype1::test, BucketId(0x0000000000000000), "
"timestamp 100) ReturnCode(NONE)",
_sender.getLastReply());
}
@@ -290,8 +290,8 @@ TEST_F(PutOperationTest, multiple_copies_early_return_primary_required_not_done)
sendPut(createPut(createDummyDocument("test", "test")));
- ASSERT_EQ("Create bucket => 3,Create bucket => 1,"
- "Create bucket => 0,Put => 3,Put => 1,Put => 0",
+ ASSERT_EQ("Create bucket => 3,Create bucket => 2,"
+ "Create bucket => 1,Put => 3,Put => 2,Put => 1",
_sender.getCommands(true));
// Reply only to 2 nodes (but not the primary)
@@ -309,8 +309,8 @@ TEST_F(PutOperationTest, do_not_revert_on_failure_after_early_return) {
sendPut(createPut(createDummyDocument("test", "test")));
- ASSERT_EQ("Create bucket => 3,Create bucket => 1,"
- "Create bucket => 0,Put => 3,Put => 1,Put => 0",
+ ASSERT_EQ("Create bucket => 3,Create bucket => 2,"
+ "Create bucket => 1,Put => 3,Put => 2,Put => 1",
_sender.getCommands(true));
for (uint32_t i = 0; i < 3; i++) {
@@ -320,14 +320,14 @@ TEST_F(PutOperationTest, do_not_revert_on_failure_after_early_return) {
sendReply(3 + i); // Put
}
- ASSERT_EQ("PutReply(doc:test:test, BucketId(0x0000000000000000), "
+ ASSERT_EQ("PutReply(id:test:testdoctype1::test, BucketId(0x0000000000000000), "
"timestamp 100) ReturnCode(NONE)",
_sender.getLastReply());
sendReply(5, api::ReturnCode::INTERNAL_FAILURE);
// Should not be any revert commands sent
- ASSERT_EQ("Create bucket => 3,Create bucket => 1,"
- "Create bucket => 0,Put => 3,Put => 1,Put => 0",
+ ASSERT_EQ("Create bucket => 3,Create bucket => 2,"
+ "Create bucket => 1,Put => 3,Put => 2,Put => 1",
_sender.getCommands(true));
}
@@ -336,7 +336,7 @@ TEST_F(PutOperationTest, revert_successful_copies_when_one_fails) {
createAndSendSampleDocument(180);
- ASSERT_EQ("Put => 3,Put => 1,Put => 0", _sender.getCommands(true));
+ ASSERT_EQ("Put => 0,Put => 2,Put => 1", _sender.getCommands(true));
for (uint32_t i = 0; i < 2; i++) {
sendReply(i);
@@ -344,12 +344,12 @@ TEST_F(PutOperationTest, revert_successful_copies_when_one_fails) {
sendReply(2, api::ReturnCode::INTERNAL_FAILURE);
- ASSERT_EQ("PutReply(doc:test:test, "
+ ASSERT_EQ("PutReply(id:test:testdoctype1::, "
"BucketId(0x0000000000000000), timestamp 100) "
"ReturnCode(INTERNAL_FAILURE)",
_sender.getLastReply(true));
- ASSERT_EQ("Revert => 3,Revert => 1", _sender.getCommands(true, false, 3));
+ ASSERT_EQ("Revert => 0,Revert => 2", _sender.getCommands(true, false, 3));
}
TEST_F(PutOperationTest, no_revert_if_revert_disabled) {
@@ -361,7 +361,7 @@ TEST_F(PutOperationTest, no_revert_if_revert_disabled) {
createAndSendSampleDocument(180);
- ASSERT_EQ("Put => 3,Put => 1,Put => 0", _sender.getCommands(true));
+ ASSERT_EQ("Put => 0,Put => 2,Put => 1", _sender.getCommands(true));
for (uint32_t i = 0; i < 2; i++) {
sendReply(i);
@@ -369,7 +369,7 @@ TEST_F(PutOperationTest, no_revert_if_revert_disabled) {
sendReply(2, api::ReturnCode::INTERNAL_FAILURE);
- ASSERT_EQ("PutReply(doc:test:test, "
+ ASSERT_EQ("PutReply(id:test:testdoctype1::, "
"BucketId(0x0000000000000000), timestamp 100) "
"ReturnCode(INTERNAL_FAILURE)",
_sender.getLastReply(true));
@@ -405,7 +405,7 @@ TEST_F(PutOperationTest, do_not_send_CreateBucket_if_already_pending) {
TEST_F(PutOperationTest, no_storage_nodes) {
setupDistributor(2, 1, "storage:0 distributor:1");
createAndSendSampleDocument(180);
- ASSERT_EQ("PutReply(doc:test:test, BucketId(0x0000000000000000), "
+ ASSERT_EQ("PutReply(id:test:testdoctype1::, BucketId(0x0000000000000000), "
"timestamp 100) ReturnCode(NOT_CONNECTED, "
"Can't store document: No storage nodes available)",
_sender.getLastReply(true));
@@ -492,10 +492,10 @@ PutOperationTest::getNodes(const std::string& infoString) {
TEST_F(PutOperationTest, target_nodes) {
setupDistributor(2, 6, "storage:6 distributor:1");
- // Ideal state of bucket is 1,3.
- ASSERT_EQ("target( 1 3 ) create( 1 3 )", getNodes(""));
- ASSERT_EQ("target( 1 3 ) create( 3 )", getNodes("1-1-true"));
- ASSERT_EQ("target( 1 3 ) create( 3 )", getNodes("1-1-false"));
+ // Ideal state of bucket is 1,2.
+ ASSERT_EQ("target( 1 2 ) create( 1 2 )", getNodes(""));
+ ASSERT_EQ("target( 1 2 ) create( 2 )", getNodes("1-1-true"));
+ ASSERT_EQ("target( 1 2 ) create( 2 )", getNodes("1-1-false"));
ASSERT_EQ("target( 3 4 5 ) create( )", getNodes("3-1-true,4-1-true,5-1-true"));
ASSERT_EQ("target( 3 4 ) create( )", getNodes("3-2-true,4-2-true,5-1-false"));
ASSERT_EQ("target( 1 3 4 ) create( )", getNodes("3-2-true,4-2-true,1-1-false"));
@@ -513,7 +513,7 @@ TEST_F(PutOperationTest, replica_not_resurrected_in_db_when_node_down_in_active_
sendPut(createPut(doc));
- ASSERT_EQ("Put => 1,Put => 0,Put => 2", _sender.getCommands(true));
+ ASSERT_EQ("Put => 1,Put => 2,Put => 0", _sender.getCommands(true));
enableDistributorClusterState("distributor:1 storage:3 .1.s:d .2.s:m");
addNodesToBucketDB(bId, "0=1/2/3/t"); // This will actually remove node #1.
@@ -522,8 +522,8 @@ TEST_F(PutOperationTest, replica_not_resurrected_in_db_when_node_down_in_active_
sendReply(1, api::ReturnCode::OK, api::BucketInfo(5, 6, 7));
sendReply(2, api::ReturnCode::OK, api::BucketInfo(7, 8, 9));
- ASSERT_EQ("BucketId(0x4000000000002a52) : "
- "node(idx=0,crc=0x5,docs=6/6,bytes=7/7,trusted=true,active=false,ready=false)",
+ ASSERT_EQ("BucketId(0x4000000000000593) : "
+ "node(idx=0,crc=0x7,docs=8/8,bytes=9/9,trusted=true,active=false,ready=false)",
dumpBucket(getExternalOperationHandler().getBucketId(doc->getId())));
}
@@ -535,7 +535,7 @@ TEST_F(PutOperationTest, replica_not_resurrected_in_db_when_node_down_in_pending
addNodesToBucketDB(bucket, "0=1/2/3/t,1=1/2/3/t,2=1/2/3/t");
sendPut(createPut(doc));
- ASSERT_EQ("Put => 1,Put => 0,Put => 2", _sender.getCommands(true));
+ ASSERT_EQ("Put => 1,Put => 2,Put => 0", _sender.getCommands(true));
// Trigger a pending (but not completed) cluster state transition where content
// node 0 is down. This will prune its replica from the DB. We assume that the
// downed node managed to send off a reply to the Put before it went down, and
@@ -555,7 +555,7 @@ TEST_F(PutOperationTest, replica_not_resurrected_in_db_when_node_down_in_pending
sendReply(1, api::ReturnCode::OK, api::BucketInfo(6, 7, 8));
sendReply(2, api::ReturnCode::OK, api::BucketInfo(9, 8, 7));
- ASSERT_EQ("BucketId(0x4000000000002a52) : "
+ ASSERT_EQ("BucketId(0x4000000000000593) : "
"node(idx=1,crc=0x5,docs=6/6,bytes=7/7,trusted=true,active=false,ready=false)",
dumpBucket(bucket));
}
@@ -574,7 +574,7 @@ TEST_F(PutOperationTest, put_is_failed_with_busy_if_target_down_in_pending_state
sendPut(createPut(doc));
EXPECT_EQ("", _sender.getCommands(true));
- EXPECT_EQ("PutReply(doc:test:test, BucketId(0x0000000000000000), "
+ EXPECT_EQ("PutReply(id:test:testdoctype1::test, BucketId(0x0000000000000000), "
"timestamp 100) ReturnCode(BUSY, "
"One or more target content nodes are unavailable in the pending cluster state)",
_sender.getLastReply(true));
diff --git a/storage/src/tests/distributor/removeoperationtest.cpp b/storage/src/tests/distributor/removeoperationtest.cpp
index bae2395bfa7..c3fcda30bf5 100644
--- a/storage/src/tests/distributor/removeoperationtest.cpp
+++ b/storage/src/tests/distributor/removeoperationtest.cpp
@@ -22,7 +22,7 @@ struct RemoveOperationTest : Test, DistributorTestUtil {
void SetUp() override {
createLinks();
- docId = document::DocumentId(document::DocIdString("test", "uri"));
+ docId = document::DocumentId("id:test:test::uri");
bucketId = getExternalOperationHandler().getBucketId(docId);
enableDistributorClusterState("distributor:1 storage:4");
};
@@ -57,8 +57,7 @@ struct RemoveOperationTest : Test, DistributorTestUtil {
std::unique_ptr<api::StorageReply> reply(removec->makeReply());
auto* removeR = static_cast<api::RemoveReply*>(reply.get());
removeR->setOldTimestamp(oldTimestamp);
- callback.onReceive(_sender,
- std::shared_ptr<api::StorageReply>(reply.release()));
+ callback.onReceive(_sender, std::shared_ptr<api::StorageReply>(reply.release()));
}
void sendRemove() {
@@ -71,13 +70,13 @@ TEST_F(RemoveOperationTest, simple) {
sendRemove();
- ASSERT_EQ("Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ ASSERT_EQ("Remove(BucketId(0x4000000000000593), id:test:test::uri, "
"timestamp 100) => 1",
_sender.getLastCommand());
replyToMessage(*op, -1, 34);
- ASSERT_EQ("RemoveReply(BucketId(0x0000000000000000), doc:test:uri, "
+ ASSERT_EQ("RemoveReply(BucketId(0x0000000000000000), id:test:test::uri, "
"timestamp 100, removed doc from 34) ReturnCode(NONE)",
_sender.getLastReply());
}
@@ -87,13 +86,13 @@ TEST_F(RemoveOperationTest, not_found) {
sendRemove();
- ASSERT_EQ("Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ ASSERT_EQ("Remove(BucketId(0x4000000000000593), id:test:test::uri, "
"timestamp 100) => 1",
_sender.getLastCommand());
replyToMessage(*op, -1, 0);
- ASSERT_EQ("RemoveReply(BucketId(0x0000000000000000), doc:test:uri, "
+ ASSERT_EQ("RemoveReply(BucketId(0x0000000000000000), id:test:test::uri, "
"timestamp 100, not found) ReturnCode(NONE)",
_sender.getLastReply());
}
@@ -103,13 +102,13 @@ TEST_F(RemoveOperationTest, storage_failure) {
sendRemove();
- ASSERT_EQ("Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ ASSERT_EQ("Remove(BucketId(0x4000000000000593), id:test:test::uri, "
"timestamp 100) => 1",
_sender.getLastCommand());
sendReply(*op, -1, api::ReturnCode::INTERNAL_FAILURE);
- ASSERT_EQ("RemoveReply(BucketId(0x0000000000000000), doc:test:uri, "
+ ASSERT_EQ("RemoveReply(BucketId(0x0000000000000000), id:test:test::uri, "
"timestamp 100, not found) ReturnCode(INTERNAL_FAILURE)",
_sender.getLastReply());
}
@@ -118,7 +117,7 @@ TEST_F(RemoveOperationTest, not_in_db) {
sendRemove();
ASSERT_EQ("RemoveReply(BucketId(0x0000000000000000), "
- "doc:test:uri, timestamp 100, not found) ReturnCode(NONE)",
+ "id:test:test::uri, timestamp 100, not found) ReturnCode(NONE)",
_sender.getLastReply());
}
@@ -127,11 +126,11 @@ TEST_F(RemoveOperationTest, multiple_copies) {
sendRemove();
- ASSERT_EQ("Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ ASSERT_EQ("Remove(BucketId(0x4000000000000593), id:test:test::uri, "
"timestamp 100) => 1,"
- "Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ "Remove(BucketId(0x4000000000000593), id:test:test::uri, "
"timestamp 100) => 2,"
- "Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ "Remove(BucketId(0x4000000000000593), id:test:test::uri, "
"timestamp 100) => 3",
_sender.getCommands(true, true));
@@ -140,7 +139,7 @@ TEST_F(RemoveOperationTest, multiple_copies) {
replyToMessage(*op, 2, 75);
ASSERT_EQ("RemoveReply(BucketId(0x0000000000000000), "
- "doc:test:uri, timestamp 100, removed doc from 75) ReturnCode(NONE)",
+ "id:test:test::uri, timestamp 100, removed doc from 75) ReturnCode(NONE)",
_sender.getLastReply());
}
@@ -149,7 +148,7 @@ TEST_F(RemoveOperationTest, can_send_remove_when_all_replica_nodes_retired) {
addNodesToBucketDB(bucketId, "0=123");
sendRemove();
- ASSERT_EQ("Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ ASSERT_EQ("Remove(BucketId(0x4000000000000593), id:test:test::uri, "
"timestamp 100) => 0",
_sender.getLastCommand());
}
diff --git a/storage/src/tests/distributor/twophaseupdateoperationtest.cpp b/storage/src/tests/distributor/twophaseupdateoperationtest.cpp
index edb5261fbfa..df9bf683326 100644
--- a/storage/src/tests/distributor/twophaseupdateoperationtest.cpp
+++ b/storage/src/tests/distributor/twophaseupdateoperationtest.cpp
@@ -197,8 +197,7 @@ TwoPhaseUpdateOperationTest::replyToGet(
std::shared_ptr<api::StorageReply> reply;
if (haveDocument) {
- auto doc(std::make_shared<Document>(
- *_doc_type, DocumentId(DocIdString("test", "test"))));
+ auto doc(std::make_shared<Document>(*_doc_type, DocumentId("id:ns:" + _doc_type->getName() + "::1")));
doc->setValue("headerval", IntFieldValue(oldTimestamp));
reply = std::make_shared<api::GetReply>(get, doc, oldTimestamp);
@@ -229,7 +228,7 @@ TwoPhaseUpdateOperationTest::sendUpdate(const std::string& bucketState,
if (!options._withError) {
update = std::make_shared<document::DocumentUpdate>(
*_repo, *_doc_type,
- document::DocumentId(document::DocIdString("test", "test")));
+ document::DocumentId("id:ns:" + _doc_type->getName() + "::1"));
document::FieldUpdate fup(_doc_type->getField("headerval"));
fup.addUpdate(ArithmeticValueUpdate(ArithmeticValueUpdate::Add, 10));
update->addUpdate(fup);
@@ -239,7 +238,7 @@ TwoPhaseUpdateOperationTest::sendUpdate(const std::string& bucketState,
auto* badDocType = _repo->getDocumentType("testdoctype2");
update = std::make_shared<document::DocumentUpdate>(
*_repo, *badDocType,
- document::DocumentId(document::DocIdString("test", "test")));
+ document::DocumentId("id:ns:" + _doc_type->getName() + "::1"));
document::FieldUpdate fup(badDocType->getField("onlyinchild"));
fup.addUpdate(ArithmeticValueUpdate(ArithmeticValueUpdate::Add, 10));
update->addUpdate(fup);
@@ -285,7 +284,7 @@ TEST_F(TwoPhaseUpdateOperationTest, simple) {
replyToMessage(*cb, sender, 0, 90);
- EXPECT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, BucketId(0x0000000000000000), "
"timestamp 0, timestamp of updated doc: 90) ReturnCode(NONE)",
sender.getLastReply(true));
}
@@ -297,7 +296,7 @@ TEST_F(TwoPhaseUpdateOperationTest, non_existing) {
DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- EXPECT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, BucketId(0x0000000000000000), "
"timestamp 0, timestamp of updated doc: 0) ReturnCode(NONE)",
sender.getLastReply(true));
}
@@ -313,7 +312,7 @@ TEST_F(TwoPhaseUpdateOperationTest, update_failed) {
replyToMessage(*cb, sender, 0, 90, api::ReturnCode::INTERNAL_FAILURE);
- EXPECT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, BucketId(0x0000000000000000), "
"timestamp 0, timestamp of updated doc: 0) "
"ReturnCode(INTERNAL_FAILURE)",
sender.getLastReply(true));
@@ -331,20 +330,18 @@ TEST_F(TwoPhaseUpdateOperationTest, fast_path_inconsistent_timestamps) {
replyToMessage(*cb, sender, 0, 90);
replyToMessage(*cb, sender, 1, 110);
- ASSERT_EQ("Get(BucketId(0x4000000000008b13), doc:test:test) => 1",
- sender.getLastCommand(true));
+ ASSERT_EQ("Get(BucketId(0x400000000000cac4), id:ns:testdoctype1::1) => 1", sender.getLastCommand(true));
replyToGet(*cb, sender, 2, 110);
- ASSERT_EQ("Update => 0,Update => 1,Get => 1,Put => 1,Put => 0",
- sender.getCommands(true));
+ ASSERT_EQ("Update => 0,Update => 1,Get => 1,Put => 1,Put => 0", sender.getCommands(true));
ASSERT_TRUE(sender.replies().empty());
replyToPut(*cb, sender, 3);
replyToPut(*cb, sender, 4);
- EXPECT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, BucketId(0x0000000000000000), "
"timestamp 0, timestamp of updated doc: 110 Was inconsistent "
"(best node 1)) ReturnCode(NONE)",
sender.getLastReply(true));
@@ -362,13 +359,12 @@ TEST_F(TwoPhaseUpdateOperationTest, fast_path_inconsistent_timestamps_not_found)
replyToMessage(*cb, sender, 0, 90);
replyToMessage(*cb, sender, 1, 110);
- ASSERT_EQ("Get(BucketId(0x4000000000008b13), doc:test:test) => 1",
- sender.getLastCommand(true));
+ ASSERT_EQ("Get(BucketId(0x400000000000cac4), id:ns:testdoctype1::1) => 1", sender.getLastCommand(true));
ASSERT_TRUE(sender.replies().empty());
replyToGet(*cb, sender, 2, 110, false);
- EXPECT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, BucketId(0x0000000000000000), "
"timestamp 0, timestamp of updated doc: 110 Was inconsistent "
"(best node 1)) ReturnCode(INTERNAL_FAILURE)",
sender.getLastReply(true));
@@ -387,7 +383,7 @@ TEST_F(TwoPhaseUpdateOperationTest, fast_path_inconsistent_timestamps_update_err
ASSERT_TRUE(sender.replies().empty());
replyToMessage(*cb, sender, 1, 110, api::ReturnCode::IO_FAILURE);
- EXPECT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, BucketId(0x0000000000000000), "
"timestamp 0, timestamp of updated doc: 90) "
"ReturnCode(IO_FAILURE)",
sender.getLastReply(true));
@@ -405,13 +401,13 @@ TEST_F(TwoPhaseUpdateOperationTest, fast_path_inconsistent_timestamps_get_error)
replyToMessage(*cb, sender, 0, 90);
replyToMessage(*cb, sender, 1, 110);
- ASSERT_EQ("Get(BucketId(0x4000000000008b13), doc:test:test) => 1",
+ ASSERT_EQ("Get(BucketId(0x400000000000cac4), id:ns:testdoctype1::1) => 1",
sender.getLastCommand(true));
ASSERT_TRUE(sender.replies().empty());
replyToGet(*cb, sender, 2, 110, false, api::ReturnCode::IO_FAILURE);
- EXPECT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, BucketId(0x0000000000000000), "
"timestamp 0, timestamp of updated doc: 110 Was inconsistent "
"(best node 1)) ReturnCode(IO_FAILURE)",
sender.getLastReply(true));
@@ -429,7 +425,7 @@ TEST_F(TwoPhaseUpdateOperationTest, fast_path_inconsistent_timestamps_put_error)
replyToMessage(*cb, sender, 0, 90);
replyToMessage(*cb, sender, 1, 110);
- ASSERT_EQ("Get(BucketId(0x4000000000008b13), doc:test:test) => 1",
+ ASSERT_EQ("Get(BucketId(0x400000000000cac4), id:ns:testdoctype1::1) => 1",
sender.getLastCommand(true));
replyToGet(*cb, sender, 2, 110);
@@ -441,7 +437,7 @@ TEST_F(TwoPhaseUpdateOperationTest, fast_path_inconsistent_timestamps_put_error)
ASSERT_TRUE(sender.replies().empty());
replyToPut(*cb, sender, 4);
- EXPECT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, BucketId(0x0000000000000000), "
"timestamp 0, timestamp of updated doc: 110 Was inconsistent "
"(best node 1)) ReturnCode(IO_FAILURE)",
sender.getLastReply(true));
@@ -459,7 +455,7 @@ TEST_F(TwoPhaseUpdateOperationTest, fast_path_inconsistent_timestamps_put_not_st
replyToMessage(*cb, sender, 0, 90);
replyToMessage(*cb, sender, 1, 110);
- ASSERT_EQ("Get(BucketId(0x4000000000008b13), doc:test:test) => 1",
+ ASSERT_EQ("Get(BucketId(0x400000000000cac4), id:ns:testdoctype1::1) => 1",
sender.getLastCommand(true));
checkMessageSettingsPropagatedTo(sender.commands().back());
@@ -467,7 +463,7 @@ TEST_F(TwoPhaseUpdateOperationTest, fast_path_inconsistent_timestamps_put_not_st
ASSERT_TRUE(sender.replies().empty());
replyToGet(*cb, sender, 2, 110);
- EXPECT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, BucketId(0x0000000000000000), "
"timestamp 0, timestamp of updated doc: 110 Was inconsistent "
"(best node 1)) ReturnCode(NOT_CONNECTED, "
"Can't store document: No storage nodes available)",
@@ -483,8 +479,8 @@ TEST_F(TwoPhaseUpdateOperationTest, fast_path_inconsistent_timestamps_inconsiste
DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- std::string wanted("Get(BucketId(0x4000000000008b13), doc:test:test) => 0,"
- "Get(BucketId(0x4400000000008b13), doc:test:test) => 0");
+ std::string wanted("Get(BucketId(0x400000000000cac4), id:ns:testdoctype1::1) => 0,"
+ "Get(BucketId(0x440000000000cac4), id:ns:testdoctype1::1) => 0");
std::string text = sender.getCommands(true, true);
ASSERT_EQ(wanted, text);
@@ -492,17 +488,17 @@ TEST_F(TwoPhaseUpdateOperationTest, fast_path_inconsistent_timestamps_inconsiste
replyToGet(*cb, sender, 0, 90);
replyToGet(*cb, sender, 1, 120);
- ASSERT_EQ("Put(BucketId(0x4400000000008b13), doc:test:test, "
- "timestamp 200000000, size 52) => 1,"
- "Put(BucketId(0x4400000000008b13), doc:test:test, "
- "timestamp 200000000, size 52) => 0",
+ ASSERT_EQ("Put(BucketId(0x440000000000cac4), id:ns:testdoctype1::1, "
+ "timestamp 200000000, size 60) => 1,"
+ "Put(BucketId(0x440000000000cac4), id:ns:testdoctype1::1, "
+ "timestamp 200000000, size 60) => 0",
sender.getCommands(true, true, 2));
replyToPut(*cb, sender, 2);
ASSERT_TRUE(sender.replies().empty());
replyToPut(*cb, sender, 3);
- EXPECT_EQ("UpdateReply(doc:test:test, "
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, "
"BucketId(0x0000000000000000), "
"timestamp 0, timestamp of updated doc: 120) "
"ReturnCode(NONE)",
@@ -543,7 +539,7 @@ TEST_F(TwoPhaseUpdateOperationTest, n_of_m) {
ASSERT_TRUE(sender.replies().empty());
replyToMessage(*cb, sender, 0, 90);
- EXPECT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, BucketId(0x0000000000000000), "
"timestamp 0, timestamp of updated doc: 90) ReturnCode(NONE)",
sender.getLastReply(true));
@@ -569,18 +565,15 @@ TEST_F(TwoPhaseUpdateOperationTest, safe_path_updates_newest_received_document)
DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- ASSERT_EQ("Get(BucketId(0x4000000000008b13), doc:test:test) => 0,"
- "Get(BucketId(0x4000000000008b13), doc:test:test) => 2",
+ ASSERT_EQ("Get(BucketId(0x400000000000cac4), id:ns:testdoctype1::1) => 0,"
+ "Get(BucketId(0x400000000000cac4), id:ns:testdoctype1::1) => 2",
sender.getCommands(true, true));
replyToGet(*cb, sender, 0, 50);
replyToGet(*cb, sender, 1, 70);
- ASSERT_EQ("Put(BucketId(0x4000000000008b13), doc:test:test, "
- "timestamp 200000000, size 52) => 1,"
- "Put(BucketId(0x4000000000008b13), doc:test:test, "
- "timestamp 200000000, size 52) => 0,"
- "Put(BucketId(0x4000000000008b13), doc:test:test, "
- "timestamp 200000000, size 52) => 2",
+ ASSERT_EQ("Put(BucketId(0x400000000000cac4), id:ns:testdoctype1::1, timestamp 200000000, size 60) => 1,"
+ "Put(BucketId(0x400000000000cac4), id:ns:testdoctype1::1, timestamp 200000000, size 60) => 2,"
+ "Put(BucketId(0x400000000000cac4), id:ns:testdoctype1::1, timestamp 200000000, size 60) => 0",
sender.getCommands(true, true, 2));
// Make sure Put contains an updated document (+10 arith. update on field
// whose value equals gotten timestamp). In this case we want 70 -> 80.
@@ -591,7 +584,7 @@ TEST_F(TwoPhaseUpdateOperationTest, safe_path_updates_newest_received_document)
ASSERT_TRUE(sender.replies().empty());
replyToPut(*cb, sender, 4);
- EXPECT_EQ("UpdateReply(doc:test:test, "
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, "
"BucketId(0x0000000000000000), "
"timestamp 0, timestamp of updated doc: 70) "
"ReturnCode(NONE)",
@@ -612,12 +605,9 @@ TEST_F(TwoPhaseUpdateOperationTest, create_if_non_existent_creates_document_if_a
replyToGet(*cb, sender, 1, 0, false);
// Since create-if-non-existent is set, distributor should create doc from
// scratch.
- ASSERT_EQ("Put(BucketId(0x4000000000008b13), doc:test:test, "
- "timestamp 200000000, size 52) => 1,"
- "Put(BucketId(0x4000000000008b13), doc:test:test, "
- "timestamp 200000000, size 52) => 0,"
- "Put(BucketId(0x4000000000008b13), doc:test:test, "
- "timestamp 200000000, size 52) => 2",
+ ASSERT_EQ("Put(BucketId(0x400000000000cac4), id:ns:testdoctype1::1, timestamp 200000000, size 60) => 1,"
+ "Put(BucketId(0x400000000000cac4), id:ns:testdoctype1::1, timestamp 200000000, size 60) => 2,"
+ "Put(BucketId(0x400000000000cac4), id:ns:testdoctype1::1, timestamp 200000000, size 60) => 0",
sender.getCommands(true, true, 2));
ASSERT_EQ("10", getUpdatedValueFromLastPut(sender));
@@ -627,7 +617,7 @@ TEST_F(TwoPhaseUpdateOperationTest, create_if_non_existent_creates_document_if_a
ASSERT_TRUE(sender.replies().empty());
replyToPut(*cb, sender, 4);
- EXPECT_EQ("UpdateReply(doc:test:test, "
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, "
"BucketId(0x0000000000000000), "
"timestamp 0, timestamp of updated doc: 200000000) "
"ReturnCode(NONE)",
@@ -648,14 +638,14 @@ TEST_F(TwoPhaseUpdateOperationTest, update_fails_if_safe_path_has_failed_put) {
replyToGet(*cb, sender, 1, 0, false);
// Since create-if-non-existent is set, distributor should create doc from
// scratch.
- ASSERT_EQ("Put => 1,Put => 0,Put => 2", sender.getCommands(true, false, 2));
+ ASSERT_EQ("Put => 1,Put => 2,Put => 0", sender.getCommands(true, false, 2));
replyToPut(*cb, sender, 2);
replyToPut(*cb, sender, 3);
ASSERT_TRUE(sender.replies().empty());
replyToPut(*cb, sender, 4, api::ReturnCode::IO_FAILURE);
- EXPECT_EQ("UpdateReply(doc:test:test, "
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, "
"BucketId(0x0000000000000000), "
"timestamp 0, timestamp of updated doc: 200000000) "
"ReturnCode(IO_FAILURE)",
@@ -675,7 +665,7 @@ TEST_F(TwoPhaseUpdateOperationTest, update_fails_if_safe_path_gets_fail) {
replyToGet(*cb, sender, 0, 0, false, api::ReturnCode::IO_FAILURE);
ASSERT_TRUE(sender.replies().empty());
replyToGet(*cb, sender, 1, 0, false, api::ReturnCode::IO_FAILURE);
- EXPECT_EQ("UpdateReply(doc:test:test, "
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, "
"BucketId(0x0000000000000000), "
"timestamp 0, timestamp of updated doc: 0) "
"ReturnCode(IO_FAILURE)",
@@ -696,7 +686,7 @@ TEST_F(TwoPhaseUpdateOperationTest, update_fails_if_apply_throws_exception) {
ASSERT_TRUE(sender.replies().empty());
replyToGet(*cb, sender, 1, 70);
- EXPECT_EQ("UpdateReply(doc:test:test, "
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, "
"BucketId(0x0000000000000000), "
"timestamp 0, timestamp of updated doc: 70) "
"ReturnCode(INTERNAL_FAILURE, Can not apply a "
@@ -713,10 +703,10 @@ TEST_F(TwoPhaseUpdateOperationTest, non_existing_with_auto_create) {
DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- ASSERT_EQ("CreateBucketCommand(BucketId(0x4000000000008b13), active) "
+ ASSERT_EQ("CreateBucketCommand(BucketId(0x400000000000cac4), active) "
"Reasons to start: => 0,"
- "Put(BucketId(0x4000000000008b13), doc:test:test, "
- "timestamp 200000000, size 52) => 0",
+ "Put(BucketId(0x400000000000cac4), id:ns:testdoctype1::1, "
+ "timestamp 200000000, size 60) => 0",
sender.getCommands(true, true));
ASSERT_EQ("10", getUpdatedValueFromLastPut(sender));
@@ -725,7 +715,7 @@ TEST_F(TwoPhaseUpdateOperationTest, non_existing_with_auto_create) {
ASSERT_TRUE(sender.replies().empty());
replyToPut(*cb, sender, 1);
- EXPECT_EQ("UpdateReply(doc:test:test, "
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, "
"BucketId(0x0000000000000000), "
"timestamp 0, timestamp of updated doc: 200000000) "
"ReturnCode(NONE)",
@@ -745,7 +735,7 @@ TEST_F(TwoPhaseUpdateOperationTest, safe_path_fails_update_when_mismatching_time
replyToGet(*cb, sender, 0, 100);
ASSERT_TRUE(sender.replies().empty());
replyToGet(*cb, sender, 1, 110);
- EXPECT_EQ("UpdateReply(doc:test:test, "
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, "
"BucketId(0x0000000000000000), "
"timestamp 0, timestamp of updated doc: 0) "
"ReturnCode(NONE, No document with requested "
@@ -755,8 +745,7 @@ TEST_F(TwoPhaseUpdateOperationTest, safe_path_fails_update_when_mismatching_time
TEST_F(TwoPhaseUpdateOperationTest, safe_path_update_propagates_message_settings_to_gets_and_puts) {
setupDistributor(3, 3, "storage:3 distributor:1");
- std::shared_ptr<TwoPhaseUpdateOperation> cb(
- sendUpdate("0=1/2/3,1=1/2/3,2=2/3/4"));
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3,1=1/2/3,2=2/3/4"));
DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
@@ -765,7 +754,7 @@ TEST_F(TwoPhaseUpdateOperationTest, safe_path_update_propagates_message_settings
checkMessageSettingsPropagatedTo(sender.command(1));
replyToGet(*cb, sender, 0, 50);
replyToGet(*cb, sender, 1, 70);
- ASSERT_EQ("Put => 1,Put => 0,Put => 2", sender.getCommands(true, false, 2));
+ ASSERT_EQ("Put => 1,Put => 2,Put => 0", sender.getCommands(true, false, 2));
checkMessageSettingsPropagatedTo(sender.command(2));
checkMessageSettingsPropagatedTo(sender.command(3));
checkMessageSettingsPropagatedTo(sender.command(4));
@@ -776,16 +765,14 @@ TEST_F(TwoPhaseUpdateOperationTest, safe_path_update_propagates_message_settings
TEST_F(TwoPhaseUpdateOperationTest, safe_path_propagates_mbus_traces_from_replies) {
setupDistributor(3, 3, "storage:3 distributor:1");
- std::shared_ptr<TwoPhaseUpdateOperation> cb(
- sendUpdate("0=1/2/3,1=1/2/3,2=2/3/4"));
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3,1=1/2/3,2=2/3/4"));
DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
ASSERT_EQ("Get => 0,Get => 2", sender.getCommands(true));
- replyToGet(*cb, sender, 0, 50, true,
- api::ReturnCode::OK, "hello earthlings");
+ replyToGet(*cb, sender, 0, 50, true, api::ReturnCode::OK, "hello earthlings");
replyToGet(*cb, sender, 1, 70);
- ASSERT_EQ("Put => 1,Put => 0,Put => 2", sender.getCommands(true, false, 2));
+ ASSERT_EQ("Put => 1,Put => 2,Put => 0", sender.getCommands(true, false, 2));
replyToPut(*cb, sender, 2, api::ReturnCode::OK, "fooo");
replyToPut(*cb, sender, 3, api::ReturnCode::OK, "baaa");
ASSERT_TRUE(sender.replies().empty());
@@ -803,8 +790,7 @@ TEST_F(TwoPhaseUpdateOperationTest, update_fails_if_ownership_changes_between_ge
setupDistributor(2, 2, "storage:2 distributor:1");
// Update towards inconsistent bucket invokes safe path.
- std::shared_ptr<TwoPhaseUpdateOperation> cb(
- sendUpdate("0=1/2/3,1=2/3/4"));
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3,1=2/3/4"));
DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
@@ -823,7 +809,7 @@ TEST_F(TwoPhaseUpdateOperationTest, update_fails_if_ownership_changes_between_ge
// BUCKET_NOT_FOUND is a transient error code which should cause the client
// to re-send the operation, presumably to the correct distributor the next
// time.
- EXPECT_EQ("UpdateReply(doc:test:test, "
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, "
"BucketId(0x0000000000000000), "
"timestamp 0, timestamp of updated doc: 70) "
"ReturnCode(BUCKET_NOT_FOUND, Distributor lost "
@@ -843,7 +829,7 @@ TEST_F(TwoPhaseUpdateOperationTest, safe_path_condition_mismatch_fails_with_tas_
// Newest doc has headerval==110, not 120.
replyToGet(*cb, sender, 0, 100);
replyToGet(*cb, sender, 1, 110);
- EXPECT_EQ("UpdateReply(doc:test:test, "
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, "
"BucketId(0x0000000000000000), "
"timestamp 0, timestamp of updated doc: 0) "
"ReturnCode(TEST_AND_SET_CONDITION_FAILED, "
@@ -877,7 +863,7 @@ TEST_F(TwoPhaseUpdateOperationTest, safe_path_condition_parse_failure_fails_with
// NOTE: condition is currently not attempted parsed until Gets have been
// replied to. This may change in the future.
// XXX reliance on parser/exception error message is very fragile.
- EXPECT_EQ("UpdateReply(doc:test:test, "
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, "
"BucketId(0x0000000000000000), "
"timestamp 0, timestamp of updated doc: 0) "
"ReturnCode(ILLEGAL_PARAMETERS, "
@@ -899,7 +885,7 @@ TEST_F(TwoPhaseUpdateOperationTest, safe_path_condition_unknown_doc_type_fails_w
replyToGet(*cb, sender, 1, 110);
// NOTE: condition is currently not attempted parsed until Gets have been
// replied to. This may change in the future.
- EXPECT_EQ("UpdateReply(doc:test:test, "
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, "
"BucketId(0x0000000000000000), "
"timestamp 0, timestamp of updated doc: 0) "
"ReturnCode(ILLEGAL_PARAMETERS, "
@@ -920,7 +906,7 @@ TEST_F(TwoPhaseUpdateOperationTest, safe_path_condition_with_missing_doc_and_no_
// Both Gets return nothing at all, nothing at all.
replyToGet(*cb, sender, 0, 100, false);
replyToGet(*cb, sender, 1, 110, false);
- EXPECT_EQ("UpdateReply(doc:test:test, "
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, "
"BucketId(0x0000000000000000), "
"timestamp 0, timestamp of updated doc: 0) "
"ReturnCode(TEST_AND_SET_CONDITION_FAILED, "
@@ -976,8 +962,7 @@ TEST_F(TwoPhaseUpdateOperationTest, fast_path_close_edge_sends_correct_reply) {
TEST_F(TwoPhaseUpdateOperationTest, safe_path_close_edge_sends_correct_reply) {
setupDistributor(2, 2, "storage:2 distributor:1");
- std::shared_ptr<TwoPhaseUpdateOperation> cb(
- sendUpdate("0=1/2/3,1=2/3/4")); // Inconsistent replicas.
+ std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3,1=2/3/4")); // Inconsistent replicas.
DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
diff --git a/storage/src/tests/distributor/updateoperationtest.cpp b/storage/src/tests/distributor/updateoperationtest.cpp
index 7cf3ea0ad18..6bc000f6780 100644
--- a/storage/src/tests/distributor/updateoperationtest.cpp
+++ b/storage/src/tests/distributor/updateoperationtest.cpp
@@ -55,7 +55,7 @@ UpdateOperationTest::sendUpdate(const std::string& bucketState)
{
auto update = std::make_shared<document::DocumentUpdate>(
*_repo, *_html_type,
- document::DocumentId(document::DocIdString("test", "test")));
+ document::DocumentId("id:ns:" + _html_type->getName() + "::1"));
_bId = getExternalOperationHandler().getBucketId(update->getId());
@@ -95,7 +95,7 @@ TEST_F(UpdateOperationTest, simple) {
replyToMessage(*cb, sender, 0, 90);
- ASSERT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ ASSERT_EQ("UpdateReply(id:ns:text/html::1, BucketId(0x0000000000000000), "
"timestamp 100, timestamp of updated doc: 90) ReturnCode(NONE)",
sender.getLastReply(true));
@@ -114,7 +114,7 @@ TEST_F(UpdateOperationTest, not_found) {
replyToMessage(*cb, sender, 0, 0);
- EXPECT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ EXPECT_EQ("UpdateReply(id:ns:text/html::1, BucketId(0x0000000000000000), "
"timestamp 100, timestamp of updated doc: 0) ReturnCode(NONE)",
sender.getLastReply(true));
}
@@ -130,7 +130,7 @@ TEST_F(UpdateOperationTest, multi_node) {
replyToMessage(*cb, sender, 0, 120);
replyToMessage(*cb, sender, 1, 120);
- ASSERT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ ASSERT_EQ("UpdateReply(id:ns:text/html::1, BucketId(0x0000000000000000), "
"timestamp 100, timestamp of updated doc: 120) ReturnCode(NONE)",
sender.getLastReply(true));
@@ -154,7 +154,7 @@ TEST_F(UpdateOperationTest, multi_node_inconsistent_timestamp) {
replyToMessage(*cb, sender, 0, 119);
replyToMessage(*cb, sender, 1, 120);
- ASSERT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ ASSERT_EQ("UpdateReply(id:ns:text/html::1, BucketId(0x0000000000000000), "
"timestamp 100, timestamp of updated doc: 120 Was inconsistent "
"(best node 1)) ReturnCode(NONE)",
sender.getLastReply(true));
diff --git a/storage/src/tests/frameworkimpl/status/statustest.cpp b/storage/src/tests/frameworkimpl/status/statustest.cpp
index e7d0d496cc8..81d91e2f08a 100644
--- a/storage/src/tests/frameworkimpl/status/statustest.cpp
+++ b/storage/src/tests/frameworkimpl/status/statustest.cpp
@@ -115,6 +115,12 @@ TEST_F(StatusTest, index_status_page) {
"Connection: close\r\n"
"Content-Type: text\\/html\r\n"
"Content-Length: [0-9]+\r\n"
+ "X-XSS-Protection: 1; mode=block\r\n"
+ "X-Frame-Options: DENY\r\n"
+ "Content-Security-Policy: default-src 'none'\r\n"
+ "X-Content-Type-Options: nosniff\r\n"
+ "Cache-Control: no-store\r\n"
+ "Pragma: no-cache\r\n"
"\r\n"
"<html>\n"
"<head>\n"
@@ -144,6 +150,12 @@ TEST_F(StatusTest, html_status) {
"Connection: close\r\n"
"Content-Type: text/html\r\n"
"Content-Length: 117\r\n"
+ "X-XSS-Protection: 1; mode=block\r\n"
+ "X-Frame-Options: DENY\r\n"
+ "Content-Security-Policy: default-src 'none'\r\n"
+ "X-Content-Type-Options: nosniff\r\n"
+ "Cache-Control: no-store\r\n"
+ "Pragma: no-cache\r\n"
"\r\n"
"<html>\n"
"<head>\n"
@@ -170,6 +182,12 @@ TEST_F(StatusTest, xml_sStatus) {
"Connection: close\r\n"
"Content-Type: application/xml\r\n"
"Content-Length: 100\r\n"
+ "X-XSS-Protection: 1; mode=block\r\n"
+ "X-Frame-Options: DENY\r\n"
+ "Content-Security-Policy: default-src 'none'\r\n"
+ "X-Content-Type-Options: nosniff\r\n"
+ "Cache-Control: no-store\r\n"
+ "Pragma: no-cache\r\n"
"\r\n"
"<?xml version=\"1.0\"?>\n"
"<status id=\"fooid\" name=\"Foo impl\">\n"
diff --git a/storage/src/tests/persistence/filestorage/filestormanagertest.cpp b/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
index 2a3e72b48b7..44cb92071a1 100644
--- a/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
+++ b/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
@@ -474,7 +474,7 @@ TEST_F(FileStorManagerTest, flush) {
api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
// Creating a document to test with
- document::DocumentId docId("doc:crawler:http://www.ntnu.no/");
+ document::DocumentId docId("id:ns:testdoctype1::crawler:http://www.ntnu.no/");
auto doc = std::make_shared<Document>(*_testdoctype1, docId);
document::BucketId bid(4000);
@@ -1767,7 +1767,7 @@ TEST_F(FileStorManagerTest, no_timestamps) {
"storage", lib::NodeType::STORAGE, 3);
// Creating a document to test with
Document::SP doc(createDocument(
- "some content", "doc:crawler:http://www.ntnu.no/").release());
+ "some content", "id:ns:testdoctype1::crawler:http://www.ntnu.no/").release());
document::BucketId bid(16, 4000);
createBucket(bid, 0);
diff --git a/storage/src/tests/persistence/testandsettest.cpp b/storage/src/tests/persistence/testandsettest.cpp
index 4c4a7c9a0be..fe10d346ba7 100644
--- a/storage/src/tests/persistence/testandsettest.cpp
+++ b/storage/src/tests/persistence/testandsettest.cpp
@@ -235,20 +235,6 @@ TEST_F(TestAndSetTest, conditional_put_to_non_existing_document_should_fail) {
EXPECT_EQ("", dumpBucket(BUCKET_ID));
}
-TEST_F(TestAndSetTest, document_with_no_type_should_fail) {
- // Conditionally replace nonexisting document
- // Fail since no document exists to match with test and set
- api::Timestamp timestamp = 0;
- document::DocumentId legacyDocId("doc:mail:3619.html");
- api::RemoveCommand remove(makeDocumentBucket(BUCKET_ID), legacyDocId, timestamp);
- setTestCondition(remove);
-
- auto code = thread->handleRemove(remove)->getResult();
- EXPECT_EQ(code.getResult(), api::ReturnCode::Result::ILLEGAL_PARAMETERS);
- EXPECT_EQ(code.getMessage(), "Document id has no doctype");
- EXPECT_EQ("", dumpBucket(BUCKET_ID));
-}
-
document::Document::SP
TestAndSetTest::createTestDocument()
{
diff --git a/storage/src/tests/storageserver/bouncertest.cpp b/storage/src/tests/storageserver/bouncertest.cpp
index 35b752fedfd..c19d8814af4 100644
--- a/storage/src/tests/storageserver/bouncertest.cpp
+++ b/storage/src/tests/storageserver/bouncertest.cpp
@@ -100,7 +100,7 @@ BouncerTest::createDummyFeedMessage(api::Timestamp timestamp,
{
auto cmd = std::make_shared<api::RemoveCommand>(
makeDocumentBucket(document::BucketId(0)),
- document::DocumentId("doc:foo:bar"),
+ document::DocumentId("id:ns:foo::bar"),
timestamp);
cmd->setPriority(priority);
return cmd;
@@ -112,7 +112,7 @@ BouncerTest::createDummyFeedMessage(api::Timestamp timestamp,
{
auto cmd = std::make_shared<api::RemoveCommand>(
document::Bucket(bucketSpace, document::BucketId(0)),
- document::DocumentId("doc:foo:bar"),
+ document::DocumentId("id:ns:foo::bar"),
timestamp);
cmd->setPriority(Priority(0));
return cmd;
diff --git a/storage/src/tests/storageserver/communicationmanagertest.cpp b/storage/src/tests/storageserver/communicationmanagertest.cpp
index b970e56343e..caee6e6ab91 100644
--- a/storage/src/tests/storageserver/communicationmanagertest.cpp
+++ b/storage/src/tests/storageserver/communicationmanagertest.cpp
@@ -32,7 +32,7 @@ struct CommunicationManagerTest : Test {
api::StorageMessage::Priority priority)
{
auto cmd = std::make_shared<api::GetCommand>(makeDocumentBucket(document::BucketId(0)),
- document::DocumentId("doc::mydoc"),
+ document::DocumentId("id:ns:mytype::mydoc"),
"[all]");
cmd->setAddress(api::StorageMessageAddress("storage", lib::NodeType::STORAGE, 1));
cmd->setPriority(priority);
@@ -69,13 +69,13 @@ TEST_F(CommunicationManagerTest, simple) {
// Send a message through from distributor to storage
auto cmd = std::make_shared<api::GetCommand>(
- makeDocumentBucket(document::BucketId(0)), document::DocumentId("doc::mydoc"), "[all]");
+ makeDocumentBucket(document::BucketId(0)), document::DocumentId("id:ns:mytype::mydoc"), "[all]");
cmd->setAddress(api::StorageMessageAddress("storage", lib::NodeType::STORAGE, 1));
distributorLink->sendUp(cmd);
storageLink->waitForMessages(1, MESSAGE_WAIT_TIME_SEC);
ASSERT_GT(storageLink->getNumCommands(), 0);
auto cmd2 = std::dynamic_pointer_cast<api::StorageCommand>(storageLink->getCommand(0));
- EXPECT_EQ("doc::mydoc", dynamic_cast<api::GetCommand&>(*cmd2).getDocumentId().toString());
+ EXPECT_EQ("id:ns:mytype::mydoc", dynamic_cast<api::GetCommand&>(*cmd2).getDocumentId().toString());
// Reply to the message
std::shared_ptr<api::StorageReply> reply(cmd2->makeReply().release());
storageLink->sendUp(reply);
diff --git a/storage/src/tests/storageserver/documentapiconvertertest.cpp b/storage/src/tests/storageserver/documentapiconvertertest.cpp
index c879f7d2779..f006277a7b6 100644
--- a/storage/src/tests/storageserver/documentapiconvertertest.cpp
+++ b/storage/src/tests/storageserver/documentapiconvertertest.cpp
@@ -22,7 +22,6 @@ using document::Bucket;
using document::BucketId;
using document::BucketSpace;
using document::DataType;
-using document::DocIdString;
using document::Document;
using document::DocumentId;
using document::DocumentTypeRepo;
@@ -124,7 +123,7 @@ TEST_F(DocumentApiConverterTest, put) {
}
TEST_F(DocumentApiConverterTest, forwarded_put) {
- auto doc = std::make_shared<Document>(_html_type, DocumentId(DocIdString("test", "test")));
+ auto doc = std::make_shared<Document>(_html_type, DocumentId("id:ns:" + _html_type.getName() + "::test"));
auto putmsg = std::make_unique<documentapi::PutDocumentMessage>(doc);
auto* putmsg_raw = putmsg.get();
diff --git a/streamingvisitors/src/tests/hitcollector/hitcollector.cpp b/streamingvisitors/src/tests/hitcollector/hitcollector.cpp
index 9650834d0f1..30e6b8a7adb 100644
--- a/streamingvisitors/src/tests/hitcollector/hitcollector.cpp
+++ b/streamingvisitors/src/tests/hitcollector/hitcollector.cpp
@@ -76,7 +76,7 @@ HitCollectorTest::assertHit(SearchResult::RankType expRank, uint32_t expDocId, u
void
HitCollectorTest::addHit(HitCollector &hc, uint32_t docId, double score, const char *sortData, size_t sortDataSize)
{
- document::Document::UP doc(new document::Document(_docType, DocumentId("doc::")));
+ document::Document::UP doc(new document::Document(_docType, DocumentId("id:ns:testdoc::")));
StorageDocument::UP sdoc(new StorageDocument(std::move(doc), SharedFieldPathMap(), 0));
ASSERT_TRUE(sdoc->valid());
MatchData md(MatchData::params());
diff --git a/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp b/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp
index a585186aa3e..ba2c98a6d54 100644
--- a/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp
+++ b/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp
@@ -1,7 +1,9 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/vespalib/datastore/unique_store.hpp>
+#include <vespa/vespalib/datastore/unique_store_string_allocator.hpp>
+#include <vespa/vespalib/datastore/unique_store_string_comparator.h>
#include <vespa/vespalib/gtest/gtest.h>
-#include <vespa/vespalib/test/datastore/memstats.h>
+#include <vespa/vespalib/test/datastore/buffer_stats.h>
#include <vespa/vespalib/test/insertion_operators.h>
#include <vespa/vespalib/util/traits.h>
#include <vector>
@@ -10,43 +12,47 @@
LOG_SETUP("unique_store_test");
using namespace search::datastore;
-using vespalib::MemoryUsage;
using vespalib::ArrayRef;
using generation_t = vespalib::GenerationHandler::generation_t;
-using MemStats = search::datastore::test::MemStats;
+using search::datastore::test::BufferStats;
-template <typename EntryT, typename RefT = EntryRefT<22> >
+template <typename UniqueStoreT>
struct TestBase : public ::testing::Test {
- using EntryRefType = RefT;
- using UniqueStoreType = UniqueStore<EntryT, RefT>;
- using value_type = EntryT;
- using ReferenceStore = std::map<EntryRef, std::pair<EntryT,uint32_t>>;
+ using EntryRefType = typename UniqueStoreT::RefType;
+ using UniqueStoreType = UniqueStoreT;
+ using ValueType = typename UniqueStoreT::EntryType;
+ using ValueConstRefType = typename UniqueStoreT::EntryConstRefType;
+ using ReferenceStoreValueType = std::conditional_t<std::is_same_v<ValueType, const char *>, std::string, ValueType>;
+ using ReferenceStore = std::map<EntryRef, std::pair<ReferenceStoreValueType,uint32_t>>;
UniqueStoreType store;
ReferenceStore refStore;
generation_t generation;
+
+ static std::vector<ValueType> values;
+
TestBase()
: store(),
refStore(),
generation(1)
{}
- void assertAdd(const EntryT &input) {
+ void assertAdd(ValueConstRefType input) {
EntryRef ref = add(input);
assertGet(ref, input);
}
- EntryRef add(const EntryT &input) {
+ EntryRef add(ValueConstRefType input) {
UniqueStoreAddResult addResult = store.add(input);
EntryRef result = addResult.ref();
- auto insres = refStore.insert(std::make_pair(result, std::make_pair(input, 1u)));
+ auto insres = refStore.insert(std::make_pair(result, std::make_pair(ReferenceStoreValueType(input), 1u)));
EXPECT_EQ(insres.second, addResult.inserted());
if (!insres.second) {
++insres.first->second.second;
}
return result;
}
- void alignRefStore(EntryRef ref, const EntryT &input, uint32_t refcnt) {
+ void alignRefStore(EntryRef ref, ValueConstRefType input, uint32_t refcnt) {
if (refcnt > 0) {
- auto insres = refStore.insert(std::make_pair(ref, std::make_pair(input, refcnt)));
+ auto insres = refStore.insert(std::make_pair(ref, std::make_pair(ReferenceStoreValueType(input), refcnt)));
if (!insres.second) {
insres.first->second.second = refcnt;
}
@@ -54,8 +60,8 @@ struct TestBase : public ::testing::Test {
refStore.erase(ref);
}
}
- void assertGet(EntryRef ref, const EntryT &exp) const {
- EntryT act = store.get(ref);
+ void assertGet(EntryRef ref, ReferenceStoreValueType exp) const {
+ ReferenceStoreValueType act = store.get(ref);
EXPECT_EQ(exp, act);
}
void remove(EntryRef ref) {
@@ -67,29 +73,23 @@ struct TestBase : public ::testing::Test {
refStore.erase(ref);
}
}
- void remove(const EntryT &input) {
+ void remove(ValueConstRefType input) {
remove(getEntryRef(input));
}
uint32_t getBufferId(EntryRef ref) const {
return EntryRefType(ref).bufferId();
}
- void assertBufferState(EntryRef ref, const MemStats expStats) const {
+ void assertBufferState(EntryRef ref, const BufferStats expStats) const {
EXPECT_EQ(expStats._used, store.bufferState(ref).size());
EXPECT_EQ(expStats._hold, store.bufferState(ref).getHoldElems());
EXPECT_EQ(expStats._dead, store.bufferState(ref).getDeadElems());
}
- void assertMemoryUsage(const MemStats expStats) const {
- MemoryUsage act = store.getMemoryUsage();
- EXPECT_EQ(expStats._used, act.usedBytes());
- EXPECT_EQ(expStats._hold, act.allocatedBytesOnHold());
- EXPECT_EQ(expStats._dead, act.deadBytes());
- }
void assertStoreContent() const {
for (const auto &elem : refStore) {
assertGet(elem.first, elem.second.first);
}
}
- EntryRef getEntryRef(const EntryT &input) {
+ EntryRef getEntryRef(ValueConstRefType input) {
for (const auto &elem : refStore) {
if (elem.second.first == input) {
return elem.first;
@@ -121,57 +121,79 @@ struct TestBase : public ::testing::Test {
}
refStore = compactedRefStore;
}
- size_t entrySize() const { return sizeof(EntryT); }
+ size_t entrySize() const { return sizeof(ValueType); }
auto getBuilder(uint32_t uniqueValuesHint) { return store.getBuilder(uniqueValuesHint); }
auto getSaver() { return store.getSaver(); }
+ size_t get_reserved(EntryRef ref) {
+ return store.bufferState(ref).getTypeHandler()->getReservedElements(getBufferId(ref));
+ }
+ size_t get_array_size(EntryRef ref) {
+ return store.bufferState(ref).getArraySize();
+ }
};
-using NumberTest = TestBase<uint32_t>;
-using StringTest = TestBase<std::string>;
-using SmallOffsetNumberTest = TestBase<uint32_t, EntryRefT<10>>;
+using NumberUniqueStore = UniqueStore<uint32_t>;
+using StringUniqueStore = UniqueStore<std::string>;
+using CStringUniqueStore = UniqueStore<const char *, EntryRefT<22>, UniqueStoreStringComparator<EntryRefT<22>>, UniqueStoreStringAllocator<EntryRefT<22>>>;
+using SmallOffsetNumberUniqueStore = UniqueStore<uint32_t, EntryRefT<10,10>>;
-TEST(UniqueStoreTest, trivial_and_non_trivial_types_are_tested)
-{
- EXPECT_TRUE(vespalib::can_skip_destruction<NumberTest::value_type>::value);
- EXPECT_FALSE(vespalib::can_skip_destruction<StringTest::value_type>::value);
-}
+template <>
+std::vector<uint32_t> TestBase<NumberUniqueStore>::values{10, 20, 30, 10 };
+template <>
+std::vector<std::string> TestBase<StringUniqueStore>::values{ "aa", "bbb", "ccc", "aa" };
+template <>
+std::vector<const char *> TestBase<CStringUniqueStore>::values{ "aa", "bbb", "ccc", "aa" };
+
+using UniqueStoreTestTypes = ::testing::Types<NumberUniqueStore, StringUniqueStore, CStringUniqueStore>;
+TYPED_TEST_CASE(TestBase, UniqueStoreTestTypes);
-TEST_F(NumberTest, can_add_and_get_values_of_trivial_type)
+// Disable warnings emitted by gtest generated files when using typed tests
+#pragma GCC diagnostic push
+#ifndef __clang__
+#pragma GCC diagnostic ignored "-Wsuggest-override"
+#endif
+
+using NumberTest = TestBase<NumberUniqueStore>;
+using StringTest = TestBase<StringUniqueStore>;
+using CStringTest = TestBase<CStringUniqueStore>;
+using SmallOffsetNumberTest = TestBase<SmallOffsetNumberUniqueStore>;
+
+TEST(UniqueStoreTest, trivial_and_non_trivial_types_are_tested)
{
- assertAdd(1);
- assertAdd(2);
- assertAdd(3);
- assertAdd(1);
+ EXPECT_TRUE(vespalib::can_skip_destruction<NumberTest::ValueType>::value);
+ EXPECT_FALSE(vespalib::can_skip_destruction<StringTest::ValueType>::value);
}
-TEST_F(StringTest, can_add_and_get_values_of_non_trivial_type)
+TYPED_TEST(TestBase, can_add_and_get_values)
{
- assertAdd("aa");
- assertAdd("bbb");
- assertAdd("ccc");
- assertAdd("aa");
+ for (auto &val : this->values) {
+ this->assertAdd(val);
+ }
}
-TEST_F(NumberTest, elements_are_put_on_hold_when_value_is_removed)
+TYPED_TEST(TestBase, elements_are_put_on_hold_when_value_is_removed)
{
- EntryRef ref = add(1);
- // Note: The first buffer have the first element reserved -> we expect 2 elements used here.
- assertBufferState(ref, MemStats().used(2).hold(0).dead(1));
- store.remove(ref);
- assertBufferState(ref, MemStats().used(2).hold(1).dead(1));
+ EntryRef ref = this->add(this->values[0]);
+ size_t reserved = this->get_reserved(ref);
+ size_t array_size = this->get_array_size(ref);
+ this->assertBufferState(ref, BufferStats().used(array_size + reserved).hold(0).dead(reserved));
+ this->store.remove(ref);
+ this->assertBufferState(ref, BufferStats().used(array_size + reserved).hold(array_size).dead(reserved));
}
-TEST_F(NumberTest, elements_are_reference_counted)
+TYPED_TEST(TestBase, elements_are_reference_counted)
{
- EntryRef ref = add(1);
- EntryRef ref2 = add(1);
+ EntryRef ref = this->add(this->values[0]);
+ EntryRef ref2 = this->add(this->values[0]);
EXPECT_EQ(ref.ref(), ref2.ref());
// Note: The first buffer have the first element reserved -> we expect 2 elements used here.
- assertBufferState(ref, MemStats().used(2).hold(0).dead(1));
- store.remove(ref);
- assertBufferState(ref, MemStats().used(2).hold(0).dead(1));
- store.remove(ref);
- assertBufferState(ref, MemStats().used(2).hold(1).dead(1));
+ size_t reserved = this->get_reserved(ref);
+ size_t array_size = this->get_array_size(ref);
+ this->assertBufferState(ref, BufferStats().used(array_size + reserved).hold(0).dead(reserved));
+ this->store.remove(ref);
+ this->assertBufferState(ref, BufferStats().used(array_size + reserved).hold(0).dead(reserved));
+ this->store.remove(ref);
+ this->assertBufferState(ref, BufferStats().used(array_size + reserved).hold(array_size).dead(reserved));
}
TEST_F(SmallOffsetNumberTest, new_underlying_buffer_is_allocated_when_current_is_full)
@@ -193,74 +215,80 @@ TEST_F(SmallOffsetNumberTest, new_underlying_buffer_is_allocated_when_current_is
assertStoreContent();
}
-TEST_F(NumberTest, store_can_be_compacted)
+TYPED_TEST(TestBase, store_can_be_compacted)
{
- EntryRef val1Ref = add(1);
- EntryRef val2Ref = add(2);
- remove(add(4));
- trimHoldLists();
- assertBufferState(val1Ref, MemStats().used(4).dead(2)); // Note: First element is reserved
- uint32_t val1BufferId = getBufferId(val1Ref);
+ EntryRef val0Ref = this->add(this->values[0]);
+ EntryRef val1Ref = this->add(this->values[1]);
+ this->remove(this->add(this->values[2]));
+ this->trimHoldLists();
+ size_t reserved = this->get_reserved(val0Ref);
+ size_t array_size = this->get_array_size(val0Ref);
+ this->assertBufferState(val0Ref, BufferStats().used(reserved + 3 * array_size).dead(reserved + array_size));
+ uint32_t val1BufferId = this->getBufferId(val0Ref);
- EXPECT_EQ(2u, refStore.size());
- compactWorst();
- EXPECT_EQ(2u, refStore.size());
- assertStoreContent();
+ EXPECT_EQ(2u, this->refStore.size());
+ this->compactWorst();
+ EXPECT_EQ(2u, this->refStore.size());
+ this->assertStoreContent();
// Buffer has been compacted
- EXPECT_NE(val1BufferId, getBufferId(getEntryRef(1)));
+ EXPECT_NE(val1BufferId, this->getBufferId(this->getEntryRef(this->values[0])));
// Old ref should still point to data.
- assertGet(val1Ref, 1);
- assertGet(val2Ref, 2);
- EXPECT_TRUE(store.bufferState(val1Ref).isOnHold());
- trimHoldLists();
- EXPECT_TRUE(store.bufferState(val1Ref).isFree());
- assertStoreContent();
+ this->assertGet(val0Ref, this->values[0]);
+ this->assertGet(val1Ref, this->values[1]);
+ EXPECT_TRUE(this->store.bufferState(val0Ref).isOnHold());
+ this->trimHoldLists();
+ EXPECT_TRUE(this->store.bufferState(val0Ref).isFree());
+ this->assertStoreContent();
}
-TEST_F(NumberTest, store_can_be_instantiated_with_builder)
+TYPED_TEST(TestBase, store_can_be_instantiated_with_builder)
{
- auto builder = getBuilder(2);
- builder.add(10);
- builder.add(20);
+ auto builder = this->getBuilder(2);
+ builder.add(this->values[0]);
+ builder.add(this->values[1]);
builder.setupRefCounts();
- EntryRef val10Ref = builder.mapEnumValueToEntryRef(1);
- EntryRef val20Ref = builder.mapEnumValueToEntryRef(2);
- assertBufferState(val10Ref, MemStats().used(3).dead(1)); // Note: First element is reserved
- EXPECT_TRUE(val10Ref.valid());
- EXPECT_TRUE(val20Ref.valid());
- EXPECT_NE(val10Ref.ref(), val20Ref.ref());
- assertGet(val10Ref, 10);
- assertGet(val20Ref, 20);
+ EntryRef val0Ref = builder.mapEnumValueToEntryRef(1);
+ EntryRef val1Ref = builder.mapEnumValueToEntryRef(2);
+ size_t reserved = this->get_reserved(val0Ref);
+ size_t array_size = this->get_array_size(val0Ref);
+ this->assertBufferState(val0Ref, BufferStats().used(2 * array_size + reserved).dead(reserved)); // Note: First element is reserved
+ EXPECT_TRUE(val0Ref.valid());
+ EXPECT_TRUE(val1Ref.valid());
+ EXPECT_NE(val0Ref.ref(), val1Ref.ref());
+ this->assertGet(val0Ref, this->values[0]);
+ this->assertGet(val1Ref, this->values[1]);
builder.makeDictionary();
// Align refstore with the two entries added by builder.
- alignRefStore(val10Ref, 10, 1);
- alignRefStore(val20Ref, 20, 1);
- EXPECT_EQ(val10Ref.ref(), add(10).ref());
- EXPECT_EQ(val20Ref.ref(), add(20).ref());
+ this->alignRefStore(val0Ref, this->values[0], 1);
+ this->alignRefStore(val1Ref, this->values[1], 1);
+ EXPECT_EQ(val0Ref.ref(), this->add(this->values[0]).ref());
+ EXPECT_EQ(val1Ref.ref(), this->add(this->values[1]).ref());
}
-TEST_F(NumberTest, store_can_be_saved)
+TYPED_TEST(TestBase, store_can_be_saved)
{
- EntryRef val10Ref = add(10);
- EntryRef val20Ref = add(20);
- remove(add(40));
- trimHoldLists();
+ EntryRef val0Ref = this->add(this->values[0]);
+ EntryRef val1Ref = this->add(this->values[1]);
+ this->remove(this->add(this->values[2]));
+ this->trimHoldLists();
- auto saver = getSaver();
+ auto saver = this->getSaver();
std::vector<uint32_t> refs;
saver.foreach_key([&](EntryRef ref) { refs.push_back(ref.ref()); });
std::vector<uint32_t> expRefs;
- expRefs.push_back(val10Ref.ref());
- expRefs.push_back(val20Ref.ref());
+ expRefs.push_back(val0Ref.ref());
+ expRefs.push_back(val1Ref.ref());
EXPECT_EQ(expRefs, refs);
saver.enumerateValues();
uint32_t invalidEnum = saver.mapEntryRefToEnumValue(EntryRef());
- uint32_t enumValue10 = saver.mapEntryRefToEnumValue(val10Ref);
- uint32_t enumValue20 = saver.mapEntryRefToEnumValue(val20Ref);
+ uint32_t enumValue1 = saver.mapEntryRefToEnumValue(val0Ref);
+ uint32_t enumValue2 = saver.mapEntryRefToEnumValue(val1Ref);
EXPECT_EQ(0u, invalidEnum);
- EXPECT_EQ(1u, enumValue10);
- EXPECT_EQ(2u, enumValue20);
+ EXPECT_EQ(1u, enumValue1);
+ EXPECT_EQ(2u, enumValue2);
}
+#pragma GCC diagnostic pop
+
GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/vespalib/src/tests/portal/portal_test.cpp b/vespalib/src/tests/portal/portal_test.cpp
index 1baebc69e97..e54700306fe 100644
--- a/vespalib/src/tests/portal/portal_test.cpp
+++ b/vespalib/src/tests/portal/portal_test.cpp
@@ -48,6 +48,12 @@ vespalib::string make_expected_response(const vespalib::string &content_type, co
"Connection: close\r\n"
"Content-Type: %s\r\n"
"Content-Length: %zu\r\n"
+ "X-XSS-Protection: 1; mode=block\r\n"
+ "X-Frame-Options: DENY\r\n"
+ "Content-Security-Policy: default-src 'none'\r\n"
+ "X-Content-Type-Options: nosniff\r\n"
+ "Cache-Control: no-store\r\n"
+ "Pragma: no-cache\r\n"
"\r\n"
"%s", content_type.c_str(), content.size(), content.c_str());
}
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store.h b/vespalib/src/vespa/vespalib/datastore/unique_store.h
index 8a7f0e50845..a045da6ca1f 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store.h
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store.h
@@ -35,6 +35,7 @@ public:
using RefType = RefT;
using Saver = UniqueStoreSaver<EntryT, RefT>;
using Builder = UniqueStoreBuilder<Allocator>;
+ using EntryConstRefType = typename Allocator::EntryConstRefType;
private:
Allocator _allocator;
DataStoreType &_store;
@@ -44,9 +45,9 @@ private:
public:
UniqueStore();
~UniqueStore();
- UniqueStoreAddResult add(const EntryType &value);
- EntryRef find(const EntryType &value);
- const EntryType &get(EntryRef ref) const { return _allocator.get(ref); }
+ UniqueStoreAddResult add(EntryConstRefType value);
+ EntryRef find(EntryConstRefType value);
+ EntryConstRefType get(EntryRef ref) const { return _allocator.get(ref); }
void remove(EntryRef ref);
ICompactionContext::UP compactWorst();
vespalib::MemoryUsage getMemoryUsage() const;
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store.hpp
index bc1873a2c3a..e149f470bdf 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store.hpp
@@ -27,7 +27,7 @@ UniqueStore<EntryT, RefT, Compare, Allocator>::~UniqueStore() = default;
template <typename EntryT, typename RefT, typename Compare, typename Allocator>
UniqueStoreAddResult
-UniqueStore<EntryT, RefT, Compare, Allocator>::add(const EntryType &value)
+UniqueStore<EntryT, RefT, Compare, Allocator>::add(EntryConstRefType value)
{
Compare comp(_store, value);
UniqueStoreAddResult result = _dict->add(comp, [this, &value]() -> EntryRef { return _allocator.allocate(value); });
@@ -37,7 +37,7 @@ UniqueStore<EntryT, RefT, Compare, Allocator>::add(const EntryType &value)
template <typename EntryT, typename RefT, typename Compare, typename Allocator>
EntryRef
-UniqueStore<EntryT, RefT, Compare, Allocator>::find(const EntryType &value)
+UniqueStore<EntryT, RefT, Compare, Allocator>::find(EntryConstRefType value)
{
Compare comp(_store, value);
return _dict->find(comp);
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.h b/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.h
index cadc2b09c0e..1981a190cc6 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.h
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_allocator.h
@@ -20,6 +20,7 @@ class UniqueStoreAllocator : public ICompactable
public:
using DataStoreType = DataStoreT<RefT>;
using EntryType = EntryT;
+ using EntryConstRefType = const EntryType &;
using WrappedEntryType = UniqueStoreEntry<EntryType>;
using RefType = RefT;
using UniqueStoreBufferType = BufferType<WrappedEntryType>;
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.cpp b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.cpp
index 4c665ee0517..d7b79c439ef 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.cpp
@@ -4,6 +4,13 @@
namespace search::datastore {
+namespace {
+
+constexpr size_t NUM_ARRAYS_FOR_NEW_UNIQUESTORE_BUFFER = 1024u;
+constexpr float ALLOC_GROW_FACTOR = 0.2;
+
+}
+
namespace string_allocator {
std::vector<size_t> array_sizes = { 16, 24, 32, 40, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 256 };
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.h b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.h
index 0adb089da08..f72b9c6119c 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.h
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.h
@@ -8,6 +8,7 @@
#include "unique_store_entry.h"
#include "i_compactable.h"
#include <cassert>
+#include <string>
namespace search::datastore {
@@ -90,6 +91,7 @@ class UniqueStoreStringAllocator : public ICompactable
public:
using DataStoreType = DataStoreT<RefT>;
using EntryType = const char *;
+ using EntryConstRefType = const char *;
using WrappedExternalEntryType = UniqueStoreEntry<std::string>;
using RefType = RefT;
private:
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp
index 9bd2e050507..2b2af70439a 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.hpp
@@ -7,9 +7,6 @@
namespace search::datastore {
-constexpr size_t NUM_ARRAYS_FOR_NEW_UNIQUESTORE_BUFFER = 1024u;
-constexpr float ALLOC_GROW_FACTOR = 0.2;
-
template <typename RefT>
UniqueStoreStringAllocator<RefT>::UniqueStoreStringAllocator()
: ICompactable(),
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_string_comparator.h b/vespalib/src/vespa/vespalib/datastore/unique_store_string_comparator.h
new file mode 100644
index 00000000000..e5d3888a5e2
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_string_comparator.h
@@ -0,0 +1,51 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "entry_comparator.h"
+#include "unique_store_string_allocator.h"
+
+namespace search::datastore {
+
+/*
+ * Compare two strings based on entry refs. Valid entry ref is mapped
+ * to a string in a data store. Invalid entry ref is mapped to a
+ * temporary string pointed to by comparator instance.
+ */
+template <typename RefT>
+class UniqueStoreStringComparator : public EntryComparator {
+ using RefType = RefT;
+ using WrappedExternalEntryType = UniqueStoreEntry<std::string>;
+ using DataStoreType = DataStoreT<RefT>;
+ const DataStoreType &_store;
+ const char *_value;
+public:
+ UniqueStoreStringComparator(const DataStoreType &store, const char *value)
+ : _store(store),
+ _value(value)
+ {
+ }
+ const char *get(EntryRef ref) const {
+ if (ref.valid()) {
+ RefType iRef(ref);
+ auto &state = _store.getBufferState(iRef.bufferId());
+ auto type_id = state.getTypeId();
+ if (type_id != 0) {
+ return reinterpret_cast<const UniqueStoreSmallStringEntry *>(_store.template getEntryArray<char>(iRef, state.getArraySize()))->value();
+ } else {
+ return _store.template getEntry<WrappedExternalEntryType>(iRef)->value().c_str();
+ }
+ } else {
+ return _value;
+ }
+ }
+
+ bool operator()(const EntryRef lhs, const EntryRef rhs) const override
+ {
+ const char *lhs_value = get(lhs);
+ const char *rhs_value = get(rhs);
+ return (strcmp(lhs_value, rhs_value) < 0);
+ }
+};
+
+}
diff --git a/vespalib/src/vespa/vespalib/portal/http_connection.cpp b/vespalib/src/vespa/vespalib/portal/http_connection.cpp
index 97a5f6082c9..aa2c0ec4cdd 100644
--- a/vespalib/src/vespa/vespalib/portal/http_connection.cpp
+++ b/vespalib/src/vespa/vespalib/portal/http_connection.cpp
@@ -90,6 +90,26 @@ WriteRes half_close(CryptoSocket &socket) {
}
}
+/**
+ * Emit a basic set of HTTP security headers meant to minimize any impact
+ * in the case of unsanitized/unescaped data making its way to an internal
+ * status page.
+ */
+void emit_http_security_headers(OutputWriter &dst) {
+ // Reject detected cross-site scripting attacks
+ dst.printf("X-XSS-Protection: 1; mode=block\r\n");
+ // Do not allow embedding via iframe (clickjacking prevention)
+ dst.printf("X-Frame-Options: DENY\r\n");
+ // Do not allow _anything_ to be externally loaded, nor inline scripts
+ // etc to be executed.
+ dst.printf("Content-Security-Policy: default-src 'none'\r\n");
+ // No heuristic auto-inference of content-type based on payload.
+ dst.printf("X-Content-Type-Options: nosniff\r\n");
+ // Don't store any potentially sensitive data in any caches.
+ dst.printf("Cache-Control: no-store\r\n");
+ dst.printf("Pragma: no-cache\r\n");
+}
+
} // namespace vespalib::portal::<unnamed>
void
@@ -223,6 +243,7 @@ HttpConnection::respond_with_content(const vespalib::string &content_type,
dst.printf("Connection: close\r\n");
dst.printf("Content-Type: %s\r\n", content_type.c_str());
dst.printf("Content-Length: %zu\r\n", content.size());
+ emit_http_security_headers(dst);
dst.printf("\r\n");
dst.write(content.data(), content.size());
}