-rw-r--r--  config-model-api/abi-spec.json | 2
-rw-r--r--  config-model-api/src/main/java/com/yahoo/config/application/api/ValidationOverrides.java | 3
-rw-r--r--  config-model-api/src/main/java/com/yahoo/config/model/api/ConfigChangeAction.java | 3
-rw-r--r--  config-model-api/src/main/java/com/yahoo/config/model/api/ConfigChangeReindexAction.java | 18
-rw-r--r--  config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java | 24
-rw-r--r--  config-model-api/src/main/java/com/yahoo/config/model/api/Reindexing.java | 13
-rw-r--r--  config-model-api/src/test/java/com/yahoo/config/model/api/ModelContextTest.java | 36
-rw-r--r--  config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java | 27
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/IndexingModeChangeValidator.java | 75
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/VespaReindexAction.java | 70
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidator.java | 8
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/clients/ContainerDocumentApi.java | 47
-rwxr-xr-x  config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java | 6
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/container/http/JettyHttpServer.java | 6
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java | 10
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/container/xml/DocumentApiOptionsBuilder.java | 9
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java | 22
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/FileStorProducer.java | 11
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorServerProducer.java | 47
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorageCluster.java | 2
-rw-r--r--  config-model/src/main/resources/schema/containercluster.rnc | 4
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ConfigChangeTestUtils.java | 9
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/IndexingModeChangeValidatorTest.java | 22
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/DocumentDatabaseChangeValidatorTest.java | 3
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidatorTest.java | 42
-rwxr-xr-x  config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java | 5
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/container/xml/ContainerDocumentApiBuilderTest.java | 13
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/content/StorageClusterTest.java | 48
-rw-r--r--  config-model/src/test/schema-test-files/services.xml | 7
-rw-r--r--  config-provisioning/src/main/java/com/yahoo/config/provision/ApplicationTransaction.java | 8
-rw-r--r--  configdefinitions/src/vespa/reindexing.def | 2
-rw-r--r--  configdefinitions/src/vespa/stor-filestor.def | 5
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java | 14
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationCuratorDatabase.java | 184
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationReindexing.java | 229
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/application/ReindexingStatus.java | 132
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/application/TenantApplications.java | 53
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/configchange/ConfigChangeActions.java | 10
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/configchange/ConfigChangeActionsSlimeConverter.java | 15
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/configchange/ReindexActions.java | 87
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/configchange/ReindexActionsFormatter.java | 32
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/deploy/Deployment.java | 3
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java | 52
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ConfigServerMaintainer.java | 22
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ConfigServerMaintenance.java | 6
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ReindexingMaintainer.java | 110
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java | 2
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/ModelContextImplTest.java | 3
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationCuratorDatabaseTest.java | 24
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationReindexingTest.java | 63
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/application/ReindexingStatusTest.java | 34
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/configchange/ConfigChangeActionsBuilder.java | 17
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/configchange/ConfigChangeActionsSlimeConverterTest.java | 43
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/configchange/ReindexActionsFormatterTest.java | 53
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/SessionPrepareHandlerTest.java | 7
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/maintenance/ReindexingMaintainerTest.java | 43
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/aws/MockResourceTagger.java | 9
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/aws/ResourceTagger.java | 3
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Node.java | 16
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/NodeRepository.java | 1
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/NodeRepositoryNode.java | 1
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java | 1
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedTenant.java | 3
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java | 9
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceTagMaintainer.java | 12
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java | 49
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java | 1
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceTagMaintainerTest.java | 45
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiCloudTest.java | 11
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-us-east-1-log-first-part.json | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/staging-test-log.json | 8
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/system-test-details.json | 20
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/system-test-log.json | 20
-rw-r--r--  docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerEngine.java | 39
-rw-r--r--  docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerEngineTest.java | 4
-rw-r--r--  eval/CMakeLists.txt | 1
-rw-r--r--  eval/src/tests/eval/fast_value/fast_value_test.cpp | 61
-rw-r--r--  eval/src/tests/eval/tensor_function/tensor_function_test.cpp | 47
-rw-r--r--  eval/src/tests/instruction/generic_peek/CMakeLists.txt | 9
-rw-r--r--  eval/src/tests/instruction/generic_peek/generic_peek_test.cpp | 236
-rw-r--r--  eval/src/tests/tensor/instruction_benchmark/instruction_benchmark.cpp | 45
-rw-r--r--  eval/src/vespa/eval/eval/CMakeLists.txt | 1
-rw-r--r--  eval/src/vespa/eval/eval/engine_or_factory.cpp | 11
-rw-r--r--  eval/src/vespa/eval/eval/engine_or_factory.h | 1
-rw-r--r--  eval/src/vespa/eval/eval/fast_sparse_map.h | 33
-rw-r--r--  eval/src/vespa/eval/eval/fast_value.hpp | 36
-rw-r--r--  eval/src/vespa/eval/eval/interpreted_function.cpp | 3
-rw-r--r--  eval/src/vespa/eval/eval/optimize_tensor_function.cpp | 95
-rw-r--r--  eval/src/vespa/eval/eval/optimize_tensor_function.h | 15
-rw-r--r--  eval/src/vespa/eval/eval/tensor_function.cpp | 19
-rw-r--r--  eval/src/vespa/eval/eval/test/eval_fixture.cpp | 3
-rw-r--r--  eval/src/vespa/eval/instruction/CMakeLists.txt | 1
-rw-r--r--  eval/src/vespa/eval/instruction/generic_peek.cpp | 352
-rw-r--r--  eval/src/vespa/eval/instruction/generic_peek.h | 29
-rw-r--r--  eval/src/vespa/eval/tensor/default_tensor_engine.cpp | 2
-rw-r--r--  eval/src/vespa/eval/tensor/mixed/packed_mixed_tensor_builder.cpp | 6
-rw-r--r--  flags/src/main/java/com/yahoo/vespa/flags/Flags.java | 35
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java | 3
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java | 3
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java | 140
-rw-r--r--  searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp | 13
-rw-r--r--  searchcore/src/tests/proton/matching/docid_range_scheduler/docid_range_scheduler_test.cpp | 17
-rw-r--r--  searchcore/src/tests/proton/matching/match_loop_communicator/match_loop_communicator_test.cpp | 196
-rw-r--r--  searchcore/src/vespa/searchcore/proton/matching/docid_range_scheduler.h | 22
-rw-r--r--  searchcore/src/vespa/searchcore/proton/matching/document_scorer.cpp | 11
-rw-r--r--  searchcore/src/vespa/searchcore/proton/matching/document_scorer.h | 15
-rw-r--r--  searchcore/src/vespa/searchcore/proton/matching/i_match_loop_communicator.h | 6
-rw-r--r--  searchcore/src/vespa/searchcore/proton/matching/match_loop_communicator.cpp | 71
-rw-r--r--  searchcore/src/vespa/searchcore/proton/matching/match_loop_communicator.h | 41
-rw-r--r--  searchcore/src/vespa/searchcore/proton/matching/match_master.cpp | 8
-rw-r--r--  searchcore/src/vespa/searchcore/proton/matching/match_thread.cpp | 56
-rw-r--r--  searchlib/src/tests/hitcollector/hitcollector_test.cpp | 40
-rw-r--r--  searchlib/src/vespa/searchlib/queryeval/hitcollector.cpp | 43
-rw-r--r--  searchlib/src/vespa/searchlib/queryeval/hitcollector.h | 21
-rw-r--r--  searchlib/src/vespa/searchlib/queryeval/scores.h | 11
-rw-r--r--  serviceview/pom.xml | 5
-rw-r--r--  storage/src/tests/common/teststorageapp.cpp | 2
-rw-r--r--  storage/src/tests/distributor/bucketdbupdatertest.cpp | 47
-rw-r--r--  storage/src/tests/distributor/statecheckerstest.cpp | 74
-rw-r--r--  storage/src/tests/persistence/CMakeLists.txt | 1
-rw-r--r--  storage/src/tests/persistence/apply_bucket_diff_entry_result_test.cpp | 70
-rw-r--r--  storage/src/tests/persistence/common/filestortestfixture.h | 2
-rw-r--r--  storage/src/tests/persistence/mergehandlertest.cpp | 2
-rw-r--r--  storage/src/vespa/storage/bucketdb/btree_lockable_map.hpp | 2
-rw-r--r--  storage/src/vespa/storage/persistence/CMakeLists.txt | 2
-rw-r--r--  storage/src/vespa/storage/persistence/apply_bucket_diff_entry_complete.cpp | 34
-rw-r--r--  storage/src/vespa/storage/persistence/apply_bucket_diff_entry_complete.h | 28
-rw-r--r--  storage/src/vespa/storage/persistence/apply_bucket_diff_entry_result.cpp | 46
-rw-r--r--  storage/src/vespa/storage/persistence/apply_bucket_diff_entry_result.h | 33
-rw-r--r--  storage/src/vespa/storage/persistence/mergehandler.cpp | 48
-rw-r--r--  storage/src/vespa/storage/persistence/mergehandler.h | 9
-rw-r--r--  storage/src/vespa/storage/storageserver/communicationmanager.cpp | 14
-rw-r--r--  storage/src/vespa/storage/storageserver/communicationmanager.h | 3
-rw-r--r--  storage/src/vespa/storage/storageserver/servicelayernode.cpp | 25
-rw-r--r--  storage/src/vespa/storage/storageserver/statemanager.cpp | 9
-rw-r--r--  storage/src/vespa/storage/storageserver/storagenode.cpp | 34
-rw-r--r--  storage/src/vespa/storage/storageserver/storagenode.h | 1
-rw-r--r--  vdslib/src/tests/distribution/distributiontest.cpp | 173
-rw-r--r--  vdslib/src/tests/state/clusterstatetest.cpp | 82
-rw-r--r--  vdslib/src/vespa/vdslib/distribution/distribution.cpp | 99
-rw-r--r--  vdslib/src/vespa/vdslib/distribution/distribution.h | 20
-rw-r--r--  vdslib/src/vespa/vdslib/state/CMakeLists.txt | 1
-rw-r--r--  vdslib/src/vespa/vdslib/state/clusterstate.cpp | 6
-rw-r--r--  vdslib/src/vespa/vdslib/state/diskstate.cpp | 163
-rw-r--r--  vdslib/src/vespa/vdslib/state/diskstate.h | 45
-rw-r--r--  vdslib/src/vespa/vdslib/state/nodestate.cpp | 197
-rw-r--r--  vdslib/src/vespa/vdslib/state/nodestate.h | 18
-rw-r--r--  vdslib/src/vespa/vdslib/state/state.cpp | 17
-rw-r--r--  vdslib/src/vespa/vdslib/state/state.h | 6
-rw-r--r--  vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/LocalDataVisitorHandler.java | 70
-rw-r--r--  vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/OperationHandler.java | 112
-rw-r--r--  vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/OperationHandlerImpl.java | 460
-rw-r--r--  vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/Response.java | 50
-rw-r--r--  vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/RestApiException.java | 21
-rw-r--r--  vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/RestUri.java | 177
-rw-r--r--  vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java | 6
-rw-r--r--  vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/RestApi.java | 427
-rw-r--r--  vespaclient-container-plugin/src/main/java/com/yahoo/documentapi/metrics/DocumentOperationStatus.java | 7
-rw-r--r--  vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/DocumentApiApplicationTest.java | 25
-rw-r--r--  vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/OperationHandlerImplTest.java | 445
-rw-r--r--  vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/RestUriTest.java | 128
-rw-r--r--  vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/feed-document1.json | 0
-rw-r--r--  vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java | 4
-rw-r--r--  vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/MockedOperationHandler.java | 82
-rw-r--r--  vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiMaxThreadTest.java | 54
-rw-r--r--  vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiTest.java | 537
-rw-r--r--  vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiWithTestDocumentHandler.java | 36
-rw-r--r--  vespaclient-container-plugin/src/test/rest-api-application/services.xml | 19
168 files changed, 3216 insertions, 4619 deletions
diff --git a/config-model-api/abi-spec.json b/config-model-api/abi-spec.json
index c67e86de38b..9df68821454 100644
--- a/config-model-api/abi-spec.json
+++ b/config-model-api/abi-spec.json
@@ -592,7 +592,7 @@
"methods": [
"public void <init>(java.util.List)",
"public void invalid(com.yahoo.config.application.api.ValidationId, java.lang.String, java.time.Instant)",
- "public final boolean allows(java.lang.String, java.time.Instant)",
+ "public boolean allows(java.lang.String, java.time.Instant)",
"public boolean allows(com.yahoo.config.application.api.ValidationId, java.time.Instant)",
"public java.lang.String xmlForm()",
"public static java.lang.String toAllowMessage(com.yahoo.config.application.api.ValidationId)",
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationOverrides.java b/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationOverrides.java
index bcf2ce3999a..a85e109f731 100644
--- a/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationOverrides.java
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationOverrides.java
@@ -17,7 +17,6 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.logging.Level;
-import java.util.logging.Logger;
/**
* A set of allows which suppresses specific validations in limited time periods.
@@ -54,7 +53,7 @@ public class ValidationOverrides {
throw new ValidationException(validationId, message);
}
- public final boolean allows(String validationIdString, Instant now) {
+ public boolean allows(String validationIdString, Instant now) {
Optional<ValidationId> validationId = ValidationId.from(validationIdString);
if ( ! validationId.isPresent()) return false; // unknown id -> not allowed
return allows(validationId.get(), now);
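Note on the change above: allows(String, Instant) is no longer final, so model-version-independent callers (such as the reindex actions introduced in this commit) can subclass or stub ValidationOverrides, while unknown validation-id strings are still treated as "not allowed".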
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ConfigChangeAction.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ConfigChangeAction.java
index ffe011af1e8..1248560c931 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/ConfigChangeAction.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ConfigChangeAction.java
@@ -4,7 +4,6 @@ package com.yahoo.config.model.api;
import com.yahoo.config.provision.ClusterSpec;
import java.util.List;
-import java.util.Optional;
/**
* Contains the action to be performed on the given services to handle a config change
@@ -15,7 +14,7 @@ import java.util.Optional;
public interface ConfigChangeAction {
enum Type {
- RESTART("restart"), REFEED("refeed");
+ RESTART("restart"), REFEED("refeed"), REINDEX("reindex");
private final String type;
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ConfigChangeReindexAction.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ConfigChangeReindexAction.java
new file mode 100644
index 00000000000..085638e31ff
--- /dev/null
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ConfigChangeReindexAction.java
@@ -0,0 +1,18 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.config.model.api;
+
+/**
+ * Represents an action to re-index a document type in order to handle a config change.
+ *
+ * @author bjorncs
+ */
+public interface ConfigChangeReindexAction extends ConfigChangeAction {
+
+ @Override default Type getType() { return Type.REINDEX; }
+
+ /** @return name identifying this kind of change, used to identify names which should be allowed */
+ String name();
+
+ /** @return name of the document type that must be re-indexed */
+ String getDocumentType();
+}
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
index 7ab1a6ab0cb..44f1e92bb79 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
@@ -12,6 +12,10 @@ import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.Zone;
import java.io.File;
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
import java.net.URI;
import java.time.Duration;
import java.util.List;
@@ -45,8 +49,13 @@ public interface ModelContext {
/** The Vespa version we want nodes to become */
Version wantedNodeVespaVersion();
+ interface FeatureFlags {
+ @ModelFeatureFlag(owner = "bjorncs") default boolean enableAutomaticReindexing() { return false; }
+ }
+
/** Warning: As elsewhere in this package, do not make backwards incompatible changes that will break old config models! */
interface Properties {
+ FeatureFlags featureFlags();
boolean multitenant();
ApplicationId applicationId();
List<ConfigServerSpec> configServerSpecs();
@@ -89,6 +98,10 @@ public interface ModelContext {
default String tlsCompressionType() { return "ZSTD"; }
default double visibilityDelay() { return 0.0; }
+ boolean useAsyncMessageHandlingOnSchedule();
+ int contentNodeBucketDBStripeBits();
+ int mergeChunkSize();
+
// TODO(balder) Last used on 7.306
default boolean useContentNodeBtreeDb() { return true; }
@@ -121,14 +134,21 @@ public interface ModelContext {
}
// TODO(bjorncs): Temporary feature flag
- default boolean useNewRestapiHandler() { return false; }
+ default boolean useNewRestapiHandler() { return true; }
// TODO(mortent): Temporary feature flag
default boolean useAccessControlTlsHandshakeClientAuth() { return false; }
// TODO(bjorncs): Temporary feature flag
- default double jettyThreadpoolSizeFactor() { return 0.0; }
+ default double jettyThreadpoolSizeFactor() { return 1.0; }
+
+ }
+ @Retention(RetentionPolicy.RUNTIME)
+ @Target(ElementType.METHOD)
+ @interface ModelFeatureFlag {
+ String owner();
+ String comment() default "";
}
}
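The new FeatureFlags interface and ModelFeatureFlag annotation above establish a convention: every flag is a default method that carries an owner, so removing a flag cannot break older config models, and the ModelContextTest added below enforces both properties by reflection. A minimal, self-contained sketch of that pattern follows; the enclosing class and the printout are illustrative only, not part of the diff.

    import java.lang.annotation.ElementType;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;
    import java.lang.annotation.Target;
    import java.lang.reflect.Method;

    // Sketch of the feature-flag convention: each flag is an annotated default method.
    public class FeatureFlagSketch {

        @Retention(RetentionPolicy.RUNTIME)
        @Target(ElementType.METHOD)
        @interface ModelFeatureFlag { String owner(); String comment() default ""; }

        interface FeatureFlags {
            @ModelFeatureFlag(owner = "bjorncs") default boolean enableAutomaticReindexing() { return false; }
        }

        public static void main(String[] args) {
            // The same reflective checks as ModelContextTest: annotation present and default implementation given.
            for (Method method : FeatureFlags.class.getDeclaredMethods()) {
                boolean valid = method.isDefault() && method.getDeclaredAnnotation(ModelFeatureFlag.class) != null;
                System.out.println(method.getName() + " is a valid feature flag: " + valid);
            }
        }
    }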
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/Reindexing.java b/config-model-api/src/main/java/com/yahoo/config/model/api/Reindexing.java
index 0d37f6810a7..4dc06eae841 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/Reindexing.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/Reindexing.java
@@ -5,21 +5,20 @@ import java.time.Instant;
import java.util.Map;
/**
- * Instants after which reindexing should be triggered, for select document types.
+ * Status of reindexing for the documents of an application.
*
* @author jonmv
*/
public interface Reindexing {
- /** The reindexing status for each document type for which this is known. */
- default Map<String, ? extends Status> status() { return Map.of(); }
+ /** Reindexing status for a given application, cluster and document type. */
+ default Status status(String cluster, String documentType) { return () -> Instant.MAX; }
-
- /** Reindexing status of a given document type. */
+ /** Reindexing status of a given document type in a given cluster in a given application. */
interface Status {
- /** The instant at which reindexing of this document type may begin. */
- default Instant ready() { return Instant.MAX; };
+ /** The instant at which reindexing may begin. */
+ Instant ready();
}
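The reworked Reindexing interface above is keyed by cluster and document type and falls back to Instant.MAX ("never ready") when no status is recorded. Below is a hypothetical in-memory implementation that shows the contract only; the "cluster/type" map key is an assumption for illustration, not Vespa's storage layout.

    import java.time.Instant;
    import java.util.Map;

    // Minimal in-memory stand-in for the revised Reindexing interface.
    class InMemoryReindexing {

        interface Reindexing {
            default Status status(String cluster, String documentType) { return () -> Instant.MAX; }
            interface Status { Instant ready(); }
        }

        static Reindexing of(Map<String, Instant> readyAt) {
            return new Reindexing() {
                @Override
                public Status status(String cluster, String documentType) {
                    // Unknown cluster/type pairs are never ready, matching the interface default.
                    Instant ready = readyAt.getOrDefault(cluster + "/" + documentType, Instant.MAX);
                    return () -> ready;
                }
            };
        }

        public static void main(String[] args) {
            Reindexing reindexing = of(Map.of("music/song", Instant.parse("2020-11-01T00:00:00Z")));
            System.out.println(reindexing.status("music", "song").ready());  // 2020-11-01T00:00:00Z
            System.out.println(reindexing.status("music", "album").ready()); // Instant.MAX
        }
    }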
diff --git a/config-model-api/src/test/java/com/yahoo/config/model/api/ModelContextTest.java b/config-model-api/src/test/java/com/yahoo/config/model/api/ModelContextTest.java
new file mode 100644
index 00000000000..68b6b9f814f
--- /dev/null
+++ b/config-model-api/src/test/java/com/yahoo/config/model/api/ModelContextTest.java
@@ -0,0 +1,36 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.config.model.api;
+
+import org.junit.Test;
+
+import java.lang.reflect.Method;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * @author bjorncs
+ */
+public class ModelContextTest {
+
+ @Test
+ public void verify_all_feature_flag_methods_have_annotation() {
+ for (Method method : ModelContext.FeatureFlags.class.getDeclaredMethods()) {
+ assertNotNull(
+ String.format(
+ "Method '%s' is not annotated with '%s'",
+ method.getName(), ModelContext.ModelFeatureFlag.class.getSimpleName()),
+ method.getDeclaredAnnotation(ModelContext.ModelFeatureFlag.class));
+ }
+ }
+
+ @Test
+ public void verify_all_feature_flag_methods_have_default_implementation() {
+ for (Method method : ModelContext.FeatureFlags.class.getDeclaredMethods()) {
+ assertTrue(
+ String.format("Method '%s' has no default implementation", method.getName()),
+ method.isDefault());
+ }
+ }
+
+}
\ No newline at end of file
diff --git a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
index 9008aecbfde..664ed896ad6 100644
--- a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
+++ b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
@@ -26,7 +26,7 @@ import java.util.Set;
*
* @author hakonhall
*/
-public class TestProperties implements ModelContext.Properties {
+public class TestProperties implements ModelContext.Properties, ModelContext.FeatureFlags {
private boolean multitenant = false;
private ApplicationId applicationId = ApplicationId.defaultId();
@@ -48,8 +48,11 @@ public class TestProperties implements ModelContext.Properties {
private ApplicationRoles applicationRoles;
private Quota quota = Quota.unlimited();
private boolean useAccessControlTlsHandshakeClientAuth;
- private double jettyThreadpoolSizeFactor = 0.0;
+ private boolean useAsyncMessageHandlingOnSchedule = false;
+ private int contentNodeBucketDBStripeBits = 0;
+ private int mergeChunkSize = 0x400000 - 0x1000; // 4M -4k
+ @Override public ModelContext.FeatureFlags featureFlags() { return this; }
@Override public boolean multitenant() { return multitenant; }
@Override public ApplicationId applicationId() { return applicationId; }
@Override public List<ConfigServerSpec> configServerSpecs() { return configServerSpecs; }
@@ -78,7 +81,23 @@ public class TestProperties implements ModelContext.Properties {
@Override public boolean skipMbusReplyThread() { return false; }
@Override public Quota quota() { return quota; }
@Override public boolean useAccessControlTlsHandshakeClientAuth() { return useAccessControlTlsHandshakeClientAuth; }
- @Override public double jettyThreadpoolSizeFactor() { return jettyThreadpoolSizeFactor; }
+ @Override public boolean useAsyncMessageHandlingOnSchedule() { return useAsyncMessageHandlingOnSchedule; }
+ @Override public int contentNodeBucketDBStripeBits() { return contentNodeBucketDBStripeBits; }
+ @Override public int mergeChunkSize() { return mergeChunkSize; }
+
+ public TestProperties setMergeChunkSize(int size) {
+ mergeChunkSize = size;
+ return this;
+ }
+ public TestProperties setContentNodeBucketDBStripeBits(int bits) {
+ contentNodeBucketDBStripeBits = bits;
+ return this;
+ }
+
+ public TestProperties setAsyncMessageHandlingOnSchedule(boolean value) {
+ useAsyncMessageHandlingOnSchedule = value;
+ return this;
+ }
public TestProperties setJvmGCOptions(String gcOptions) {
jvmGCOptions = gcOptions;
@@ -116,8 +135,6 @@ public class TestProperties implements ModelContext.Properties {
return this;
}
- public TestProperties setJettyThreadpoolSizeFactor(double factor) { this.jettyThreadpoolSizeFactor = factor; return this; }
-
public TestProperties setApplicationId(ApplicationId applicationId) {
this.applicationId = applicationId;
return this;
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/IndexingModeChangeValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/IndexingModeChangeValidator.java
index 08b4b37968c..0b907b43203 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/IndexingModeChangeValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/IndexingModeChangeValidator.java
@@ -1,22 +1,30 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.application.validation.change;
-import com.yahoo.config.model.api.ConfigChangeAction;
-import com.yahoo.vespa.model.VespaModel;
import com.yahoo.config.application.api.ValidationId;
import com.yahoo.config.application.api.ValidationOverrides;
+import com.yahoo.config.model.api.ConfigChangeAction;
+import com.yahoo.config.model.api.ServiceInfo;
+import com.yahoo.documentmodel.NewDocumentType;
+import com.yahoo.vespa.model.VespaModel;
+import com.yahoo.vespa.model.content.ContentSearchCluster;
import com.yahoo.vespa.model.content.cluster.ContentCluster;
+import com.yahoo.vespa.model.search.SearchNode;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
-import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static java.util.stream.Collectors.toSet;
/**
* Returns any change to the indexing mode of a cluster.
*
* @author hmusum
+ * @author bjorncs
*/
public class IndexingModeChangeValidator implements ChangeValidator {
@@ -27,31 +35,56 @@ public class IndexingModeChangeValidator implements ChangeValidator {
for (Map.Entry<String, ContentCluster> currentEntry : currentModel.getContentClusters().entrySet()) {
ContentCluster nextCluster = nextModel.getContentClusters().get(currentEntry.getKey());
if (nextCluster == null) continue;
-
- Optional<ConfigChangeAction> change = validateContentCluster(currentEntry.getValue(), nextCluster, overrides, now);
- if (change.isPresent())
- actions.add(change.get());
+ actions.addAll(validateContentCluster(currentEntry.getValue(), nextCluster, overrides, now));
}
return actions;
}
- private Optional<ConfigChangeAction> validateContentCluster(ContentCluster currentCluster, ContentCluster nextCluster,
- ValidationOverrides overrides, Instant now) {
- boolean currentClusterIsIndexed = currentCluster.getSearch().hasIndexedCluster();
- boolean nextClusterIsIndexed = nextCluster.getSearch().hasIndexedCluster();
-
- if (currentClusterIsIndexed == nextClusterIsIndexed) return Optional.empty();
+ private List<ConfigChangeAction> validateContentCluster(
+ ContentCluster currentCluster, ContentCluster nextCluster, ValidationOverrides overrides, Instant now) {
+ List<ConfigChangeAction> changes = new ArrayList<>();
+ ContentSearchCluster currentSearchCluster = currentCluster.getSearch();
+ ContentSearchCluster nextSearchCluster = nextCluster.getSearch();
+ {
+ Set<String> currentStreamingTypes = toDocumentTypeNames(currentSearchCluster.getDocumentTypesWithStreamingCluster());
+ Set<String> nextIndexedTypes = toDocumentTypeNames(nextSearchCluster.getDocumentTypesWithIndexedCluster());
+ for (String type : nextIndexedTypes) {
+ if (currentStreamingTypes.contains(type)) {
+ changes.add(createReindexAction(overrides, now, nextCluster, type, "streaming", "indexed"));
+ }
+ }
+ }
+ {
+ Set<String> currentIndexedTypes = toDocumentTypeNames(currentSearchCluster.getDocumentTypesWithIndexedCluster());
+ Set<String> nextStreamingTypes = toDocumentTypeNames(nextSearchCluster.getDocumentTypesWithStreamingCluster());
+ for (String type : nextStreamingTypes) {
+ if (currentIndexedTypes.contains(type)) {
+ changes.add(createReindexAction(overrides, now, nextCluster, type, "indexed", "streaming"));
+ }
+ }
+ }
+ return changes;
+ }
- return Optional.of(VespaRefeedAction.of(currentCluster.id(),
- ValidationId.indexModeChange.value(),
- overrides,
- "Cluster '" + currentCluster.getName() + "' changed indexing mode from '" +
- indexingMode(currentClusterIsIndexed) + "' to '" + indexingMode(nextClusterIsIndexed) + "'",
- now));
+ private static VespaReindexAction createReindexAction(
+ ValidationOverrides overrides, Instant now, ContentCluster nextCluster, String documentType, String indexModeFrom, String indexModeTo) {
+ List<ServiceInfo> services = nextCluster.getSearch().getSearchNodes().stream()
+ .map(SearchNode::getServiceInfo)
+ .collect(Collectors.toList());
+ return VespaReindexAction.of(
+ nextCluster.id(),
+ ValidationId.indexModeChange.value(),
+ overrides,
+ String.format("Document type '%s' in cluster '%s' changed indexing mode from '%s' to '%s'", documentType, nextCluster.getName(), indexModeFrom, indexModeTo),
+ services,
+ documentType,
+ now);
}
- private String indexingMode(boolean isIndexed) {
- return isIndexed ? "indexed" : "streaming";
+ private static Set<String> toDocumentTypeNames(List<NewDocumentType> types) {
+ return types.stream()
+ .map(type -> type.getFullName().getName())
+ .collect(toSet());
}
}
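The rewritten validator compares document types per indexing mode rather than whole clusters: a type that is streaming in the current model and indexed in the next one (or the reverse) yields one reindex action. The following is a self-contained restatement of that comparison; the ModeChange record stands in for VespaReindexAction and is purely illustrative.

    import java.util.List;
    import java.util.Set;
    import java.util.stream.Collectors;
    import java.util.stream.Stream;

    // Per-document-type comparison of indexing modes between two models.
    public class IndexingModeDiff {

        record ModeChange(String documentType, String from, String to) {}

        static List<ModeChange> changes(Set<String> currentStreaming, Set<String> currentIndexed,
                                        Set<String> nextStreaming, Set<String> nextIndexed) {
            Stream<ModeChange> toIndexed = nextIndexed.stream()
                    .filter(currentStreaming::contains)
                    .map(type -> new ModeChange(type, "streaming", "indexed"));
            Stream<ModeChange> toStreaming = nextStreaming.stream()
                    .filter(currentIndexed::contains)
                    .map(type -> new ModeChange(type, "indexed", "streaming"));
            return Stream.concat(toIndexed, toStreaming).collect(Collectors.toList());
        }

        public static void main(String[] args) {
            // 'music' flips streaming -> indexed, 'book' flips indexed -> streaming.
            System.out.println(changes(Set.of("music"), Set.of("book"),
                                       Set.of("book"), Set.of("music")));
        }
    }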
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/VespaReindexAction.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/VespaReindexAction.java
new file mode 100644
index 00000000000..f10802afc31
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/VespaReindexAction.java
@@ -0,0 +1,70 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.model.application.validation.change;
+
+import com.yahoo.config.application.api.ValidationOverrides;
+import com.yahoo.config.model.api.ConfigChangeReindexAction;
+import com.yahoo.config.model.api.ServiceInfo;
+import com.yahoo.config.provision.ClusterSpec;
+
+import java.time.Instant;
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * Represents an action to re-index a document type in order to handle a config change.
+ *
+ * @author bjorncs
+ */
+public class VespaReindexAction extends VespaConfigChangeAction implements ConfigChangeReindexAction {
+
+ /**
+ * The name of this action, which must be a valid ValidationId. This is a string here because
+ * the validation ids belong to the Vespa model while these names are exposed to the config server,
+ * which is model version independent.
+ */
+ private final String name;
+ private final String documentType;
+ private final boolean allowed;
+
+ private VespaReindexAction(ClusterSpec.Id id, String name, String message, List<ServiceInfo> services, String documentType, boolean allowed) {
+ super(id, message, services);
+ this.name = name;
+ this.documentType = documentType;
+ this.allowed = allowed;
+ }
+
+ public static VespaReindexAction of(
+ ClusterSpec.Id id, String name, ValidationOverrides overrides, String message, Instant now) {
+ return new VespaReindexAction(id, name, message, List.of(), /*documentType*/null, overrides.allows(name, now));
+ }
+
+ public static VespaReindexAction of(
+ ClusterSpec.Id id, String name, ValidationOverrides overrides, String message,
+ List<ServiceInfo> services, String documentType, Instant now) {
+ return new VespaReindexAction(id, name, message, services, documentType, overrides.allows(name, now));
+ }
+
+ @Override
+ public VespaConfigChangeAction modifyAction(String newMessage, List<ServiceInfo> newServices, String documentType) {
+ return new VespaReindexAction(clusterId(), name, newMessage, newServices, documentType, allowed);
+ }
+
+ @Override public String name() { return name; }
+ @Override public String getDocumentType() { return documentType; }
+ @Override public boolean allowed() { return allowed; }
+ @Override public boolean ignoreForInternalRedeploy() { return false; }
+ @Override public String toString() { return super.toString() + ", documentType='" + documentType + "'"; }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ if (!super.equals(o)) return false;
+ VespaReindexAction that = (VespaReindexAction) o;
+ return allowed == that.allowed &&
+ Objects.equals(name, that.name) &&
+ Objects.equals(documentType, that.documentType);
+ }
+
+ @Override public int hashCode() { return Objects.hash(super.hashCode(), name, documentType, allowed); }
+}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidator.java
index 4761d1613b8..8f9b1a3ed77 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidator.java
@@ -1,6 +1,8 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.application.validation.change.search;
+import com.yahoo.config.application.api.ValidationId;
+import com.yahoo.config.application.api.ValidationOverrides;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.searchdefinition.Search;
import com.yahoo.searchdefinition.document.ImmutableSDField;
@@ -8,10 +10,8 @@ import com.yahoo.vespa.indexinglanguage.ExpressionConverter;
import com.yahoo.vespa.indexinglanguage.expressions.Expression;
import com.yahoo.vespa.indexinglanguage.expressions.OutputExpression;
import com.yahoo.vespa.indexinglanguage.expressions.ScriptExpression;
-import com.yahoo.config.application.api.ValidationId;
-import com.yahoo.config.application.api.ValidationOverrides;
import com.yahoo.vespa.model.application.validation.change.VespaConfigChangeAction;
-import com.yahoo.vespa.model.application.validation.change.VespaRefeedAction;
+import com.yahoo.vespa.model.application.validation.change.VespaReindexAction;
import java.time.Instant;
import java.util.ArrayList;
@@ -55,7 +55,7 @@ public class IndexingScriptChangeValidator {
ChangeMessageBuilder messageBuilder = new ChangeMessageBuilder(nextField.getName());
new IndexingScriptChangeMessageBuilder(currentSearch, currentField, nextSearch, nextField).populate(messageBuilder);
messageBuilder.addChange("indexing script", currentScript.toString(), nextScript.toString());
- return Optional.of(VespaRefeedAction.of(id, ValidationId.indexingChange.value(), overrides, messageBuilder.build(), now));
+ return Optional.of(VespaReindexAction.of(id, ValidationId.indexingChange.value(), overrides, messageBuilder.build(), now));
}
return Optional.empty();
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/clients/ContainerDocumentApi.java b/config-model/src/main/java/com/yahoo/vespa/model/clients/ContainerDocumentApi.java
index c6f035b2885..48bf51714ee 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/clients/ContainerDocumentApi.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/clients/ContainerDocumentApi.java
@@ -30,42 +30,29 @@ public class ContainerDocumentApi {
private static void addFeedHandler(ContainerCluster<?> cluster, Options options) {
String bindingSuffix = ContainerCluster.RESERVED_URI_PREFIX + "/feedapi";
- var handler = newVespaClientHandler(
- "com.yahoo.vespa.http.server.FeedHandler", bindingSuffix, options);
+ var handler = newVespaClientHandler("com.yahoo.vespa.http.server.FeedHandler", bindingSuffix, options);
cluster.addComponent(handler);
- var executor = new Threadpool(
- "feedapi-handler", cluster, options.feedApiThreadpoolOptions);
+ var executor = new Threadpool("feedapi-handler", cluster, options.feedApiThreadpoolOptions);
handler.inject(executor);
handler.addComponent(executor);
}
private static void addRestApiHandler(ContainerCluster<?> cluster, Options options) {
- // TODO(bjorncs,jonmv) Cleanup once old restapi handler is gone
- // We need to include the old handler implementation even when the new handler is enabled
- // The internal legacy test framework requires that the name of the old handler is listed in /ApplicationStatus
- String oldHandlerName = "com.yahoo.document.restapi.resource.RestApi";
- String bindingSuffix = "/document/v1/*";
- var oldHandler = newVespaClientHandler(oldHandlerName, options.useNewRestapiHandler ? null : bindingSuffix, options);
- cluster.addComponent(oldHandler);
- var executor = new Threadpool("restapi-handler", cluster, options.restApiThreadpoolOptions);
- oldHandler.inject(executor);
- oldHandler.addComponent(executor);
-
- if (options.useNewRestapiHandler) {
- String newHandlerName = "com.yahoo.document.restapi.resource.DocumentV1ApiHandler";
- var newHandler = newVespaClientHandler(newHandlerName, bindingSuffix, options);
- cluster.addComponent(newHandler);
- }
+ var handler = newVespaClientHandler("com.yahoo.document.restapi.resource.DocumentV1ApiHandler", "/document/v1/*", options);
+ cluster.addComponent(handler);
+
+ // We need to include a dummy implementation of the previous restapi handler (using the same class name).
+ // The internal legacy test framework requires that the name of the old handler is listed in /ApplicationStatus.
+ var oldHandlerDummy = handlerComponentSpecification("com.yahoo.document.restapi.resource.RestApi");
+ cluster.addComponent(oldHandlerDummy);
}
private static Handler<AbstractConfigProducer<?>> newVespaClientHandler(
String componentId,
String bindingSuffix,
Options options) {
- Handler<AbstractConfigProducer<?>> handler = new Handler<>(new ComponentModel(
- BundleInstantiationSpecification.getFromStrings(componentId, null, "vespaclient-container-plugin"), ""));
- if (bindingSuffix == null) return handler; // TODO(bjorncs,jonmv) Cleanup once old restapi handler is gone
+ Handler<AbstractConfigProducer<?>> handler = handlerComponentSpecification(componentId);
if (options.bindings.isEmpty()) {
handler.addServerBindings(
SystemBindingPattern.fromHttpPath(bindingSuffix),
@@ -81,20 +68,18 @@ public class ContainerDocumentApi {
return handler;
}
+ private static Handler<AbstractConfigProducer<?>> handlerComponentSpecification(String className) {
+ return new Handler<>(new ComponentModel(
+ BundleInstantiationSpecification.getFromStrings(className, null, "vespaclient-container-plugin"), ""));
+ }
+
public static final class Options {
private final Collection<String> bindings;
- private final ContainerThreadpool.UserOptions restApiThreadpoolOptions;
private final ContainerThreadpool.UserOptions feedApiThreadpoolOptions;
- private final boolean useNewRestapiHandler;
- public Options(Collection<String> bindings,
- ContainerThreadpool.UserOptions restApiThreadpoolOptions,
- ContainerThreadpool.UserOptions feedApiThreadpoolOptions,
- boolean useNewRestapiHandler) {
+ public Options(Collection<String> bindings, ContainerThreadpool.UserOptions feedApiThreadpoolOptions) {
this.bindings = Collections.unmodifiableCollection(bindings);
- this.restApiThreadpoolOptions = restApiThreadpoolOptions;
this.feedApiThreadpoolOptions = feedApiThreadpoolOptions;
- this.useNewRestapiHandler = useNewRestapiHandler;
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java
index 57ebc215dfa..4888add3253 100755
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java
@@ -161,14 +161,11 @@ public abstract class ContainerCluster<CONTAINER extends Container>
private boolean deferChangesUntilRestart = false;
- private double jettyThreadpoolSizeFactor;
-
public ContainerCluster(AbstractConfigProducer<?> parent, String configSubId, String clusterId, DeployState deployState) {
super(parent, configSubId);
this.name = clusterId;
this.isHostedVespa = stateIsHosted(deployState);
this.zone = (deployState != null) ? deployState.zone() : Zone.defaultZone();
- this.jettyThreadpoolSizeFactor = deployState.getProperties().jettyThreadpoolSizeFactor();
componentGroup = new ComponentGroup<>(this, "component");
@@ -642,7 +639,4 @@ public abstract class ContainerCluster<CONTAINER extends Container>
.max(); // Use highest vcpu as scale factor
}
- public OptionalDouble jettyThreadpoolSizeFactor() {
- return jettyThreadpoolSizeFactor > 0 ? OptionalDouble.of(jettyThreadpoolSizeFactor) : OptionalDouble.empty();
- }
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/http/JettyHttpServer.java b/config-model/src/main/java/com/yahoo/vespa/model/container/http/JettyHttpServer.java
index 1b0592a4855..5b8df11e6d1 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/http/JettyHttpServer.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/http/JettyHttpServer.java
@@ -78,10 +78,8 @@ public class JettyHttpServer extends SimpleComponent implements ServerConfig.Pro
private void configureJettyThreadpool(ServerConfig.Builder builder) {
if (cluster == null) return;
double vcpu = cluster.vcpu().orElse(0);
- double scaleFactor = cluster.jettyThreadpoolSizeFactor().orElse(0);
- if (vcpu > 0 && scaleFactor > 0) {
- int minThreads = 16;
- int threads = minThreads + (int) Math.ceil(vcpu * scaleFactor);
+ if (vcpu > 0) {
+ int threads = 16 + (int) Math.ceil(vcpu);
builder.maxWorkerThreads(threads).minWorkerThreads(threads);
}
}
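With the scale-factor flag removed, the sizing above reduces to a fixed rule: when the cluster's vcpu count is known, both min and max Jetty worker threads are pinned to 16 + ceil(vcpu). A standalone sketch of that arithmetic follows; ServerConfig.Builder is replaced by plain printing and the sample vcpu values are illustrative.

    // Worked example of the Jetty worker-thread sizing rule above.
    public class JettyThreadpoolSizing {

        static int workerThreads(double vcpu) {
            return 16 + (int) Math.ceil(vcpu);
        }

        public static void main(String[] args) {
            // vcpu=0.5 -> 17, vcpu=2.0 -> 18, vcpu=4.0 -> 20, vcpu=16.5 -> 33
            for (double vcpu : new double[] {0.5, 2.0, 4.0, 16.5}) {
                System.out.printf("vcpu=%.1f -> min=max=%d threads%n", vcpu, workerThreads(vcpu));
            }
        }
    }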
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
index 37dd97a49b1..d76d177fd21 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
@@ -180,7 +180,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
addProcessing(deployState, spec, cluster);
addSearch(deployState, spec, cluster);
addDocproc(deployState, spec, cluster);
- addDocumentApi(deployState, spec, cluster); // NOTE: Must be done after addSearch
+ addDocumentApi(spec, cluster); // NOTE: Must be done after addSearch
cluster.addDefaultHandlersExceptStatus();
addStatusHandlers(cluster, context.getDeployState().isHosted());
@@ -401,8 +401,8 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
cluster.addServlet(new ServletBuilder().build(deployState, cluster, servletElem));
}
- private void addDocumentApi(DeployState deployState, Element spec, ApplicationContainerCluster cluster) {
- ContainerDocumentApi containerDocumentApi = buildDocumentApi(deployState, cluster, spec);
+ private void addDocumentApi(Element spec, ApplicationContainerCluster cluster) {
+ ContainerDocumentApi containerDocumentApi = buildDocumentApi(cluster, spec);
if (containerDocumentApi == null) return;
cluster.setDocumentApi(containerDocumentApi);
@@ -829,11 +829,11 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
return result;
}
- private ContainerDocumentApi buildDocumentApi(DeployState deployState, ApplicationContainerCluster cluster, Element spec) {
+ private ContainerDocumentApi buildDocumentApi(ApplicationContainerCluster cluster, Element spec) {
Element documentApiElement = XML.getChild(spec, "document-api");
if (documentApiElement == null) return null;
- ContainerDocumentApi.Options documentApiOptions = DocumentApiOptionsBuilder.build(deployState, documentApiElement);
+ ContainerDocumentApi.Options documentApiOptions = DocumentApiOptionsBuilder.build(documentApiElement);
return new ContainerDocumentApi(cluster, documentApiOptions);
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/DocumentApiOptionsBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/DocumentApiOptionsBuilder.java
index 2a64a194d37..b991608eb48 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/DocumentApiOptionsBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/DocumentApiOptionsBuilder.java
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.container.xml;
-import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.text.XML;
import com.yahoo.vespa.model.clients.ContainerDocumentApi;
import com.yahoo.vespa.model.container.ContainerThreadpool;
@@ -20,12 +19,8 @@ public class DocumentApiOptionsBuilder {
private static final Logger log = Logger.getLogger(DocumentApiOptionsBuilder.class.getName());
- public static ContainerDocumentApi.Options build(DeployState deployState, Element spec) {
- return new ContainerDocumentApi.Options(
- getBindings(spec),
- threadpoolOptions(spec, "rest-api"),
- threadpoolOptions(spec, "http-client-api"),
- deployState.getProperties().useNewRestapiHandler());
+ public static ContainerDocumentApi.Options build(Element spec) {
+ return new ContainerDocumentApi.Options(getBindings(spec), threadpoolOptions(spec, "http-client-api"));
}
private static ContainerThreadpool.UserOptions threadpoolOptions(Element spec, String elementName) {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
index 7c057264204..61852ae6bdd 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
@@ -335,6 +335,28 @@ public class ContentSearchCluster extends AbstractConfigProducer implements Prot
.collect(Collectors.toList());
}
+ public List<NewDocumentType> getDocumentTypesWithStreamingCluster() {
+ List<NewDocumentType> streamingDocTypes = new ArrayList<>();
+ for (NewDocumentType type : documentDefinitions.values()) {
+ if (findStreamingCluster(type.getFullName().getName()).isPresent()) {
+ streamingDocTypes.add(type);
+ }
+ }
+ return streamingDocTypes;
+ }
+
+ public List<NewDocumentType> getDocumentTypesWithIndexedCluster() {
+ List<NewDocumentType> indexedDocTypes = new ArrayList<>();
+ for (NewDocumentType type : documentDefinitions.values()) {
+ if (findStreamingCluster(type.getFullName().getName()).isEmpty()
+ && hasIndexedCluster()
+ && getIndexed().hasDocumentDB(type.getFullName().getName())) {
+ indexedDocTypes.add(type);
+ }
+ }
+ return indexedDocTypes;
+ }
+
@Override
public void getConfig(ProtonConfig.Builder builder) {
builder.feeding.concurrency(0.50); // As if specified 1.0 in services.xml
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/FileStorProducer.java b/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/FileStorProducer.java
index 2e1a6662488..fd4de13be39 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/FileStorProducer.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/FileStorProducer.java
@@ -3,7 +3,6 @@ package com.yahoo.vespa.model.content.storagecluster;
import com.yahoo.config.model.api.ModelContext;
import com.yahoo.vespa.config.content.StorFilestorConfig;
-import com.yahoo.vespa.config.search.core.ProtonConfig;
import com.yahoo.vespa.model.builder.xml.dom.ModelElement;
import com.yahoo.vespa.model.content.cluster.ContentCluster;
@@ -47,6 +46,8 @@ public class FileStorProducer implements StorFilestorConfig.Producer {
private final ContentCluster cluster;
private final int reponseNumThreads;
private final StorFilestorConfig.Response_sequencer_type.Enum responseSequencerType;
+ private final boolean useAsyncMessageHandlingOnSchedule;
+ private final int mergeChunkSize;
private static StorFilestorConfig.Response_sequencer_type.Enum convertResponseSequencerType(String sequencerType) {
try {
@@ -55,11 +56,17 @@ public class FileStorProducer implements StorFilestorConfig.Producer {
return StorFilestorConfig.Response_sequencer_type.Enum.ADAPTIVE;
}
}
+ private static int alignUp2MiB(int value) {
+ final int twoMB = 0x200000;
+ return ((value + twoMB - 1)/twoMB) * twoMB;
+ }
public FileStorProducer(ModelContext.Properties properties, ContentCluster parent, Integer numThreads) {
this.numThreads = numThreads;
this.cluster = parent;
this.reponseNumThreads = properties.defaultNumResponseThreads();
this.responseSequencerType = convertResponseSequencerType(properties.responseSequencerType());
+ useAsyncMessageHandlingOnSchedule = properties.useAsyncMessageHandlingOnSchedule();
+ mergeChunkSize = alignUp2MiB(properties.mergeChunkSize()); // Align up to default huge page size.
}
@Override
@@ -70,6 +77,8 @@ public class FileStorProducer implements StorFilestorConfig.Producer {
builder.enable_multibit_split_optimalization(cluster.getPersistence().enableMultiLevelSplitting());
builder.num_response_threads(reponseNumThreads);
builder.response_sequencer_type(responseSequencerType);
+ builder.use_async_message_handling_on_schedule(useAsyncMessageHandlingOnSchedule);
+ builder.bucket_merge_chunk_size(mergeChunkSize);
}
}
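The bucket merge chunk size is rounded up to a multiple of 2 MiB (the default huge page size) before being written to stor-filestor config; with the TestProperties default of 0x400000 - 0x1000 (4 MiB minus 4 KiB), the aligned value is exactly 4 MiB. A worked sketch of the same arithmetic, standalone and outside the Vespa model classes:

    // Worked example of the alignUp2MiB rounding used by FileStorProducer above.
    public class MergeChunkAlignment {

        static int alignUp2MiB(int value) {
            final int twoMiB = 0x200000;
            return ((value + twoMiB - 1) / twoMiB) * twoMiB;
        }

        public static void main(String[] args) {
            int defaultChunk = 0x400000 - 0x1000;           // 4190208 bytes (4 MiB - 4 KiB)
            System.out.println(alignUp2MiB(defaultChunk));  // 4194304 (exactly 4 MiB)
            System.out.println(alignUp2MiB(1));             // 2097152 (rounded up to 2 MiB)
            System.out.println(alignUp2MiB(0x200000));      // 2097152 (already aligned)
        }
    }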
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorServerProducer.java b/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorServerProducer.java
index cb8221e3ad6..dc9a7cda32c 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorServerProducer.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorServerProducer.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.content.storagecluster;
+import com.yahoo.config.model.api.ModelContext;
import com.yahoo.vespa.config.content.core.StorServerConfig;
import com.yahoo.vespa.model.content.cluster.ContentCluster;
import com.yahoo.vespa.model.builder.xml.dom.ModelElement;
@@ -10,33 +11,42 @@ import com.yahoo.vespa.model.builder.xml.dom.ModelElement;
*/
public class StorServerProducer implements StorServerConfig.Producer {
public static class Builder {
- StorServerProducer build(ModelElement element) {
+ StorServerProducer build(ModelContext.Properties properties, ModelElement element) {
ModelElement tuning = element.child("tuning");
- if (tuning == null) {
- return new StorServerProducer(ContentCluster.getClusterId(element), null, null);
- }
+ StorServerProducer producer = new StorServerProducer(ContentCluster.getClusterId(element));
+ producer.setBucketDBStripeBits(properties.contentNodeBucketDBStripeBits());
+ if (tuning == null) return producer;
ModelElement merges = tuning.child("merges");
- if (merges == null) {
- return new StorServerProducer(ContentCluster.getClusterId(element), null, null);
- }
+ if (merges == null) return producer;
- return new StorServerProducer(ContentCluster.getClusterId(element),
- merges.integerAttribute("max-per-node"),
- merges.integerAttribute("max-queue-size"));
+ producer.setMaxMergesPerNode(merges.integerAttribute("max-per-node"))
+ .setMaxQueueSize(merges.integerAttribute("max-queue-size"));
+ return producer;
}
}
- private final String clusterName;
- private final Integer maxMergesPerNode;
- private final Integer queueSize;
+ private String clusterName;
+ private Integer maxMergesPerNode;
+ private Integer queueSize;
+ private Integer bucketDBStripeBits;
- public StorServerProducer(String clusterName, Integer maxMergesPerNode,
- Integer queueSize) {
+ private StorServerProducer setMaxMergesPerNode(Integer value) {
+ maxMergesPerNode = value;
+ return this;
+ }
+ private StorServerProducer setMaxQueueSize(Integer value) {
+ queueSize = value;
+ return this;
+ }
+ private StorServerProducer setBucketDBStripeBits(Integer value) {
+ bucketDBStripeBits = value;
+ return this;
+ }
+
+ public StorServerProducer(String clusterName) {
this.clusterName = clusterName;
- this.maxMergesPerNode = maxMergesPerNode;
- this.queueSize = queueSize;
}
@Override
@@ -53,5 +63,8 @@ public class StorServerProducer implements StorServerConfig.Producer {
if (queueSize != null) {
builder.max_merge_queue_size(queueSize);
}
+ if (bucketDBStripeBits != null) {
+ builder.content_node_bucket_db_stripe_bits(bucketDBStripeBits);
+ }
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorageCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorageCluster.java
index b9fd26c45ff..88b6833221d 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorageCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorageCluster.java
@@ -38,7 +38,7 @@ public class StorageCluster extends AbstractConfigProducer<StorageNode>
ContentCluster.getClusterId(clusterElem),
new FileStorProducer.Builder().build(deployState.getProperties(), cluster, clusterElem),
new IntegrityCheckerProducer.Builder().build(cluster, clusterElem),
- new StorServerProducer.Builder().build(clusterElem),
+ new StorServerProducer.Builder().build(deployState.getProperties(), clusterElem),
new StorVisitorProducer.Builder().build(clusterElem),
new PersistenceProducer.Builder().build(clusterElem));
}
diff --git a/config-model/src/main/resources/schema/containercluster.rnc b/config-model/src/main/resources/schema/containercluster.rnc
index 98ea696ceef..66438570d2f 100644
--- a/config-model/src/main/resources/schema/containercluster.rnc
+++ b/config-model/src/main/resources/schema/containercluster.rnc
@@ -219,9 +219,7 @@ DocumentApi = element document-api {
HttpClientApi?
}
-DocumentRestApi = element rest-api {
- Threadpool?
-}
+DocumentRestApi = element rest-api { empty }
HttpClientApi = element http-client-api {
Threadpool?
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ConfigChangeTestUtils.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ConfigChangeTestUtils.java
index 4fd2609eb67..f3f9022f6f0 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ConfigChangeTestUtils.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ConfigChangeTestUtils.java
@@ -37,6 +37,15 @@ public class ConfigChangeTestUtils {
return VespaRefeedAction.of(id, name, overrides, message, services, documentType, now);
}
+ public static VespaConfigChangeAction newReindexAction(ClusterSpec.Id id, String name, ValidationOverrides overrides, String message, Instant now) {
+ return VespaReindexAction.of(id, name, overrides, message, now);
+ }
+
+ public static VespaConfigChangeAction newReindexAction(ClusterSpec.Id id, String name, ValidationOverrides overrides, String message,
+ List<ServiceInfo> services, String documentType, Instant now) {
+ return VespaReindexAction.of(id, name, overrides, message, services, documentType, now);
+ }
+
public static List<ConfigChangeAction> normalizeServicesInActions(List<ConfigChangeAction> result) {
return result.stream()
.map(action -> ((VespaConfigChangeAction) action).modifyAction(
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/IndexingModeChangeValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/IndexingModeChangeValidatorTest.java
index ab56178bee3..70995a40181 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/IndexingModeChangeValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/IndexingModeChangeValidatorTest.java
@@ -2,7 +2,7 @@
package com.yahoo.vespa.model.application.validation.change;
import com.yahoo.config.model.api.ConfigChangeAction;
-import com.yahoo.config.model.api.ConfigChangeRefeedAction;
+import com.yahoo.config.model.api.ConfigChangeReindexAction;
import com.yahoo.config.provision.Environment;
import com.yahoo.vespa.model.VespaModel;
import com.yahoo.vespa.model.application.validation.ValidationTester;
@@ -29,20 +29,20 @@ public class IndexingModeChangeValidatorTest {
List<ConfigChangeAction> changeActions =
tester.deploy(oldModel, getServices(AbstractSearchCluster.IndexingMode.STREAMING), Environment.prod, validationOverrides).getSecond();
- assertRefeedChange(true, // allowed=true due to validation override
- "Cluster 'default' changed indexing mode from 'indexed' to 'streaming'",
+ assertReindexingChange(true, // allowed=true due to validation override
+ "Document type 'music' in cluster 'default' changed indexing mode from 'indexed' to 'streaming'",
changeActions);
}
- private void assertRefeedChange(boolean allowed, String message, List<ConfigChangeAction> changeActions) {
- List<ConfigChangeAction> refeedActions = changeActions.stream()
- .filter(a -> a instanceof ConfigChangeRefeedAction)
+ private void assertReindexingChange(boolean allowed, String message, List<ConfigChangeAction> changeActions) {
+ List<ConfigChangeAction> reindexingActions = changeActions.stream()
+ .filter(a -> a instanceof ConfigChangeReindexAction)
.collect(Collectors.toList());
- assertEquals(1, refeedActions.size());
- assertEquals(allowed, refeedActions.get(0).allowed());
- assertTrue(refeedActions.get(0) instanceof ConfigChangeRefeedAction);
- assertEquals("indexing-mode-change", ((ConfigChangeRefeedAction)refeedActions.get(0)).name());
- assertEquals(message, refeedActions.get(0).getMessage());
+ assertEquals(1, reindexingActions.size());
+ assertEquals(allowed, reindexingActions.get(0).allowed());
+ assertTrue(reindexingActions.get(0) instanceof ConfigChangeReindexAction);
+ assertEquals("indexing-mode-change", ((ConfigChangeReindexAction)reindexingActions.get(0)).name());
+ assertEquals(message, reindexingActions.get(0).getMessage());
}
private static final String getServices(AbstractSearchCluster.IndexingMode indexingMode) {
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/DocumentDatabaseChangeValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/DocumentDatabaseChangeValidatorTest.java
index 60b17142340..a4fbf474a7f 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/DocumentDatabaseChangeValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/DocumentDatabaseChangeValidatorTest.java
@@ -10,6 +10,7 @@ import java.time.Instant;
import java.util.Arrays;
import java.util.List;
+import static com.yahoo.vespa.model.application.validation.change.ConfigChangeTestUtils.newReindexAction;
import static com.yahoo.vespa.model.application.validation.change.ConfigChangeTestUtils.newRestartAction;
import static com.yahoo.vespa.model.application.validation.change.ConfigChangeTestUtils.newRefeedAction;
@@ -51,7 +52,7 @@ public class DocumentDatabaseChangeValidatorTest {
"Field 'f1' changed: add attribute aspect"),
newRestartAction(ClusterSpec.Id.from("test"),
"Field 'f4.s1' changed: add attribute aspect"),
- newRefeedAction(ClusterSpec.Id.from("test"),
+ newReindexAction(ClusterSpec.Id.from("test"),
"indexing-change",
ValidationOverrides.empty,
"Field 'f2' changed: add index aspect, indexing script: '{ input f2 | summary f2; }' -> " +
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidatorTest.java
index 8ffd02a4381..9f418476a24 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidatorTest.java
@@ -1,11 +1,11 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.application.validation.change.search;
+import com.yahoo.config.application.api.ValidationOverrides;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.vespa.indexinglanguage.expressions.ScriptExpression;
-import com.yahoo.config.application.api.ValidationOverrides;
import com.yahoo.vespa.model.application.validation.change.VespaConfigChangeAction;
-import com.yahoo.vespa.model.application.validation.change.VespaRefeedAction;
+import com.yahoo.vespa.model.application.validation.change.VespaReindexAction;
import org.junit.Test;
import java.time.Instant;
@@ -50,12 +50,12 @@ public class IndexingScriptChangeValidatorTest {
private static final String FIELD = "field f1 type string";
private static final String FIELD_F2 = "field f2 type string";
- private static VespaConfigChangeAction expectedAction(String changedMsg, String fromScript, String toScript) {
- return expectedAction("f1", changedMsg, fromScript, toScript);
+ private static VespaConfigChangeAction expectedReindexingAction(String changedMsg, String fromScript, String toScript) {
+ return expectedReindexingAction("f1", changedMsg, fromScript, toScript);
}
- private static VespaConfigChangeAction expectedAction(String field, String changedMsg, String fromScript, String toScript) {
- return VespaRefeedAction.of(ClusterSpec.Id.from("test"),
+ private static VespaConfigChangeAction expectedReindexingAction(String field, String changedMsg, String fromScript, String toScript) {
+ return VespaReindexAction.of(ClusterSpec.Id.from("test"),
"indexing-change",
ValidationOverrides.empty,
"Field '" + field + "' changed: " +
@@ -65,67 +65,67 @@ public class IndexingScriptChangeValidatorTest {
}
@Test
- public void requireThatAddingIndexAspectRequireRefeed() throws Exception {
+ public void requireThatAddingIndexAspectRequireReindexing() throws Exception {
new Fixture(FIELD + " { indexing: summary }",
FIELD + " { indexing: index | summary }").
- assertValidation(expectedAction("add index aspect",
+ assertValidation(expectedReindexingAction("add index aspect",
"{ input f1 | summary f1; }",
"{ input f1 | tokenize normalize stem:\"BEST\" | index f1 | summary f1; }"));
}
@Test
- public void requireThatRemovingIndexAspectRequireRefeed() throws Exception {
+ public void requireThatRemovingIndexAspectRequireReindexing() throws Exception {
new Fixture(FIELD + " { indexing: index | summary }",
FIELD + " { indexing: summary }").
- assertValidation(expectedAction("remove index aspect",
+ assertValidation(expectedReindexingAction("remove index aspect",
"{ input f1 | tokenize normalize stem:\"BEST\" | index f1 | summary f1; }",
"{ input f1 | summary f1; }"));
}
@Test
- public void requireThatChangingStemmingRequireRefeed() throws Exception {
+ public void requireThatChangingStemmingRequireReindexing() throws Exception {
new Fixture(FIELD + " { indexing: index }",
FIELD + " { indexing: index \n stemming: none }").
- assertValidation(expectedAction("stemming: 'best' -> 'none'",
+ assertValidation(expectedReindexingAction("stemming: 'best' -> 'none'",
"{ input f1 | tokenize normalize stem:\"BEST\" | index f1; }",
"{ input f1 | tokenize normalize | index f1; }"));
}
@Test
- public void requireThatChangingNormalizingRequireRefeed() throws Exception {
+ public void requireThatChangingNormalizingRequireReindexing() throws Exception {
new Fixture(FIELD + " { indexing: index }",
FIELD + " { indexing: index \n normalizing: none }").
- assertValidation(expectedAction("normalizing: 'ACCENT' -> 'NONE'",
+ assertValidation(expectedReindexingAction("normalizing: 'ACCENT' -> 'NONE'",
"{ input f1 | tokenize normalize stem:\"BEST\" | index f1; }",
"{ input f1 | tokenize stem:\"BEST\" | index f1; }"));
}
@Test
- public void requireThatChangingMatchingRequireRefeed() throws Exception {
+ public void requireThatChangingMatchingRequireReindexing() throws Exception {
new Fixture(FIELD + " { indexing: index \n match: exact }",
FIELD + " { indexing: index \n match { gram \n gram-size: 3 } }").
- assertValidation(expectedAction("matching: 'exact' -> 'gram (size 3)', normalizing: 'LOWERCASE' -> 'CODEPOINT'",
+ assertValidation(expectedReindexingAction("matching: 'exact' -> 'gram (size 3)', normalizing: 'LOWERCASE' -> 'CODEPOINT'",
"{ input f1 | exact | index f1; }",
"{ input f1 | ngram 3 | index f1; }"));
}
@Test
- public void requireThatSettingDynamicSummaryRequireRefeed() throws Exception {
+ public void requireThatSettingDynamicSummaryRequireReindexing() throws Exception {
new Fixture(FIELD + " { indexing: summary }",
FIELD + " { indexing: summary \n summary: dynamic }").
- assertValidation(expectedAction("summary field 'f1' transform: 'none' -> 'dynamicteaser'",
+ assertValidation(expectedReindexingAction("summary field 'f1' transform: 'none' -> 'dynamicteaser'",
"{ input f1 | summary f1; }",
"{ input f1 | tokenize normalize stem:\"BEST\" | summary f1; }"));
}
@Test
- public void requireThatMultipleChangesRequireRefeed() throws Exception {
+ public void requireThatMultipleChangesRequireReindexing() throws Exception {
new Fixture(FIELD + " { indexing: index } " + FIELD_F2 + " { indexing: index }",
FIELD + " { indexing: index \n stemming: none } " + FIELD_F2 + " { indexing: index \n normalizing: none }").
- assertValidation(Arrays.asList(expectedAction("f1", "stemming: 'best' -> 'none'",
+ assertValidation(Arrays.asList(expectedReindexingAction("f1", "stemming: 'best' -> 'none'",
"{ input f1 | tokenize normalize stem:\"BEST\" | index f1; }",
"{ input f1 | tokenize normalize | index f1; }"),
- expectedAction("f2", "normalizing: 'ACCENT' -> 'NONE'",
+ expectedReindexingAction("f2", "normalizing: 'ACCENT' -> 'NONE'",
"{ input f2 | tokenize normalize stem:\"BEST\" | index f2; }",
"{ input f2 | tokenize stem:\"BEST\" | index f2; }")));
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java
index 3da872c2f70..42e6be05454 100755
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java
@@ -279,7 +279,6 @@ public class ContainerClusterTest {
MockRoot root = new MockRoot(
"foo",
new DeployState.Builder()
- .properties(new TestProperties().setJettyThreadpoolSizeFactor(4).setHostedVespa(true))
.applicationPackage(new MockApplicationPackage.Builder().build())
.modelHostProvisioner(hostProvisioner)
.build());
@@ -291,8 +290,8 @@ public class ContainerClusterTest {
root.freezeModelTopology();
ServerConfig cfg = root.getConfig(ServerConfig.class, "container0/c1/DefaultHttpServer");
- assertEquals(64, cfg.maxWorkerThreads());
- assertEquals(64, cfg.minWorkerThreads());
+ assertEquals(28, cfg.maxWorkerThreads());
+ assertEquals(28, cfg.minWorkerThreads());
}
@Test
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/ContainerDocumentApiBuilderTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/ContainerDocumentApiBuilderTest.java
index 9f061dcbd0a..3c733d5109d 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/ContainerDocumentApiBuilderTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/ContainerDocumentApiBuilderTest.java
@@ -116,13 +116,6 @@ public class ContainerDocumentApiBuilderTest extends ContainerModelBuilderTestBa
Element elem = DomBuilderTest.parse(
"<container id='cluster1' version='1.0'>",
" <document-api>",
- " <rest-api>",
- " <threadpool>",
- " <max-threads>20</max-threads>",
- " <min-threads>10</min-threads>",
- " <queue-size>0</queue-size>",
- " </threadpool>",
- " </rest-api>",
" <http-client-api>",
" <threadpool>",
" <max-threads>50</max-threads>",
@@ -135,12 +128,6 @@ public class ContainerDocumentApiBuilderTest extends ContainerModelBuilderTestBa
"</container>");
createModel(root, elem);
- ContainerThreadpoolConfig restApiThreadpoolConfig = root.getConfig(
- ContainerThreadpoolConfig.class, "cluster1/component/com.yahoo.document.restapi.resource.RestApi/threadpool@restapi-handler");
- assertEquals(20, restApiThreadpoolConfig.maxThreads());
- assertEquals(10, restApiThreadpoolConfig.minThreads());
- assertEquals(0, restApiThreadpoolConfig.queueSize());
-
ContainerThreadpoolConfig feedThreadpoolConfig = root.getConfig(
ContainerThreadpoolConfig.class, "cluster1/component/com.yahoo.vespa.http.server.FeedHandler/threadpool@feedapi-handler");
assertEquals(50, feedThreadpoolConfig.maxThreads());
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/StorageClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/StorageClusterTest.java
index 238f65b2988..3ccdc739410 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/StorageClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/StorageClusterTest.java
@@ -75,6 +75,7 @@ public class StorageClusterTest {
StorServerConfig config = new StorServerConfig(builder);
assertFalse(config.is_distributor());
assertEquals("foofighters", config.cluster_name());
+ assertEquals(0, config.content_node_bucket_db_stripe_bits());
}
@Test
public void testCommunicationManagerDefaults() {
@@ -96,6 +97,14 @@ public class StorageClusterTest {
}
@Test
+ public void testBucketDBStripeBitsControl() {
+ StorServerConfig.Builder builder = new StorServerConfig.Builder();
+ simpleCluster(new TestProperties().setContentNodeBucketDBStripeBits(7)).getConfig(builder);
+ StorServerConfig config = new StorServerConfig(builder);
+ assertEquals(7, config.content_node_bucket_db_stripe_bits());
+ }
+
+ @Test
public void testMerges() {
StorServerConfig.Builder builder = new StorServerConfig.Builder();
parse("" +
@@ -265,9 +274,8 @@ public class StorageClusterTest {
}
}
- @Test
- public void testFeatureFlagControlOfResponseSequencer() {
- StorageCluster stc = parse(
+ private StorageCluster simpleCluster(ModelContext.Properties properties) {
+ return parse(
"<cluster id=\"bees\">\n" +
" <documents/>" +
" <group>" +
@@ -275,15 +283,43 @@ public class StorageClusterTest {
" </group>" +
"</cluster>",
new Flavor(new FlavorsConfig.Flavor.Builder().name("test-flavor").minCpuCores(9).build()),
- new TestProperties().setResponseNumThreads(13).setResponseSequencerType("THROUGHPUT")
- );
+ properties);
+ }
+
+ @Test
+ public void testFeatureFlagControlOfResponseSequencer() {
StorFilestorConfig.Builder builder = new StorFilestorConfig.Builder();
- stc.getConfig(builder);
+ simpleCluster(new TestProperties().setResponseNumThreads(13).setResponseSequencerType("THROUGHPUT")).getConfig(builder);
StorFilestorConfig config = new StorFilestorConfig(builder);
assertEquals(13, config.num_response_threads());
assertEquals(StorFilestorConfig.Response_sequencer_type.THROUGHPUT, config.response_sequencer_type());
}
+ private void verifyMergeChunkSize(int expected, int value) {
+ StorFilestorConfig.Builder builder = new StorFilestorConfig.Builder();
+ simpleCluster(new TestProperties().setMergeChunkSize(value)).getConfig(builder);
+ StorFilestorConfig config = new StorFilestorConfig(builder);
+ assertEquals(expected, config.bucket_merge_chunk_size());
+ }
+
+ @Test
+ public void testFeatureFlagControlOfMergeChunkSize() {
+ verifyMergeChunkSize(0x200000, 13);
+ verifyMergeChunkSize(0x1600000, 0x1500000);
+ }
+
+ private void verifyAsyncMessageHandlingOnSchedule(boolean expected, boolean value) {
+ StorFilestorConfig.Builder builder = new StorFilestorConfig.Builder();
+ simpleCluster(new TestProperties().setAsyncMessageHandlingOnSchedule(value)).getConfig(builder);
+ StorFilestorConfig config = new StorFilestorConfig(builder);
+ assertEquals(expected, config.use_async_message_handling_on_schedule());
+ }
+ @Test
+ public void testFeatureFlagControlOfAsyncMessageHandlingOnSchedule() {
+ verifyAsyncMessageHandlingOnSchedule(false, false);
+ verifyAsyncMessageHandlingOnSchedule(true, true);
+ }
+
@Test
public void integrity_checker_explicitly_disabled_when_not_running_with_vds_provider() {
StorIntegritycheckerConfig.Builder builder = new StorIntegritycheckerConfig.Builder();
diff --git a/config-model/src/test/schema-test-files/services.xml b/config-model/src/test/schema-test-files/services.xml
index 683e2dc0b0d..51310682f78 100644
--- a/config-model/src/test/schema-test-files/services.xml
+++ b/config-model/src/test/schema-test-files/services.xml
@@ -158,13 +158,6 @@
<timeout>5.55</timeout>
<route>default</route>
<maxpendingdocs>100</maxpendingdocs>
- <rest-api>
- <threadpool>
- <max-threads>50</max-threads>
- <min-threads>10</min-threads>
- <queue-size>1000</queue-size>
- </threadpool>
- </rest-api>
<http-client-api>
<threadpool>
<max-threads>50</max-threads>
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/ApplicationTransaction.java b/config-provisioning/src/main/java/com/yahoo/config/provision/ApplicationTransaction.java
index d21f7cfe5ae..327dfd408b8 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/ApplicationTransaction.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/ApplicationTransaction.java
@@ -1,7 +1,6 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.provision;
-import com.yahoo.transaction.Mutex;
import com.yahoo.transaction.NestedTransaction;
import java.io.Closeable;
@@ -33,12 +32,7 @@ public class ApplicationTransaction implements Closeable {
@Override
public void close() {
- try {
- transaction.commit();
- }
- finally {
- lock.close();
- }
+ lock.close();
}
}
diff --git a/configdefinitions/src/vespa/reindexing.def b/configdefinitions/src/vespa/reindexing.def
index 468a10e8199..76ac79b6758 100644
--- a/configdefinitions/src/vespa/reindexing.def
+++ b/configdefinitions/src/vespa/reindexing.def
@@ -4,4 +4,4 @@
namespace=vespa.config.content.reindexing
# Epoch millis after which latest reprocessing may begin, per document type
-status{}.readyAtMillis int
+status{}.readyAtMillis long
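The widening to long is needed for correctness: an epoch-millis value for late 2020 is roughly 1 605 000 000 000, far beyond the 2 147 483 647 maximum of a 32-bit int, so the earlier int declaration could not hold any realistic "ready at" timestamp.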
diff --git a/configdefinitions/src/vespa/stor-filestor.def b/configdefinitions/src/vespa/stor-filestor.def
index b498eea3ff2..bab19c923d9 100644
--- a/configdefinitions/src/vespa/stor-filestor.def
+++ b/configdefinitions/src/vespa/stor-filestor.def
@@ -46,9 +46,8 @@ common_merge_chain_optimalization_minimum_size int default=64 restart
## Chunksize to use while merging buckets between nodes.
##
-## Default is set to 4 MB - 4k. This is to allow for malloc to waste some bytes
-## with tracking info or to align to 512b without passing allocation limit,
-## while still reading 4k blocks from disk.
+## Default is set to 4 MB.
+## Note that this will gradually be increased to reach stor-distributormanager:splitsize, which is currently 32M.
bucket_merge_chunk_size int default=4190208 restart
## When merging, it is possible to send more metadata than needed in order to
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
index 5d8cd43cc44..bda78ce5a71 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
@@ -38,6 +38,7 @@ import com.yahoo.vespa.config.server.application.HttpProxy;
import com.yahoo.vespa.config.server.application.TenantApplications;
import com.yahoo.vespa.config.server.configchange.ConfigChangeActions;
import com.yahoo.vespa.config.server.configchange.RefeedActions;
+import com.yahoo.vespa.config.server.configchange.ReindexActions;
import com.yahoo.vespa.config.server.configchange.RestartActions;
import com.yahoo.vespa.config.server.deploy.DeployHandlerLogger;
import com.yahoo.vespa.config.server.deploy.Deployment;
@@ -505,12 +506,13 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye
if (applicationTransaction.isPresent()) {
hostProvisioner.get().remove(applicationTransaction.get());
+ applicationTransaction.get().nested().commit();
} else {
transaction.commit();
}
return true;
} finally {
- applicationTransaction.ifPresent(ApplicationTransaction::close); // Commits transaction and releases lock
+ applicationTransaction.ifPresent(ApplicationTransaction::close);
}
}
@@ -734,12 +736,13 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye
hostProvisioner.get().activate(session.getAllocatedHosts().getHosts(),
new ActivationContext(session.getSessionId()),
applicationTransaction.get());
+ applicationTransaction.get().nested().commit();
} else {
transaction.commit();
}
return new Activation(waiter, activeSession);
} finally {
- applicationTransaction.ifPresent(ApplicationTransaction::close); // Commits transaction and releases lock
+ applicationTransaction.ifPresent(ApplicationTransaction::close);
}
}
@@ -1005,6 +1008,13 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye
"Change(s) between active and new application that may require re-feed:\n" +
refeedActions.format());
}
+ ReindexActions reindexActions = actions.getReindexActions();
+ if ( ! reindexActions.isEmpty()) {
+ boolean allAllowed = reindexActions.getEntries().stream().allMatch(ReindexActions.Entry::allowed);
+ logger.log(allAllowed ? Level.INFO : Level.WARNING,
+ "Change(s) between active and new application that may require re-index:\n" +
+ reindexActions.format());
+ }
}
private String getLogServerURI(ApplicationId applicationId, Optional<String> hostname) {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationCuratorDatabase.java b/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationCuratorDatabase.java
index c4cc1cefc6e..8c3c9882aa1 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationCuratorDatabase.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationCuratorDatabase.java
@@ -5,88 +5,206 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.TenantName;
import com.yahoo.path.Path;
import com.yahoo.slime.Cursor;
+import com.yahoo.slime.Inspector;
import com.yahoo.slime.Slime;
import com.yahoo.slime.SlimeUtils;
-import com.yahoo.vespa.config.server.application.ReindexingStatus.Status;
+import com.yahoo.text.Utf8;
+import com.yahoo.transaction.Transaction;
+import com.yahoo.vespa.config.server.application.ApplicationReindexing.Cluster;
+import com.yahoo.vespa.config.server.application.ApplicationReindexing.Status;
import com.yahoo.vespa.config.server.tenant.TenantRepository;
import com.yahoo.vespa.curator.Curator;
+import com.yahoo.vespa.curator.Lock;
+import com.yahoo.vespa.curator.transaction.CuratorOperations;
+import com.yahoo.vespa.curator.transaction.CuratorTransaction;
import com.yahoo.yolean.Exceptions;
+import java.time.Duration;
import java.time.Instant;
+import java.util.List;
+import java.util.Optional;
+import java.util.concurrent.ExecutorService;
+import java.util.stream.Collectors;
import static java.util.stream.Collectors.toUnmodifiableMap;
/**
- * Stores data and holds locks for application, backed by a {@link Curator}.
+ * Stores data and holds locks for the applications of a tenant, backed by a {@link Curator}.
+ *
+ * Each application is stored under /config/v2/tenants/&lt;tenant&gt;/applications/&lt;application&gt;;
+ * the root node contains the currently active session, if any. Children of this node may hold more data.
+ * Locks for synchronising writes to these paths, and changes to the config of this application, are found
+ * under /config/v2/tenants/&lt;tenant&gt;/locks/&lt;application&gt;.
*
* @author jonmv
*/
public class ApplicationCuratorDatabase {
+ final TenantName tenant;
+ final Path applicationsPath;
+ final Path locksPath;
+
private final Curator curator;
- public ApplicationCuratorDatabase(Curator curator) {
+ public ApplicationCuratorDatabase(TenantName tenant, Curator curator) {
+ this.tenant = tenant;
+ this.applicationsPath = TenantRepository.getApplicationsPath(tenant);
+ this.locksPath = TenantRepository.getLocksPath(tenant);
this.curator = curator;
}
- public ReindexingStatus readReindexingStatus(ApplicationId id) {
+ /** Returns the lock for changing the session status of the given application. */
+ public Lock lock(ApplicationId id) {
+ return curator.lock(lockPath(id), Duration.ofMinutes(1)); // These locks shouldn't be held for very long.
+ }
+
+ public boolean exists(ApplicationId id) {
+ return curator.exists(applicationPath(id));
+ }
+
+ /**
+ * Creates a node for the given application, marking its existence.
+ */
+ public void createApplication(ApplicationId id) {
+ if ( ! id.tenant().equals(tenant))
+ throw new IllegalArgumentException("Cannot write application id '" + id + "' for tenant '" + tenant + "'");
+ try (Lock lock = lock(id)) {
+ curator.create(applicationPath(id));
+ }
+ }
+
+ /**
+ * Returns a transaction which writes the given session id as the currently active for the given application.
+ *
+ * @param applicationId An {@link ApplicationId} that represents an active application.
+ * @param sessionId Id of the session containing the application package for this id.
+ */
+ public Transaction createPutTransaction(ApplicationId applicationId, long sessionId) {
+ return new CuratorTransaction(curator).add(CuratorOperations.setData(applicationPath(applicationId).getAbsolute(), Utf8.toAsciiBytes(sessionId)));
+ }
+
+ /**
+ * Returns a transaction which deletes this application.
+ */
+ public CuratorTransaction createDeleteTransaction(ApplicationId applicationId) {
+ return CuratorTransaction.from(CuratorOperations.deleteAll(applicationPath(applicationId).getAbsolute(), curator), curator);
+ }
+
+ /**
+ * Returns the active session id for the given application.
+ * Returns Optional.empty if application not found or no active session exists.
+ */
+ public Optional<Long> activeSessionOf(ApplicationId id) {
+ Optional<byte[]> data = curator.getData(applicationPath(id));
+ return (data.isEmpty() || data.get().length == 0)
+ ? Optional.empty()
+ : data.map(bytes -> Long.parseLong(Utf8.toString(bytes)));
+ }
+
+ /**
+ * List the active applications of a tenant in this config server.
+ *
+ * @return a list of {@link ApplicationId}s that are active.
+ */
+ public List<ApplicationId> activeApplications() {
+ return curator.getChildren(applicationsPath).stream()
+ .sorted()
+ .map(ApplicationId::fromSerializedForm)
+ .filter(id -> activeSessionOf(id).isPresent())
+ .collect(Collectors.toUnmodifiableList());
+ }
+
+ public Optional<ApplicationReindexing> readReindexingStatus(ApplicationId id) {
return curator.getData(reindexingDataPath(id))
- .map(data -> ReindexingStatusSerializer.fromBytes(data))
- .orElse(ReindexingStatus.empty());
+ .map(data -> ReindexingStatusSerializer.fromBytes(data));
}
- public void writeReindexingStatus(ApplicationId id, ReindexingStatus status) {
+ public void writeReindexingStatus(ApplicationId id, ApplicationReindexing status) {
curator.set(reindexingDataPath(id), ReindexingStatusSerializer.toBytes(status));
}
- private static Path applicationsRoot(TenantName tenant) {
- return TenantRepository.getApplicationsPath(tenant);
+
+ /** Sets up a directory cache over the applications path of this tenant, using the given executor. */
+ public Curator.DirectoryCache createApplicationsPathCache(ExecutorService zkCacheExecutor) {
+ return curator.createDirectoryCache(applicationsPath.getAbsolute(), false, false, zkCacheExecutor);
+ }
+
+
+ private Path lockPath(ApplicationId id) {
+ return locksPath.append(id.serializedForm());
}
- private static Path applicationPath(ApplicationId id) {
- return applicationsRoot(id.tenant()).append(id.serializedForm());
+ private Path applicationPath(ApplicationId id) {
+ return applicationsPath.append(id.serializedForm());
}
- private static Path reindexingDataPath(ApplicationId id) {
+ private Path reindexingDataPath(ApplicationId id) {
return applicationPath(id).append("reindexing");
}
private static class ReindexingStatusSerializer {
+ private static final String COMMON = "common";
+ private static final String CLUSTERS = "clusters";
private static final String PENDING = "pending";
private static final String READY = "ready";
private static final String TYPE = "type";
+ private static final String NAME = "name";
private static final String GENERATION = "generation";
private static final String EPOCH_MILLIS = "epochMillis";
- private static byte[] toBytes(ReindexingStatus reindexingStatus) {
+ private static byte[] toBytes(ApplicationReindexing reindexing) {
Cursor root = new Slime().setObject();
- Cursor pendingArray = root.setArray(PENDING);
- reindexingStatus.pending().forEach((type, generation) -> {
- Cursor pendingObject = pendingArray.addObject();
- pendingObject.setString(TYPE, type);
- pendingObject.setLong(GENERATION, generation);
- });
- Cursor readyArray = root.setArray(READY);
- reindexingStatus.status().forEach((type, status) -> {
- Cursor readyObject = readyArray.addObject();
- readyObject.setString(TYPE, type);
- readyObject.setLong(EPOCH_MILLIS, status.ready().toEpochMilli());
+ setStatus(root.setObject(COMMON), reindexing.common());
+
+ Cursor clustersArray = root.setArray(CLUSTERS);
+ reindexing.clusters().forEach((name, cluster) -> {
+ Cursor clusterObject = clustersArray.addObject();
+ clusterObject.setString(NAME, name);
+ setStatus(clusterObject.setObject(COMMON), cluster.common());
+
+ Cursor pendingArray = clusterObject.setArray(PENDING);
+ cluster.pending().forEach((type, generation) -> {
+ Cursor pendingObject = pendingArray.addObject();
+ pendingObject.setString(TYPE, type);
+ pendingObject.setLong(GENERATION, generation);
+ });
+
+ Cursor readyArray = clusterObject.setArray(READY);
+ cluster.ready().forEach((type, status) -> {
+ Cursor statusObject = readyArray.addObject();
+ statusObject.setString(TYPE, type);
+ setStatus(statusObject, status);
+ });
});
return Exceptions.uncheck(() -> SlimeUtils.toJsonBytes(root));
}
- private static ReindexingStatus fromBytes(byte[] data) {
+ private static void setStatus(Cursor statusObject, Status status) {
+ statusObject.setLong(EPOCH_MILLIS, status.ready().toEpochMilli());
+ }
+
+ private static ApplicationReindexing fromBytes(byte[] data) {
Cursor root = SlimeUtils.jsonToSlimeOrThrow(data).get();
- return new ReindexingStatus(SlimeUtils.entriesStream(root.field(PENDING))
- .filter(entry -> entry.field(TYPE).valid() && entry.field(GENERATION).valid())
- .collect(toUnmodifiableMap(entry -> entry.field(TYPE).asString(),
- entry -> entry.field(GENERATION).asLong())),
- SlimeUtils.entriesStream(root.field(READY))
- .filter(entry -> entry.field(TYPE).valid() && entry.field(EPOCH_MILLIS).valid())
- .collect(toUnmodifiableMap(entry -> entry.field(TYPE).asString(),
- entry -> new Status(Instant.ofEpochMilli(entry.field(EPOCH_MILLIS).asLong())))));
+ return new ApplicationReindexing(getStatus(root.field(COMMON)),
+ SlimeUtils.entriesStream(root.field(CLUSTERS))
+ .collect(toUnmodifiableMap(object -> object.field(NAME).asString(),
+ object -> getCluster(object))));
+ }
+
+ private static Cluster getCluster(Inspector object) {
+ return new Cluster(getStatus(object.field(COMMON)),
+ SlimeUtils.entriesStream(object.field(PENDING))
+ .collect(toUnmodifiableMap(entry -> entry.field(TYPE).asString(),
+ entry -> entry.field(GENERATION).asLong())),
+ SlimeUtils.entriesStream(object.field(READY))
+ .collect(toUnmodifiableMap(entry -> entry.field(TYPE).asString(),
+ entry -> getStatus(entry))));
+ }
+
+ private static Status getStatus(Inspector statusObject) {
+ return new Status(Instant.ofEpochMilli(statusObject.field(EPOCH_MILLIS).asLong()));
}
}
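A minimal usage sketch of the reworked class (illustrative only, not part of the patch); it assumes the in-memory MockCurator test helper is available, and the tenant, application, cluster and document-type names are made up:

import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.TenantName;
import com.yahoo.vespa.config.server.application.ApplicationCuratorDatabase;
import com.yahoo.vespa.config.server.application.ApplicationReindexing;
import com.yahoo.vespa.curator.mock.MockCurator;

import java.time.Instant;

class ApplicationCuratorDatabaseSketch {
    public static void main(String[] args) {
        ApplicationId id = ApplicationId.from("default", "music", "default");
        ApplicationCuratorDatabase db = new ApplicationCuratorDatabase(TenantName.from("default"), new MockCurator());
        db.createApplication(id); // creates the application node, marking its existence (takes the application lock internally)
        // No reindexing status is stored yet, so fall back to "ready for the whole application now"
        ApplicationReindexing reindexing = db.readReindexingStatus(id)
                                             .orElse(ApplicationReindexing.ready(Instant.now()));
        // Require document type 'music' in cluster 'content' to reindex once config generation 3 has converged
        db.writeReindexingStatus(id, reindexing.withPending("content", "music", 3));
    }
}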
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationReindexing.java b/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationReindexing.java
new file mode 100644
index 00000000000..eef65d969fc
--- /dev/null
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationReindexing.java
@@ -0,0 +1,229 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.config.server.application;
+
+import com.yahoo.config.model.api.Reindexing;
+
+import java.time.Instant;
+import java.util.Map;
+import java.util.Objects;
+import java.util.stream.Stream;
+
+import static java.util.Objects.requireNonNull;
+import static java.util.stream.Collectors.toUnmodifiableMap;
+
+/**
+ * Pending and ready reindexing per document type. Each document type can have either a pending or a ready reindexing.
+ * Each cluster may also have a global status, which is merged with its document type-specific statuses by selecting
+ * whichever status has the latest ready instant. The application may also have a global status, which is merged likewise.
+ * This is immutable.
+ *
+ * @author jonmv
+ */
+public class ApplicationReindexing implements Reindexing {
+
+ private final Status common;
+ private final Map<String, Cluster> clusters;
+
+ public ApplicationReindexing(Status common, Map<String, Cluster> clusters) {
+ this.common = requireNonNull(common);
+ this.clusters = Map.copyOf(clusters);
+ }
+
+ /** Returns reindexing for the whole application, ready at the given instant. */
+ public static ApplicationReindexing ready(Instant now) {
+ return new ApplicationReindexing(new Status(now), Map.of());
+ }
+
+ /** Returns a copy of this with common reindexing for the whole application ready at the given instant. */
+ public ApplicationReindexing withReady(Instant readyAt) {
+ return new ApplicationReindexing(new Status(readyAt), clusters);
+ }
+
+ /** Returns a copy of this with common reindexing for the given cluster ready at the given instant. */
+ public ApplicationReindexing withReady(String cluster, Instant readyAt) {
+ Cluster current = clusters.getOrDefault(cluster, Cluster.empty);
+ Cluster modified = new Cluster(new Status(readyAt), current.pending, current.ready);
+ return new ApplicationReindexing(common, with(cluster, modified, clusters));
+ }
+
+ /** Returns a copy of this with reindexing for the given document type in the given cluster ready at the given instant. */
+ public ApplicationReindexing withReady(String cluster, String documentType, Instant readyAt) {
+ Cluster current = clusters.getOrDefault(cluster, Cluster.empty);
+ Cluster modified = new Cluster(current.common,
+ without(documentType, current.pending),
+ with(documentType, new Status(readyAt), current.ready));
+ return new ApplicationReindexing(common, with(cluster, modified, clusters));
+ }
+
+ /** Returns a copy of this with a pending reindexing at the given generation, for the given document type in the given cluster. */
+ public ApplicationReindexing withPending(String cluster, String documentType, long requiredGeneration) {
+ Cluster current = clusters.getOrDefault(cluster, Cluster.empty);
+ Cluster modified = new Cluster(current.common,
+ with(documentType, requirePositive(requiredGeneration), current.pending),
+ without(documentType, current.ready));
+ return new ApplicationReindexing(common, with(cluster, modified, clusters));
+ }
+
+ /** The common reindexing status for the whole application. */
+ public Status common() {
+ return common;
+ }
+
+ /** The reindexing status of each of the clusters of this application. */
+ public Map<String, Cluster> clusters() { return clusters; }
+
+ @Override
+ public Reindexing.Status status(String cluster, String documentType) {
+ if (clusters.containsKey(cluster)) {
+ if (clusters.get(cluster).pending().containsKey(documentType))
+ return () -> Instant.MAX;
+
+ Status documentStatus = clusters.get(cluster).ready().get(documentType);
+ Status clusterStatus = clusters.get(cluster).common();
+ if (documentStatus == null || documentStatus.ready().isBefore(clusterStatus.ready()))
+ documentStatus = clusterStatus;
+
+ if (documentStatus.ready().isAfter(common().ready()))
+ return documentStatus;
+ }
+ return common();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ ApplicationReindexing that = (ApplicationReindexing) o;
+ return common.equals(that.common) &&
+ clusters.equals(that.clusters);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(common, clusters);
+ }
+
+ @Override
+ public String toString() {
+ return "ApplicationReindexing{" +
+ "common=" + common +
+ ", clusters=" + clusters +
+ '}';
+ }
+
+
+ /** Reindexing status for a single content cluster in an application. */
+ public static class Cluster {
+
+ private static final Cluster empty = new Cluster(Status.ALWAYS_READY, Map.of(), Map.of());
+
+ private final Status common;
+ private final Map<String, Long> pending;
+ private final Map<String, Status> ready;
+
+ Cluster(Status common, Map<String, Long> pending, Map<String, Status> ready) {
+ this.common = requireNonNull(common);
+ this.pending = Map.copyOf(pending);
+ this.ready = Map.copyOf(ready);
+ }
+
+ /** The common reindexing status for all document types in this cluster. */
+ public Status common() {
+ return common;
+ }
+
+ /** The config generation at which the application must have converged for the latest reindexing to begin, per document type. */
+ public Map<String, Long> pending() {
+ return pending;
+ }
+
+ /** The reindexing status for ready document types in this cluster. */
+ public Map<String, Status> ready() {
+ return ready;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ Cluster cluster = (Cluster) o;
+ return common.equals(cluster.common) &&
+ pending.equals(cluster.pending) &&
+ ready.equals(cluster.ready);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(common, pending, ready);
+ }
+
+ @Override
+ public String toString() {
+ return "Cluster{" +
+ "common=" + common +
+ ", pending=" + pending +
+ ", ready=" + ready +
+ '}';
+ }
+
+ }
+
+
+ /** Reindexing status common to an application, one of its clusters, or a single document type in a cluster. */
+ public static class Status implements Reindexing.Status {
+
+ /** Always ready, i.e., ignored when joining with more specific statuses. */
+ private static final Status ALWAYS_READY = new Status(Instant.EPOCH);
+
+ private final Instant ready;
+
+ Status(Instant ready) {
+ this.ready = requireNonNull(ready);
+ }
+
+ @Override
+ public Instant ready() { return ready; }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ Status status = (Status) o;
+ return ready.equals(status.ready);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(ready);
+ }
+
+ @Override
+ public String toString() {
+ return "ready at " + ready;
+ }
+
+ }
+
+
+ private static long requirePositive(long generation) {
+ if (generation <= 0)
+ throw new IllegalArgumentException("Generation must be positive, but was " + generation);
+
+ return generation;
+ }
+
+ private static <T> Map<String, T> without(String removed, Map<String, T> map) {
+ return map.keySet().stream()
+ .filter(key -> ! removed.equals(key))
+ .collect(toUnmodifiableMap(key -> key,
+ key -> map.get(key)));
+ }
+
+ private static <T> Map<String, T> with(String added, T value, Map<String, T> map) {
+ return Stream.concat(Stream.of(added), map.keySet().stream()).distinct()
+ .collect(toUnmodifiableMap(key -> key,
+ key -> added.equals(key) ? value
+ : map.get(key)));
+ }
+
+}
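To make the status() resolution above concrete, a small sketch with hypothetical instants and names; each comment states the value the code above returns:

import com.yahoo.vespa.config.server.application.ApplicationReindexing;

import java.time.Instant;

class ApplicationReindexingSketch {
    public static void main(String[] args) {
        Instant t0 = Instant.ofEpochMilli(1_000), t1 = Instant.ofEpochMilli(2_000), t2 = Instant.ofEpochMilli(3_000);
        ApplicationReindexing reindexing = ApplicationReindexing.ready(t0)                          // application-wide status: t0
                                                                .withReady("content", t1)           // cluster-wide status: t1
                                                                .withReady("content", "music", t2)  // per-document-type status: t2
                                                                .withPending("content", "book", 5); // pending at generation 5

        reindexing.status("content", "music").ready();   // t2: the latest of the three applicable statuses
        reindexing.status("content", "other").ready();   // t1: an unknown type falls back to the cluster status
        reindexing.status("elsewhere", "music").ready(); // t0: an unknown cluster falls back to the application status
        reindexing.status("content", "book").ready();    // Instant.MAX: a pending reindexing is never ready
    }
}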
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/application/ReindexingStatus.java b/configserver/src/main/java/com/yahoo/vespa/config/server/application/ReindexingStatus.java
deleted file mode 100644
index 465fe3a670c..00000000000
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/application/ReindexingStatus.java
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.config.server.application;
-
-import com.yahoo.config.model.api.Reindexing;
-
-import java.time.Instant;
-import java.util.Map;
-import java.util.Objects;
-import java.util.stream.Stream;
-
-import static java.util.stream.Collectors.toUnmodifiableMap;
-
-/**
- * Pending and ready reindexing per document type. Each document type can have either a pending or a ready reindexing.
- * This is immutable.
- *
- * @author jonmv
- */
-public class ReindexingStatus implements Reindexing {
-
- private static final ReindexingStatus empty = new ReindexingStatus(Map.of(), Map.of());
-
- private final Map<String, Long> pending;
- private final Map<String, Status> ready;
-
- ReindexingStatus(Map<String, Long> pending, Map<String, Status> ready) {
- this.pending = Map.copyOf(pending);
- this.ready = Map.copyOf(ready);
- }
-
- /** No reindexing pending or ready. */
- public static ReindexingStatus empty() {
- return empty;
- }
-
- /** Returns a copy of this with a pending reindexing at the given generation, for the given document type. */
- public ReindexingStatus withPending(String documentType, long requiredGeneration) {
- return new ReindexingStatus(with(documentType, requirePositive(requiredGeneration), pending),
- without(documentType, ready));
- }
-
- /** Returns a copy of this with reindexing for the given document type set ready at the given instant. */
- public ReindexingStatus withReady(String documentType, Instant readyAt) {
- return new ReindexingStatus(without(documentType, pending),
- with(documentType, new Status(readyAt), ready));
- }
-
- /** The config generation at which the application must have converged for the latest reindexing to begin, per document type. */
- public Map<String, Long> pending() {
- return pending;
- }
-
- @Override
- public Map<String, ? extends Reindexing.Status> status() {
- return ready;
- }
-
- private static long requirePositive(long generation) {
- if (generation <= 0)
- throw new IllegalArgumentException("Generation must be positive, but was " + generation);
-
- return generation;
- }
-
- private static <T> Map<String, T> without(String removed, Map<String, T> map) {
- return map.keySet().stream()
- .filter(key -> ! removed.equals(key))
- .collect(toUnmodifiableMap(key -> key,
- key -> map.get(key)));
- }
-
- private static <T> Map<String, T> with(String added, T value, Map<String, T> map) {
- return Stream.concat(Stream.of(added), map.keySet().stream()).distinct()
- .collect(toUnmodifiableMap(key -> key,
- key -> added.equals(key) ? value
- : map.get(key)));
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) return true;
- if (o == null || getClass() != o.getClass()) return false;
- ReindexingStatus that = (ReindexingStatus) o;
- return pending.equals(that.pending) &&
- ready.equals(that.ready);
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(pending, ready);
- }
-
- @Override
- public String toString() {
- return "ReindexingStatus{" +
- "pending=" + pending +
- ", ready=" + ready +
- '}';
- }
-
- static class Status implements Reindexing.Status {
-
- private final Instant ready;
-
- Status(Instant ready) {
- this.ready = Objects.requireNonNull(ready);
- }
-
- @Override
- public Instant ready() { return ready; }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) return true;
- if (o == null || getClass() != o.getClass()) return false;
- Status status = (Status) o;
- return ready.equals(status.ready);
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(ready);
- }
-
- @Override
- public String toString() {
- return "ready at " + ready;
- }
-
- }
-
-}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/application/TenantApplications.java b/configserver/src/main/java/com/yahoo/vespa/config/server/application/TenantApplications.java
index a01ce2e2cc3..7c54fd39a74 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/application/TenantApplications.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/application/TenantApplications.java
@@ -49,11 +49,7 @@ import java.util.stream.Collectors;
import static java.util.stream.Collectors.toSet;
/**
- * The applications of a tenant, backed by ZooKeeper.
- *
- * Each application is stored under /config/v2/tenants/&lt;tenant&gt;/applications/&lt;application&gt;,
- * the root contains the currently active session, if any. Locks for synchronising writes to these paths, and changes
- * to the config of this application, are found under /config/v2/tenants/&lt;tenant&gt;/locks/&lt;application&gt;.
+ * The applications of a tenant.
*
* @author Ulf Lilleengen
* @author jonmv
@@ -62,9 +58,7 @@ public class TenantApplications implements RequestHandler, HostValidator<Applica
private static final Logger log = Logger.getLogger(TenantApplications.class.getName());
- private final Curator curator;
- private final Path applicationsPath;
- private final Path locksPath;
+ private final ApplicationCuratorDatabase database;
private final Curator.DirectoryCache directoryCache;
private final Executor zkWatcherExecutor;
private final Metrics metrics;
@@ -81,12 +75,10 @@ public class TenantApplications implements RequestHandler, HostValidator<Applica
ExecutorService zkCacheExecutor, Metrics metrics, ReloadListener reloadListener,
ConfigserverConfig configserverConfig, HostRegistry<ApplicationId> hostRegistry,
TenantFileSystemDirs tenantFileSystemDirs) {
- this.curator = curator;
- this.applicationsPath = TenantRepository.getApplicationsPath(tenant);
- this.locksPath = TenantRepository.getLocksPath(tenant);
+ this.database = new ApplicationCuratorDatabase(tenant, curator);
this.tenant = tenant;
this.zkWatcherExecutor = command -> zkWatcherExecutor.execute(tenant, command);
- this.directoryCache = curator.createDirectoryCache(applicationsPath.getAbsolute(), false, false, zkCacheExecutor);
+ this.directoryCache = database.createApplicationsPathCache(zkCacheExecutor);
this.directoryCache.addListener(this::childEvent);
this.directoryCache.start();
this.metrics = metrics;
@@ -110,21 +102,20 @@ public class TenantApplications implements RequestHandler, HostValidator<Applica
new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName));
}
+ /** The Curator-backed database storing this tenant's applications. */
+ public ApplicationCuratorDatabase database() { return database; }
+
/**
* List the active applications of a tenant in this config server.
*
* @return a list of {@link ApplicationId}s that are active.
*/
public List<ApplicationId> activeApplications() {
- return curator.getChildren(applicationsPath).stream()
- .sorted()
- .map(ApplicationId::fromSerializedForm)
- .filter(id -> activeSessionOf(id).isPresent())
- .collect(Collectors.toUnmodifiableList());
+ return database().activeApplications();
}
public boolean exists(ApplicationId id) {
- return curator.exists(applicationPath(id));
+ return database().exists(id);
}
/**
@@ -132,10 +123,7 @@ public class TenantApplications implements RequestHandler, HostValidator<Applica
* Returns Optional.empty if application not found or no active session exists.
*/
public Optional<Long> activeSessionOf(ApplicationId id) {
- Optional<byte[]> data = curator.getData(applicationPath(id));
- return (data.isEmpty() || data.get().length == 0)
- ? Optional.empty()
- : data.map(bytes -> Long.parseLong(Utf8.toString(bytes)));
+ return database().activeSessionOf(id);
}
public boolean sessionExistsInFileSystem(long sessionId) {
@@ -149,18 +137,14 @@ public class TenantApplications implements RequestHandler, HostValidator<Applica
* @param sessionId Id of the session containing the application package for this id.
*/
public Transaction createPutTransaction(ApplicationId applicationId, long sessionId) {
- return new CuratorTransaction(curator).add(CuratorOperations.setData(applicationPath(applicationId).getAbsolute(), Utf8.toAsciiBytes(sessionId)));
+ return database().createPutTransaction(applicationId, sessionId);
}
/**
* Creates a node for the given application, marking its existence.
*/
public void createApplication(ApplicationId id) {
- if (! id.tenant().equals(tenant))
- throw new IllegalArgumentException("Cannot write application id '" + id + "' for tenant '" + tenant + "'");
- try (Lock lock = lock(id)) {
- curator.create(applicationPath(id));
- }
+ database().createApplication(id);
}
/**
@@ -179,7 +163,7 @@ public class TenantApplications implements RequestHandler, HostValidator<Applica
* Returns a transaction which deletes this application.
*/
public CuratorTransaction createDeleteTransaction(ApplicationId applicationId) {
- return CuratorTransaction.from(CuratorOperations.deleteAll(applicationPath(applicationId).getAbsolute(), curator), curator);
+ return database().createDeleteTransaction(applicationId);
}
/**
@@ -198,7 +182,7 @@ public class TenantApplications implements RequestHandler, HostValidator<Applica
/** Returns the lock for changing the session status of the given application. */
public Lock lock(ApplicationId id) {
- return curator.lock(lockPath(id), Duration.ofMinutes(1)); // These locks shouldn't be held for very long.
+ return database().lock(id);
}
private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) {
@@ -232,15 +216,6 @@ public class TenantApplications implements RequestHandler, HostValidator<Applica
log.log(Level.FINE, TenantRepository.logPre(applicationId) + "Application added: " + applicationId);
}
- // TODO jonmv: Move curator stuff to ApplicationCuratorDatabase
- private Path applicationPath(ApplicationId id) {
- return applicationsPath.append(id.serializedForm());
- }
-
- private Path lockPath(ApplicationId id) {
- return locksPath.append(id.serializedForm());
- }
-
/**
* Gets a config for the given app, or null if not found
*/
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/configchange/ConfigChangeActions.java b/configserver/src/main/java/com/yahoo/vespa/config/server/configchange/ConfigChangeActions.java
index dd9c8e4b6bb..29013dfd17d 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/configchange/ConfigChangeActions.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/configchange/ConfigChangeActions.java
@@ -17,18 +17,20 @@ public class ConfigChangeActions {
private final RestartActions restartActions;
private final RefeedActions refeedActions;
+ private final ReindexActions reindexActions;
public ConfigChangeActions() {
- this(new RestartActions(), new RefeedActions());
+ this(new RestartActions(), new RefeedActions(), new ReindexActions());
}
public ConfigChangeActions(List<ConfigChangeAction> actions) {
- this(new RestartActions(actions), new RefeedActions(actions));
+ this(new RestartActions(actions), new RefeedActions(actions), new ReindexActions(actions));
}
- public ConfigChangeActions(RestartActions restartActions, RefeedActions refeedActions) {
+ public ConfigChangeActions(RestartActions restartActions, RefeedActions refeedActions, ReindexActions reindexActions) {
this.restartActions = Objects.requireNonNull(restartActions);
this.refeedActions = Objects.requireNonNull(refeedActions);
+ this.reindexActions = Objects.requireNonNull(reindexActions);
}
public RestartActions getRestartActions() {
@@ -39,4 +41,6 @@ public class ConfigChangeActions {
return refeedActions;
}
+ public ReindexActions getReindexActions() { return reindexActions; }
+
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/configchange/ConfigChangeActionsSlimeConverter.java b/configserver/src/main/java/com/yahoo/vespa/config/server/configchange/ConfigChangeActionsSlimeConverter.java
index efc17d8b6bd..ec48b671a5b 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/configchange/ConfigChangeActionsSlimeConverter.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/configchange/ConfigChangeActionsSlimeConverter.java
@@ -10,7 +10,6 @@ import java.util.Set;
* Class used to convert a ConfigChangeActions instance to Slime.
*
* @author geirst
- * @since 5.44
*/
public class ConfigChangeActionsSlimeConverter {
private final ConfigChangeActions actions;
@@ -23,6 +22,7 @@ public class ConfigChangeActionsSlimeConverter {
Cursor actionsCursor = root.setObject("configChangeActions");
restartActionsToSlime(actionsCursor);
refeedActionsToSlime(actionsCursor);
+ reindexActionsToSlime(actionsCursor);
}
private void restartActionsToSlime(Cursor actionsCursor) {
@@ -50,6 +50,19 @@ public class ConfigChangeActionsSlimeConverter {
}
}
+ private void reindexActionsToSlime(Cursor actionsCursor) {
+ Cursor reindexCursor = actionsCursor.setArray("reindex");
+ for (ReindexActions.Entry entry : actions.getReindexActions().getEntries()) {
+ Cursor entryCursor = reindexCursor.addObject();
+ entryCursor.setString("name", entry.name());
+ entryCursor.setBool("allowed", entry.allowed());
+ entryCursor.setString("documentType", entry.getDocumentType());
+ entryCursor.setString("clusterName", entry.getClusterName());
+ messagesToSlime(entryCursor, entry.getMessages());
+ servicesToSlime(entryCursor, entry.getServices());
+ }
+ }
+
private static void messagesToSlime(Cursor entryCursor, Set<String> messages) {
Cursor messagesCursor = entryCursor.setArray("messages");
for (String message : messages) {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/configchange/ReindexActions.java b/configserver/src/main/java/com/yahoo/vespa/config/server/configchange/ReindexActions.java
new file mode 100644
index 00000000000..e328f9595b7
--- /dev/null
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/configchange/ReindexActions.java
@@ -0,0 +1,87 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.config.server.configchange;
+
+import com.yahoo.config.model.api.ConfigChangeAction;
+import com.yahoo.config.model.api.ConfigChangeReindexAction;
+import com.yahoo.config.model.api.ServiceInfo;
+
+import java.util.ArrayList;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+/**
+ * @author bjorncs
+ */
+public class ReindexActions {
+
+ private final Map<String, ReindexActions.Entry> actions = new TreeMap<>();
+
+ public ReindexActions() {}
+
+ public ReindexActions(List<ConfigChangeAction> actions) {
+ for (ConfigChangeAction action : actions) {
+ if (action.getType().equals(ConfigChangeAction.Type.REINDEX)) {
+ ConfigChangeReindexAction reindexChange = (ConfigChangeReindexAction) action;
+ for (ServiceInfo service : reindexChange.getServices()) {
+ addEntry(reindexChange.name(), reindexChange.allowed(), reindexChange.getDocumentType(), service).
+ addService(service).
+ addMessage(action.getMessage());
+ }
+ }
+ }
+ }
+
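+ /** Returns the entry for this (name, allowed, cluster, document type) combination, creating it if absent. */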
+ private Entry addEntry(String name, boolean allowed, String documentType, ServiceInfo service) {
+ String clusterName = service.getProperty("clustername").orElse("");
+ String entryId = name + "." + allowed + "." + clusterName + "." + documentType;
+ Entry entry = actions.get(entryId);
+ if (entry == null) {
+ entry = new Entry(name, allowed, documentType, clusterName);
+ actions.put(entryId, entry);
+ }
+ return entry;
+ }
+
+ public List<Entry> getEntries() { return new ArrayList<>(actions.values()); }
+ public String format() { return new ReindexActionsFormatter(this).format(); }
+ public boolean isEmpty() { return getEntries().isEmpty(); }
+
+ public static class Entry {
+
+ private final String name;
+ private final boolean allowed;
+ private final String documentType;
+ private final String clusterName;
+ private final Set<ServiceInfo> services = new LinkedHashSet<>();
+ private final Set<String> messages = new TreeSet<>();
+
+ private Entry(String name, boolean allowed, String documentType, String clusterName) {
+ this.name = name;
+ this.allowed = allowed;
+ this.documentType = documentType;
+ this.clusterName = clusterName;
+ }
+
+ private Entry addService(ServiceInfo service) {
+ services.add(service);
+ return this;
+ }
+
+ private Entry addMessage(String message) {
+ messages.add(message);
+ return this;
+ }
+
+ public String name() { return name; }
+ public boolean allowed() { return allowed; }
+ public String getDocumentType() { return documentType; }
+ public String getClusterName() { return clusterName; }
+ public Set<ServiceInfo> getServices() { return services; }
+ public Set<String> getMessages() { return messages; }
+
+ }
+}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/configchange/ReindexActionsFormatter.java b/configserver/src/main/java/com/yahoo/vespa/config/server/configchange/ReindexActionsFormatter.java
new file mode 100644
index 00000000000..e89bfd522cd
--- /dev/null
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/configchange/ReindexActionsFormatter.java
@@ -0,0 +1,32 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.config.server.configchange;
+
+/**
+ * Class used to format re-index actions for human readability.
+ *
+ * @author bjorncs
+ */
+class ReindexActionsFormatter {
+
+ private final ReindexActions actions;
+
+ ReindexActionsFormatter(ReindexActions actions) {
+ this.actions = actions;
+ }
+
+ String format() {
+ StringBuilder builder = new StringBuilder();
+ for (ReindexActions.Entry entry : actions.getEntries()) {
+ if (entry.allowed())
+ builder.append("(allowed) ");
+ builder.append(entry.name() + ": Consider re-indexing document type '" + entry.getDocumentType() +
+ "' in cluster '" + entry.getClusterName() + "' because:\n");
+ int counter = 1;
+ for (String message : entry.getMessages()) {
+ builder.append(" " + (counter++) + ") " + message + "\n");
+ }
+ }
+ return builder.toString();
+ }
+
+}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/Deployment.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/Deployment.java
index bf2d265dd16..f8acc2a92e5 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/Deployment.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/Deployment.java
@@ -140,7 +140,8 @@ public class Deployment implements com.yahoo.config.provision.Deployment {
deployLogger.log(Level.INFO, String.format("Scheduled service restart of %d nodes: %s",
hostnames.size(), hostnames.stream().sorted().collect(Collectors.joining(", "))));
- this.configChangeActions = new ConfigChangeActions(new RestartActions(), configChangeActions.getRefeedActions());
+ this.configChangeActions = new ConfigChangeActions(
+ new RestartActions(), configChangeActions.getRefeedActions(), configChangeActions.getReindexActions());
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
index c5b2920babe..c122c7e7d17 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
@@ -23,6 +23,7 @@ import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.flags.FetchVector;
import com.yahoo.vespa.flags.FlagSource;
import com.yahoo.vespa.flags.Flags;
+import com.yahoo.vespa.flags.UnboundFlag;
import java.io.File;
import java.net.URI;
@@ -134,8 +135,27 @@ public class ModelContextImpl implements ModelContext {
@Override
public Version wantedNodeVespaVersion() { return wantedNodeVespaVersion; }
+ public static class FeatureFlags implements ModelContext.FeatureFlags {
+
+ private final boolean enableAutomaticReindexing;
+
+ public FeatureFlags(FlagSource source, ApplicationId appId) {
+ this.enableAutomaticReindexing = flagValue(source, appId, Flags.ENABLE_AUTOMATIC_REINDEXING);
+ }
+
+ @Override public boolean enableAutomaticReindexing() { return enableAutomaticReindexing; }
+
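+ /** Looks up the flag value with the application id dimension set, so per-application overrides apply. */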
+ private static <V> V flagValue(FlagSource source, ApplicationId appId, UnboundFlag<? extends V, ?, ?> flag) {
+ return flag.bindTo(source)
+ .with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm())
+ .boxedValue();
+ }
+
+ }
+
public static class Properties implements ModelContext.Properties {
+ private final ModelContext.FeatureFlags featureFlags;
private final ApplicationId applicationId;
private final boolean multitenant;
private final List<ConfigServerSpec> configServerSpecs;
@@ -162,9 +182,10 @@ public class ModelContextImpl implements ModelContext {
private final Optional<AthenzDomain> athenzDomain;
private final Optional<ApplicationRoles> applicationRoles;
private final Quota quota;
- private final boolean useNewRestapiHandler;
private final boolean useAccessControlTlsHandshakeClientAuth;
- private final double jettyThreadpoolSizeFactor;
+ private final boolean useAsyncMessageHandlingOnSchedule;
+ private final int contentNodeBucketDBStripeBits;
+ private final int mergeChunkSize;
public Properties(ApplicationId applicationId,
boolean multitenantFromConfig,
@@ -182,6 +203,7 @@ public class ModelContextImpl implements ModelContext {
Optional<AthenzDomain> athenzDomain,
Optional<ApplicationRoles> applicationRoles,
Optional<Quota> maybeQuota) {
+ this.featureFlags = new FeatureFlags(flagSource, applicationId);
this.applicationId = applicationId;
this.multitenant = multitenantFromConfig || hostedVespa || Boolean.getBoolean("multitenant");
this.configServerSpecs = configServerSpecs;
@@ -219,18 +241,20 @@ public class ModelContextImpl implements ModelContext {
this.athenzDomain = athenzDomain;
this.applicationRoles = applicationRoles;
this.quota = maybeQuota.orElseGet(Quota::unlimited);
- this.useNewRestapiHandler = Flags.USE_NEW_RESTAPI_HANDLER.bindTo(flagSource)
- .with(FetchVector.Dimension.APPLICATION_ID, applicationId.serializedForm())
- .value();
this.useAccessControlTlsHandshakeClientAuth =
Flags.USE_ACCESS_CONTROL_CLIENT_AUTHENTICATION.bindTo(flagSource)
.with(FetchVector.Dimension.APPLICATION_ID, applicationId.serializedForm())
.value();
- this.jettyThreadpoolSizeFactor = Flags.JETTY_THREADPOOL_SCALE_FACTOR.bindTo(flagSource)
- .with(FetchVector.Dimension.APPLICATION_ID, applicationId.serializedForm())
- .value();
+ useAsyncMessageHandlingOnSchedule = Flags.USE_ASYNC_MESSAGE_HANDLING_ON_SCHEDULE.bindTo(flagSource)
+ .with(FetchVector.Dimension.APPLICATION_ID, applicationId.serializedForm()).value();
+ contentNodeBucketDBStripeBits = Flags.CONTENT_NODE_BUCKET_DB_STRIPE_BITS.bindTo(flagSource)
+ .with(FetchVector.Dimension.APPLICATION_ID, applicationId.serializedForm()).value();
+ mergeChunkSize = Flags.MERGE_CHUNK_SIZE.bindTo(flagSource)
+ .with(FetchVector.Dimension.APPLICATION_ID, applicationId.serializedForm()).value();
}
+ @Override public ModelContext.FeatureFlags featureFlags() { return featureFlags; }
+
@Override
public boolean multitenant() { return multitenant; }
@@ -300,19 +324,15 @@ public class ModelContextImpl implements ModelContext {
@Override public String jvmGCOptions() { return jvmGCOPtions; }
@Override public String feedSequencerType() { return feedSequencer; }
@Override public String responseSequencerType() { return responseSequencer; }
- @Override public int defaultNumResponseThreads() {
- return numResponseThreads;
- }
+ @Override public int defaultNumResponseThreads() { return numResponseThreads; }
@Override public boolean skipCommunicationManagerThread() { return skipCommunicationManagerThread; }
@Override public boolean skipMbusRequestThread() { return skipMbusRequestThread; }
@Override public boolean skipMbusReplyThread() { return skipMbusReplyThread; }
@Override public Quota quota() { return quota; }
-
- @Override public boolean useNewRestapiHandler() { return useNewRestapiHandler; }
-
@Override public boolean useAccessControlTlsHandshakeClientAuth() { return useAccessControlTlsHandshakeClientAuth; }
-
- @Override public double jettyThreadpoolSizeFactor() { return jettyThreadpoolSizeFactor; }
+ @Override public boolean useAsyncMessageHandlingOnSchedule() { return useAsyncMessageHandlingOnSchedule; }
+ @Override public int contentNodeBucketDBStripeBits() { return contentNodeBucketDBStripeBits; }
+ @Override public int mergeChunkSize() { return mergeChunkSize; }
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ConfigServerMaintainer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ConfigServerMaintainer.java
index 5854b1d85da..61d411802ad 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ConfigServerMaintainer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ConfigServerMaintainer.java
@@ -5,6 +5,7 @@ import com.yahoo.concurrent.maintenance.JobControl;
import com.yahoo.concurrent.maintenance.JobControlState;
import com.yahoo.concurrent.maintenance.JobMetrics;
import com.yahoo.concurrent.maintenance.Maintainer;
+import com.yahoo.config.provision.HostName;
import com.yahoo.jdisc.Metric;
import com.yahoo.path.Path;
import com.yahoo.transaction.Mutex;
@@ -15,8 +16,12 @@ import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.flags.ListFlag;
import java.time.Duration;
+import java.time.Instant;
+import java.util.Arrays;
+import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.stream.Collectors;
/**
* A maintainer is some job which runs at a fixed interval to perform some maintenance task in the config server.
@@ -27,6 +32,14 @@ public abstract class ConfigServerMaintainer extends Maintainer {
protected final ApplicationRepository applicationRepository;
+ /** Creates a maintainer whose runs are staggered evenly across the nodes in this cluster. */
+ ConfigServerMaintainer(ApplicationRepository applicationRepository, Curator curator, FlagSource flagSource,
+ Instant now, Duration interval) {
+ super(null, interval, now, new JobControl(new JobControlFlags(curator, flagSource)),
+ jobMetrics(applicationRepository.metric()), cluster(curator));
+ this.applicationRepository = applicationRepository;
+ }
+
ConfigServerMaintainer(ApplicationRepository applicationRepository, Curator curator, FlagSource flagSource,
Duration initialDelay, Duration interval) {
super(null, interval, initialDelay, new JobControl(new JobControlFlags(curator, flagSource)),
@@ -66,4 +79,13 @@ public abstract class ConfigServerMaintainer extends Maintainer {
}
+ /** Returns all hosts configured to be part of this ZooKeeper cluster */
+ public static List<String> cluster(Curator curator) {
+ return Arrays.stream(curator.zooKeeperEnsembleConnectionSpec().split(","))
+ .filter(hostAndPort -> !hostAndPort.isEmpty())
+ .map(hostAndPort -> hostAndPort.split(":")[0])
+ .collect(Collectors.toList());
+ }
+
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ConfigServerMaintenance.java b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ConfigServerMaintenance.java
index 0e75d683478..47b20d5f64b 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ConfigServerMaintenance.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ConfigServerMaintenance.java
@@ -7,6 +7,7 @@ import com.yahoo.component.AbstractComponent;
import com.yahoo.jdisc.Metric;
import com.yahoo.vespa.config.server.ApplicationRepository;
import com.yahoo.vespa.config.server.ConfigServerBootstrap;
+import com.yahoo.vespa.config.server.application.ConfigConvergenceChecker;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.flags.FlagSource;
@@ -27,6 +28,7 @@ public class ConfigServerMaintenance extends AbstractComponent {
private final FileDistributionMaintainer fileDistributionMaintainer;
private final SessionsMaintainer sessionsMaintainer;
private final ApplicationPackageMaintainer applicationPackageMaintainer;
+ private final ReindexingMaintainer reindexingMaintainer;
@Inject
public ConfigServerMaintenance(ConfigServerBootstrap configServerBootstrap,
@@ -34,12 +36,13 @@ public class ConfigServerMaintenance extends AbstractComponent {
ApplicationRepository applicationRepository,
Curator curator,
FlagSource flagSource,
- Metric metric) {
+ ConfigConvergenceChecker convergence) {
DefaultTimes defaults = new DefaultTimes(configserverConfig);
tenantsMaintainer = new TenantsMaintainer(applicationRepository, curator, flagSource, defaults.defaultInterval, Clock.systemUTC());
fileDistributionMaintainer = new FileDistributionMaintainer(applicationRepository, curator, defaults.defaultInterval, flagSource);
sessionsMaintainer = new SessionsMaintainer(applicationRepository, curator, Duration.ofSeconds(30), flagSource);
applicationPackageMaintainer = new ApplicationPackageMaintainer(applicationRepository, curator, Duration.ofSeconds(30), flagSource);
+ reindexingMaintainer = new ReindexingMaintainer(applicationRepository, curator, flagSource, Duration.ofMinutes(30), convergence, Clock.systemUTC());
}
@Override
@@ -48,6 +51,7 @@ public class ConfigServerMaintenance extends AbstractComponent {
sessionsMaintainer.close();
applicationPackageMaintainer.close();
tenantsMaintainer.close();
+ reindexingMaintainer.close();
}
/*
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ReindexingMaintainer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ReindexingMaintainer.java
new file mode 100644
index 00000000000..a301b091a0f
--- /dev/null
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ReindexingMaintainer.java
@@ -0,0 +1,110 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.config.server.maintenance;
+
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.vespa.config.server.ApplicationRepository;
+import com.yahoo.vespa.config.server.application.Application;
+import com.yahoo.vespa.config.server.application.ApplicationCuratorDatabase;
+import com.yahoo.vespa.config.server.application.ApplicationReindexing;
+import com.yahoo.vespa.config.server.application.ConfigConvergenceChecker;
+import com.yahoo.vespa.config.server.tenant.Tenant;
+import com.yahoo.vespa.curator.Curator;
+import com.yahoo.vespa.curator.Lock;
+import com.yahoo.vespa.flags.FlagSource;
+import com.yahoo.yolean.Exceptions;
+
+import java.time.Clock;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.Optional;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Supplier;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+/**
+ * Watches pending reindexing, and marks document types ready when config convergence is observed.
+ *
+ * @author jonmv
+ */
+public class ReindexingMaintainer extends ConfigServerMaintainer {
+
+ private static final Logger log = Logger.getLogger(ReindexingMaintainer.class.getName());
+
+ /** Timeout per service when getting config generations. */
+ private static final Duration timeout = Duration.ofSeconds(10);
+
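+ /** How long a ready mark is considered current before this maintainer refreshes it. */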
+ static final Duration reindexingInterval = Duration.ofDays(30);
+
+ private final ConfigConvergenceChecker convergence;
+ private final Clock clock;
+
+ public ReindexingMaintainer(ApplicationRepository applicationRepository, Curator curator, FlagSource flagSource,
+ Duration interval, ConfigConvergenceChecker convergence, Clock clock) {
+ super(applicationRepository, curator, flagSource, clock.instant(), interval);
+ this.convergence = convergence;
+ this.clock = clock;
+ }
+
+ @Override
+ protected boolean maintain() {
+ AtomicBoolean success = new AtomicBoolean(true);
+ for (Tenant tenant : applicationRepository.tenantRepository().getAllTenants()) {
+ ApplicationCuratorDatabase database = tenant.getApplicationRepo().database();
+ for (ApplicationId id : database.activeApplications())
+ applicationRepository.getActiveApplicationSet(id)
+ .map(application -> application.getForVersionOrLatest(Optional.empty(), clock.instant()))
+ .ifPresent(application -> {
+ try {
+ Collection<Long> generations = convergence.getServiceConfigGenerations(application, timeout).values();
+ try (Lock lock = database.lock(id)) {
+ ApplicationReindexing reindexing = database.readReindexingStatus(id)
+ .orElse(ApplicationReindexing.ready(clock.instant()));
+ database.writeReindexingStatus(id, withReady(reindexing, lazyGeneration(application), clock.instant()));
+ }
+ }
+ catch (RuntimeException e) {
+ log.log(Level.INFO, "Failed to update reindexing status for " + id + ": " + Exceptions.toMessageString(e));
+ success.set(false);
+ }
+ });
+ }
+ return success.get();
+ }
+
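+ /** Returns a supplier of the oldest config generation across the application's services, computed lazily and cached. */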
+ private Supplier<Long> lazyGeneration(Application application) {
+ AtomicLong oldest = new AtomicLong();
+ return () -> {
+ if (oldest.get() == 0)
+ oldest.set(convergence.getServiceConfigGenerations(application, timeout).values().stream()
+ .min(Comparator.naturalOrder())
+ .orElse(-1L));
+
+ return oldest.get();
+ };
+ }
+
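+ /** Marks pending document types ready once all services have converged past their pending generation, and refreshes ready marks older than the reindexing interval. */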
+ static ApplicationReindexing withReady(ApplicationReindexing reindexing, Supplier<Long> oldestGeneration, Instant now) {
+ for (var cluster : reindexing.clusters().entrySet()) {
+ for (var pending : cluster.getValue().pending().entrySet())
+ if (pending.getValue() <= oldestGeneration.get())
+ reindexing = reindexing.withReady(cluster.getKey(), pending.getKey(), now);
+
+ for (var documentType : cluster.getValue().ready().entrySet())
+ if (documentType.getValue().ready().isBefore(now.minus(reindexingInterval)))
+ reindexing = reindexing.withReady(cluster.getKey(), documentType.getKey(), now);
+
+ if (cluster.getValue().common().ready().isBefore(now.minus(reindexingInterval)))
+ reindexing = reindexing.withReady(cluster.getKey(), now);
+ }
+ if (reindexing.common().ready().isBefore(now.minus(reindexingInterval)))
+ reindexing = reindexing.withReady(now);
+
+ return reindexing;
+ }
+
+}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java
index b71d1b99cf0..e45839bc00c 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java
@@ -149,6 +149,7 @@ public class ApplicationRepositoryTest {
public void prepareAndActivate() {
PrepareResult result = prepareAndActivate(testApp);
assertTrue(result.configChangeActions().getRefeedActions().isEmpty());
+ assertTrue(result.configChangeActions().getReindexActions().isEmpty());
assertTrue(result.configChangeActions().getRestartActions().isEmpty());
Tenant tenant = applicationRepository.getTenant(applicationId());
@@ -164,6 +165,7 @@ public class ApplicationRepositoryTest {
Instant deployTime = clock.instant();
PrepareResult result = prepareAndActivate(testApp);
assertTrue(result.configChangeActions().getRefeedActions().isEmpty());
+ assertTrue(result.configChangeActions().getReindexActions().isEmpty());
assertTrue(result.configChangeActions().getRestartActions().isEmpty());
Tenant tenant = applicationRepository.getTenant(applicationId());
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/ModelContextImplTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/ModelContextImplTest.java
index 9b827293516..dd219ce7b95 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/ModelContextImplTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/ModelContextImplTest.java
@@ -87,6 +87,9 @@ public class ModelContextImplTest {
assertEquals(new Version(7), context.modelVespaVersion());
assertEquals(new Version(8), context.wantedNodeVespaVersion());
assertEquals(1.0, context.properties().defaultTermwiseLimit(), 0.0);
+ assertFalse(context.properties().useAsyncMessageHandlingOnSchedule());
+ assertEquals(0, context.properties().contentNodeBucketDBStripeBits());
+ assertEquals(0x400000, context.properties().mergeChunkSize());
}
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationCuratorDatabaseTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationCuratorDatabaseTest.java
index d9a4f65de66..b74df3ffd9a 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationCuratorDatabaseTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationCuratorDatabaseTest.java
@@ -3,10 +3,10 @@ package com.yahoo.vespa.config.server.application;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.vespa.curator.mock.MockCurator;
-import org.junit.Assert;
import org.junit.Test;
import java.time.Instant;
+import java.util.Optional;
import static org.junit.Assert.assertEquals;
@@ -18,17 +18,21 @@ public class ApplicationCuratorDatabaseTest {
@Test
public void testReindexingStatusSerialization() {
ApplicationId id = ApplicationId.defaultId();
- ApplicationCuratorDatabase db = new ApplicationCuratorDatabase(new MockCurator());
+ ApplicationCuratorDatabase db = new ApplicationCuratorDatabase(id.tenant(), new MockCurator());
- assertEquals(ReindexingStatus.empty(), db.readReindexingStatus(id));
+ assertEquals(Optional.empty(), db.readReindexingStatus(id));
- ReindexingStatus status = ReindexingStatus.empty()
- .withPending("pending1", 1)
- .withPending("pending2", 2)
- .withReady("ready1", Instant.ofEpochMilli(123))
- .withReady("ready2", Instant.ofEpochMilli(321));
- db.writeReindexingStatus(id, status);
- assertEquals(status, db.readReindexingStatus(id));
+ ApplicationReindexing reindexing = ApplicationReindexing.ready(Instant.EPOCH)
+ .withReady(Instant.ofEpochMilli(1 << 20))
+ .withPending("one", "a", 10)
+ .withReady("two", "b", Instant.ofEpochMilli(2))
+ .withPending("two", "b", 20)
+ .withReady("two", Instant.ofEpochMilli(2 << 10))
+ .withReady("one", "a", Instant.ofEpochMilli(1))
+ .withReady("two", "c", Instant.ofEpochMilli(3));
+
+ db.writeReindexingStatus(id, reindexing);
+ assertEquals(reindexing, db.readReindexingStatus(id).orElseThrow());
}
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationReindexingTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationReindexingTest.java
new file mode 100644
index 00000000000..71661776095
--- /dev/null
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationReindexingTest.java
@@ -0,0 +1,63 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.config.server.application;
+
+import com.yahoo.vespa.config.server.application.ApplicationReindexing.Status;
+import org.junit.Test;
+
+import java.time.Instant;
+import java.util.Map;
+import java.util.Set;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * @author jonmv
+ */
+public class ApplicationReindexingTest {
+
+ @Test
+ public void test() {
+ ApplicationReindexing reindexing = ApplicationReindexing.ready(Instant.EPOCH)
+ .withReady(Instant.ofEpochMilli(1 << 20))
+ .withPending("one", "a", 10)
+ .withReady("two", "b", Instant.ofEpochMilli(2))
+ .withPending("two", "b", 20)
+ .withReady("two", Instant.ofEpochMilli(2 << 10))
+ .withReady("one", "a", Instant.ofEpochMilli(1))
+ .withReady("two", "c", Instant.ofEpochMilli(3));
+
+ assertEquals(Instant.ofEpochMilli(1 << 20),
+ reindexing.status("one", "a").ready());
+
+ assertEquals(Instant.ofEpochMilli(1 << 20),
+ reindexing.status("one", "d").ready());
+
+ assertEquals(Instant.ofEpochMilli(1 << 20),
+ reindexing.status("three", "a").ready());
+
+ assertEquals(new Status(Instant.ofEpochMilli(1 << 20)),
+ reindexing.common());
+
+ assertEquals(Set.of("one", "two"),
+ reindexing.clusters().keySet());
+
+ assertEquals(new Status(Instant.EPOCH),
+ reindexing.clusters().get("one").common());
+
+ assertEquals(Map.of("a", new Status(Instant.ofEpochMilli(1))),
+ reindexing.clusters().get("one").ready());
+
+ assertEquals(Map.of(),
+ reindexing.clusters().get("one").pending());
+
+ assertEquals(new Status(Instant.ofEpochMilli(2 << 10)),
+ reindexing.clusters().get("two").common());
+
+ assertEquals(Map.of("c", new Status(Instant.ofEpochMilli(3))),
+ reindexing.clusters().get("two").ready());
+
+ assertEquals(Map.of("b", 20L),
+ reindexing.clusters().get("two").pending());
+ }
+
+}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/application/ReindexingStatusTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/application/ReindexingStatusTest.java
deleted file mode 100644
index 2f09b5afba4..00000000000
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/application/ReindexingStatusTest.java
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.config.server.application;
-
-import org.junit.Test;
-
-import java.time.Instant;
-import java.util.Map;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * @author jonmv
- */
-public class ReindexingStatusTest {
-
- @Test
- public void test() {
- ReindexingStatus status = ReindexingStatus.empty()
- .withPending("one", 1)
- .withPending("two", 2)
- .withReady("two", Instant.EPOCH)
- .withPending("three", 2)
- .withReady("three", Instant.EPOCH)
- .withPending("three", 3)
- .withReady("four", Instant.MIN)
- .withReady("four", Instant.MAX);
- assertEquals(Map.of("one", 1L,
- "three", 3L), status.pending());
- assertEquals(Map.of("two", new ReindexingStatus.Status(Instant.EPOCH),
- "four", new ReindexingStatus.Status(Instant.MAX)),
- status.status());
- }
-
-}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/configchange/ConfigChangeActionsBuilder.java b/configserver/src/test/java/com/yahoo/vespa/config/server/configchange/ConfigChangeActionsBuilder.java
index fe3155b251c..b5194432682 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/configchange/ConfigChangeActionsBuilder.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/configchange/ConfigChangeActionsBuilder.java
@@ -2,14 +2,22 @@
package com.yahoo.vespa.config.server.configchange;
import com.google.common.collect.ImmutableMap;
+import com.yahoo.config.application.api.ValidationId;
+import com.yahoo.config.application.api.ValidationOverrides;
import com.yahoo.config.model.api.ConfigChangeAction;
import com.yahoo.config.model.api.ServiceInfo;
import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.vespa.model.application.validation.change.VespaReindexAction;
import com.yahoo.vespa.model.application.validation.change.VespaRestartAction;
+import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
/**
* @author geirst
*/
@@ -44,6 +52,15 @@ public class ConfigChangeActionsBuilder {
return this;
}
+ ConfigChangeActionsBuilder reindex(String name, boolean allowed, String message, String documentType, String clusterName, String serviceName) {
+ List<ServiceInfo> services = List.of(createService(clusterName, "myclustertype", "myservicetype", serviceName));
+ ValidationOverrides overrides = mock(ValidationOverrides.class);
+ when(overrides.allows((String) any(), any())).thenReturn(allowed);
+ when(overrides.allows((ValidationId) any(), any())).thenReturn(allowed);
+ actions.add(VespaReindexAction.of(ClusterSpec.Id.from(clusterName), name, overrides, message, services, documentType, Instant.now()));
+ return this;
+ }
+
public ConfigChangeActions build() {
return new ConfigChangeActions(actions);
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/configchange/ConfigChangeActionsSlimeConverterTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/configchange/ConfigChangeActionsSlimeConverterTest.java
index c7078bb0703..d145a796725 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/configchange/ConfigChangeActionsSlimeConverterTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/configchange/ConfigChangeActionsSlimeConverterTest.java
@@ -35,6 +35,8 @@ public class ConfigChangeActionsSlimeConverterTest {
" \"restart\": [\n" +
" ],\n" +
" \"refeed\": [\n" +
+ " ],\n" +
+ " \"reindex\": [\n" +
" ]\n" +
" }\n" +
"}\n",
@@ -76,6 +78,8 @@ public class ConfigChangeActionsSlimeConverterTest {
" }\n" +
" ],\n" +
" \"refeed\": [\n" +
+ " ],\n" +
+ " \"reindex\": [\n" +
" ]\n" +
" }\n" +
"}\n",
@@ -126,10 +130,47 @@ public class ConfigChangeActionsSlimeConverterTest {
" }\n" +
" ]\n" +
" }\n" +
+ " ],\n" +
+ " \"reindex\": [\n" +
" ]\n" +
" }\n" +
"}\n",
- toJson(actions));
+ toJson(actions));
+ }
+
+ @Test
+ public void json_representation_of_reindex_actions() throws IOException {
+ ConfigChangeActions actions = new ConfigChangeActionsBuilder().
+ reindex(CHANGE_ID, true, CHANGE_MSG, DOC_TYPE, CLUSTER, SERVICE_TYPE).build();
+ assertEquals(
+ "{\n" +
+ " \"configChangeActions\": {\n" +
+ " \"restart\": [\n" +
+ " ],\n" +
+ " \"refeed\": [\n" +
+ " ],\n" +
+ " \"reindex\": [\n" +
+ " {\n" +
+ " \"name\": \"change-id\",\n" +
+ " \"allowed\": true,\n" +
+ " \"documentType\": \"music\",\n" +
+ " \"clusterName\": \"foo\",\n" +
+ " \"messages\": [\n" +
+ " \"change\"\n" +
+ " ],\n" +
+ " \"services\": [\n" +
+ " {\n" +
+ " \"serviceName\": \"searchnode\",\n" +
+ " \"serviceType\": \"myservicetype\",\n" +
+ " \"configId\": \"myservicetype/searchnode\",\n" +
+ " \"hostName\": \"hostname\"\n" +
+ " }\n" +
+ " ]\n" +
+ " }\n" +
+ " ]\n" +
+ " }\n" +
+ "}\n",
+ toJson(actions));
}
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/configchange/ReindexActionsFormatterTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/configchange/ReindexActionsFormatterTest.java
new file mode 100644
index 00000000000..e9dd3f3bbfc
--- /dev/null
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/configchange/ReindexActionsFormatterTest.java
@@ -0,0 +1,53 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.config.server.configchange;
+
+import org.junit.Test;
+
+import static com.yahoo.vespa.config.server.configchange.Utils.CHANGE_ID;
+import static com.yahoo.vespa.config.server.configchange.Utils.CHANGE_ID_2;
+import static com.yahoo.vespa.config.server.configchange.Utils.CHANGE_MSG;
+import static com.yahoo.vespa.config.server.configchange.Utils.CHANGE_MSG_2;
+import static com.yahoo.vespa.config.server.configchange.Utils.CLUSTER;
+import static com.yahoo.vespa.config.server.configchange.Utils.DOC_TYPE;
+import static com.yahoo.vespa.config.server.configchange.Utils.DOC_TYPE_2;
+import static com.yahoo.vespa.config.server.configchange.Utils.SERVICE_NAME;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * @author bjorncs
+ */
+public class ReindexActionsFormatterTest {
+
+ @Test
+ public void formatting_of_single_action() {
+ ReindexActions actions = new ConfigChangeActionsBuilder().
+ reindex(CHANGE_ID, false, CHANGE_MSG, DOC_TYPE, CLUSTER, SERVICE_NAME).
+ build().getReindexActions();
+ assertEquals("change-id: Consider re-indexing document type 'music' in cluster 'foo' because:\n" +
+ " 1) change\n",
+ new ReindexActionsFormatter(actions).format());
+ }
+
+ @Test
+ public void formatting_of_multiple_actions() {
+ ReindexActions actions = new ConfigChangeActionsBuilder().
+ reindex(CHANGE_ID, false, CHANGE_MSG, DOC_TYPE, CLUSTER, SERVICE_NAME).
+ reindex(CHANGE_ID, false, CHANGE_MSG_2, DOC_TYPE, CLUSTER, SERVICE_NAME).
+ reindex(CHANGE_ID_2, false, CHANGE_MSG_2, DOC_TYPE, CLUSTER, SERVICE_NAME).
+ reindex(CHANGE_ID_2, true, CHANGE_MSG_2, DOC_TYPE, CLUSTER, SERVICE_NAME).
+ reindex(CHANGE_ID, false, CHANGE_MSG_2, DOC_TYPE_2, CLUSTER, SERVICE_NAME).
+ build().getReindexActions();
+ assertEquals("change-id: Consider re-indexing document type 'book' in cluster 'foo' because:\n" +
+ " 1) other change\n" +
+ "change-id: Consider re-indexing document type 'music' in cluster 'foo' because:\n" +
+ " 1) change\n" +
+ " 2) other change\n" +
+ "other-change-id: Consider re-indexing document type 'music' in cluster 'foo' because:\n" +
+ " 1) other change\n" +
+ "(allowed) other-change-id: Consider re-indexing document type 'music' in cluster 'foo' because:\n" +
+ " 1) other change\n",
+ new ReindexActionsFormatter(actions).format());
+ }
+
+}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/SessionPrepareHandlerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/SessionPrepareHandlerTest.java
index 3af408b90f6..d24faa1b433 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/SessionPrepareHandlerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/SessionPrepareHandlerTest.java
@@ -234,15 +234,16 @@ public class SessionPrepareHandlerTest extends SessionHandlerTest {
public void require_that_config_change_actions_are_in_response() throws Exception {
long sessionId = applicationRepository.createSession(applicationId(), timeoutBudget, app);
HttpResponse response = request(HttpRequest.Method.PUT, sessionId);
- assertResponseContains(response, "\"configChangeActions\":{\"restart\":[],\"refeed\":[]}");
+ assertResponseContains(response, "\"configChangeActions\":{\"restart\":[],\"refeed\":[],\"reindex\":[]}");
}
@Test
public void require_that_config_change_actions_are_not_logged_if_not_existing() throws Exception {
long sessionId = applicationRepository.createSession(applicationId(), timeoutBudget, app);
HttpResponse response = request(HttpRequest.Method.PUT, sessionId);
- assertResponseNotContains(response, "Change(s) between active and new application that require restart");
- assertResponseNotContains(response, "Change(s) between active and new application that require re-feed");
+ assertResponseNotContains(response, "Change(s) between active and new application that may require restart");
+ assertResponseNotContains(response, "Change(s) between active and new application that may require re-feed");
+ assertResponseNotContains(response, "Change(s) between active and new application that may require re-index");
}
@Test
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/maintenance/ReindexingMaintainerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/maintenance/ReindexingMaintainerTest.java
new file mode 100644
index 00000000000..f4a553e25b7
--- /dev/null
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/maintenance/ReindexingMaintainerTest.java
@@ -0,0 +1,43 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.config.server.maintenance;
+
+import com.yahoo.vespa.config.server.application.ApplicationReindexing;
+import org.junit.Test;
+
+import java.time.Instant;
+
+import static com.yahoo.vespa.config.server.maintenance.ReindexingMaintainer.withReady;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * @author jonmv
+ */
+public class ReindexingMaintainerTest {
+
+ @Test
+ public void testReadyComputation() {
+ ApplicationReindexing reindexing = ApplicationReindexing.ready(Instant.ofEpochMilli(1 << 20))
+ .withPending("one", "a", 10)
+ .withReady("two", "b", Instant.ofEpochMilli(2))
+ .withPending("two", "b", 20)
+ .withReady("two", Instant.ofEpochMilli(2 << 10))
+ .withReady("one", "a", Instant.ofEpochMilli(1))
+ .withReady("two", "c", Instant.ofEpochMilli(3));
+
+ assertEquals(reindexing,
+ withReady(reindexing, () -> -1L, Instant.EPOCH));
+
+ assertEquals(reindexing,
+ withReady(reindexing, () -> 19L, Instant.EPOCH));
+
+ Instant later = Instant.ofEpochMilli(2).plus(ReindexingMaintainer.reindexingInterval);
+ assertEquals(reindexing.withReady("one", later) // Had EPOCH as previous, so is updated.
+ .withReady("two", "b", later) // Got config convergence, so is updated.
+ .withReady("one", "a", later), // Had EPOCH + 1 as previous, so is updated.
+ withReady(reindexing, () -> 20L, later));
+
+ // Verify generation supplier isn't called when no pending document types.
+ withReady(reindexing.withReady("two", "b", later), () -> { throw new AssertionError("not supposed to run"); }, later);
+ }
+
+}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/aws/MockResourceTagger.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/aws/MockResourceTagger.java
index 0fd114095d7..a74a362330b 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/aws/MockResourceTagger.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/aws/MockResourceTagger.java
@@ -8,21 +8,22 @@ import com.yahoo.config.provision.zone.ZoneId;
import java.util.HashMap;
import java.util.Map;
+import java.util.Optional;
/**
* @author olaa
*/
public class MockResourceTagger implements ResourceTagger {
- Map<ZoneId, Map<HostName, ApplicationId>> values = new HashMap<>();
+ Map<ZoneId, Map<HostName, Optional<ApplicationId>>> values = new HashMap<>();
@Override
- public int tagResources(ZoneApi zone, Map<HostName, ApplicationId> tenantOfHosts) {
- values.put(zone.getId(), tenantOfHosts);
+ public int tagResources(ZoneApi zone, Map<HostName, Optional<ApplicationId>> ownerOfHosts) {
+ values.put(zone.getId(), ownerOfHosts);
return 0;
}
- public Map<ZoneId, Map<HostName, ApplicationId>> getValues() {
+ public Map<ZoneId, Map<HostName, Optional<ApplicationId>>> getValues() {
return values;
}
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/aws/ResourceTagger.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/aws/ResourceTagger.java
index 5b3cd18403b..61f8a57ac8b 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/aws/ResourceTagger.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/aws/ResourceTagger.java
@@ -6,6 +6,7 @@ import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.zone.ZoneApi;
import java.util.Map;
+import java.util.Optional;
/**
* @author olaa
@@ -15,7 +16,7 @@ public interface ResourceTagger {
/**
* Returns number of tagged resources
*/
- int tagResources(ZoneApi zone, Map<HostName, ApplicationId> tenantOfHosts);
+ int tagResources(ZoneApi zone, Map<HostName, Optional<ApplicationId>> ownerOfHosts);
static ResourceTagger empty() {
return (zone, tenantOfHosts) -> 0;
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Node.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Node.java
index b57b2dbc496..d1faacaeba7 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Node.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Node.java
@@ -51,6 +51,7 @@ public class Node {
private final boolean wantToRetire;
private final boolean wantToDeprovision;
private final Optional<TenantName> reservedTo;
+ private final Optional<ApplicationId> exclusiveTo;
private final Map<String, JsonNode> reports;
public Node(HostName hostname, Optional<HostName> parentHostname, State state, NodeType type, NodeResources resources, Optional<ApplicationId> owner,
@@ -58,7 +59,8 @@ public class Node {
Optional<Instant> currentFirmwareCheck, Optional<Instant> wantedFirmwareCheck, ServiceState serviceState,
Optional<Instant> suspendedSince, long restartGeneration, long wantedRestartGeneration, long rebootGeneration, long wantedRebootGeneration,
int cost, String flavor, String clusterId, ClusterType clusterType, boolean wantToRetire, boolean wantToDeprovision,
- Optional<TenantName> reservedTo, DockerImage wantedDockerImage, DockerImage currentDockerImage, Map<String, JsonNode> reports) {
+ Optional<TenantName> reservedTo, Optional<ApplicationId> exclusiveTo,
+ DockerImage wantedDockerImage, DockerImage currentDockerImage, Map<String, JsonNode> reports) {
this.hostname = hostname;
this.parentHostname = parentHostname;
this.state = state;
@@ -84,6 +86,7 @@ public class Node {
this.wantToRetire = wantToRetire;
this.wantToDeprovision = wantToDeprovision;
this.reservedTo = reservedTo;
+ this.exclusiveTo = exclusiveTo;
this.wantedDockerImage = wantedDockerImage;
this.currentDockerImage = currentDockerImage;
this.reports = reports;
@@ -193,6 +196,8 @@ public class Node {
public Optional<TenantName> reservedTo() { return reservedTo; }
+ public Optional<ApplicationId> exclusiveTo() { return exclusiveTo; }
+
public Map<String, JsonNode> reports() {
return reports;
}
@@ -267,6 +272,7 @@ public class Node {
private boolean wantToRetire;
private boolean wantToDeprovision;
private Optional<TenantName> reservedTo = Optional.empty();
+ private Optional<ApplicationId> exclusiveTo = Optional.empty();
private Map<String, JsonNode> reports = new HashMap<>();
public Builder() { }
@@ -299,6 +305,7 @@ public class Node {
this.wantToRetire = node.wantToRetire;
this.wantToDeprovision = node.wantToDeprovision;
this.reservedTo = node.reservedTo;
+ this.exclusiveTo = node.exclusiveTo;
this.reports = node.reports;
}
@@ -437,11 +444,16 @@ public class Node {
return this;
}
+ public Builder exclusiveTo(ApplicationId exclusiveTo) {
+ this.exclusiveTo = Optional.of(exclusiveTo);
+ return this;
+ }
+
public Node build() {
return new Node(hostname, parentHostname, state, type, resources, owner, currentVersion, wantedVersion,
currentOsVersion, wantedOsVersion, currentFirmwareCheck, wantedFirmwareCheck, serviceState,
suspendedSince, restartGeneration, wantedRestartGeneration, rebootGeneration, wantedRebootGeneration,
- cost, flavor, clusterId, clusterType, wantToRetire, wantToDeprovision, reservedTo,
+ cost, flavor, clusterId, clusterType, wantToRetire, wantToDeprovision, reservedTo, exclusiveTo,
wantedDockerImage, currentDockerImage, reports);
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/NodeRepository.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/NodeRepository.java
index 6f4b39ac9b9..1285670d52c 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/NodeRepository.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/NodeRepository.java
@@ -129,6 +129,7 @@ public interface NodeRepository {
node.getWantToRetire(),
node.getWantToDeprovision(),
Optional.ofNullable(node.getReservedTo()).map(TenantName::from),
+ Optional.ofNullable(node.getExclusiveTo()).map(ApplicationId::fromSerializedForm),
dockerImageFrom(node.getWantedDockerImage()),
dockerImageFrom(node.getCurrentDockerImage()),
node.getReports());
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/NodeRepositoryNode.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/NodeRepositoryNode.java
index a57080324ca..2e5132fe17d 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/NodeRepositoryNode.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/NodeRepositoryNode.java
@@ -418,6 +418,7 @@ public class NodeRepositoryNode {
", reports=" + reports +
", modelName=" + modelName +
", reservedTo=" + reservedTo +
+ ", exclusiveTo=" + exclusiveTo +
'}';
}
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java
index 95f394aefef..b6e8dcee2af 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java
@@ -57,6 +57,7 @@ enum PathGroup {
tenantInfo(Matcher.tenant,
PathPrefix.api,
"/application/v4/tenant/{tenant}/application/",
+ "/application/v4/tenant/{tenant}/info/",
"/routing/v1/status/tenant/{tenant}/{*}"),
tenantKeys(Matcher.tenant,
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedTenant.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedTenant.java
index 9e54d887952..b998ed29b71 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedTenant.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedTenant.java
@@ -134,6 +134,9 @@ public abstract class LockedTenant {
return new Cloud(name, creator, keys, info);
}
+ public Cloud withInfo(TenantInfo newInfo) {
+ return new Cloud(name, creator, developerKeys, newInfo);
+ }
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
index cb6e5d123ef..cba78843cba 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
@@ -544,12 +544,12 @@ public class InternalStepRunner implements StepRunner {
private String wantedPlatform(Node node) {
- return node.wantedDockerImage().untagged() + ":" + node.wantedVersion();
+ return node.wantedDockerImage().repository() + ":" + node.wantedVersion();
}
private String currentPlatform(Node node) {
- String currentRepo = node.currentDockerImage().untagged();
- String wantedRepo = node.wantedDockerImage().untagged();
+ String currentRepo = node.currentDockerImage().repository();
+ String wantedRepo = node.wantedDockerImage().repository();
return (currentRepo.equals(wantedRepo) ? "" : currentRepo + ":") + node.currentVersion();
}
@@ -828,13 +828,14 @@ public class InternalStepRunner implements StepRunner {
}
static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) {
- return spec.steps().stream()
+ NodeResources nodeResources = spec.steps().stream()
.filter(step -> step.concerns(zone.environment()))
.findFirst()
.flatMap(step -> step.zones().get(0).testerFlavor())
.map(NodeResources::fromLegacyName)
.orElse(zone.region().value().contains("aws-") ?
DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES);
+ return nodeResources.with(NodeResources.DiskSpeed.any);
}
/** Returns the generated services.xml content for the tester application. */
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceTagMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceTagMaintainer.java
index 863302223ac..634e5ba10ce 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceTagMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceTagMaintainer.java
@@ -7,9 +7,11 @@ import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.api.integration.aws.ResourceTagger;
+import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
import java.time.Duration;
import java.util.Map;
+import java.util.Optional;
import java.util.logging.Level;
import java.util.stream.Collectors;
@@ -31,7 +33,7 @@ public class ResourceTagMaintainer extends ControllerMaintainer {
.ofCloud(CloudName.from("aws"))
.reachable()
.zones().forEach(zone -> {
- Map<HostName, ApplicationId> applicationOfHosts = getTenantOfParentHosts(zone.getId());
+ Map<HostName, Optional<ApplicationId>> applicationOfHosts = getTenantOfParentHosts(zone.getId());
int taggedResources = resourceTagger.tagResources(zone, applicationOfHosts);
if (taggedResources > 0)
log.log(Level.INFO, "Tagged " + taggedResources + " resources in " + zone.getId());
@@ -39,14 +41,14 @@ public class ResourceTagMaintainer extends ControllerMaintainer {
return true;
}
- private Map<HostName, ApplicationId> getTenantOfParentHosts(ZoneId zoneId) {
+ private Map<HostName, Optional<ApplicationId>> getTenantOfParentHosts(ZoneId zoneId) {
return controller().serviceRegistry().configServer().nodeRepository()
.list(zoneId)
.stream()
- .filter(node -> node.parentHostname().isPresent() && node.owner().isPresent())
+ .filter(node -> node.type().isHost())
.collect(Collectors.toMap(
- node -> node.parentHostname().get(),
- node -> node.owner().get(),
+ Node::hostname,
+ Node::exclusiveTo,
(node1, node2) -> node1
));
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
index 3856beb65bf..0541cc91159 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
@@ -91,6 +91,9 @@ import com.yahoo.vespa.hosted.controller.security.Credentials;
import com.yahoo.vespa.hosted.controller.tenant.AthenzTenant;
import com.yahoo.vespa.hosted.controller.tenant.CloudTenant;
import com.yahoo.vespa.hosted.controller.tenant.Tenant;
+import com.yahoo.vespa.hosted.controller.tenant.TenantInfo;
+import com.yahoo.vespa.hosted.controller.tenant.TenantInfoAddress;
+import com.yahoo.vespa.hosted.controller.tenant.TenantInfoBillingContact;
import com.yahoo.vespa.hosted.controller.versions.VersionStatus;
import com.yahoo.vespa.hosted.controller.versions.VespaVersion;
import com.yahoo.vespa.serviceview.bindings.ApplicationView;
@@ -212,6 +215,7 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
if (path.matches("/application/v4/")) return root(request);
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
+ if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"));
@@ -354,6 +358,51 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
return new SlimeJsonResponse(slime);
}
+ private HttpResponse tenantInfo(String tenantName, HttpRequest request) {
+ return controller.tenants().get(TenantName.from(tenantName))
+ .filter(tenant -> tenant.type() == Tenant.Type.cloud)
+ .map(tenant -> tenantInfo(((CloudTenant)tenant).info(), request))
+ .orElseGet(() -> ErrorResponse.notFoundError("Tenant '" + tenantName + "' does not exist or does not support this operation"));
+ }
+
+ private SlimeJsonResponse tenantInfo(TenantInfo info, HttpRequest request) {
+ Slime slime = new Slime();
+ Cursor infoCursor = slime.setObject();
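+ // An empty TenantInfo yields an empty JSON object; address and billing contact are included only when set.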
+ if (!info.isEmpty()) {
+ infoCursor.setString("name", info.name());
+ infoCursor.setString("email", info.email());
+ infoCursor.setString("website", info.website());
+ infoCursor.setString("invoiceEmail", info.invoiceEmail());
+ infoCursor.setString("contactName", info.contactName());
+ infoCursor.setString("contactEmail", info.contactEmail());
+ toSlime(info.address(), infoCursor);
+ toSlime(info.billingContact(), infoCursor);
+ }
+
+ return new SlimeJsonResponse(slime);
+ }
+
+ private void toSlime(TenantInfoAddress address, Cursor parentCursor) {
+ if (address.isEmpty()) return;
+
+ Cursor addressCursor = parentCursor.setObject("address");
+ addressCursor.setString("addressLines", address.addressLines());
+ addressCursor.setString("postalCodeOrZip", address.postalCodeOrZip());
+ addressCursor.setString("city", address.city());
+ addressCursor.setString("stateRegionProvince", address.stateRegionProvince());
+ addressCursor.setString("country", address.country());
+ }
+
+ private void toSlime(TenantInfoBillingContact billingContact, Cursor parentCursor) {
+ if (billingContact.isEmpty()) return;
+
+ Cursor addressCursor = parentCursor.setObject("billingContact");
+ addressCursor.setString("name", billingContact.name());
+ addressCursor.setString("email", billingContact.email());
+ addressCursor.setString("phone", billingContact.phone());
+ toSlime(billingContact.address(), addressCursor);
+ }
+
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
if (controller.tenants().get(tenantName).isEmpty())
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java
index 72cc000ef98..01112cbb696 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java
@@ -97,6 +97,7 @@ public class NodeRepositoryMock implements NodeRepository {
.resources(new NodeResources(24, 24, 500, 1))
.clusterId("clusterA")
.clusterType(Node.ClusterType.container)
+ .exclusiveTo(ApplicationId.from("t1", "a1", "i1"))
.build();
var nodeB = new Node.Builder()
.hostname(HostName.from("hostB"))
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceTagMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceTagMaintainerTest.java
index e42d99c1fee..ad8d0050c73 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceTagMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceTagMaintainerTest.java
@@ -3,16 +3,20 @@ package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.HostName;
+import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.api.integration.aws.MockResourceTagger;
+import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
import com.yahoo.vespa.hosted.controller.integration.ZoneApiMock;
import org.junit.Test;
import java.time.Duration;
+import java.util.List;
import java.util.Map;
+import java.util.Optional;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
/**
* @author olaa
@@ -30,24 +34,39 @@ public class ResourceTagMaintainerTest {
mockResourceTagger);
resourceTagMaintainer.maintain();
assertEquals(2, mockResourceTagger.getValues().size());
- Map<HostName, ApplicationId> applicationForHost = mockResourceTagger.getValues().get(ZoneId.from("prod.region-2"));
- assertEquals(ApplicationId.from("tenant1", "app1", "default"), applicationForHost.get(HostName.from("parentHostA")));
- assertEquals(ApplicationId.from("tenant2", "app2", "default"), applicationForHost.get(HostName.from("parentHostB")));
-
-
+ Map<HostName, Optional<ApplicationId>> applicationForHost = mockResourceTagger.getValues().get(ZoneId.from("prod.region-2"));
+ assertEquals(ApplicationId.from("t1", "a1", "i1"), applicationForHost.get(HostName.from("parentHostA.prod.region-2")).get());
+ assertEquals(Optional.empty(), applicationForHost.get(HostName.from("parentHostB.prod.region-2")));
}
private void setUpZones() {
ZoneApiMock nonAwsZone = ZoneApiMock.newBuilder().withId("test.region-1").build();
ZoneApiMock awsZone1 = ZoneApiMock.newBuilder().withId("prod.region-2").withCloud("aws").build();
ZoneApiMock awsZone2 = ZoneApiMock.newBuilder().withId("test.region-3").withCloud("aws").build();
- tester.zoneRegistry().setZones(
- nonAwsZone,
- awsZone1,
- awsZone2);
- tester.configServer().nodeRepository().setFixedNodes(nonAwsZone.getId());
- tester.configServer().nodeRepository().setFixedNodes(awsZone1.getId());
- tester.configServer().nodeRepository().setFixedNodes(awsZone2.getId());
+ tester.zoneRegistry().setZones(nonAwsZone, awsZone1, awsZone2);
+ setNodes(awsZone1.getId());
+ setNodes(nonAwsZone.getId());
+ }
+
+ public void setNodes(ZoneId zone) {
+ var hostA = new Node.Builder()
+ .hostname(HostName.from("parentHostA." + zone.value()))
+ .type(NodeType.host)
+ .owner(ApplicationId.from("hosted-vespa", "tenant-host", "default"))
+ .exclusiveTo(ApplicationId.from("t1", "a1", "i1"))
+ .build();
+ var nodeA = new Node.Builder()
+ .hostname(HostName.from("hostA." + zone.value()))
+ .type(NodeType.tenant)
+ .parentHostname(HostName.from("parentHostA." + zone.value()))
+ .owner(ApplicationId.from("tenant1", "app1", "default"))
+ .build();
+ var hostB = new Node.Builder()
+ .hostname(HostName.from("parentHostB." + zone.value()))
+ .type(NodeType.host)
+ .owner(ApplicationId.from("hosted-vespa", "tenant-host", "default"))
+ .build();
+ tester.configServer().nodeRepository().setNodes(zone, List.of(hostA, nodeA, hostB));
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiCloudTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiCloudTest.java
index a1b06262241..59340458a9c 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiCloudTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiCloudTest.java
@@ -23,6 +23,7 @@ import org.junit.Test;
import java.util.Collections;
import java.util.Set;
+import static com.yahoo.application.container.handler.Request.Method.GET;
import static com.yahoo.application.container.handler.Request.Method.POST;
import static com.yahoo.vespa.hosted.controller.restapi.application.ApplicationApiTest.createApplicationSubmissionData;
@@ -44,12 +45,11 @@ public class ApplicationApiCloudTest extends ControllerContainerCloudTest {
.withBooleanFlag(Flags.ENABLE_PUBLIC_SIGNUP_FLOW.id(), true);
deploymentTester = new DeploymentTester(new ControllerTester(tester));
deploymentTester.controllerTester().computeVersionStatus();
+ setupTenantAndApplication();
}
@Test
public void test_missing_security_clients_pem() {
- setupTenantAndApplication();
-
var application = prodBuilder().build();
var deployRequest = request("/application/v4/tenant/scoober/application/albums/submit", POST)
@@ -62,6 +62,13 @@ public class ApplicationApiCloudTest extends ControllerContainerCloudTest {
400);
}
+ @Test
+ public void get_empty_tenant_info() {
+ var infoRequest =
+ request("/application/v4/tenant/scoober/info", GET)
+ .roles(Set.of(Role.reader(tenantName)));
+ tester.assertResponse(infoRequest, "{}", 200);
+ }
private ApplicationPackageBuilder prodBuilder() {
return new ApplicationPackageBuilder()
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-us-east-1-log-first-part.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-us-east-1-log-first-part.json
index 835bca7966e..895b49157db 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-us-east-1-log-first-part.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-us-east-1-log-first-part.json
@@ -33,7 +33,7 @@
{
"at": 0,
"type": "info",
- "message": "--- platform registry.example.com/vespa/vespa:6.1"
+ "message": "--- platform vespa/vespa:6.1"
},
{
"at": 0,
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/staging-test-log.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/staging-test-log.json
index 32bee6c7e7e..5aa13f1cf3c 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/staging-test-log.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/staging-test-log.json
@@ -23,7 +23,7 @@
{
"at": 14503000,
"type": "info",
- "message": "--- platform registry.example.com/vespa/vespa:6.1"
+ "message": "--- platform vespa/vespa:6.1"
},
{
"at": 14503000,
@@ -38,7 +38,7 @@
{
"at": 14503000,
"type": "info",
- "message": "--- platform registry.example.com/vespa/vespa:6.1"
+ "message": "--- platform vespa/vespa:6.1"
},
{
"at": 14503000,
@@ -53,7 +53,7 @@
{
"at": 14503000,
"type": "info",
- "message": "--- platform registry.example.com/vespa/vespa:6.1"
+ "message": "--- platform vespa/vespa:6.1"
},
{
"at": 14503000,
@@ -92,7 +92,7 @@
{
"at": 14503000,
"type": "info",
- "message": "--- platform registry.example.com/vespa/vespa:6.1"
+ "message": "--- platform vespa/vespa:6.1"
},
{
"at": 14503000,
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/system-test-details.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/system-test-details.json
index 0e7c100ea4e..342db367807 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/system-test-details.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/system-test-details.json
@@ -28,7 +28,7 @@
{
"at": "(ignore)",
"type": "info",
- "message": "--- platform registry.example.com/vespa/vespa:6.1"
+ "message": "--- platform vespa/vespa:6.1"
},
{
"at": "(ignore)",
@@ -43,7 +43,7 @@
{
"at": "(ignore)",
"type": "info",
- "message": "--- platform registry.example.com/vespa/vespa:6.1"
+ "message": "--- platform vespa/vespa:6.1"
},
{
"at": "(ignore)",
@@ -58,7 +58,7 @@
{
"at": "(ignore)",
"type": "info",
- "message": "--- platform registry.example.com/vespa/vespa:6.1"
+ "message": "--- platform vespa/vespa:6.1"
},
{
"at": "(ignore)",
@@ -73,7 +73,7 @@
{
"at": "(ignore)",
"type": "info",
- "message": "--- platform registry.example.com/vespa/vespa:6.1"
+ "message": "--- platform vespa/vespa:6.1"
},
{
"at": "(ignore)",
@@ -112,7 +112,7 @@
{
"at": "(ignore)",
"type": "info",
- "message": "--- platform registry.example.com/vespa/vespa:6.1"
+ "message": "--- platform vespa/vespa:6.1"
},
{
"at": "(ignore)",
@@ -127,7 +127,7 @@
{
"at": "(ignore)",
"type": "info",
- "message": "--- platform registry.example.com/vespa/vespa:6.1"
+ "message": "--- platform vespa/vespa:6.1"
},
{
"at": "(ignore)",
@@ -142,7 +142,7 @@
{
"at": "(ignore)",
"type": "info",
- "message": "--- platform registry.example.com/vespa/vespa:6.1"
+ "message": "--- platform vespa/vespa:6.1"
},
{
"at": "(ignore)",
@@ -157,7 +157,7 @@
{
"at": "(ignore)",
"type": "info",
- "message": "--- platform registry.example.com/vespa/vespa:6.1"
+ "message": "--- platform vespa/vespa:6.1"
},
{
"at": "(ignore)",
@@ -172,7 +172,7 @@
{
"at": "(ignore)",
"type": "info",
- "message": "--- platform registry.example.com/vespa/vespa:6.1"
+ "message": "--- platform vespa/vespa:6.1"
},
{
"at": "(ignore)",
@@ -187,7 +187,7 @@
{
"at": "(ignore)",
"type": "info",
- "message": "--- platform registry.example.com/vespa/vespa:6.1"
+ "message": "--- platform vespa/vespa:6.1"
},
{
"at": "(ignore)",
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/system-test-log.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/system-test-log.json
index edaa01326ad..2f5e93ea3a0 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/system-test-log.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/system-test-log.json
@@ -23,7 +23,7 @@
{
"at": 0,
"type": "info",
- "message": "--- platform registry.example.com/vespa/vespa:6.1"
+ "message": "--- platform vespa/vespa:6.1"
},
{
"at": 0,
@@ -38,7 +38,7 @@
{
"at": 0,
"type": "info",
- "message": "--- platform registry.example.com/vespa/vespa:6.1"
+ "message": "--- platform vespa/vespa:6.1"
},
{
"at": 0,
@@ -53,7 +53,7 @@
{
"at": 0,
"type": "info",
- "message": "--- platform registry.example.com/vespa/vespa:6.1"
+ "message": "--- platform vespa/vespa:6.1"
},
{
"at": 0,
@@ -68,7 +68,7 @@
{
"at": 0,
"type": "info",
- "message": "--- platform registry.example.com/vespa/vespa:6.1"
+ "message": "--- platform vespa/vespa:6.1"
},
{
"at": 0,
@@ -107,7 +107,7 @@
{
"at": 0,
"type": "info",
- "message": "--- platform registry.example.com/vespa/vespa:6.1"
+ "message": "--- platform vespa/vespa:6.1"
},
{
"at": 0,
@@ -122,7 +122,7 @@
{
"at": 0,
"type": "info",
- "message": "--- platform registry.example.com/vespa/vespa:6.1"
+ "message": "--- platform vespa/vespa:6.1"
},
{
"at": 0,
@@ -137,7 +137,7 @@
{
"at": 0,
"type": "info",
- "message": "--- platform registry.example.com/vespa/vespa:6.1"
+ "message": "--- platform vespa/vespa:6.1"
},
{
"at": 0,
@@ -152,7 +152,7 @@
{
"at": 0,
"type": "info",
- "message": "--- platform registry.example.com/vespa/vespa:6.1"
+ "message": "--- platform vespa/vespa:6.1"
},
{
"at": 0,
@@ -167,7 +167,7 @@
{
"at": 0,
"type": "info",
- "message": "--- platform registry.example.com/vespa/vespa:6.1"
+ "message": "--- platform vespa/vespa:6.1"
},
{
"at": 0,
@@ -182,7 +182,7 @@
{
"at": 0,
"type": "info",
- "message": "--- platform registry.example.com/vespa/vespa:6.1"
+ "message": "--- platform vespa/vespa:6.1"
},
{
"at": 0,
diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerEngine.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerEngine.java
index 81074c5ea37..33b301256b2 100644
--- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerEngine.java
+++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerEngine.java
@@ -2,11 +2,11 @@
package com.yahoo.vespa.hosted.dockerapi;
import com.github.dockerjava.api.DockerClient;
-import com.github.dockerjava.api.command.DockerCmdExecFactory;
import com.github.dockerjava.api.command.ExecCreateCmdResponse;
import com.github.dockerjava.api.command.InspectContainerResponse;
import com.github.dockerjava.api.command.InspectExecResponse;
import com.github.dockerjava.api.command.InspectImageResponse;
+import com.github.dockerjava.api.command.PullImageCmd;
import com.github.dockerjava.api.command.UpdateContainerCmd;
import com.github.dockerjava.api.exception.DockerClientException;
import com.github.dockerjava.api.exception.NotFoundException;
@@ -19,7 +19,6 @@ import com.github.dockerjava.core.DefaultDockerClientConfig;
import com.github.dockerjava.core.DockerClientConfig;
import com.github.dockerjava.core.DockerClientImpl;
import com.github.dockerjava.core.async.ResultCallbackTemplate;
-import com.github.dockerjava.core.command.AuthCmdImpl;
import com.github.dockerjava.core.command.ExecStartResultCallback;
import com.github.dockerjava.core.command.PullImageResultCallback;
import com.github.dockerjava.jaxrs.JerseyDockerCmdExecFactory;
@@ -63,7 +62,6 @@ public class DockerEngine implements ContainerEngine {
private final Set<DockerImage> scheduledPulls = new HashSet<>();
private final DockerClient dockerClient;
- private final DockerCmdExecFactory dockerFactory;
private final DockerImageGarbageCollector dockerImageGC;
private final Metrics metrics;
private final Counter numberOfDockerApiFails;
@@ -74,9 +72,8 @@ public class DockerEngine implements ContainerEngine {
this(createDockerClient(), metrics, Clock.systemUTC());
}
- DockerEngine(DockerClientWithExecFactory clientWithExecFactory, Metrics metrics, Clock clock) {
- this.dockerClient = clientWithExecFactory.dockerClient;
- this.dockerFactory = clientWithExecFactory.dockerCmdExecFactory;
+ DockerEngine(DockerClient dockerClient, Metrics metrics, Clock clock) {
+ this.dockerClient = dockerClient;
this.dockerImageGC = new DockerImageGarbageCollector(this);
this.metrics = metrics;
this.clock = clock;
@@ -94,16 +91,15 @@ public class DockerEngine implements ContainerEngine {
scheduledPulls.add(image);
logger.log(Level.INFO, "Starting download of " + image.asString());
+ PullImageCmd pullCmd = dockerClient.pullImageCmd(image.asString());
if (!registryCredentials.equals(RegistryCredentials.none)) {
+ logger.log(Level.INFO, "Authenticating with " + registryCredentials.registryAddress());
AuthConfig authConfig = new AuthConfig().withUsername(registryCredentials.username())
- .withPassword(registryCredentials.password())
- .withRegistryAddress(registryCredentials.registryAddress());
-
- // Need to create AuthCmdImpl directly since DockerClient.authCmd() will throw
- // exception when username/registry url is not set
- new AuthCmdImpl(this.dockerFactory.createAuthCmdExec(), authConfig).exec();
+ .withPassword(registryCredentials.password())
+ .withRegistryAddress(registryCredentials.registryAddress());
+ pullCmd = pullCmd.withAuthConfig(authConfig);
}
- dockerClient.pullImageCmd(image.asString()).exec(new ImagePullCallback(image));
+ pullCmd.exec(new ImagePullCallback(image));
return true;
}
} catch (RuntimeException e) {
@@ -419,7 +415,7 @@ public class DockerEngine implements ContainerEngine {
}
}
- private static DockerClientWithExecFactory createDockerClient() {
+ private static DockerClient createDockerClient() {
JerseyDockerCmdExecFactory dockerFactory = new JerseyDockerCmdExecFactory()
.withMaxPerRouteConnections(10)
.withMaxTotalConnections(100)
@@ -430,18 +426,7 @@ public class DockerEngine implements ContainerEngine {
.withDockerHost("unix:///var/run/docker.sock")
.build();
- return new DockerClientWithExecFactory(
- DockerClientImpl.getInstance(dockerClientConfig).withDockerCmdExecFactory(dockerFactory),
- dockerFactory);
- }
-
- static class DockerClientWithExecFactory {
- private final DockerClient dockerClient;
- private final DockerCmdExecFactory dockerCmdExecFactory;
-
- public DockerClientWithExecFactory(DockerClient dockerClient, DockerCmdExecFactory dockerCmdExecFactory) {
- this.dockerClient = dockerClient;
- this.dockerCmdExecFactory = dockerCmdExecFactory;
- }
+ return DockerClientImpl.getInstance(dockerClientConfig)
+ .withDockerCmdExecFactory(dockerFactory);
}
}
diff --git a/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerEngineTest.java b/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerEngineTest.java
index 66bcf89090b..71bdb321305 100644
--- a/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerEngineTest.java
+++ b/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerEngineTest.java
@@ -3,7 +3,6 @@ package com.yahoo.vespa.hosted.dockerapi;
import com.github.dockerjava.api.DockerClient;
import com.github.dockerjava.api.async.ResultCallback;
-import com.github.dockerjava.api.command.DockerCmdExecFactory;
import com.github.dockerjava.api.command.ExecCreateCmd;
import com.github.dockerjava.api.command.ExecCreateCmdResponse;
import com.github.dockerjava.api.command.ExecStartCmd;
@@ -43,8 +42,7 @@ public class DockerEngineTest {
private final DockerClient dockerClient = mock(DockerClient.class);
private final Metrics metrics = new Metrics();
private final ManualClock clock = new ManualClock();
- private final DockerEngine docker = new DockerEngine(
- new DockerEngine.DockerClientWithExecFactory(dockerClient, mock(DockerCmdExecFactory.class)), metrics, clock);
+ private final DockerEngine docker = new DockerEngine(dockerClient, metrics, clock);
@Test
public void testExecuteCompletes() {
diff --git a/eval/CMakeLists.txt b/eval/CMakeLists.txt
index b0418c4f80d..19b75c7ff46 100644
--- a/eval/CMakeLists.txt
+++ b/eval/CMakeLists.txt
@@ -42,6 +42,7 @@ vespa_define_module(
src/tests/instruction/generic_join
src/tests/instruction/generic_map
src/tests/instruction/generic_merge
+ src/tests/instruction/generic_peek
src/tests/instruction/generic_reduce
src/tests/instruction/generic_rename
src/tests/tensor/default_value_builder_factory
diff --git a/eval/src/tests/eval/fast_value/fast_value_test.cpp b/eval/src/tests/eval/fast_value/fast_value_test.cpp
index 19ddcb3d788..03658d8351b 100644
--- a/eval/src/tests/eval/fast_value/fast_value_test.cpp
+++ b/eval/src/tests/eval/fast_value/fast_value_test.cpp
@@ -1,6 +1,8 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/eval/eval/fast_value.hpp>
+#include <vespa/eval/eval/fast_value.h>
+#include <vespa/eval/eval/value_codec.h>
#include <vespa/vespalib/gtest/gtest.h>
using namespace vespalib;
@@ -56,4 +58,63 @@ TEST(FastCellsTest, add_cells_works) {
EXPECT_EQ(*cells.get(5), 6.0);
}
+using SA = std::vector<vespalib::stringref>;
+
+TEST(FastValueBuilderTest, dense_add_subspace_robustness) {
+ auto factory = FastValueBuilderFactory::get();
+ ValueType type = ValueType::from_spec("tensor(x[2])");
+ auto builder = factory.create_value_builder<double>(type);
+ auto subspace = builder->add_subspace({});
+ subspace[0] = 17.0;
+ subspace[1] = 666;
+ auto other = builder->add_subspace({});
+ other[1] = 42.0;
+ auto value = builder->build(std::move(builder));
+ auto actual = spec_from_value(*value);
+ auto expected = TensorSpec("tensor(x[2])").
+ add({{"x", 0}}, 17.0).
+ add({{"x", 1}}, 42.0);
+ EXPECT_EQ(actual, expected);
+}
+
+TEST(FastValueBuilderTest, sparse_add_subspace_robustness) {
+ auto factory = FastValueBuilderFactory::get();
+ ValueType type = ValueType::from_spec("tensor(x{})");
+ auto builder = factory.create_value_builder<double>(type);
+ auto subspace = builder->add_subspace(SA{"foo"});
+ subspace[0] = 17.0;
+ subspace = builder->add_subspace(SA{"bar"});
+ subspace[0] = 18.0;
+ auto other = builder->add_subspace(SA{"foo"});
+ other[0] = 42.0;
+ auto value = builder->build(std::move(builder));
+ auto actual = spec_from_value(*value);
+ auto expected = TensorSpec("tensor(x{})").
+ add({{"x", "bar"}}, 18.0).
+ add({{"x", "foo"}}, 42.0);
+ EXPECT_EQ(actual, expected);
+}
+
+TEST(FastValueBuilderTest, mixed_add_subspace_robustness) {
+ auto factory = FastValueBuilderFactory::get();
+ ValueType type = ValueType::from_spec("tensor(x{},y[2])");
+ auto builder = factory.create_value_builder<double>(type);
+ auto subspace = builder->add_subspace(SA{"foo"});
+ subspace[0] = 17.0;
+ subspace[1] = 666;
+ subspace = builder->add_subspace(SA{"bar"});
+ subspace[0] = 18.0;
+ subspace[1] = 19.0;
+ auto other = builder->add_subspace(SA{"foo"});
+ other[1] = 42.0;
+ auto value = builder->build(std::move(builder));
+ auto actual = spec_from_value(*value);
+ auto expected = TensorSpec("tensor(x{},y[2])").
+ add({{"x", "foo"}, {"y", 0}}, 17.0).
+ add({{"x", "bar"}, {"y", 0}}, 18.0).
+ add({{"x", "bar"}, {"y", 1}}, 19.0).
+ add({{"x", "foo"}, {"y", 1}}, 42.0);
+ EXPECT_EQ(actual, expected);
+}
+
GTEST_MAIN_RUN_ALL_TESTS()
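
A minimal standalone sketch (illustrative only, not part of the patch): the duplicate-subspace behaviour the robustness tests above pin down, modelled with a plain std::map instead of FastValue. add_subspace_model and its types are invented for illustration; the point is that asking for the same address twice hands back the same cell storage, so the last write wins rather than leaving an extra dangling subspace.

#include <cassert>
#include <map>
#include <string>
#include <vector>

// Toy model of a sparse value builder: add_subspace_model returns the cell
// storage for an address, reusing the existing storage if the address was
// already added instead of appending a second subspace.
std::vector<double> &add_subspace_model(std::map<std::string, std::vector<double>> &cells,
                                        const std::string &addr, size_t subspace_size) {
    return cells.try_emplace(addr, std::vector<double>(subspace_size, 0.0)).first->second;
}

int main() {
    std::map<std::string, std::vector<double>> cells;
    add_subspace_model(cells, "foo", 1)[0] = 17.0;
    add_subspace_model(cells, "bar", 1)[0] = 18.0;
    add_subspace_model(cells, "foo", 1)[0] = 42.0;  // duplicate address: overwrites, does not append
    assert(cells.size() == 2);
    assert(cells.at("foo")[0] == 42.0 && cells.at("bar")[0] == 18.0);
    return 0;
}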
diff --git a/eval/src/tests/eval/tensor_function/tensor_function_test.cpp b/eval/src/tests/eval/tensor_function/tensor_function_test.cpp
index 5c33cdacc44..9441061d6e1 100644
--- a/eval/src/tests/eval/tensor_function/tensor_function_test.cpp
+++ b/eval/src/tests/eval/tensor_function/tensor_function_test.cpp
@@ -38,9 +38,6 @@ struct EvalCtx {
ictx = std::make_unique<InterpretedFunction::Context>(*ifun);
return ifun->eval(*ictx, SimpleObjectParams(params));
}
- const TensorFunction &compile(const TensorFunction &expr) {
- return engine.optimize(expr, stash);
- }
Value::UP make_double(double value) {
return engine.from_spec(TensorSpec("double").add({}, value));
}
@@ -196,8 +193,7 @@ TEST("require that const_value works") {
const auto &fun = const_value(*my_const, ctx.stash);
EXPECT_TRUE(!fun.result_is_mutable());
EXPECT_EQUAL(expect->type(), fun.result_type());
- const auto &prog = ctx.compile(fun);
- TEST_DO(verify_equal(*expect, ctx.eval(prog)));
+ TEST_DO(verify_equal(*expect, ctx.eval(fun)));
}
TEST("require that tensor injection works") {
@@ -207,8 +203,7 @@ TEST("require that tensor injection works") {
const auto &fun = inject(ValueType::from_spec("tensor(x[2],y[2])"), a_id, ctx.stash);
EXPECT_TRUE(!fun.result_is_mutable());
EXPECT_EQUAL(expect->type(), fun.result_type());
- const auto &prog = ctx.compile(fun);
- TEST_DO(verify_equal(*expect, ctx.eval(prog)));
+ TEST_DO(verify_equal(*expect, ctx.eval(fun)));
}
TEST("require that partial tensor reduction works") {
@@ -218,8 +213,7 @@ TEST("require that partial tensor reduction works") {
const auto &fun = reduce(inject(ValueType::from_spec("tensor(x[3],y[2])"), a_id, ctx.stash), Aggr::SUM, {"y"}, ctx.stash);
EXPECT_TRUE(fun.result_is_mutable());
EXPECT_EQUAL(expect->type(), fun.result_type());
- const auto &prog = ctx.compile(fun);
- TEST_DO(verify_equal(*expect, ctx.eval(prog)));
+ TEST_DO(verify_equal(*expect, ctx.eval(fun)));
}
TEST("require that full tensor reduction works") {
@@ -228,8 +222,7 @@ TEST("require that full tensor reduction works") {
const auto &fun = reduce(inject(ValueType::from_spec("tensor(x[3],y[2])"), a_id, ctx.stash), Aggr::SUM, {}, ctx.stash);
EXPECT_TRUE(fun.result_is_mutable());
EXPECT_EQUAL(ValueType::from_spec("double"), fun.result_type());
- const auto &prog = ctx.compile(fun);
- const Value &result = ctx.eval(prog);
+ const Value &result = ctx.eval(fun);
EXPECT_TRUE(result.is_double());
EXPECT_EQUAL(21.0, result.as_double());
}
@@ -241,8 +234,7 @@ TEST("require that tensor map works") {
const auto &fun = map(inject(ValueType::from_spec("tensor(x{},y{})"), a_id, ctx.stash), operation::Neg::f, ctx.stash);
EXPECT_TRUE(fun.result_is_mutable());
EXPECT_EQUAL(expect->type(), fun.result_type());
- const auto &prog = ctx.compile(fun);
- TEST_DO(verify_equal(*expect, ctx.eval(prog)));
+ TEST_DO(verify_equal(*expect, ctx.eval(fun)));
}
TEST("require that tensor join works") {
@@ -255,8 +247,7 @@ TEST("require that tensor join works") {
operation::Mul::f, ctx.stash);
EXPECT_TRUE(fun.result_is_mutable());
EXPECT_EQUAL(expect->type(), fun.result_type());
- const auto &prog = ctx.compile(fun);
- TEST_DO(verify_equal(*expect, ctx.eval(prog)));
+ TEST_DO(verify_equal(*expect, ctx.eval(fun)));
}
TEST("require that tensor merge works") {
@@ -269,8 +260,7 @@ TEST("require that tensor merge works") {
operation::Add::f, ctx.stash);
EXPECT_TRUE(fun.result_is_mutable());
EXPECT_EQUAL(expect->type(), fun.result_type());
- const auto &prog = ctx.compile(fun);
- TEST_DO(verify_equal(*expect, ctx.eval(prog)));
+ TEST_DO(verify_equal(*expect, ctx.eval(fun)));
}
TEST("require that tensor concat works") {
@@ -283,8 +273,7 @@ TEST("require that tensor concat works") {
"y", ctx.stash);
EXPECT_TRUE(fun.result_is_mutable());
EXPECT_EQUAL(expect->type(), fun.result_type());
- const auto &prog = ctx.compile(fun);
- TEST_DO(verify_equal(*expect, ctx.eval(prog)));
+ TEST_DO(verify_equal(*expect, ctx.eval(fun)));
}
TEST("require that tensor create works") {
@@ -305,8 +294,7 @@ TEST("require that tensor create works") {
ctx.stash);
EXPECT_TRUE(fun.result_is_mutable());
EXPECT_EQUAL(expect->type(), fun.result_type());
- const auto &prog = ctx.compile(fun);
- TEST_DO(verify_equal(*expect, ctx.eval(prog)));
+ TEST_DO(verify_equal(*expect, ctx.eval(fun)));
}
TEST("require that single value tensor peek works") {
@@ -328,8 +316,7 @@ TEST("require that single value tensor peek works") {
ctx.stash);
EXPECT_TRUE(fun.result_is_mutable());
EXPECT_EQUAL(expect->type(), fun.result_type());
- const auto &prog = ctx.compile(fun);
- TEST_DO(verify_equal(*expect, ctx.eval(prog)));
+ TEST_DO(verify_equal(*expect, ctx.eval(fun)));
}
TEST("require that tensor subspace tensor peek works") {
@@ -340,8 +327,7 @@ TEST("require that tensor subspace tensor peek works") {
const auto &fun = peek(t, {{"x", "bar"}}, ctx.stash);
EXPECT_TRUE(fun.result_is_mutable());
EXPECT_EQUAL(expect->type(), fun.result_type());
- const auto &prog = ctx.compile(fun);
- TEST_DO(verify_equal(*expect, ctx.eval(prog)));
+ TEST_DO(verify_equal(*expect, ctx.eval(fun)));
}
TEST("require that automatic string conversion tensor peek works") {
@@ -353,8 +339,7 @@ TEST("require that automatic string conversion tensor peek works") {
const auto &fun = peek(t, {{"x", a}}, ctx.stash);
EXPECT_TRUE(fun.result_is_mutable());
EXPECT_TRUE(fun.result_type().is_double());
- const auto &prog = ctx.compile(fun);
- const Value &result = ctx.eval(prog);
+ const Value &result = ctx.eval(fun);
EXPECT_TRUE(result.is_double());
EXPECT_EQUAL(2.0, result.as_double());
}
@@ -367,8 +352,7 @@ TEST("require that tensor rename works") {
{"x"}, {"z"}, ctx.stash);
EXPECT_TRUE(fun.result_is_mutable());
EXPECT_EQUAL(expect->type(), fun.result_type());
- const auto &prog = ctx.compile(fun);
- TEST_DO(verify_equal(*expect, ctx.eval(prog)));
+ TEST_DO(verify_equal(*expect, ctx.eval(fun)));
}
TEST("require that if_node works") {
@@ -383,10 +367,9 @@ TEST("require that if_node works") {
inject(ValueType::from_spec("tensor(x[2])"), c_id, ctx.stash), ctx.stash);
EXPECT_TRUE(!fun.result_is_mutable());
EXPECT_EQUAL(expect_true->type(), fun.result_type());
- const auto &prog = ctx.compile(fun);
- TEST_DO(verify_equal(*expect_true, ctx.eval(prog)));
+ TEST_DO(verify_equal(*expect_true, ctx.eval(fun)));
ctx.replace_tensor(a_id, ctx.make_false());
- TEST_DO(verify_equal(*expect_false, ctx.eval(prog)));
+ TEST_DO(verify_equal(*expect_false, ctx.eval(fun)));
}
TEST("require that if_node result is mutable only when both children produce mutable results") {
diff --git a/eval/src/tests/instruction/generic_peek/CMakeLists.txt b/eval/src/tests/instruction/generic_peek/CMakeLists.txt
new file mode 100644
index 00000000000..11732c865ec
--- /dev/null
+++ b/eval/src/tests/instruction/generic_peek/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(eval_generic_peek_test_app TEST
+ SOURCES
+ generic_peek_test.cpp
+ DEPENDS
+ vespaeval
+ GTest::GTest
+)
+vespa_add_test(NAME eval_generic_peek_test_app NO_VALGRIND COMMAND eval_generic_peek_test_app)
diff --git a/eval/src/tests/instruction/generic_peek/generic_peek_test.cpp b/eval/src/tests/instruction/generic_peek/generic_peek_test.cpp
new file mode 100644
index 00000000000..3874b254ad8
--- /dev/null
+++ b/eval/src/tests/instruction/generic_peek/generic_peek_test.cpp
@@ -0,0 +1,236 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/eval/eval/simple_value.h>
+#include <vespa/eval/eval/fast_value.h>
+#include <vespa/eval/eval/tensor_function.h>
+#include <vespa/eval/eval/value_codec.h>
+#include <vespa/eval/instruction/generic_peek.h>
+#include <vespa/eval/eval/interpreted_function.h>
+#include <vespa/eval/eval/test/tensor_model.hpp>
+#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/vespalib/util/overload.h>
+#include <vespa/vespalib/gtest/gtest.h>
+#include <vespa/vespalib/stllike/asciistream.h>
+#include <stdlib.h>
+#include <variant>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::eval::instruction;
+using namespace vespalib::eval::test;
+
+using vespalib::make_string_short::fmt;
+
+std::vector<Layout> peek_layouts = {
+ {x(4)},
+ {x(4),y(5)},
+ {x(4),y(5),z(3)},
+ float_cells({x(4),y(5),z(3)}),
+ {x({"-1","0","2"})},
+ {x({"-1","0","2"}),y({"-2","0","1"}),z({"-2","-1","0","1","2"})},
+ float_cells({x({"-1","0","2"}),y({"-2","0","1"})}),
+ {x(4),y({"-2","0","1"}),z(3)},
+ {x({"-1","0","2"}),y(5),z({"-2","-1","0","1","2"})},
+ float_cells({x({"-1","0","2"}),y(5),z({"-2","-1","0","1","2"})})
+};
+
+using PeekSpec = GenericPeek::SpecMap;
+
+TensorSpec reference_peek(const TensorSpec &param, const vespalib::string &result_type, const PeekSpec &spec) {
+ TensorSpec result(result_type);
+ ValueType param_type = ValueType::from_spec(param.type());
+ auto is_mapped_dim = [&](const vespalib::string &name) {
+ size_t dim_idx = param_type.dimension_index(name);
+ assert(dim_idx != ValueType::Dimension::npos);
+ const auto &param_dim = param_type.dimensions()[dim_idx];
+ return param_dim.is_mapped();
+ };
+ TensorSpec::Address addr;
+ for (const auto & [dim_name, label_or_child] : spec) {
+ std::visit(vespalib::overload
+ {
+ [&](const TensorSpec::Label &label) {
+ addr.emplace(dim_name, label);
+ },
+ [&](const size_t &child_value) {
+ // here, label_or_child is a size_t specifying the value
+ // we pretend a child produced
+ if (is_mapped_dim(dim_name)) {
+ // (but cast to signed first, to allow labels like the string "-2")
+ addr.emplace(dim_name, vespalib::make_string("%zd", ssize_t(child_value)));
+ } else {
+ addr.emplace(dim_name, child_value);
+ }
+ }
+ }, label_or_child);
+ }
+ for (const auto &cell: param.cells()) {
+ bool keep = true;
+ TensorSpec::Address my_addr;
+ for (const auto &binding: cell.first) {
+ auto pos = addr.find(binding.first);
+ if (pos == addr.end()) {
+ my_addr.emplace(binding.first, binding.second);
+ } else {
+ if (!(pos->second == binding.second)) {
+ keep = false;
+ }
+ }
+ }
+ if (keep) {
+ result.add(my_addr, cell.second);
+ }
+ }
+ return spec_from_value(*value_from_spec(result, SimpleValueBuilderFactory::get()));
+}
+
+
+TensorSpec perform_generic_peek(const TensorSpec &a, const ValueType &result_type,
+ PeekSpec spec, const ValueBuilderFactory &factory)
+{
+ auto param = value_from_spec(a, factory);
+ EXPECT_FALSE(param->type().is_error());
+ EXPECT_FALSE(result_type.is_error());
+ Stash stash;
+ std::vector<Value::CREF> my_stack;
+ my_stack.push_back(*param);
+ size_t child_idx = 0;
+ for (auto & [dim_name, label_or_child] : spec) {
+ if (std::holds_alternative<size_t>(label_or_child)) {
+ // here, label_or_child is a size_t specifying the value
+ // this child should produce (but cast to signed first,
+ // to allow negative values)
+ ssize_t child_value = std::get<size_t>(label_or_child);
+ my_stack.push_back(stash.create<DoubleValue>(child_value));
+ // overwrite label_or_child; from here on it holds the index of
+ // the child for make_instruction
+ label_or_child = child_idx++;
+ }
+ }
+ auto my_op = GenericPeek::make_instruction(param->type(), result_type, spec, factory, stash);
+ InterpretedFunction::EvalSingle single(factory, my_op);
+ return spec_from_value(single.eval(my_stack));
+}
+
+TensorSpec tensor_function_peek(const TensorSpec &a, const ValueType &result_type,
+ PeekSpec spec, const ValueBuilderFactory &factory)
+{
+ Stash stash;
+ auto param = value_from_spec(a, factory);
+ EXPECT_FALSE(param->type().is_error());
+ EXPECT_FALSE(result_type.is_error());
+ std::vector<Value::CREF> my_stack;
+ my_stack.push_back(*param);
+ const auto &func_double = tensor_function::inject(ValueType::double_type(), 1, stash);
+ std::map<vespalib::string, std::variant<TensorSpec::Label, TensorFunction::CREF>> func_spec;
+ for (auto & [dim_name, label_or_child] : spec) {
+ if (std::holds_alternative<size_t>(label_or_child)) {
+ // here, label_or_child is a size_t specifying the value
+ // this child should produce (but cast to signed first,
+ // to allow negative values)
+ ssize_t child_value = std::get<size_t>(label_or_child);
+ my_stack.push_back(stash.create<DoubleValue>(double(child_value)));
+ func_spec.emplace(dim_name, func_double);
+ } else {
+ auto label = std::get<TensorSpec::Label>(label_or_child);
+ func_spec.emplace(dim_name, label);
+ }
+ }
+ const auto &func_param = tensor_function::inject(param->type(), 0, stash);
+ const auto &peek_node = tensor_function::peek(func_param, func_spec, stash);
+ auto my_op = peek_node.compile_self(factory, stash);
+ InterpretedFunction::EvalSingle single(factory, my_op);
+ return spec_from_value(single.eval(my_stack));
+}
+
+vespalib::string to_str(const PeekSpec &spec) {
+ vespalib::asciistream os;
+ os << "{ ";
+ for (const auto & [dim, label_or_index] : spec) {
+ os << dim << " : ";
+ if (std::holds_alternative<size_t>(label_or_index)) {
+ os << "[" << ssize_t(std::get<size_t>(label_or_index)) << "] ";
+ } else {
+ auto label = std::get<TensorSpec::Label>(label_or_index);
+ if (label.is_mapped()) {
+ os << "'" << label.name << "' ";
+ } else {
+ os << "(" << ssize_t(label.index) << ") ";
+ }
+ }
+ }
+ os << "}";
+ return os.str();
+}
+
+void verify_peek_equal(const TensorSpec &input,
+ const PeekSpec &spec,
+ const ValueBuilderFactory &factory)
+{
+ ValueType param_type = ValueType::from_spec(input.type());
+ std::vector<vespalib::string> reduce_dims;
+ for (const auto & [dim_name, ignored] : spec) {
+ reduce_dims.push_back(dim_name);
+ }
+ if (reduce_dims.empty()) return;
+ ValueType result_type = param_type.reduce(reduce_dims);
+ auto expect = reference_peek(input, result_type.to_spec(), spec);
+ SCOPED_TRACE(fmt("peek input: %s\n peek spec: %s\n peek result %s\n",
+ input.to_string().c_str(),
+ to_str(spec).c_str(),
+ expect.to_string().c_str()));
+ auto actual = perform_generic_peek(input, result_type, spec, factory);
+ EXPECT_EQ(actual, expect);
+ auto from_func = tensor_function_peek(input, result_type, spec, factory);
+ EXPECT_EQ(from_func, expect);
+}
+
+void fill_dims_and_check(const TensorSpec &input,
+ PeekSpec spec,
+ std::vector<ValueType::Dimension> dimensions,
+ const ValueBuilderFactory &factory)
+{
+ if (dimensions.empty()) {
+ verify_peek_equal(input, spec, factory);
+ return;
+ }
+ auto dim = dimensions.back();
+ dimensions.pop_back();
+ fill_dims_and_check(input, spec, dimensions, factory);
+ for (int64_t label_value : {-2, -1, 0, 1, 3}) {
+ if (dim.is_indexed()) {
+ size_t index = label_value;
+ if (index >= dim.size) continue;
+ TensorSpec::Label label(index);
+ spec.insert_or_assign(dim.name, label);
+ } else {
+ TensorSpec::Label label(make_string("%" PRId64, label_value));
+ spec.insert_or_assign(dim.name, label);
+ }
+ fill_dims_and_check(input, spec, dimensions, factory);
+ }
+ for (int64_t child_value : {-2, -1, 0, 1, 3}) {
+ spec.insert_or_assign(dim.name, size_t(child_value));
+ fill_dims_and_check(input, spec, dimensions, factory);
+ }
+}
+
+void test_generic_peek_with(const ValueBuilderFactory &factory) {
+ for (const auto & layout : peek_layouts) {
+ TensorSpec input = spec(layout, N());
+ ValueType input_type = ValueType::from_spec(input.type());
+ const auto &dims = input_type.dimensions();
+ PeekSpec spec;
+ fill_dims_and_check(input, spec, dims, factory);
+ }
+}
+
+TEST(GenericPeekTest, generic_peek_works_for_simple_values) {
+ test_generic_peek_with(SimpleValueBuilderFactory::get());
+}
+
+TEST(GenericPeekTest, generic_peek_works_for_fast_values) {
+ test_generic_peek_with(FastValueBuilderFactory::get());
+}
+
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/eval/src/tests/tensor/instruction_benchmark/instruction_benchmark.cpp b/eval/src/tests/tensor/instruction_benchmark/instruction_benchmark.cpp
index 7182d66f8aa..bcd021b05fb 100644
--- a/eval/src/tests/tensor/instruction_benchmark/instruction_benchmark.cpp
+++ b/eval/src/tests/tensor/instruction_benchmark/instruction_benchmark.cpp
@@ -34,6 +34,7 @@
#include <vespa/eval/eval/value_codec.h>
#include <vespa/eval/eval/operation.h>
#include <vespa/eval/eval/tensor_function.h>
+#include <vespa/eval/eval/optimize_tensor_function.h>
#include <vespa/eval/tensor/default_tensor_engine.h>
#include <vespa/eval/tensor/default_value_builder_factory.h>
#include <vespa/vespalib/util/benchmark_timer.h>
@@ -136,21 +137,21 @@ struct Impl {
const auto &lhs_node = tensor_function::inject(lhs, 0, stash);
const auto &rhs_node = tensor_function::inject(rhs, 1, stash);
const auto &join_node = tensor_function::join(lhs_node, rhs_node, function, stash);
- const auto &node = optimize ? engine.optimize(join_node, stash) : join_node;
+ const auto &node = optimize ? optimize_tensor_function(engine, join_node, stash) : join_node;
return node.compile_self(engine, stash);
}
Instruction create_reduce(const ValueType &lhs, Aggr aggr, const std::vector<vespalib::string> &dims, Stash &stash) const {
// create a complete tensor function, but only compile the relevant instruction
const auto &lhs_node = tensor_function::inject(lhs, 0, stash);
const auto &reduce_node = tensor_function::reduce(lhs_node, aggr, dims, stash);
- const auto &node = optimize ? engine.optimize(reduce_node, stash) : reduce_node;
+ const auto &node = optimize ? optimize_tensor_function(engine, reduce_node, stash) : reduce_node;
return node.compile_self(engine, stash);
}
Instruction create_rename(const ValueType &lhs, const std::vector<vespalib::string> &from, const std::vector<vespalib::string> &to, Stash &stash) const {
// create a complete tensor function, but only compile the relevant instruction
const auto &lhs_node = tensor_function::inject(lhs, 0, stash);
const auto &rename_node = tensor_function::rename(lhs_node, from, to, stash);
- const auto &node = optimize ? engine.optimize(rename_node, stash) : rename_node;
+ const auto &node = optimize ? optimize_tensor_function(engine, rename_node, stash) : rename_node;
return node.compile_self(engine, stash);
}
Instruction create_merge(const ValueType &lhs, const ValueType &rhs, operation::op2_t function, Stash &stash) const {
@@ -158,7 +159,7 @@ struct Impl {
const auto &lhs_node = tensor_function::inject(lhs, 0, stash);
const auto &rhs_node = tensor_function::inject(rhs, 1, stash);
const auto &merge_node = tensor_function::merge(lhs_node, rhs_node, function, stash);
- const auto &node = optimize ? engine.optimize(merge_node, stash) : merge_node;
+ const auto &node = optimize ? optimize_tensor_function(engine, merge_node, stash) : merge_node;
return node.compile_self(engine, stash);
}
Instruction create_concat(const ValueType &lhs, const ValueType &rhs, const std::string &dimension, Stash &stash) const {
@@ -167,14 +168,14 @@ struct Impl {
const auto &rhs_node = tensor_function::inject(rhs, 1, stash);
const auto &concat_node = tensor_function::concat(lhs_node, rhs_node, dimension, stash);
return concat_node.compile_self(engine, stash);
- const auto &node = optimize ? engine.optimize(concat_node, stash) : concat_node;
+ const auto &node = optimize ? optimize_tensor_function(engine, concat_node, stash) : concat_node;
return node.compile_self(engine, stash);
}
Instruction create_map(const ValueType &lhs, operation::op1_t function, Stash &stash) const {
// create a complete tensor function, but only compile the relevant instruction
const auto &lhs_node = tensor_function::inject(lhs, 0, stash);
const auto &map_node = tensor_function::map(lhs_node, function, stash);
- const auto &node = optimize ? engine.optimize(map_node, stash) : map_node;
+ const auto &node = optimize ? optimize_tensor_function(engine, map_node, stash) : map_node;
return node.compile_self(engine, stash);
}
Instruction create_tensor_create(const ValueType &proto_type, const TensorSpec &proto, Stash &stash) const {
@@ -185,7 +186,7 @@ struct Impl {
spec.emplace(cell.first, my_double);
}
const auto &create_tensor_node = tensor_function::create(proto_type, spec, stash);
- const auto &node = optimize ? engine.optimize(create_tensor_node, stash) : create_tensor_node;
+ const auto &node = optimize ? optimize_tensor_function(engine, create_tensor_node, stash) : create_tensor_node;
return node.compile_self(engine, stash);
}
Instruction create_tensor_lambda(const ValueType &type, const Function &function, const ValueType &p0_type, Stash &stash) const {
@@ -194,7 +195,7 @@ struct Impl {
NodeTypes types(function, arg_types);
EXPECT_EQ(types.errors(), std::vector<vespalib::string>());
const auto &tensor_lambda_node = tensor_function::lambda(type, {0}, function, std::move(types), stash);
- const auto &node = optimize ? engine.optimize(tensor_lambda_node, stash) : tensor_lambda_node;
+ const auto &node = optimize ? optimize_tensor_function(engine, tensor_lambda_node, stash) : tensor_lambda_node;
return node.compile_self(engine, stash);
}
Instruction create_tensor_peek(const ValueType &type, const MyPeekSpec &my_spec, Stash &stash) const {
@@ -218,30 +219,30 @@ struct Impl {
}
}
const auto &peek_node = tensor_function::peek(my_param, spec, stash);
- const auto &node = optimize ? engine.optimize(peek_node, stash) : peek_node;
+ const auto &node = optimize ? optimize_tensor_function(engine, peek_node, stash) : peek_node;
return node.compile_self(engine, stash);
}
};
//-----------------------------------------------------------------------------
-Impl default_tensor_engine_impl(1, "DefaultTensorEngine", "OLD PROD", DefaultTensorEngine::ref(), false);
-Impl simple_value_impl(3, " SimpleValue", " SimpleV", SimpleValueBuilderFactory::get(), false);
-Impl fast_value_impl(0, " FastValue", "NEW PROD", FastValueBuilderFactory::get(), false);
-Impl optimized_fast_value_impl(2, "Optimized FastValue", "Optimize", FastValueBuilderFactory::get(), true);
-Impl default_tensor_value_impl(4, " DefaultValue", "DefaultV", DefaultValueBuilderFactory::get(), false);
-vespalib::string short_header("--------");
+Impl optimized_fast_value_impl(0, " Optimized FastValue", "NEW PROD", FastValueBuilderFactory::get(), true);
+Impl optimized_default_tensor_engine_impl(1, "Optimized DefaultTensorEngine", "OLD PROD", DefaultTensorEngine::ref(), true);
+Impl fast_value_impl(2, " FastValue", " FastV", FastValueBuilderFactory::get(), false);
+Impl default_tensor_engine_impl(3, " DefaultTensorEngine", "DefaultT", DefaultTensorEngine::ref(), false);
+Impl simple_value_impl(4, " SimpleValue", " SimpleV", SimpleValueBuilderFactory::get(), false);
+vespalib::string short_header("--------");
constexpr double budget = 5.0;
constexpr double best_limit = 0.95; // everything within 95% of best performance gets a star
-constexpr double bad_limit = 0.90; // BAD: new prod has performance lower than 90% of old prod
+constexpr double bad_limit = 0.90; // BAD: new prod has performance lower than 90% of old prod
constexpr double good_limit = 1.10; // GOOD: new prod has performance higher than 110% of old prod
-std::vector<CREF<Impl>> impl_list = {default_tensor_engine_impl,
- simple_value_impl,
- fast_value_impl,
+std::vector<CREF<Impl>> impl_list = {simple_value_impl,
optimized_fast_value_impl,
- default_tensor_value_impl};
+ optimized_default_tensor_engine_impl,
+ fast_value_impl,
+ default_tensor_engine_impl};
//-----------------------------------------------------------------------------
@@ -982,8 +983,8 @@ int main(int argc, char **argv) {
const std::string run_only_prod_option = "--limit-implementations";
if ((argc > 1) && (argv[1] == run_only_prod_option )) {
impl_list.clear();
- impl_list.push_back(fast_value_impl);
- impl_list.push_back(default_tensor_engine_impl);
+ impl_list.push_back(optimized_fast_value_impl);
+ impl_list.push_back(optimized_default_tensor_engine_impl);
++argv;
--argc;
}
diff --git a/eval/src/vespa/eval/eval/CMakeLists.txt b/eval/src/vespa/eval/eval/CMakeLists.txt
index 6c1f99265a7..d27de8e3d21 100644
--- a/eval/src/vespa/eval/eval/CMakeLists.txt
+++ b/eval/src/vespa/eval/eval/CMakeLists.txt
@@ -22,6 +22,7 @@ vespa_add_library(eval_eval OBJECT
node_types.cpp
operation.cpp
operator_nodes.cpp
+ optimize_tensor_function.cpp
param_usage.cpp
simple_tensor.cpp
simple_tensor_engine.cpp
diff --git a/eval/src/vespa/eval/eval/engine_or_factory.cpp b/eval/src/vespa/eval/eval/engine_or_factory.cpp
index e4f710be625..4a95a57e10e 100644
--- a/eval/src/vespa/eval/eval/engine_or_factory.cpp
+++ b/eval/src/vespa/eval/eval/engine_or_factory.cpp
@@ -36,17 +36,6 @@ EngineOrFactory::get_shared(EngineOrFactory hint)
return shared;
}
-const TensorFunction &
-EngineOrFactory::optimize(const TensorFunction &expr, Stash &stash) const {
- if (is_engine()) {
- return engine().optimize(expr, stash);
- } else if (&factory() == &FastValueBuilderFactory::get()) {
- return tensor::DefaultTensorEngine::ref().optimize(expr, stash);
- } else {
- return expr;
- }
-}
-
TensorSpec
EngineOrFactory::to_spec(const Value &value) const
{
diff --git a/eval/src/vespa/eval/eval/engine_or_factory.h b/eval/src/vespa/eval/eval/engine_or_factory.h
index 4784356ae8d..e1f7c503bcd 100644
--- a/eval/src/vespa/eval/eval/engine_or_factory.h
+++ b/eval/src/vespa/eval/eval/engine_or_factory.h
@@ -42,7 +42,6 @@ public:
const TensorEngine &engine() const { return *std::get<engine_t>(_value); }
const ValueBuilderFactory &factory() const { return *std::get<factory_t>(_value); }
// functions that can be called with either engine or factory
- const TensorFunction &optimize(const TensorFunction &expr, Stash &stash) const;
TensorSpec to_spec(const Value &value) const;
std::unique_ptr<Value> from_spec(const TensorSpec &spec) const;
void encode(const Value &value, nbostream &output) const;
diff --git a/eval/src/vespa/eval/eval/fast_sparse_map.h b/eval/src/vespa/eval/eval/fast_sparse_map.h
index 2f3409a45f6..19c171cfed8 100644
--- a/eval/src/vespa/eval/eval/fast_sparse_map.h
+++ b/eval/src/vespa/eval/eval/fast_sparse_map.h
@@ -126,24 +126,39 @@ public:
return h;
}
+ // used to add a mapping, but in the unlikely case
+ // of a hash collision it works like a lookup instead.
template <typename T>
- void add_mapping(ConstArrayRef<T> addr, uint64_t hash) {
+ uint32_t add_mapping(ConstArrayRef<T> addr, uint64_t hash) {
uint32_t value = _map.size();
- for (const auto &label: addr) {
- _labels.emplace_back(label);
+ auto [iter, did_add] = _map.insert(std::make_pair(Key(hash), value));
+ if (__builtin_expect(did_add, true)) {
+ for (const auto &label: addr) {
+ _labels.emplace_back(label);
+ }
+ return value;
}
- _map.insert(std::make_pair(Key(hash), value));
+ return iter->second;
}
+ // used to add a mapping, but in the unlikely case
+ // of a hash collision it works like a lookup instead.
template <typename T>
- void add_mapping(ConstArrayRef<T> addr) {
- uint64_t h = 0;
- uint32_t value = _map.size();
+ uint32_t add_mapping(ConstArrayRef<T> addr) {
+ uint64_t hash = 0;
+ size_t old_labels_size = _labels.size();
for (const auto &label: addr) {
_labels.emplace_back(label);
- h = 31 * h + hash_label(_labels.back());
+ hash = 31 * hash + hash_label(_labels.back());
+ }
+ uint32_t value = _map.size();
+ auto [iter, did_add] = _map.insert(std::make_pair(Key(hash), value));
+ if (__builtin_expect(did_add, true)) {
+ return value;
}
- _map.insert(std::make_pair(Key(h), value));
+ // undo adding to _labels
+ _labels.resize(old_labels_size);
+ return iter->second;
}
size_t lookup(uint64_t hash) const {
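
A minimal standalone sketch (illustrative only, not part of the patch): the insert-or-lookup contract the reworked add_mapping follows, modelled here with a plain std::unordered_map keyed by hash. add_mapping_model is an invented name; callers such as FastValue::add_subspace compare the returned slot against the current number of subspaces to decide between appending fresh cells and reusing the ones already stored.

#include <cassert>
#include <cstdint>
#include <unordered_map>

// Toy model of FastSparseMap::add_mapping: try to insert the hash with the next
// free slot as its value; on a duplicate (collision) return the slot that is
// already mapped instead of growing the map.
uint32_t add_mapping_model(std::unordered_map<uint64_t, uint32_t> &map, uint64_t hash) {
    uint32_t value = static_cast<uint32_t>(map.size());
    auto [iter, did_add] = map.insert({hash, value});
    return did_add ? value : iter->second;
}

int main() {
    std::unordered_map<uint64_t, uint32_t> map;
    assert(add_mapping_model(map, 42) == 0);  // new mapping -> next free slot
    assert(add_mapping_model(map, 99) == 1);
    assert(add_mapping_model(map, 42) == 0);  // duplicate hash -> existing slot, map not grown
    assert(map.size() == 2);
    return 0;
}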
diff --git a/eval/src/vespa/eval/eval/fast_value.hpp b/eval/src/vespa/eval/eval/fast_value.hpp
index ba5c44baf1f..f02d02489b8 100644
--- a/eval/src/vespa/eval/eval/fast_value.hpp
+++ b/eval/src/vespa/eval/eval/fast_value.hpp
@@ -230,8 +230,11 @@ struct FastValue final : Value, ValueBuilder<T> {
const Value::Index &index() const override { return my_index; }
TypedCells cells() const override { return TypedCells(my_cells.memory, get_cell_type<T>(), my_cells.size); }
ArrayRef<T> add_subspace(ConstArrayRef<vespalib::stringref> addr) override {
- my_index.map.add_mapping(addr);
- return my_cells.add_cells(my_subspace_size);
+ size_t idx = my_index.map.add_mapping(addr) * my_subspace_size;
+ if (__builtin_expect((idx == my_cells.size), true)) {
+ return my_cells.add_cells(my_subspace_size);
+ }
+ return ArrayRef<T>(my_cells.get(idx), my_subspace_size);
}
std::unique_ptr<Value> build(std::unique_ptr<ValueBuilder<T>> self) override {
if (my_index.map.num_dims() == 0) {
@@ -304,12 +307,16 @@ FastValueIndex::sparse_full_overlap_join(const ValueType &res_type, const Fun &f
ConstArrayRef<LCT> lhs_cells, ConstArrayRef<RCT> rhs_cells, Stash &stash)
{
auto &result = stash.create<FastValue<OCT>>(res_type, lhs.map.num_dims(), 1, lhs.map.size());
+ auto &result_map = result.my_index.map;
lhs.map.each_map_entry([&](auto lhs_subspace, auto hash)
{
auto rhs_subspace = rhs.map.lookup(hash);
if (rhs_subspace != FastSparseMap::npos()) {
- result.my_index.map.add_mapping(lhs.map.make_addr(lhs_subspace), hash);
- result.my_cells.push_back_fast(fun(lhs_cells[lhs_subspace], rhs_cells[rhs_subspace]));
+ auto idx = result_map.add_mapping(lhs.map.make_addr(lhs_subspace), hash);
+ if (__builtin_expect((idx == result.my_cells.size), true)) {
+ auto cell_value = fun(lhs_cells[lhs_subspace], rhs_cells[rhs_subspace]);
+ result.my_cells.push_back_fast(cell_value);
+ }
}
});
return result;
@@ -326,20 +333,25 @@ FastValueIndex::sparse_only_merge(const ValueType &res_type, const Fun &fun,
auto &result = stash.create<FastValue<OCT>>(res_type, lhs.map.num_dims(), 1, lhs.map.size()+rhs.map.size());
lhs.map.each_map_entry([&](auto lhs_subspace, auto hash)
{
- auto rhs_subspace = rhs.map.lookup(hash);
- result.my_index.map.add_mapping(lhs.map.make_addr(lhs_subspace), hash);
- if (rhs_subspace != FastSparseMap::npos()) {
- result.my_cells.push_back_fast(fun(lhs_cells[lhs_subspace], rhs_cells[rhs_subspace]));
- } else {
- result.my_cells.push_back_fast(lhs_cells[lhs_subspace]);
+ auto idx = result.my_index.map.add_mapping(lhs.map.make_addr(lhs_subspace), hash);
+ if (__builtin_expect((idx == result.my_cells.size), true)) {
+ auto rhs_subspace = rhs.map.lookup(hash);
+ if (rhs_subspace != FastSparseMap::npos()) {
+ auto cell_value = fun(lhs_cells[lhs_subspace], rhs_cells[rhs_subspace]);
+ result.my_cells.push_back_fast(cell_value);
+ } else {
+ result.my_cells.push_back_fast(lhs_cells[lhs_subspace]);
+ }
}
});
rhs.map.each_map_entry([&](auto rhs_subspace, auto hash)
{
auto lhs_subspace = lhs.map.lookup(hash);
if (lhs_subspace == FastSparseMap::npos()) {
- result.my_index.map.add_mapping(rhs.map.make_addr(rhs_subspace), hash);
- result.my_cells.push_back_fast(rhs_cells[rhs_subspace]);
+ auto idx = result.my_index.map.add_mapping(rhs.map.make_addr(rhs_subspace), hash);
+ if (__builtin_expect((idx == result.my_cells.size), true)) {
+ result.my_cells.push_back_fast(rhs_cells[rhs_subspace]);
+ }
}
});
diff --git a/eval/src/vespa/eval/eval/interpreted_function.cpp b/eval/src/vespa/eval/eval/interpreted_function.cpp
index 2b0e915d69a..1016b929574 100644
--- a/eval/src/vespa/eval/eval/interpreted_function.cpp
+++ b/eval/src/vespa/eval/eval/interpreted_function.cpp
@@ -6,6 +6,7 @@
#include "tensor_nodes.h"
#include "tensor_engine.h"
#include "make_tensor_function.h"
+#include "optimize_tensor_function.h"
#include "compile_tensor_function.h"
#include "simple_tensor_engine.h"
#include <vespa/vespalib/util/classname.h>
@@ -73,7 +74,7 @@ InterpretedFunction::InterpretedFunction(EngineOrFactory engine, const nodes::No
_tensor_engine(engine)
{
const TensorFunction &plain_fun = make_tensor_function(engine, root, types, _stash);
- const TensorFunction &optimized = engine.optimize(plain_fun, _stash);
+ const TensorFunction &optimized = optimize_tensor_function(engine, plain_fun, _stash);
_program = compile_tensor_function(engine, optimized, _stash);
}
diff --git a/eval/src/vespa/eval/eval/optimize_tensor_function.cpp b/eval/src/vespa/eval/eval/optimize_tensor_function.cpp
new file mode 100644
index 00000000000..83f806178e8
--- /dev/null
+++ b/eval/src/vespa/eval/eval/optimize_tensor_function.cpp
@@ -0,0 +1,95 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "optimize_tensor_function.h"
+#include "tensor_function.h"
+#include "tensor_engine.h"
+#include "simple_value.h"
+
+#include <vespa/eval/tensor/dense/dense_dot_product_function.h>
+#include <vespa/eval/tensor/dense/dense_xw_product_function.h>
+#include <vespa/eval/tensor/dense/dense_matmul_function.h>
+#include <vespa/eval/tensor/dense/dense_multi_matmul_function.h>
+#include <vespa/eval/tensor/dense/dense_fast_rename_optimizer.h>
+#include <vespa/eval/tensor/dense/dense_add_dimension_optimizer.h>
+#include <vespa/eval/tensor/dense/dense_single_reduce_function.h>
+#include <vespa/eval/tensor/dense/dense_remove_dimension_optimizer.h>
+#include <vespa/eval/tensor/dense/dense_lambda_peek_optimizer.h>
+#include <vespa/eval/tensor/dense/dense_lambda_function.h>
+#include <vespa/eval/tensor/dense/dense_simple_expand_function.h>
+#include <vespa/eval/tensor/dense/dense_simple_join_function.h>
+#include <vespa/eval/tensor/dense/dense_number_join_function.h>
+#include <vespa/eval/tensor/dense/dense_pow_as_map_optimizer.h>
+#include <vespa/eval/tensor/dense/dense_simple_map_function.h>
+#include <vespa/eval/tensor/dense/vector_from_doubles_function.h>
+#include <vespa/eval/tensor/dense/dense_tensor_create_function.h>
+#include <vespa/eval/tensor/dense/dense_tensor_peek_function.h>
+
+#include <vespa/log/log.h>
+LOG_SETUP(".eval.eval.optimize_tensor_function");
+
+namespace vespalib::eval {
+
+namespace {
+
+using namespace vespalib::tensor;
+
+const TensorFunction &optimize_for_factory(const ValueBuilderFactory &factory, const TensorFunction &expr, Stash &stash) {
+ if (&factory == &SimpleValueBuilderFactory::get()) {
+ // never optimize simple value evaluation
+ return expr;
+ }
+ using Child = TensorFunction::Child;
+ Child root(expr);
+ {
+ std::vector<Child::CREF> nodes({root});
+ for (size_t i = 0; i < nodes.size(); ++i) {
+ nodes[i].get().get().push_children(nodes);
+ }
+ while (!nodes.empty()) {
+ const Child &child = nodes.back().get();
+ child.set(DenseDotProductFunction::optimize(child.get(), stash));
+ child.set(DenseXWProductFunction::optimize(child.get(), stash));
+ child.set(DenseMatMulFunction::optimize(child.get(), stash));
+ child.set(DenseMultiMatMulFunction::optimize(child.get(), stash));
+ nodes.pop_back();
+ }
+ }
+ {
+ std::vector<Child::CREF> nodes({root});
+ for (size_t i = 0; i < nodes.size(); ++i) {
+ nodes[i].get().get().push_children(nodes);
+ }
+ while (!nodes.empty()) {
+ const Child &child = nodes.back().get();
+ child.set(DenseSimpleExpandFunction::optimize(child.get(), stash));
+ child.set(DenseAddDimensionOptimizer::optimize(child.get(), stash));
+ child.set(DenseRemoveDimensionOptimizer::optimize(child.get(), stash));
+ child.set(VectorFromDoublesFunction::optimize(child.get(), stash));
+ child.set(DenseTensorCreateFunction::optimize(child.get(), stash));
+ child.set(DenseTensorPeekFunction::optimize(child.get(), stash));
+ child.set(DenseLambdaPeekOptimizer::optimize(child.get(), stash));
+ child.set(DenseLambdaFunction::optimize(child.get(), stash));
+ child.set(DenseFastRenameOptimizer::optimize(child.get(), stash));
+ child.set(DensePowAsMapOptimizer::optimize(child.get(), stash));
+ child.set(DenseSimpleMapFunction::optimize(child.get(), stash));
+ child.set(DenseSimpleJoinFunction::optimize(child.get(), stash));
+ child.set(DenseNumberJoinFunction::optimize(child.get(), stash));
+ child.set(DenseSingleReduceFunction::optimize(child.get(), stash));
+ nodes.pop_back();
+ }
+ }
+ return root.get();
+}
+
+} // namespace vespalib::eval::<unnamed>
+
+const TensorFunction &optimize_tensor_function(EngineOrFactory engine, const TensorFunction &function, Stash &stash) {
+ LOG(debug, "tensor function before optimization:\n%s\n", function.as_string().c_str());
+ const TensorFunction &optimized = (engine.is_engine())
+ ? engine.engine().optimize(function, stash)
+ : optimize_for_factory(engine.factory(), function, stash);
+ LOG(debug, "tensor function after optimization:\n%s\n", optimized.as_string().c_str());
+ return optimized;
+}
+
+} // namespace vespalib::eval
diff --git a/eval/src/vespa/eval/eval/optimize_tensor_function.h b/eval/src/vespa/eval/eval/optimize_tensor_function.h
new file mode 100644
index 00000000000..bc2bc10cca6
--- /dev/null
+++ b/eval/src/vespa/eval/eval/optimize_tensor_function.h
@@ -0,0 +1,15 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "engine_or_factory.h"
+
+namespace vespalib { class Stash; }
+
+namespace vespalib::eval {
+
+struct TensorFunction;
+
+const TensorFunction &optimize_tensor_function(EngineOrFactory engine, const TensorFunction &function, Stash &stash);
+
+} // namespace vespalib::eval
diff --git a/eval/src/vespa/eval/eval/tensor_function.cpp b/eval/src/vespa/eval/eval/tensor_function.cpp
index 57036f04e9a..8464aa14b59 100644
--- a/eval/src/vespa/eval/eval/tensor_function.cpp
+++ b/eval/src/vespa/eval/eval/tensor_function.cpp
@@ -13,6 +13,7 @@
#include <vespa/eval/instruction/generic_join.h>
#include <vespa/eval/instruction/generic_map.h>
#include <vespa/eval/instruction/generic_merge.h>
+#include <vespa/eval/instruction/generic_peek.h>
#include <vespa/eval/instruction/generic_reduce.h>
#include <vespa/eval/instruction/generic_rename.h>
#include <vespa/vespalib/objects/objectdumper.h>
@@ -494,8 +495,23 @@ Peek::push_children(std::vector<Child::CREF> &children) const
}
Instruction
-Peek::compile_self(EngineOrFactory, Stash &) const
+Peek::compile_self(EngineOrFactory engine, Stash &stash) const
{
+ if (engine.is_factory()) {
+ instruction::GenericPeek::SpecMap generic_spec;
+ size_t child_idx = 0;
+ for (const auto & [dim_name, label_or_child] : spec()) {
+ std::visit(vespalib::overload {
+ [&](const TensorSpec::Label &label) {
+ generic_spec.emplace(dim_name, label);
+ },
+ [&](const TensorFunction::Child &) {
+ generic_spec.emplace(dim_name, child_idx++);
+ }
+ }, label_or_child);
+ }
+ return instruction::GenericPeek::make_instruction(param_type(), result_type(), generic_spec, engine.factory(), stash);
+ }
return Instruction(op_tensor_peek, wrap_param<Peek>(*this));
}
@@ -613,6 +629,7 @@ const TensorFunction &peek(const TensorFunction &param, const std::map<vespalib:
for (const auto &dim_spec: spec) {
dimensions.push_back(dim_spec.first);
}
+ assert(!dimensions.empty());
ValueType result_type = param.result_type().reduce(dimensions);
return stash.create<Peek>(result_type, param, spec);
}
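
The new factory branch in Peek::compile_self flattens the peek spec into GenericPeek's SpecMap: dimensions addressed by a verbatim label keep the label, while dimensions addressed by a child expression are replaced by the child's position, counted in spec (dimension-name) order. A small sketch of that conversion, with stand-in types (Label as a plain string, ChildExpr as an empty tag) rather than the real TensorSpec::Label and TensorFunction::Child:

#include <cstddef>
#include <map>
#include <string>
#include <variant>

using Label = std::string;                      // stand-in for TensorSpec::Label
struct ChildExpr {};                            // stand-in for a TensorFunction child
using PeekSpec = std::map<std::string, std::variant<Label, ChildExpr>>;
using GenericSpec = std::map<std::string, std::variant<Label, size_t>>;

GenericSpec to_generic_spec(const PeekSpec &spec) {
    GenericSpec out;
    size_t child_idx = 0;                       // children are numbered in dimension-name order
    for (const auto &[dim, label_or_child] : spec) {
        if (std::holds_alternative<Label>(label_or_child)) {
            out.emplace(dim, std::get<Label>(label_or_child));
        } else {
            out.emplace(dim, child_idx++);
        }
    }
    return out;
}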
diff --git a/eval/src/vespa/eval/eval/test/eval_fixture.cpp b/eval/src/vespa/eval/eval/test/eval_fixture.cpp
index a353f3a9ae2..b7655a6ee2f 100644
--- a/eval/src/vespa/eval/eval/test/eval_fixture.cpp
+++ b/eval/src/vespa/eval/eval/test/eval_fixture.cpp
@@ -3,6 +3,7 @@
#include <vespa/vespalib/testkit/test_kit.h>
#include "eval_fixture.h"
#include <vespa/eval/eval/make_tensor_function.h>
+#include <vespa/eval/eval/optimize_tensor_function.h>
#include <vespa/vespalib/util/stringfmt.h>
using vespalib::make_string_short::fmt;
@@ -203,7 +204,7 @@ EvalFixture::EvalFixture(EngineOrFactory engine,
_mutable_set(get_mutable(*_function, param_repo)),
_plain_tensor_function(make_tensor_function(_engine, _function->root(), _node_types, _stash)),
_patched_tensor_function(maybe_patch(allow_mutable, _plain_tensor_function, _mutable_set, _stash)),
- _tensor_function(optimized ? _engine.optimize(_patched_tensor_function, _stash) : _patched_tensor_function),
+ _tensor_function(optimized ? optimize_tensor_function(engine, _patched_tensor_function, _stash) : _patched_tensor_function),
_ifun(_engine, _tensor_function),
_ictx(_ifun),
_param_values(make_params(_engine, *_function, param_repo)),
diff --git a/eval/src/vespa/eval/instruction/CMakeLists.txt b/eval/src/vespa/eval/instruction/CMakeLists.txt
index 2cd0577acc9..52f411dc543 100644
--- a/eval/src/vespa/eval/instruction/CMakeLists.txt
+++ b/eval/src/vespa/eval/instruction/CMakeLists.txt
@@ -5,6 +5,7 @@ vespa_add_library(eval_instruction OBJECT
generic_concat
generic_create
generic_join
+ generic_peek
generic_reduce
generic_map
generic_merge
diff --git a/eval/src/vespa/eval/instruction/generic_peek.cpp b/eval/src/vespa/eval/instruction/generic_peek.cpp
new file mode 100644
index 00000000000..651ce4df28a
--- /dev/null
+++ b/eval/src/vespa/eval/instruction/generic_peek.cpp
@@ -0,0 +1,352 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "generic_peek.h"
+#include <vespa/eval/eval/nested_loop.h>
+#include <vespa/eval/eval/wrap_param.h>
+#include <vespa/vespalib/util/overload.h>
+#include <vespa/vespalib/util/stash.h>
+#include <vespa/vespalib/util/typify.h>
+#include <vespa/vespalib/util/visit_ranges.h>
+#include <cassert>
+
+using namespace vespalib::eval::tensor_function;
+
+namespace vespalib::eval::instruction {
+
+using State = InterpretedFunction::State;
+using Instruction = InterpretedFunction::Instruction;
+
+namespace {
+
+static constexpr size_t npos = -1;
+
+using Spec = GenericPeek::SpecMap;
+
+size_t count_children(const Spec &spec)
+{
+ size_t num_children = 0;
+ for (const auto & [dim_name, child_or_label] : spec) {
+ if (std::holds_alternative<size_t>(child_or_label)) {
+ ++num_children;
+ }
+ }
+ return num_children;
+}
+
+struct DimSpec {
+ vespalib::stringref name;
+ GenericPeek::MyLabel child_or_label;
+ bool has_child() const {
+ return std::holds_alternative<size_t>(child_or_label);
+ }
+ bool has_label() const {
+ return std::holds_alternative<TensorSpec::Label>(child_or_label);
+ }
+ size_t get_child_idx() const {
+ return std::get<size_t>(child_or_label);
+ }
+ vespalib::stringref get_label_name() const {
+ auto label = std::get<TensorSpec::Label>(child_or_label);
+ assert(label.is_mapped());
+ return label.name;
+ }
+ size_t get_label_index() const {
+ auto label = std::get<TensorSpec::Label>(child_or_label);
+ assert(label.is_indexed());
+ return label.index;
+ }
+};
+
+struct ExtractedSpecs {
+ using Dimension = ValueType::Dimension;
+ struct MyComp {
+ bool operator() (const Dimension &a, const Spec::value_type &b) { return a.name < b.first; }
+ bool operator() (const Spec::value_type &a, const Dimension &b) { return a.first < b.name; }
+ };
+ std::vector<Dimension> dimensions;
+ std::vector<DimSpec> specs;
+
+ ExtractedSpecs(bool indexed,
+ const std::vector<Dimension> &input_dims,
+ const Spec &spec)
+ {
+ auto visitor = overload
+ {
+ [&](visit_ranges_first, const auto &a) {
+ if (a.is_indexed() == indexed) dimensions.push_back(a);
+ },
+ [&](visit_ranges_second, const auto &) {
+ // spec has unknown dimension
+ abort();
+ },
+ [&](visit_ranges_both, const auto &a, const auto &b) {
+ if (a.is_indexed() == indexed) {
+ dimensions.push_back(a);
+ const auto & [spec_dim_name, child_or_label] = b;
+ assert(a.name == spec_dim_name);
+ specs.emplace_back(DimSpec{a.name, child_or_label});
+ }
+ }
+ };
+ visit_ranges(visitor,
+ input_dims.begin(), input_dims.end(),
+ spec.begin(), spec.end(), MyComp());
+ }
+ ~ExtractedSpecs();
+};
+ExtractedSpecs::~ExtractedSpecs() = default;
+
+struct DenseSizes {
+ std::vector<size_t> size;
+ std::vector<size_t> stride;
+ size_t cur_size;
+
+ DenseSizes(const std::vector<ValueType::Dimension> &dims)
+ : size(), stride(), cur_size(1)
+ {
+ for (const auto &dim : dims) {
+ assert(dim.is_indexed());
+ size.push_back(dim.size);
+ }
+ stride.resize(size.size());
+ for (size_t i = size.size(); i-- > 0; ) {
+ stride[i] = cur_size;
+ cur_size *= size[i];
+ }
+ }
+};
+
+/** Compute input offsets for all output cells */
+struct DensePlan {
+ size_t in_dense_size;
+ size_t out_dense_size;
+ std::vector<size_t> loop_cnt;
+ std::vector<size_t> in_stride;
+ size_t verbatim_offset = 0;
+ struct Child {
+ size_t idx;
+ size_t stride;
+ size_t limit;
+ };
+ std::vector<Child> children;
+
+ DensePlan(const ValueType &input_type, const Spec &spec)
+ {
+ const ExtractedSpecs mine(true, input_type.dimensions(), spec);
+ DenseSizes sizes(mine.dimensions);
+ in_dense_size = sizes.cur_size;
+ out_dense_size = 1;
+ auto pos = mine.specs.begin();
+ for (size_t i = 0; i < mine.dimensions.size(); ++i) {
+ const auto &dim = mine.dimensions[i];
+ if ((pos == mine.specs.end()) || (dim.name < pos->name)) {
+ loop_cnt.push_back(sizes.size[i]);
+ in_stride.push_back(sizes.stride[i]);
+ out_dense_size *= sizes.size[i];
+ } else {
+ assert(dim.name == pos->name);
+ if (pos->has_child()) {
+ children.push_back(Child{pos->get_child_idx(), sizes.stride[i], sizes.size[i]});
+ } else {
+ assert(pos->has_label());
+ size_t label_index = pos->get_label_index();
+ assert(label_index < sizes.size[i]);
+ verbatim_offset += label_index * sizes.stride[i];
+ }
+ ++pos;
+ }
+ }
+ assert(pos == mine.specs.end());
+ }
+
+ /** Get initial offset (from verbatim labels and child values) */
+ template <typename Getter>
+ size_t get_offset(const Getter &get_child_value) const {
+ size_t offset = verbatim_offset;
+ for (size_t i = 0; i < children.size(); ++i) {
+ size_t from_child = get_child_value(children[i].idx);
+ if (from_child < children[i].limit) {
+ offset += from_child * children[i].stride;
+ } else {
+ return npos;
+ }
+ }
+ return offset;
+ }
+
+ template<typename F> void execute(size_t offset, const F &f) const {
+ run_nested_loop<F>(offset, loop_cnt, in_stride, f);
+ }
+};
+
+struct SparseState {
+ std::vector<vespalib::string> view_addr;
+ std::vector<vespalib::stringref> view_refs;
+ std::vector<const vespalib::stringref *> lookup_refs;
+ std::vector<vespalib::stringref> output_addr;
+ std::vector<vespalib::stringref *> fetch_addr;
+
+ SparseState(std::vector<vespalib::string> view_addr_in, size_t out_dims)
+ : view_addr(std::move(view_addr_in)),
+ view_refs(view_addr.size()),
+ lookup_refs(view_addr.size()),
+ output_addr(out_dims),
+ fetch_addr(out_dims)
+ {
+ for (size_t i = 0; i < view_addr.size(); ++i) {
+ view_refs[i] = view_addr[i];
+ lookup_refs[i] = &view_refs[i];
+ }
+ for (size_t i = 0; i < out_dims; ++i) {
+ fetch_addr[i] = &output_addr[i];
+ }
+ }
+ ~SparseState();
+};
+SparseState::~SparseState() = default;
+
+struct SparsePlan {
+ size_t out_mapped_dims;
+ std::vector<DimSpec> lookup_specs;
+ std::vector<size_t> view_dims;
+
+ SparsePlan(const ValueType &input_type,
+ const GenericPeek::SpecMap &spec)
+ : out_mapped_dims(0),
+ view_dims()
+ {
+ ExtractedSpecs mine(false, input_type.dimensions(), spec);
+ lookup_specs = std::move(mine.specs);
+ auto pos = lookup_specs.begin();
+ for (size_t dim_idx = 0; dim_idx < mine.dimensions.size(); ++dim_idx) {
+ const auto & dim = mine.dimensions[dim_idx];
+ if ((pos == lookup_specs.end()) || (dim.name < pos->name)) {
+ ++out_mapped_dims;
+ } else {
+ assert(dim.name == pos->name);
+ view_dims.push_back(dim_idx);
+ ++pos;
+ }
+ }
+ assert(pos == lookup_specs.end());
+ }
+
+ ~SparsePlan();
+
+ template <typename Getter>
+ SparseState make_state(const Getter &get_child_value) const {
+ std::vector<vespalib::string> view_addr;
+ for (const auto & dim : lookup_specs) {
+ if (dim.has_child()) {
+ int64_t child_value = get_child_value(dim.get_child_idx());
+ view_addr.push_back(vespalib::make_string("%" PRId64, child_value));
+ } else {
+ view_addr.push_back(dim.get_label_name());
+ }
+ }
+ assert(view_addr.size() == view_dims.size());
+ return SparseState(std::move(view_addr), out_mapped_dims);
+ }
+};
+SparsePlan::~SparsePlan() = default;
+
+struct PeekParam {
+ const ValueType res_type;
+ DensePlan dense_plan;
+ SparsePlan sparse_plan;
+ size_t num_children;
+ const ValueBuilderFactory &factory;
+
+ PeekParam(const ValueType &input_type,
+ const ValueType &res_type_in,
+ const GenericPeek::SpecMap &spec_in,
+ const ValueBuilderFactory &factory_in)
+ : res_type(res_type_in),
+ dense_plan(input_type, spec_in),
+ sparse_plan(input_type, spec_in),
+ num_children(count_children(spec_in)),
+ factory(factory_in)
+ {
+ assert(dense_plan.in_dense_size == input_type.dense_subspace_size());
+ assert(dense_plan.out_dense_size == res_type.dense_subspace_size());
+ }
+};
+
+template <typename ICT, typename OCT, typename Getter>
+Value::UP
+generic_mixed_peek(const ValueType &res_type,
+ const Value &input_value,
+ const SparsePlan &sparse_plan,
+ const DensePlan &dense_plan,
+ const ValueBuilderFactory &factory,
+ const Getter &get_child_value)
+{
+ auto input_cells = input_value.cells().typify<ICT>();
+ size_t bad_guess = 1;
+ auto builder = factory.create_value_builder<OCT>(res_type,
+ sparse_plan.out_mapped_dims,
+ dense_plan.out_dense_size,
+ bad_guess);
+ size_t filled_subspaces = 0;
+ size_t dense_offset = dense_plan.get_offset(get_child_value);
+ if (dense_offset != npos) {
+ SparseState state = sparse_plan.make_state(get_child_value);
+ auto view = input_value.index().create_view(sparse_plan.view_dims);
+ view->lookup(state.lookup_refs);
+ size_t input_subspace;
+ while (view->next_result(state.fetch_addr, input_subspace)) {
+ auto dst = builder->add_subspace(state.output_addr).begin();
+ auto input_offset = input_subspace * dense_plan.in_dense_size;
+ dense_plan.execute(dense_offset + input_offset,
+ [&](size_t idx) { *dst++ = input_cells[idx]; });
+ ++filled_subspaces;
+ }
+ }
+ if ((sparse_plan.out_mapped_dims == 0) && (filled_subspaces == 0)) {
+ for (auto & v : builder->add_subspace({})) {
+ v = OCT{};
+ }
+ }
+ return builder->build(std::move(builder));
+}
+
+template <typename ICT, typename OCT>
+void my_generic_peek_op(State &state, uint64_t param_in) {
+ const auto &param = unwrap_param<PeekParam>(param_in);
+ const Value & input_value = state.peek(param.num_children);
+ const size_t last_child = param.num_children - 1;
+ auto get_child_value = [&] (size_t child_idx) {
+ size_t stack_idx = last_child - child_idx;
+ return int64_t(state.peek(stack_idx).as_double());
+ };
+ auto up = generic_mixed_peek<ICT,OCT>(param.res_type, input_value,
+ param.sparse_plan, param.dense_plan,
+ param.factory, get_child_value);
+ const Value &result = *state.stash.create<Value::UP>(std::move(up));
+ // num_children does not include the "input" param
+ state.pop_n_push(param.num_children + 1, result);
+}
+
+struct SelectGenericPeekOp {
+ template <typename ICT, typename OCT> static auto invoke() {
+ return my_generic_peek_op<ICT,OCT>;
+ }
+};
+
+//-----------------------------------------------------------------------------
+
+} // namespace <unnamed>
+
+Instruction
+GenericPeek::make_instruction(const ValueType &input_type,
+ const ValueType &res_type,
+ const SpecMap &spec,
+ const ValueBuilderFactory &factory,
+ Stash &stash)
+{
+ const auto &param = stash.create<PeekParam>(input_type, res_type, spec, factory);
+ auto fun = typify_invoke<2,TypifyCellType,SelectGenericPeekOp>(input_type.cell_type(), res_type.cell_type());
+ return Instruction(fun, wrap_param<PeekParam>(param));
+}
+
+} // namespace
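
DenseSizes/DensePlan above reduce a peek over the indexed dimensions to plain stride arithmetic: strides are computed right-to-left (the innermost dimension has stride 1), dimensions bound to a verbatim label fold into a constant offset, dimensions bound to a child add child_value * stride at runtime, and the remaining dimensions become output loops. A self-contained worked example of that arithmetic, assuming an input with dimensions x[2], y[3], z[4] where x is bound to label index 1 and z to a child value of 2:

#include <cassert>
#include <cstddef>
#include <vector>

int main() {
    std::vector<size_t> size = {2, 3, 4};       // x, y, z
    std::vector<size_t> stride(size.size());
    size_t cur = 1;
    for (size_t i = size.size(); i-- > 0; ) {   // row-major: last dim is innermost
        stride[i] = cur;
        cur *= size[i];
    }
    assert(stride[0] == 12 && stride[1] == 4 && stride[2] == 1);

    size_t verbatim_offset = 1 * stride[0];     // x bound to label index 1
    size_t child_value = 2;                     // z resolved from a child at runtime
    size_t base = verbatim_offset + child_value * stride[2];

    // one output cell per y-index: offsets 14, 18, 22 into the dense input
    std::vector<size_t> offsets;
    for (size_t y = 0; y < size[1]; ++y) {
        offsets.push_back(base + y * stride[1]);
    }
    assert(offsets == (std::vector<size_t>{14, 18, 22}));
    return 0;
}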
diff --git a/eval/src/vespa/eval/instruction/generic_peek.h b/eval/src/vespa/eval/instruction/generic_peek.h
new file mode 100644
index 00000000000..d31b47238cb
--- /dev/null
+++ b/eval/src/vespa/eval/instruction/generic_peek.h
@@ -0,0 +1,29 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/eval/eval/value_type.h>
+#include <vespa/eval/eval/tensor_spec.h>
+#include <vespa/eval/eval/interpreted_function.h>
+#include <map>
+
+namespace vespalib { class Stash; }
+namespace vespalib::eval { struct ValueBuilderFactory; }
+
+namespace vespalib::eval::instruction {
+
+//-----------------------------------------------------------------------------
+
+struct GenericPeek {
+ using MyLabel = std::variant<TensorSpec::Label, size_t>;
+ using SpecMap = std::map<vespalib::string, MyLabel>;
+
+ static InterpretedFunction::Instruction
+ make_instruction(const ValueType &input_type,
+ const ValueType &res_type,
+ const SpecMap &spec,
+ const ValueBuilderFactory &factory,
+ Stash &stash);
+};
+
+} // namespace
diff --git a/eval/src/vespa/eval/tensor/default_tensor_engine.cpp b/eval/src/vespa/eval/tensor/default_tensor_engine.cpp
index d44e822792b..b50092c88b5 100644
--- a/eval/src/vespa/eval/tensor/default_tensor_engine.cpp
+++ b/eval/src/vespa/eval/tensor/default_tensor_engine.cpp
@@ -277,7 +277,6 @@ DefaultTensorEngine::optimize(const TensorFunction &expr, Stash &stash) const
{
using Child = TensorFunction::Child;
Child root(expr);
- LOG(debug, "tensor function before optimization:\n%s\n", root.get().as_string().c_str());
{
std::vector<Child::CREF> nodes({root});
for (size_t i = 0; i < nodes.size(); ++i) {
@@ -316,7 +315,6 @@ DefaultTensorEngine::optimize(const TensorFunction &expr, Stash &stash) const
nodes.pop_back();
}
}
- LOG(debug, "tensor function after optimization:\n%s\n", root.get().as_string().c_str());
return root.get();
}
diff --git a/eval/src/vespa/eval/tensor/mixed/packed_mixed_tensor_builder.cpp b/eval/src/vespa/eval/tensor/mixed/packed_mixed_tensor_builder.cpp
index dff1f3cbd21..ef08ed20a9d 100644
--- a/eval/src/vespa/eval/tensor/mixed/packed_mixed_tensor_builder.cpp
+++ b/eval/src/vespa/eval/tensor/mixed/packed_mixed_tensor_builder.cpp
@@ -11,10 +11,8 @@ PackedMixedTensorBuilder<T>::add_subspace(ConstArrayRef<vespalib::stringref> add
std::vector<vespalib::stringref> addr(addr_in.begin(), addr_in.end());
uint32_t idx = _mappings_builder.add_mapping_for(addr);
size_t offset = idx * _subspace_size;
- assert(offset <= _cells.size());
- if (offset == _cells.size()) {
- _cells.resize(offset + _subspace_size);
- }
+ assert(offset == _cells.size());
+ _cells.resize(offset + _subspace_size);
return ArrayRef<T>(&_cells[offset], _subspace_size);
}
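
The tightened assert above turns a defensive resize into an append-only invariant: every subspace added through the mappings builder must land exactly at the current end of the cell buffer. A small sketch of that invariant in isolation, with a hypothetical AppendOnlyCells type standing in for the builder:

#include <cassert>
#include <cstddef>
#include <vector>

template <typename T>
struct AppendOnlyCells {
    std::vector<T> cells;
    size_t subspace_size;

    explicit AppendOnlyCells(size_t subspace_size_in) : cells(), subspace_size(subspace_size_in) {}

    // idx is the mapping index assigned to the new sparse address;
    // with the stricter assert it must always be the next free slot
    T *add_subspace(size_t idx) {
        size_t offset = idx * subspace_size;
        assert(offset == cells.size());
        cells.resize(offset + subspace_size);
        return &cells[offset];
    }
};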
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
index bf3b497bc3f..8a0d5b2dd83 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
@@ -6,7 +6,6 @@ import com.yahoo.vespa.defaults.Defaults;
import com.yahoo.vespa.flags.custom.HostCapacity;
import com.yahoo.vespa.flags.custom.SharedHost;
-import java.math.BigDecimal;
import java.util.List;
import java.util.Optional;
import java.util.TreeMap;
@@ -303,12 +302,6 @@ public class Flags {
"Takes effect at next run of maintainer",
APPLICATION_ID);
- public static final UnboundBooleanFlag USE_NEW_RESTAPI_HANDLER = defineFeatureFlag(
- "use-new-restapi-handler",
- false,
- "Whether application containers should use the new restapi handler implementation",
- "Takes effect on next internal redeployment");
-
public static final UnboundBooleanFlag USE_ACCESS_CONTROL_CLIENT_AUTHENTICATION = defineFeatureFlag(
"use-access-control-client-authentication",
false,
@@ -323,12 +316,21 @@ public class Flags {
"Takes effect on next internal redeploy",
APPLICATION_ID);
- public static final UnboundDoubleFlag JETTY_THREADPOOL_SCALE_FACTOR = defineDoubleFlag(
- "jetty-threadpool-size-factor",
- 0.0,
- "Size of Jetty threadpool as a factor of vcpu",
- "Takes effect on next internal redeployment",
- APPLICATION_ID);
+ public static final UnboundBooleanFlag USE_ASYNC_MESSAGE_HANDLING_ON_SCHEDULE = defineFeatureFlag(
+ "async-message-handling-on-schedule", false,
+ "Optionally deliver async messages in own thread",
+ "Takes effect at redeployment",
+ ZONE_ID, APPLICATION_ID);
+ public static final UnboundIntFlag CONTENT_NODE_BUCKET_DB_STRIPE_BITS = defineIntFlag(
+ "content-node-bucket-db-stripe-bits", 0,
+ "Number of bits used for striping the bucket DB in service layer",
+ "Takes effect at redeployment",
+ ZONE_ID, APPLICATION_ID);
+ public static final UnboundIntFlag MERGE_CHUNK_SIZE = defineIntFlag(
+ "merge-chunk-size", 0x400000,
+ "Size of merge buffer in service layer",
+ "Takes effect at redeployment",
+ ZONE_ID, APPLICATION_ID);
public static final UnboundBooleanFlag REGIONAL_CONTAINER_REGISTRY = defineFeatureFlag(
"regional-container-registry",
@@ -336,6 +338,13 @@ public class Flags {
"Whether host-admin should download images from the zone's regional container registry",
"Takes effect on host-admin restart");
+ public static final UnboundBooleanFlag ENABLE_AUTOMATIC_REINDEXING = defineFeatureFlag(
+ "enable-automatic-reindexing",
+ false,
+ "Whether to automatically trigger reindexing from config change",
+ "Takes effect on next internal redeployment",
+ APPLICATION_ID);
+
/** WARNING: public for testing: All flags should be defined in {@link Flags}. */
public static UnboundBooleanFlag defineFeatureFlag(String flagId, boolean defaultValue, String description,
String modificationEffect, FetchVector.Dimension... dimensions) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
index 8e691b538a1..3cae4a5a5ea 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
@@ -4,14 +4,11 @@ package com.yahoo.vespa.hosted.provision.provisioning;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ApplicationTransaction;
import com.yahoo.config.provision.ClusterMembership;
-import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Flavor;
import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.ParentHostUnavailableException;
-import com.yahoo.config.provision.ProvisionLock;
import com.yahoo.transaction.Mutex;
-import com.yahoo.transaction.NestedTransaction;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java
index 511b397be1a..39375494d01 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java
@@ -137,9 +137,8 @@ class NodesResponse extends HttpResponse {
if ( ! allFields) return;
object.setString("id", node.hostname());
object.setString("state", NodeSerializer.toString(node.state()));
- object.setString("type", node.type().name());
- object.setString("hostname", node.hostname());
object.setString("type", NodeSerializer.toString(node.type()));
+ object.setString("hostname", node.hostname());
if (node.parentHostname().isPresent()) {
object.setString("parentHostname", node.parentHostname().get());
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java
new file mode 100644
index 00000000000..af14876c20c
--- /dev/null
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java
@@ -0,0 +1,140 @@
+package com.yahoo.vespa.hosted.provision;
+
+import com.yahoo.component.Version;
+import com.yahoo.config.model.builder.xml.XmlHelper;
+import com.yahoo.config.provision.ActivationContext;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ApplicationTransaction;
+import com.yahoo.config.provision.Capacity;
+import com.yahoo.config.provision.Cloud;
+import com.yahoo.config.provision.ClusterResources;
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.HostSpec;
+import com.yahoo.config.provision.NodeResources;
+import com.yahoo.config.provision.ProvisionLock;
+import com.yahoo.config.provision.RegionName;
+import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.Zone;
+import com.yahoo.config.provisioning.FlavorsConfig;
+import com.yahoo.transaction.NestedTransaction;
+import com.yahoo.vespa.config.ConfigPayload;
+import com.yahoo.vespa.hosted.provision.node.Agent;
+import com.yahoo.vespa.hosted.provision.persistence.NodeSerializer;
+import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
+import com.yahoo.vespa.model.builder.xml.dom.DomConfigPayloadBuilder;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.UncheckedIOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Consumer;
+import java.util.logging.Logger;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import static com.yahoo.config.provision.NodeResources.DiskSpeed.any;
+import static com.yahoo.config.provision.NodeResources.DiskSpeed.fast;
+import static com.yahoo.config.provision.NodeResources.StorageType.local;
+import static com.yahoo.config.provision.NodeResources.StorageType.remote;
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+/**
+ * Scenario tester with real node-repository data loaded from a ZK snapshot file
+ *
+ * @author valerijf
+ */
+public class RealDataScenarioTest {
+ private static final Logger log = Logger.getLogger(RealDataScenarioTest.class.getSimpleName());
+
+ @Ignore
+ @Test
+ public void test() {
+ ProvisioningTester tester = new ProvisioningTester.Builder()
+ .zone(new Zone(Cloud.builder().dynamicProvisioning(true).build(), SystemName.defaultSystem(), Environment.prod, RegionName.defaultName()))
+ .flavorsConfig(parseFlavors(Paths.get("flavors.xml")))
+ .build();
+ initFromZk(tester.nodeRepository(), Paths.get("snapshot"));
+
+ ApplicationId app = ApplicationId.from("tenant", "app", "default");
+ Version version = Version.fromString("7.123.4");
+
+ Capacity[] capacities = new Capacity[]{
+ Capacity.from(new ClusterResources(1, 1, new NodeResources(0.5, 4, 50, 0.3, any, remote))),
+ Capacity.from(new ClusterResources(4, 1, new NodeResources(8, 16, 100, 0.3, fast, remote))),
+ Capacity.from(new ClusterResources(2, 1, new NodeResources(4, 8, 100, 0.3, fast, local)))
+ };
+ ClusterSpec[] specs = new ClusterSpec[]{
+ ClusterSpec.request(ClusterSpec.Type.admin, ClusterSpec.Id.from("logserver")).vespaVersion(version).build(),
+ ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container")).vespaVersion(version).build(),
+ ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content")).vespaVersion(version).build()
+ };
+
+ deploy(tester, app, specs, capacities);
+ tester.nodeRepository().list(app).cluster(specs[1].id()).forEach(System.out::println);
+ }
+
+ private void deploy(ProvisioningTester tester, ApplicationId app, ClusterSpec[] specs, Capacity[] capacities) {
+ List<HostSpec> hostSpecs = IntStream.range(0, capacities.length)
+ .mapToObj(i -> tester.provisioner().prepare(app, specs[i], capacities[i], log::log).stream())
+ .flatMap(s -> s)
+ .collect(Collectors.toList());
+ NestedTransaction transaction = new NestedTransaction();
+ tester.provisioner().activate(hostSpecs, new ActivationContext(0), new ApplicationTransaction(new ProvisionLock(app, () -> {}), transaction));
+ transaction.commit();
+ }
+
+ private static FlavorsConfig parseFlavors(Path path) {
+ try {
+ var element = XmlHelper.getDocumentBuilder().parse(path.toFile()).getDocumentElement();
+ return ConfigPayload.fromBuilder(new DomConfigPayloadBuilder(null).build(element)).toInstance(FlavorsConfig.class, "");
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ private static void initFromZk(NodeRepository nodeRepository, Path pathToZkSnapshot) {
+ NodeSerializer nodeSerializer = new NodeSerializer(nodeRepository.flavors(), 1000);
+ AtomicReference<Node.State> state = new AtomicReference<>();
+ Pattern zkNodePathPattern = Pattern.compile(".?/provision/v1/([a-z]+)/[a-z0-9.-]+\\.(com|cloud).?");
+ Consumer<String> consumer = input -> {
+ if (state.get() != null) {
+ String json = input.substring(input.indexOf("{\""), input.lastIndexOf('}') + 1);
+ Node node = nodeSerializer.fromJson(state.get(), json.getBytes(UTF_8));
+ nodeRepository.database().addNodesInState(List.of(node), state.get(), Agent.system);
+ state.set(null);
+ } else {
+ Matcher matcher = zkNodePathPattern.matcher(input);
+ if (!matcher.matches()) return;
+ String stateStr = matcher.group(1);
+ Node.State s = "deallocated".equals(stateStr) ? Node.State.inactive :
+ "allocated".equals(stateStr) ? Node.State.active : Node.State.valueOf(stateStr);
+ state.set(s);
+ }
+ };
+
+ try (BufferedReader reader = new BufferedReader(
+ new InputStreamReader(Files.newInputStream(pathToZkSnapshot), UTF_8))) {
+ StringBuilder sb = new StringBuilder(1000);
+ for (int r; (r = reader.read()) != -1; ) {
+ if (r < 0x20 || r >= 0x7F) {
+ if (sb.length() > 0) {
+ consumer.accept(sb.toString());
+ sb.setLength(0);
+ }
+ } else sb.append((char) r);
+ }
+ } catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
+ }
+}
diff --git a/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp b/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp
index d50d19584d7..7082b9bfef2 100644
--- a/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp
+++ b/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp
@@ -282,6 +282,7 @@ class BMParams {
bool _use_storage_chain;
bool _use_legacy_bucket_db;
bool _use_async_message_handling_on_schedule;
+ uint32_t _bucket_db_stripe_bits;
uint32_t get_start(uint32_t thread_id) const {
return (_documents / _client_threads) * thread_id + std::min(thread_id, _documents % _client_threads);
}
@@ -305,7 +306,8 @@ public:
_use_message_bus(false),
_use_storage_chain(false),
_use_legacy_bucket_db(false),
- _use_async_message_handling_on_schedule(false)
+ _use_async_message_handling_on_schedule(false),
+ _bucket_db_stripe_bits(0)
{
}
BMRange get_range(uint32_t thread_id) const {
@@ -329,6 +331,7 @@ public:
bool get_use_storage_chain() const { return _use_storage_chain; }
bool get_use_legacy_bucket_db() const { return _use_legacy_bucket_db; }
bool get_use_async_message_handling_on_schedule() const { return _use_async_message_handling_on_schedule; }
+ uint32_t get_bucket_db_stripe_bits() const { return _bucket_db_stripe_bits; }
void set_documents(uint32_t documents_in) { _documents = documents_in; }
void set_max_pending(uint32_t max_pending_in) { _max_pending = max_pending_in; }
void set_client_threads(uint32_t threads_in) { _client_threads = threads_in; }
@@ -348,6 +351,7 @@ public:
void set_use_storage_chain(bool value) { _use_storage_chain = value; }
void set_use_legacy_bucket_db(bool value) { _use_legacy_bucket_db = value; }
void set_use_async_message_handling_on_schedule(bool value) { _use_async_message_handling_on_schedule = value; }
+ void set_bucket_db_stripe_bits(uint32_t value) { _bucket_db_stripe_bits = value; }
bool check() const;
bool needs_service_layer() const { return _enable_service_layer || _enable_distributor || _use_storage_chain || _use_message_bus || _use_document_api; }
bool needs_distributor() const { return _enable_distributor || _use_document_api; }
@@ -487,6 +491,7 @@ struct MyStorageConfig
}
stor_server.isDistributor = distributor;
stor_server.useContentNodeBtreeBucketDb = !params.get_use_legacy_bucket_db();
+ stor_server.contentNodeBucketDbStripeBits = params.get_bucket_db_stripe_bits();
if (distributor) {
stor_server.rootFolder = "distributor";
} else {
@@ -1367,6 +1372,7 @@ App::usage()
"USAGE:\n";
std::cerr <<
"vespa-feed-bm\n"
+ "[--bucket-db-stripe-bits]\n"
"[--client-threads threads]\n"
"[--get-passes get-passes]\n"
"[--indexing-sequencer [latency,throughput,adaptive]]\n"
@@ -1395,6 +1401,7 @@ App::get_options()
const char *opt_argument = nullptr;
int long_opt_index = 0;
static struct option long_opts[] = {
+ { "bucket-db-stripe-bits", 1, nullptr, 0 },
{ "client-threads", 1, nullptr, 0 },
{ "documents", 1, nullptr, 0 },
{ "enable-distributor", 0, nullptr, 0 },
@@ -1416,6 +1423,7 @@ App::get_options()
{ "use-storage-chain", 0, nullptr, 0 }
};
enum longopts_enum {
+ LONGOPT_BUCKET_DB_STRIPE_BITS,
LONGOPT_CLIENT_THREADS,
LONGOPT_DOCUMENTS,
LONGOPT_ENABLE_DISTRIBUTOR,
@@ -1442,6 +1450,9 @@ App::get_options()
switch (c) {
case 0:
switch(long_opt_index) {
+ case LONGOPT_BUCKET_DB_STRIPE_BITS:
+ _bm_params.set_bucket_db_stripe_bits(atoi(opt_argument));
+ break;
case LONGOPT_CLIENT_THREADS:
_bm_params.set_client_threads(atoi(opt_argument));
break;
diff --git a/searchcore/src/tests/proton/matching/docid_range_scheduler/docid_range_scheduler_test.cpp b/searchcore/src/tests/proton/matching/docid_range_scheduler/docid_range_scheduler_test.cpp
index 1841d8bb531..4814ab4cb49 100644
--- a/searchcore/src/tests/proton/matching/docid_range_scheduler/docid_range_scheduler_test.cpp
+++ b/searchcore/src/tests/proton/matching/docid_range_scheduler/docid_range_scheduler_test.cpp
@@ -85,10 +85,6 @@ TEST("require that the docid range splitter gives empty ranges if accessed with
TEST("require that the partition scheduler acts as expected") {
PartitionDocidRangeScheduler scheduler(4, 16);
- TEST_DO(verify_range(scheduler.total_span(0), DocidRange(1, 5)));
- TEST_DO(verify_range(scheduler.total_span(1), DocidRange(5, 9)));
- TEST_DO(verify_range(scheduler.total_span(2), DocidRange(9, 13)));
- TEST_DO(verify_range(scheduler.total_span(3), DocidRange(13, 16)));
EXPECT_EQUAL(scheduler.total_size(0), 4u);
EXPECT_EQUAL(scheduler.total_size(1), 4u);
EXPECT_EQUAL(scheduler.total_size(2), 4u);
@@ -106,8 +102,6 @@ TEST("require that the partition scheduler acts as expected") {
TEST("require that the partition scheduler protects against documents underflow") {
PartitionDocidRangeScheduler scheduler(2, 0);
- TEST_DO(verify_range(scheduler.total_span(0), DocidRange(1,1)));
- TEST_DO(verify_range(scheduler.total_span(1), DocidRange(1,1)));
EXPECT_EQUAL(scheduler.total_size(0), 0u);
EXPECT_EQUAL(scheduler.total_size(1), 0u);
EXPECT_EQUAL(scheduler.unassigned_size(), 0u);
@@ -122,8 +116,6 @@ TEST("require that the partition scheduler protects against documents underflow"
TEST("require that the task scheduler acts as expected") {
TaskDocidRangeScheduler scheduler(2, 5, 20);
EXPECT_EQUAL(scheduler.unassigned_size(), 19u);
- TEST_DO(verify_range(scheduler.total_span(0), DocidRange(1, 20)));
- TEST_DO(verify_range(scheduler.total_span(1), DocidRange(1, 20)));
EXPECT_EQUAL(scheduler.total_size(0), 0u);
EXPECT_EQUAL(scheduler.total_size(1), 0u);
TEST_DO(verify_range(scheduler.first_range(1), DocidRange(1, 5)));
@@ -141,8 +133,6 @@ TEST("require that the task scheduler acts as expected") {
TEST("require that the task scheduler protects against documents underflow") {
TaskDocidRangeScheduler scheduler(2, 4, 0);
- TEST_DO(verify_range(scheduler.total_span(0), DocidRange(1,1)));
- TEST_DO(verify_range(scheduler.total_span(1), DocidRange(1,1)));
EXPECT_EQUAL(scheduler.total_size(0), 0u);
EXPECT_EQUAL(scheduler.total_size(1), 0u);
EXPECT_EQUAL(scheduler.unassigned_size(), 0u);
@@ -167,13 +157,6 @@ TEST("require that the adaptive scheduler starts by dividing the docid space equ
TEST_DO(verify_range(scheduler.first_range(3), DocidRange(13, 16)));
}
-TEST("require that the adaptive scheduler reports the full span to all threads") {
- AdaptiveDocidRangeScheduler scheduler(3, 1, 16);
- TEST_DO(verify_range(scheduler.total_span(0), DocidRange(1,16)));
- TEST_DO(verify_range(scheduler.total_span(1), DocidRange(1,16)));
- TEST_DO(verify_range(scheduler.total_span(2), DocidRange(1,16)));
-}
-
TEST_MT_FF("require that the adaptive scheduler terminates when all workers request more work",
4, AdaptiveDocidRangeScheduler(num_threads, 1, 16), TimeBomb(60))
{
diff --git a/searchcore/src/tests/proton/matching/match_loop_communicator/match_loop_communicator_test.cpp b/searchcore/src/tests/proton/matching/match_loop_communicator/match_loop_communicator_test.cpp
index f5564ac22a7..0b0f28962c4 100644
--- a/searchcore/src/tests/proton/matching/match_loop_communicator/match_loop_communicator_test.cpp
+++ b/searchcore/src/tests/proton/matching/match_loop_communicator/match_loop_communicator_test.cpp
@@ -1,48 +1,63 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/vespalib/testkit/test_kit.h>
#include <vespa/searchcore/proton/matching/match_loop_communicator.h>
-#include <vespa/vespalib/util/box.h>
+#include <algorithm>
using namespace proton::matching;
-using vespalib::Box;
-using vespalib::make_box;
-
using Range = MatchLoopCommunicator::Range;
using RangePair = MatchLoopCommunicator::RangePair;
using Matches = MatchLoopCommunicator::Matches;
using Hit = MatchLoopCommunicator::Hit;
using Hits = MatchLoopCommunicator::Hits;
+using TaggedHit = MatchLoopCommunicator::TaggedHit;
+using TaggedHits = MatchLoopCommunicator::TaggedHits;
using search::queryeval::SortedHitSequence;
+std::vector<Hit> hit_vec(std::vector<Hit> list) { return list; }
+
Hits makeScores(size_t id) {
switch (id) {
- case 0: return make_box<Hit>({1, 5.4}, {2, 4.4}, {3, 3.4}, {4, 2.4}, {5, 1.4});
- case 1: return make_box<Hit>({11, 5.3}, {12, 4.3}, {13, 3.3}, {14, 2.3}, {15, 1.3});
- case 2: return make_box<Hit>({21, 5.2}, {22, 4.2}, {23, 3.2}, {24, 2.2}, {25, 1.2});
- case 3: return make_box<Hit>({31, 5.1}, {32, 4.1}, {33, 3.1}, {34, 2.1}, {35, 1.1});
- case 4: return make_box<Hit>({41, 5.0}, {42, 4.0}, {43, 3.0}, {44, 2.0}, {45, 1.0});
+ case 0: return {{1, 5.4}, {2, 4.4}, {3, 3.4}, {4, 2.4}, {5, 1.4}};
+ case 1: return {{11, 5.3}, {12, 4.3}, {13, 3.3}, {14, 2.3}, {15, 1.3}};
+ case 2: return {{21, 5.2}, {22, 4.2}, {23, 3.2}, {24, 2.2}, {25, 1.2}};
+ case 3: return {{31, 5.1}, {32, 4.1}, {33, 3.1}, {34, 2.1}, {35, 1.1}};
+ case 4: return {{41, 5.0}, {42, 4.0}, {43, 3.0}, {44, 2.0}, {45, 1.0}};
}
- return Box<Hit>();
+ return {};
}
-Hits selectBest(MatchLoopCommunicator &com, const Hits &hits) {
+std::tuple<size_t,Hits,RangePair> second_phase(MatchLoopCommunicator &com, const Hits &hits, size_t thread_id, double delta = 0.0) {
std::vector<uint32_t> refs;
for (size_t i = 0; i < hits.size(); ++i) {
refs.push_back(i);
}
- return com.selectBest(SortedHitSequence(&hits[0], &refs[0], refs.size()));
+ auto my_work = com.get_second_phase_work(SortedHitSequence(&hits[0], &refs[0], refs.size()), thread_id);
+    // the DocumentScorer used by the match thread would sort on docid here to ensure increasing seek order; that is not needed in this test
+ size_t work_size = my_work.size();
+ for (auto &[hit, tag]: my_work) {
+ hit.second += delta; // second phase ranking is first phase + delta
+ }
+ auto [best_hits, ranges] = com.complete_second_phase(std::move(my_work), thread_id);
+    // the HitCollector will sort on docid to prepare for result merging; we do the same here to simplify comparison with the expected results
+ auto sort_on_docid = [](const auto &a, const auto &b){ return (a.first < b.first); };
+ std::sort(best_hits.begin(), best_hits.end(), sort_on_docid);
+ return {work_size, best_hits, ranges};
}
-RangePair makeRanges(size_t id) {
- switch (id) {
- case 0: return std::make_pair(Range(5, 5), Range(7, 7));
- case 1: return std::make_pair(Range(2, 2), Range(8, 8));
- case 2: return std::make_pair(Range(3, 3), Range(6, 6));
- case 3: return std::make_pair(Range(1, 1), Range(5, 5));
- case 4: return std::make_pair(Range(4, 4), Range(9, 9));
- }
- return std::make_pair(Range(-50, -60), Range(60, 50));
+Hits selectBest(MatchLoopCommunicator &com, const Hits &hits, size_t thread_id) {
+ auto [work_size, best_hits, ranges] = second_phase(com, hits, thread_id);
+ return best_hits;
+}
+
+RangePair rangeCover(MatchLoopCommunicator &com, const Hits &hits, size_t thread_id, double delta) {
+ auto [work_size, best_hits, ranges] = second_phase(com, hits, thread_id, delta);
+ return ranges;
+}
+
+size_t my_work_size(MatchLoopCommunicator &com, const Hits &hits, size_t thread_id) {
+ auto [work_size, best_hits, ranges] = second_phase(com, hits, thread_id);
+ return work_size;
}
void equal(size_t count, const Hits & a, const Hits & b) {
@@ -59,114 +74,137 @@ void equal_range(const Range &a, const Range &b) {
EXPECT_EQUAL(a.high, b.high);
}
+void equal_ranges(const RangePair &a, const RangePair &b) {
+ TEST_DO(equal_range(a.first, b.first));
+ TEST_DO(equal_range(a.second, b.second));
+}
+
struct EveryOdd : public search::queryeval::IDiversifier {
bool accepted(uint32_t docId) override {
return docId & 0x01;
}
};
+struct None : public search::queryeval::IDiversifier {
+ bool accepted(uint32_t) override { return false; }
+};
+
TEST_F("require that selectBest gives appropriate results for single thread", MatchLoopCommunicator(num_threads, 3)) {
- TEST_DO(equal(2u, make_box<Hit>({1, 5}, {2, 4}), selectBest(f1, make_box<Hit>({1, 5}, {2, 4}))));
- TEST_DO(equal(3u, make_box<Hit>({1, 5}, {2, 4}, {3, 3}), selectBest(f1, make_box<Hit>({1, 5}, {2, 4}, {3, 3}))));
- TEST_DO(equal(3u, make_box<Hit>({1, 5}, {2, 4}, {3, 3}), selectBest(f1, make_box<Hit>({1, 5}, {2, 4}, {3, 3}, {4, 2}))));
+ TEST_DO(equal(2u, hit_vec({{1, 5}, {2, 4}}), selectBest(f1, hit_vec({{1, 5}, {2, 4}}), thread_id)));
+ TEST_DO(equal(3u, hit_vec({{1, 5}, {2, 4}, {3, 3}}), selectBest(f1, hit_vec({{1, 5}, {2, 4}, {3, 3}}), thread_id)));
+ TEST_DO(equal(3u, hit_vec({{1, 5}, {2, 4}, {3, 3}}), selectBest(f1, hit_vec({{1, 5}, {2, 4}, {3, 3}, {4, 2}}), thread_id)));
}
TEST_F("require that selectBest gives appropriate results for single thread with filter",
MatchLoopCommunicator(num_threads, 3, std::make_unique<EveryOdd>()))
{
- TEST_DO(equal(1u, make_box<Hit>({1, 5}), selectBest(f1, make_box<Hit>({1, 5}, {2, 4}))));
- TEST_DO(equal(2u, make_box<Hit>({1, 5}, {3, 3}), selectBest(f1, make_box<Hit>({1, 5}, {2, 4}, {3, 3}))));
- TEST_DO(equal(3u, make_box<Hit>({1, 5}, {3, 3}, {5, 1}), selectBest(f1, make_box<Hit>({1, 5}, {2, 4}, {3, 3}, {4, 2}, {5, 1}, {6, 0}))));
+ TEST_DO(equal(1u, hit_vec({{1, 5}}), selectBest(f1, hit_vec({{1, 5}, {2, 4}}), thread_id)));
+ TEST_DO(equal(2u, hit_vec({{1, 5}, {3, 3}}), selectBest(f1, hit_vec({{1, 5}, {2, 4}, {3, 3}}), thread_id)));
+ TEST_DO(equal(3u, hit_vec({{1, 5}, {3, 3}, {5, 1}}), selectBest(f1, hit_vec({{1, 5}, {2, 4}, {3, 3}, {4, 2}, {5, 1}, {6, 0}}), thread_id)));
}
TEST_MT_F("require that selectBest works with no hits", 10, MatchLoopCommunicator(num_threads, 10)) {
- EXPECT_TRUE(selectBest(f1, Box<Hit>()).empty());
+ EXPECT_TRUE(selectBest(f1, hit_vec({}), thread_id).empty());
}
TEST_MT_F("require that selectBest works with too many hits from all threads", 5, MatchLoopCommunicator(num_threads, 13)) {
if (thread_id < 3) {
- TEST_DO(equal(3u, makeScores(thread_id), selectBest(f1, makeScores(thread_id))));
+ TEST_DO(equal(3u, makeScores(thread_id), selectBest(f1, makeScores(thread_id), thread_id)));
} else {
- TEST_DO(equal(2u, makeScores(thread_id), selectBest(f1, makeScores(thread_id))));
+ TEST_DO(equal(2u, makeScores(thread_id), selectBest(f1, makeScores(thread_id), thread_id)));
}
}
TEST_MT_F("require that selectBest works with some exhausted threads", 5, MatchLoopCommunicator(num_threads, 22)) {
if (thread_id < 2) {
- TEST_DO(equal(5u, makeScores(thread_id), selectBest(f1, makeScores(thread_id))));
+ TEST_DO(equal(5u, makeScores(thread_id), selectBest(f1, makeScores(thread_id), thread_id)));
} else {
- TEST_DO(equal(4u, makeScores(thread_id), selectBest(f1, makeScores(thread_id))));
+ TEST_DO(equal(4u, makeScores(thread_id), selectBest(f1, makeScores(thread_id), thread_id)));
}
}
TEST_MT_F("require that selectBest can select all hits from all threads", 5, MatchLoopCommunicator(num_threads, 100)) {
- EXPECT_EQUAL(5u, selectBest(f1, makeScores(thread_id)).size());
+ EXPECT_EQUAL(5u, selectBest(f1, makeScores(thread_id), thread_id).size());
}
TEST_MT_F("require that selectBest works with some empty threads", 10, MatchLoopCommunicator(num_threads, 7)) {
if (thread_id < 2) {
- TEST_DO(equal(2u, makeScores(thread_id), selectBest(f1, makeScores(thread_id))));
+ TEST_DO(equal(2u, makeScores(thread_id), selectBest(f1, makeScores(thread_id), thread_id)));
} else if (thread_id < 5) {
- TEST_DO(equal(1u, makeScores(thread_id), selectBest(f1, makeScores(thread_id))));
+ TEST_DO(equal(1u, makeScores(thread_id), selectBest(f1, makeScores(thread_id), thread_id)));
} else {
- EXPECT_TRUE(selectBest(f1, makeScores(thread_id)).empty());
+ EXPECT_TRUE(selectBest(f1, makeScores(thread_id), thread_id).empty());
}
}
-TEST_F("require that rangeCover is identity function for single thread", MatchLoopCommunicator(num_threads, 5)) {
- RangePair res = f1.rangeCover(std::make_pair(Range(2, 4), Range(3, 5)));
- TEST_DO(equal_range(Range(2, 4), res.first));
- TEST_DO(equal_range(Range(3, 5), res.second));
+TEST_F("require that rangeCover works with a single thread", MatchLoopCommunicator(num_threads, 5)) {
+ RangePair res = rangeCover(f1, hit_vec({{1, 7.5}, {2, 1.5}}), thread_id, 10);
+ TEST_DO(equal_ranges(RangePair({1.5, 7.5}, {11.5, 17.5}), res));
}
-TEST_MT_F("require that rangeCover can mix ranges from multiple threads", 5, MatchLoopCommunicator(num_threads, 5)) {
- RangePair res = f1.rangeCover(makeRanges(thread_id));
- TEST_DO(equal_range(Range(1, 5), res.first));
- TEST_DO(equal_range(Range(5, 9), res.second));
+TEST_MT_F("require that rangeCover works with multiple threads", 5, MatchLoopCommunicator(num_threads, 10)) {
+ RangePair res = rangeCover(f1, hit_vec({{thread_id * 100 + 1, 100.0 + thread_id}, {thread_id * 100 + 2, 100.0 - thread_id}}), thread_id, 10);
+ TEST_DO(equal_ranges(RangePair({96.0, 104.0}, {106.0, 114.0}), res));
}
-TEST_MT_F("require that invalid ranges are ignored", 10, MatchLoopCommunicator(num_threads, 5)) {
- RangePair res = f1.rangeCover(makeRanges(thread_id));
- TEST_DO(equal_range(Range(1, 5), res.first));
- TEST_DO(equal_range(Range(5, 9), res.second));
+TEST_MT_F("require that rangeCover works with no hits", 10, MatchLoopCommunicator(num_threads, 5)) {
+ RangePair res = rangeCover(f1, hit_vec({}), thread_id, 10);
+ TEST_DO(equal_ranges(RangePair({}, {}), res));
}
-TEST_MT_F("require that only invalid ranges produce default invalid range", 3, MatchLoopCommunicator(num_threads, 5)) {
- RangePair res = f1.rangeCover(makeRanges(10));
- Range expect;
- TEST_DO(equal_range(expect, res.first));
- TEST_DO(equal_range(expect, res.second));
-}
-
-TEST_F("require that hits dropped due to lack of diversity affects range cover result",
- MatchLoopCommunicator(num_threads, 3, std::make_unique<EveryOdd>()))
+TEST_FFF("require that hits dropped due to lack of diversity affects range cover result",
+ MatchLoopCommunicator(num_threads, 3),
+ MatchLoopCommunicator(num_threads, 3, std::make_unique<EveryOdd>()),
+ MatchLoopCommunicator(num_threads, 3, std::make_unique<None>()))
{
- TEST_DO(equal(3u, make_box<Hit>({1, 5}, {3, 3}, {5, 1}), selectBest(f1, make_box<Hit>({1, 5}, {2, 4}, {3, 3}, {4, 2}, {5, 1}))));
- // best dropped: 4
- std::vector<RangePair> input = {
- std::make_pair(Range(), Range()),
- std::make_pair(Range(3, 5), Range(1, 10)),
- std::make_pair(Range(5, 10), Range(1, 10)),
- std::make_pair(Range(1, 3), Range(1, 10))
- };
- std::vector<RangePair> expect = {
- std::make_pair(Range(), Range()),
- std::make_pair(Range(4, 5), Range(1, 10)),
- std::make_pair(Range(5, 10), Range(1, 10)),
- std::make_pair(Range(4, 4), Range(1, 10))
- };
- ASSERT_EQUAL(input.size(), expect.size());
- for (size_t i = 0; i < input.size(); ++i) {
- auto output = f1.rangeCover(input[i]);
- TEST_STATE(vespalib::make_string("case: %zu", i).c_str());
- TEST_DO(equal_range(expect[i].first, output.first));
- TEST_DO(equal_range(expect[i].second, output.second));
- }
+ auto hits_in = hit_vec({{1, 5}, {2, 4}, {3, 3}, {4, 2}, {5, 1}});
+ auto [my_work1, hits1, ranges1] = second_phase(f1, hits_in, thread_id, 10);
+ auto [my_work2, hits2, ranges2] = second_phase(f2, hits_in, thread_id, 10);
+ auto [my_work3, hits3, ranges3] = second_phase(f3, hits_in, thread_id, 10);
+
+ EXPECT_EQUAL(my_work1, 3u);
+ EXPECT_EQUAL(my_work2, 3u);
+ EXPECT_EQUAL(my_work3, 0u);
+
+ TEST_DO(equal(3u, hit_vec({{1, 15}, {2, 14}, {3, 13}}), hits1));
+ TEST_DO(equal(3u, hit_vec({{1, 15}, {3, 13}, {5, 11}}), hits2));
+ TEST_DO(equal(0u, hit_vec({}), hits3));
+
+ TEST_DO(equal_ranges(RangePair({3,5},{13,15}), ranges1));
+ TEST_DO(equal_ranges(RangePair({4,5},{11,15}), ranges2)); // best dropped: 4
+
+ // note that the 'drops all hits due to diversity' case will
+ // trigger much of the same code path as dropping second phase
+ // ranking due to hard doom.
+
+ TEST_DO(equal_ranges(RangePair({},{}), ranges3));
}
-TEST_MT_F("require that count_matches will count hits and docs across threads", 4, MatchLoopCommunicator(num_threads, 5)) {
+TEST_MT_F("require that estimate_match_frequency will count hits and docs across threads", 4, MatchLoopCommunicator(num_threads, 5)) {
double freq = (0.0/10.0 + 1.0/11.0 + 2.0/12.0 + 3.0/13.0) / 4.0;
EXPECT_APPROX(freq, f1.estimate_match_frequency(Matches(thread_id, thread_id + 10)), 0.00001);
}
+TEST_MT_F("require that second phase work is evenly distributed among search threads", 5, MatchLoopCommunicator(num_threads, 20)) {
+ size_t num_hits = thread_id * 5;
+ size_t docid = thread_id * 100;
+ double score = thread_id * 100.0;
+ Hits my_hits;
+ for(size_t i = 0; i < num_hits; ++i) {
+ my_hits.emplace_back(++docid, score);
+ score -= 1.0;
+ }
+ auto [my_work, best_hits, ranges] = second_phase(f1, my_hits, thread_id, 1000.0);
+ EXPECT_EQUAL(my_work, 4u);
+ TEST_DO(equal_ranges(RangePair({381,400},{1381,1400}), ranges));
+ if (thread_id == 4) {
+ for (auto &hit: my_hits) {
+ hit.second += 1000.0;
+ }
+ TEST_DO(equal(num_hits, my_hits, best_hits));
+ } else {
+ EXPECT_TRUE(best_hits.empty());
+ }
+}
+
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/vespa/searchcore/proton/matching/docid_range_scheduler.h b/searchcore/src/vespa/searchcore/proton/matching/docid_range_scheduler.h
index e6fce89e82d..19e5ec0343d 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/docid_range_scheduler.h
+++ b/searchcore/src/vespa/searchcore/proton/matching/docid_range_scheduler.h
@@ -71,17 +71,15 @@ public:
* 'next_range' function. When a worker is assigned an empty range,
* its work is done.
*
- * The 'total_span' function returns a range that is guaranteed to
- * contain all ranges assigned to the given worker. The 'total_size'
- * function returns the accumulated size of all ranges assigned to the
- * given worker. The 'unassigned_size' function returns the
- * accumulated size of all currently unassigned ranges.
+ * The 'total_size' function returns the accumulated size of all
+ * ranges assigned to the given worker. The 'unassigned_size' function
+ * returns the accumulated size of all currently unassigned ranges.
*
- * Note that the return values from 'total_span', 'total_size' and
- * 'unassigned_size' may or may not account for the range returned
- * from 'first_range' since the scheduler is allowed to pre-assign
- * ranges to workers. Calling 'first_range' first ensures that all
- * other return values make sense.
+ * Note that the return values from 'total_size' and 'unassigned_size'
+ * may or may not account for the range returned from 'first_range'
+ * since the scheduler is allowed to pre-assign ranges to
+ * workers. Calling 'first_range' first ensures that all other return
+ * values make sense.
*
* The 'idle_observer' and 'share_range' functions are used for
* work-sharing, where a worker thread potentially can offload some of
@@ -109,7 +107,6 @@ struct DocidRangeScheduler {
typedef std::unique_ptr<DocidRangeScheduler> UP;
virtual DocidRange first_range(size_t thread_id) = 0;
virtual DocidRange next_range(size_t thread_id) = 0;
- virtual DocidRange total_span(size_t thread_id) const = 0;
virtual size_t total_size(size_t thread_id) const = 0;
virtual size_t unassigned_size() const = 0;
virtual IdleObserver make_idle_observer() const = 0;
@@ -130,7 +127,6 @@ public:
PartitionDocidRangeScheduler(size_t num_threads, uint32_t docid_limit);
DocidRange first_range(size_t thread_id) override { return _ranges[thread_id]; }
DocidRange next_range(size_t) override { return DocidRange(); }
- DocidRange total_span(size_t thread_id) const override { return _ranges[thread_id]; }
size_t total_size(size_t thread_id) const override { return _ranges[thread_id].size(); }
size_t unassigned_size() const override { return 0; }
IdleObserver make_idle_observer() const override { return IdleObserver(); }
@@ -157,7 +153,6 @@ public:
TaskDocidRangeScheduler(size_t num_threads, size_t num_tasks, uint32_t docid_limit);
DocidRange first_range(size_t thread_id) override { return next_task(thread_id); }
DocidRange next_range(size_t thread_id) override { return next_task(thread_id); }
- DocidRange total_span(size_t) const override { return _splitter.full_range(); }
size_t total_size(size_t thread_id) const override { return _assigned[thread_id]; }
size_t unassigned_size() const override { return _unassigned.load(std::memory_order::memory_order_relaxed); }
IdleObserver make_idle_observer() const override { return IdleObserver(); }
@@ -197,7 +192,6 @@ public:
~AdaptiveDocidRangeScheduler();
DocidRange first_range(size_t thread_id) override;
DocidRange next_range(size_t thread_id) override;
- DocidRange total_span(size_t) const override { return _splitter.full_range(); }
size_t total_size(size_t thread_id) const override { return _assigned[thread_id]; }
size_t unassigned_size() const override { return 0; }
IdleObserver make_idle_observer() const override { return IdleObserver(_num_idle); }
diff --git a/searchcore/src/vespa/searchcore/proton/matching/document_scorer.cpp b/searchcore/src/vespa/searchcore/proton/matching/document_scorer.cpp
index c7721b428b9..608e8adf5c2 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/document_scorer.cpp
+++ b/searchcore/src/vespa/searchcore/proton/matching/document_scorer.cpp
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "document_scorer.h"
+#include <algorithm>
#include <cassert>
using search::feature_t;
@@ -30,10 +31,14 @@ DocumentScorer::DocumentScorer(RankProgram &rankProgram,
{
}
-feature_t
-DocumentScorer::score(uint32_t docId)
+void
+DocumentScorer::score(TaggedHits &hits)
{
- return doScore(docId);
+ auto sort_on_docid = [](const TaggedHit &a, const TaggedHit &b){ return (a.first.first < b.first.first); };
+ std::sort(hits.begin(), hits.end(), sort_on_docid);
+ for (auto &hit: hits) {
+ hit.first.second = doScore(hit.first.first);
+ }
}
}
diff --git a/searchcore/src/vespa/searchcore/proton/matching/document_scorer.h b/searchcore/src/vespa/searchcore/proton/matching/document_scorer.h
index f29f70b2cfa..63ee00c3412 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/document_scorer.h
+++ b/searchcore/src/vespa/searchcore/proton/matching/document_scorer.h
@@ -2,24 +2,28 @@
#pragma once
+#include "i_match_loop_communicator.h"
#include <vespa/searchlib/fef/rank_program.h>
-#include <vespa/searchlib/queryeval/hitcollector.h>
#include <vespa/searchlib/queryeval/searchiterator.h>
namespace proton::matching {
/**
* Class used to calculate the rank score for a set of documents using
- * a rank program for calculation and a search iterator for unpacking match data.
- * The calculateScore() function is always called in increasing docId order.
+ * a rank program for calculation and a search iterator for unpacking
+ * match data. The doScore function must be called with increasing
+ * docid.
*/
-class DocumentScorer : public search::queryeval::HitCollector::DocumentScorer
+class DocumentScorer
{
private:
search::queryeval::SearchIterator &_searchItr;
search::fef::LazyValue _scoreFeature;
public:
+ using TaggedHit = IMatchLoopCommunicator::TaggedHit;
+ using TaggedHits = IMatchLoopCommunicator::TaggedHits;
+
DocumentScorer(search::fef::RankProgram &rankProgram,
search::queryeval::SearchIterator &searchItr);
@@ -28,7 +32,8 @@ public:
return _scoreFeature.as_number(docId);
}
- virtual search::feature_t score(uint32_t docId) override;
+    // annotate hits with rank scores; may reorder the hits
+ void score(TaggedHits &hits);
};
}
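
The reworked DocumentScorer scores a whole batch of tagged hits at once: it first sorts them on docid so the rank program and its search iterator only ever seek forward, then writes the second-phase score back into each hit while leaving the tags untouched. A minimal stand-alone sketch of that contract, using simple pair-based stand-ins for Hit/TaggedHits and a placeholder ranking function:

#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

using Hit = std::pair<uint32_t, double>;        // (docid, score)
using TaggedHit = std::pair<Hit, size_t>;       // hit plus owning-thread tag
using TaggedHits = std::vector<TaggedHit>;

// stand-in for rank program evaluation; must be called with increasing docid
double rank_forward_only(uint32_t docid) { return docid * 0.5; }

void score_tagged_hits(TaggedHits &hits) {
    std::sort(hits.begin(), hits.end(),
              [](const TaggedHit &a, const TaggedHit &b) { return a.first.first < b.first.first; });
    for (auto &hit : hits) {
        hit.first.second = rank_forward_only(hit.first.first);
    }
}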
diff --git a/searchcore/src/vespa/searchcore/proton/matching/i_match_loop_communicator.h b/searchcore/src/vespa/searchcore/proton/matching/i_match_loop_communicator.h
index 15ca9921524..c9a4a61f9c5 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/i_match_loop_communicator.h
+++ b/searchcore/src/vespa/searchcore/proton/matching/i_match_loop_communicator.h
@@ -17,6 +17,8 @@ struct IMatchLoopCommunicator {
using SortedHitSequence = search::queryeval::SortedHitSequence;
using Hit = SortedHitSequence::Hit;
using Hits = std::vector<Hit>;
+ using TaggedHit = std::pair<Hit,size_t>;
+ using TaggedHits = std::vector<TaggedHit>;
struct Matches {
size_t hits;
size_t docs;
@@ -28,8 +30,8 @@ struct IMatchLoopCommunicator {
}
};
virtual double estimate_match_frequency(const Matches &matches) = 0;
- virtual Hits selectBest(SortedHitSequence sortedHits) = 0;
- virtual RangePair rangeCover(const RangePair &ranges) = 0;
+ virtual TaggedHits get_second_phase_work(SortedHitSequence sortedHits, size_t thread_id) = 0;
+ virtual std::pair<Hits,RangePair> complete_second_phase(TaggedHits my_results, size_t thread_id) = 0;
virtual ~IMatchLoopCommunicator() {}
};
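
Taken together, the two new calls replace the old selectBest/rangeCover pair with a work-distribution protocol. A sketch of how a single match thread is expected to drive it, assuming a communicator, hit collector, scorer and heap size as introduced in this patch (the names are stand-ins for the corresponding MatchThread members):

    auto my_work = communicator.get_second_phase_work(
            hits.getSortedHitSequence(heap_size), thread_id);   // rendezvous 1
    scorer.score(my_work);                                      // re-rank my share
    auto [kept_hits, ranges] = communicator.complete_second_phase(
            std::move(my_work), thread_id);                     // rendezvous 2
    hits.setReRankedHits(std::move(kept_hits));
    hits.setRanges(ranges);

Each thread hands in its sorted first-phase hits, gets back an arbitrary slice of the global top hits to score, and only receives its own re-ranked hits plus the shared score ranges after the second rendezvous.
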
diff --git a/searchcore/src/vespa/searchcore/proton/matching/match_loop_communicator.cpp b/searchcore/src/vespa/searchcore/proton/matching/match_loop_communicator.cpp
index 07a8b224b89..4db26f6308a 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/match_loop_communicator.cpp
+++ b/searchcore/src/vespa/searchcore/proton/matching/match_loop_communicator.cpp
@@ -9,10 +9,11 @@ MatchLoopCommunicator::MatchLoopCommunicator(size_t threads, size_t topN)
: MatchLoopCommunicator(threads, topN, std::unique_ptr<IDiversifier>())
{}
MatchLoopCommunicator::MatchLoopCommunicator(size_t threads, size_t topN, std::unique_ptr<IDiversifier> diversifier)
- : _best_dropped(),
+ : _best_scores(),
+ _best_dropped(),
_estimate_match_frequency(threads),
- _selectBest(threads, topN, _best_dropped, std::move(diversifier)),
- _rangeCover(threads, _best_dropped)
+ _get_second_phase_work(threads, topN, _best_scores, _best_dropped, std::move(diversifier)),
+ _complete_second_phase(threads, topN, _best_scores, _best_dropped)
{}
MatchLoopCommunicator::~MatchLoopCommunicator() = default;
@@ -33,25 +34,30 @@ MatchLoopCommunicator::EstimateMatchFrequency::mingle()
}
}
-MatchLoopCommunicator::SelectBest::SelectBest(size_t n, size_t topN_in, BestDropped &best_dropped_in, std::unique_ptr<IDiversifier> diversifier)
- : vespalib::Rendezvous<SortedHitSequence, Hits>(n),
+MatchLoopCommunicator::GetSecondPhaseWork::GetSecondPhaseWork(size_t n, size_t topN_in, Range &best_scores_in, BestDropped &best_dropped_in, std::unique_ptr<IDiversifier> diversifier)
+ : vespalib::Rendezvous<SortedHitSequence, TaggedHits, true>(n),
topN(topN_in),
+ best_scores(best_scores_in),
best_dropped(best_dropped_in),
_diversifier(std::move(diversifier))
{}
-MatchLoopCommunicator::SelectBest::~SelectBest() = default;
+MatchLoopCommunicator::GetSecondPhaseWork::~GetSecondPhaseWork() = default;
template<typename Q, typename F>
void
-MatchLoopCommunicator::SelectBest::mingle(Q &queue, F &&accept)
+MatchLoopCommunicator::GetSecondPhaseWork::mingle(Q &queue, F &&accept)
{
- best_dropped.valid = false;
- for (size_t picked = 0; picked < topN && !queue.empty(); ) {
+ size_t picked = 0;
+ search::feature_t last_score = 0.0;
+ while ((picked < topN) && !queue.empty()) {
uint32_t i = queue.front();
const Hit & hit = in(i).get();
if (accept(hit.first)) {
- out(i).push_back(hit);
- ++picked;
+ out(picked % size()).emplace_back(hit, i);
+ last_score = hit.second;
+ if (++picked == 1) {
+ best_scores.high = hit.second;
+ }
} else if (!best_dropped.valid) {
best_dropped.valid = true;
best_dropped.score = hit.second;
@@ -63,16 +69,21 @@ MatchLoopCommunicator::SelectBest::mingle(Q &queue, F &&accept)
queue.pop_front();
}
}
+ if (picked > 0) {
+ best_scores.low = last_score;
+ }
}
void
-MatchLoopCommunicator::SelectBest::mingle()
+MatchLoopCommunicator::GetSecondPhaseWork::mingle()
{
- size_t est_out = (topN / size()) + 16;
+ best_scores = Range();
+ best_dropped.valid = false;
+ size_t est_out = (topN / size()) + 1;
vespalib::PriorityQueue<uint32_t, SelectCmp> queue(SelectCmp(*this));
for (size_t i = 0; i < size(); ++i) {
+ out(i).reserve(est_out);
if (in(i).valid()) {
- out(i).reserve(est_out);
queue.push(i);
}
}
@@ -84,28 +95,26 @@ MatchLoopCommunicator::SelectBest::mingle()
}
void
-MatchLoopCommunicator::RangeCover::mingle()
+MatchLoopCommunicator::CompleteSecondPhase::mingle()
{
- size_t i = 0;
- while (i < size() && (!in(i).first.isValid() || !in(i).second.isValid())) {
- ++i;
+ RangePair score_ranges(best_scores, Range());
+ Range &new_scores = score_ranges.second;
+ size_t est_out = (topN / size()) + 16;
+ for (size_t i = 0; i < size(); ++i) {
+ out(i).first.reserve(est_out);
}
- if (i < size()) {
- RangePair result = in(i++);
- for (; i < size(); ++i) {
- if (in(i).first.isValid() && in(i).second.isValid()) {
- result.first.low = std::min(result.first.low, in(i).first.low);
- result.first.high = std::max(result.first.high, in(i).first.high);
- result.second.low = std::min(result.second.low, in(i).second.low);
- result.second.high = std::max(result.second.high, in(i).second.high);
- }
+ for (size_t i = 0; i < size(); ++i) {
+ for (const auto &[hit, tag]: in(i)) {
+ out(tag).first.push_back(hit);
+ new_scores.update(hit.second);
}
+ }
+ if (score_ranges.first.isValid() && score_ranges.second.isValid()) {
if (best_dropped.valid) {
- result.first.low = std::max(result.first.low, best_dropped.score);
- result.first.high = std::max(result.first.low, result.first.high);
+ score_ranges.first.low = std::max(score_ranges.first.low, best_dropped.score);
}
- for (size_t j = 0; j < size(); ++j) {
- out(j) = result;
+ for (size_t i = 0; i < size(); ++i) {
+ out(i).second = score_ranges;
}
}
}
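
The two mingle() steps above do the actual redistribution: GetSecondPhaseWork deals the globally best topN hits round-robin to the participating threads while recording the first-phase score range, and CompleteSecondPhase routes every re-scored hit back to the thread named by its tag while accumulating the second-phase range. A simplified sketch of the dealing step, using std types and ignoring diversity filtering and the best_dropped bookkeeping:

    #include <cstdint>
    #include <utility>
    #include <vector>

    using Hit = std::pair<uint32_t, double>;   // (docid, score)
    using TaggedHit = std::pair<Hit, size_t>;  // (hit, source slot)

    // 'sorted' is assumed to be in descending score order, like the merged
    // SortedHitSequence input; low/high are only written when something is picked.
    std::vector<std::vector<TaggedHit>>
    deal_work(const std::vector<TaggedHit> &sorted, size_t threads, size_t top_n,
              double &low, double &high)
    {
        std::vector<std::vector<TaggedHit>> work(threads);
        size_t picked = 0;
        for (const auto &tagged : sorted) {
            if (picked == top_n) break;
            if (picked == 0) high = tagged.first.second;  // best accepted score
            low = tagged.first.second;                    // last accepted score so far
            work[picked % threads].push_back(tagged);
            ++picked;
        }
        return work;
    }
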
diff --git a/searchcore/src/vespa/searchcore/proton/matching/match_loop_communicator.h b/searchcore/src/vespa/searchcore/proton/matching/match_loop_communicator.h
index 425197fac3b..42dd82587fa 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/match_loop_communicator.h
+++ b/searchcore/src/vespa/searchcore/proton/matching/match_loop_communicator.h
@@ -20,12 +20,13 @@ private:
EstimateMatchFrequency(size_t n) : vespalib::Rendezvous<Matches, double>(n) {}
void mingle() override;
};
- struct SelectBest : vespalib::Rendezvous<SortedHitSequence, Hits> {
+ struct GetSecondPhaseWork : vespalib::Rendezvous<SortedHitSequence, TaggedHits, true> {
size_t topN;
+ Range &best_scores;
BestDropped &best_dropped;
std::unique_ptr<IDiversifier> _diversifier;
- SelectBest(size_t n, size_t topN_in, BestDropped &best_dropped_in, std::unique_ptr<IDiversifier>);
- ~SelectBest() override;
+ GetSecondPhaseWork(size_t n, size_t topN_in, Range &best_scores_in, BestDropped &best_dropped_in, std::unique_ptr<IDiversifier>);
+ ~GetSecondPhaseWork() override;
void mingle() override;
template<typename Q, typename F>
void mingle(Q &queue, F &&accept);
@@ -34,23 +35,27 @@ private:
}
};
struct SelectCmp {
- SelectBest &sb;
- SelectCmp(SelectBest &sb_in) : sb(sb_in) {}
+ GetSecondPhaseWork &sb;
+ SelectCmp(GetSecondPhaseWork &sb_in) : sb(sb_in) {}
bool operator()(uint32_t a, uint32_t b) const {
return (sb.cmp(a, b));
}
};
- struct RangeCover : vespalib::Rendezvous<RangePair, RangePair> {
- BestDropped &best_dropped;
- RangeCover(size_t n, BestDropped &best_dropped_in)
- : vespalib::Rendezvous<RangePair, RangePair>(n), best_dropped(best_dropped_in) {}
+ struct CompleteSecondPhase : vespalib::Rendezvous<TaggedHits, std::pair<Hits,RangePair>, true> {
+ size_t topN;
+ const Range &best_scores;
+ const BestDropped &best_dropped;
+ CompleteSecondPhase(size_t n, size_t topN_in, const Range &best_scores_in, const BestDropped &best_dropped_in)
+ : vespalib::Rendezvous<TaggedHits, std::pair<Hits,RangePair>, true>(n),
+ topN(topN_in), best_scores(best_scores_in), best_dropped(best_dropped_in) {}
void mingle() override;
};
- BestDropped _best_dropped;
- EstimateMatchFrequency _estimate_match_frequency;
- SelectBest _selectBest;
- RangeCover _rangeCover;
+ Range _best_scores;
+ BestDropped _best_dropped;
+ EstimateMatchFrequency _estimate_match_frequency;
+ GetSecondPhaseWork _get_second_phase_work;
+ CompleteSecondPhase _complete_second_phase;
public:
MatchLoopCommunicator(size_t threads, size_t topN);
@@ -60,11 +65,13 @@ public:
double estimate_match_frequency(const Matches &matches) override {
return _estimate_match_frequency.rendezvous(matches);
}
- Hits selectBest(SortedHitSequence sortedHits) override {
- return _selectBest.rendezvous(sortedHits);
+
+ TaggedHits get_second_phase_work(SortedHitSequence sortedHits, size_t thread_id) override {
+ return _get_second_phase_work.rendezvous(sortedHits, thread_id);
}
- RangePair rangeCover(const RangePair &ranges) override {
- return _rangeCover.rendezvous(ranges);
+
+ std::pair<Hits,RangePair> complete_second_phase(TaggedHits my_results, size_t thread_id) override {
+ return _complete_second_phase.rendezvous(std::move(my_results), thread_id);
}
};
diff --git a/searchcore/src/vespa/searchcore/proton/matching/match_master.cpp b/searchcore/src/vespa/searchcore/proton/matching/match_master.cpp
index 3cbf88facd5..6c86f6c2d1c 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/match_master.cpp
+++ b/searchcore/src/vespa/searchcore/proton/matching/match_master.cpp
@@ -32,13 +32,13 @@ struct TimedMatchLoopCommunicator : IMatchLoopCommunicator {
double estimate_match_frequency(const Matches &matches) override {
return communicator.estimate_match_frequency(matches);
}
- Hits selectBest(SortedHitSequence sortedHits) override {
- auto result = communicator.selectBest(sortedHits);
+ TaggedHits get_second_phase_work(SortedHitSequence sortedHits, size_t thread_id) override {
+ auto result = communicator.get_second_phase_work(sortedHits, thread_id);
timer = vespalib::Timer();
return result;
}
- RangePair rangeCover(const RangePair &ranges) override {
- RangePair result = communicator.rangeCover(ranges);
+ std::pair<Hits,RangePair> complete_second_phase(TaggedHits my_results, size_t thread_id) override {
+ auto result = communicator.complete_second_phase(std::move(my_results), thread_id);
elapsed = timer.elapsed();
return result;
}
diff --git a/searchcore/src/vespa/searchcore/proton/matching/match_thread.cpp b/searchcore/src/vespa/searchcore/proton/matching/match_thread.cpp
index 0e80d31a063..5cb4394880f 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/match_thread.cpp
+++ b/searchcore/src/vespa/searchcore/proton/matching/match_thread.cpp
@@ -202,8 +202,8 @@ MatchThread::match_loop(MatchTools &tools, HitCollector &hits)
uint32_t matches = context.matches;
if (do_limit && context.isBelowLimit()) {
const size_t searchedSoFar = scheduler.total_size(thread_id);
- LOG(debug, "Limit not reached (had %d) at docid=%d which is after %zu docs.",
- matches, scheduler.total_span(thread_id).end, searchedSoFar);
+ LOG(debug, "Limit not reached (had %d) after %zu docs.",
+ matches, searchedSoFar);
estimate_match_frequency(matches, searchedSoFar);
tools.match_limiter().updateDocIdSpaceEstimate(searchedSoFar, 0);
}
@@ -293,35 +293,31 @@ MatchThread::findMatches(MatchTools &tools)
trace->addEvent(4, "Start match and first phase rank");
match_loop_helper(tools, hits);
if (tools.has_second_phase_rank()) {
- { // 2nd phase ranking
- trace->addEvent(4, "Start second phase rerank");
- tools.setup_second_phase();
- DocidRange docid_range = scheduler.total_span(thread_id);
- tools.search().initRange(docid_range.begin, docid_range.end);
- auto sorted_hit_seq = matchToolsFactory.should_diversify()
- ? hits.getSortedHitSequence(matchParams.arraySize)
- : hits.getSortedHitSequence(matchParams.heapSize);
- trace->addEvent(5, "Synchronize before second phase rerank");
- WaitTimer select_best_timer(wait_time_s);
- auto kept_hits = communicator.selectBest(sorted_hit_seq);
- select_best_timer.done();
- DocumentScorer scorer(tools.rank_program(), tools.search());
- if (tools.getDoom().hard_doom()) {
- kept_hits.clear();
- }
- uint32_t reRanked = hits.reRank(scorer, std::move(kept_hits));
- if (auto onReRankTask = matchToolsFactory.createOnReRankTask()) {
- onReRankTask->run(hits.getReRankedHits());
- }
- thread_stats.docsReRanked(reRanked);
+ trace->addEvent(4, "Start second phase rerank");
+ tools.setup_second_phase();
+ DocidRange docid_range(1, matchParams.numDocs);
+ tools.search().initRange(docid_range.begin, docid_range.end);
+ auto sorted_hit_seq = matchToolsFactory.should_diversify()
+ ? hits.getSortedHitSequence(matchParams.arraySize)
+ : hits.getSortedHitSequence(matchParams.heapSize);
+ trace->addEvent(5, "Synchronize before second phase rerank");
+ WaitTimer get_second_phase_work_timer(wait_time_s);
+ auto my_work = communicator.get_second_phase_work(sorted_hit_seq, thread_id);
+ get_second_phase_work_timer.done();
+ DocumentScorer scorer(tools.rank_program(), tools.search());
+ if (tools.getDoom().hard_doom()) {
+ my_work.clear();
}
- { // rank scaling
- trace->addEvent(5, "Synchronize before rank scaling");
- auto my_ranges = hits.getRanges();
- WaitTimer range_cover_timer(wait_time_s);
- auto ranges = communicator.rangeCover(my_ranges);
- range_cover_timer.done();
- hits.setRanges(ranges);
+ scorer.score(my_work);
+ thread_stats.docsReRanked(my_work.size());
+ trace->addEvent(5, "Synchronize before rank scaling");
+ WaitTimer complete_second_phase_timer(wait_time_s);
+ auto [kept_hits, ranges] = communicator.complete_second_phase(my_work, thread_id);
+ complete_second_phase_timer.done();
+ hits.setReRankedHits(std::move(kept_hits));
+ hits.setRanges(ranges);
+ if (auto onReRankTask = matchToolsFactory.createOnReRankTask()) {
+ onReRankTask->run(hits.getReRankedHits());
}
}
trace->addEvent(4, "Create result set");
diff --git a/searchlib/src/tests/hitcollector/hitcollector_test.cpp b/searchlib/src/tests/hitcollector/hitcollector_test.cpp
index 2274314c7da..8aceb583b9c 100644
--- a/searchlib/src/tests/hitcollector/hitcollector_test.cpp
+++ b/searchlib/src/tests/hitcollector/hitcollector_test.cpp
@@ -14,26 +14,28 @@ using namespace search::queryeval;
typedef std::map<uint32_t, feature_t> ScoreMap;
-struct BasicScorer : public HitCollector::DocumentScorer
+using Ranges = std::pair<Scores, Scores>;
+
+struct BasicScorer
{
feature_t _scoreDelta;
explicit BasicScorer(feature_t scoreDelta) : _scoreDelta(scoreDelta) {}
- feature_t score(uint32_t docId) override {
- return docId + _scoreDelta;
+ feature_t score(uint32_t docid) const {
+ return (docid + _scoreDelta);
}
};
-struct PredefinedScorer : public HitCollector::DocumentScorer
+struct PredefinedScorer
{
ScoreMap _scores;
explicit PredefinedScorer(ScoreMap scores) : _scores(std::move(scores)) {}
- feature_t score(uint32_t docId) override {
- feature_t retval = default_rank_value;
- auto itr = _scores.find(docId);
+ feature_t score(uint32_t docid) const {
+ feature_t my_score = default_rank_value;
+ auto itr = _scores.find(docid);
if (itr != _scores.end()) {
- retval = itr->second;
+ my_score = itr->second;
}
- return retval;
+ return my_score;
}
};
@@ -46,6 +48,20 @@ std::vector<HitCollector::Hit> extract(SortedHitSequence seq) {
return ret;
}
+template <typename Scorer>
+size_t do_reRank(const Scorer &scorer, HitCollector &hc, size_t count) {
+ Ranges ranges;
+ auto hits = extract(hc.getSortedHitSequence(count));
+ for (auto &[docid, score]: hits) {
+ ranges.first.update(score);
+ score = scorer.score(docid);
+ ranges.second.update(score);
+ }
+ hc.setRanges(ranges);
+ hc.setReRankedHits(std::move(hits));
+ return hc.getReRankedHits().size();
+}
+
void checkResult(const ResultSet & rs, const std::vector<RankedHit> & exp)
{
if ( ! exp.empty()) {
@@ -156,7 +172,7 @@ struct Fixture {
}
}
size_t reRank(size_t count) {
- return hc.reRank(scorer, extract(hc.getSortedHitSequence(count)));
+ return do_reRank(scorer, hc, count);
}
size_t reRank() { return reRank(5); }
};
@@ -295,7 +311,7 @@ void testScaling(const std::vector<feature_t> &initScores,
PredefinedScorer scorer(std::move(finalScores));
// perform second phase ranking
- EXPECT_EQUAL(2u, hc.reRank(scorer, extract(hc.getSortedHitSequence(2))));
+ EXPECT_EQUAL(2u, do_reRank(scorer, hc, 2));
// check results
std::unique_ptr<ResultSet> rs = hc.getResultSet();
@@ -462,7 +478,7 @@ TEST_F("require that result set is merged correctly with second phase ranking (d
f.hc.addHit(i, i + 1000);
addExpectedHitForMergeTest(f, expRh, i);
}
- EXPECT_EQUAL(f.maxHeapSize, f.hc.reRank(scorer, extract(f.hc.getSortedHitSequence(f.maxHeapSize))));
+ EXPECT_EQUAL(f.maxHeapSize, do_reRank(scorer, f.hc, f.maxHeapSize));
std::unique_ptr<ResultSet> rs = f.hc.getResultSet();
TEST_DO(checkResult(*rs, expRh));
}
diff --git a/searchlib/src/vespa/searchlib/queryeval/hitcollector.cpp b/searchlib/src/vespa/searchlib/queryeval/hitcollector.cpp
index 7002faa6451..3f41dc3553b 100644
--- a/searchlib/src/vespa/searchlib/queryeval/hitcollector.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/hitcollector.cpp
@@ -45,9 +45,7 @@ HitCollector::HitCollector(uint32_t numDocs,
_bitVector(),
_reRankedHits(),
_scale(1.0),
- _adjust(0),
- _hasReRanked(false),
- _needReScore(false)
+ _adjust(0)
{
if (_maxHitsSize > 0) {
_collector = std::make_unique<RankedHitCollector>(*this);
@@ -174,32 +172,12 @@ HitCollector::getSortedHitSequence(size_t max_hits)
return SortedHitSequence(&_hits[0], &_scoreOrder[0], num_hits);
}
-size_t
-HitCollector::reRank(DocumentScorer &scorer, std::vector<Hit> hits) {
- if (hits.empty()) { return 0; }
-
- size_t hitsToReRank = hits.size();
- Scores &initScores = _ranges.first;
- Scores &finalScores = _ranges.second;
- initScores = Scores(hits.back().second, hits.front().second);
- finalScores = Scores(std::numeric_limits<feature_t>::max(),
- -std::numeric_limits<feature_t>::max());
-
- std::sort(hits.begin(), hits.end()); // sort on docId
- for (auto &hit : hits) {
- hit.second = scorer.score(hit.first);
- finalScores.low = std::min(finalScores.low, hit.second);
- finalScores.high = std::max(finalScores.high, hit.second);
- }
- _reRankedHits = std::move(hits);
- _hasReRanked = true;
- return hitsToReRank;
-}
-
-std::pair<Scores, Scores>
-HitCollector::getRanges() const
+void
+HitCollector::setReRankedHits(std::vector<Hit> hits)
{
- return _ranges;
+ auto sort_on_docid = [](const Hit &a, const Hit &b){ return (a.first < b.first); };
+ std::sort(hits.begin(), hits.end(), sort_on_docid);
+ _reRankedHits = std::move(hits);
}
void
@@ -230,6 +208,7 @@ mergeHitsIntoResultSet(const std::vector<HitCollector::Hit> &hits, ResultSet &re
std::unique_ptr<ResultSet>
HitCollector::getResultSet(HitRank default_value)
{
+ bool needReScore = false;
Scores &initHeapScores = _ranges.first;
Scores &finalHeapScores = _ranges.second;
if (initHeapScores.low > finalHeapScores.low) {
@@ -242,7 +221,7 @@ HitCollector::getResultSet(HitRank default_value)
if (finalRange < 1.0) finalRange = 1.0f;
_scale = finalRange / initRange;
_adjust = initHeapScores.low * _scale - finalHeapScores.low;
- _needReScore = true;
+ needReScore = true;
}
// destroys the heap property or score sort order
@@ -252,7 +231,7 @@ HitCollector::getResultSet(HitRank default_value)
if ( ! _collector->isDocIdCollector() ) {
unsigned int iSize = _hits.size();
rs->allocArray(iSize);
- if (_needReScore) {
+ if (needReScore) {
for (uint32_t i = 0; i < iSize; ++i) {
rs->push_back(RankedHit(_hits[i].first, getReScore(_hits[i].second)));
}
@@ -269,7 +248,7 @@ HitCollector::getResultSet(HitRank default_value)
unsigned int jSize = _docIdVector.size();
rs->allocArray(jSize);
uint32_t i = 0;
- if (_needReScore) {
+ if (needReScore) {
for (uint32_t j = 0; j < jSize; ++j) {
uint32_t docId = _docIdVector[j];
if (i < iSize && docId == _hits[i].first) {
@@ -292,7 +271,7 @@ HitCollector::getResultSet(HitRank default_value)
}
}
- if (_hasReRanked) {
+ if (!_reRankedHits.empty()) {
mergeHitsIntoResultSet(_reRankedHits, *rs);
}
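
A worked example of the scaling arithmetic above (assuming the re-scoring step applies score * _scale - _adjust, which the _scale and _adjust definitions imply): with a first-phase heap range of [10, 20] and a second-phase range of [2, 6], initRange = 10, finalRange = 4, _scale = 0.4 and _adjust = 10 * 0.4 - 2 = 2. A non-reranked score s is then reported as 0.4 * s - 2, so 10 maps to 2 and 20 maps to 6; first-phase scores are squeezed into the second-phase range and cannot overshoot the re-ranked hits.
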
diff --git a/searchlib/src/vespa/searchlib/queryeval/hitcollector.h b/searchlib/src/vespa/searchlib/queryeval/hitcollector.h
index 97beaa0fd55..a7a6af68e1f 100644
--- a/searchlib/src/vespa/searchlib/queryeval/hitcollector.h
+++ b/searchlib/src/vespa/searchlib/queryeval/hitcollector.h
@@ -20,14 +20,6 @@ class HitCollector {
public:
using Hit = std::pair<uint32_t, feature_t>;
- /**
- * Interface used to calculate the second phase score for the documents being re-ranked.
- */
- struct DocumentScorer {
- virtual ~DocumentScorer() {}
- virtual feature_t score(uint32_t docId) = 0;
- };
-
private:
enum class SortOrder { NONE, DOC_ID, HEAP };
@@ -47,9 +39,6 @@ private:
feature_t _scale;
feature_t _adjust;
- bool _hasReRanked;
- bool _needReScore;
-
struct ScoreComparator {
bool operator() (const Hit & lhs, const Hit & rhs) const {
if (lhs.second == rhs.second) {
@@ -178,15 +167,9 @@ public:
SortedHitSequence getSortedHitSequence(size_t max_hits);
const std::vector<Hit> & getReRankedHits() const { return _reRankedHits; }
+ void setReRankedHits(std::vector<Hit> hits);
- /**
- * Re-ranks the given hits by invoking the score() method on the
- * given document scorer. The hits are sorted on doc id so that
- * score() is called in doc id order.
- **/
- size_t reRank(DocumentScorer &scorer, std::vector<Hit> hits);
-
- std::pair<Scores, Scores> getRanges() const;
+ const std::pair<Scores, Scores> &getRanges() const { return _ranges; }
void setRanges(const std::pair<Scores, Scores> &ranges);
/**
diff --git a/searchlib/src/vespa/searchlib/queryeval/scores.h b/searchlib/src/vespa/searchlib/queryeval/scores.h
index e8dae898909..08106e0c750 100644
--- a/searchlib/src/vespa/searchlib/queryeval/scores.h
+++ b/searchlib/src/vespa/searchlib/queryeval/scores.h
@@ -3,6 +3,7 @@
#pragma once
#include <vespa/searchlib/common/feature.h>
+#include <algorithm>
namespace search::queryeval {
@@ -13,6 +14,16 @@ struct Scores {
Scores(feature_t l, feature_t h) : low(l), high(h) {}
bool isValid() const { return low <= high; }
+
+ void update(feature_t score) {
+ if (isValid()) {
+ low = std::min(low, score);
+ high = std::max(high, score);
+ } else {
+ low = score;
+ high = score;
+ }
+ }
};
}
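
Scores::update grows a range to include a new score, seeding the range from the first score while the range is still invalid. A small usage sketch, assuming a default-constructed Scores starts out as an invalid (empty) range, which is what the callers in this patch rely on:

    search::queryeval::Scores range;  // assumed invalid/empty until the first update
    range.update(3.5);                // first score seeds both low and high
    range.update(1.25);
    range.update(7.0);
    // range.low == 1.25, range.high == 7.0, range.isValid() is true
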
diff --git a/serviceview/pom.xml b/serviceview/pom.xml
index 1d1ad765d89..6d8ae0751ea 100644
--- a/serviceview/pom.xml
+++ b/serviceview/pom.xml
@@ -34,6 +34,11 @@
<artifactId>bundle-plugin</artifactId>
<extensions>true</extensions>
</plugin>
+ <plugin>
+ <!-- Explicit for IntelliJ to detect correct language level from parent -->
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ </plugin>
</plugins>
</build>
</project>
diff --git a/storage/src/tests/common/teststorageapp.cpp b/storage/src/tests/common/teststorageapp.cpp
index 14f1e78f5ca..1a6201d8aa3 100644
--- a/storage/src/tests/common/teststorageapp.cpp
+++ b/storage/src/tests/common/teststorageapp.cpp
@@ -143,7 +143,6 @@ TestServiceLayerApp::TestServiceLayerApp(vespalib::stringref configId)
_executor(vespalib::SequencedTaskExecutor::create(1))
{
lib::NodeState ns(*_nodeStateUpdater.getReportedNodeState());
- ns.setDiskCount(1);
_nodeStateUpdater.setReportedNodeState(ns);
}
@@ -156,7 +155,6 @@ TestServiceLayerApp::TestServiceLayerApp(NodeIndex index,
_executor(vespalib::SequencedTaskExecutor::create(1))
{
lib::NodeState ns(*_nodeStateUpdater.getReportedNodeState());
- ns.setDiskCount(1);
_nodeStateUpdater.setReportedNodeState(ns);
}
diff --git a/storage/src/tests/distributor/bucketdbupdatertest.cpp b/storage/src/tests/distributor/bucketdbupdatertest.cpp
index 4b8ea56e5ca..fa540669b4b 100644
--- a/storage/src/tests/distributor/bucketdbupdatertest.cpp
+++ b/storage/src/tests/distributor/bucketdbupdatertest.cpp
@@ -1474,14 +1474,6 @@ TEST_F(BucketDBUpdaterTest, pending_cluster_state_send_messages) {
getSentNodes("distributor:10 storage:2",
"distributor:10 .1.s:d storage:2"));
- EXPECT_EQ(getNodeList({1}),
- getSentNodes("distributor:2 storage:2",
- "distributor:2 storage:2 .1.d:3 .1.d.1.s:d"));
-
- EXPECT_EQ(getNodeList({1}),
- getSentNodes("distributor:2 storage:2 .1.s:d",
- "distributor:2 storage:2 .1.d:3 .1.d.1.s:d"));
-
EXPECT_EQ(std::string(""),
getSentNodes("distributor:2 storage:2",
"distributor:3 .2.s:i storage:2"));
@@ -1723,16 +1715,6 @@ TEST_F(BucketDBUpdaterTest, pending_cluster_state_merge) {
"",
"0:1,2,4,5|1:2,3,4,6|2:1,3,5,6"));
- // Node came up with fewer buckets (lost disk)
- EXPECT_EQ(
- std::string("4:1|2:0,1|6:1,2|1:2,0|5:2|3:2,1|"),
- mergeBucketLists(
- lib::ClusterState("distributor:1 storage:3"),
- "0:1,2,4,5|1:2,3,4,6|2:1,3,5,6",
- lib::ClusterState("distributor:1 storage:3 .0.d:3 .0.d.1.s:d"),
- "0:1,2")
- );
-
// New node came up
EXPECT_EQ(
std::string("4:0,1|2:0,1|6:1,2,3|1:0,2,3|5:2,0,3|3:2,1,3|"),
@@ -1749,27 +1731,6 @@ TEST_F(BucketDBUpdaterTest, pending_cluster_state_merge) {
"0:1,2,4,5|1:2,3,4,6|2:1,3,5,6",
"0:1,2,6,8"));
- // Node came up with no buckets
- EXPECT_EQ(
- std::string("4:1|2:1|6:1,2|1:2|5:2|3:2,1|"),
- mergeBucketLists(
- lib::ClusterState("distributor:1 storage:3"),
- "0:1,2,4,5|1:2,3,4,6|2:1,3,5,6",
- lib::ClusterState("distributor:1 storage:3 .0.d:3 .0.d.1.s:d"),
- "0:")
- );
-
- // One node lost a disk, another was just reasked (distributor
- // change)
- EXPECT_EQ(
- std::string("2:0,1|6:1,2|1:2,0|5:2|3:2,1|"),
- mergeBucketLists(
- lib::ClusterState("distributor:1 storage:3"),
- "0:1,2,4,5|1:2,3,6|2:1,3,5,6",
- lib::ClusterState("distributor:1 storage:3 .0.d:3 .0.d.1.s:d"),
- "0:1,2|1:2,3")
- );
-
// Bucket info format is "bucketid/checksum/count/size"
// Node went from initializing to up and invalid bucket went to empty.
EXPECT_EQ(
@@ -2168,14 +2129,6 @@ TEST_F(BucketDBUpdaterTest, DISABLED_cluster_config_downsize_only_sends_to_avail
EXPECT_EQ((nodeVec{0, 1, 2}), getSendSet());
}
-TEST_F(BucketDBUpdaterTest, changed_disk_set_triggers_re_fetch) {
- // Same number of online disks, but the set of disks has changed.
- EXPECT_EQ(
- getNodeList({1}),
- getSentNodes("distributor:2 storage:2 .1.d:3 .1.d.2.s:d",
- "distributor:2 storage:2 .1.d:3 .1.d.1.s:d"));
-}
-
/**
* Test scenario where a cluster is downsized by removing a subset of the nodes
* from the distribution configuration. The system must be able to deal with
diff --git a/storage/src/tests/distributor/statecheckerstest.cpp b/storage/src/tests/distributor/statecheckerstest.cpp
index 0bb01af2ae1..f66aab26dc9 100644
--- a/storage/src/tests/distributor/statecheckerstest.cpp
+++ b/storage/src/tests/distributor/statecheckerstest.cpp
@@ -518,80 +518,6 @@ StateCheckersTest::enableInconsistentJoinInConfig(bool enabled)
getConfig().configure(config);
}
-TEST_F(StateCheckersTest, allow_inconsistent_join_in_differing_sibling_ideal_state) {
- // Normally, bucket siblings have an ideal state on the same node in order
- // to enable joining these back together. However, the ideal disks assigned
- // may differ and it's sufficient for a sibling bucket's ideal disk to be
- // down on the node of its other sibling for it to be assigned a different
- // node. In this case, there's no other way to get buckets joined back
- // together than if we allow bucket replicas to get temporarily out of sync
- // by _forcing_ a join across all replicas no matter their placement.
- // This will trigger a merge to reconcile and move the new bucket copies to
- // their ideal location.
- setupDistributor(2, 3, "distributor:1 storage:3 .0.d:20 .0.d.14.s:d .2.d:20");
- document::BucketId sibling1(33, 0x000000001); // ideal disk 14 on node 0
- document::BucketId sibling2(33, 0x100000001); // ideal disk 1 on node 0
-
- // Full node sequence sorted by score for sibling(1|2) is [0, 2, 1].
- // Node 0 cannot be used, so use 1 instead.
- assertCurrentIdealState(sibling1, {2, 1});
- assertCurrentIdealState(sibling2, {0, 2});
-
- insertBucketInfo(sibling1, 2, 0x1, 2, 3);
- insertBucketInfo(sibling1, 1, 0x1, 2, 3);
- insertBucketInfo(sibling2, 0, 0x1, 2, 3);
- insertBucketInfo(sibling2, 2, 0x1, 2, 3);
-
- enableInconsistentJoinInConfig(true);
-
- EXPECT_EQ("BucketId(0x8000000000000001): "
- "[Joining buckets BucketId(0x8400000000000001) and "
- "BucketId(0x8400000100000001) because their size "
- "(6 bytes, 4 docs) is less than the configured limit "
- "of (100, 10)",
- testJoin(10, 100, 16, sibling1));
-}
-
-TEST_F(StateCheckersTest, do_not_allow_inconsistent_join_when_not_in_ideal_state) {
- setupDistributor(2, 4, "distributor:1 storage:4 .0.d:20 .0.d.14.s:d .2.d:20 .3.d:20");
- document::BucketId sibling1(33, 0x000000001);
- document::BucketId sibling2(33, 0x100000001);
-
- assertCurrentIdealState(sibling1, {3, 2});
- assertCurrentIdealState(sibling2, {3, 0});
-
- insertBucketInfo(sibling1, 3, 0x1, 2, 3);
- insertBucketInfo(sibling1, 2, 0x1, 2, 3);
- insertBucketInfo(sibling2, 3, 0x1, 2, 3);
- insertBucketInfo(sibling2, 1, 0x1, 2, 3); // not in ideal state
-
- enableInconsistentJoinInConfig(true);
-
- EXPECT_EQ("NO OPERATIONS GENERATED",
- testJoin(10, 100, 16, sibling1));
-}
-
-TEST_F(StateCheckersTest, do_not_allow_inconsistent_join_when_config_disabled) {
- setupDistributor(2, 3, "distributor:1 storage:3 .0.d:20 .0.d.14.s:d .2.d:20");
- document::BucketId sibling1(33, 0x000000001); // ideal disk 14 on node 0
- document::BucketId sibling2(33, 0x100000001); // ideal disk 1 on node 0
-
- // Full node sequence sorted by score for sibling(1|2) is [0, 2, 1].
- // Node 0 cannot be used, so use 1 instead.
- assertCurrentIdealState(sibling1, {2, 1});
- assertCurrentIdealState(sibling2, {0, 2});
-
- insertBucketInfo(sibling1, 2, 0x1, 2, 3);
- insertBucketInfo(sibling1, 1, 0x1, 2, 3);
- insertBucketInfo(sibling2, 0, 0x1, 2, 3);
- insertBucketInfo(sibling2, 2, 0x1, 2, 3);
-
- enableInconsistentJoinInConfig(false);
-
- EXPECT_EQ("NO OPERATIONS GENERATED",
- testJoin(10, 100, 16, sibling1));
-}
-
TEST_F(StateCheckersTest, no_join_when_invalid_copy_exists) {
setupDistributor(3, 10, "distributor:1 storage:3");
diff --git a/storage/src/tests/persistence/CMakeLists.txt b/storage/src/tests/persistence/CMakeLists.txt
index f922689b941..971d7b8f410 100644
--- a/storage/src/tests/persistence/CMakeLists.txt
+++ b/storage/src/tests/persistence/CMakeLists.txt
@@ -2,6 +2,7 @@
vespa_add_executable(storage_persistence_gtest_runner_app TEST
SOURCES
+ apply_bucket_diff_entry_result_test.cpp
bucketownershipnotifiertest.cpp
has_mask_remapper_test.cpp
mergehandlertest.cpp
diff --git a/storage/src/tests/persistence/apply_bucket_diff_entry_result_test.cpp b/storage/src/tests/persistence/apply_bucket_diff_entry_result_test.cpp
new file mode 100644
index 00000000000..4bca987152e
--- /dev/null
+++ b/storage/src/tests/persistence/apply_bucket_diff_entry_result_test.cpp
@@ -0,0 +1,70 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/storage/persistence/apply_bucket_diff_entry_result.h>
+#include <vespa/document/base/documentid.h>
+#include <vespa/document/bucket/bucketid.h>
+#include <vespa/document/bucket/bucketidfactory.h>
+#include <vespa/document/test/make_document_bucket.h>
+#include <vespa/persistence/spi/result.h>
+#include <gtest/gtest.h>
+
+using document::DocumentId;
+using document::test::makeDocumentBucket;
+
+namespace storage {
+
+using ResultVector = std::vector<ApplyBucketDiffEntryResult>;
+
+namespace {
+
+spi::Result spi_result_ok;
+spi::Result spi_result_fail(spi::Result::ErrorType::RESOURCE_EXHAUSTED, "write blocked");
+document::BucketIdFactory bucket_id_factory;
+const char *test_op = "put";
+metrics::DoubleAverageMetric dummy_metric("dummy", metrics::DoubleAverageMetric::Tags(), "dummy desc");
+
+ApplyBucketDiffEntryResult
+make_result(spi::Result &spi_result, const DocumentId &doc_id)
+{
+ std::promise<std::pair<std::unique_ptr<spi::Result>, double>> result_promise;
+ result_promise.set_value(std::make_pair(std::make_unique<spi::Result>(spi_result), 0.1));
+ spi::Bucket bucket(makeDocumentBucket(bucket_id_factory.getBucketId(doc_id)));
+ return ApplyBucketDiffEntryResult(result_promise.get_future(), bucket, doc_id, test_op, dummy_metric);
+}
+
+void
+check_results(ResultVector results)
+{
+ for (auto& result : results) {
+ result.wait();
+ }
+ for (auto& result : results) {
+ result.check_result();
+ }
+}
+
+}
+
+TEST(ApplyBucketDiffEntryResultTest, ok_results_can_be_checked)
+{
+ ResultVector results;
+ results.push_back(make_result(spi_result_ok, DocumentId("id::test::0")));
+ results.push_back(make_result(spi_result_ok, DocumentId("id::test::1")));
+ check_results(std::move(results));
+}
+
+TEST(ApplyBucketDiffEntryResultTest, first_failed_result_throws_exception)
+{
+ ResultVector results;
+ results.push_back(make_result(spi_result_ok, DocumentId("id::test::0")));
+ results.push_back(make_result(spi_result_fail, DocumentId("id::test::1")));
+ results.push_back(make_result(spi_result_fail, DocumentId("id::test::2")));
+ try {
+ check_results(std::move(results));
+ FAIL() << "Failed to throw exception for failed result";
+ } catch (std::exception &e) {
+ EXPECT_EQ("Failed put for id::test::1 in Bucket(0xeb4700c03842cac4): Result(5, write blocked)", std::string(e.what()));
+ }
+}
+
+}
diff --git a/storage/src/tests/persistence/common/filestortestfixture.h b/storage/src/tests/persistence/common/filestortestfixture.h
index 548dd4f3bfd..dcfeb42b4fd 100644
--- a/storage/src/tests/persistence/common/filestortestfixture.h
+++ b/storage/src/tests/persistence/common/filestortestfixture.h
@@ -28,7 +28,7 @@ public:
void SetUp() override;
void TearDown() override;
- void setupPersistenceThreads(uint32_t diskCount);
+ void setupPersistenceThreads(uint32_t threads);
void createBucket(const document::BucketId& bid);
bool bucketExistsInDb(const document::BucketId& bucket) const;
diff --git a/storage/src/tests/persistence/mergehandlertest.cpp b/storage/src/tests/persistence/mergehandlertest.cpp
index 335863322d9..81f98136575 100644
--- a/storage/src/tests/persistence/mergehandlertest.cpp
+++ b/storage/src/tests/persistence/mergehandlertest.cpp
@@ -896,7 +896,7 @@ TEST_F(MergeHandlerTest, apply_bucket_diff_spi_failures) {
ExpectedExceptionSpec exceptions[] = {
{ PersistenceProviderWrapper::FAIL_CREATE_ITERATOR, "create iterator" },
{ PersistenceProviderWrapper::FAIL_ITERATE, "iterate" },
- { PersistenceProviderWrapper::FAIL_PUT, "Failed put" },
+ { PersistenceProviderWrapper::FAIL_PUT | PersistenceProviderWrapper::FAIL_REMOVE, "Failed put" },
{ PersistenceProviderWrapper::FAIL_REMOVE, "Failed remove" },
};
diff --git a/storage/src/vespa/storage/bucketdb/btree_lockable_map.hpp b/storage/src/vespa/storage/bucketdb/btree_lockable_map.hpp
index c8597915e44..5f137029915 100644
--- a/storage/src/vespa/storage/bucketdb/btree_lockable_map.hpp
+++ b/storage/src/vespa/storage/bucketdb/btree_lockable_map.hpp
@@ -407,7 +407,7 @@ void BTreeLockableMap<T>::ReadGuardImpl::for_each(std::function<void(uint64_t, c
template <typename T>
std::unique_ptr<ConstIterator<const T&>>
BTreeLockableMap<T>::ReadGuardImpl::create_iterator() const {
- return _snapshot.template create_iterator(); // TODO test
+ return _snapshot.create_iterator(); // TODO test
}
template <typename T>
diff --git a/storage/src/vespa/storage/persistence/CMakeLists.txt b/storage/src/vespa/storage/persistence/CMakeLists.txt
index ff8d29f7f45..647d7fa1098 100644
--- a/storage/src/vespa/storage/persistence/CMakeLists.txt
+++ b/storage/src/vespa/storage/persistence/CMakeLists.txt
@@ -1,6 +1,8 @@
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_library(storage_spersistence OBJECT
SOURCES
+ apply_bucket_diff_entry_complete.cpp
+ apply_bucket_diff_entry_result.cpp
asynchandler.cpp
bucketownershipnotifier.cpp
bucketprocessor.cpp
diff --git a/storage/src/vespa/storage/persistence/apply_bucket_diff_entry_complete.cpp b/storage/src/vespa/storage/persistence/apply_bucket_diff_entry_complete.cpp
new file mode 100644
index 00000000000..7f1665a92bb
--- /dev/null
+++ b/storage/src/vespa/storage/persistence/apply_bucket_diff_entry_complete.cpp
@@ -0,0 +1,34 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "apply_bucket_diff_entry_complete.h"
+#include <vespa/persistence/spi/result.h>
+#include <cassert>
+
+namespace storage {
+
+ApplyBucketDiffEntryComplete::ApplyBucketDiffEntryComplete(ResultPromise result_promise, const framework::Clock& clock)
+ : _result_handler(nullptr),
+ _result_promise(std::move(result_promise)),
+ _start_time(clock)
+{
+}
+
+ApplyBucketDiffEntryComplete::~ApplyBucketDiffEntryComplete() = default;
+
+void
+ApplyBucketDiffEntryComplete::onComplete(std::unique_ptr<spi::Result> result)
+{
+ if (_result_handler != nullptr) {
+ _result_handler->handle(*result);
+ }
+ _result_promise.set_value(std::make_pair(std::move(result), _start_time.getElapsedTimeAsDouble()));
+}
+
+void
+ApplyBucketDiffEntryComplete::addResultHandler(const spi::ResultHandler* resultHandler)
+{
+ assert(_result_handler == nullptr);
+ _result_handler = resultHandler;
+}
+
+}
diff --git a/storage/src/vespa/storage/persistence/apply_bucket_diff_entry_complete.h b/storage/src/vespa/storage/persistence/apply_bucket_diff_entry_complete.h
new file mode 100644
index 00000000000..f492727b0e6
--- /dev/null
+++ b/storage/src/vespa/storage/persistence/apply_bucket_diff_entry_complete.h
@@ -0,0 +1,28 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/persistence/spi/operationcomplete.h>
+#include <vespa/storageframework/generic/clock/timer.h>
+#include <future>
+
+namespace storage {
+
+/*
+ * Complete handler for a bucket diff entry spi operation (putAsync
+ * or removeAsync)
+ */
+class ApplyBucketDiffEntryComplete : public spi::OperationComplete
+{
+ using ResultPromise = std::promise<std::pair<std::unique_ptr<spi::Result>, double>>;
+ const spi::ResultHandler* _result_handler;
+ ResultPromise _result_promise;
+ framework::MilliSecTimer _start_time;
+public:
+ ApplyBucketDiffEntryComplete(ResultPromise result_promise, const framework::Clock& clock);
+ ~ApplyBucketDiffEntryComplete();
+ void onComplete(std::unique_ptr<spi::Result> result) override;
+ void addResultHandler(const spi::ResultHandler* resultHandler) override;
+};
+
+}
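
The completion handler is one half of a promise/future pair: the promise lives here and is fulfilled from onComplete(), while the matching future ends up in an ApplyBucketDiffEntryResult. A sketch of the wiring, mirroring what MergeHandler::applyDiffEntry does further down in this patch ('clock' stands in for the framework clock supplied by the caller):

    std::promise<std::pair<std::unique_ptr<spi::Result>, double>> result_promise;
    auto future_result = result_promise.get_future();
    auto complete = std::make_unique<ApplyBucketDiffEntryComplete>(std::move(result_promise), clock);
    // pass 'complete' to the async SPI call (putAsync or removeAsync) and keep
    // 'future_result' in an ApplyBucketDiffEntryResult for later wait()/check_result()
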
diff --git a/storage/src/vespa/storage/persistence/apply_bucket_diff_entry_result.cpp b/storage/src/vespa/storage/persistence/apply_bucket_diff_entry_result.cpp
new file mode 100644
index 00000000000..d582168fcf9
--- /dev/null
+++ b/storage/src/vespa/storage/persistence/apply_bucket_diff_entry_result.cpp
@@ -0,0 +1,46 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "apply_bucket_diff_entry_result.h"
+#include <vespa/persistence/spi/result.h>
+#include <vespa/vespalib/stllike/asciistream.h>
+#include <cassert>
+
+namespace storage {
+
+ApplyBucketDiffEntryResult::ApplyBucketDiffEntryResult(FutureResult future_result, spi::Bucket bucket, document::DocumentId doc_id, const char *op, metrics::DoubleAverageMetric& latency_metric)
+ : _future_result(std::move(future_result)),
+ _bucket(bucket),
+ _doc_id(std::move(doc_id)),
+ _op(op),
+ _latency_metric(latency_metric)
+{
+}
+
+ApplyBucketDiffEntryResult::ApplyBucketDiffEntryResult(ApplyBucketDiffEntryResult &&rhs) = default;
+
+ApplyBucketDiffEntryResult::~ApplyBucketDiffEntryResult() = default;
+
+void
+ApplyBucketDiffEntryResult::wait()
+{
+ assert(_future_result.valid());
+ _future_result.wait();
+}
+
+void
+ApplyBucketDiffEntryResult::check_result()
+{
+ assert(_future_result.valid());
+ auto result = _future_result.get();
+ if (result.first->hasError()) {
+ vespalib::asciistream ss;
+ ss << "Failed " << _op
+ << " for " << _doc_id.toString()
+ << " in " << _bucket
+ << ": " << result.first->toString();
+ throw std::runtime_error(ss.str());
+ }
+ _latency_metric.addValue(result.second);
+}
+
+}
diff --git a/storage/src/vespa/storage/persistence/apply_bucket_diff_entry_result.h b/storage/src/vespa/storage/persistence/apply_bucket_diff_entry_result.h
new file mode 100644
index 00000000000..f7653cd35a5
--- /dev/null
+++ b/storage/src/vespa/storage/persistence/apply_bucket_diff_entry_result.h
@@ -0,0 +1,33 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/document/base/documentid.h>
+#include <vespa/persistence/spi/bucket.h>
+#include <vespa/metrics/valuemetric.h>
+#include <future>
+
+namespace storage::spi { class Result; }
+
+namespace storage {
+
+/*
+ * Result of a bucket diff entry spi operation (putAsync or removeAsync)
+ */
+class ApplyBucketDiffEntryResult {
+ using FutureResult = std::future<std::pair<std::unique_ptr<spi::Result>, double>>;
+ FutureResult _future_result;
+ spi::Bucket _bucket;
+ document::DocumentId _doc_id;
+ const char* _op;
+ metrics::DoubleAverageMetric& _latency_metric;
+
+public:
+ ApplyBucketDiffEntryResult(FutureResult future_result, spi::Bucket bucket, document::DocumentId doc_id, const char *op, metrics::DoubleAverageMetric& latency_metric);
+ ApplyBucketDiffEntryResult(ApplyBucketDiffEntryResult &&rhs);
+ ~ApplyBucketDiffEntryResult();
+ void wait();
+ void check_result();
+};
+
+}
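
Callers are expected to use this class in two passes, waiting for every pending operation before checking any of them, so a failing entry cannot leave later operations still in flight when the exception propagates. A sketch of the pattern, as used by applyDiffLocally and the new unit test:

    std::vector<ApplyBucketDiffEntryResult> async_results;
    // ... one entry per applyDiffEntry() call ...
    for (auto &result : async_results) {
        result.wait();           // every async put/remove has completed after this loop
    }
    for (auto &result : async_results) {
        result.check_result();   // first failed result throws std::runtime_error
    }
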
diff --git a/storage/src/vespa/storage/persistence/mergehandler.cpp b/storage/src/vespa/storage/persistence/mergehandler.cpp
index 51b575548d8..cab35e77bac 100644
--- a/storage/src/vespa/storage/persistence/mergehandler.cpp
+++ b/storage/src/vespa/storage/persistence/mergehandler.cpp
@@ -2,12 +2,15 @@
#include "mergehandler.h"
#include "persistenceutil.h"
+#include "apply_bucket_diff_entry_complete.h"
+#include "apply_bucket_diff_entry_result.h"
#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/document/fieldset/fieldsets.h>
#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/vespalib/util/exceptions.h>
#include <algorithm>
+#include <future>
#include <vespa/log/log.h>
LOG_SETUP(".persistence.mergehandler");
@@ -38,22 +41,6 @@ constexpr int getDeleteFlag() {
* Throws std::runtime_error if result has an error.
*/
void
-checkResult(const spi::Result& result,
- const spi::Bucket& bucket,
- const document::DocumentId& docId,
- const char* op)
-{
- if (result.hasError()) {
- vespalib::asciistream ss;
- ss << "Failed " << op
- << " for " << docId.toString()
- << " in " << bucket
- << ": " << result.toString();
- throw std::runtime_error(ss.str());
- }
-}
-
-void
checkResult(const spi::Result& result, const spi::Bucket& bucket, const char* op)
{
if (result.hasError()) {
@@ -493,25 +480,27 @@ MergeHandler::deserializeDiffDocument(
return doc;
}
-void
+ApplyBucketDiffEntryResult
MergeHandler::applyDiffEntry(const spi::Bucket& bucket,
const api::ApplyBucketDiffCommand::Entry& e,
spi::Context& context,
const document::DocumentTypeRepo& repo) const
{
+ std::promise<std::pair<std::unique_ptr<spi::Result>, double>> result_promise;
+ auto future_result = result_promise.get_future();
spi::Timestamp timestamp(e._entry._timestamp);
if (!(e._entry._flags & (DELETED | DELETED_IN_PLACE))) {
// Regular put entry
Document::SP doc(deserializeDiffDocument(e, repo));
DocumentId docId = doc->getId();
- framework::MilliSecTimer start_time(_clock);
- checkResult(_spi.put(bucket, timestamp, std::move(doc), context), bucket, docId, "put");
- _env._metrics.merge_handler_metrics.put_latency.addValue(start_time.getElapsedTimeAsDouble());
+ auto complete = std::make_unique<ApplyBucketDiffEntryComplete>(std::move(result_promise), _clock);
+ _spi.putAsync(bucket, timestamp, std::move(doc), context, std::move(complete));
+ return ApplyBucketDiffEntryResult(std::move(future_result), bucket, std::move(docId), "put", _env._metrics.merge_handler_metrics.put_latency);
} else {
DocumentId docId(e._docName);
- framework::MilliSecTimer start_time(_clock);
- checkResult(_spi.remove(bucket, timestamp, docId, context), bucket, docId, "remove");
- _env._metrics.merge_handler_metrics.remove_latency.addValue(start_time.getElapsedTimeAsDouble());
+ auto complete = std::make_unique<ApplyBucketDiffEntryComplete>(std::move(result_promise), _clock);
+ _spi.removeAsync(bucket, timestamp, docId, context, std::move(complete));
+ return ApplyBucketDiffEntryResult(std::move(future_result), bucket, std::move(docId), "remove", _env._metrics.merge_handler_metrics.remove_latency);
}
}
@@ -534,6 +523,7 @@ MergeHandler::applyDiffLocally(
uint32_t byteCount = 0;
uint32_t addedCount = 0;
uint32_t notNeededByteCount = 0;
+ std::vector<ApplyBucketDiffEntryResult> async_results;
std::vector<spi::DocEntry::UP> entries;
populateMetaData(bucket, MAX_TIMESTAMP, entries, context);
@@ -572,7 +562,7 @@ MergeHandler::applyDiffLocally(
++i;
LOG(spam, "ApplyBucketDiff(%s): Adding slot %s",
bucket.toString().c_str(), e.toString().c_str());
- applyDiffEntry(bucket, e, context, repo);
+ async_results.push_back(applyDiffEntry(bucket, e, context, repo));
} else {
assert(spi::Timestamp(e._entry._timestamp) == existing.getTimestamp());
// Diffing for existing timestamp; should either both be put
@@ -585,7 +575,7 @@ MergeHandler::applyDiffLocally(
"timestamp in %s. Diff slot: %s. Existing slot: %s",
bucket.toString().c_str(), e.toString().c_str(),
existing.toString().c_str());
- applyDiffEntry(bucket, e, context, repo);
+ async_results.push_back(applyDiffEntry(bucket, e, context, repo));
} else {
// Duplicate put, just ignore it.
LOG(debug, "During diff apply, attempting to add slot "
@@ -617,9 +607,15 @@ MergeHandler::applyDiffLocally(
LOG(spam, "ApplyBucketDiff(%s): Adding slot %s",
bucket.toString().c_str(), e.toString().c_str());
- applyDiffEntry(bucket, e, context, repo);
+ async_results.push_back(applyDiffEntry(bucket, e, context, repo));
byteCount += e._headerBlob.size() + e._bodyBlob.size();
}
+ for (auto &result_to_check : async_results) {
+ result_to_check.wait();
+ }
+ for (auto &result_to_check : async_results) {
+ result_to_check.check_result();
+ }
if (byteCount + notNeededByteCount != 0) {
_env._metrics.merge_handler_metrics.mergeAverageDataReceivedNeeded.addValue(
diff --git a/storage/src/vespa/storage/persistence/mergehandler.h b/storage/src/vespa/storage/persistence/mergehandler.h
index 5e65e1a39ec..64b1448577a 100644
--- a/storage/src/vespa/storage/persistence/mergehandler.h
+++ b/storage/src/vespa/storage/persistence/mergehandler.h
@@ -24,6 +24,7 @@ namespace storage {
namespace spi { struct PersistenceProvider; }
class PersistenceUtil;
+class ApplyBucketDiffEntryResult;
class MergeHandler : public Types {
@@ -82,10 +83,10 @@ private:
* Invoke either put, remove or unrevertable remove on the SPI
* depending on the flags in the diff entry.
*/
- void applyDiffEntry(const spi::Bucket&,
- const api::ApplyBucketDiffCommand::Entry&,
- spi::Context& context,
- const document::DocumentTypeRepo& repo) const;
+ ApplyBucketDiffEntryResult applyDiffEntry(const spi::Bucket&,
+ const api::ApplyBucketDiffCommand::Entry&,
+ spi::Context& context,
+ const document::DocumentTypeRepo& repo) const;
/**
* Fill entries-vector with metadata for bucket up to maxTimestamp,
diff --git a/storage/src/vespa/storage/storageserver/communicationmanager.cpp b/storage/src/vespa/storage/storageserver/communicationmanager.cpp
index 6ad2c960896..a35b9d1d59a 100644
--- a/storage/src/vespa/storage/storageserver/communicationmanager.cpp
+++ b/storage/src/vespa/storage/storageserver/communicationmanager.cpp
@@ -618,15 +618,13 @@ CommunicationManager::sendCommand(
}
void
-CommunicationManager::serializeNodeState(const api::GetNodeStateReply& gns, std::ostream& os,
- bool includeDescription, bool includeDiskDescription, bool useOldFormat) const
+CommunicationManager::serializeNodeState(const api::GetNodeStateReply& gns, std::ostream& os, bool includeDescription) const
{
vespalib::asciistream tmp;
if (gns.hasNodeState()) {
- gns.getNodeState().serialize(tmp, "", includeDescription, includeDiskDescription, useOldFormat);
+ gns.getNodeState().serialize(tmp, "", includeDescription);
} else {
- _component.getStateUpdater().getReportedNodeState()->serialize(tmp, "", includeDescription,
- includeDiskDescription, useOldFormat);
+ _component.getStateUpdater().getReportedNodeState()->serialize(tmp, "", includeDescription);
}
os << tmp.str();
}
@@ -643,14 +641,14 @@ CommunicationManager::sendDirectRPCReply(
} else if (requestName == "getnodestate3") {
auto& gns(dynamic_cast<api::GetNodeStateReply&>(*reply));
std::ostringstream ns;
- serializeNodeState(gns, ns, true, true, false);
+ serializeNodeState(gns, ns, true);
request.addReturnString(ns.str().c_str());
request.addReturnString(gns.getNodeInfo().c_str());
LOGBP(debug, "Sending getnodestate3 reply with host info '%s'.", gns.getNodeInfo().c_str());
} else if (requestName == "getnodestate2") {
auto& gns(dynamic_cast<api::GetNodeStateReply&>(*reply));
std::ostringstream ns;
- serializeNodeState(gns, ns, true, true, false);
+ serializeNodeState(gns, ns, true);
request.addReturnString(ns.str().c_str());
LOGBP(debug, "Sending getnodestate2 reply with no host info.");
} else if (requestName == "setsystemstate2" || requestName == "setdistributionstates") {
@@ -667,7 +665,7 @@ CommunicationManager::sendDirectRPCReply(
if (reply->getType() == api::MessageType::GETNODESTATE_REPLY) {
api::GetNodeStateReply& gns(static_cast<api::GetNodeStateReply&>(*reply));
std::ostringstream ns;
- serializeNodeState(gns, ns, false, false, true);
+ serializeNodeState(gns, ns, false);
request.addReturnString(ns.str().c_str());
request.addReturnInt(static_cast<int>(gns.getNodeState().getInitProgress().getValue() * 100));
}
diff --git a/storage/src/vespa/storage/storageserver/communicationmanager.h b/storage/src/vespa/storage/storageserver/communicationmanager.h
index b10b12f404e..7227f1d7e5b 100644
--- a/storage/src/vespa/storage/storageserver/communicationmanager.h
+++ b/storage/src/vespa/storage/storageserver/communicationmanager.h
@@ -103,8 +103,7 @@ private:
void fail_with_unresolvable_bucket_space(std::unique_ptr<documentapi::DocumentMessage> msg,
const vespalib::string& error_message);
- void serializeNodeState(const api::GetNodeStateReply& gns, std::ostream& os, bool includeDescription,
- bool includeDiskDescription, bool useOldFormat) const;
+ void serializeNodeState(const api::GetNodeStateReply& gns, std::ostream& os, bool includeDescription) const;
static const uint64_t FORWARDED_MESSAGE = 0;
diff --git a/storage/src/vespa/storage/storageserver/servicelayernode.cpp b/storage/src/vespa/storage/storageserver/servicelayernode.cpp
index 1c8fdd4178d..ce487a38840 100644
--- a/storage/src/vespa/storage/storageserver/servicelayernode.cpp
+++ b/storage/src/vespa/storage/storageserver/servicelayernode.cpp
@@ -105,21 +105,10 @@ ServiceLayerNode::initializeNodeSpecific()
// node state.
NodeStateUpdater::Lock::SP lock(_component->getStateUpdater().grabStateChangeLock());
lib::NodeState ns(*_component->getStateUpdater().getReportedNodeState());
- ns.setDiskCount(1u);
ns.setCapacity(_serverConfig->nodeCapacity);
ns.setReliability(_serverConfig->nodeReliability);
- for (uint16_t i=0; i<_serverConfig->diskCapacity.size(); ++i) {
- if (i >= ns.getDiskCount()) {
- LOG(warning, "Capacity configured for partition %zu but only %u partitions found.",
- _serverConfig->diskCapacity.size(), ns.getDiskCount());
- continue;
- }
- lib::DiskState ds(ns.getDiskState(i));
- ds.setCapacity(_serverConfig->diskCapacity[i]);
- ns.setDiskState(i, ds);
- }
- LOG(debug, "Adjusting reported node state to include partition count and states, capacity and reliability: %s",
+ LOG(debug, "Adjusting reported node state to include capacity and reliability: %s",
ns.toString().c_str());
_component->getStateUpdater().setReportedNodeState(ns);
}
@@ -147,18 +136,6 @@ ServiceLayerNode::handleLiveConfigUpdate(const InitialGuard & initGuard)
ASSIGN(nodeCapacity);
ns.setCapacity(newC.nodeCapacity);
}
- if (DIFFER(diskCapacity)) {
- for (uint32_t i=0; i<newC.diskCapacity.size() && i<ns.getDiskCount(); ++i) {
- if (newC.diskCapacity[i] != oldC.diskCapacity[i]) {
- lib::DiskState ds(ns.getDiskState(i));
- ds.setCapacity(newC.diskCapacity[i]);
- ns.setDiskState(i, ds);
- LOG(info, "Live config update: Disk capacity of disk %u changed from %f to %f.",
- i, oldC.diskCapacity[i], newC.diskCapacity[i]);
- }
- }
- ASSIGN(diskCapacity);
- }
if (DIFFER(nodeReliability)) {
LOG(info, "Live config update: Node reliability changed from %u to %u.",
oldC.nodeReliability, newC.nodeReliability);
diff --git a/storage/src/vespa/storage/storageserver/statemanager.cpp b/storage/src/vespa/storage/storageserver/statemanager.cpp
index a041ab0cfff..653822626ed 100644
--- a/storage/src/vespa/storage/storageserver/statemanager.cpp
+++ b/storage/src/vespa/storage/storageserver/statemanager.cpp
@@ -253,15 +253,6 @@ StateManager::setReportedNodeState(const lib::NodeState& state)
"grabbed external lock");
assert(false);
}
- if (_nodeState->getDiskCount() != 0 &&
- state.getDiskCount() != _nodeState->getDiskCount())
- {
- std::ostringstream ost;
- ost << "Illegal to alter disk count after initialization. Tried to "
- << "alter disk count from " << _nodeState->getDiskCount()
- << " to " << state.getDiskCount();
- throw vespalib::IllegalArgumentException(ost.str(), VESPA_STRLOC);
- }
LOG(debug, "Adjusting reported node state to %s -> %s",
_nodeState->toString().c_str(), state.toString().c_str());
_nextNodeState = std::make_shared<lib::NodeState>(state);
diff --git a/storage/src/vespa/storage/storageserver/storagenode.cpp b/storage/src/vespa/storage/storageserver/storagenode.cpp
index cb9d5730fa8..afe21733a10 100644
--- a/storage/src/vespa/storage/storageserver/storagenode.cpp
+++ b/storage/src/vespa/storage/storageserver/storagenode.cpp
@@ -60,18 +60,6 @@ namespace {
}
}
-
-bool
-allDisksDown(const lib::NodeState &nodeState)
-{
- for (uint32_t i = 0; i < nodeState.getDiskCount(); ++i) {
- if (nodeState.getDiskState(i).getState() != lib::State::DOWN)
- return false;
- }
- return true;
-}
-
-
} // End of anonymous namespace
StorageNode::StorageNode(
@@ -599,28 +587,6 @@ StorageNode::requestShutdown(vespalib::stringref reason)
_attemptedStopped = true;
}
-void
-StorageNode::notifyPartitionDown(int partId, vespalib::stringref reason)
-{
- if (!_component)
- return;
- NodeStateUpdater::Lock::SP lock(_component->getStateUpdater().grabStateChangeLock());
- lib::NodeState nodeState(*_component->getStateUpdater().getReportedNodeState());
- if (partId >= nodeState.getDiskCount())
- return;
- lib::DiskState diskState(nodeState.getDiskState(partId));
- if (diskState.getState() == lib::State::DOWN)
- return;
- diskState.setState(lib::State::DOWN);
- diskState.setDescription(reason);
- nodeState.setDiskState(partId, diskState);
- if (allDisksDown(nodeState)) {
- nodeState.setState(lib::State::DOWN);
- nodeState.setDescription("All partitions are down");
- }
- _component->getStateUpdater().setReportedNodeState(nodeState);
-}
-
std::unique_ptr<StateManager>
StorageNode::releaseStateManager() {
return std::move(_stateManager);
diff --git a/storage/src/vespa/storage/storageserver/storagenode.h b/storage/src/vespa/storage/storageserver/storagenode.h
index a0997c4bacd..9450cdce5ff 100644
--- a/storage/src/vespa/storage/storageserver/storagenode.h
+++ b/storage/src/vespa/storage/storageserver/storagenode.h
@@ -91,7 +91,6 @@ public:
*/
virtual ResumeGuard pause() = 0;
void requestShutdown(vespalib::stringref reason) override;
- void notifyPartitionDown(int partId, vespalib::stringref reason);
DoneInitializeHandler& getDoneInitializeHandler() { return *this; }
// For testing
diff --git a/vdslib/src/tests/distribution/distributiontest.cpp b/vdslib/src/tests/distribution/distributiontest.cpp
index 5a337433bdb..d9ebba39916 100644
--- a/vdslib/src/tests/distribution/distributiontest.cpp
+++ b/vdslib/src/tests/distribution/distributiontest.cpp
@@ -280,30 +280,6 @@ struct MyTest {
}
return result;
}
- std::vector<uint16_t> getDiskCounts(uint16_t node) const {
- std::vector<uint16_t> result(3, 0);
- for (uint32_t i=0; i<_bucketsToTest; ++i) {
- document::BucketId bucket(16, i);
- std::vector<uint16_t> nodes;
- ClusterState clusterState(_state);
- _distribution->getIdealNodes(
- *_nodeType, clusterState, bucket, nodes,
- _upStates, _redundancy);
- for (uint32_t j=0; j<nodes.size(); ++j) {
- if (nodes[j] == node) {
- const NodeState& nodeState(clusterState.getNodeState(
- Node(NodeType::STORAGE, node)));
- // If disk was down, bucket should not map to this
- // node at all
- uint16_t disk = _distribution->getIdealDisk(
- nodeState, node, bucket,
- Distribution::IDEAL_DISK_EVEN_IF_DOWN);
- ++result[disk];
- }
- }
- }
- return result;
- }
};
MyTest::MyTest()
@@ -350,13 +326,6 @@ std::vector<uint16_t> createNodeCountList(const std::string& source,
EXPECT_EQ(exp123, cnt123); \
}
-#define ASSERT_BUCKET_DISK_COUNTS(node, test, result) \
-{ \
- std::vector<uint16_t> cnt123(test.getDiskCounts(node)); \
- std::vector<uint16_t> exp123(createNodeCountList(result, cnt123)); \
- EXPECT_EQ(exp123, cnt123); \
-}
-
TEST(DistributionTest, test_down)
{
ASSERT_BUCKET_NODE_COUNTS(
@@ -370,14 +339,6 @@ TEST(DistributionTest, test_down)
"0:+ 1:+ 2:+ 3:+ 8:+ 9:+");
}
-TEST(DistributionTest, testDiskDown)
-{
- ASSERT_BUCKET_DISK_COUNTS(
- 2,
- MyTest().state("storage:10 .2.d:3 .2.d.0:d"),
- "1:+ 2:+");
-}
-
TEST(DistributionTest, test_serialize_deserialize)
{
MyTest t1;
@@ -386,14 +347,6 @@ TEST(DistributionTest, test_serialize_deserialize)
EXPECT_EQ(t1.getNodeCounts(), t2.getNodeCounts());
}
-TEST(DistributionTest, test_disk_down_maintenance)
-{
- ASSERT_BUCKET_DISK_COUNTS(
- 2,
- MyTest().state("storage:10 .2.s:m .2.d:3 .2.d.0:d").upStates("um"),
- "1:+ 2:+");
-}
-
TEST(DistributionTest, test_initializing)
{
ASSERT_BUCKET_NODE_COUNTS(
@@ -448,132 +401,6 @@ TEST(DistributionTest, testHighSplitBit)
EXPECT_EQ(ost1.str(), ost2.str());
}
-TEST(DistributionTest, test_disk_capacity_weights)
-{
- uint16_t num_disks = 10;
- std::vector<double> capacities(num_disks);
-
- RandomGen rg(13);
- std::ostringstream ost;
- ost << "d:" << num_disks;
- for (unsigned i = 0; i < num_disks; ++i) {
- capacities[i] = rg.nextDouble();
- ost << " d." << i << ".c:" << capacities[i];
- }
-
- NodeState nodeState(ost.str(), &NodeType::STORAGE);
-
- Distribution distr(Distribution::getDefaultDistributionConfig(2, 3));
-
- for(int j=0; j < 10; ++j) {
- std::vector<float> diskDist(num_disks);
- for(int i=0; i < 1000; ++i) {
- document::BucketId id(16, i);
- int index = distr.getPreferredAvailableDisk(nodeState, j, id);
- diskDist[index]+=1;
- }
-
- //normalization
- for (unsigned i = 0; i < num_disks; ++i) {
- diskDist[i] /= capacities[i];
- }
-
- std::sort(diskDist.begin(), diskDist.end());
-
- double avg=0.0;
- for (unsigned i = 0; i < num_disks; ++i) {
- avg+=diskDist[i];
- }
- avg /= num_disks;
-
- double skew = (diskDist[num_disks-1]-avg)/(diskDist[num_disks-1]);
-
- EXPECT_LT(skew, 0.3);
- }
-}
-
-TEST(DistributionTest, test_disk_skew_local)
-{
- Distribution distr(Distribution::getDefaultDistributionConfig(2, 3, Distribution::MODULO_INDEX));
- std::vector<float> diskDist(100);
- NodeState nodeState;
- nodeState.setDiskCount(100);
- for(int i=0; i < 65536; i++) {
- document::BucketId id(16, i);
- int index = distr.getPreferredAvailableDisk(nodeState, 7, id);
- diskDist[index]+=1;
- }
-
- std::sort(diskDist.begin(), diskDist.end());
-
- EXPECT_LT((diskDist[99]-diskDist[0])/(diskDist[99]), 0.05);
-}
-
-TEST(DistributionTest, test_disk_skew_global)
-{
- uint16_t num_disks = 10;
- uint16_t num_nodes = 10;
- Distribution distr(Distribution::getDefaultDistributionConfig(2, num_nodes, Distribution::MODULO_INDEX));
- std::vector<std::vector<float> > diskDist(num_nodes, std::vector<float>(num_disks));
- NodeState nodeState;
- nodeState.setDiskCount(num_disks);
- for(uint16_t idx=0; idx < num_nodes; idx++) {
- for(int i=0; i < 1000; i++) {
- document::BucketId id(16, i);
- int diskIndex = distr.getPreferredAvailableDisk(nodeState, idx, id);
- diskDist[idx][diskIndex]+=1;
- }
- }
-
- std::vector<float> diskDist2;
- for(uint16_t idx=0; idx < num_nodes; idx++) {
- for(uint16_t d=0; d < num_disks; d++) {
- diskDist2.push_back(diskDist[idx][d]);
- }
- }
-
- std::sort(diskDist2.begin(), diskDist2.end());
-
- double skew = (diskDist2[num_nodes*num_disks-1]-diskDist2[0])/(diskDist2[num_nodes*num_disks-1]);
-
- EXPECT_LT(skew, 0.2);
-}
-
-TEST(DistributionTest, test_disk_intersection)
-{
- uint16_t num_disks = 8;
- uint16_t num_nodes = 20;
- float max = 0;
- Distribution distr(Distribution::getDefaultDistributionConfig(2, num_nodes, Distribution::MODULO_INDEX));
-
- NodeState nodeState;
- nodeState.setDiskCount(num_disks);
-
- for(uint16_t i=0; i < num_nodes-1; i++) {
- for(uint16_t j=i+1; j < num_nodes; j++) {
- uint64_t count =0;
-//std::cerr << "Comparing node " << i << " and node " << j << ":\n";
- for(int b=0; b < 1000; b++) {
- document::BucketId id(16, b);
- int idxI = distr.getPreferredAvailableDisk(nodeState, i, id);
- int idxJ = distr.getPreferredAvailableDisk(nodeState, j, id);
-//if (b < 50) std::cerr << " " << b << ": " << idxI << ", " << idxJ << "\n";
- if(idxI == idxJ){
- count++;
- }
- }
- if(count > max){
- max = count;
- }
- }
- }
- if (max / 1000 > 0.5) {
- std::ostringstream ost;
- ost << "Value of " << max << " / " << 1000 << " is more than 0.5";
- FAIL() << ost.str();
- }
-}
-
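
The disk-distribution tests removed above all reduce to one capacity-aware skew measurement: count the buckets assigned to each disk, divide by the disk's capacity, and require that the largest normalized count stays close to the average (or to the smallest) count. A minimal sketch of that metric, with illustrative names and the thresholds taken from the removed assertions:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // (max - avg) / max over capacity-normalized bucket counts; the removed tests
    // asserted this (or the max-vs-min variant) below 0.3, 0.2 or 0.05.
    double relative_skew(std::vector<double> counts, const std::vector<double>& capacities) {
        for (std::size_t i = 0; i < counts.size(); ++i) {
            counts[i] /= capacities[i];                      // normalize by capacity
        }
        double max = *std::max_element(counts.begin(), counts.end());
        double sum = 0.0;
        for (double c : counts) sum += c;
        return (max - sum / counts.size()) / max;
    }
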
TEST(DistributionTest, test_distribution)
{
const int min_buckets = 1024*64;
diff --git a/vdslib/src/tests/state/clusterstatetest.cpp b/vdslib/src/tests/state/clusterstatetest.cpp
index 143f3aed0e9..1880683232d 100644
--- a/vdslib/src/tests/state/clusterstatetest.cpp
+++ b/vdslib/src/tests/state/clusterstatetest.cpp
@@ -78,13 +78,6 @@ TEST(ClusterStateTest, test_basic_functionality)
VERIFYNEW("storage:10 .1.s:i .2.s:u .3.s:d .4.s:m .5.s:r",
"storage:10 .1.s:i .1.i:0 .3.s:d .4.s:m .5.s:r");
- // Test legal disk states
- VERIFYNEW("storage:10 .1.d:4 .1.d.0.s:u .1.d.1.s:d",
- "storage:10 .1.d:4 .1.d.1.s:d");
-
- // Test other disk properties
- VERIFYSAMENEW("storage:10 .1.d:4 .1.d.0.c:1.4");
-
// Test other distributor node properties
// (Messages are excluded from system states to not make them too long, as
// most nodes have no use for them)
@@ -143,20 +136,10 @@ TEST(ClusterStateTest, test_error_behaviour)
// VERIFY_FAIL("distributor:4 .2.s:r",
// "Retired is not a legal distributor state");
- // Test illegal storage states
- VERIFY_FAIL("storage:4 .2.d:2 .2.d.5.s:d", "Cannot index disk 5 of 2");
-
// Test blatantly illegal values for known attributes:
VERIFY_FAIL("distributor:4 .2.s:z", "Unknown state z given.*");
VERIFY_FAIL("distributor:4 .2.i:foobar",
".*Init progress must be a floating point number from .*");
- VERIFY_FAIL("storage:4 .2.d:foobar", "Invalid disk count 'foobar'. Need.*");
- VERIFY_FAIL("storage:4 .2.d:2 .2.d.1.s:foobar",
- "Unknown state foobar given.*");
- VERIFY_FAIL("storage:4 .2.d:2 .2.d.1.c:foobar",
- "Illegal disk capacity 'foobar'. Capacity must be a .*");
- VERIFY_FAIL("storage:4 .2.d:2 .2.d.a.s:d",
- "Invalid disk index 'a'. Need a positive integer .*");
// Lacking absolute path first
VERIFY_FAIL(".2.s:d distributor:4", "The first path in system state.*");
@@ -168,34 +151,7 @@ TEST(ClusterStateTest, test_error_behaviour)
VERIFYNEW("distributor:4 .2:foo storage:5 .4:d", "distributor:4 storage:5");
VERIFYNEW("ballalaika:true distributor:4 .2.urk:oj .2.z:foo .2.s:s "
".2.j:foo storage:10 .3.d:4 .3.d.2.a:boo .3.s:s",
- "distributor:4 .2.s:s storage:10 .3.s:s .3.d:4");
-}
-
-TEST(ClusterStateTest, test_backwards_compability)
-{
- // 4.1 and older nodes do not support some features, and the java parser
- // do not allow unknown elements as it was supposed to do, thus we should
- // avoid using new features when talking to 4.1 nodes.
-
- // - 4.1 nodes should not see new cluster, version, initializing and
- // description tags.
- VERIFYOLD("version:4 cluster:i storage:2 .0.s:i .0.i:0.5 .1.m:foobar",
- "distributor:0 storage:2 .0.s:i");
-
- // - 4.1 nodes have only one disk property being state, so in 4.1, a
- // disk state is typically set as .4.d.2:d while in new format it
- // specifies that this is the state .4.d.2.s:d
- VERIFYSAMEOLD("distributor:0 storage:3 .2.d:10 .2.d.4:d");
- VERIFYOLD("distributor:0 storage:3 .2.d:10 .2.d.4.s:d",
- "distributor:0 storage:3 .2.d:10 .2.d.4:d");
-
- // - 4.1 nodes should always have distributor and storage tags with counts.
- VERIFYOLD("storage:4", "distributor:0 storage:4");
- VERIFYOLD("distributor:4", "distributor:4 storage:0");
-
- // - 4.1 nodes should not see the state stopping
- VERIFYOLD("storage:4 .2.s:s", "distributor:0 storage:4 .2.s:d");
-
+ "distributor:4 .2.s:s storage:10 .3.s:s");
}
TEST(ClusterStateTest, test_detailed)
@@ -249,41 +205,7 @@ TEST(ClusterStateTest, test_detailed)
} else {
EXPECT_EQ(State::UP, ns.getState());
}
- // Test disk states
- if (i == 2) {
- EXPECT_EQ(uint16_t(16), ns.getDiskCount());
- } else if (i == 8) {
- EXPECT_EQ(uint16_t(10), ns.getDiskCount());
- } else {
- EXPECT_EQ(uint16_t(0), ns.getDiskCount());
- }
- if (i == 2) {
- for (uint16_t j = 0; j < 16; ++j) {
- if (j == 3) {
- EXPECT_EQ(State::DOWN,
- ns.getDiskState(j).getState());
- } else {
- EXPECT_EQ(State::UP,
- ns.getDiskState(j).getState());
- }
- }
- } else if (i == 8) {
- for (uint16_t j = 0; j < 10; ++j) {
- if (j == 4) {
- EXPECT_DOUBLE_EQ(0.6, ns.getDiskState(j).getCapacity().getValue());
- EXPECT_EQ(
- string("small"),
- ns.getDiskState(j).getDescription());
- } else {
- EXPECT_DOUBLE_EQ(
- 1.0, ns.getDiskState(j).getCapacity().getValue());
- EXPECT_EQ(
- string(""),
- ns.getDiskState(j).getDescription());
- }
- }
- }
- // Test message
+ // Test message
if (i == 6) {
EXPECT_EQ(string("bar\tfoo"), ns.getDescription());
} else {
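
A note on the state strings these tests parse: a cluster state is a whitespace-separated list of <path>:<value> tokens, and this change drops the per-disk paths (.N.d, .N.d.M.s, .N.d.M.c) that the deleted assertions exercised; such tokens are now ignored by the parser. A rough sketch of the remaining node-level usage, assuming the string constructor and accessors already used in the tests above:

    #include <vespa/vdslib/state/clusterstate.h>
    #include <vespa/vdslib/state/nodestate.h>

    using namespace storage::lib;

    void cluster_state_example() {
        // Node 2 in maintenance; disk tokens such as ".2.d:3 .2.d.0:d" are now ignored.
        ClusterState state("distributor:4 storage:10 .2.s:m");
        const NodeState& ns = state.getNodeState(Node(NodeType::STORAGE, 2));
        (void) ns;  // ns.getState() is State::MAINTENANCE for this state string
    }
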
diff --git a/vdslib/src/vespa/vdslib/distribution/distribution.cpp b/vdslib/src/vespa/vdslib/distribution/distribution.cpp
index 474ed63e8c3..c9e1fd1bef6 100644
--- a/vdslib/src/vespa/vdslib/distribution/distribution.cpp
+++ b/vdslib/src/vespa/vdslib/distribution/distribution.cpp
@@ -237,48 +237,6 @@ Distribution::getStorageSeed(
return seed;
}
-uint32_t
-Distribution::getDiskSeed(const document::BucketId& bucket, uint16_t nodeIndex) const
-{
- switch (_diskDistribution) {
- case DiskDistribution::MODULO:
- {
- uint32_t seed(static_cast<uint32_t>(bucket.getRawId())
- & _distributionBitMasks[16]);
- return 0xdeadbeef ^ seed;
- }
- case DiskDistribution::MODULO_INDEX:
- {
- uint32_t seed(static_cast<uint32_t>(bucket.getRawId())
- & _distributionBitMasks[16]);
- return 0xdeadbeef ^ seed ^ nodeIndex;
- }
- case DiskDistribution::MODULO_KNUTH:
- {
- uint32_t seed(static_cast<uint32_t>(bucket.getRawId())
- & _distributionBitMasks[16]);
- return 0xdeadbeef ^ seed ^ (1664525L * nodeIndex + 1013904223L);
- }
- case DiskDistribution::MODULO_BID:
- {
- uint64_t currentid = bucket.withoutCountBits();
- char ordered[8];
- ordered[0] = currentid >> (0*8);
- ordered[1] = currentid >> (1*8);
- ordered[2] = currentid >> (2*8);
- ordered[3] = currentid >> (3*8);
- ordered[4] = currentid >> (4*8);
- ordered[5] = currentid >> (5*8);
- ordered[6] = currentid >> (6*8);
- ordered[7] = currentid >> (7*8);
- uint32_t initval = (1664525 * nodeIndex + 0xdeadbeef);
- return vespalib::BobHash::hash(ordered, 8, initval);
- }
- }
- throw vespalib::IllegalStateException("Unknown disk distribution: "
- + getDiskDistributionName(_diskDistribution), VESPA_STRLOC);
-}
-
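
The removed getDiskSeed() mixed a fixed constant, the low distribution bits of the bucket id and, in most modes, the node index, so that replicas of the same bucket draw different disk choices on different nodes. A standalone restatement of the MODULO_INDEX variant, assuming _distributionBitMasks[16] is the plain low-16-bit mask:

    #include <cstdint>

    // Illustrative only; mirrors the removed MODULO_INDEX case, not current Vespa API.
    uint32_t modulo_index_disk_seed(uint64_t raw_bucket_id, uint16_t node_index) {
        uint32_t bucket_bits = static_cast<uint32_t>(raw_bucket_id) & 0xffffu;  // low 16 bits
        return 0xdeadbeefu ^ bucket_bits ^ node_index;
    }
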
vespalib::string Distribution::getDiskDistributionName(DiskDistribution dist) {
return DistributionConfig::getDiskDistributionName(toConfig(dist));
@@ -294,57 +252,6 @@ Distribution::print(std::ostream& out, bool, const std::string&) const {
out << serialize();
}
-// This function should only depend on disk distribution and node index. It is
-// assumed that any other change, for instance in hierarchical grouping, does
-// not change disk index on disk.
-uint16_t
-Distribution::getIdealDisk(const NodeState& nodeState, uint16_t nodeIndex,
- const document::BucketId& bucket,
- DISK_MODE flag) const
-{
- // Catch special cases in a single if statement
- if (nodeState.getDiskCount() < 2) {
- if (nodeState.getDiskCount() == 1) return 0;
- throw vespalib::IllegalArgumentException(
- "Cannot pick ideal disk without knowing disk count.",
- VESPA_STRLOC);
- }
- RandomGen randomizer(getDiskSeed(bucket, nodeIndex));
- switch (_diskDistribution) {
- case DiskDistribution::MODULO_BID:
- {
- double maxScore = 0.0;
- uint16_t idealDisk = 0xffff;
- for (uint32_t i=0, n=nodeState.getDiskCount(); i<n; ++i) {
- double score = randomizer.nextDouble();
- const DiskState& diskState(nodeState.getDiskState(i));
- if (flag == BEST_AVAILABLE_DISK
- && !diskState.getState().oneOf("uis"))
- {
- continue;
- }
- if (diskState.getCapacity() != 1.0) {
- score = std::pow(score,
- 1.0 / diskState.getCapacity().getValue());
- }
- if (score > maxScore) {
- maxScore = score;
- idealDisk = i;
- }
- }
- if (idealDisk == 0xffff) {
- throw vespalib::IllegalStateException(
- "There are no available disks.", VESPA_STRLOC);
- }
- return idealDisk;
- }
- default:
- {
- return randomizer.nextUint32() % nodeState.getDiskCount();
- }
- }
-}
-
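
The MODULO_BID branch of the removed getIdealDisk() is the classic weighted-random-key selection: draw one uniform number per disk, raise it to 1/capacity, and keep the disk with the largest key. That picks each disk with probability proportional to its capacity while remaining deterministic for a fixed seed. A minimal sketch, with std::mt19937 standing in for the vespalib RandomGen and illustrative names:

    #include <cmath>
    #include <cstddef>
    #include <cstdint>
    #include <random>
    #include <vector>

    uint16_t pick_weighted_disk(const std::vector<double>& capacities, uint32_t seed) {
        std::mt19937 rng(seed);
        std::uniform_real_distribution<double> uniform(0.0, 1.0);
        double best_score = -1.0;
        std::size_t best_disk = 0;
        for (std::size_t i = 0; i < capacities.size(); ++i) {
            double score = std::pow(uniform(rng), 1.0 / capacities[i]);  // key = u^(1 / weight)
            if (score > best_score) {
                best_score = score;
                best_disk = i;
            }
        }
        return static_cast<uint16_t>(best_disk);
    }
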
namespace {
/** Used to record scored groups during ideal groups calculation. */
@@ -579,12 +486,6 @@ Distribution::getIdealNodes(const NodeType& nodeType,
// seed if the node that is out of order is illegal anyways.
const NodeState& nodeState(clusterState.getNodeState(Node(nodeType, nodes[j])));
if (!nodeState.getState().oneOf(upStates)) continue;
- if (nodeState.isAnyDiskDown()) {
- uint16_t idealDiskIndex(getIdealDisk(nodeState, nodes[j], bucket, IDEAL_DISK_EVEN_IF_DOWN));
- if (nodeState.getDiskState(idealDiskIndex).getState() != State::UP) {
- continue;
- }
- }
// Get the score from the random number generator. Make sure we
// pick correct random number. Optimize for the case where we
// pick in rising order.
diff --git a/vdslib/src/vespa/vdslib/distribution/distribution.h b/vdslib/src/vespa/vdslib/distribution/distribution.h
index 14146af918f..db68f123cbf 100644
--- a/vdslib/src/vespa/vdslib/distribution/distribution.h
+++ b/vdslib/src/vespa/vdslib/distribution/distribution.h
@@ -77,15 +77,6 @@ private:
*/
uint32_t getStorageSeed(
const document::BucketId&, const ClusterState&) const;
- /**
- * Get seed to use for ideal state algorithm's random number generator
- * to decide which disk on a storage node this bucket should be mapped to.
- * Uses node index to ensure that copies of buckets go to different disks
- * on different nodes, such that 2 disks missing will have less overlapping
- * data and all disks will add on some extra load if one disk goes missing.
- */
- uint32_t getDiskSeed(
- const document::BucketId&, uint16_t nodeIndex) const;
void getIdealGroups(const document::BucketId& bucket,
const ClusterState& clusterState,
@@ -144,17 +135,6 @@ public:
void print(std::ostream& out, bool, const std::string&) const override;
- enum DISK_MODE {
- IDEAL_DISK_EVEN_IF_DOWN,
- BEST_AVAILABLE_DISK
- };
- uint16_t getIdealDisk(const NodeState&, uint16_t nodeIndex,
- const document::BucketId&, DISK_MODE flag) const;
-
- uint16_t getPreferredAvailableDisk(const NodeState& ns, uint16_t nodeIndex,
- const document::BucketId& bucket) const
- { return getIdealDisk(ns, nodeIndex, bucket, BEST_AVAILABLE_DISK); }
-
/** Simplified wrapper for getIdealNodes() */
std::vector<uint16_t> getIdealStorageNodes(
const ClusterState&, const document::BucketId&,
diff --git a/vdslib/src/vespa/vdslib/state/CMakeLists.txt b/vdslib/src/vespa/vdslib/state/CMakeLists.txt
index 620e86c2677..f6bff6ce9a7 100644
--- a/vdslib/src/vespa/vdslib/state/CMakeLists.txt
+++ b/vdslib/src/vespa/vdslib/state/CMakeLists.txt
@@ -4,7 +4,6 @@ vespa_add_library(vdslib_state OBJECT
nodetype.cpp
node.cpp
state.cpp
- diskstate.cpp
nodestate.cpp
clusterstate.cpp
cluster_state_bundle.cpp
diff --git a/vdslib/src/vespa/vdslib/state/clusterstate.cpp b/vdslib/src/vespa/vdslib/state/clusterstate.cpp
index b90e78104fc..ff792517bf9 100644
--- a/vdslib/src/vespa/vdslib/state/clusterstate.cpp
+++ b/vdslib/src/vespa/vdslib/state/clusterstate.cpp
@@ -217,8 +217,7 @@ ClusterState::serialize(vespalib::asciistream & out, bool ignoreNewFeatures) con
vespalib::asciistream prefix;
prefix << "." << it->first.getIndex() << ".";
vespalib::asciistream ost;
- it->second.serialize(ost, prefix.str(), false, false,
- ignoreNewFeatures);
+ it->second.serialize(ost, prefix.str(), false);
vespalib::stringref content = ost.str();
if (content.size() > 0) {
out << " " << content;
@@ -236,8 +235,7 @@ ClusterState::serialize(vespalib::asciistream & out, bool ignoreNewFeatures) con
vespalib::asciistream prefix;
prefix << "." << it->first.getIndex() << ".";
vespalib::asciistream ost;
- it->second.serialize(ost, prefix.str(), false, false,
- ignoreNewFeatures);
+ it->second.serialize(ost, prefix.str(), false);
vespalib::stringref content = ost.str();
if ( !content.empty()) {
out << " " << content;
diff --git a/vdslib/src/vespa/vdslib/state/diskstate.cpp b/vdslib/src/vespa/vdslib/state/diskstate.cpp
deleted file mode 100644
index 0147b422a8e..00000000000
--- a/vdslib/src/vespa/vdslib/state/diskstate.cpp
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "diskstate.h"
-#include <boost/lexical_cast.hpp>
-#include <vespa/vespalib/text/stringtokenizer.h>
-#include <vespa/document/util/stringutil.h>
-#include <vespa/vespalib/util/exceptions.h>
-#include <vespa/vespalib/stllike/asciistream.h>
-#include <vespa/log/log.h>
-
-LOG_SETUP(".vdslib.diskstate");
-
-namespace storage::lib {
-
-DiskState::DiskState()
- : _state(0),
- _description(""),
- _capacity(1.0)
-{
- setState(State::UP);
-}
-
-DiskState::DiskState(const State& state, vespalib::stringref description,
- double capacity)
- : _state(0),
- _description(description),
- _capacity(1.0)
-{
- setState(state);
- setCapacity(capacity);
-}
-
-DiskState::DiskState(vespalib::stringref serialized)
- : _state(&State::UP),
- _description(""),
- _capacity(1.0)
-{
- vespalib::StringTokenizer st(serialized, " \t\f\r\n");
- st.removeEmptyTokens();
- for (vespalib::StringTokenizer::Iterator it = st.begin();
- it != st.end(); ++it)
- {
- std::string::size_type index = it->find(':');
- if (index == std::string::npos) {
- throw vespalib::IllegalArgumentException(
- "Token " + *it + " does not contain ':': " + serialized,
- VESPA_STRLOC);
- }
- std::string key = it->substr(0, index);
- std::string value = it->substr(index + 1);
- if (key.size() > 0) switch (key[0]) {
- case 's':
- if (key.size() > 1) break;
- setState(State::get(value));
- continue;
- case 'c':
- if (key.size() > 1) break;
- try{
- setCapacity(boost::lexical_cast<double>(value));
- } catch (...) {
- throw vespalib::IllegalArgumentException(
- "Illegal disk capacity '" + value + "'. Capacity "
- "must be a positive floating point number",
- VESPA_STRLOC);
- }
- continue;
- case 'm':
- if (key.size() > 1) break;
- _description = document::StringUtil::unescape(value);
- continue;
- default:
- break;
- }
- LOG(debug, "Unknown key %s in diskstate. Ignoring it, assuming it's a "
- "new feature from a newer version than ourself: %s",
- key.c_str(), vespalib::string(serialized).c_str());
- }
-
-}
-
-void
-DiskState::serialize(vespalib::asciistream & out, vespalib::stringref prefix,
- bool includeDescription, bool useOldFormat) const
-{
- // Always give node state if not part of a system state
- // to prevent empty serialization
- bool empty = true;
- if (*_state != State::UP || prefix.size() == 0) {
- if (useOldFormat && prefix.size() > 0) {
- out << prefix.substr(0, prefix.size() - 1)
- << ":" << _state->serialize();
- } else {
- out << prefix << "s:" << _state->serialize();
- }
- empty = false;
- }
- if (_capacity != 1.0) {
- if (empty) { empty = false; } else { out << ' '; }
- out << prefix << "c:" << _capacity;
- }
- if (includeDescription && _description.size() > 0) {
- if (empty) { empty = false; } else { out << ' '; }
- out << prefix << "m:"
- << document::StringUtil::escape(_description, ' ');
- }
-}
-
-
-void
-DiskState::setState(const State& state)
-{
- if (!state.validDiskState()) {
- throw vespalib::IllegalArgumentException(
- "State " + state.toString() + " is not a valid disk state.",
- VESPA_STRLOC);
- }
- _state = &state;
-}
-
-void
-DiskState::setCapacity(double capacity)
-{
- if (capacity < 0) {
- throw vespalib::IllegalArgumentException(
- "Negative capacity makes no sense.", VESPA_STRLOC);
- }
- _capacity = capacity;
-}
-
-void
-DiskState::print(std::ostream& out, bool verbose,
- const std::string& indent) const
-{
- (void) indent;
- if (verbose) {
- out << "DiskState(" << *_state;
- } else {
- out << _state->serialize();
- }
- if (_capacity != 1.0) {
- out << (verbose ? ", capacity " : ", c ") << _capacity;
- }
- if (_description.size() > 0) {
- out << ": " << _description;
- }
- if (verbose) {
- out << ")";
- }
-}
-
-bool
-DiskState::operator==(const DiskState& other) const
-{
- return (_state == other._state && _capacity == other._capacity);
-}
-
-bool
-DiskState::operator!=(const DiskState& other) const
-{
- return (_state != other._state || _capacity != other._capacity);
-}
-
-}
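
For reference while reading the deletion: DiskState::serialize() emitted space-separated <prefix>s:<state>, <prefix>c:<capacity> and <prefix>m:<description> tokens, omitting defaults, so a down disk 0 on node 1 appeared as ".1.d.0.s:d" in the new format and as ".1.d.0:d" in the old 4.1 format selected by useOldFormat, with a capacity token like ".1.d.0.c:1.4" appended only when the capacity differed from 1.0.
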
diff --git a/vdslib/src/vespa/vdslib/state/diskstate.h b/vdslib/src/vespa/vdslib/state/diskstate.h
deleted file mode 100644
index cb28e24daac..00000000000
--- a/vdslib/src/vespa/vdslib/state/diskstate.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-/**
- * @class vdslib::DiskState
- *
- * Defines the state a given disk can have.
- */
-#pragma once
-
-#include "state.h"
-#include <vespa/document/util/printable.h>
-#include <vespa/vespalib/objects/floatingpointtype.h>
-
-namespace storage::lib {
-
-class DiskState : public document::Printable {
- const State* _state;
- vespalib::string _description;
- vespalib::Double _capacity;
-
-public:
- typedef std::shared_ptr<const DiskState> CSP;
- typedef std::shared_ptr<DiskState> SP;
-
- DiskState();
- DiskState(const State&, vespalib::stringref description = "", double capacity = 1.0);
- explicit DiskState(vespalib::stringref serialized);
-
- void serialize(vespalib::asciistream & out, vespalib::stringref prefix = "",
- bool includeReason = true, bool useOldFormat = false) const;
-
- const State& getState() const { return *_state; }
- vespalib::Double getCapacity() const { return _capacity; }
- const vespalib::string& getDescription() const { return _description; }
-
- void setState(const State& state);
- void setCapacity(double capacity);
- void setDescription(vespalib::stringref desc) { _description = desc; }
-
- void print(std::ostream& out, bool verbose, const std::string& indent) const override;
- bool operator==(const DiskState& other) const;
- bool operator!=(const DiskState& other) const;
-
-};
-
-}
diff --git a/vdslib/src/vespa/vdslib/state/nodestate.cpp b/vdslib/src/vespa/vdslib/state/nodestate.cpp
index 41d42fd5c6f..fc4e18fa0cc 100644
--- a/vdslib/src/vespa/vdslib/state/nodestate.cpp
+++ b/vdslib/src/vespa/vdslib/state/nodestate.cpp
@@ -29,8 +29,6 @@ NodeState::NodeState()
_reliability(1),
_initProgress(0.0),
_minUsedBits(16),
- _diskStates(),
- _anyDiskDown(false),
_startTimestamp(0)
{
setState(State::UP);
@@ -46,8 +44,6 @@ NodeState::NodeState(const NodeType& type, const State& state,
_reliability(1),
_initProgress(0.0),
_minUsedBits(16),
- _diskStates(),
- _anyDiskDown(false),
_startTimestamp(0)
{
setState(state);
@@ -57,27 +53,6 @@ NodeState::NodeState(const NodeType& type, const State& state,
}
}
-namespace {
- struct DiskData {
- bool empty;
- uint16_t diskIndex;
- std::ostringstream ost;
-
- DiskData() : empty(true), diskIndex(0), ost() {}
-
- void addTo(std::vector<DiskState>& diskStates) {
- if (!empty) {
- while (diskIndex >= diskStates.size()) {
- diskStates.push_back(DiskState(State::UP));
- }
- diskStates[diskIndex] = DiskState(ost.str());
- empty = true;
- ost.str("");
- }
- }
- };
-}
-
NodeState::NodeState(vespalib::stringref serialized, const NodeType* type)
: _type(type),
_state(&State::UP),
@@ -86,14 +61,11 @@ NodeState::NodeState(vespalib::stringref serialized, const NodeType* type)
_reliability(1),
_initProgress(0.0),
_minUsedBits(16),
- _diskStates(),
- _anyDiskDown(false),
_startTimestamp(0)
{
vespalib::StringTokenizer st(serialized, " \t\f\r\n");
st.removeEmptyTokens();
- DiskData diskData;
for (vespalib::StringTokenizer::Iterator it = st.begin();
it != st.end(); ++it)
{
@@ -172,58 +144,6 @@ NodeState::NodeState(vespalib::stringref serialized, const NodeType* type)
if (key.size() > 1) break;
_description = document::StringUtil::unescape(value);
continue;
- case 'd':
- {
- if (_type != 0 && *type != NodeType::STORAGE) break;
- if (key.size() == 1) {
- uint16_t size(0);
- try{
- size = boost::lexical_cast<uint16_t>(value);
- } catch (...) {
- throw vespalib::IllegalArgumentException(
- "Invalid disk count '" + value + "'. Need a "
- "positive integer value", VESPA_STRLOC);
- }
- while (_diskStates.size() < size) {
- _diskStates.push_back(DiskState(State::UP));
- }
- continue;
- }
- if (key[1] != '.') break;
- uint16_t diskIndex;
- std::string::size_type endp = key.find('.', 2);
- std::string indexStr;
- if (endp == std::string::npos) {
- indexStr = key.substr(2);
- } else {
- indexStr = key.substr(2, endp - 2);
- }
- try{
- diskIndex = boost::lexical_cast<uint16_t>(indexStr);
- } catch (...) {
- throw vespalib::IllegalArgumentException(
- "Invalid disk index '" + indexStr + "'. Need a "
- "positive integer value", VESPA_STRLOC);
- }
- if (diskIndex >= _diskStates.size()) {
- std::ostringstream ost;
- ost << "Cannot index disk " << diskIndex << " of "
- << _diskStates.size();
- throw vespalib::IllegalArgumentException(
- ost.str(), VESPA_STRLOC);
- }
- if (diskData.diskIndex != diskIndex) {
- diskData.addTo(_diskStates);
- }
- if (endp == std::string::npos) {
- diskData.ost << " s:" << value;
- } else {
- diskData.ost << " " << key.substr(endp + 1) << ':' << value;
- }
- diskData.diskIndex = diskIndex;
- diskData.empty = false;
- continue;
- }
default:
break;
}
@@ -231,19 +151,6 @@ NodeState::NodeState(vespalib::stringref serialized, const NodeType* type)
"new feature from a newer version than ourself: %s",
key.c_str(), vespalib::string(serialized).c_str());
}
- diskData.addTo(_diskStates);
- updateAnyDiskDownFlag();
-}
-
-void
-NodeState::updateAnyDiskDownFlag() {
- bool anyDown = false;
- for (uint32_t i=0; i<_diskStates.size(); ++i) {
- if (_diskStates[i].getState() != State::UP) {
- anyDown = true;
- }
- }
- _anyDiskDown = anyDown;
}
namespace {
@@ -270,19 +177,14 @@ namespace {
void
NodeState::serialize(vespalib::asciistream & out, vespalib::stringref prefix,
- bool includeDescription, bool includeDiskDescription,
- bool useOldFormat) const
+ bool includeDescription) const
{
SeparatorPrinter sep;
// Always give node state if not part of a system state
// to prevent empty serialization
if (*_state != State::UP || prefix.size() == 0) {
out << sep << prefix << "s:";
- if (useOldFormat && *_state == State::STOPPING) {
- out << State::DOWN.serialize();
- } else {
- out << _state->serialize();
- }
+ out << _state->serialize();
}
if (_capacity != 1.0) {
out << sep << prefix << "c:" << _capacity;
@@ -293,45 +195,18 @@ NodeState::serialize(vespalib::asciistream & out, vespalib::stringref prefix,
if (_minUsedBits != 16) {
out << sep << prefix << "b:" << _minUsedBits;
}
- if (*_state == State::INITIALIZING && !useOldFormat) {
+ if (*_state == State::INITIALIZING) {
out << sep << prefix << "i:" << _initProgress;
}
if (_startTimestamp != 0) {
out << sep << prefix << "t:" << _startTimestamp;
}
- if (_diskStates.size() > 0) {
- out << sep << prefix << "d:" << _diskStates.size();
- for (uint16_t i = 0; i < _diskStates.size(); ++i) {
- vespalib::asciistream diskPrefix;
- diskPrefix << prefix << "d." << i << ".";
- vespalib::asciistream disk;
- _diskStates[i].serialize(disk, diskPrefix.str(),
- includeDiskDescription, useOldFormat);
- if ( ! disk.str().empty()) {
- out << " " << disk.str();
- }
- }
- }
if (includeDescription && ! _description.empty()) {
out << sep << prefix << "m:"
<< document::StringUtil::escape(_description, ' ');
}
}
-const DiskState&
-NodeState::getDiskState(uint16_t index) const
-{
- static const DiskState defaultState(State::UP);
- if (_diskStates.size() == 0) return defaultState;
- if (index >= _diskStates.size()) {
- std::ostringstream ost;
- ost << "Cannot get status of disk " << index << " of "
- << _diskStates.size() << ".";
- throw vespalib::IllegalArgumentException(ost.str(), VESPA_STRLOC);
- }
- return _diskStates[index];
-}
-
void
NodeState::setState(const State& state)
{
@@ -412,32 +287,6 @@ NodeState::setStartTimestamp(uint64_t startTimestamp)
}
void
-NodeState::setDiskCount(uint16_t count)
-{
- while (_diskStates.size() > count) {
- _diskStates.pop_back();
- }
- _diskStates.reserve(count);
- while (_diskStates.size() < count) {
- _diskStates.push_back(DiskState(State::UP));
- }
- updateAnyDiskDownFlag();
-}
-
-void
-NodeState::setDiskState(uint16_t index, const DiskState& state)
-{
- if (index >= _diskStates.size()) {
- throw vespalib::IllegalArgumentException(
- vespalib::make_string("Can't set state of disk %u of %u.",
- index, (uint32_t) _diskStates.size()),
- VESPA_STRLOC);
- }
- _diskStates[index] = state;
- updateAnyDiskDownFlag();
-}
-
-void
NodeState::print(std::ostream& out, bool verbose,
const std::string& indent) const
{
@@ -463,20 +312,6 @@ NodeState::print(std::ostream& out, bool verbose,
if (_startTimestamp != 0) {
out << ", start timestamp " << _startTimestamp;
}
- if (_diskStates.size() > 0) {
- bool printedHeader = false;
- for (uint32_t i=0; i<_diskStates.size(); ++i) {
- if (_diskStates[i] != DiskState(State::UP)) {
- if (!printedHeader) {
- out << ",";
- printedHeader = true;
- }
- out << " Disk " << i << "(";
- _diskStates[i].print(out, false, indent);
- out << ")";
- }
- }
- }
if (_description.size() > 0) {
out << ": " << _description;
}
@@ -495,13 +330,6 @@ NodeState::operator==(const NodeState& other) const
{
return false;
}
- for (uint32_t i=0, n=std::max(_diskStates.size(), other._diskStates.size());
- i < n; ++i)
- {
- if (getDiskState(i) != other.getDiskState(i)) {
- return false;
- }
- }
return true;
}
@@ -524,13 +352,6 @@ NodeState::similarTo(const NodeState& other) const
return false;
}
}
- for (uint32_t i=0, n=std::max(_diskStates.size(), other._diskStates.size());
- i < n; ++i)
- {
- if (getDiskState(i) != other.getDiskState(i)) {
- return false;
- }
- }
return true;
}
@@ -589,18 +410,6 @@ NodeState::getTextualDifference(const NodeState& other) const {
target << ", start timestamp " << other._startTimestamp;
}
- if (_diskStates.size() != other._diskStates.size()) {
- source << ", " << _diskStates.size() << " disks";
- target << ", " << other._diskStates.size() << " disks";
- } else {
- for (uint32_t i=0; i<_diskStates.size(); ++i) {
- if (_diskStates[i] != other._diskStates[i]) {
- source << ", disk " << i << _diskStates[i];
- target << ", disk " << i << other._diskStates[i];
- }
- }
- }
-
if (source.str().length() < 2 || target.str().length() < 2) {
return "no change";
}
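
With the disk machinery gone, NodeState::serialize() keeps only the output stream, the token prefix and the include-description flag. A rough round-trip sketch, assuming the string constructor used elsewhere in these tests (exact numeric formatting may differ):

    #include <vespa/vdslib/state/nodestate.h>
    #include <vespa/vespalib/stllike/asciistream.h>

    using namespace storage::lib;

    void node_state_example() {
        NodeState ns("s:i i:0.5", &NodeType::STORAGE);  // initializing storage node, 50% done
        vespalib::asciistream out;
        ns.serialize(out, ".2.", true);                 // previously (out, prefix, desc, diskDesc, oldFormat)
        // out.str() is on the order of ".2.s:i .2.i:0.5" -- no ".2.d.*" tokens any more.
    }
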
diff --git a/vdslib/src/vespa/vdslib/state/nodestate.h b/vdslib/src/vespa/vdslib/state/nodestate.h
index 6317cb3fa84..a313c35704d 100644
--- a/vdslib/src/vespa/vdslib/state/nodestate.h
+++ b/vdslib/src/vespa/vdslib/state/nodestate.h
@@ -10,8 +10,9 @@
*/
#pragma once
-#include "diskstate.h"
+#include "state.h"
#include <vespa/document/bucket/bucketidfactory.h>
+#include <vespa/vespalib/objects/floatingpointtype.h>
namespace storage::lib {
@@ -24,12 +25,8 @@ class NodeState : public document::Printable
uint16_t _reliability;
vespalib::Double _initProgress;
uint32_t _minUsedBits;
- std::vector<DiskState> _diskStates;
- bool _anyDiskDown;
uint64_t _startTimestamp;
- void updateAnyDiskDownFlag();
-
public:
typedef std::shared_ptr<const NodeState> CSP;
typedef std::shared_ptr<NodeState> SP;
@@ -55,9 +52,7 @@ public:
* recreate the nodestate with NodeState(string) function.
*/
void serialize(vespalib::asciistream & out, vespalib::stringref prefix = "",
- bool includeDescription = true,
- bool includeDiskDescription = false,
- bool useOldFormat = false) const;
+ bool includeDescription = true) const;
const State& getState() const { return *_state; }
vespalib::Double getCapacity() const { return _capacity; }
@@ -67,10 +62,6 @@ public:
const vespalib::string& getDescription() const { return _description; }
uint64_t getStartTimestamp() const { return _startTimestamp; }
- bool isAnyDiskDown() const { return _anyDiskDown; }
- uint16_t getDiskCount() const { return _diskStates.size(); }
- const DiskState& getDiskState(uint16_t index) const;
-
void setState(const State& state);
void setCapacity(vespalib::Double capacity);
void setMinUsedBits(uint32_t usedBits);
@@ -79,9 +70,6 @@ public:
void setStartTimestamp(uint64_t startTimestamp);
void setDescription(vespalib::stringref desc) { _description = desc; }
- void setDiskCount(uint16_t count);
- void setDiskState(uint16_t index, const DiskState&);
-
void print(std::ostream& out, bool verbose,
const std::string& indent) const override;
bool operator==(const NodeState& other) const;
diff --git a/vdslib/src/vespa/vdslib/state/state.cpp b/vdslib/src/vespa/vdslib/state/state.cpp
index 5bf95b5530b..96829905c8a 100644
--- a/vdslib/src/vespa/vdslib/state/state.cpp
+++ b/vdslib/src/vespa/vdslib/state/state.cpp
@@ -8,19 +8,19 @@ namespace storage {
namespace lib {
const State State::UNKNOWN("Unknown", "-", 0,
- false, true, true, false, false, false);
+ true, true, false, false, false);
const State State::MAINTENANCE("Maintenance", "m", 1,
- false, false, false, true, true, false);
+ false, false, true, true, false);
const State State::DOWN("Down", "d", 2,
- true, false, false, true, true, true);
+ false, false, true, true, true);
const State State::STOPPING("Stopping", "s", 3,
- false, true, true, false, false, true);
+ true, true, false, false, true);
const State State::INITIALIZING("Initializing", "i", 4,
- false, true, true, false, false, true);
+ true, true, false, false, true);
const State State::RETIRED("Retired", "r", 5,
- false, false, false, true, true, false);
+ false, false, true, true, false);
const State State::UP("Up", "u", 6,
- true, true, true, true, true, true);
+ true, true, true, true, true);
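
With the leading validDisk flag gone, the five booleans on each State constant map, in order, to the remaining constructor parameters: validDistributorReported, validStorageReported, validDistributorWanted, validStorageWanted and validCluster.
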
const State&
State::get(vespalib::stringref serialized)
@@ -40,14 +40,13 @@ State::get(vespalib::stringref serialized)
}
State::State(vespalib::stringref name, vespalib::stringref serialized,
- uint8_t rank, bool validDisk,
+ uint8_t rank,
bool validDistributorReported, bool validStorageReported,
bool validDistributorWanted, bool validStorageWanted,
bool validCluster)
: _name(name),
_serialized(serialized),
_rankValue(rank),
- _validDiskState(validDisk),
_validReportedNodeState(2),
_validWantedNodeState(2),
_validClusterState(validCluster)
diff --git a/vdslib/src/vespa/vdslib/state/state.h b/vdslib/src/vespa/vdslib/state/state.h
index bec500a082d..61747f5eed2 100644
--- a/vdslib/src/vespa/vdslib/state/state.h
+++ b/vdslib/src/vespa/vdslib/state/state.h
@@ -4,7 +4,7 @@
*
* Defines legal states for various uses. Split this into its own class such
* that we can easily see what states are legal to use in what situations.
- * They double as disk states and node states nodes report they are in, and
+ * They double as node states nodes report they are in, and
* wanted states set external sources.
*/
#pragma once
@@ -20,14 +20,13 @@ class State : public vespalib::Printable {
vespalib::string _name;
vespalib::string _serialized;
uint8_t _rankValue;
- bool _validDiskState;
std::vector<bool> _validReportedNodeState;
std::vector<bool> _validWantedNodeState;
bool _validClusterState;
State(const State&);
State(vespalib::stringref name, vespalib::stringref serialized,
- uint8_t rank, bool validDisk,
+ uint8_t rank,
bool validDistributorReported, bool validStorageReported,
bool validDistributorWanted, bool validStorageWanted,
bool validCluster);
@@ -48,7 +47,6 @@ public:
static const State& get(vespalib::stringref serialized);
const vespalib::string& serialize() const { return _serialized; }
- bool validDiskState() const { return _validDiskState; }
bool validReportedNodeState(const NodeType& node) const
{ return _validReportedNodeState[node]; }
bool validWantedNodeState(const NodeType& node) const
diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/LocalDataVisitorHandler.java b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/LocalDataVisitorHandler.java
deleted file mode 100644
index 325c5492776..00000000000
--- a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/LocalDataVisitorHandler.java
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.document.restapi;
-
-import com.yahoo.document.Document;
-import com.yahoo.document.DocumentId;
-import com.yahoo.document.json.JsonWriter;
-import com.yahoo.documentapi.DumpVisitorDataHandler;
-import com.yahoo.exception.ExceptionUtils;
-
-import java.nio.charset.StandardCharsets;
-
-/**
- * Handling data from visit.
- *
- * @author dybis
- */
-class LocalDataVisitorHandler extends DumpVisitorDataHandler {
-
- StringBuilder commaSeparatedJsonDocuments = new StringBuilder();
- final StringBuilder errors = new StringBuilder();
-
- private boolean isFirst = true;
- private final Object monitor = new Object();
-
- String getErrors() {
- return errors.toString();
- }
-
- String getCommaSeparatedJsonDocuments() {
- return commaSeparatedJsonDocuments.toString();
- }
-
- @Override
- public void onDocument(Document document, long l) {
- try {
- final String docJson = new String(JsonWriter.toByteArray(document), StandardCharsets.UTF_8.name());
- synchronized (monitor) {
- if (!isFirst) {
- commaSeparatedJsonDocuments.append(",");
- }
- isFirst = false;
- commaSeparatedJsonDocuments.append(docJson);
- }
- } catch (Exception e) {
- synchronized (monitor) {
- errors.append(ExceptionUtils.getStackTraceAsString(e)).append("\n");
- }
- }
- }
-
- // TODO: Not sure if we should support removal or not. Do nothing here maybe?
- @Override
- public void onRemove(DocumentId documentId) {
- try {
- final String removeJson = new String(JsonWriter.documentRemove(documentId), StandardCharsets.UTF_8.name());
- synchronized (monitor) {
- if (!isFirst) {
- commaSeparatedJsonDocuments.append(",");
- }
- isFirst = false;
- commaSeparatedJsonDocuments.append(removeJson);
- }
- } catch (Exception e) {
- synchronized (monitor) {
- errors.append(ExceptionUtils.getStackTraceAsString(e)).append("\n");
- }
- }
- }
-
-}
diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/OperationHandler.java b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/OperationHandler.java
deleted file mode 100644
index 848fe4b5726..00000000000
--- a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/OperationHandler.java
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.document.restapi;
-
-import com.yahoo.vespaxmlparser.FeedOperation;
-
-import java.util.Optional;
-
-/**
- * Abstract the backend stuff for the REST API, such as retrieving or updating documents.
- *
- * @author Haakon Dybdahl
- */
-public interface OperationHandler {
-
- class VisitResult {
-
- public final Optional<String> token;
- public final String documentsAsJsonList;
-
- public VisitResult(Optional<String> token, String documentsAsJsonList) {
- this.token = token;
- this.documentsAsJsonList = documentsAsJsonList;
- }
- }
-
- class VisitOptions {
- public final Optional<String> cluster;
- public final Optional<String> continuation;
- public final Optional<Integer> wantedDocumentCount;
- public final Optional<String> fieldSet;
- public final Optional<Integer> concurrency;
- public final Optional<String> bucketSpace;
-
- private VisitOptions(Builder builder) {
- this.cluster = Optional.ofNullable(builder.cluster);
- this.continuation = Optional.ofNullable(builder.continuation);
- this.wantedDocumentCount = Optional.ofNullable(builder.wantedDocumentCount);
- this.fieldSet = Optional.ofNullable(builder.fieldSet);
- this.concurrency = Optional.ofNullable(builder.concurrency);
- this.bucketSpace = Optional.ofNullable(builder.bucketSpace);
- }
-
- public static class Builder {
- String cluster;
- String continuation;
- Integer wantedDocumentCount;
- String fieldSet;
- Integer concurrency;
- String bucketSpace;
-
- public Builder cluster(String cluster) {
- this.cluster = cluster;
- return this;
- }
-
- public Builder continuation(String continuation) {
- this.continuation = continuation;
- return this;
- }
-
- public Builder wantedDocumentCount(Integer count) {
- this.wantedDocumentCount = count;
- return this;
- }
-
- public Builder fieldSet(String fieldSet) {
- this.fieldSet = fieldSet;
- return this;
- }
-
- public Builder concurrency(Integer concurrency) {
- this.concurrency = concurrency;
- return this;
- }
-
- public Builder bucketSpace(String bucketSpace) {
- this.bucketSpace = bucketSpace;
- return this;
- }
-
- public VisitOptions build() {
- return new VisitOptions(this);
- }
- }
-
- public static Builder builder() {
- return new Builder();
- }
- }
-
- VisitResult visit(RestUri restUri, String documentSelection, VisitOptions options) throws RestApiException;
-
- void put(RestUri restUri, FeedOperation data, Optional<String> route) throws RestApiException;
-
- void update(RestUri restUri, FeedOperation data, Optional<String> route) throws RestApiException;
-
- void delete(RestUri restUri, String condition, Optional<String> route) throws RestApiException;
-
- Optional<String> get(RestUri restUri) throws RestApiException;
-
- default Optional<String> get(RestUri restUri, Optional<String> fieldSet) throws RestApiException {
- return get(restUri);
- }
-
- default Optional<String> get(RestUri restUri, Optional<String> fieldSet, Optional<String> cluster) throws RestApiException {
- return get(restUri, fieldSet);
- }
-
- /** Called just before this is disposed of */
- default void shutdown() {}
-
-}
diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/OperationHandlerImpl.java b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/OperationHandlerImpl.java
deleted file mode 100644
index 3d3a8fc52ad..00000000000
--- a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/OperationHandlerImpl.java
+++ /dev/null
@@ -1,460 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.document.restapi;
-
-import com.yahoo.document.Document;
-import com.yahoo.document.DocumentId;
-import com.yahoo.document.DocumentRemove;
-import com.yahoo.document.FixedBucketSpaces;
-import com.yahoo.document.TestAndSetCondition;
-import com.yahoo.document.fieldset.AllFields;
-import com.yahoo.document.json.JsonWriter;
-import com.yahoo.document.DocumentPut;
-import com.yahoo.documentapi.DocumentAccess;
-import com.yahoo.documentapi.DocumentAccessException;
-import com.yahoo.documentapi.ProgressToken;
-import com.yahoo.documentapi.SyncParameters;
-import com.yahoo.documentapi.SyncSession;
-import com.yahoo.documentapi.VisitorControlHandler;
-import com.yahoo.documentapi.VisitorParameters;
-import com.yahoo.documentapi.VisitorSession;
-import com.yahoo.documentapi.messagebus.MessageBusSyncSession;
-import com.yahoo.documentapi.messagebus.protocol.DocumentProtocol;
-import com.yahoo.documentapi.metrics.DocumentApiMetrics;
-import com.yahoo.documentapi.metrics.DocumentOperationStatus;
-import com.yahoo.documentapi.metrics.DocumentOperationType;
-import com.yahoo.exception.ExceptionUtils;
-import com.yahoo.messagebus.StaticThrottlePolicy;
-import com.yahoo.metrics.simple.MetricReceiver;
-import com.yahoo.vespaclient.ClusterDef;
-import com.yahoo.vespaxmlparser.FeedOperation;
-import com.yahoo.yolean.concurrent.ConcurrentResourcePool;
-import com.yahoo.yolean.concurrent.ResourceFactory;
-
-import java.io.ByteArrayOutputStream;
-import java.nio.charset.StandardCharsets;
-import java.time.Instant;
-import java.util.List;
-import java.util.Optional;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-/**
- * Sends operations to messagebus via document api.
- *
- * @author dybis
- */
-public class OperationHandlerImpl implements OperationHandler {
-
- public interface ClusterEnumerator {
- List<ClusterDef> enumerateClusters();
- }
-
- public interface BucketSpaceResolver {
- Optional<String> clusterBucketSpaceFromDocumentType(String clusterId, String docType);
- }
-
- public static class BucketSpaceRoute {
- private final String clusterRoute;
- private final String bucketSpace;
-
- public BucketSpaceRoute(String clusterRoute, String bucketSpace) {
- this.clusterRoute = clusterRoute;
- this.bucketSpace = bucketSpace;
- }
-
- public String getClusterRoute() {
- return clusterRoute;
- }
-
- public String getBucketSpace() {
- return bucketSpace;
- }
- }
-
- public static final int VISIT_TIMEOUT_MS = 120000;
- public static final int WANTED_DOCUMENT_COUNT_UPPER_BOUND = 1000; // Approximates the max default size of a bucket
- public static final int CONCURRENCY_UPPER_BOUND = 100;
- private final DocumentAccess documentAccess;
- private final DocumentApiMetrics metricsHelper;
- private final ClusterEnumerator clusterEnumerator;
- private final BucketSpaceResolver bucketSpaceResolver;
-
- private static final class SyncSessionFactory extends ResourceFactory<SyncSession> {
- private final DocumentAccess documentAccess;
- SyncSessionFactory(DocumentAccess documentAccess) {
- this.documentAccess = documentAccess;
- }
- @Override
- public SyncSession create() {
- return documentAccess.createSyncSession(new SyncParameters.Builder().build());
- }
- }
-
- private final ConcurrentResourcePool<SyncSession> syncSessions;
-
- public OperationHandlerImpl(DocumentAccess documentAccess, ClusterEnumerator clusterEnumerator,
- BucketSpaceResolver bucketSpaceResolver, MetricReceiver metricReceiver) {
- this.documentAccess = documentAccess;
- this.clusterEnumerator = clusterEnumerator;
- this.bucketSpaceResolver = bucketSpaceResolver;
- syncSessions = new ConcurrentResourcePool<>(new SyncSessionFactory(documentAccess));
- metricsHelper = new DocumentApiMetrics(metricReceiver, "documentV1");
- }
-
- @Override
- public void shutdown() {
- for (SyncSession session : syncSessions) {
- session.destroy();
- }
- documentAccess.shutdown();
- }
-
- private static final int HTTP_STATUS_BAD_REQUEST = 400;
- private static final int HTTP_STATUS_INSUFFICIENT_STORAGE = 507;
- private static final int HTTP_PRECONDITION_FAILED = 412;
-
- public static int getHTTPStatusCode(Set<Integer> errorCodes) {
- if (errorCodes.size() == 1 && errorCodes.contains(DocumentProtocol.ERROR_NO_SPACE)) {
- return HTTP_STATUS_INSUFFICIENT_STORAGE;
- }
- if (errorCodes.contains(DocumentProtocol.ERROR_TEST_AND_SET_CONDITION_FAILED)) {
- return HTTP_PRECONDITION_FAILED;
- }
- return HTTP_STATUS_BAD_REQUEST;
- }
-
- private static Response createErrorResponse(DocumentAccessException documentException, RestUri restUri) {
- if (documentException.hasConditionNotMetError()) {
- return Response.createErrorResponse(getHTTPStatusCode(documentException.getErrorCodes()), "Condition did not match document.",
- restUri, RestUri.apiErrorCodes.DOCUMENT_CONDITION_NOT_MET);
- }
- return Response.createErrorResponse(getHTTPStatusCode(documentException.getErrorCodes()), documentException.getMessage(), restUri,
- RestUri.apiErrorCodes.DOCUMENT_EXCEPTION);
- }
-
- @Override
- public VisitResult visit(RestUri restUri, String documentSelection, VisitOptions options) throws RestApiException {
- VisitorParameters visitorParameters = createVisitorParameters(restUri, documentSelection, options);
-
- VisitorControlHandler visitorControlHandler = new VisitorControlHandler();
- visitorParameters.setControlHandler(visitorControlHandler);
- LocalDataVisitorHandler localDataVisitorHandler = new LocalDataVisitorHandler();
- visitorParameters.setLocalDataHandler(localDataVisitorHandler);
-
- final VisitorSession visitorSession;
- try {
- visitorSession = documentAccess.createVisitorSession(visitorParameters);
- // Not sure if this line is required
- visitorControlHandler.setSession(visitorSession);
- } catch (Exception e) {
- throw new RestApiException(Response.createErrorResponse(
- 500,
- "Failed during parsing of arguments for visiting: " + ExceptionUtils.getStackTraceAsString(e),
- restUri,
- RestUri.apiErrorCodes.VISITOR_ERROR));
- }
- try {
- return doVisit(visitorControlHandler, localDataVisitorHandler, restUri);
- } finally {
- visitorSession.destroy();
- }
- }
-
- private static void throwIfFatalVisitingError(VisitorControlHandler handler, RestUri restUri) throws RestApiException {
- final VisitorControlHandler.Result result = handler.getResult();
- if (result.getCode() == VisitorControlHandler.CompletionCode.TIMEOUT) {
- if (! handler.hasVisitedAnyBuckets()) {
- throw new RestApiException(Response.createErrorResponse(500, "Timed out", restUri, RestUri.apiErrorCodes.TIME_OUT));
- } // else: some progress has been made, let client continue with new token.
- } else if (result.getCode() != VisitorControlHandler.CompletionCode.SUCCESS) {
- throw new RestApiException(Response.createErrorResponse(400, result.toString(), RestUri.apiErrorCodes.VISITOR_ERROR));
- }
- }
-
- private VisitResult doVisit(VisitorControlHandler visitorControlHandler,
- LocalDataVisitorHandler localDataVisitorHandler,
- RestUri restUri) throws RestApiException {
- try {
- visitorControlHandler.waitUntilDone(); // VisitorParameters' session timeout implicitly triggers timeout failures.
- throwIfFatalVisitingError(visitorControlHandler, restUri);
- } catch (InterruptedException e) {
- throw new RestApiException(Response.createErrorResponse(500, ExceptionUtils.getStackTraceAsString(e), restUri, RestUri.apiErrorCodes.INTERRUPTED));
- }
- if (localDataVisitorHandler.getErrors().isEmpty()) {
- Optional<String> continuationToken;
- if (! visitorControlHandler.getProgress().isFinished()) {
- continuationToken = Optional.of(visitorControlHandler.getProgress().serializeToString());
- } else {
- continuationToken = Optional.empty();
- }
- return new VisitResult(continuationToken, localDataVisitorHandler.getCommaSeparatedJsonDocuments());
- }
- throw new RestApiException(Response.createErrorResponse(500, localDataVisitorHandler.getErrors(), restUri, RestUri.apiErrorCodes.UNSPECIFIED));
- }
-
- private void setRoute(SyncSession session, Optional<String> route) throws RestApiException {
- if (! (session instanceof MessageBusSyncSession)) {
- // Not sure if this ever could happen but better be safe.
- throw new RestApiException(Response.createErrorResponse(
- 400, "Can not set route since the API is not using message bus.",
- RestUri.apiErrorCodes.NO_ROUTE_WHEN_NOT_PART_OF_MESSAGEBUS));
- }
- ((MessageBusSyncSession) session).setRoute(route.orElse("default"));
- }
-
- @Override
- public void put(RestUri restUri, FeedOperation data, Optional<String> route) throws RestApiException {
- SyncSession syncSession = syncSessions.alloc();
- Response response;
- try {
- Instant startTime = Instant.now();
- DocumentPut put = new DocumentPut(data.getDocument());
- put.setCondition(data.getCondition());
- setRoute(syncSession, route);
- syncSession.put(put);
- metricsHelper.reportSuccessful(DocumentOperationType.PUT, startTime);
- return;
- } catch (DocumentAccessException documentException) {
- response = createErrorResponse(documentException, restUri);
- } catch (Exception e) {
- response = Response.createErrorResponse(500, ExceptionUtils.getStackTraceAsString(e), restUri, RestUri.apiErrorCodes.INTERNAL_EXCEPTION);
- } finally {
- syncSessions.free(syncSession);
- }
-
- metricsHelper.reportFailure(DocumentOperationType.PUT, DocumentOperationStatus.fromHttpStatusCode(response.getStatus()));
- throw new RestApiException(response);
- }
-
- @Override
- public void update(RestUri restUri, FeedOperation data, Optional<String> route) throws RestApiException {
- SyncSession syncSession = syncSessions.alloc();
- Response response;
- try {
- Instant startTime = Instant.now();
- setRoute(syncSession, route);
- syncSession.update(data.getDocumentUpdate());
- metricsHelper.reportSuccessful(DocumentOperationType.UPDATE, startTime);
- return;
- } catch (DocumentAccessException documentException) {
- response = createErrorResponse(documentException, restUri);
- } catch (Exception e) {
- response = Response.createErrorResponse(500, ExceptionUtils.getStackTraceAsString(e), restUri, RestUri.apiErrorCodes.INTERNAL_EXCEPTION);
- } finally {
- syncSessions.free(syncSession);
- }
-
- metricsHelper.reportFailure(DocumentOperationType.UPDATE, DocumentOperationStatus.fromHttpStatusCode(response.getStatus()));
- throw new RestApiException(response);
- }
-
- @Override
- public void delete(RestUri restUri, String condition, Optional<String> route) throws RestApiException {
- SyncSession syncSession = syncSessions.alloc();
- Response response;
- try {
- Instant startTime = Instant.now();
- DocumentId id = new DocumentId(restUri.generateFullId());
- DocumentRemove documentRemove = new DocumentRemove(id);
- setRoute(syncSession, route);
- if (condition != null && ! condition.isEmpty()) {
- documentRemove.setCondition(new TestAndSetCondition(condition));
- }
- syncSession.remove(documentRemove);
- metricsHelper.reportSuccessful(DocumentOperationType.REMOVE, startTime);
- return;
- } catch (DocumentAccessException documentException) {
- if (documentException.hasConditionNotMetError()) {
- response = Response.createErrorResponse(412, "Condition not met: " + documentException.getMessage(),
- restUri, RestUri.apiErrorCodes.DOCUMENT_CONDITION_NOT_MET);
- } else {
- response = Response.createErrorResponse(400, documentException.getMessage(), restUri, RestUri.apiErrorCodes.DOCUMENT_EXCEPTION);
- }
- } catch (Exception e) {
- response = Response.createErrorResponse(500, ExceptionUtils.getStackTraceAsString(e), restUri, RestUri.apiErrorCodes.UNSPECIFIED);
- } finally {
- syncSessions.free(syncSession);
- }
-
- metricsHelper.reportFailure(DocumentOperationType.REMOVE, DocumentOperationStatus.fromHttpStatusCode(response.getStatus()));
- throw new RestApiException(response);
- }
-
- @Override
- public Optional<String> get(RestUri restUri, Optional<String> fieldSet, Optional<String> cluster) throws RestApiException {
- SyncSession syncSession = syncSessions.alloc();
- // Explicit unary used instead of map() due to unhandled exceptions, blargh.
- Optional<String> route = cluster.isPresent()
- ? Optional.of(clusterDefToRoute(resolveClusterDef(cluster, clusterEnumerator.enumerateClusters())))
- : Optional.empty();
- setRoute(syncSession, route);
- try {
- DocumentId id = new DocumentId(restUri.generateFullId());
- final Document document = syncSession.get(id, fieldSet.orElse(restUri.getDocumentType() + ":[document]"), DocumentProtocol.Priority.NORMAL_1);
- if (document == null) {
- return Optional.empty();
- }
- ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
- JsonWriter jsonWriter = new JsonWriter(outputStream);
- jsonWriter.write(document);
- return Optional.of(outputStream.toString(StandardCharsets.UTF_8.name()));
-
- } catch (Exception e) {
- throw new RestApiException(Response.createErrorResponse(500, ExceptionUtils.getStackTraceAsString(e), restUri, RestUri.apiErrorCodes.UNSPECIFIED));
- } finally {
- syncSessions.free(syncSession);
- }
- }
-
- @Override
- public Optional<String> get(RestUri restUri, Optional<String> fieldSet) throws RestApiException {
- return get(restUri, fieldSet, Optional.empty());
- }
-
- @Override
- public Optional<String> get(RestUri restUri) throws RestApiException {
- return get(restUri, Optional.empty());
- }
-
- private static boolean isValidBucketSpace(String spaceName) {
- // TODO need bucket space repo in Java as well
- return (FixedBucketSpaces.defaultSpace().equals(spaceName)
- || FixedBucketSpaces.globalSpace().equals(spaceName));
- }
-
- protected BucketSpaceRoute resolveBucketSpaceRoute(Optional<String> wantedCluster,
- Optional<String> wantedBucketSpace,
- RestUri restUri) throws RestApiException {
- final List<ClusterDef> clusters = clusterEnumerator.enumerateClusters();
- ClusterDef clusterDef = resolveClusterDef(wantedCluster, clusters);
-
- String targetBucketSpace;
- if (!restUri.isRootOnly()) {
- String docType = restUri.getDocumentType();
- Optional<String> resolvedSpace = bucketSpaceResolver.clusterBucketSpaceFromDocumentType(clusterDef.getName(), docType);
- if (!resolvedSpace.isPresent()) {
- throw new RestApiException(Response.createErrorResponse(400, String.format(
- "Document type '%s' in cluster '%s' is not mapped to a known bucket space", docType, clusterDef.getName()),
- RestUri.apiErrorCodes.UNKNOWN_BUCKET_SPACE));
- }
- targetBucketSpace = resolvedSpace.get();
- } else {
- if (wantedBucketSpace.isPresent() && !isValidBucketSpace(wantedBucketSpace.get())) {
- // TODO enumerate known bucket spaces from a repo instead of having a fixed set
- throw new RestApiException(Response.createErrorResponse(400, String.format(
- "Bucket space '%s' is not a known bucket space (expected '%s' or '%s')",
- wantedBucketSpace.get(), FixedBucketSpaces.defaultSpace(), FixedBucketSpaces.globalSpace()),
- RestUri.apiErrorCodes.UNKNOWN_BUCKET_SPACE));
- }
- targetBucketSpace = wantedBucketSpace.orElse(FixedBucketSpaces.defaultSpace());
- }
-
- return new BucketSpaceRoute(clusterDefToRoute(clusterDef), targetBucketSpace);
- }
-
- protected static ClusterDef resolveClusterDef(Optional<String> wantedCluster, List<ClusterDef> clusters) throws RestApiException {
- if (clusters.size() == 0) {
- throw new IllegalArgumentException("Your Vespa cluster does not have any content clusters " +
- "declared. Visiting feature is not available.");
- }
- if (! wantedCluster.isPresent()) {
- if (clusters.size() != 1) {
- String message = "Several clusters exist: " +
- clusters.stream().map(c -> "'" + c.getName() + "'").collect(Collectors.joining(", ")) +
- ". You must specify one.";
- throw new RestApiException(Response.createErrorResponse(400,
- message,
- RestUri.apiErrorCodes.SEVERAL_CLUSTERS));
- }
- return clusters.get(0);
- }
-
- for (ClusterDef clusterDef : clusters) {
- if (clusterDef.getName().equals(wantedCluster.get())) {
- return clusterDef;
- }
- }
- String message = "Your vespa cluster contains the content clusters " +
- clusters.stream().map(c -> "'" + c.getName() + "'").collect(Collectors.joining(", ")) +
- ", not '" + wantedCluster.get() + "'. Please select a valid vespa cluster.";
- throw new RestApiException(Response.createErrorResponse(400,
- message,
- RestUri.apiErrorCodes.MISSING_CLUSTER));
- }
-
- protected static String clusterDefToRoute(ClusterDef clusterDef) {
- return "[Storage:cluster=" + clusterDef.getName() + ";clusterconfigid=" + clusterDef.getConfigId() + "]";
- }
-
- private static String buildAugmentedDocumentSelection(RestUri restUri, String documentSelection) {
- if (restUri.isRootOnly()) {
- return documentSelection; // May be empty, that's fine.
- }
- StringBuilder selection = new StringBuilder();
- if (! documentSelection.isEmpty()) {
- selection.append("((").append(documentSelection).append(") and ");
- }
- selection.append(restUri.getDocumentType()).append(" and (id.namespace=='").append(restUri.getNamespace()).append("')");
- if (! documentSelection.isEmpty()) {
- selection.append(")");
- }
- return selection.toString();
- }
-
- private static int computeEffectiveConcurrency(Optional<Integer> requestConcurrency) {
- int wantedConcurrency = requestConcurrency.orElse(1);
- return Math.min(Math.max(wantedConcurrency, 1), CONCURRENCY_UPPER_BOUND);
- }
-
- private VisitorParameters createVisitorParameters(
- RestUri restUri,
- String documentSelection,
- VisitOptions options)
- throws RestApiException {
-
- if (restUri.isRootOnly() && !options.cluster.isPresent()) {
- throw new RestApiException(Response.createErrorResponse(400,
- "Must set 'cluster' parameter to a valid content cluster id when visiting at a root /document/v1/ level",
- RestUri.apiErrorCodes.MISSING_CLUSTER));
- }
-
- String augmentedSelection = buildAugmentedDocumentSelection(restUri, documentSelection);
-
- VisitorParameters params = new VisitorParameters(augmentedSelection);
- // Only return fieldset that is part of the document, unless we're visiting across all
- // document types in which case we can't explicitly state a single document type.
- // This matches legacy /visit API and vespa-visit tool behavior.
- params.fieldSet(options.fieldSet.orElse(
- restUri.isRootOnly() ? AllFields.NAME : restUri.getDocumentType() + ":[document]"));
- params.setMaxBucketsPerVisitor(1);
- params.setMaxPending(32);
- params.setMaxFirstPassHits(1);
- params.setMaxTotalHits(options.wantedDocumentCount
- .map(n -> Math.min(Math.max(n, 1), WANTED_DOCUMENT_COUNT_UPPER_BOUND))
- .orElse(1));
- params.setThrottlePolicy(new StaticThrottlePolicy().setMaxPendingCount(computeEffectiveConcurrency(options.concurrency)));
- params.setToTimestamp(0L);
- params.setFromTimestamp(0L);
- params.setSessionTimeoutMs(VISIT_TIMEOUT_MS);
-
- params.visitInconsistentBuckets(true); // TODO document this as part of consistency doc
-
- BucketSpaceRoute bucketSpaceRoute = resolveBucketSpaceRoute(options.cluster, options.bucketSpace, restUri);
- params.setRoute(bucketSpaceRoute.getClusterRoute());
- params.setBucketSpace(bucketSpaceRoute.getBucketSpace());
-
- params.setTraceLevel(0);
- params.setPriority(DocumentProtocol.Priority.NORMAL_4);
- params.setVisitRemoves(false);
-
- if (options.continuation.isPresent()) {
- try {
- params.setResumeToken(ProgressToken.fromSerializedString(options.continuation.get()));
- } catch (Exception e) {
- throw new RestApiException(Response.createErrorResponse(500, ExceptionUtils.getStackTraceAsString(e), restUri, RestUri.apiErrorCodes.UNSPECIFIED));
- }
- }
- return params;
- }
-
-}
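
For non-root visits, the handler removed above always scoped the user's selection to the namespace and document type taken from the request path, so a visit could never cross document types. A minimal, self-contained sketch of that rule (the class and method names here are illustrative, not the removed API):

    // Mirrors the removed buildAugmentedDocumentSelection(): an empty user selection yields
    // "<doctype> and (id.namespace=='<ns>')"; a non-empty one is additionally wrapped in parentheses.
    final class SelectionSketch {
        static String augment(String userSelection, String docType, String namespace) {
            StringBuilder selection = new StringBuilder();
            if (!userSelection.isEmpty())
                selection.append("((").append(userSelection).append(") and ");
            selection.append(docType).append(" and (id.namespace=='").append(namespace).append("')");
            if (!userSelection.isEmpty())
                selection.append(")");
            return selection.toString();
        }
        public static void main(String[] args) {
            System.out.println(augment("1 != 2", "document-type", "namespace")); // ((1 != 2) and document-type and (id.namespace=='namespace'))
            System.out.println(augment("", "document-type", "namespace"));       // document-type and (id.namespace=='namespace')
        }
    }
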
diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/Response.java b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/Response.java
deleted file mode 100644
index 663f77e7eea..00000000000
--- a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/Response.java
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.document.restapi;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.node.ObjectNode;
-import com.yahoo.container.jdisc.HttpResponse;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.nio.charset.StandardCharsets;
-import java.util.Optional;
-
-public class Response extends HttpResponse {
-
- private final static ObjectMapper objectMapper = new ObjectMapper();
- private final String jsonMessage;
-
- public Response(int code, Optional<ObjectNode> element, Optional<RestUri> restPath) {
- super(code);
- ObjectNode objectNode = element.orElse(objectMapper.createObjectNode());
- if (restPath.isPresent()) {
- objectNode.put("id", restPath.get().generateFullId());
- objectNode.put("pathId", restPath.get().getRawPath());
- }
- jsonMessage = objectNode.toString();
- }
-
- public static Response createErrorResponse(int code, String errorMessage, RestUri.apiErrorCodes errorID) {
- return createErrorResponse(code, errorMessage, null, errorID);
- }
-
- public static Response createErrorResponse(int code, String errorMessage, RestUri restUri, RestUri.apiErrorCodes errorID) {
- ObjectNode errorNode = objectMapper.createObjectNode();
- errorNode.put("description", errorID.name() + " " + errorMessage);
- errorNode.put("id", errorID.value);
-
- ObjectNode objectNode = objectMapper.createObjectNode();
- objectNode.putArray("errors").add(errorNode);
- return new Response(code, Optional.of(objectNode), Optional.ofNullable(restUri));
- }
-
- @Override
- public void render(OutputStream stream) throws IOException {
- stream.write(jsonMessage.getBytes(StandardCharsets.UTF_8));
- }
-
- @Override
- public String getContentType() { return "application/json"; }
-
-}
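
Every error produced by the Response class removed above had the same JSON shape: an "errors" array holding one object whose "description" is prefixed with the symbolic error code and whose "id" is the numeric code, plus the document id and pathId whenever a RestUri was available. An unknown target cluster, for instance, produced a body along the lines of

    {"errors":[{"description":"MISSING_CLUSTER Your vespa cluster contains the content clusters 'foo2', 'foo', 'foo3', not 'wrong'. Please select a valid vespa cluster.","id":-9}]}

as also asserted by the removed OperationHandlerImplTest further down.
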
diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/RestApiException.java b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/RestApiException.java
deleted file mode 100644
index 29843801bee..00000000000
--- a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/RestApiException.java
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.document.restapi;
-
-/**
- * Exceptions for Rest API
- *
- * @author dybis
- */
-public class RestApiException extends Exception {
-
- private final Response response;
-
- public RestApiException(Response response) {
- this.response = response;
- }
-
- public Response getResponse() {
- return response;
- }
-
-}
diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/RestUri.java b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/RestUri.java
deleted file mode 100644
index 975075fd2fa..00000000000
--- a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/RestUri.java
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.document.restapi;
-
-import com.google.common.base.Joiner;
-import com.google.common.base.Splitter;
-
-import java.io.UnsupportedEncodingException;
-import java.net.URI;
-import java.net.URLDecoder;
-import java.nio.charset.StandardCharsets;
-import java.util.List;
-import java.util.Optional;
-import static com.yahoo.jdisc.Response.Status.*;
-
-/**
- * Represents the request URI with its values.
- *
- * @author dybis
- */
-public class RestUri {
-
- public static final char NUMBER_STREAMING = 'n';
- public static final char GROUP_STREAMING = 'g';
- public static final String DOCUMENT = "document";
- public static final String V_1 = "v1";
- public static final String ID = "id:";
-
- public enum apiErrorCodes {
- ERROR_ID_BASIC_USAGE(-1),
- ERROR_ID_DECODING_PATH(-2),
- VISITOR_ERROR(-3),
- NO_ROUTE_WHEN_NOT_PART_OF_MESSAGEBUS(-4),
- SEVERAL_CLUSTERS(-5),
- URL_PARSING(-6),
- INVALID_CREATE_VALUE(-7),
- TOO_MANY_PARALLEL_REQUESTS(-8),
- MISSING_CLUSTER(-9), INTERNAL_EXCEPTION(-9),
- DOCUMENT_CONDITION_NOT_MET(-10),
- DOCUMENT_EXCEPTION(-11),
- PARSER_ERROR(-11),
- GROUP_AND_EXPRESSION_ERROR(-12),
- TIME_OUT(-13),
- INTERRUPTED(-14),
- UNSPECIFIED(-15),
- UNKNOWN_BUCKET_SPACE(-16);
-
- public final long value;
- apiErrorCodes(long value) {
- this.value = value;
- }
- }
-
- /**
- * Represents the "grouping" part of document id which can be used with streaming model.
- */
- public static class Group {
- public final char name;
- public final String value;
- Group(char name, String value) {
- this.name = name;
- this.value = value;
- }
- }
- private final String namespace;
- private final String documentType;
- private final String docId;
- private Optional<Group> group = Optional.empty();
- private final String rawPath;
-
- public boolean isRootOnly() {
- return namespace == null;
- }
-
- public String getRawPath() {
- return rawPath;
- }
-
- public String getNamespace() {
- return namespace;
- }
-
- public String getDocumentType() {
- return documentType;
- }
-
- public String getDocId() {
- return docId;
- }
-
- public Optional<Group> getGroup() {
- return group;
- }
-
- public String generateFullId() {
- return ID + namespace + ":" + documentType + ":"
- + group.map(g -> String.format("%s=%s", g.name, g.value)).orElse("")
- + ":" + docId;
- }
-
- static class PathParser {
- public static final long ERROR_ID_DECODING_PATH = -10L;
- final List<String> rawParts;
- final String originalPath;
- int readPos = 0;
- public PathParser(String path) {
- this.originalPath = path;
- this.rawParts = Splitter.on('/').splitToList(path);
- }
-
- boolean hasNextToken() {
- return readPos < rawParts.size();
- }
-
- String nextTokenOrException() throws RestApiException {
- if (readPos >= rawParts.size()) {
- throwUsage(originalPath);
- }
- String nextToken = rawParts.get(readPos++);
- return urlDecodeOrException(nextToken);
- }
-
- String restOfPath() throws RestApiException {
- String rawId = Joiner.on("/").join(rawParts.listIterator(readPos));
- return urlDecodeOrException(rawId);
- }
-
- String urlDecodeOrException(String url) throws RestApiException {
- try {
- return URLDecoder.decode(url, StandardCharsets.UTF_8.name());
- } catch (UnsupportedEncodingException e) {
- throw new RestApiException(Response.createErrorResponse(BAD_REQUEST,"Problems decoding the URI: " + e.getMessage(), apiErrorCodes.ERROR_ID_DECODING_PATH));
- }
- }
- }
-
- public RestUri(URI uri) throws RestApiException {
- rawPath = uri.getRawPath();
- PathParser pathParser = new PathParser(rawPath);
- if (! pathParser.nextTokenOrException().equals("") ||
- ! pathParser.nextTokenOrException().equals(DOCUMENT) ||
- ! pathParser.nextTokenOrException().equals(V_1)) {
- throwUsage(uri.getRawPath());
- }
- // If /document/v1 root request, there's an empty token at the end.
- String maybeNamespace = pathParser.nextTokenOrException();
- if (maybeNamespace.isEmpty()) {
- namespace = null;
- documentType = null;
- docId = null;
- return;
- }
- namespace = maybeNamespace;
- documentType = pathParser.nextTokenOrException();
- switch (pathParser.nextTokenOrException()) {
- case "number":
- group = Optional.of(new Group(NUMBER_STREAMING, pathParser.nextTokenOrException()));
- break;
- case "docid":
- group = Optional.empty();
- break;
- case "group":
- group = Optional.of(new Group(GROUP_STREAMING, pathParser.nextTokenOrException()));
- break;
- default: throwUsage(uri.getRawPath());
- }
- docId = pathParser.restOfPath();
- }
-
- private static void throwUsage(String inputPath) throws RestApiException {
- throw new RestApiException(Response.createErrorResponse(BAD_REQUEST,
- "Expected: " +
- ".../{namespace}/{document-type}/group/{name}/[{user-specified}] " +
- ".../{namespace}/{document-type}/docid/[{user-specified}] : but got " + inputPath, apiErrorCodes.ERROR_ID_BASIC_USAGE));
- }
-
-}
-
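
The removed RestUri accepted three path forms under /document/v1: .../{namespace}/{document-type}/docid/{id}, .../{namespace}/{document-type}/group/{group-value}/{id}, and .../{namespace}/{document-type}/number/{numeric-value}/{id}, plus the bare /document/v1/ root used for visiting. generateFullId() reassembled these into document ids; a small sketch of that reconstruction (class name illustrative):

    // Sketch of the removed generateFullId(): the group part is empty for plain docid paths.
    final class IdSketch {
        static String fullId(String namespace, String docType, String groupPart, String docId) {
            return "id:" + namespace + ":" + docType + ":" + groupPart + ":" + docId;
        }
        public static void main(String[] args) {
            System.out.println(fullId("namespace", "doctype", "", "myid"));        // id:namespace:doctype::myid
            System.out.println(fullId("namespace", "doctype", "g=group", "myid")); // id:namespace:doctype:g=group:myid
        }
    }
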
diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java
index 8287b6f1630..35f3a5d4057 100644
--- a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java
+++ b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java
@@ -311,8 +311,10 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
private ContentChannel getDocument(HttpRequest request, DocumentPath path, ResponseHandler handler) {
enqueueAndDispatch(request, handler, () -> {
- DocumentOperationParameters parameters = parametersFromRequest(request, CLUSTER, FIELD_SET)
- .withResponseHandler(response -> {
+ DocumentOperationParameters rawParameters = parametersFromRequest(request, CLUSTER, FIELD_SET);
+ if (rawParameters.fieldSet().isEmpty())
+ rawParameters = rawParameters.withFieldSet(path.documentType().orElseThrow() + ":[document]");
+ DocumentOperationParameters parameters = rawParameters.withResponseHandler(response -> {
handle(path, handler, response, (document, jsonResponse) -> {
if (document != null) {
jsonResponse.writeSingleDocument(document);
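
The functional change in this file is the field set default for single-document GET: when the request carries no fieldSet parameter, the handler now asks for "<document-type>:[document]" instead of leaving the field set unset. A minimal sketch of the rule, with a hypothetical Params record standing in for DocumentOperationParameters and an illustrative "music" document type:

    final class FieldSetDefaultSketch {
        // Stand-in for DocumentOperationParameters; only the parts needed for this sketch.
        record Params(java.util.Optional<String> fieldSet) {
            Params withFieldSet(String value) { return new Params(java.util.Optional.of(value)); }
        }
        // Apply the "<doctype>:[document]" default only when the request supplied no fieldSet.
        static Params withDefault(Params params, String documentType) {
            return params.fieldSet().isEmpty() ? params.withFieldSet(documentType + ":[document]") : params;
        }
        public static void main(String[] args) {
            Params defaulted = withDefault(new Params(java.util.Optional.empty()), "music");
            System.out.println(defaulted.fieldSet().orElseThrow()); // music:[document]
        }
    }
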
diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/RestApi.java b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/RestApi.java
index e603a150b34..bd63a2ecbfc 100644
--- a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/RestApi.java
+++ b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/RestApi.java
@@ -1,443 +1,26 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.document.restapi.resource;
-import com.fasterxml.jackson.databind.JsonNode;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.inject.Inject;
-import com.yahoo.cloud.config.ClusterListConfig;
-import com.yahoo.container.handler.ThreadpoolConfig;
-import com.yahoo.container.handler.threadpool.ContainerThreadPool;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.container.jdisc.LoggingRequestHandler;
-import com.yahoo.container.logging.AccessLog;
-import com.yahoo.document.DocumentTypeManager;
-import com.yahoo.document.TestAndSetCondition;
-import com.yahoo.document.config.DocumentmanagerConfig;
-import com.yahoo.document.json.SingleDocumentParser;
-import com.yahoo.document.restapi.OperationHandler;
-import com.yahoo.document.restapi.OperationHandlerImpl;
-import com.yahoo.document.restapi.Response;
-import com.yahoo.document.restapi.RestApiException;
-import com.yahoo.document.restapi.RestUri;
-import com.yahoo.document.select.DocumentSelector;
-import com.yahoo.document.select.parser.ParseException;
-import com.yahoo.documentapi.DocumentAccess;
-import com.yahoo.documentapi.messagebus.MessageBusDocumentAccess;
-import com.yahoo.documentapi.messagebus.MessageBusParams;
-import com.yahoo.documentapi.messagebus.loadtypes.LoadTypeSet;
-import com.yahoo.jdisc.Metric;
-import com.yahoo.metrics.simple.MetricReceiver;
-import com.yahoo.text.Text;
-import com.yahoo.vespa.config.content.AllClustersBucketSpacesConfig;
-import com.yahoo.vespa.config.content.LoadTypeConfig;
-import com.yahoo.vespaclient.ClusterDef;
-import com.yahoo.vespaclient.ClusterList;
-import com.yahoo.vespaxmlparser.DocumentFeedOperation;
-import com.yahoo.vespaxmlparser.FeedOperation;
-import com.yahoo.yolean.Exceptions;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.nio.charset.StandardCharsets;
-import java.util.Collections;
-import java.util.List;
-import java.util.Optional;
-import java.util.concurrent.Executor;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.logging.Level;
-
-import static com.yahoo.jdisc.Response.Status.BAD_REQUEST;
/**
- * API for handling single operation on a document and visiting.
+ * Dummy for internal use.
*
- * @author Haakon Dybdahl
+ * @author jonmv
*/
public class RestApi extends LoggingRequestHandler {
- private static final String CREATE_PARAMETER_NAME = "create";
- private static final String CONDITION_PARAMETER_NAME = "condition";
- private static final String ROUTE_PARAMETER_NAME = "route";
- private static final String DOCUMENTS = "documents";
- private static final String FIELDS = "fields";
- private static final String DOC_ID_NAME = "id";
- private static final String PATH_NAME = "pathId";
- private static final String SELECTION = "selection";
- private static final String CLUSTER = "cluster";
- private static final String CONTINUATION = "continuation";
- private static final String WANTED_DOCUMENT_COUNT = "wantedDocumentCount";
- private static final String FIELD_SET = "fieldSet";
- private static final String CONCURRENCY = "concurrency";
- private static final String BUCKET_SPACE = "bucketSpace";
- private static final String APPLICATION_JSON = "application/json";
- private final OperationHandler operationHandler;
- private SingleDocumentParser singleDocumentParser;
- private final ObjectMapper mapper = new ObjectMapper();
- private final AtomicInteger threadsAvailableForApi;
-
@Inject
- public RestApi(ContainerThreadPool threadpool,
- AccessLog accessLog,
- Metric metric,
- DocumentmanagerConfig documentManagerConfig,
- LoadTypeConfig loadTypeConfig,
- ThreadpoolConfig threadpoolConfig,
- AllClustersBucketSpacesConfig bucketSpacesConfig,
- ClusterListConfig clusterListConfig,
- MetricReceiver metricReceiver) {
- super(threadpool.executor(), accessLog, metric);
- MessageBusParams params = new MessageBusParams(new LoadTypeSet(loadTypeConfig));
- params.setDocumentmanagerConfig(documentManagerConfig);
- this.operationHandler = new OperationHandlerImpl(new MessageBusDocumentAccess(params),
- fixedClusterEnumeratorFromConfig(clusterListConfig),
- fixedBucketSpaceResolverFromConfig(bucketSpacesConfig),
- metricReceiver);
- this.singleDocumentParser = new SingleDocumentParser(new DocumentTypeManager(documentManagerConfig));
- // 40% of the threads can be blocked before we deny requests.
- if (threadpoolConfig != null) {
- threadsAvailableForApi = new AtomicInteger(Math.max((int) (0.4 * threadpoolConfig.maxthreads()), 1));
- } else {
- log.warning("No config for threadpool, using 200 for max blocking threads for document rest API.");
- threadsAvailableForApi = new AtomicInteger(200);
- }
- }
-
- // For testing and development
- RestApi(Executor executor, AccessLog accessLog, OperationHandler operationHandler, int threadsAvailable) {
- super(executor, accessLog, null);
- this.operationHandler = operationHandler;
- this.threadsAvailableForApi = new AtomicInteger(threadsAvailable);
- }
-
- @Override
- public void destroy() {
- operationHandler.shutdown();
- }
-
- // For testing and development
- protected void setDocTypeManagerForTests(DocumentTypeManager docTypeManager) {
- this.singleDocumentParser = new SingleDocumentParser(docTypeManager);
- }
-
- private static OperationHandlerImpl.ClusterEnumerator fixedClusterEnumeratorFromConfig(ClusterListConfig config) {
- List<ClusterDef> clusters = Collections.unmodifiableList(new ClusterList(config).getStorageClusters());
- return () -> clusters;
- }
-
- private static OperationHandlerImpl.BucketSpaceResolver fixedBucketSpaceResolverFromConfig(AllClustersBucketSpacesConfig bucketSpacesConfig) {
- return (clusterId, docType) ->
- Optional.ofNullable(bucketSpacesConfig.cluster(clusterId))
- .map(cluster -> cluster.documentType(docType))
- .map(type -> type.bucketSpace());
- }
-
- private static Optional<String> requestProperty(String parameter, HttpRequest request) {
- String property = request.getProperty(parameter);
- if (property != null && ! property.isEmpty()) {
- return Optional.of(property);
- }
- return Optional.empty();
- }
-
- private static boolean parseBooleanStrict(String value) {
- if ("true".equalsIgnoreCase(value)) {
- return true;
- } else if ("false".equalsIgnoreCase(value)) {
- return false;
- }
- throw new IllegalArgumentException(String.format("Value not convertible to bool: '%s'", value));
- }
-
- private static Optional<Boolean> parseBoolean(String parameter, HttpRequest request) {
- try {
- Optional<String> property = requestProperty(parameter, request);
- return property.map(RestApi::parseBooleanStrict);
- }
- catch (IllegalArgumentException e) {
- throw new IllegalArgumentException("Invalid value for '" + parameter + "' parameter: " +
- "Must be empty, true, or false but was '" +
- request.getProperty(parameter) + "'");
- }
- }
-
- private static int parsePositiveInt(String str) throws NumberFormatException {
- int parsed = Integer.parseInt(str);
- if (parsed <= 0) {
- throw new IllegalArgumentException("Parsed number was negative or zero");
- }
- return parsed;
+ public RestApi() {
+ super(ignored -> { throw new IllegalStateException("Not supposed to handle anything"); }, null, null);
}
@Override
public HttpResponse handle(HttpRequest request) {
- try {
- if (threadsAvailableForApi.decrementAndGet() < 1) {
- return Response.createErrorResponse(429 /* Too Many Requests */,
- "Too many parallel requests, consider using http-vespa-java-client. Please try again later.",
- RestUri.apiErrorCodes.TOO_MANY_PARALLEL_REQUESTS);
- }
- return handleInternal(request);
- } finally {
- threadsAvailableForApi.incrementAndGet();
- }
- }
-
- private static void validateUriStructureForRequestMethod(RestUri uri, com.yahoo.jdisc.http.HttpRequest.Method method)
- throws RestApiException {
- if ((method != com.yahoo.jdisc.http.HttpRequest.Method.GET) && uri.isRootOnly()) {
- throw new RestApiException(Response.createErrorResponse(BAD_REQUEST,
- "Root /document/v1/ requests only supported for HTTP GET",
- RestUri.apiErrorCodes.ERROR_ID_BASIC_USAGE));
- }
- }
-
- private static boolean isVisitRequestUri(RestUri uri) {
- return (uri.isRootOnly() || uri.getDocId().isEmpty());
- }
-
- // protected for testing
- protected HttpResponse handleInternal(HttpRequest request) {
- RestUri restUri = null;
- try {
- restUri = new RestUri(request.getUri());
- validateUriStructureForRequestMethod(restUri, request.getMethod());
-
- Optional<Boolean> create;
- try {
- create = parseBoolean(CREATE_PARAMETER_NAME, request);
- }
- catch (IllegalArgumentException e) {
- return Response.createErrorResponse(400, e.getMessage(), RestUri.apiErrorCodes.INVALID_CREATE_VALUE);
- }
-
- String condition = request.getProperty(CONDITION_PARAMETER_NAME);
- Optional<String> route = Optional.ofNullable(nonEmpty(request.getProperty(ROUTE_PARAMETER_NAME), ROUTE_PARAMETER_NAME));
-
- Optional<ObjectNode> resultJson = Optional.empty();
- switch (request.getMethod()) {
- case GET: // Vespa Visit/Get
- return isVisitRequestUri(restUri) ? handleVisit(restUri, request) : handleGet(restUri, request);
- case POST: // Vespa Put
- operationHandler.put(restUri, createPutOperation(request, restUri.generateFullId(), condition), route);
- break;
- case PUT: // Vespa Update
- operationHandler.update(restUri, createUpdateOperation(request, restUri.generateFullId(), condition, create), route);
- break;
- case DELETE: // Vespa Delete
- operationHandler.delete(restUri, condition, route);
- break;
- default:
- return new Response(405, Optional.empty(), Optional.of(restUri));
- }
- return new Response(200, resultJson, Optional.of(restUri));
- }
- catch (RestApiException e) {
- return e.getResponse();
- }
- catch (IllegalArgumentException userException) {
- return Response.createErrorResponse(400, Exceptions.toMessageString(userException),
- restUri,
- RestUri.apiErrorCodes.PARSER_ERROR);
- }
- catch (RuntimeException systemException) {
- log.log(Level.WARNING, "Internal runtime exception during Document V1 request handling", systemException);
- return Response.createErrorResponse(500, Exceptions.toMessageString(systemException),
- restUri,
- RestUri.apiErrorCodes.UNSPECIFIED);
- }
- }
-
- private FeedOperation createPutOperation(HttpRequest request, String id, String condition) {
- FeedOperation put = singleDocumentParser.parsePut(request.getData(), id);
- if (condition != null && ! condition.isEmpty()) {
- return new DocumentFeedOperation(put.getDocument(), new TestAndSetCondition(condition));
- }
- return put;
+ throw new IllegalStateException("Not supposed to handle anything");
}
- private FeedOperation createUpdateOperation(HttpRequest request, String id, String condition, Optional<Boolean> create) {
- FeedOperation update = singleDocumentParser.parseUpdate(request.getData(), id);
- if (condition != null && ! condition.isEmpty()) {
- update.getDocumentUpdate().setCondition(new TestAndSetCondition(condition));
- }
- create.ifPresent(c -> update.getDocumentUpdate().setCreateIfNonExistent(c));
- return update;
- }
-
- private HttpResponse handleGet(RestUri restUri, HttpRequest request) throws RestApiException {
- final Optional<String> fieldSet = requestProperty(FIELD_SET, request);
- final Optional<String> cluster = requestProperty(CLUSTER, request);
- final Optional<String> getDocument = operationHandler.get(restUri, fieldSet, cluster);
- final ObjectNode resultNode = mapper.createObjectNode();
- if (getDocument.isPresent()) {
- final JsonNode parseNode;
- try {
- parseNode = mapper.readTree(getDocument.get());
- } catch (IOException e) {
- throw new RuntimeException("Failed while parsing my own results", e);
- }
- resultNode.putPOJO(FIELDS, parseNode.get(FIELDS));
- }
- resultNode.put(DOC_ID_NAME, restUri.generateFullId());
- resultNode.put(PATH_NAME, restUri.getRawPath());
-
- return new HttpResponse(getDocument.isPresent() ? 200 : 404) {
- @Override
- public String getContentType() { return APPLICATION_JSON; }
- @Override
- public void render(OutputStream outputStream) throws IOException {
- outputStream.write(resultNode.toString().getBytes(StandardCharsets.UTF_8.name()));
- }
- };
- }
-
- private static HttpResponse createInvalidParameterResponse(String parameter, String explanation) {
- return Response.createErrorResponse(400, String.format("Invalid '%s' value. %s", parameter, explanation), RestUri.apiErrorCodes.UNSPECIFIED);
- }
-
- static class BadRequestParameterException extends IllegalArgumentException {
- private String parameter;
-
- BadRequestParameterException(String parameter, String message) {
- super(message);
- this.parameter = parameter;
- }
-
- String getParameter() {
- return parameter;
- }
- }
-
- private static Optional<Integer> parsePositiveIntegerRequestParameter(String parameter, HttpRequest request) {
- Optional<String> property = requestProperty(parameter, request);
- if (!property.isPresent()) {
- return Optional.empty();
- }
- try {
- return property.map(RestApi::parsePositiveInt);
- } catch (IllegalArgumentException e) {
- throw new BadRequestParameterException(parameter, "Expected positive integer");
- }
- }
-
- private static OperationHandler.VisitOptions visitOptionsFromRequest(HttpRequest request) {
- final OperationHandler.VisitOptions.Builder optionsBuilder = OperationHandler.VisitOptions.builder();
-
- Optional.ofNullable(request.getProperty(CLUSTER)).ifPresent(c -> optionsBuilder.cluster(c));
- Optional.ofNullable(request.getProperty(CONTINUATION)).ifPresent(c -> optionsBuilder.continuation(c));
- Optional.ofNullable(request.getProperty(FIELD_SET)).ifPresent(fs -> optionsBuilder.fieldSet(fs));
- Optional.ofNullable(request.getProperty(BUCKET_SPACE)).ifPresent(s -> optionsBuilder.bucketSpace(s));
- parsePositiveIntegerRequestParameter(WANTED_DOCUMENT_COUNT, request).ifPresent(c -> optionsBuilder.wantedDocumentCount(c));
- parsePositiveIntegerRequestParameter(CONCURRENCY, request).ifPresent(c -> optionsBuilder.concurrency(c));
-
- return optionsBuilder.build();
- }
-
- /**
- * Escapes all single quotes in input string.
- * @param original non-escaped string that may contain single quotes
- * @return original if no quotes to escape were found, otherwise a quote-escaped string
- */
- private static String singleQuoteEscapedString(String original) {
- if (original.indexOf('\'') == -1) {
- return original;
- }
- StringBuilder builder = new StringBuilder(original.length() + 1);
- for (int i = 0; i < original.length(); ++i) {
- char c = original.charAt(i);
- if (c != '\'') {
- builder.append(c);
- } else {
- builder.append("\\'");
- }
- }
- return builder.toString();
- }
-
-
- private String nonEmpty(String value, String name) {
- if (value != null && value.isEmpty())
- throw new IllegalArgumentException("'" + name + "' cannot be empty");
- return value;
- }
-
- private static long parseAndValidateVisitNumericId(String value) {
- try {
- return Long.parseLong(value);
- } catch (NumberFormatException e) {
- throw new BadRequestParameterException(SELECTION, "Failed to parse numeric part of selection URI");
- }
- }
-
- private static String validateAndBuildLocationSubExpression(RestUri.Group group) {
- if (group.name == 'n') {
- return String.format("id.user==%d", parseAndValidateVisitNumericId(group.value));
- } else {
- // Cannot feed documents with groups that don't pass this test, so it makes sense
- // to enforce this symmetry when trying to retrieve them as well.
- Text.validateTextString(group.value).ifPresent(codepoint -> {
- throw new BadRequestParameterException(SELECTION, String.format(
- "Failed to parse group part of selection URI; contains invalid text code point U%04X", codepoint));
- });
- return String.format("id.group=='%s'", singleQuoteEscapedString(group.value));
- }
- }
-
- private static void validateDocumentSelectionSyntax(String expression) {
- try {
- new DocumentSelector(expression);
- } catch (ParseException e) {
- throw new BadRequestParameterException(SELECTION, String.format("Failed to parse expression given in 'selection'" +
- " parameter. Must be a complete and valid sub-expression. Error: %s", e.getMessage()));
- }
- }
-
- private static String documentSelectionFromRequest(RestUri restUri, HttpRequest request) throws BadRequestParameterException {
- String documentSelection = Optional.ofNullable(request.getProperty(SELECTION)).orElse("");
- if (!documentSelection.isEmpty()) {
- // Ensure that the selection parameter sub-expression is complete and valid by itself.
- validateDocumentSelectionSyntax(documentSelection);
- }
- if (restUri.getGroup().isPresent() && ! restUri.getGroup().get().value.isEmpty()) {
- String locationSubExpression = validateAndBuildLocationSubExpression(restUri.getGroup().get());
- if (documentSelection.isEmpty()) {
- documentSelection = locationSubExpression;
- } else {
- documentSelection = String.format("%s and (%s)", locationSubExpression, documentSelection);
- }
- }
- return documentSelection;
- }
-
- private HttpResponse handleVisit(RestUri restUri, HttpRequest request) throws RestApiException {
- String documentSelection;
- OperationHandler.VisitOptions options;
- try {
- documentSelection = documentSelectionFromRequest(restUri, request);
- options = visitOptionsFromRequest(request);
- } catch (BadRequestParameterException e) {
- return createInvalidParameterResponse(e.getParameter(), e.getMessage());
- }
- OperationHandler.VisitResult visit = operationHandler.visit(restUri, documentSelection, options);
- ObjectNode resultNode = mapper.createObjectNode();
- visit.token.ifPresent(t -> resultNode.put(CONTINUATION, t));
- resultNode.putArray(DOCUMENTS).addPOJO(visit.documentsAsJsonList);
- resultNode.put(PATH_NAME, restUri.getRawPath());
-
- HttpResponse httpResponse = new HttpResponse(200) {
- @Override
- public String getContentType() { return APPLICATION_JSON; }
- @Override
- public void render(OutputStream outputStream) throws IOException {
- try {
- outputStream.write(resultNode.toString().getBytes(StandardCharsets.UTF_8));
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- }
- };
- return httpResponse;
- }
}
diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/documentapi/metrics/DocumentOperationStatus.java b/vespaclient-container-plugin/src/main/java/com/yahoo/documentapi/metrics/DocumentOperationStatus.java
index f0529f3d55a..c665eca1cac 100644
--- a/vespaclient-container-plugin/src/main/java/com/yahoo/documentapi/metrics/DocumentOperationStatus.java
+++ b/vespaclient-container-plugin/src/main/java/com/yahoo/documentapi/metrics/DocumentOperationStatus.java
@@ -1,7 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.documentapi.metrics;
-import com.yahoo.document.restapi.OperationHandlerImpl;
+import com.yahoo.documentapi.messagebus.protocol.DocumentProtocol;
import java.util.Set;
@@ -29,7 +29,10 @@ public enum DocumentOperationStatus {
}
public static DocumentOperationStatus fromMessageBusErrorCodes(Set<Integer> errorCodes) {
- return fromHttpStatusCode(OperationHandlerImpl.getHTTPStatusCode(errorCodes));
+ if (errorCodes.size() == 1 && errorCodes.contains(DocumentProtocol.ERROR_NO_SPACE))
+ return SERVER_ERROR;
+
+ return REQUEST_ERROR;
}
}
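
With OperationHandlerImpl gone, this mapping no longer goes through its HTTP status translation: a message-bus failure maps to SERVER_ERROR only when the error set is exactly the single code DocumentProtocol.ERROR_NO_SPACE, and to REQUEST_ERROR in every other case, including sets that merely contain that code. For example (anyOtherCode stands for any code other than ERROR_NO_SPACE):

    // fromMessageBusErrorCodes(Set.of(DocumentProtocol.ERROR_NO_SPACE))               -> SERVER_ERROR
    // fromMessageBusErrorCodes(Set.of(DocumentProtocol.ERROR_NO_SPACE, anyOtherCode)) -> REQUEST_ERROR
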
diff --git a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/DocumentApiApplicationTest.java b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/DocumentApiApplicationTest.java
deleted file mode 100644
index fd45a0d5dd7..00000000000
--- a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/DocumentApiApplicationTest.java
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.document.restapi;
-
-import com.yahoo.application.Application;
-import com.yahoo.application.Networking;
-import org.junit.Test;
-
-/**
- * @author bratseth
- */
-public class DocumentApiApplicationTest {
-
- /** Test that it is possible to instantiate an Application with a document-api */
- @Test
- public void application_with_document_api() {
- String services =
- "<container version='1.0'>" +
- " <http><server port=\"0\" id=\"foobar\"/></http>" +
- " <document-api/>" +
- "</container>";
- try (Application application = Application.fromServicesXml(services, Networking.enable)) {
- }
- }
-
-}
diff --git a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/OperationHandlerImplTest.java b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/OperationHandlerImplTest.java
deleted file mode 100644
index efb25f0e2b3..00000000000
--- a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/OperationHandlerImplTest.java
+++ /dev/null
@@ -1,445 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.document.restapi;
-
-import com.yahoo.document.fieldset.AllFields;
-import com.yahoo.documentapi.DocumentAccess;
-import com.yahoo.documentapi.ProgressToken;
-import com.yahoo.documentapi.SyncParameters;
-import com.yahoo.documentapi.VisitorControlHandler;
-import com.yahoo.documentapi.VisitorParameters;
-import com.yahoo.documentapi.VisitorSession;
-import com.yahoo.documentapi.messagebus.MessageBusSyncSession;
-import com.yahoo.messagebus.StaticThrottlePolicy;
-import com.yahoo.metrics.simple.MetricReceiver;
-import com.yahoo.vdslib.VisitorStatistics;
-import com.yahoo.vespaclient.ClusterDef;
-import org.junit.Test;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.concurrent.atomic.AtomicReference;
-
-import static org.hamcrest.CoreMatchers.containsString;
-import static org.hamcrest.CoreMatchers.instanceOf;
-import static org.hamcrest.core.Is.is;
-import static org.hamcrest.core.IsEqual.equalTo;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.fail;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-
-public class OperationHandlerImplTest {
-
- @Test(expected = IllegalArgumentException.class)
- public void missingClusterDef() throws RestApiException {
- List<ClusterDef> clusterDef = new ArrayList<>();
- OperationHandlerImpl.resolveClusterDef(Optional.empty(), clusterDef);
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void missingClusterDefSpecifiedCluster() throws RestApiException {
- List<ClusterDef> clusterDef = new ArrayList<>();
- OperationHandlerImpl.resolveClusterDef(Optional.of("cluster"), clusterDef);
- }
-
- @Test(expected = RestApiException.class)
- public void oneClusterPresentNotMatching() throws RestApiException {
- List<ClusterDef> clusterDef = new ArrayList<>();
- clusterDef.add(new ClusterDef("foo", "configId"));
- OperationHandlerImpl.resolveClusterDef(Optional.of("cluster"), clusterDef);
- }
-
- private static String toRoute(ClusterDef clusterDef) {
- return OperationHandlerImpl.clusterDefToRoute(clusterDef);
- }
-
- @Test()
- public void oneClusterMatching() throws RestApiException {
- List<ClusterDef> clusterDef = new ArrayList<>();
- clusterDef.add(new ClusterDef("foo", "configId"));
- assertThat(toRoute(OperationHandlerImpl.resolveClusterDef(Optional.of("foo"), clusterDef)),
- is("[Storage:cluster=foo;clusterconfigid=configId]"));
- }
-
- @Test()
- public void oneClusterMatchingManyAvailable() throws RestApiException {
- List<ClusterDef> clusterDef = new ArrayList<>();
- clusterDef.add(new ClusterDef("foo2", "configId2"));
- clusterDef.add(new ClusterDef("foo", "configId"));
- clusterDef.add(new ClusterDef("foo3", "configId2"));
- assertThat(toRoute(OperationHandlerImpl.resolveClusterDef(Optional.of("foo"), clusterDef)),
- is("[Storage:cluster=foo;clusterconfigid=configId]"));
- }
-
- @Test()
- public void unknown_target_cluster_throws_exception() throws RestApiException, IOException {
- List<ClusterDef> clusterDef = new ArrayList<>();
- clusterDef.add(new ClusterDef("foo2", "configId2"));
- clusterDef.add(new ClusterDef("foo", "configId"));
- clusterDef.add(new ClusterDef("foo3", "configId2"));
- try {
- OperationHandlerImpl.resolveClusterDef(Optional.of("wrong"), clusterDef);
- } catch(RestApiException e) {
- assertThat(e.getResponse().getStatus(), is(400));
- String errorMsg = renderRestApiExceptionAsString(e);
- assertThat(errorMsg, is("{\"errors\":[{\"description\":" +
- "\"MISSING_CLUSTER Your vespa cluster contains the content clusters 'foo2', 'foo'," +
- " 'foo3', not 'wrong'. Please select a valid vespa cluster.\",\"id\":-9}]}"));
- return;
- }
- fail("Expected exception");
- }
-
- private String renderRestApiExceptionAsString(RestApiException e) throws IOException {
- ByteArrayOutputStream stream = new ByteArrayOutputStream();
- e.getResponse().render(stream);
- return new String( stream.toByteArray());
- }
-
- private static class OperationHandlerImplFixture {
- DocumentAccess documentAccess = mock(DocumentAccess.class);
- AtomicReference<VisitorParameters> assignedParameters = new AtomicReference<>();
- VisitorControlHandler.CompletionCode completionCode = VisitorControlHandler.CompletionCode.SUCCESS;
- int bucketsVisited = 0;
- Map<String, String> bucketSpaces = new HashMap<>();
- MessageBusSyncSession mockSyncSession = mock(MessageBusSyncSession.class); // MBus session needed to avoid setRoute throwing.
-
- OperationHandlerImplFixture() {
- bucketSpaces.put("foo", "global");
- bucketSpaces.put("document-type", "default");
- }
-
- OperationHandlerImpl createHandler() throws Exception {
- VisitorSession visitorSession = mock(VisitorSession.class);
- // Pre-bake an already completed session
- when(documentAccess.createVisitorSession(any(VisitorParameters.class))).thenAnswer(p -> {
- VisitorParameters params = (VisitorParameters)p.getArguments()[0];
- assignedParameters.set(params);
-
- VisitorStatistics statistics = new VisitorStatistics();
- statistics.setBucketsVisited(bucketsVisited);
- params.getControlHandler().onVisitorStatistics(statistics);
-
- ProgressToken progress = new ProgressToken();
- params.getControlHandler().onProgress(progress);
-
- params.getControlHandler().onDone(completionCode, "bork bork");
- return visitorSession;
- });
- when(documentAccess.createSyncSession(any(SyncParameters.class))).thenReturn(mockSyncSession);
- OperationHandlerImpl.ClusterEnumerator clusterEnumerator = () -> Arrays.asList(new ClusterDef("foo", "configId"));
- OperationHandlerImpl.BucketSpaceResolver bucketSpaceResolver = (clusterId, docType) -> Optional.ofNullable(bucketSpaces.get(docType));
- return new OperationHandlerImpl(documentAccess, clusterEnumerator, bucketSpaceResolver, MetricReceiver.nullImplementation);
- }
- }
-
- private static OperationHandler.VisitOptions.Builder optionsBuilder() {
- return OperationHandler.VisitOptions.builder();
- }
-
- private static RestUri dummyVisitUri() throws Exception {
- return new RestUri(new URI("http://localhost/document/v1/namespace/document-type/docid/"));
- }
-
- private static RestUri apiRootVisitUri() throws Exception {
- return new RestUri(new URI("http://localhost/document/v1/"));
- }
-
- private static RestUri dummyGetUri() throws Exception {
- return new RestUri(new URI("http://localhost/document/v1/namespace/document-type/docid/foo"));
- }
-
- private static OperationHandler.VisitOptions visitOptionsWithWantedDocumentCount(int wantedDocumentCount) {
- return optionsBuilder().wantedDocumentCount(wantedDocumentCount).build();
- }
-
- private static OperationHandler.VisitOptions emptyVisitOptions() {
- return optionsBuilder().build();
- }
-
- @Test
- public void timeout_without_buckets_visited_throws_timeout_error() throws Exception {
- OperationHandlerImplFixture fixture = new OperationHandlerImplFixture();
- fixture.completionCode = VisitorControlHandler.CompletionCode.TIMEOUT;
- fixture.bucketsVisited = 0;
- // RestApiException hides its guts internally, so cannot trivially use @Rule directly to check for error category
- try {
- OperationHandlerImpl handler = fixture.createHandler();
- handler.visit(dummyVisitUri(), "", emptyVisitOptions());
- fail("Exception expected");
- } catch (RestApiException e) {
- assertThat(e.getResponse().getStatus(), is(500));
- assertThat(renderRestApiExceptionAsString(e), containsString("Timed out"));
- }
- }
-
- @Test
- public void timeout_with_buckets_visited_does_not_throw_timeout_error() throws Exception {
- OperationHandlerImplFixture fixture = new OperationHandlerImplFixture();
- fixture.completionCode = VisitorControlHandler.CompletionCode.TIMEOUT;
- fixture.bucketsVisited = 1;
-
- OperationHandlerImpl handler = fixture.createHandler();
- handler.visit(dummyVisitUri(), "", emptyVisitOptions());
- }
-
- @Test
- public void handler_sets_default_visitor_session_timeout_parameter() throws Exception {
- OperationHandlerImplFixture fixture = new OperationHandlerImplFixture();
- OperationHandlerImpl handler = fixture.createHandler();
-
- handler.visit(dummyVisitUri(), "", emptyVisitOptions());
-
- assertThat(fixture.assignedParameters.get().getSessionTimeoutMs(), is((long)OperationHandlerImpl.VISIT_TIMEOUT_MS));
- }
-
- private static VisitorParameters generatedVisitParametersFrom(RestUri restUri, String documentSelection,
- OperationHandler.VisitOptions options) throws Exception {
- OperationHandlerImplFixture fixture = new OperationHandlerImplFixture();
- OperationHandlerImpl handler = fixture.createHandler();
-
- handler.visit(restUri, documentSelection, options);
- return fixture.assignedParameters.get();
- }
-
- private static VisitorParameters generatedParametersFromVisitOptions(OperationHandler.VisitOptions options) throws Exception {
- return generatedVisitParametersFrom(dummyVisitUri(), "", options);
- }
-
- @Test
- public void document_type_is_mapped_to_correct_bucket_space() throws Exception {
- OperationHandlerImplFixture fixture = new OperationHandlerImplFixture();
- fixture.bucketSpaces.put("document-type", "langbein");
- OperationHandlerImpl handler = fixture.createHandler();
- handler.visit(dummyVisitUri(), "", emptyVisitOptions());
-
- VisitorParameters parameters = fixture.assignedParameters.get();
- assertEquals("langbein", parameters.getBucketSpace());
- }
-
- @Test
- public void unknown_bucket_space_mapping_throws_exception() throws Exception {
- OperationHandlerImplFixture fixture = new OperationHandlerImplFixture();
- fixture.bucketSpaces.remove("document-type");
- try {
- OperationHandlerImpl handler = fixture.createHandler();
- handler.visit(dummyVisitUri(), "", emptyVisitOptions());
- fail("Exception expected");
- } catch (RestApiException e) {
- assertThat(e.getResponse().getStatus(), is(400));
- String errorMsg = renderRestApiExceptionAsString(e);
- // FIXME isn't this really more of a case of unknown document type..?
- assertThat(errorMsg, is("{\"errors\":[{\"description\":" +
- "\"UNKNOWN_BUCKET_SPACE Document type 'document-type' in cluster 'foo' is not mapped to a known bucket space\",\"id\":-16}]}"));
- }
- }
-
- @Test
- public void provided_wanted_document_count_is_propagated_to_visitor_parameters() throws Exception {
- VisitorParameters params = generatedParametersFromVisitOptions(visitOptionsWithWantedDocumentCount(123));
- assertThat(params.getMaxTotalHits(), is((long)123));
- }
-
- @Test
- public void wanted_document_count_is_1_unless_specified() throws Exception {
- VisitorParameters params = generatedParametersFromVisitOptions(emptyVisitOptions());
- assertThat(params.getMaxTotalHits(), is((long)1));
- }
-
- @Test
- public void too_low_wanted_document_count_is_bounded_to_1() throws Exception {
- VisitorParameters params = generatedParametersFromVisitOptions(visitOptionsWithWantedDocumentCount(-1));
- assertThat(params.getMaxTotalHits(), is((long)1));
-
- params = generatedParametersFromVisitOptions(visitOptionsWithWantedDocumentCount(Integer.MIN_VALUE));
- assertThat(params.getMaxTotalHits(), is((long)1));
-
- params = generatedParametersFromVisitOptions(visitOptionsWithWantedDocumentCount(0));
- assertThat(params.getMaxTotalHits(), is((long)1));
- }
-
- @Test
- public void too_high_wanted_document_count_is_bounded_to_upper_bound() throws Exception {
- VisitorParameters params = generatedParametersFromVisitOptions(visitOptionsWithWantedDocumentCount(OperationHandlerImpl.WANTED_DOCUMENT_COUNT_UPPER_BOUND + 1));
- assertThat(params.getMaxTotalHits(), is((long)OperationHandlerImpl.WANTED_DOCUMENT_COUNT_UPPER_BOUND));
-
- params = generatedParametersFromVisitOptions(visitOptionsWithWantedDocumentCount(Integer.MAX_VALUE));
- assertThat(params.getMaxTotalHits(), is((long)OperationHandlerImpl.WANTED_DOCUMENT_COUNT_UPPER_BOUND));
- }
-
- @Test
- public void visit_field_set_covers_all_fields_by_default() throws Exception {
- VisitorParameters params = generatedParametersFromVisitOptions(emptyVisitOptions());
- assertThat(params.fieldSet(), equalTo("document-type:[document]"));
- }
-
- @Test
- public void provided_visit_fieldset_is_propagated_to_visitor_parameters() throws Exception {
- VisitorParameters params = generatedParametersFromVisitOptions(optionsBuilder().fieldSet("document-type:bjarne").build());
- assertThat(params.fieldSet(), equalTo("document-type:bjarne"));
- }
-
- private void assertConcurrencyPropagated(VisitorParameters params, int expectedConcurrency) {
- assertThat(params.getThrottlePolicy(), instanceOf(StaticThrottlePolicy.class));
- assertThat(((StaticThrottlePolicy)params.getThrottlePolicy()).getMaxPendingCount(), is(expectedConcurrency));
- }
-
- @Test
- public void visit_concurrency_is_1_by_default() throws Exception {
- VisitorParameters params = generatedParametersFromVisitOptions(emptyVisitOptions());
- assertConcurrencyPropagated(params, 1);
- }
-
- @Test
- public void visit_concurrency_is_propagated_to_visitor_parameters() throws Exception {
- VisitorParameters params = generatedParametersFromVisitOptions(optionsBuilder().concurrency(3).build());
- assertConcurrencyPropagated(params, 3);
- }
-
- @Test
- public void too_low_visit_concurrency_is_capped_to_1() throws Exception {
- VisitorParameters params = generatedParametersFromVisitOptions(optionsBuilder().concurrency(0).build());
- assertConcurrencyPropagated(params, 1);
- }
-
- @Test
- public void too_high_visit_concurrency_is_capped_to_max() throws Exception {
- VisitorParameters params = generatedParametersFromVisitOptions(
- optionsBuilder().concurrency(OperationHandlerImpl.CONCURRENCY_UPPER_BOUND + 1).build());
- assertConcurrencyPropagated(params, OperationHandlerImpl.CONCURRENCY_UPPER_BOUND);
- }
-
- @Test
- public void get_field_covers_all_fields_by_default() throws Exception {
- OperationHandlerImplFixture fixture = new OperationHandlerImplFixture();
- OperationHandlerImpl handler = fixture.createHandler();
- handler.get(dummyGetUri(), Optional.empty());
-
- verify(fixture.mockSyncSession).get(any(), eq("document-type:[document]"), any());
- }
-
- @Test
- public void provided_get_fieldset_is_propagated_to_sync_session() throws Exception {
- OperationHandlerImplFixture fixture = new OperationHandlerImplFixture();
- OperationHandlerImpl handler = fixture.createHandler();
- handler.get(dummyGetUri(), Optional.of("donald,duck"));
-
- verify(fixture.mockSyncSession).get(any(), eq("donald,duck"), any());
- }
-
- @Test
- public void get_route_has_default_value_if_no_cluster_is_provided() throws Exception {
- OperationHandlerImplFixture fixture = new OperationHandlerImplFixture();
- OperationHandlerImpl handler = fixture.createHandler();
- handler.get(dummyGetUri(), Optional.empty(), Optional.empty());
-
- // TODO shouldn't this be default-get?
- verify(fixture.mockSyncSession).setRoute(eq("default"));
- }
-
- @Test
- public void provided_get_cluster_is_propagated_as_route_to_sync_session() throws Exception {
- OperationHandlerImplFixture fixture = new OperationHandlerImplFixture();
- OperationHandlerImpl handler = fixture.createHandler();
- handler.get(dummyGetUri(), Optional.empty(), Optional.of("foo"));
-
- verify(fixture.mockSyncSession).setRoute(eq("[Storage:cluster=foo;clusterconfigid=configId]"));
- }
-
- @Test
- public void api_root_visit_uri_requires_cluster_set() throws Exception {
- OperationHandlerImplFixture fixture = new OperationHandlerImplFixture();
- OperationHandlerImpl handler = fixture.createHandler();
- try {
- handler.visit(apiRootVisitUri(), "", emptyVisitOptions());
- fail("Exception expected");
- } catch (RestApiException e) {
- assertThat(e.getResponse().getStatus(), is(400));
- assertThat(renderRestApiExceptionAsString(e), containsString(
- "MISSING_CLUSTER Must set 'cluster' parameter to a valid content cluster id " +
- "when visiting at a root /document/v1/ level"));
- }
- }
-
- @Test
- public void api_root_visiting_propagates_request_route() throws Exception {
- VisitorParameters parameters = generatedVisitParametersFrom(apiRootVisitUri(), "", optionsBuilder().cluster("foo").build());
- assertEquals("[Storage:cluster=foo;clusterconfigid=configId]", parameters.getRoute().toString());
- }
-
- @Test
- public void api_root_visiting_targets_default_bucket_space_by_default() throws Exception {
- VisitorParameters parameters = generatedVisitParametersFrom(apiRootVisitUri(), "", optionsBuilder().cluster("foo").build());
- assertEquals("default", parameters.getBucketSpace());
- }
-
- @Test
- public void api_root_visiting_can_explicitly_specify_bucket_space() throws Exception {
- VisitorParameters parameters = generatedVisitParametersFrom(apiRootVisitUri(), "",
- optionsBuilder().cluster("foo").bucketSpace("global").build());
- assertEquals("global", parameters.getBucketSpace());
- }
-
- @Test
- public void api_root_visiting_throws_exception_on_unknown_bucket_space_name() throws Exception {
- try {
- generatedVisitParametersFrom(apiRootVisitUri(), "", optionsBuilder().cluster("foo").bucketSpace("langbein").build());
- } catch (RestApiException e) {
- assertThat(e.getResponse().getStatus(), is(400));
- assertThat(renderRestApiExceptionAsString(e), containsString(
- "UNKNOWN_BUCKET_SPACE Bucket space 'langbein' is not a known bucket space " +
- "(expected 'default' or 'global')"));
- }
- }
-
- @Test
- public void api_root_visiting_has_empty_document_selection_by_default() throws Exception {
- VisitorParameters parameters = generatedVisitParametersFrom(apiRootVisitUri(), "", optionsBuilder().cluster("foo").build());
- assertEquals("", parameters.getDocumentSelection());
- }
-
- @Test
- public void api_root_visiting_propagates_provided_document_selection() throws Exception {
- VisitorParameters parameters = generatedVisitParametersFrom(apiRootVisitUri(), "baz.blarg", optionsBuilder().cluster("foo").build());
- // Note: syntax correctness of selection is checked and enforced by RestApi
- assertEquals("baz.blarg", parameters.getDocumentSelection());
- }
-
- @Test
- public void api_root_visiting_uses_all_fieldset_by_default() throws Exception {
- VisitorParameters parameters = generatedVisitParametersFrom(apiRootVisitUri(), "", optionsBuilder().cluster("foo").build());
- assertEquals(AllFields.NAME, parameters.getFieldSet());
- }
-
- @Test
- public void api_root_visiting_propagates_provided_fieldset() throws Exception {
- VisitorParameters parameters = generatedVisitParametersFrom(apiRootVisitUri(), "",
- optionsBuilder().cluster("foo").fieldSet("zoidberg:[document]").build());
- assertEquals("zoidberg:[document]", parameters.getFieldSet());
- }
-
- @Test
- public void namespace_and_doctype_augmented_selection_has_parenthesized_selection_sub_expression() throws Exception {
- VisitorParameters parameters = generatedVisitParametersFrom(dummyVisitUri(), "1 != 2", optionsBuilder().cluster("foo").build());
- assertEquals("((1 != 2) and document-type and (id.namespace=='namespace'))", parameters.getDocumentSelection());
- }
-
- @Test
- public void namespace_and_doctype_visit_without_selection_does_not_contain_selection_sub_expression() throws Exception {
- VisitorParameters parameters = generatedVisitParametersFrom(dummyVisitUri(), "", optionsBuilder().cluster("foo").build());
- assertEquals("document-type and (id.namespace=='namespace')", parameters.getDocumentSelection());
- }
-
-}
diff --git a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/RestUriTest.java b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/RestUriTest.java
deleted file mode 100644
index bdeee12a32a..00000000000
--- a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/RestUriTest.java
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.document.restapi;
-
-import org.apache.http.client.utils.URIBuilder;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.net.URLEncoder;
-import java.nio.charset.StandardCharsets;
-import java.util.Optional;
-
-import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
-
-public class RestUriTest {
-
- URI createUri(String path, String query) throws URISyntaxException {
- return new URIBuilder()
- .addParameter("foo", "bar")
- .setHost("host")
- .setScheme("http")
- .setPort(666)
- .setPath(path)
- .setCustomQuery(query)
- .setFragment("fargment").build();
- }
-
- @Rule
- public ExpectedException thrown= ExpectedException.none();
-
- @Test
- public void testBasic() throws Exception {
- RestUri restUri = new RestUri(createUri("/document/v1/namespace/doctype/docid/myid", "query"));
- assertThat(restUri.getDocId(), is("myid"));
- assertThat(restUri.getDocumentType(), is("doctype"));
- assertThat(restUri.getNamespace(), is("namespace"));
- assertThat(restUri.getGroup(), is(Optional.<RestUri.Group>empty()));
- assertThat(restUri.generateFullId(), is("id:namespace:doctype::myid"));
- }
-
- @Test
- public void encodingSlashes() throws Exception {
- // Try with slashes encoded.
- final String id = " !\"øæåp/:;&,.:;'1Q";
- String encodedId = URLEncoder.encode(id, StandardCharsets.UTF_8.name());
- RestUri restUri = new RestUri(URI.create("/document/v1/namespace/doctype/docid/" + encodedId));
- assertThat(restUri.getDocId(), is(id));
- assertThat(restUri.getDocumentType(), is("doctype"));
- assertThat(restUri.getNamespace(), is("namespace"));
- assertThat(restUri.getGroup(), is(Optional.<RestUri.Group>empty()));
- assertThat(restUri.generateFullId(), is("id:namespace:doctype::" + id));
- }
-
- @Test
- public void encodingSlashes2() throws Exception {
- // This will decode the slashes.
- final String id = " !\"øæåp/:;&,.:;'1Q ";
- RestUri restUri = new RestUri(createUri("/document/v1/namespace/doctype/docid/" + id, "query"));
- assertThat(restUri.getDocId(), is(id));
- assertThat(restUri.getDocumentType(), is("doctype"));
- assertThat(restUri.getNamespace(), is("namespace"));
- assertThat(restUri.getGroup(), is(Optional.<RestUri.Group>empty()));
- assertThat(restUri.generateFullId(), is("id:namespace:doctype::" + id));
- }
-
-
- @Test
- public void testVisit() throws Exception {
- RestUri restUri = new RestUri(createUri("/document/v1/namespace/doctype/docid/", "query"));
- assertThat(restUri.getDocId(), is(""));
- assertThat(restUri.getDocumentType(), is("doctype"));
- assertThat(restUri.getNamespace(), is("namespace"));
- assertThat(restUri.getGroup(), is(Optional.<RestUri.Group>empty()));
- assertThat(restUri.generateFullId(), is("id:namespace:doctype::"));
- }
-
- @Test
- public void testOneSlashTooMuchWhichIsFine() throws Exception {
- RestUri restUri = new RestUri(createUri("/document/v1/namespace/doctype/docid/myid:342:23/wrong", ""));
- assertThat(restUri.getDocId(), is("myid:342:23/wrong"));
- }
-
- @Test
- public void testGroupG() throws Exception {
- RestUri restUri = new RestUri(createUri("/document/v1/namespace/doctype/group/group/myid", ""));
- assertThat(restUri.getDocId(), is("myid"));
- assertThat(restUri.getDocumentType(), is("doctype"));
- assertThat(restUri.getGroup().get().name, is('g'));
- assertThat(restUri.getGroup().get().value, is("group"));
- assertThat(restUri.generateFullId(), is("id:namespace:doctype:g=group:myid"));
- }
-
- @Test
- public void testGroupUrlDecode() throws Exception {
- RestUri restUri = new RestUri(createUri("/document/v1/namespace/doctype/group/group#123/myid", ""));
- assertThat(restUri.getDocId(), is("myid"));
- assertThat(restUri.getDocumentType(), is("doctype"));
- assertThat(restUri.getGroup().get().name, is('g'));
- assertThat(restUri.getGroup().get().value, is("group#123"));
- assertThat(restUri.generateFullId(), is("id:namespace:doctype:g=group#123:myid"));
- }
-
- @Test
- public void testGroupN() throws Exception {
- RestUri restUri = new RestUri(createUri("/document/v1/namespace/doctype/number/group/myid", ""));
- assertThat(restUri.getGroup().get().name, is('n'));
- assertThat(restUri.getGroup().get().value, is("group"));
- }
-
- @Test
- public void testGroupUnknown() throws Exception {
- thrown.expect(RestApiException.class);
- new RestUri(createUri("/document/v1/namespace/doctype/Q/myid", ""));
- }
-
- @Test
- public void testDocIdAsIs() throws Exception {
- RestUri restUri = new RestUri(new URI("/document/v1/test/newsarticle/docid/http%3a%2f%2fvn.news.yahoo.com%2fgi-th-ng-t-n-ng-khoa-h-205000458.html").normalize());
- assertThat(restUri.getNamespace(), is("test"));
- assertThat(restUri.getDocumentType(), is("newsarticle"));
- assertThat(restUri.getDocId(), is("http://vn.news.yahoo.com/gi-th-ng-t-n-ng-khoa-h-205000458.html"));
- assertThat(restUri.generateFullId(), is("id:test:newsarticle::http://vn.news.yahoo.com/gi-th-ng-t-n-ng-khoa-h-205000458.html"));
- }
-
-}
diff --git a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/feed-document1.json b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/feed-document1.json
deleted file mode 100644
index e69de29bb2d..00000000000
--- a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/feed-document1.json
+++ /dev/null
diff --git a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java
index c85db0582eb..2775250765f 100644
--- a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java
+++ b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java
@@ -289,7 +289,7 @@ public class DocumentV1ApiTest {
// GET with full document ID is a document get operation.
access.session.expect((id, parameters) -> {
assertEquals(doc1.getId(), id);
- assertEquals(parameters(), parameters);
+ assertEquals(parameters().withFieldSet("music:[document]"), parameters);
parameters.responseHandler().get().handleResponse(new DocumentResponse(0, doc1));
return new Result(Result.ResultType.SUCCESS, null);
});
@@ -306,7 +306,7 @@ public class DocumentV1ApiTest {
// GET with not encoded / in user specified part of document id is perfectly OK ... щ(ಥДಥщ)
access.session.expect((id, parameters) -> {
assertEquals(new DocumentId("id:space:music::one/two/three"), id);
- assertEquals(parameters(), parameters);
+ assertEquals(parameters().withFieldSet("music:[document]"), parameters);
parameters.responseHandler().get().handleResponse(new DocumentResponse(0));
return new Result(Result.ResultType.SUCCESS, null);
});
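The two assertion updates in the hunk above reflect that a single-document GET now carries a default field set of "<doctype>:[document]" instead of empty parameters when the request gives no fieldSet. A minimal sketch of that defaulting, assuming the parameters() factory and withFieldSet(String) the test already statically imports (the com.yahoo.documentapi.DocumentOperationParameters import path is an assumption; "music" is simply this test's fixture document type):

    // Sketch only: mirrors the expectation asserted above, not the handler's actual code.
    import com.yahoo.documentapi.DocumentOperationParameters;
    import static com.yahoo.documentapi.DocumentOperationParameters.parameters;

    class FieldSetDefaultSketch {
        // Parameters a single-document GET is expected to use: the request's fieldSet
        // if one was supplied, otherwise "<documentType>:[document]" (all document fields).
        static DocumentOperationParameters forGet(String documentType, String requestedFieldSet) {
            String fieldSet = (requestedFieldSet == null || requestedFieldSet.isEmpty())
                              ? documentType + ":[document]"
                              : requestedFieldSet;
            return parameters().withFieldSet(fieldSet);
        }
    }

    // forGet("music", null) yields parameters().withFieldSet("music:[document]"),
    // matching both updated assertions; an explicit ?fieldSet=artist,title request
    // would instead yield parameters().withFieldSet("artist,title").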
diff --git a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/MockedOperationHandler.java b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/MockedOperationHandler.java
deleted file mode 100644
index eb6bb609970..00000000000
--- a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/MockedOperationHandler.java
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.document.restapi.resource;
-
-import com.yahoo.document.restapi.OperationHandler;
-import com.yahoo.document.restapi.Response;
-import com.yahoo.document.restapi.RestApiException;
-import com.yahoo.document.restapi.RestUri;
-import com.yahoo.vespaxmlparser.FeedOperation;
-
-import java.util.Optional;
-
-/**
- * Mock that collects info about operation and returns them on second delete.
- */
-public class MockedOperationHandler implements OperationHandler {
-
- StringBuilder log = new StringBuilder();
- int deleteCount = 0;
-
- @Override
- public VisitResult visit(RestUri restUri, String documentSelection, VisitOptions options) throws RestApiException {
- return new VisitResult(Optional.of("token"), "List of json docs, cont token "
- + options.continuation.orElse("not set") + ", doc selection: '"
- + documentSelection + "'"
- + options.wantedDocumentCount.map(n -> String.format(", min docs returned: %d", n)).orElse("")
- + options.fieldSet.map(s -> String.format(", field set: '%s'", s)).orElse("")
- + options.concurrency.map(n -> String.format(", concurrency: %d", n)).orElse("")
- + options.bucketSpace.map(s -> String.format(", bucket space: '%s'", s)).orElse("")
- + options.cluster.map(s -> String.format(", cluster: '%s'", s)).orElse(""));
- }
-
- @Override
- @SuppressWarnings("deprecation")
- public void put(RestUri restUri, FeedOperation data, Optional<String> route) throws RestApiException {
- log.append("PUT: " + data.getDocument().getId());
- log.append(data.getDocument().getHeader().toString());
- }
-
- @Override
- public void update(RestUri restUri, FeedOperation data, Optional<String> route) throws RestApiException {
- log.append("UPDATE: " + data.getDocumentUpdate().getId());
- log.append(data.getDocumentUpdate().fieldUpdates().toString());
- if (data.getDocumentUpdate().getCreateIfNonExistent()) {
- log.append("[CREATE IF NON EXISTENT IS TRUE]");
- }
- }
-
- @Override
- public void delete(RestUri restUri, String condition, Optional<String> route) throws RestApiException {
- deleteCount++;
- if (deleteCount == 2) {
- String theLog = log.toString();
- log = new StringBuilder();
- deleteCount = 0;
- throw new RestApiException(Response.createErrorResponse(666, theLog, RestUri.apiErrorCodes.ERROR_ID_BASIC_USAGE));
- }
- log.append("DELETE: " + restUri.generateFullId());
- }
-
- @Override
- public Optional<String> get(RestUri restUri, Optional<String> fieldSet, Optional<String> cluster) throws RestApiException {
- log.append("GET: " + restUri.generateFullId());
- // This is _not_ an elegant way to return data back to the test.
- // An alternative is removing this entire class in favor of explicit mock expectations.
- if (!fieldSet.isPresent() && !cluster.isPresent()) {
- return Optional.empty();
- }
- return Optional.of(String.format("{\"fields\": {\"fieldset\": \"%s\",\"cluster\":\"%s\"}}",
- fieldSet.orElse(""), cluster.orElse("")));
- }
-
- @Override
- public Optional<String> get(RestUri restUri, Optional<String> fieldSet) throws RestApiException {
- return get(restUri, fieldSet, Optional.empty());
- }
-
- @Override
- public Optional<String> get(RestUri restUri) throws RestApiException {
- return get(restUri, Optional.empty());
- }
-
-}
diff --git a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiMaxThreadTest.java b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiMaxThreadTest.java
deleted file mode 100644
index 39d5617dd4f..00000000000
--- a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiMaxThreadTest.java
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.document.restapi.resource;
-
-import com.yahoo.container.jdisc.HttpRequest;
-import com.yahoo.container.jdisc.HttpResponse;
-import com.yahoo.document.restapi.OperationHandler;
-import org.junit.Test;
-
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.Executor;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.assertThat;
-import static org.mockito.Mockito.mock;
-
-public class RestApiMaxThreadTest {
- final CountDownLatch latch = new CountDownLatch(1);
- final AtomicInteger requestsInFlight = new AtomicInteger(0);
- private class RestApiMocked extends RestApi {
-
- public RestApiMocked() {
- super(mock(Executor.class), null, (OperationHandler)null, 20);
- }
-
- @Override
- protected HttpResponse handleInternal(HttpRequest request) {
- requestsInFlight.incrementAndGet();
- try {
- latch.await();
- } catch (InterruptedException e) {
- throw new RuntimeException(e);
- }
- return null;
- }
- }
-
- @Test
- public void testCallsAreThrottled() throws InterruptedException {
- RestApiMocked restApiMocked = new RestApiMocked();
- // Fire lots of requests.
- for (int x = 0; x < 30; x++) {
- new Thread(() -> restApiMocked.handle(null)).start();
- }
- // Wait for all threads to be used
- while (requestsInFlight.get() != 19) {
- Thread.sleep(1);
- }
- // A new request should be blocked.
- final HttpResponse response = restApiMocked.handle(null);
- assertThat(response.getStatus(), is(429));
- latch.countDown();
- }
-}
diff --git a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiTest.java b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiTest.java
deleted file mode 100644
index 0661363477f..00000000000
--- a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiTest.java
+++ /dev/null
@@ -1,537 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.document.restapi.resource;
-
-import com.yahoo.application.Application;
-import com.yahoo.application.Networking;
-import com.yahoo.application.container.handler.Request;
-import com.yahoo.container.Container;
-import com.yahoo.jdisc.http.server.jetty.JettyHttpServer;
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpResponse;
-import org.apache.http.client.HttpClient;
-import org.apache.http.client.methods.HttpDelete;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.client.methods.HttpPost;
-import org.apache.http.client.methods.HttpPut;
-import org.apache.http.client.methods.HttpRequestBase;
-import org.apache.http.entity.ContentType;
-import org.apache.http.entity.StringEntity;
-import org.apache.http.impl.client.HttpClientBuilder;
-import org.apache.http.util.EntityUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.io.UncheckedIOException;
-import java.io.UnsupportedEncodingException;
-import java.net.URLEncoder;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Paths;
-import java.util.function.Function;
-
-import static org.hamcrest.core.Is.is;
-import static org.hamcrest.core.IsNot.not;
-import static org.hamcrest.core.StringContains.containsString;
-import static org.hamcrest.core.StringStartsWith.startsWith;
-import static org.junit.Assert.assertThat;
-
-public class RestApiTest {
-
- Application application;
-
- @Before
- public void setup() throws Exception {
- application = Application.fromApplicationPackage(Paths.get("src/test/rest-api-application"), Networking.enable);
- }
-
- @After
- public void tearDown() throws Exception {
- application.close();
- }
-
- private static class Response {
- final int code;
- final String body;
-
- Response(int code, String body) {
- this.code = code;
- this.body = body;
- }
- }
-
- String post_test_uri = "/document/v1/namespace/testdocument/docid/c";
- String post_test_doc = "{\n" +
- "\"foo\" : \"bar\"," +
- "\"fields\": {\n" +
- "\"title\": \"This is the title\",\n" +
- "\"body\": \"This is the body\"" +
- "}" +
- "}";
- String post_test_response = "{\"id\":\"id:namespace:testdocument::c\"," +
- "\"pathId\":\"/document/v1/namespace/testdocument/docid/c\"}";
-
- // Run this test to manually do request against the REST-API with backend mock.
- @Ignore
- @Test
- public void blockingTest() throws Exception {
- System.out.println("Running on port " + getFirstListenPort());
- Thread.sleep(Integer.MAX_VALUE);
- }
-
- @Test
- public void testbasicPost() throws Exception {
- Request request = new Request("http://localhost:" + getFirstListenPort() + post_test_uri);
- HttpPost httpPost = new HttpPost(request.getUri());
- StringEntity entity = new StringEntity(post_test_doc, ContentType.create("application/json"));
- httpPost.setEntity(entity);
- Response response = doRest(httpPost);
- assertThat(response.code, is(200));
- assertThat(response.body, is(post_test_response));
- }
-
- String post_test_uri_cond = "/document/v1/namespace/testdocument/docid/c?condition=foo";
- String post_test_doc_cond = "{\n" +
- "\"foo\" : \"bar\"," +
- "\"fields\": {\n" +
- "\"title\": \"This is the title\",\n" +
- "\"body\": \"This is the body\"" +
- "}" +
- "}";
- String post_test_response_cond = "{\"id\":\"id:namespace:testdocument::c\"," +
- "\"pathId\":\"/document/v1/namespace/testdocument/docid/c\"}";
-
- @Test
- public void testConditionalPost() throws Exception {
- Request request = new Request("http://localhost:" + getFirstListenPort() + post_test_uri_cond);
- HttpPost httpPost = new HttpPost(request.getUri());
- StringEntity entity = new StringEntity(post_test_doc_cond, ContentType.create("application/json"));
- httpPost.setEntity(entity);
- Response response = doRest(httpPost);
- assertThat(response.code, is(200));
- assertThat(response.body, is(post_test_response_cond));
- }
-
- @Test
- public void testEmptyPost() throws Exception {
- Request request = new Request("http://localhost:" + getFirstListenPort() + post_test_uri);
- HttpPost httpPost = new HttpPost(request.getUri());
- StringEntity entity = new StringEntity("", ContentType.create("application/json"));
- httpPost.setEntity(entity);
- assertHttp400ResponseContains(doRest(httpPost), "Could not read document, no document?");
- }
-
- String update_test_uri = "/document/v1/namespace/testdocument/docid/c";
- String update_test_doc = "{\n" +
- "\t\"fields\": {\n" +
- "\"title\": {\n" +
- "\"assign\": \"Oh lala\"\n" +
- "}\n" +
- "}\n" +
- "}\n";
-
- String update_test_response = "{\"id\":\"id:namespace:testdocument::c\"," +
- "\"pathId\":\"/document/v1/namespace/testdocument/docid/c\"}";
-
- @Test
- public void testbasicUpdate() throws Exception {
- Request request = new Request("http://localhost:" + getFirstListenPort() + update_test_uri);
- HttpPut httpPut = new HttpPut(request.getUri());
- StringEntity entity = new StringEntity(update_test_doc, ContentType.create("application/json"));
- httpPut.setEntity(entity);
- Response response = doRest(httpPut);
- assertThat(response.code, is(200));
- assertThat(response.body, is(update_test_response));
- assertThat(getLog(), not(containsString("CREATE IF NON EXISTING IS TRUE")));
- }
-
- @Test
- public void testbasicUpdateCreateTrue() throws Exception {
- Request request = new Request("http://localhost:" + getFirstListenPort() + update_test_uri + "?create=true");
- HttpPut httpPut = new HttpPut(request.getUri());
- StringEntity entity = new StringEntity(update_test_doc, ContentType.create("application/json"));
- httpPut.setEntity(entity);
- Response response = doRest(httpPut);
- assertThat(response.code, is(200));
- assertThat(response.body, is(update_test_response));
- assertThat(getLog(), containsString("CREATE IF NON EXISTENT IS TRUE"));
- }
-
- String update_test_create_if_non_existient_uri = "/document/v1/namespace/testdocument/docid/c";
- String update_test_create_if_non_existient_doc = "{\n" +
- "\"create\":true," +
- "\t\"fields\": {\n" +
- "\"title\": {\n" +
- "\"assign\": \"Oh lala\"\n" +
- "}\n" +
- "}\n" +
- "}\n";
-
- String update_test_create_if_non_existing_response = "{\"id\":\"id:namespace:testdocument::c\"," +
- "\"pathId\":\"/document/v1/namespace/testdocument/docid/c\"}";
-
- @Test
- public void testCreateIfNonExistingUpdateInDocTrue() throws Exception {
- Request request = new Request("http://localhost:" + getFirstListenPort() + update_test_create_if_non_existient_uri);
- HttpPut httpPut = new HttpPut(request.getUri());
- StringEntity entity = new StringEntity(update_test_create_if_non_existient_doc, ContentType.create("application/json"));
- httpPut.setEntity(entity);
- assertThat(doRest(httpPut).body, is(update_test_create_if_non_existing_response));
- assertThat(getLog(), containsString("CREATE IF NON EXISTENT IS TRUE"));
- }
-
- @Test
- public void testCreateIfNonExistingUpdateInDocTrueButQueryParamsFalse() throws Exception {
- Request request = new Request("http://localhost:" + getFirstListenPort() + update_test_create_if_non_existient_uri + "?create=false");
- HttpPut httpPut = new HttpPut(request.getUri());
- StringEntity entity = new StringEntity(update_test_create_if_non_existient_doc, ContentType.create("application/json"));
- httpPut.setEntity(entity);
- assertThat(doRest(httpPut).body, is(update_test_create_if_non_existing_response));
- assertThat(getLog(), not(containsString("CREATE IF NON EXISTENT IS TRUE")));
- }
-
- @Test
- public void bogus_create_parameter_value_returns_http_400_error() throws Exception {
- Request request = new Request("http://localhost:" + getFirstListenPort() + update_test_uri + "?create=batman");
- HttpPut httpPut = new HttpPut(request.getUri());
- StringEntity entity = new StringEntity(update_test_doc, ContentType.create("application/json"));
- httpPut.setEntity(entity);
- assertHttp400ResponseContains(doRest(httpPut), "Invalid value for 'create' parameter: Must be empty, true, or false but was 'batman'");
- }
-
- // Get logs through some hackish fetch method. Logs is something the mocked backend write.
- String getLog() throws IOException {
- // The mocked backend will throw a runtime exception with a log if delete is called three times..
- Request request = new Request("http://localhost:" + getFirstListenPort() + remove_test_uri);
- HttpDelete delete = new HttpDelete(request.getUri());
- doRest(delete);
- return doRest(delete).body;
- }
-
-
- String remove_test_uri = "/document/v1/namespace/testdocument/docid/c";
- String remove_test_response = "{\"id\":\"id:namespace:testdocument::c\"," +
- "\"pathId\":\"/document/v1/namespace/testdocument/docid/c\"}";
-
- @Test
- public void testbasicRemove() throws Exception {
- Request request = new Request("http://localhost:" + getFirstListenPort() + remove_test_uri);
- HttpDelete delete = new HttpDelete(request.getUri());
- Response response = doRest(delete);
- assertThat(response.code, is(200));
- assertThat(response.body, is(remove_test_response));
- }
-
- String get_test_uri = "/document/v1/namespace/document-type/docid/c";
- String get_response_part1 = "\"pathId\":\"/document/v1/namespace/document-type/docid/c\"";
- String get_response_part2 = "\"id\":\"id:namespace:document-type::c\"";
-
-
- @Test
- public void testbasicGet() throws Exception {
- Request request = new Request("http://localhost:" + getFirstListenPort() + get_test_uri);
- HttpGet get = new HttpGet(request.getUri());
- Response response = doRest(get);
- assertThat(response.code, is(404)); // Mock returns Not Found
- assertThat(response.body, containsString(get_response_part1));
- assertThat(response.body, containsString(get_response_part2));
- }
-
- String id_test_uri = "/document/v1/namespace/document-type/docid/f/u/n/n/y/!";
- String id_response_part1 = "\"pathId\":\"/document/v1/namespace/document-type/docid/f/u/n/n/y/!\"";
- String id_response_part2 = "\"id\":\"id:namespace:document-type::f/u/n/n/y/!\"";
-
- @Test
- public void testSlashesInId() throws Exception {
- Request request = new Request("http://localhost:" + getFirstListenPort() + id_test_uri);
- HttpGet get = new HttpGet(request.getUri());
- Response response = doRest(get);
- assertThat(response.code, is(404)); // Mock returns Not Found
- assertThat(response.body, containsString(id_response_part1));
- assertThat(response.body, containsString(id_response_part2));
- }
-
-
- String get_enc_id = "!\":æøå@/& Q1+";
- // Space encoded as %20, not encoding !
- String get_enc_id_encoded_v1 = "!%22%3A%C3%A6%C3%B8%C3%A5%40%2F%26%20Q1%2B";
- // Space encoded as +
- String get_enc_id_encoded_v2 = "%21%22%3A%C3%A6%C3%B8%C3%A5%40%2F%26+Q1%2B";
- String get_enc_test_uri_v1 = "/document/v1/namespace/document-type/docid/" + get_enc_id_encoded_v1;
- String get_enc_test_uri_v2 = "/document/v1/namespace/document-type/docid/" + get_enc_id_encoded_v2;
- String get_enc_response_part1 = "\"pathId\":\"/document/v1/namespace/document-type/docid/" + get_enc_id_encoded_v1 + "\"";
- String get_enc_response_part1_v2 = "\"pathId\":\"/document/v1/namespace/document-type/docid/" + get_enc_id_encoded_v2 + "\"";
-
- // JSON encode " as \"
- String get_enc_response_part2 = "\"id\":\"id:namespace:document-type::" + get_enc_id.replace("\"", "\\\"") + "\"";
-
-
- @Test
- public void testbasicEncodingV1() throws Exception {
- Request request = new Request("http://localhost:" + getFirstListenPort() + get_enc_test_uri_v1);
- HttpGet get = new HttpGet(request.getUri());
- Response response = doRest(get);
- assertThat(response.code, is(404)); // Mock returns Not Found
- assertThat(response.body, containsString(get_enc_response_part1));
- assertThat(response.body, containsString(get_enc_response_part2));
- }
-
- @Test
- public void testbasicEncodingV2() throws Exception {
- Request request = new Request("http://localhost:" + getFirstListenPort() + get_enc_test_uri_v2);
- HttpGet get = new HttpGet(request.getUri());
- Response response = doRest(get);
- assertThat(response.code, is(404)); // Mock returns Not Found
- assertThat(response.body, containsString(get_enc_response_part1_v2));
- assertThat(response.body, containsString(get_enc_response_part2));
- }
-
- @Test
- public void get_fieldset_parameter_is_propagated() {
- Request request = new Request(String.format("http://localhost:%s/document/v1/namespace/document-type/docid/bar?fieldSet=foo,baz", getFirstListenPort()));
- HttpGet get = new HttpGet(request.getUri());
- assertHttp200ResponseContains(doRest(get), "\"fieldset\":\"foo,baz\"");
- }
-
- @Test
- public void get_cluster_parameter_is_propagated() {
- Request request = new Request(String.format("http://localhost:%s/document/v1/namespace/document-type/docid/bar?cluster=my_cool_cluster", getFirstListenPort()));
- HttpGet get = new HttpGet(request.getUri());
- assertHttp200ResponseContains(doRest(get), "\"cluster\":\"my_cool_cluster\"");
- }
-
- String visit_test_uri = "/document/v1/namespace/document-type/docid/?continuation=abc";
- String visit_response_part1 = "\"documents\":[List of json docs, cont token abc, doc selection: '']";
- String visit_response_part2 = "\"continuation\":\"token\"";
- String visit_response_part3 = "\"pathId\":\"/document/v1/namespace/document-type/docid/\"";
-
- @Test
- public void testbasicVisit() throws Exception {
- Request request = new Request("http://localhost:" + getFirstListenPort() + visit_test_uri);
- HttpGet get = new HttpGet(request.getUri());
- Response response = doRest(get);
- assertThat(response.code, is(200));
- assertThat(response.body, containsString(visit_response_part1));
- assertThat(response.body, containsString(visit_response_part2));
- assertThat(response.body, containsString(visit_response_part3));
- }
-
- private static String encoded(String original) {
- try {
- return URLEncoder.encode(original, StandardCharsets.UTF_8.name());
- } catch (UnsupportedEncodingException e) {
- throw new RuntimeException(e);
- }
- }
-
- private static String defaultPathPrefix() {
- return "namespace/document-type/";
- }
-
- private Response performV1RestCall(String pathPrefix, String pathSuffix, Function<Request, HttpRequestBase> methodOp) {
- try {
- Request request = new Request(String.format("http://localhost:%s/document/v1/%s%s",
- getFirstListenPort(), pathPrefix, pathSuffix));
- HttpRequestBase restOp = methodOp.apply(request);
- return doRest(restOp);
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- }
-
- private Response performV1GetRestCall(String pathSuffix) {
- return performV1RestCall(defaultPathPrefix(), pathSuffix, (request) -> new HttpGet(request.getUri()));
- }
-
- private void doTestRootPathNotAccepted(Function<Request, HttpRequestBase> methodOpFactory) {
- Response response = performV1RestCall("", "", methodOpFactory);
- assertHttp400ResponseContains(response, "Root /document/v1/ requests only supported for HTTP GET");
- }
-
- @Test
- public void root_api_path_not_accepted_for_http_put() {
- doTestRootPathNotAccepted((request) -> new HttpPut(request.getUri()));
- }
-
- @Test
- public void root_api_path_not_accepted_for_http_post() {
- doTestRootPathNotAccepted((request) -> new HttpPost(request.getUri()));
- }
-
- @Test
- public void root_api_path_not_accepted_for_http_delete() {
- doTestRootPathNotAccepted((request) -> new HttpDelete(request.getUri()));
- }
-
- private void assertResultingDocumentSelection(String suffix, String expected) {
- Response response = performV1GetRestCall(suffix);
- assertHttp200ResponseContains(response, String.format("doc selection: '%s'", expected));
- }
-
- @Test
- public void testUseExpressionOnVisit() throws Exception {
- assertResultingDocumentSelection("group/abc?continuation=xyz", "id.group=='abc'");
- }
-
- private void assertGroupDocumentSelection(String group, String expected) {
- assertResultingDocumentSelection("group/" + encoded(group), expected);
- }
-
- @Test
- public void group_strings_are_escaped() {
- assertGroupDocumentSelection("'", "id.group=='\\''");
- assertGroupDocumentSelection("hello 'world'", "id.group=='hello \\'world\\''");
- assertGroupDocumentSelection("' goodbye moon", "id.group=='\\' goodbye moon'");
- }
-
- private void assertNumericIdFailsParsing(String id) {
- Response response = performV1GetRestCall(String.format("number/%s", encoded(id)));
- assertHttp400ResponseContains(response, "Failed to parse numeric part of selection URI");
- }
-
- @Test
- public void invalid_numeric_id_returns_error() {
- assertNumericIdFailsParsing("123a");
- assertNumericIdFailsParsing("a123");
- assertNumericIdFailsParsing("0x1234");
- assertNumericIdFailsParsing("\u0000");
- }
-
- @Test
- public void non_text_group_string_character_returns_error() {
- Response response = performV1GetRestCall(String.format("group/%s", encoded("\u001f")));
- assertHttp400ResponseContains(response, "Failed to parse group part of selection URI; contains invalid text code point U001F");
- }
-
- @Test
- public void can_specify_numeric_id_without_explicit_selection() {
- assertResultingDocumentSelection("number/1234", "id.user==1234");
- }
-
- @Test
- public void can_specify_group_id_without_explicit_selection() {
- assertResultingDocumentSelection("group/foo", "id.group=='foo'");
- }
-
- @Test
- public void can_specify_both_numeric_id_and_explicit_selection() {
- assertResultingDocumentSelection(String.format("number/1234?selection=%s", encoded("1 != 2")),
- "id.user==1234 and (1 != 2)");
- }
-
- @Test
- public void can_specify_both_group_id_and_explicit_selection() {
- assertResultingDocumentSelection(String.format("group/bar?selection=%s", encoded("3 != 4")),
- "id.group=='bar' and (3 != 4)");
- }
-
- private void assertDocumentSelectionFailsParsing(String expression) {
- Response response = performV1GetRestCall(String.format("number/1234?selection=%s", encoded(expression)));
- assertHttp400ResponseContains(response, "Failed to parse expression given in 'selection' parameter. Must be a complete and valid sub-expression.");
- }
-
- // Make sure that typoing the selection parameter doesn't corrupt the entire selection expression
- @Test
- public void explicit_selection_sub_expression_is_validated_for_completeness() {
- assertDocumentSelectionFailsParsing("1 +");
- assertDocumentSelectionFailsParsing(") or true");
- assertDocumentSelectionFailsParsing("((1 + 2)");
- assertDocumentSelectionFailsParsing("true) or (true");
- }
-
- @Test
- public void wanted_document_count_returned_parameter_is_propagated() {
- Request request = new Request(String.format("http://localhost:%s/document/v1/namespace/document-type/docid/?wantedDocumentCount=321", getFirstListenPort()));
- HttpGet get = new HttpGet(request.getUri());
- assertHttp200ResponseContains(doRest(get), "min docs returned: 321");
- }
-
- @Test
- public void invalid_wanted_document_count_parameter_returns_error_response() {
- Request request = new Request(String.format("http://localhost:%s/document/v1/namespace/document-type/docid/?wantedDocumentCount=aardvark", getFirstListenPort()));
- HttpGet get = new HttpGet(request.getUri());
- assertHttp400ResponseContains(doRest(get), "Invalid 'wantedDocumentCount' value. Expected positive integer");
- }
-
- @Test
- public void negative_document_count_parameter_returns_error_response() {
- Request request = new Request(String.format("http://localhost:%s/document/v1/namespace/document-type/docid/?wantedDocumentCount=-1", getFirstListenPort()));
- HttpGet get = new HttpGet(request.getUri());
- assertHttp400ResponseContains(doRest(get), "Invalid 'wantedDocumentCount' value. Expected positive integer");
- }
-
- @Test
- public void visit_fieldset_parameter_is_propagated() {
- Request request = new Request(String.format("http://localhost:%s/document/v1/namespace/document-type/docid/?fieldSet=foo,baz", getFirstListenPort()));
- HttpGet get = new HttpGet(request.getUri());
- assertHttp200ResponseContains(doRest(get), "field set: 'foo,baz'");
- }
-
- @Test
- public void visit_concurrency_parameter_is_propagated() {
- Request request = new Request(String.format("http://localhost:%s/document/v1/namespace/document-type/docid/?concurrency=42", getFirstListenPort()));
- HttpGet get = new HttpGet(request.getUri());
- assertHttp200ResponseContains(doRest(get), "concurrency: 42");
- }
-
- @Test
- public void root_api_visit_cluster_parameter_is_propagated() {
- Request request = new Request(String.format("http://localhost:%s/document/v1/?cluster=vaffel", getFirstListenPort()));
- HttpGet get = new HttpGet(request.getUri());
- assertHttp200ResponseContains(doRest(get), "cluster: 'vaffel'");
- }
-
- @Test
- public void root_api_visit_selection_parameter_is_propagated() {
- Request request = new Request(String.format("http://localhost:%s/document/v1/?cluster=foo&selection=yoshi", getFirstListenPort()));
- HttpGet get = new HttpGet(request.getUri());
- assertHttp200ResponseContains(doRest(get), "doc selection: 'yoshi'");
- }
-
- @Test
- public void root_api_visit_bucket_space_parameter_is_propagated() {
- Request request = new Request(String.format("http://localhost:%s/document/v1/?cluster=foo&bucketSpace=global", getFirstListenPort()));
- HttpGet get = new HttpGet(request.getUri());
- assertHttp200ResponseContains(doRest(get), "bucket space: 'global'");
- }
-
- @Test
- public void invalid_visit_concurrency_parameter_returns_error_response() {
- Request request = new Request(String.format("http://localhost:%s/document/v1/namespace/document-type/docid/?concurrency=badgers", getFirstListenPort()));
- HttpGet get = new HttpGet(request.getUri());
- assertHttp400ResponseContains(doRest(get), "Invalid 'concurrency' value. Expected positive integer");
- }
-
- private void assertHttpResponseContains(Response response, int expectedStatusCode, String expectedSubstring) {
- assertThat(response.code, is(expectedStatusCode));
- assertThat(response.body, containsString(expectedSubstring));
- }
-
- private void assertHttp200ResponseContains(Response response, String expectedSubstring) {
- assertHttpResponseContains(response, 200, expectedSubstring);
- }
-
- private void assertHttp400ResponseContains(Response response, String expectedSubstring) {
- assertHttpResponseContains(response, 400, expectedSubstring);
- }
-
- private Response doRest(HttpRequestBase request) {
- HttpClient client = HttpClientBuilder.create().build();
- try {
- HttpResponse response = client.execute(request);
- assertThat(response.getEntity().getContentType().getValue().toString(), startsWith("application/json;"));
- HttpEntity entity = response.getEntity();
- return new Response(response.getStatusLine().getStatusCode(), EntityUtils.toString(entity));
- } catch (IOException e) {
- throw new UncheckedIOException(e);
- }
- }
-
- private String getFirstListenPort() {
- JettyHttpServer serverProvider =
- (JettyHttpServer) Container.get().getServerProviderRegistry().allComponents().get(0);
- return Integer.toString(serverProvider.getListenPort());
- }
-
-}
diff --git a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiWithTestDocumentHandler.java b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiWithTestDocumentHandler.java
deleted file mode 100644
index db782877a6f..00000000000
--- a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiWithTestDocumentHandler.java
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.document.restapi.resource;
-
-import com.yahoo.container.logging.AccessLog;
-import com.yahoo.document.DataType;
-import com.yahoo.document.DocumentType;
-import com.yahoo.document.DocumentTypeManager;
-import com.yahoo.document.restapi.OperationHandler;
-
-import java.util.concurrent.Executor;
-
-/**
- * For setting up RestApi with a simple document type manager.
- *
- * @author dybis
- */
-public class RestApiWithTestDocumentHandler extends RestApi{
-
- private DocumentTypeManager docTypeManager = new DocumentTypeManager();
-
- public RestApiWithTestDocumentHandler(
- Executor executor,
- AccessLog accessLog,
- OperationHandler operationHandler) {
- super(executor, accessLog, operationHandler, 20);
-
- DocumentType documentType = new DocumentType("testdocument");
-
- documentType.addField("title", DataType.STRING);
- documentType.addField("body", DataType.STRING);
- docTypeManager.registerDocumentType(documentType);
-
- setDocTypeManagerForTests(docTypeManager);
- }
-
-}
diff --git a/vespaclient-container-plugin/src/test/rest-api-application/services.xml b/vespaclient-container-plugin/src/test/rest-api-application/services.xml
deleted file mode 100644
index ae1b87635a9..00000000000
--- a/vespaclient-container-plugin/src/test/rest-api-application/services.xml
+++ /dev/null
@@ -1,19 +0,0 @@
-<?xml version="1.0" encoding="utf-8" ?>
-<!-- Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
-<container version="1.0" jetty="true">
-
- <accesslog type="disabled"/>
-
- <handler id="com.yahoo.document.restapi.resource.RestApiWithTestDocumentHandler" bundle="integration-test">
- <binding>http://*/document/v1/*</binding>
- </handler>
-
- <component id="injected" class="com.yahoo.document.restapi.resource.MockedOperationHandler" bundle="integration-test">
- </component>
-
-
- <http>
- <!-- This indicates that we want JDisc to allocate a port for us -->
- <server id="mainServer" port="0" />
- </http>
-</container>