-rw-r--r--  CMakeLists.txt | 3
-rw-r--r--  Code-map.md | 3
-rw-r--r--  application-model/src/main/java/com/yahoo/vespa/applicationmodel/ServiceStatusInfo.java | 10
-rw-r--r--  application/pom.xml | 6
-rw-r--r--  bundle-plugin/src/main/java/com/yahoo/container/plugin/mojo/GenerateOsgiManifestMojo.java | 11
-rw-r--r--  cloud-tenant-base-dependencies-enforcer/pom.xml | 14
-rw-r--r--  clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/HierarchicalGroupVisiting.java | 4
-rw-r--r--  clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/HierarchicalGroupVisitingAdapter.java | 24
-rw-r--r--  clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeStateChangeChecker.java | 91
-rw-r--r--  clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DummyVdsNode.java | 1
-rw-r--r--  clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java | 1
-rw-r--r--  clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeStateChangeCheckerTest.java | 169
-rw-r--r--  clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/SetNodeStateTest.java | 38
-rw-r--r--  clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/StateRestApiTest.java | 5
-rw-r--r--  component/CMakeLists.txt | 2
-rw-r--r--  component/abi-spec.json | 15
-rw-r--r--  component/src/main/java/com/yahoo/container/di/componentgraph/Provider.java (renamed from container-di/src/main/java/com/yahoo/container/di/componentgraph/Provider.java) | 0
-rw-r--r--  component/src/main/java/com/yahoo/container/di/componentgraph/package-info.java | 12
-rw-r--r--  config-application-package/src/main/java/com/yahoo/config/model/application/provider/SchemaValidator.java | 32
-rw-r--r--  config-model-api/abi-spec.json | 3
-rw-r--r--  config-model-api/src/main/java/com/yahoo/config/application/api/DeployLogger.java | 8
-rw-r--r--  config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java | 2
-rw-r--r--  config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java | 6
-rw-r--r--  config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java | 21
-rw-r--r--  config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java | 13
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/MapEvaluationTypeContext.java | 39
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/RankProfile.java | 24
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/SDDocumentTypeOrderer.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/derived/AttributeFields.java | 21
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexInfo.java | 39
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/derived/SummaryClass.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/document/Attribute.java | 9
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/document/Case.java | 15
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/document/Dictionary.java | 27
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/document/Matching.java | 4
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/document/SDField.java | 24
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/fieldoperation/DictionaryOperation.java | 36
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/fieldoperation/MatchOperation.java | 9
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/processing/CreatePositionZCurve.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/processing/DictionaryProcessor.java | 20
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/processing/FilterFieldNames.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/processing/ImplicitSummaries.java | 4
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/processing/Processor.java | 8
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/processing/RankingExpressionTypeResolver.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/processing/ReservedFunctionNames.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/processing/SummaryDiskAccessValidator.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/processing/TensorFieldProcessor.java | 8
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/processing/multifieldresolver/IndexCommandResolver.java | 4
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/processing/multifieldresolver/RankTypeResolver.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/processing/multifieldresolver/StemmingResolver.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java | 4
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java | 15
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/admin/LogserverContainerCluster.java | 3
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/admin/clustercontroller/ClusterControllerContainer.java | 15
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/admin/clustercontroller/ClusterControllerContainerCluster.java | 1
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java | 1
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/application/validation/ComponentValidator.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/application/validation/RankSetupValidator.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/application/validation/StreamingValidator.java | 4
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/application/validation/UriBindingsValidator.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ConfigValueChangeValidator.java | 4
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/VespaRestartAction.java | 1
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidator.java | 17
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/builder/UserConfigBuilder.java | 4
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV2Builder.java | 6
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV4Builder.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomHandlerBuilder.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainer.java | 12
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/container/Container.java | 5
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/container/component/AccessLogComponent.java | 8
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/container/http/ConnectorFactory.java | 9
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/container/http/ssl/HostedSslConnectorFactory.java | 23
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/container/http/xml/HttpBuilder.java | 19
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/container/http/xml/JettyConnectorBuilder.java | 4
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/container/search/QueryProfiles.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java | 11
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java | 16
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/content/DistributorCluster.java | 9
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/content/Redundancy.java | 8
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/content/StorageGroup.java | 44
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java | 130
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomTuningDispatchBuilder.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/content/cluster/RedundancyBuilder.java | 10
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/ml/ConvertedModel.java | 46
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/search/SearchNode.java | 17
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/search/TransactionLogServer.java | 17
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java | 2
-rw-r--r--  config-model/src/main/javacc/SDParser.jj | 59
-rwxr-xr-x  config-model/src/main/perl/vespa-deploy | 2
-rw-r--r--  config-model/src/main/resources/schema/common.rnc | 2
-rw-r--r--  config-model/src/main/resources/schema/content.rnc | 1
-rw-r--r--  config-model/src/test/cfg/application/invalid-services-syntax/services.xml | 11
-rw-r--r--  config-model/src/test/derived/advanced/attributes.cfg | 3
-rw-r--r--  config-model/src/test/derived/array_of_struct_attribute/attributes.cfg | 6
-rw-r--r--  config-model/src/test/derived/array_of_struct_attribute/index-info.cfg | 2
-rw-r--r--  config-model/src/test/derived/attributeprefetch/attributes.cfg | 54
-rw-r--r--  config-model/src/test/derived/attributeprefetch/index-info.cfg | 6
-rw-r--r--  config-model/src/test/derived/attributes/attributes.cfg | 54
-rw-r--r--  config-model/src/test/derived/attributes/index-info.cfg | 12
-rw-r--r--  config-model/src/test/derived/combinedattributeandindexsearch/index-info.cfg | 4
-rw-r--r--  config-model/src/test/derived/complex/attributes.cfg | 27
-rw-r--r--  config-model/src/test/derived/exactmatch/index-info.cfg | 6
-rw-r--r--  config-model/src/test/derived/hnsw_index/attributes.cfg | 6
-rw-r--r--  config-model/src/test/derived/imported_fields_inherited_reference/attributes.cfg | 12
-rw-r--r--  config-model/src/test/derived/imported_position_field/attributes.cfg | 6
-rw-r--r--  config-model/src/test/derived/imported_struct_fields/attributes.cfg | 24
-rw-r--r--  config-model/src/test/derived/imported_struct_fields/index-info.cfg | 8
-rw-r--r--  config-model/src/test/derived/importedfields/attributes.cfg | 24
-rw-r--r--  config-model/src/test/derived/importedfields/index-info.cfg | 4
-rw-r--r--  config-model/src/test/derived/indexinfo_lowercase/index-info.cfg | 6
-rw-r--r--  config-model/src/test/derived/indexschema/index-info.cfg | 8
-rw-r--r--  config-model/src/test/derived/inheritance/attributes.cfg | 9
-rw-r--r--  config-model/src/test/derived/inheritfromparent/attributes.cfg | 3
-rw-r--r--  config-model/src/test/derived/map_attribute/attributes.cfg | 9
-rw-r--r--  config-model/src/test/derived/map_attribute/index-info.cfg | 4
-rw-r--r--  config-model/src/test/derived/map_of_struct_attribute/attributes.cfg | 15
-rw-r--r--  config-model/src/test/derived/map_of_struct_attribute/index-info.cfg | 6
-rw-r--r--  config-model/src/test/derived/music/attributes.cfg | 33
-rw-r--r--  config-model/src/test/derived/music/index-info.cfg | 4
-rw-r--r--  config-model/src/test/derived/newrank/attributes.cfg | 30
-rw-r--r--  config-model/src/test/derived/predicate_attribute/attributes.cfg | 3
-rw-r--r--  config-model/src/test/derived/prefixexactattribute/attributes.cfg | 6
-rw-r--r--  config-model/src/test/derived/prefixexactattribute/index-info.cfg | 4
-rw-r--r--  config-model/src/test/derived/reference_fields/attributes.cfg | 9
-rw-r--r--  config-model/src/test/derived/sorting/attributes.cfg | 9
-rw-r--r--  config-model/src/test/derived/tensor/attributes.cfg | 15
-rw-r--r--  config-model/src/test/derived/types/attributes.cfg | 39
-rw-r--r--  config-model/src/test/derived/types/index-info.cfg | 12
-rw-r--r--  config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java | 12
-rw-r--r--  config-model/src/test/java/com/yahoo/config/model/application/provider/SchemaValidatorTest.java | 16
-rw-r--r--  config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java | 245
-rw-r--r--  config-model/src/test/java/com/yahoo/searchdefinition/RankingExpressionShadowingTestCase.java | 10
-rw-r--r--  config-model/src/test/java/com/yahoo/searchdefinition/processing/DictionaryTestCase.java | 204
-rw-r--r--  config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionTypeResolverTestCase.java | 106
-rw-r--r--  config-model/src/test/java/com/yahoo/searchdefinition/processing/TensorFieldTestCase.java | 19
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/admin/ClusterControllerTestCase.java | 8
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerTest.java | 4
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/application/validation/QuotaValidatorTest.java | 14
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/application/validation/ValidationTester.java | 4
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ClusterSizeReductionValidatorTest.java | 4
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ConfigChangeTestUtils.java | 1
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ContentClusterRemovalValidatorTest.java | 2
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidatorTest.java | 24
-rwxr-xr-x  config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV2BuilderTest.java | 4
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/container/xml/AccessControlTest.java | 56
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/container/xml/AccessLogTest.java | 3
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java | 67
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/content/ContentSearchClusterTest.java | 13
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/content/utils/ContentClusterBuilder.java | 13
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/ml/ModelEvaluationTest.java | 1
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/search/test/SearchNodeTest.java | 17
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java | 8
-rw-r--r--  config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigRequester.java | 12
-rw-r--r--  config/src/main/java/com/yahoo/config/subscription/impl/MockConnection.java | 8
-rw-r--r--  config/src/main/java/com/yahoo/vespa/config/ConnectionPool.java | 26
-rw-r--r--  config/src/main/java/com/yahoo/vespa/config/JRTConnection.java | 62
-rw-r--r--  config/src/main/java/com/yahoo/vespa/config/JRTConnectionPool.java | 51
-rw-r--r--  config/src/test/java/com/yahoo/vespa/config/JRTConnectionPoolTest.java | 61
-rw-r--r--  configdefinitions/src/vespa/attributes.def | 3
-rw-r--r--  configdefinitions/src/vespa/configserver.def | 1
-rw-r--r--  configserver-client/pom.xml | 8
-rw-r--r--  configserver-client/src/main/java/ai/vespa/hosted/client/AbstractConfigServerClient.java | 179
-rw-r--r--  configserver-client/src/main/java/ai/vespa/hosted/client/ConfigServerClient.java | 223
-rw-r--r--  configserver-client/src/main/java/ai/vespa/hosted/client/HttpConfigServerClient.java | 13
-rw-r--r--  configserver-client/src/test/java/ai/vespa/hosted/client/HttpConfigServerClientTest.java | 25
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java | 22
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java | 16
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/application/ConfigConvergenceChecker.java | 5
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/deploy/DeployHandlerLogger.java | 25
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java | 17
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionUtil.java | 2
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/http/LogRetriever.java | 5
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/http/ProxyResponse.java | 10
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/http/SecretStoreValidator.java | 1
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/http/TesterClient.java | 5
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/DeploymentMetricsResponse.java | 26
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ProtonMetricsResponse.java | 29
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java | 12
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java | 11
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsRetriever.java | 2
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ProtonMetricsRetriever.java | 2
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java | 2
-rw-r--r--  configserver/src/main/resources/configserver-app/services.xml | 39
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java | 7
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterProtonMetricsRetrieverTest.java | 2
-rw-r--r--  configserver/src/test/resources/metrics/container_metrics | 53
-rw-r--r--  configserver/src/test/resources/metrics/container_metrics.json | 51
-rw-r--r--  configserver/src/test/resources/metrics/content_metrics | 20
-rw-r--r--  configserver/src/test/resources/metrics/content_metrics.json | 36
-rw-r--r--  configserver/src/test/resources/metrics/node_metrics_1.json (renamed from configserver/src/test/resources/metrics/node_metrics_1) | 0
-rw-r--r--  configserver/src/test/resources/metrics/node_metrics_2.json (renamed from configserver/src/test/resources/metrics/node_metrics_2) | 0
-rw-r--r--  container-core/OWNERS | 1
-rw-r--r--  container-core/abi-spec.json | 448
-rw-r--r--  container-core/pom.xml | 41
-rw-r--r--  container-core/src/main/java/com/yahoo/container/bundle/BundleInstantiationSpecification.java (renamed from container-di/src/main/java/com/yahoo/container/bundle/BundleInstantiationSpecification.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/bundle/MockBundle.java (renamed from container-di/src/main/java/com/yahoo/container/bundle/MockBundle.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/bundle/package-info.java (renamed from container-di/src/main/java/com/yahoo/container/bundle/package-info.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/di/CloudSubscriberFactory.java (renamed from container-di/src/main/java/com/yahoo/container/di/CloudSubscriberFactory.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/di/ComponentDeconstructor.java (renamed from container-di/src/main/java/com/yahoo/container/di/ComponentDeconstructor.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/di/ConfigRetriever.java (renamed from container-di/src/main/java/com/yahoo/container/di/ConfigRetriever.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/di/Container.java (renamed from container-di/src/main/java/com/yahoo/container/di/Container.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/di/Osgi.java (renamed from container-di/src/main/java/com/yahoo/container/di/Osgi.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentGraph.java (renamed from container-di/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentGraph.java) | 7
-rw-r--r--  container-core/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentNode.java (renamed from container-di/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentNode.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentRegistryNode.java (renamed from container-di/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentRegistryNode.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/di/componentgraph/core/Exceptions.java (renamed from container-di/src/main/java/com/yahoo/container/di/componentgraph/core/Exceptions.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/di/componentgraph/core/GuiceNode.java (renamed from container-di/src/main/java/com/yahoo/container/di/componentgraph/core/GuiceNode.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/di/componentgraph/core/JerseyNode.java (renamed from container-di/src/main/java/com/yahoo/container/di/componentgraph/core/JerseyNode.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/di/componentgraph/core/Keys.java (renamed from container-di/src/main/java/com/yahoo/container/di/componentgraph/core/Keys.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/di/componentgraph/core/Node.java (renamed from container-di/src/main/java/com/yahoo/container/di/componentgraph/core/Node.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/di/componentgraph/core/package-info.java (renamed from container-di/src/main/java/com/yahoo/container/di/componentgraph/core/package-info.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/di/componentgraph/cycle/CycleFinder.java (renamed from container-di/src/main/java/com/yahoo/container/di/componentgraph/cycle/CycleFinder.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/di/componentgraph/cycle/Graph.java (renamed from container-di/src/main/java/com/yahoo/container/di/componentgraph/cycle/Graph.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/di/config/ResolveDependencyException.java (renamed from container-di/src/main/java/com/yahoo/container/di/config/ResolveDependencyException.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/di/config/RestApiContext.java (renamed from container-di/src/main/java/com/yahoo/container/di/config/RestApiContext.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/di/config/Subscriber.java (renamed from container-di/src/main/java/com/yahoo/container/di/config/Subscriber.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/di/config/SubscriberFactory.java (renamed from container-di/src/main/java/com/yahoo/container/di/config/SubscriberFactory.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/di/config/package-info.java (renamed from container-di/src/main/java/com/yahoo/container/di/config/package-info.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/di/osgi/BundleClasses.java (renamed from container-di/src/main/java/com/yahoo/container/di/osgi/BundleClasses.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/di/osgi/OsgiUtil.java (renamed from container-di/src/main/java/com/yahoo/container/di/osgi/OsgiUtil.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/di/osgi/package-info.java (renamed from container-di/src/main/java/com/yahoo/container/di/osgi/package-info.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/container/handler/LogHandler.java | 80
-rw-r--r--  container-core/src/main/java/com/yahoo/container/jdisc/AclMapping.java | 50
-rw-r--r--  container-core/src/main/java/com/yahoo/container/jdisc/HttpMethodAclMapping.java | 71
-rw-r--r--  container-core/src/main/java/com/yahoo/container/jdisc/HttpRequestBuilder.java | 71
-rw-r--r--  container-core/src/main/java/com/yahoo/container/jdisc/HttpRequestHandler.java | 20
-rw-r--r--  container-core/src/main/java/com/yahoo/container/jdisc/HttpResponse.java | 3
-rw-r--r--  container-core/src/main/java/com/yahoo/container/jdisc/RequestHandlerSpec.java | 46
-rw-r--r--  container-core/src/main/java/com/yahoo/container/jdisc/RequestView.java | 18
-rw-r--r--  container-core/src/main/java/com/yahoo/container/jdisc/ThreadedHttpRequestHandler.java | 87
-rw-r--r--  container-core/src/main/java/com/yahoo/container/logging/AccessLogHandler.java | 2
-rw-r--r--  container-core/src/main/java/com/yahoo/container/logging/ConnectionLogEntry.java | 17
-rw-r--r--  container-core/src/main/java/com/yahoo/container/logging/ConnectionLogHandler.java | 4
-rw-r--r--  container-core/src/main/java/com/yahoo/container/logging/FileConnectionLog.java | 2
-rw-r--r--  container-core/src/main/java/com/yahoo/container/logging/JsonConnectionLogWriter.java | 32
-rw-r--r--  container-core/src/main/java/com/yahoo/container/logging/LogFileHandler.java | 23
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/HttpRequest.java | 9
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterRequest.java | 14
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/AccessLogRequestLog.java | 2
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/AccessLoggingRequestHandler.java | 7
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactory.java | 89
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FilterResolver.java | 32
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FilteringRequestHandler.java | 30
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FormPostRequestHandler.java | 8
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HealthCheckProxyHandler.java | 4
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestDispatch.java | 31
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestFactory.java | 9
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscFilterInvokerFilter.java | 16
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscHttpServlet.java | 9
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyConnectionLogger.java | 22
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ReferenceCountingRequestHandler.java | 8
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/RequestUtils.java (renamed from container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpServletRequestUtils.java) | 25
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/SecuredRedirectHandler.java | 4
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/TlsClientAuthenticationEnforcer.java | 10
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/testutils/ConnectorFactoryRegistryModule.java (renamed from container-core/src/test/java/com/yahoo/jdisc/http/guiceModules/ConnectorFactoryRegistryModule.java) | 18
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/testutils/ServletModule.java (renamed from container-core/src/test/java/com/yahoo/jdisc/http/guiceModules/ServletModule.java) | 7
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/testutils/TestDriver.java | 122
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/servlet/ServletRequest.java | 5
-rw-r--r--  container-core/src/main/java/com/yahoo/metrics/simple/Bucket.java (renamed from simplemetrics/src/main/java/com/yahoo/metrics/simple/Bucket.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/metrics/simple/Counter.java (renamed from simplemetrics/src/main/java/com/yahoo/metrics/simple/Counter.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/metrics/simple/DimensionCache.java (renamed from simplemetrics/src/main/java/com/yahoo/metrics/simple/DimensionCache.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/metrics/simple/Gauge.java (renamed from simplemetrics/src/main/java/com/yahoo/metrics/simple/Gauge.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/metrics/simple/Identifier.java (renamed from simplemetrics/src/main/java/com/yahoo/metrics/simple/Identifier.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/metrics/simple/Measurement.java (renamed from simplemetrics/src/main/java/com/yahoo/metrics/simple/Measurement.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/metrics/simple/MetricAggregator.java (renamed from simplemetrics/src/main/java/com/yahoo/metrics/simple/MetricAggregator.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/metrics/simple/MetricManager.java (renamed from simplemetrics/src/main/java/com/yahoo/metrics/simple/MetricManager.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/metrics/simple/MetricReceiver.java (renamed from simplemetrics/src/main/java/com/yahoo/metrics/simple/MetricReceiver.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/metrics/simple/MetricSettings.java (renamed from simplemetrics/src/main/java/com/yahoo/metrics/simple/MetricSettings.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/metrics/simple/MetricUpdater.java (renamed from simplemetrics/src/main/java/com/yahoo/metrics/simple/MetricUpdater.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/metrics/simple/Point.java (renamed from simplemetrics/src/main/java/com/yahoo/metrics/simple/Point.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/metrics/simple/PointBuilder.java (renamed from simplemetrics/src/main/java/com/yahoo/metrics/simple/PointBuilder.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/metrics/simple/Sample.java (renamed from simplemetrics/src/main/java/com/yahoo/metrics/simple/Sample.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/metrics/simple/UnitTestSetup.java (renamed from simplemetrics/src/main/java/com/yahoo/metrics/simple/UnitTestSetup.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/metrics/simple/UntypedMetric.java (renamed from simplemetrics/src/main/java/com/yahoo/metrics/simple/UntypedMetric.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/metrics/simple/Value.java (renamed from simplemetrics/src/main/java/com/yahoo/metrics/simple/Value.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/metrics/simple/jdisc/JdiscMetricsFactory.java (renamed from simplemetrics/src/main/java/com/yahoo/metrics/simple/jdisc/JdiscMetricsFactory.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/metrics/simple/jdisc/SimpleMetricConsumer.java (renamed from simplemetrics/src/main/java/com/yahoo/metrics/simple/jdisc/SimpleMetricConsumer.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/metrics/simple/jdisc/SnapshotConverter.java (renamed from simplemetrics/src/main/java/com/yahoo/metrics/simple/jdisc/SnapshotConverter.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/metrics/simple/jdisc/package-info.java (renamed from simplemetrics/src/main/java/com/yahoo/metrics/simple/jdisc/package-info.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/metrics/simple/package-info.java (renamed from simplemetrics/src/main/java/com/yahoo/metrics/simple/package-info.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/metrics/simple/runtime/MetricProperties.java (renamed from simplemetrics/src/main/java/com/yahoo/metrics/simple/runtime/MetricProperties.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/metrics/simple/runtime/package-info.java (renamed from simplemetrics/src/main/java/com/yahoo/metrics/simple/runtime/package-info.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/osgi/provider/model/ComponentModel.java (renamed from container-di/src/main/java/com/yahoo/osgi/provider/model/ComponentModel.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/osgi/provider/model/package-info.java (renamed from container-di/src/main/java/com/yahoo/osgi/provider/model/package-info.java) | 0
-rw-r--r--  container-core/src/main/java/com/yahoo/processing/test/ProcessorLibrary.java | 1
-rw-r--r--  container-core/src/main/java/com/yahoo/restapi/ErrorResponse.java | 8
-rw-r--r--  container-core/src/main/java/com/yahoo/restapi/RestApi.java | 1
-rw-r--r--  container-core/src/main/java/com/yahoo/restapi/RestApiException.java | 19
-rw-r--r--  container-core/src/main/java/com/yahoo/restapi/RestApiImpl.java | 9
-rw-r--r--  container-core/src/main/java/com/yahoo/restapi/RestApiRequestHandler.java | 19
-rw-r--r--  container-core/src/main/java/com/yahoo/restapi/RestApiTestDriver.java | 96
-rw-r--r--  container-core/src/main/resources/configdefinitions/application-bundles.def (renamed from container-di/src/main/resources/configdefinitions/application-bundles.def) | 0
-rw-r--r--  container-core/src/main/resources/configdefinitions/container.components.def (renamed from container-di/src/main/resources/configdefinitions/container.components.def) | 0
-rw-r--r--  container-core/src/main/resources/configdefinitions/container.core.access-log.def | 3
-rw-r--r--  container-core/src/main/resources/configdefinitions/container.di.config.jersey-bundles.def (renamed from container-di/src/main/resources/configdefinitions/container.di.config.jersey-bundles.def) | 0
-rw-r--r--  container-core/src/main/resources/configdefinitions/container.di.config.jersey-injection.def (renamed from container-di/src/main/resources/configdefinitions/container.di.config.jersey-injection.def) | 0
-rw-r--r--  container-core/src/main/resources/configdefinitions/container.logging.connection-log.def | 5
-rw-r--r--  container-core/src/main/resources/configdefinitions/jdisc.http.jdisc.http.connector.def | 3
-rw-r--r--  container-core/src/main/resources/configdefinitions/metrics.manager.def (renamed from simplemetrics/src/main/resources/configdefinitions/metrics.manager.def) | 0
-rw-r--r--  container-core/src/main/resources/configdefinitions/platform-bundles.def (renamed from container-di/src/main/resources/configdefinitions/platform-bundles.def) | 0
-rw-r--r--  container-core/src/test/java/com/yahoo/component/ComponentSpecTestCase.java (renamed from container-di/src/test/java/com/yahoo/component/ComponentSpecTestCase.java) | 0
-rw-r--r--  container-core/src/test/java/com/yahoo/component/provider/test/ComponentRegistryTestCase.java (renamed from container-di/src/test/java/com/yahoo/component/provider/test/ComponentRegistryTestCase.java) | 0
-rw-r--r--  container-core/src/test/java/com/yahoo/component/test/ComponentIdTestCase.java (renamed from container-di/src/test/java/com/yahoo/component/test/ComponentIdTestCase.java) | 0
-rw-r--r--  container-core/src/test/java/com/yahoo/container/di/ConfigRetrieverTest.java (renamed from container-di/src/test/java/com/yahoo/container/di/ConfigRetrieverTest.java) | 0
-rw-r--r--  container-core/src/test/java/com/yahoo/container/di/ContainerTest.java (renamed from container-di/src/test/java/com/yahoo/container/di/ContainerTest.java) | 0
-rw-r--r--  container-core/src/test/java/com/yahoo/container/di/ContainerTestBase.java (renamed from container-di/src/test/java/com/yahoo/container/di/ContainerTestBase.java) | 0
-rw-r--r--  container-core/src/test/java/com/yahoo/container/di/DirConfigSource.java (renamed from container-di/src/test/java/com/yahoo/container/di/DirConfigSource.java) | 0
-rw-r--r--  container-core/src/test/java/com/yahoo/container/di/componentgraph/core/ComponentGraphTest.java (renamed from container-di/src/test/java/com/yahoo/container/di/componentgraph/core/ComponentGraphTest.java) | 0
-rw-r--r--  container-core/src/test/java/com/yahoo/container/di/componentgraph/core/FallbackToGuiceInjectorTest.java (renamed from container-di/src/test/java/com/yahoo/container/di/componentgraph/core/FallbackToGuiceInjectorTest.java) | 0
-rw-r--r--  container-core/src/test/java/com/yahoo/container/di/componentgraph/core/JerseyNodeTest.java (renamed from container-di/src/test/java/com/yahoo/container/di/componentgraph/core/JerseyNodeTest.java) | 0
-rw-r--r--  container-core/src/test/java/com/yahoo/container/di/componentgraph/core/ReuseComponentsTest.java (renamed from container-di/src/test/java/com/yahoo/container/di/componentgraph/core/ReuseComponentsTest.java) | 0
-rw-r--r--  container-core/src/test/java/com/yahoo/container/di/componentgraph/cycle/CycleFinderTest.java (renamed from container-di/src/test/java/com/yahoo/container/di/componentgraph/cycle/CycleFinderTest.java) | 0
-rw-r--r--  container-core/src/test/java/com/yahoo/container/di/componentgraph/cycle/GraphTest.java (renamed from container-di/src/test/java/com/yahoo/container/di/componentgraph/cycle/GraphTest.java) | 0
-rw-r--r--  container-core/src/test/java/com/yahoo/container/handler/LogHandlerTest.java | 5
-rw-r--r--  container-core/src/test/java/com/yahoo/container/jdisc/ThreadedHttpRequestHandlerTest.java | 4
-rw-r--r--  container-core/src/test/java/com/yahoo/container/jdisc/ThreadedRequestHandlerTestCase.java | 50
-rw-r--r--  container-core/src/test/java/com/yahoo/container/logging/LogFileHandlerTestCase.java | 15
-rw-r--r--  container-core/src/test/java/com/yahoo/jdisc/http/filter/ServletFilterRequestTest.java | 27
-rw-r--r--  container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/AccessLogRequestLogTest.java | 89
-rw-r--r--  container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/FilterTestCase.java | 111
-rw-r--r--  container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpRequestFactoryTest.java | 30
-rw-r--r--  container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerConformanceTest.java | 2
-rw-r--r--  container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java | 234
-rw-r--r--  container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/JDiscHttpServletTest.java | 27
-rw-r--r--  container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/JettyMockRequestBuilder.java | 176
-rw-r--r--  container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/JettyMockResponseBuilder.java | 29
-rw-r--r--  container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/JettyTestDriver.java | 90
-rw-r--r--  container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/SimpleHttpClient.java | 83
-rw-r--r--  container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/TestDriver.java | 78
-rw-r--r--  container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/TestDrivers.java | 94
-rw-r--r--  container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/servlet/JDiscFilterForServletTest.java | 21
-rw-r--r--  container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/servlet/ServletAccessLoggingTest.java | 11
-rw-r--r--  container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/servlet/ServletTestBase.java | 4
-rw-r--r--  container-core/src/test/java/com/yahoo/metrics/simple/BucketTest.java (renamed from simplemetrics/src/test/java/com/yahoo/metrics/simple/BucketTest.java) | 0
-rw-r--r--  container-core/src/test/java/com/yahoo/metrics/simple/CounterTest.java (renamed from simplemetrics/src/test/java/com/yahoo/metrics/simple/CounterTest.java) | 0
-rw-r--r--  container-core/src/test/java/com/yahoo/metrics/simple/DimensionsCacheTest.java (renamed from simplemetrics/src/test/java/com/yahoo/metrics/simple/DimensionsCacheTest.java) | 0
-rw-r--r--  container-core/src/test/java/com/yahoo/metrics/simple/GaugeTest.java (renamed from simplemetrics/src/test/java/com/yahoo/metrics/simple/GaugeTest.java) | 0
-rw-r--r--  container-core/src/test/java/com/yahoo/metrics/simple/MetricsTest.java (renamed from simplemetrics/src/test/java/com/yahoo/metrics/simple/MetricsTest.java) | 0
-rw-r--r--  container-core/src/test/java/com/yahoo/metrics/simple/PointTest.java (renamed from simplemetrics/src/test/java/com/yahoo/metrics/simple/PointTest.java) | 0
-rw-r--r--  container-core/src/test/java/com/yahoo/metrics/simple/jdisc/SnapshotConverterTest.java (renamed from simplemetrics/src/test/java/com/yahoo/metrics/simple/jdisc/SnapshotConverterTest.java) | 0
-rw-r--r--  container-core/src/test/java/com/yahoo/restapi/RestApiImplTest.java | 24
-rw-r--r--  container-core/src/test/vespa-configdef/config.di.int.def (renamed from container-di/src/test/vespa-configdef/config.di.int.def) | 0
-rw-r--r--  container-core/src/test/vespa-configdef/config.di.string.def (renamed from container-di/src/test/vespa-configdef/config.di.string.def) | 0
-rw-r--r--  container-core/src/test/vespa-configdef/config.test.bootstrap1.def (renamed from container-di/src/test/vespa-configdef/config.test.bootstrap1.def) | 0
-rw-r--r--  container-core/src/test/vespa-configdef/config.test.bootstrap2.def (renamed from container-di/src/test/vespa-configdef/config.test.bootstrap2.def) | 0
-rw-r--r--  container-core/src/test/vespa-configdef/config.test.components1.def (renamed from container-di/src/test/vespa-configdef/config.test.components1.def) | 0
-rw-r--r--  container-core/src/test/vespa-configdef/config.test.test.def (renamed from container-di/src/test/vespa-configdef/config.test.test.def) | 0
-rw-r--r--  container-core/src/test/vespa-configdef/config.test.test2.def (renamed from container-di/src/test/vespa-configdef/config.test.test2.def) | 0
-rw-r--r--  container-core/src/test/vespa-configdef/config.test.thread-pool.def (renamed from container-di/src/test/vespa-configdef/config.test.thread-pool.def) | 0
-rw-r--r--  container-dependency-versions/pom.xml | 69
-rw-r--r--  container-dev/pom.xml | 15
-rw-r--r--  container-di/.gitignore | 2
-rw-r--r--  container-di/CMakeLists.txt | 2
-rw-r--r--  container-di/OWNERS | 1
-rw-r--r--  container-di/README.md | 4
-rw-r--r--  container-di/abi-spec.json | 17
-rw-r--r--  container-di/benchmarks/src/test/java/com/yahoo/component/ComponentIdBenchmark.java | 50
-rw-r--r--  container-di/pom.xml | 127
-rw-r--r--  container-di/src/main/java/com/yahoo/container/di/componentgraph/package-info.java | 7
-rw-r--r--  container-disc/pom.xml | 39
-rw-r--r--  container-jersey2/pom.xml | 2
-rw-r--r--  container-messagebus/pom.xml | 6
-rw-r--r--  container-search-and-docproc/pom.xml | 6
-rw-r--r--  container-search/abi-spec.json | 9
-rw-r--r--  container-search/pom.xml | 6
-rw-r--r--  container-search/src/main/antlr4/com/yahoo/search/yql/yqlplus.g4 | 248
-rw-r--r--  container-search/src/main/java/com/yahoo/prelude/Index.java | 20
-rw-r--r--  container-search/src/main/java/com/yahoo/prelude/cluster/ClusterSearcher.java | 8
-rw-r--r--  container-search/src/main/java/com/yahoo/prelude/fastsearch/FS4ResourcePool.java | 62
-rw-r--r--  container-search/src/main/java/com/yahoo/prelude/query/FalseItem.java | 2
-rw-r--r--  container-search/src/main/java/com/yahoo/prelude/query/QueryCanonicalizer.java | 11
-rw-r--r--  container-search/src/main/java/com/yahoo/prelude/query/SameElementItem.java | 1
-rw-r--r--  container-search/src/main/java/com/yahoo/prelude/query/WandItem.java | 9
-rw-r--r--  container-search/src/main/java/com/yahoo/prelude/query/WeakAndItem.java | 59
-rw-r--r--  container-search/src/main/java/com/yahoo/prelude/query/WordAlternativesItem.java | 23
-rw-r--r--  container-search/src/main/java/com/yahoo/prelude/query/parser/AdvancedParser.java | 4
-rw-r--r--  container-search/src/main/java/com/yahoo/prelude/querytransform/QueryRewrite.java | 1
-rw-r--r--  container-search/src/main/java/com/yahoo/prelude/statistics/StatisticsSearcher.java | 29
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/CloseableInvoker.java | 2
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java | 32
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/FillInvoker.java | 4
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/InterleavedSearchInvoker.java | 12
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/InvokerFactory.java | 4
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/InvokerResult.java | 7
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/LeanHit.java | 6
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/LoadBalancer.java | 6
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/ResponseMonitor.java | 2
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/SearchErrorInvoker.java | 1
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/SearchPath.java | 12
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/TopKEstimator.java | 12
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/rpc/ProtobufSerialization.java | 4
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/rpc/RpcPingFactory.java | 4
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/rpc/RpcProtobufFillInvoker.java | 1
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/rpc/RpcResourcePool.java | 1
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/rpc/RpcSearchInvoker.java | 4
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/Group.java | 2
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/Node.java | 2
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/PingFactory.java | 4
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/Pinger.java | 2
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/PongHandler.java | 2
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java | 101
-rw-r--r--  container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java | 17
-rw-r--r--  container-search/src/main/java/com/yahoo/search/query/parser/ParserEnvironment.java | 1
-rw-r--r--  container-search/src/main/java/com/yahoo/search/querytransform/VespaLowercasingSearcher.java | 6
-rw-r--r--  container-search/src/main/java/com/yahoo/search/rendering/JsonRenderer.java | 4
-rw-r--r--  container-search/src/main/java/com/yahoo/search/yql/ProgramParser.java | 1239
-rw-r--r--  container-search/src/test/java/com/yahoo/prelude/cluster/ClusterSearcherTestCase.java | 3
-rw-r--r--  container-search/src/test/java/com/yahoo/prelude/query/ItemsCommonStuffTestCase.java | 2
-rw-r--r--  container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParseTestCase.java | 6
-rw-r--r--  container-search/src/test/java/com/yahoo/prelude/query/test/QueryCanonicalizerTestCase.java | 43
-rw-r--r--  container-search/src/test/java/com/yahoo/prelude/query/test/SameElementItemTestCase.java | 19
-rw-r--r--  container-search/src/test/java/com/yahoo/prelude/querytransform/test/QueryRewriteTestCase.java | 4
-rw-r--r--  container-search/src/test/java/com/yahoo/search/dispatch/DispatcherTest.java | 3
-rw-r--r--  container-search/src/test/java/com/yahoo/search/dispatch/InterleavedSearchInvokerTest.java | 9
-rw-r--r--  container-search/src/test/java/com/yahoo/search/dispatch/LoadBalancerTest.java | 1
-rw-r--r--  container-search/src/test/java/com/yahoo/search/dispatch/MockInvoker.java | 26
-rw-r--r--  container-search/src/test/java/com/yahoo/search/dispatch/MockSearchCluster.java | 1
-rw-r--r--  container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterCoverageTest.java | 89
-rw-r--r--  container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterTest.java | 2
-rw-r--r--  container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterTester.java | 33
-rw-r--r--  container-search/src/test/java/com/yahoo/search/searchchain/test/AsyncExecutionOfOneChainTestCase.java | 12
-rw-r--r--  container-search/src/test/java/com/yahoo/search/yql/YqlParserTestCase.java | 6
-rw-r--r--  container-search/src/test/java/com/yahoo/select/SelectTestCase.java | 6
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java | 11
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Cluster.java | 5
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServer.java | 4
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServerException.java | 53
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/LoadBalancer.java | 6
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Log.java | 1
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Node.java | 15
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/NodeRepository.java | 31
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ProvisionResource.java | 3
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ScalingEventData.java | 12
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/ChangeRequest.java | 2
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/ChangeRequestClient.java | 3
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/ChangeRequestSource.java | 9
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/HostAction.java | 72
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/MockChangeRequestClient.java | 2
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/VespaChangeRequest.java | 98
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java | 1
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java | 30
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java | 7
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/TenantController.java | 2
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java | 61
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java | 2
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeManagementAssessor.java | 4
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeRequestMaintainer.java | 99
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CloudEventReporter.java | 2
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java | 7
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/HostInfoUpdater.java (renamed from controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/HostSwitchUpdater.java) | 42
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java | 2
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceTagMaintainer.java | 2
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/VCMRMaintainer.java | 256
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/metric/CostCalculator.java | 2
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notification.java | 74
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationSource.java | 152
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java | 84
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ChangeRequestSerializer.java | 150
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java | 59
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializer.java | 104
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java | 76
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandler.java | 16
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java | 3
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java | 6
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java | 6
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java | 34
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java | 44
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeManagementAssessorTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeRequestMaintainerTest.java | 66
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/CloudEventReporterTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java | 5
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/HostInfoUpdaterTest.java (renamed from controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/HostSwitchUpdaterTest.java) | 32
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporterTest.java | 4
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/SystemRoutingPolicyMaintainerTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java | 4
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/VCMRMaintainerTest.java | 214
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/metric/ConfigServerMetricsTest.java | 11
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java | 107
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ChangeRequestSerializerTest.java | 45
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializerTest.java | 59
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java | 23
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-clusters.json | 3
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/notifications-tenant1-app2.json | 16
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/notifications-tenant1.json | 25
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/us-east-3-log-without-first.json | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandlerTest.java | 31
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/responses/vcmrs.json | 40
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json | 5
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java | 4
-rw-r--r--  dist/vespa.spec | 19
-rwxr-xr-x  docker/build/build-vespa-internal.sh | 9
-rw-r--r--  document/src/tests/tensor_fieldvalue/partial_add/partial_add_test.cpp | 58
-rw-r--r--  document/src/tests/tensor_fieldvalue/partial_modify/partial_modify_test.cpp | 68
-rw-r--r--  document/src/tests/tensor_fieldvalue/partial_remove/partial_remove_test.cpp | 55
-rw-r--r--  eval/src/apps/tensor_conformance/generate.cpp | 634
-rw-r--r--  eval/src/apps/tensor_conformance/generate.h | 7
-rw-r--r--  eval/src/apps/tensor_conformance/tensor_conformance.cpp | 296
-rw-r--r--  eval/src/tests/eval/node_types/node_types_test.cpp | 51
-rw-r--r--  eval/src/tests/eval/tensor_lambda/tensor_lambda_test.cpp | 54
-rw-r--r--  eval/src/tests/eval/tensor_spec/tensor_spec_test.cpp | 13
-rw-r--r--  eval/src/tests/eval/value_type/value_type_test.cpp | 190
-rw-r--r--  eval/src/tests/tensor/binary_format/binary_format_test.cpp | 2
-rwxr-xr-x  eval/src/tests/tensor/onnx_wrapper/dynamic.py | 2
-rw-r--r--  eval/src/tests/tensor/onnx_wrapper/float_to_int8.onnx | 12
-rwxr-xr-x  eval/src/tests/tensor/onnx_wrapper/float_to_int8.py | 23
-rwxr-xr-x  eval/src/tests/tensor/onnx_wrapper/guess_batch.py | 2
-rwxr-xr-x  eval/src/tests/tensor/onnx_wrapper/int_types.py | 2
-rw-r--r--  eval/src/tests/tensor/onnx_wrapper/onnx_wrapper_test.cpp | 130
-rwxr-xr-x  eval/src/tests/tensor/onnx_wrapper/simple.py | 3
-rw-r--r--  eval/src/tests/tensor/onnx_wrapper/unstable_types.onnx | 23
-rwxr-xr-x  eval/src/tests/tensor/onnx_wrapper/unstable_types.py | 31
-rw-r--r--  eval/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp | 26
-rw-r--r--  eval/src/vespa/eval/eval/tensor_spec.cpp | 157
-rw-r--r--  eval/src/vespa/eval/eval/tensor_spec.h | 2
-rw-r--r--  eval/src/vespa/eval/eval/test/CMakeLists.txt | 2
-rw-r--r--  eval/src/vespa/eval/eval/test/gen_spec.cpp | 4
-rw-r--r--  eval/src/vespa/eval/eval/test/gen_spec.h | 5
-rw-r--r--  eval/src/vespa/eval/eval/test/tensor_conformance.cpp | 681
-rw-r--r--  eval/src/vespa/eval/eval/test/tensor_conformance.h | 18
-rw-r--r--  eval/src/vespa/eval/eval/test/tensor_model.cpp | 52
-rw-r--r--  eval/src/vespa/eval/eval/test/tensor_model.h | 71
-rw-r--r--  eval/src/vespa/eval/onnx/onnx_wrapper.cpp | 63
-rw-r--r--  eval/src/vespa/eval/onnx/onnx_wrapper.h | 2
-rw-r--r--  fat-model-dependencies/pom.xml | 5
-rw-r--r--  filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java | 2
-rw-r--r--  filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileDownloaderTest.java | 2
-rw-r--r--  flags/src/main/java/com/yahoo/vespa/flags/Flags.java | 77
-rw-r--r--  flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java | 8
-rw-r--r--  hosted-tenant-base/pom.xml | 5
-rw-r--r--  jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/AthenzAuthorizationFilter.java | 2
-rw-r--r--  jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/RequestResourceMapper.java | 8
-rw-r--r--  jdisc_core/src/main/java/com/yahoo/jdisc/AbstractResource.java | 2
-rw-r--r--  jdisc_core/src/main/java/com/yahoo/jdisc/ProxyRequestHandler.java | 8
-rw-r--r--  jdisc_core/src/main/java/com/yahoo/jdisc/core/ContainerSnapshot.java | 8
-rw-r--r--  jdisc_core/src/main/java/com/yahoo/jdisc/core/TimeoutManagerImpl.java | 8
-rw-r--r--  jdisc_core/src/main/java/com/yahoo/jdisc/handler/DelegatedRequestHandler.java | 15
-rw-r--r--  jdisc_jetty/pom.xml | 17
-rw-r--r--  jrt/src/com/yahoo/jrt/Connection.java | 4
-rw-r--r--  jrt/src/com/yahoo/jrt/slobrok/api/BackOff.java | 15
-rw-r--r--  jrt/src/com/yahoo/jrt/slobrok/api/BackOffPolicy.java | 8
-rw-r--r--  jrt/src/com/yahoo/jrt/slobrok/api/Mirror.java | 30
-rw-r--r--  jrt/src/com/yahoo/jrt/slobrok/api/Register.java | 34
-rw-r--r--  jrt/src/com/yahoo/jrt/slobrok/api/SlobrokList.java | 2
-rw-r--r--  linguistics/src/main/java/com/yahoo/language/LinguisticsCase.java | 13
-rw-r--r--  linguistics/src/main/java/com/yahoo/language/LocaleFactory.java | 22
-rw-r--r--  linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpLinguistics.java | 2
-rw-r--r--  linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpTokenizer.java | 139
-rw-r--r--  linguistics/src/main/java/com/yahoo/language/simple/SimpleLinguistics.java | 1
-rw-r--r--  linguistics/src/test/java/com/yahoo/language/detect/AbstractDetectorTestCase.java | 3
-rw-r--r--  linguistics/src/test/java/com/yahoo/language/opennlp/OpenNlpTokenizationTestCase.java | 65
-rw-r--r--  linguistics/src/test/java/com/yahoo/language/process/NormalizationTestCase.java | 2
-rw-r--r--  linguistics/src/test/java/com/yahoo/language/process/StemListTestCase.java | 2
-rw-r--r--  linguistics/src/test/java/com/yahoo/language/process/TokenTypeTestCase.java | 2
-rw-r--r--  linguistics/src/test/java/com/yahoo/language/process/TokenizationTestCase.java | 20
-rw-r--r--  linguistics/src/test/java/com/yahoo/language/simple/SimpleTokenTypeTestCase.java | 2
-rw-r--r--  linguistics/src/test/java/com/yahoo/language/simple/SimpleTokenizerTestCase.java | 2
-rw-r--r--  metrics-proxy/pom.xml | 6
-rw-r--r--  metrics/pom.xml | 33
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorImpl.java | 6
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/ContainerOperations.java | 15
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/ContainerOperationsImpl.java | 21
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java | 1
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandler.java | 19
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java | 18
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContext.java | 2
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContextImpl.java | 19
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java | 30
-rw-r--r--  node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ContainerEngineMock.java | 2
-rw-r--r--  node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerTester.java | 2
-rw-r--r--  node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandlerTest.java | 26
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java24
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java15
-rw-r--r--node-repository/pom.xml13
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java6
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java23
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/QuestMetricsDb.java81
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancer.java12
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java9
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityChecker.java10
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainer.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java14
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java54
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirer.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/NodeAcl.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java102
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/ApplicationFilter.java37
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/NodeFilter.java29
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/NodeHostFilter.java38
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/NodeListFilter.java32
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/NodeOsVersionFilter.java24
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/NodeTypeFilter.java32
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/ParentHostFilter.java26
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/StateFilter.java32
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/DelegatingOsUpgrader.java3
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsVersions.java40
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RebuildingOsUpgrader.java26
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RetiringOsUpgrader.java39
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java12
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java33
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostCapacity.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java42
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java3
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java11
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java1
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/LoadBalancersResponse.java21
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java6
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java34
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java65
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java29
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java33
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java75
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializerTest.java14
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java62
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java23
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java7
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java36
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/cfg1.json2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/cfg2.json2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node1.json2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node10.json2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node2.json2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node3.json2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-after-changes.json8
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-with-hostnames.json2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4.json2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5-after-changes.json2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5.json2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node6.json2
-rw-r--r--orchestrator-restapi/pom.xml12
-rw-r--r--orchestrator-restapi/src/main/java/com/yahoo/vespa/orchestrator/restapi/ApplicationSuspensionApi.java91
-rw-r--r--orchestrator-restapi/src/main/java/com/yahoo/vespa/orchestrator/restapi/HostApi.java82
-rw-r--r--orchestrator-restapi/src/main/java/com/yahoo/vespa/orchestrator/restapi/HostSuspensionApi.java35
-rw-r--r--orchestrator-restapi/src/main/java/com/yahoo/vespa/orchestrator/restapi/package-info.java7
-rw-r--r--orchestrator-restapi/src/main/java/com/yahoo/vespa/orchestrator/restapi/wire/SlobrokEntryResponse.java4
-rw-r--r--orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/ApplicationSuspensionRequestHandler.java158
-rw-r--r--orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/HealthRequestHandler.java (renamed from orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/health/HealthResource.java)57
-rw-r--r--orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/HostRequestHandler.java (renamed from orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/host/HostResource.java)152
-rw-r--r--orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/HostSuspensionRequestHandler.java (renamed from orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/HostSuspensionHandler.java)10
-rw-r--r--orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/InstanceRequestHandler.java (renamed from orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/instance/InstanceResource.java)121
-rw-r--r--orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/appsuspension/ApplicationSuspensionResource.java122
-rw-r--r--orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/ApplicationSuspensionRequestHandlerTest.java151
-rw-r--r--orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/HostRequestHandlerTest.java (renamed from orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/host/HostResourceTest.java)175
-rw-r--r--orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/HostSuspensionRequestHandlerTest.java (renamed from orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/HostSuspensionHandlerTest.java)52
-rw-r--r--orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/InstanceRequestHandlerTest.java117
-rw-r--r--orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/appsuspension/ApplicationSuspensionResourceTest.java168
-rw-r--r--orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/instance/InstanceResourceTest.java92
-rw-r--r--parent/pom.xml109
-rw-r--r--pom.xml2
-rw-r--r--screwdriver.yaml3
-rw-r--r--searchcommon/src/vespa/searchcommon/attribute/config.cpp3
-rw-r--r--searchcommon/src/vespa/searchcommon/attribute/config.h4
-rw-r--r--searchcore/src/apps/tests/persistenceconformance_test.cpp30
-rw-r--r--searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp24
-rw-r--r--searchcore/src/tests/proton/docsummary/docsummary.cpp12
-rw-r--r--searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_v2_test.cpp4
-rw-r--r--searchcore/src/tests/proton/documentdb/documentdb_test.cpp2
-rw-r--r--searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.cpp9
-rw-r--r--searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.h6
-rw-r--r--searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp17
-rw-r--r--searchcore/src/vespa/searchcore/config/proton.def6
-rw-r--r--searchcore/src/vespa/searchcore/proton/common/monitored_refcount.cpp4
-rw-r--r--searchcore/src/vespa/searchcore/proton/common/monitored_refcount.h38
-rw-r--r--searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastoreattribute.cpp1
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/sessionmanager.cpp2
-rw-r--r--searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.cpp1
-rw-r--r--searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h1
-rw-r--r--searchcore/src/vespa/searchcore/proton/reference/document_db_reference_resolver.cpp9
-rw-r--r--searchcore/src/vespa/searchcore/proton/reference/gid_to_lid_change_listener.cpp10
-rw-r--r--searchcore/src/vespa/searchcore/proton/reference/gid_to_lid_change_listener.h4
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/blockable_maintenance_job.cpp7
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/blockable_maintenance_job.h3
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/bucketmovejob.h1
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/bucketmovejobv2.cpp90
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/bucketmovejobv2.h23
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_forwarder.cpp13
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_forwarder.h2
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentbucketmover.cpp61
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentbucketmover.h55
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentdb.cpp57
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentdb.h120
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp10
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/feedhandler.h2
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/flushhandlerproxy.cpp12
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/flushhandlerproxy.h4
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/i_maintenance_job.h2
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.h1
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_take2.cpp28
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_take2.h13
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/maintenance_controller_explorer.cpp2
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/maintenance_controller_explorer.h3
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.cpp12
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.h1
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.cpp20
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.h13
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/maintenancedocumentsubdb.cpp1
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/maintenancejobrunner.cpp7
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/maintenancejobrunner.h1
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/persistencehandlerproxy.cpp12
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/persistencehandlerproxy.h10
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/proton.cpp10
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/pruneremoveddocumentsjob.h1
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/searchhandlerproxy.cpp12
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/searchhandlerproxy.h2
-rw-r--r--searchcorespi/src/vespa/searchcorespi/index/iindexcollection.h6
-rw-r--r--searchcorespi/src/vespa/searchcorespi/index/warmupindexcollection.cpp4
-rw-r--r--searchlib/CMakeLists.txt1
-rw-r--r--searchlib/src/apps/tests/CMakeLists.txt8
-rw-r--r--searchlib/src/apps/tests/document_weight_attribute_lookup_stress_test.cpp150
-rw-r--r--searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/evaluation/MapTypeContext.java2
-rw-r--r--searchlib/src/main/java/com/yahoo/searchlib/tensor/EvaluateTensorConformance.java5
-rwxr-xr-xsearchlib/src/main/sh/vespa-evaluate-tensor-conformance.sh2
-rw-r--r--searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/evaluation/EvaluationTestCase.java11
-rw-r--r--searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/evaluation/EvaluationTester.java2
-rw-r--r--searchlib/src/tests/attribute/document_weight_iterator/document_weight_iterator_test.cpp17
-rw-r--r--searchlib/src/tests/attribute/enum_comparator/enum_comparator_test.cpp25
-rw-r--r--searchlib/src/tests/attribute/enumstore/enumstore_test.cpp67
-rw-r--r--searchlib/src/tests/attribute/posting_store/CMakeLists.txt9
-rw-r--r--searchlib/src/tests/attribute/posting_store/posting_store_test.cpp257
-rw-r--r--searchlib/src/tests/attribute/stringattribute/stringattribute_test.cpp86
-rw-r--r--searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp2
-rw-r--r--searchlib/src/tests/query/streaming_query_test.cpp18
-rw-r--r--searchlib/src/tests/tensor/distance_functions/distance_functions_test.cpp34
-rw-r--r--searchlib/src/tests/tensor/hnsw_index/hnsw_index_test.cpp3
-rw-r--r--searchlib/src/tests/tensor/hnsw_index/stress_hnsw_mt.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/configconverter.cpp16
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp8
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enumcomparator.cpp67
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enumcomparator.h81
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enumstore.cpp14
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enumstore.h68
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enumstore.hpp91
-rw-r--r--searchlib/src/vespa/searchlib/attribute/fixedsourceselector.cpp18
-rw-r--r--searchlib/src/vespa/searchlib/attribute/fixedsourceselector.h2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/i_enum_store.h5
-rw-r--r--searchlib/src/vespa/searchlib/attribute/ipostinglistattributebase.h4
-rw-r--r--searchlib/src/vespa/searchlib/attribute/multienumattribute.hpp18
-rw-r--r--searchlib/src/vespa/searchlib/attribute/multinumericpostattribute.hpp7
-rw-r--r--searchlib/src/vespa/searchlib/attribute/multistringattribute.hpp4
-rw-r--r--searchlib/src/vespa/searchlib/attribute/multistringpostattribute.hpp8
-rw-r--r--searchlib/src/vespa/searchlib/attribute/postinglistattribute.cpp29
-rw-r--r--searchlib/src/vespa/searchlib/attribute/postinglistattribute.h14
-rw-r--r--searchlib/src/vespa/searchlib/attribute/postinglistsearchcontext.h30
-rw-r--r--searchlib/src/vespa/searchlib/attribute/postingstore.cpp140
-rw-r--r--searchlib/src/vespa/searchlib/attribute/postingstore.h17
-rw-r--r--searchlib/src/vespa/searchlib/attribute/reference_attribute.cpp41
-rw-r--r--searchlib/src/vespa/searchlib/attribute/reference_attribute.h6
-rw-r--r--searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp17
-rw-r--r--searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.h2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.hpp17
-rw-r--r--searchlib/src/vespa/searchlib/attribute/singlestringattribute.hpp4
-rw-r--r--searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.h9
-rw-r--r--searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.hpp17
-rw-r--r--searchlib/src/vespa/searchlib/attribute/stringattribute.h1
-rw-r--r--searchlib/src/vespa/searchlib/attribute/stringbase.cpp61
-rw-r--r--searchlib/src/vespa/searchlib/attribute/stringbase.h61
-rw-r--r--searchlib/src/vespa/searchlib/engine/proto_converter.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/query/query_term_simple.h1
-rw-r--r--searchlib/src/vespa/searchlib/query/query_term_ucs4.h1
-rw-r--r--searchlib/src/vespa/searchlib/query/streaming/query.cpp244
-rw-r--r--searchlib/src/vespa/searchlib/query/streaming/query.h19
-rw-r--r--searchlib/src/vespa/searchlib/query/streaming/querynode.cpp24
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.cpp73
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_iterator.cpp5
-rw-r--r--searchlib/src/vespa/searchlib/tensor/CMakeLists.txt6
-rw-r--r--searchlib/src/vespa/searchlib/tensor/angular_distance.cpp52
-rw-r--r--searchlib/src/vespa/searchlib/tensor/angular_distance.h76
-rw-r--r--searchlib/src/vespa/searchlib/tensor/distance_function.h13
-rw-r--r--searchlib/src/vespa/searchlib/tensor/distance_function_factory.cpp61
-rw-r--r--searchlib/src/vespa/searchlib/tensor/distance_functions.cpp22
-rw-r--r--searchlib/src/vespa/searchlib/tensor/distance_functions.h247
-rw-r--r--searchlib/src/vespa/searchlib/tensor/euclidean_distance.cpp51
-rw-r--r--searchlib/src/vespa/searchlib/tensor/euclidean_distance.h78
-rw-r--r--searchlib/src/vespa/searchlib/tensor/geo_degrees_distance.cpp50
-rw-r--r--searchlib/src/vespa/searchlib/tensor/geo_degrees_distance.h53
-rw-r--r--searchlib/src/vespa/searchlib/tensor/hamming_distance.cpp61
-rw-r--r--searchlib/src/vespa/searchlib/tensor/hamming_distance.h38
-rw-r--r--searchlib/src/vespa/searchlib/tensor/inner_product_distance.cpp44
-rw-r--r--searchlib/src/vespa/searchlib/tensor/inner_product_distance.h64
-rw-r--r--searchlib/src/vespa/searchlib/util/foldedstringcompare.cpp36
-rw-r--r--searchlib/src/vespa/searchlib/util/foldedstringcompare.h22
-rw-r--r--simplemetrics/.gitignore2
-rw-r--r--simplemetrics/CMakeLists.txt4
-rw-r--r--simplemetrics/OWNERS1
-rw-r--r--simplemetrics/README1
-rw-r--r--simplemetrics/abi-spec.json304
-rw-r--r--slobrok/src/tests/backoff/testbackoff.cpp88
-rw-r--r--slobrok/src/vespa/slobrok/CMakeLists.txt1
-rw-r--r--slobrok/src/vespa/slobrok/backoff.cpp39
-rw-r--r--slobrok/src/vespa/slobrok/backoff.h39
-rw-r--r--slobrok/src/vespa/slobrok/sblist.cpp8
-rw-r--r--slobrok/src/vespa/slobrok/sbmirror.cpp15
-rw-r--r--slobrok/src/vespa/slobrok/sbmirror.h1
-rw-r--r--slobrok/src/vespa/slobrok/sbregister.cpp21
-rw-r--r--slobrok/src/vespa/slobrok/sbregister.h1
-rw-r--r--storage/src/tests/distributor/bucketdbupdatertest.cpp2
-rw-r--r--storage/src/tests/distributor/distributortest.cpp39
-rw-r--r--storage/src/tests/distributor/distributortestutil.cpp6
-rw-r--r--storage/src/tests/distributor/distributortestutil.h4
-rw-r--r--storage/src/vespa/storage/common/distributorcomponent.cpp15
-rw-r--r--storage/src/vespa/storage/common/distributorcomponent.h19
-rw-r--r--storage/src/vespa/storage/config/distributorconfiguration.cpp25
-rw-r--r--storage/src/vespa/storage/config/distributorconfiguration.h4
-rw-r--r--storage/src/vespa/storage/config/stor-distributormanager.def4
-rw-r--r--storage/src/vespa/storage/distributor/CMakeLists.txt3
-rw-r--r--storage/src/vespa/storage/distributor/bucket_space_distribution_configs.cpp17
-rw-r--r--storage/src/vespa/storage/distributor/bucket_space_distribution_configs.h27
-rw-r--r--storage/src/vespa/storage/distributor/bucketdbupdater.cpp847
-rw-r--r--storage/src/vespa/storage/distributor/bucketdbupdater.h174
-rw-r--r--storage/src/vespa/storage/distributor/distributor.cpp117
-rw-r--r--storage/src/vespa/storage/distributor/distributor.h17
-rw-r--r--storage/src/vespa/storage/distributor/distributor_bucket_space.h1
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe.cpp86
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe.h45
-rw-r--r--storage/src/vespa/storage/distributor/idealstatemanager.cpp10
-rw-r--r--storage/src/vespa/storage/distributor/idealstatemanager.h3
-rw-r--r--storage/src/vespa/storage/distributor/legacy_single_stripe_accessor.cpp92
-rw-r--r--storage/src/vespa/storage/distributor/legacy_single_stripe_accessor.h69
-rw-r--r--storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.cpp71
-rw-r--r--storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.h110
-rw-r--r--storage/src/vespa/storage/distributor/pendingclusterstate.cpp14
-rw-r--r--storage/src/vespa/storage/distributor/pendingclusterstate.h3
-rw-r--r--storage/src/vespa/storage/distributor/potential_data_loss_report.h22
-rw-r--r--storage/src/vespa/storage/distributor/stripe_access_guard.h71
-rw-r--r--storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp1139
-rw-r--r--storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h286
-rw-r--r--storage/src/vespa/storage/storageserver/distributornode.cpp7
-rw-r--r--storage/src/vespa/storage/storageserver/distributornode.h9
-rw-r--r--storageapi/src/vespa/storageapi/message/bucket.cpp6
-rw-r--r--storageapi/src/vespa/storageapi/message/bucket.h7
-rw-r--r--storageserver/src/vespa/storageserver/app/distributorprocess.cpp15
-rw-r--r--storageserver/src/vespa/storageserver/app/distributorprocess.h2
-rw-r--r--vespa-athenz/pom.xml24
-rw-r--r--vespa-athenz/src/main/java/com/yahoo/vespa/athenz/aws/AwsCredentials.java12
-rw-r--r--vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/bindings/IdentityDocumentApi.java26
-rw-r--r--vespa_feed_perf/src/main/java/com/yahoo/vespa/feed/perf/SimpleFeeder.java3
-rw-r--r--vespaclient-java/src/main/java/com/yahoo/vespaget/Main.java5
-rw-r--r--vespaclient-java/src/main/java/com/yahoo/vespastat/Main.java4
-rwxr-xr-xvespaclient-java/src/main/sh/vespa-visit.sh4
-rw-r--r--vespajlib/abi-spec.json22
-rw-r--r--vespajlib/src/main/java/com/yahoo/protect/Validator.java39
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/TypeResolver.java264
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/functions/CellCast.java6
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/functions/Concat.java273
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/functions/Join.java3
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/functions/Map.java13
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/functions/Merge.java5
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/functions/Reduce.java30
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/functions/Rename.java6
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/functions/ScalarFunctions.java86
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/functions/Slice.java40
-rw-r--r--vespajlib/src/test/java/com/yahoo/tensor/TypeResolverTestCase.java475
-rw-r--r--vespajlib/src/test/java/com/yahoo/tensor/functions/ConcatTestCase.java147
-rw-r--r--vespajlib/src/test/java/com/yahoo/tensor/functions/ScalarFunctionsTestCase.java66
-rw-r--r--vespalib/CMakeLists.txt1
-rw-r--r--vespalib/src/tests/btree/btree_store/CMakeLists.txt9
-rw-r--r--vespalib/src/tests/btree/btree_store/btree_store_test.cpp145
-rw-r--r--vespalib/src/tests/btree/btree_test.cpp10
-rw-r--r--vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp13
-rw-r--r--vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp71
-rw-r--r--vespalib/src/tests/require/require_test.cpp15
-rw-r--r--vespalib/src/tests/stllike/hash_test.cpp7
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreerootbase.h5
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreestore.h7
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreestore.hpp66
-rw-r--r--vespalib/src/vespa/vespalib/datastore/bufferstate.h1
-rw-r--r--vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h2
-rw-r--r--vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp26
-rw-r--r--vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h2
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store.hpp7
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h2
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp37
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.hpp2
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h3
-rw-r--r--vespalib/src/vespa/vespalib/stllike/hashtable.h2
-rw-r--r--vespalib/src/vespa/vespalib/trace/tracenode.cpp8
-rw-r--r--vespalib/src/vespa/vespalib/util/require.h7
-rw-r--r--zookeeper-client-common/src/main/java/com/yahoo/vespa/zookeeper/client/ZkClientConfigBuilder.java6
-rw-r--r--zookeeper-server/CMakeLists.txt1
-rw-r--r--zookeeper-server/pom.xml1
-rw-r--r--zookeeper-server/zookeeper-server-3.6.3/CMakeLists.txt4
-rw-r--r--zookeeper-server/zookeeper-server-3.6.3/pom.xml (renamed from simplemetrics/pom.xml)94
-rw-r--r--zookeeper-server/zookeeper-server-3.6.3/src/main/java/com/yahoo/vespa/zookeeper/ReconfigurableVespaZooKeeperServer.java43
-rw-r--r--zookeeper-server/zookeeper-server-3.6.3/src/main/java/com/yahoo/vespa/zookeeper/VespaMtlsAuthenticationProvider.java41
-rw-r--r--zookeeper-server/zookeeper-server-3.6.3/src/main/java/com/yahoo/vespa/zookeeper/VespaQuorumPeer.java60
-rw-r--r--zookeeper-server/zookeeper-server-3.6.3/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperAdminImpl.java59
-rw-r--r--zookeeper-server/zookeeper-server-3.6.3/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperServerImpl.java47
-rw-r--r--zookeeper-server/zookeeper-server-3.6.3/src/main/java/org/apache/zookeeper/common/NetUtils.java94
-rw-r--r--zookeeper-server/zookeeper-server-3.6.3/src/main/java/org/apache/zookeeper/server/VespaNettyServerCnxnFactory.java37
-rw-r--r--zookeeper-server/zookeeper-server-common/src/main/java/com/yahoo/vespa/zookeeper/Configurator.java130
-rw-r--r--zookeeper-server/zookeeper-server-common/src/main/java/com/yahoo/vespa/zookeeper/ZooKeeperRunner.java3
-rw-r--r--zookeeper-server/zookeeper-server-common/src/test/java/com/yahoo/vespa/zookeeper/ConfiguratorTest.java56
902 files changed, 18358 insertions, 10063 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1b4a87aa39c..d42c24ed49b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -45,7 +45,6 @@ include_directories(BEFORE ${CMAKE_BINARY_DIR}/configdefinitions/src)
add_subdirectory(application-model)
add_subdirectory(application-preprocessor)
add_subdirectory(athenz-identity-provider-service)
-add_subdirectory(component)
add_subdirectory(config-bundle)
add_subdirectory(config-model)
add_subdirectory(config-model-api)
@@ -60,7 +59,6 @@ add_subdirectory(configserver)
add_subdirectory(configserver-flags)
add_subdirectory(configutil)
add_subdirectory(container-core)
-add_subdirectory(container-di)
add_subdirectory(container-disc)
add_subdirectory(container-jersey2)
add_subdirectory(container-messagebus)
@@ -119,7 +117,6 @@ add_subdirectory(searchsummary)
add_subdirectory(security-tools)
add_subdirectory(security-utils)
add_subdirectory(service-monitor)
-add_subdirectory(simplemetrics)
add_subdirectory(slobrok)
add_subdirectory(staging_vespalib)
add_subdirectory(standalone-container)
diff --git a/Code-map.md b/Code-map.md
index a02b52c76a8..fd5a583bc79 100644
--- a/Code-map.md
+++ b/Code-map.md
@@ -44,9 +44,8 @@ jDisc core modules:
jDisc container modules, layered on jDisc core:
- [container-disc](https://github.com/vespa-engine/vespa/tree/master/container-disc) - integration between the jDisc container and jDisc core layers.
-- [container-core](https://github.com/vespa-engine/vespa/tree/master/container-core) - core jDisc container functionality: Metrics, OSGi integration for component bundles, HTTP connector, etc.
+- [container-core](https://github.com/vespa-engine/vespa/tree/master/container-core) - core jDisc container functionality: Metrics, OSGi integration for component bundles, dependency injection, HTTP connector, etc.
- [component](https://github.com/vespa-engine/vespa/tree/master/component) - the component model. Components (in Java) will implement or subclass a type for this module.
-- [container-di](https://github.com/vespa-engine/vespa/tree/master/container-di) - component dependency injection framework for the container, compatible with Guice annotations but an separate implementation which handles injection of config and injection of component collections.
Search container, layered on jDisc container:
diff --git a/application-model/src/main/java/com/yahoo/vespa/applicationmodel/ServiceStatusInfo.java b/application-model/src/main/java/com/yahoo/vespa/applicationmodel/ServiceStatusInfo.java
index e781195eb2a..36cf6f4101e 100644
--- a/application-model/src/main/java/com/yahoo/vespa/applicationmodel/ServiceStatusInfo.java
+++ b/application-model/src/main/java/com/yahoo/vespa/applicationmodel/ServiceStatusInfo.java
@@ -1,6 +1,7 @@
// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.applicationmodel;
+import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
@@ -37,6 +38,15 @@ public class ServiceStatusInfo {
this.endpoint = endpoint;
}
+ @JsonCreator
+ public ServiceStatusInfo(@JsonProperty("serviceStatus") ServiceStatus status,
+ @JsonProperty("since") Instant since,
+ @JsonProperty("lastChecked") Instant lastChecked,
+ @JsonProperty("error") String error,
+ @JsonProperty("endpoint") String endpoint) {
+ this(status, Optional.ofNullable(since), Optional.ofNullable(lastChecked), Optional.ofNullable(error), Optional.ofNullable(endpoint));
+ }
+
@JsonProperty("endpoint")
public String endpointOrNull() {
return endpoint.orElse(null);
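The @JsonCreator constructor added here lets Jackson build a ServiceStatusInfo directly from JSON, with absent properties ending up as Optional.empty() via Optional.ofNullable. A minimal deserialization sketch under stated assumptions: the payload values, the UP status constant and the JavaTimeModule registration for Instant parsing are illustrative, not something this patch specifies.

    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
    import com.yahoo.vespa.applicationmodel.ServiceStatusInfo;

    public class ServiceStatusInfoJsonSketch {
        public static void main(String[] args) throws Exception {
            // Hypothetical payload; the property names match the @JsonProperty annotations on the creator.
            String json = "{\"serviceStatus\": \"UP\", \"since\": \"2021-04-20T10:15:30Z\"}";
            // JavaTimeModule is assumed here so the ISO-8601 string maps to an Instant.
            ObjectMapper mapper = new ObjectMapper().registerModule(new JavaTimeModule());
            ServiceStatusInfo info = mapper.readValue(json, ServiceStatusInfo.class);
            // "lastChecked", "error" and "endpoint" are absent, so the creator wraps them as Optional.empty();
            // endpointOrNull() therefore returns null for this payload.
            System.out.println(info.endpointOrNull());
        }
    }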
diff --git a/application/pom.xml b/application/pom.xml
index 1e14610af69..25e17aca3f9 100644
--- a/application/pom.xml
+++ b/application/pom.xml
@@ -66,6 +66,12 @@
<artifactId>jdisc_jetty</artifactId>
<version>${project.version}</version>
<scope>compile</scope>
+ <exclusions>
+ <exclusion>
+ <groupId>org.apache.aries.spifly</groupId>
+ <artifactId>org.apache.aries.spifly.dynamic.bundle</artifactId>
+ </exclusion>
+ </exclusions>
</dependency>
<dependency>
<groupId>org.hamcrest</groupId>
diff --git a/bundle-plugin/src/main/java/com/yahoo/container/plugin/mojo/GenerateOsgiManifestMojo.java b/bundle-plugin/src/main/java/com/yahoo/container/plugin/mojo/GenerateOsgiManifestMojo.java
index 31c8049b0dd..e1d199c4c9b 100644
--- a/bundle-plugin/src/main/java/com/yahoo/container/plugin/mojo/GenerateOsgiManifestMojo.java
+++ b/bundle-plugin/src/main/java/com/yahoo/container/plugin/mojo/GenerateOsgiManifestMojo.java
@@ -61,12 +61,13 @@ public class GenerateOsgiManifestMojo extends AbstractGenerateOsgiManifestMojo {
public void execute() throws MojoExecutionException {
try {
- if (discPreInstallBundle != null && !buildLegacyVespaPlatformBundle)
+ if (discPreInstallBundle != null && ! buildLegacyVespaPlatformBundle)
throw new MojoExecutionException("The 'discPreInstallBundle' parameter can only be used by legacy Vespa platform bundles.");
Artifacts.ArtifactSet artifactSet = Artifacts.getArtifacts(project);
warnOnUnsupportedArtifacts(artifactSet.getNonJarArtifacts());
- warnIfInternalContainerArtifactsAreIncluded(artifactSet.getJarArtifactsToInclude());
+ if (! isContainerDiscArtifact(project.getArtifact()))
+ throwIfInternalContainerArtifactsAreIncluded(artifactSet.getJarArtifactsToInclude());
List<Export> exportedPackagesFromProvidedJars = exportedPackagesAggregated(
artifactSet.getJarArtifactsProvided().stream().map(Artifact::getFile).collect(Collectors.toList()));
@@ -188,7 +189,7 @@ public class GenerateOsgiManifestMojo extends AbstractGenerateOsgiManifestMojo {
artifact.getId(), artifact.getType())));
}
- private void warnIfInternalContainerArtifactsAreIncluded(Collection<Artifact> includedArtifacts) throws MojoExecutionException {
+ private void throwIfInternalContainerArtifactsAreIncluded(Collection<Artifact> includedArtifacts) throws MojoExecutionException {
/* In most cases it's sufficient to test for 'component', as it's the lowest level container artifact,
* Embedding container artifacts will cause class loading issues at runtime, because the classes will
* not be equal to those seen by the framework (e.g. AbstractComponent). */
@@ -204,6 +205,10 @@ public class GenerateOsgiManifestMojo extends AbstractGenerateOsgiManifestMojo {
return a.getArtifactId().equals("component") && a.getGroupId().equals("com.yahoo.vespa");
}
+ private boolean isContainerDiscArtifact(Artifact a) {
+ return a.getArtifactId().equals("container-disc") && a.getGroupId().equals("com.yahoo.vespa");
+ }
+
private PackageTally getProjectClassesTally() {
File outputDirectory = new File(project.getBuild().getOutputDirectory());
diff --git a/cloud-tenant-base-dependencies-enforcer/pom.xml b/cloud-tenant-base-dependencies-enforcer/pom.xml
index 8a504635f25..8d1f257fca8 100644
--- a/cloud-tenant-base-dependencies-enforcer/pom.xml
+++ b/cloud-tenant-base-dependencies-enforcer/pom.xml
@@ -20,7 +20,7 @@
Copied here because vz-tenant-base does not have a parent. -->
<properties>
<aopalliance.version>1.0</aopalliance.version>
- <athenz.version>1.8.49</athenz.version>
+ <athenz.version>1.10.11</athenz.version>
<bouncycastle.version>1.65</bouncycastle.version>
<felix.version>6.0.3</felix.version>
<felix.log.version>1.0.1</felix.log.version>
@@ -30,7 +30,8 @@
<javax.inject.version>1</javax.inject.version>
<javax.servlet-api.version>3.1.0</javax.servlet-api.version>
<jaxb.version>2.3.0</jaxb.version>
- <jetty.version>9.4.38.v20210224</jetty.version>
+ <jetty.version>9.4.40.v20210413</jetty.version>
+ <jetty-alpn.version>1.1.3.v20160715</jetty-alpn.version>
<junit5.version>5.7.0</junit5.version>
<junit5.platform.version>1.7.0</junit5.platform.version>
<org.lz4.version>1.7.1</org.lz4.version>
@@ -150,7 +151,6 @@
<include>com.yahoo.vespa:configgen:*:jar:provided</include>
<include>com.yahoo.vespa:container-core:*:jar:provided</include>
<include>com.yahoo.vespa:container-dev:*:jar:provided</include>
- <include>com.yahoo.vespa:container-di:*:jar:provided</include>
<include>com.yahoo.vespa:container-disc:*:jar:provided</include>
<include>com.yahoo.vespa:container-documentapi:*:jar:provided</include>
<include>com.yahoo.vespa:container-jersey2:*:jar:provided</include>
@@ -177,7 +177,6 @@
<include>com.yahoo.vespa:searchcore:*:jar:provided</include>
<include>com.yahoo.vespa:searchlib:*:jar:provided</include>
<include>com.yahoo.vespa:security-utils:*:jar:provided</include>
- <include>com.yahoo.vespa:simplemetrics:*:jar:provided</include>
<include>com.yahoo.vespa:statistics:*:jar:provided</include>
<include>com.yahoo.vespa:vdslib:*:jar:provided</include>
<include>com.yahoo.vespa:vespa-http-client:*:jar:provided</include>
@@ -236,7 +235,6 @@
<include>commons-digester:commons-digester:1.8:jar:test</include>
<include>io.airlift:aircompressor:0.17:jar:test</include>
<include>io.airlift:airline:0.7:jar:test</include>
- <include>io.jsonwebtoken:jjwt:0.9.1:jar:test</include>
<include>io.prometheus:simpleclient:0.6.0:jar:test</include>
<include>io.prometheus:simpleclient_common:0.6.0:jar:test</include>
<include>joda-time:joda-time:2.8.1:jar:test</include>
@@ -253,6 +251,12 @@
<include>org.apache.opennlp:opennlp-tools:1.8.4:jar:test</include>
<include>org.apiguardian:apiguardian-api:1.1.0:jar:test</include>
<include>org.codehaus.woodstox:stax2-api:3.1.4:jar:test</include>
+ <include>org.eclipse.jetty.alpn:alpn-api:[${jetty-alpn.version}]:jar:test</include>
+ <include>org.eclipse.jetty.http2:http2-common:[${jetty.version}]:jar:test</include>
+ <include>org.eclipse.jetty.http2:http2-hpack:[${jetty.version}]:jar:test</include>
+ <include>org.eclipse.jetty.http2:http2-server:[${jetty.version}]:jar:test</include>
+ <include>org.eclipse.jetty:jetty-alpn-server:[${jetty.version}]:jar:test</include>
+ <include>org.eclipse.jetty:jetty-alpn-java-server:[${jetty.version}]:jar:test</include>
<include>org.eclipse.jetty:jetty-continuation:[${jetty.version}]:jar:test</include>
<include>org.eclipse.jetty:jetty-jmx:[${jetty.version}]:jar:test</include>
<include>org.eclipse.jetty:jetty-security:[${jetty.version}]:jar:test</include>
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/HierarchicalGroupVisiting.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/HierarchicalGroupVisiting.java
index 1e8fb9e2ffb..0ff370fc57d 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/HierarchicalGroupVisiting.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/HierarchicalGroupVisiting.java
@@ -4,8 +4,10 @@ package com.yahoo.vespa.clustercontroller.core;
import com.yahoo.vdslib.distribution.GroupVisitor;
-@FunctionalInterface
public interface HierarchicalGroupVisiting {
+ /** Returns true if the group contains more than one (leaf) group. */
+ boolean isHierarchical();
+
/**
* Invoke the visitor for each leaf group of an implied group. If the group is non-hierarchical
* (flat), the visitor will not be invoked.
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/HierarchicalGroupVisitingAdapter.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/HierarchicalGroupVisitingAdapter.java
index b0d69750c77..b638604c311 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/HierarchicalGroupVisitingAdapter.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/HierarchicalGroupVisitingAdapter.java
@@ -18,18 +18,20 @@ public class HierarchicalGroupVisitingAdapter implements HierarchicalGroupVisiti
}
@Override
- public void visit(GroupVisitor visitor) {
- if (distribution.getRootGroup().isLeafGroup()) {
- // A flat non-hierarchical cluster
- return;
- }
+ public boolean isHierarchical() {
+ return !distribution.getRootGroup().isLeafGroup();
+ }
- distribution.visitGroups(group -> {
- if (group.isLeafGroup()) {
- return visitor.visitGroup(group);
- }
+ @Override
+ public void visit(GroupVisitor visitor) {
+ if (isHierarchical()) {
+ distribution.visitGroups(group -> {
+ if (group.isLeafGroup()) {
+ return visitor.visitGroup(group);
+ }
- return true;
- });
+ return true;
+ });
+ }
}
}
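With the adapter reworked as above, HierarchicalGroupVisiting now exposes isHierarchical() alongside visit(), so a caller can branch on the topology before driving a visit. A hedged caller-side sketch of what that can look like; the LeafGroupCounter class and the idea of counting leaf groups are illustrative assumptions, only the two interface methods come from the patch.

    import com.yahoo.vdslib.distribution.GroupVisitor;
    import com.yahoo.vespa.clustercontroller.core.HierarchicalGroupVisiting;
    import java.util.concurrent.atomic.AtomicInteger;

    /** Hypothetical caller-side sketch, not part of the patch. */
    public class LeafGroupCounter {

        /** Counts leaf groups, treating a flat cluster as one implied group. */
        public static int countLeafGroups(HierarchicalGroupVisiting visiting) {
            if (!visiting.isHierarchical()) {
                return 1;  // visit() never invokes the visitor for flat clusters, so count the implied group here
            }
            AtomicInteger count = new AtomicInteger();
            visiting.visit(group -> {
                count.incrementAndGet();
                return true;  // true = keep visiting the remaining leaf groups
            });
            return count.get();
        }
    }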
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeStateChangeChecker.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeStateChangeChecker.java
index dd33646dd31..413e0bbf03f 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeStateChangeChecker.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeStateChangeChecker.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.clustercontroller.core;
import com.yahoo.lang.MutableBoolean;
+import com.yahoo.lang.SettableOptional;
import com.yahoo.vdslib.distribution.ConfiguredNode;
import com.yahoo.vdslib.distribution.Group;
import com.yahoo.vdslib.state.ClusterState;
@@ -212,10 +213,13 @@ public class NodeStateChangeChecker {
oldWantedState.getState() + ": " + oldWantedState.getDescription());
}
- switch (clusterState.getNodeState(nodeInfo.getNode()).getState()) {
- case MAINTENANCE:
- case DOWN:
- return Result.allowSettingOfWantedState();
+ Result otherGroupCheck = anotherNodeInAnotherGroupHasWantedState(nodeInfo);
+ if (!otherGroupCheck.settingWantedStateIsAllowed()) {
+ return otherGroupCheck;
+ }
+
+ if (clusterState.getNodeState(nodeInfo.getNode()).getState() == State.DOWN) {
+ return Result.allowSettingOfWantedState();
}
if (anotherNodeInGroupAlreadyAllowed(nodeInfo, newDescription)) {
@@ -235,6 +239,85 @@ public class NodeStateChangeChecker {
return Result.allowSettingOfWantedState();
}
+ /**
+ * Returns a disallow-result if there is another node (in another group, if hierarchical)
+ * that has a wanted state != UP. We disallow more than 1 suspended node/group at a time.
+ */
+ private Result anotherNodeInAnotherGroupHasWantedState(StorageNodeInfo nodeInfo) {
+ if (groupVisiting.isHierarchical()) {
+ SettableOptional<Result> anotherNodeHasWantedState = new SettableOptional<>();
+
+ groupVisiting.visit(group -> {
+ if (!groupContainsNode(group, nodeInfo.getNode())) {
+ Result result = otherNodeInGroupHasWantedState(group);
+ if (!result.settingWantedStateIsAllowed()) {
+ anotherNodeHasWantedState.set(result);
+ // Have found a node that is suspended, halt the visiting
+ return false;
+ }
+ }
+
+ return true;
+ });
+
+ return anotherNodeHasWantedState.asOptional().orElseGet(Result::allowSettingOfWantedState);
+ } else {
+ // Return a disallow-result if there is another node with a wanted state
+ return otherNodeHasWantedState(nodeInfo);
+ }
+ }
+
+ /** Returns a disallow-result if there is a node in the group with wanted state != UP. */
+ private Result otherNodeInGroupHasWantedState(Group group) {
+ for (var configuredNode : group.getNodes()) {
+ StorageNodeInfo storageNodeInfo = clusterInfo.getStorageNodeInfo(configuredNode.index());
+ if (storageNodeInfo == null) continue; // needed for tests only
+ State storageNodeWantedState = storageNodeInfo
+ .getUserWantedState().getState();
+ if (storageNodeWantedState != State.UP) {
+ return Result.createDisallowed(
+ "At most one group can have wanted state: Other storage node " + configuredNode.index() +
+ " in group " + group.getIndex() + " has wanted state " + storageNodeWantedState);
+ }
+
+ State distributorWantedState = clusterInfo.getDistributorNodeInfo(configuredNode.index())
+ .getUserWantedState().getState();
+ if (distributorWantedState != State.UP) {
+ return Result.createDisallowed(
+ "At most one group can have wanted state: Other distributor " + configuredNode.index() +
+ " in group " + group.getIndex() + " has wanted state " + distributorWantedState);
+ }
+ }
+
+ return Result.allowSettingOfWantedState();
+ }
+
+ private Result otherNodeHasWantedState(StorageNodeInfo nodeInfo) {
+ for (var configuredNode : clusterInfo.getConfiguredNodes().values()) {
+ if (configuredNode.index() == nodeInfo.getNodeIndex()) {
+ continue;
+ }
+
+ State storageNodeWantedState = clusterInfo.getStorageNodeInfo(configuredNode.index())
+ .getUserWantedState().getState();
+ if (storageNodeWantedState != State.UP) {
+ return Result.createDisallowed(
+ "At most one node can have a wanted state when #groups = 1: Other storage node " +
+ configuredNode.index() + " has wanted state " + storageNodeWantedState);
+ }
+
+ State distributorWantedState = clusterInfo.getDistributorNodeInfo(configuredNode.index())
+ .getUserWantedState().getState();
+ if (distributorWantedState != State.UP) {
+ return Result.createDisallowed(
+ "At most one node can have a wanted state when #groups = 1: Other distributor " +
+ configuredNode.index() + " has wanted state " + distributorWantedState);
+ }
+ }
+
+ return Result.allowSettingOfWantedState();
+ }
+
private boolean anotherNodeInGroupAlreadyAllowed(StorageNodeInfo nodeInfo, String newDescription) {
MutableBoolean alreadyAllowed = new MutableBoolean(false);
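The checks added above enforce that, under the SAFE condition, at most one group (or, for a flat cluster, at most one node) may carry a wanted state other than UP at a time. A tiny self-contained sketch of the flat-cluster variant of that rule, using a plain map rather than the cluster controller's ClusterInfo; every name and type in it is illustrative only.

    import java.util.Map;
    import java.util.Optional;

    public class SingleSuspensionRuleSketch {
        enum State { UP, DOWN, MAINTENANCE }

        /**
         * Returns empty if the node may be suspended, otherwise a human-readable reason,
         * mirroring the "at most one node can have a wanted state when #groups = 1" rule.
         */
        static Optional<String> denialReason(int nodeToSuspend, Map<Integer, State> wantedStates) {
            for (Map.Entry<Integer, State> entry : wantedStates.entrySet()) {
                if (entry.getKey() == nodeToSuspend) continue;  // the node itself may already be suspended
                if (entry.getValue() != State.UP) {
                    return Optional.of("Other node " + entry.getKey() +
                                       " has wanted state " + entry.getValue());
                }
            }
            return Optional.empty();
        }

        public static void main(String[] args) {
            Map<Integer, State> wanted = Map.of(0, State.MAINTENANCE, 1, State.UP, 2, State.UP);
            System.out.println(denialReason(1, wanted)); // Optional[Other node 0 has wanted state MAINTENANCE]
            System.out.println(denialReason(0, wanted)); // Optional.empty, node 0 is the one already suspended
        }
    }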
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DummyVdsNode.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DummyVdsNode.java
index 8a25f9a9e76..b54d816308b 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DummyVdsNode.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DummyVdsNode.java
@@ -72,6 +72,7 @@ public class DummyVdsNode {
public void reset() {}
public double get() { return 0.01; }
public boolean shouldWarn(double v) { return false; }
+ public boolean shouldInform(double v) { return false; }
}
private final List<Req> waitingRequests = new LinkedList<>();
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java
index 1d7b6886222..ce2a08effce 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java
@@ -93,6 +93,7 @@ public abstract class FleetControllerTest implements Waiter {
public void reset() { counter = 0; }
public double get() { ++counter; return 0.01; }
public boolean shouldWarn(double v) { return ((counter % 1000) == 10); }
+ public boolean shouldInform(double v) { return false; }
}
protected class CleanupZookeeperLogsOnSuccess extends TestWatcher {
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeStateChangeCheckerTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeStateChangeCheckerTest.java
index 5e3dbbe713b..5d08669714c 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeStateChangeCheckerTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeStateChangeCheckerTest.java
@@ -4,6 +4,7 @@ package com.yahoo.vespa.clustercontroller.core;
import com.yahoo.vdslib.distribution.ConfiguredNode;
import com.yahoo.vdslib.distribution.Distribution;
import com.yahoo.vdslib.distribution.Group;
+import com.yahoo.vdslib.distribution.GroupVisitor;
import com.yahoo.vdslib.state.ClusterState;
import com.yahoo.vdslib.state.Node;
import com.yahoo.vdslib.state.NodeState;
@@ -11,6 +12,7 @@ import com.yahoo.vdslib.state.NodeType;
import com.yahoo.vdslib.state.State;
import com.yahoo.vespa.clustercontroller.core.hostinfo.HostInfo;
import com.yahoo.vespa.clustercontroller.utils.staterestapi.requests.SetUnitStateRequest;
+import com.yahoo.vespa.config.content.StorDistributionConfig;
import org.junit.Test;
import java.text.ParseException;
@@ -40,6 +42,11 @@ public class NodeStateChangeCheckerTest {
private static final NodeState MAINTENANCE_NODE_STATE = createNodeState(State.MAINTENANCE, "Orchestrator");
private static final NodeState DOWN_NODE_STATE = createNodeState(State.DOWN, "RetireEarlyExpirer");
+ private static final HierarchicalGroupVisiting noopVisiting = new HierarchicalGroupVisiting() {
+ @Override public boolean isHierarchical() { return false; }
+ @Override public void visit(GroupVisitor visitor) { }
+ };
+
private static NodeState createNodeState(State state, String description) {
return new NodeState(NodeType.STORAGE, state).setDescription(description);
}
@@ -57,12 +64,12 @@ public class NodeStateChangeCheckerTest {
}
private NodeStateChangeChecker createChangeChecker(ContentCluster cluster) {
- return new NodeStateChangeChecker(requiredRedundancy, visitor -> {}, cluster.clusterInfo(), false);
+ return new NodeStateChangeChecker(requiredRedundancy, noopVisiting, cluster.clusterInfo(), false);
}
private ContentCluster createCluster(Collection<ConfiguredNode> nodes) {
Distribution distribution = mock(Distribution.class);
- Group group = new Group(2, "to");
+ Group group = new Group(2, "two");
when(distribution.getRootGroup()).thenReturn(group);
return new ContentCluster("Clustername", nodes, distribution);
}
@@ -117,7 +124,7 @@ public class NodeStateChangeCheckerTest {
public void testDeniedInMoratorium() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = new NodeStateChangeChecker(
- requiredRedundancy, visitor -> {}, cluster.clusterInfo(), true);
+ requiredRedundancy, noopVisiting, cluster.clusterInfo(), true);
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
new Node(NodeType.STORAGE, 10), defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
@@ -130,7 +137,7 @@ public class NodeStateChangeCheckerTest {
public void testUnknownStorageNode() {
ContentCluster cluster = createCluster(createNodes(4));
NodeStateChangeChecker nodeStateChangeChecker = new NodeStateChangeChecker(
- requiredRedundancy, visitor -> {}, cluster.clusterInfo(), false);
+ requiredRedundancy, noopVisiting, cluster.clusterInfo(), false);
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
new Node(NodeType.STORAGE, 10), defaultAllUpClusterState(), SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
@@ -140,6 +147,158 @@ public class NodeStateChangeCheckerTest {
}
@Test
+ public void testSafeMaintenanceDisallowedWhenOtherStorageNodeInFlatClusterIsSuspended() {
+ // Nodes 0-3, storage node 0 being in maintenance with "Orchestrator" description.
+ ContentCluster cluster = createCluster(createNodes(4));
+ cluster.clusterInfo().getStorageNodeInfo(0).setWantedState(new NodeState(NodeType.STORAGE, State.MAINTENANCE).setDescription("Orchestrator"));
+ NodeStateChangeChecker nodeStateChangeChecker = new NodeStateChangeChecker(
+ requiredRedundancy, noopVisiting, cluster.clusterInfo(), false);
+ ClusterState clusterStateWith0InMaintenance = clusterState(String.format(
+ "version:%d distributor:4 storage:4 .0.s:m",
+ currentClusterStateVersion));
+
+ NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
+ new Node(NodeType.STORAGE, 1), clusterStateWith0InMaintenance,
+ SetUnitStateRequest.Condition.SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
+ assertFalse(result.settingWantedStateIsAllowed());
+ assertFalse(result.wantedStateAlreadySet());
+ assertThat(result.getReason(), is("At most one node can have a wanted state when #groups = 1: " +
+ "Other storage node 0 has wanted state Maintenance"));
+ }
+
+ @Test
+ public void testSafeMaintenanceDisallowedWhenOtherDistributorInFlatClusterIsSuspended() {
+ // Nodes 0-3, distributor 0 having wanted state Down with "Orchestrator" description.
+ ContentCluster cluster = createCluster(createNodes(4));
+ cluster.clusterInfo().getDistributorNodeInfo(0)
+ .setWantedState(new NodeState(NodeType.DISTRIBUTOR, State.DOWN).setDescription("Orchestrator"));
+ NodeStateChangeChecker nodeStateChangeChecker = new NodeStateChangeChecker(
+ requiredRedundancy, noopVisiting, cluster.clusterInfo(), false);
+ ClusterState clusterStateWith0InMaintenance = clusterState(String.format(
+ "version:%d distributor:4 .0.s:d storage:4",
+ currentClusterStateVersion));
+
+ NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
+ new Node(NodeType.STORAGE, 1), clusterStateWith0InMaintenance,
+ SetUnitStateRequest.Condition.SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
+ assertFalse(result.settingWantedStateIsAllowed());
+ assertFalse(result.wantedStateAlreadySet());
+ assertThat(result.getReason(), is("At most one node can have a wanted state when #groups = 1: " +
+ "Other distributor 0 has wanted state Down"));
+ }
+
+ @Test
+ public void testSafeMaintenanceDisallowedWhenDistributorInGroupIsDown() {
+ // Nodes 0-3, distributor 0 having wanted state Down with "Orchestrator" description.
+ // 2 groups: nodes 0-1 is group 0, 2-3 is group 1.
+ ContentCluster cluster = createCluster(createNodes(4));
+ cluster.clusterInfo().getDistributorNodeInfo(0)
+ .setWantedState(new NodeState(NodeType.STORAGE, State.DOWN).setDescription("Orchestrator"));
+ HierarchicalGroupVisiting visiting = makeHierarchicalGroupVisitingWith2Groups(4);
+ NodeStateChangeChecker nodeStateChangeChecker = new NodeStateChangeChecker(
+ requiredRedundancy, visiting, cluster.clusterInfo(), false);
+ ClusterState clusterStateWith0InMaintenance = clusterState(String.format(
+ "version:%d distributor:4 .0.s:d storage:4",
+ currentClusterStateVersion));
+
+ {
+ // Denied for node 2 in group 1, since distributor 0 in group 0 is down
+ NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
+ new Node(NodeType.STORAGE, 2), clusterStateWith0InMaintenance,
+ SetUnitStateRequest.Condition.SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
+ assertFalse(result.settingWantedStateIsAllowed());
+ assertFalse(result.wantedStateAlreadySet());
+ assertThat(result.getReason(), is("At most one group can have wanted state: " +
+ "Other distributor 0 in group 0 has wanted state Down"));
+ }
+
+ {
+ // Even node 1 of group 0 is not permitted, as node 0 is not considered
+ // suspended since only the distributor has been set down.
+ NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
+ new Node(NodeType.STORAGE, 1), clusterStateWith0InMaintenance,
+ SetUnitStateRequest.Condition.SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
+ assertFalse(result.getReason(), result.settingWantedStateIsAllowed());
+ assertEquals("Another distributor wants state DOWN: 0", result.getReason());
+ }
+ }
+
+ @Test
+ public void testSafeMaintenanceWhenOtherStorageNodeInGroupIsSuspended() {
+ // Nodes 0-3, storage node 0 being in maintenance with "Orchestrator" description.
+ // 2 groups: nodes 0-1 is group 0, 2-3 is group 1.
+ ContentCluster cluster = createCluster(createNodes(4));
+ cluster.clusterInfo().getStorageNodeInfo(0).setWantedState(new NodeState(NodeType.STORAGE, State.MAINTENANCE).setDescription("Orchestrator"));
+ HierarchicalGroupVisiting visiting = makeHierarchicalGroupVisitingWith2Groups(4);
+ NodeStateChangeChecker nodeStateChangeChecker = new NodeStateChangeChecker(
+ requiredRedundancy, visiting, cluster.clusterInfo(), false);
+ ClusterState clusterStateWith0InMaintenance = clusterState(String.format(
+ "version:%d distributor:4 storage:4 .0.s:m",
+ currentClusterStateVersion));
+
+ {
+ // Denied for node 2 in group 1, since node 0 in group 0 is in maintenance
+ NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
+ new Node(NodeType.STORAGE, 2), clusterStateWith0InMaintenance,
+ SetUnitStateRequest.Condition.SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
+ assertFalse(result.settingWantedStateIsAllowed());
+ assertFalse(result.wantedStateAlreadySet());
+ assertThat(result.getReason(), is("At most one group can have wanted state: " +
+ "Other storage node 0 in group 0 has wanted state Maintenance"));
+ }
+
+ {
+ // Permitted for node 1 in group 0, since node 0 is already in maintenance with
+ // description Orchestrator, and it is in the same group
+ NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
+ new Node(NodeType.STORAGE, 1), clusterStateWith0InMaintenance,
+ SetUnitStateRequest.Condition.SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
+ assertTrue(result.getReason(), result.settingWantedStateIsAllowed());
+ assertFalse(result.wantedStateAlreadySet());
+ }
+ }
+
+ /**
+ * Make a HierarchicalGroupVisiting with the given number of nodes, with 2 groups:
+ * Group "0" is nodes 0-1, group "1" is 2-3.
+ */
+ private HierarchicalGroupVisiting makeHierarchicalGroupVisitingWith2Groups(int nodes) {
+ int groups = 2;
+ if (nodes % groups != 0) {
+ throw new IllegalArgumentException("Cannot have 2 groups with an odd number of nodes: " + nodes);
+ }
+ int nodesPerGroup = nodes / groups;
+
+ var configBuilder = new StorDistributionConfig.Builder()
+ .active_per_leaf_group(true)
+ .ready_copies(2)
+ .redundancy(2)
+ .initial_redundancy(2);
+
+ configBuilder.group(new StorDistributionConfig.Group.Builder()
+ .index("invalid")
+ .name("invalid")
+ .capacity(nodes)
+ .partitions("1|*"));
+
+ int nodeIndex = 0;
+ for (int i = 0; i < groups; ++i) {
+ var groupBuilder = new StorDistributionConfig.Group.Builder()
+ .index(String.valueOf(i))
+ .name(String.valueOf(i))
+ .capacity(nodesPerGroup)
+ .partitions("");
+ for (int j = 0; j < nodesPerGroup; ++j, ++nodeIndex) {
+ groupBuilder.nodes(new StorDistributionConfig.Group.Nodes.Builder()
+ .index(nodeIndex));
+ }
+ configBuilder.group(groupBuilder);
+ }
+
+ return new HierarchicalGroupVisitingAdapter(new Distribution(configBuilder.build()));
+ }
+
+ @Test
public void testSafeSetStateDistributors() {
NodeStateChangeChecker nodeStateChangeChecker = createChangeChecker(createCluster(createNodes(1)));
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
@@ -162,7 +321,7 @@ public class NodeStateChangeCheckerTest {
// We should then be denied setting storage node 1 safely to maintenance.
NodeStateChangeChecker nodeStateChangeChecker = new NodeStateChangeChecker(
- requiredRedundancy, visitor -> {}, cluster.clusterInfo(), false);
+ requiredRedundancy, noopVisiting, cluster.clusterInfo(), false);
NodeStateChangeChecker.Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, clusterStateWith3Down, SetUnitStateRequest.Condition.SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/SetNodeStateTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/SetNodeStateTest.java
index 712c34eae4b..83dbdbe31c8 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/SetNodeStateTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/SetNodeStateTest.java
@@ -215,7 +215,7 @@ public class SetNodeStateTest extends StateRestApiTest {
@Test
public void testShouldModifyStorageSafeBlocked() throws Exception {
// Sets up 2 groups: [0, 2, 4] and [1, 3, 5]
- setUpMusicGroup(6, false);
+ setUpMusicGroup(6, "");
assertUnitState(1, "user", State.UP, "");
assertSetUnitState(1, State.MAINTENANCE, null);
@@ -223,28 +223,28 @@ public class SetNodeStateTest extends StateRestApiTest {
assertSetUnitState(1, State.MAINTENANCE, null); // sanity-check
// Because 2 is in a different group maintenance should be denied
- assertSetUnitStateCausesAlreadyInWantedMaintenance(2, State.MAINTENANCE);
+ assertSetUnitStateCausesAtMostOneGroupError(2, State.MAINTENANCE);
// Because 3 and 5 are in the same group as 1, these should be OK
assertSetUnitState(3, State.MAINTENANCE, null);
assertUnitState(1, "user", State.MAINTENANCE, "whatever reason."); // sanity-check
assertUnitState(3, "user", State.MAINTENANCE, "whatever reason."); // sanity-check
assertSetUnitState(5, State.MAINTENANCE, null);
- assertSetUnitStateCausesAlreadyInWantedMaintenance(2, State.MAINTENANCE); // sanity-check
+ assertSetUnitStateCausesAtMostOneGroupError(2, State.MAINTENANCE); // sanity-check
// Set all to up
assertSetUnitState(1, State.UP, null);
assertSetUnitState(1, State.UP, null); // sanity-check
assertSetUnitState(3, State.UP, null);
- assertSetUnitStateCausesAlreadyInWantedMaintenance(2, State.MAINTENANCE); // sanity-check
+ assertSetUnitStateCausesAtMostOneGroupError(2, State.MAINTENANCE); // sanity-check
assertSetUnitState(5, State.UP, null);
// Now we should be allowed to upgrade second group, while the first group will be denied
assertSetUnitState(2, State.MAINTENANCE, null);
- assertSetUnitStateCausesAlreadyInWantedMaintenance(1, State.MAINTENANCE); // sanity-check
+ assertSetUnitStateCausesAtMostOneGroupError(1, State.MAINTENANCE); // sanity-check
assertSetUnitState(0, State.MAINTENANCE, null);
assertSetUnitState(4, State.MAINTENANCE, null);
- assertSetUnitStateCausesAlreadyInWantedMaintenance(1, State.MAINTENANCE); // sanity-check
+ assertSetUnitStateCausesAtMostOneGroupError(1, State.MAINTENANCE); // sanity-check
// And set second group up again
assertSetUnitState(0, State.MAINTENANCE, null);
@@ -253,10 +253,10 @@ public class SetNodeStateTest extends StateRestApiTest {
}
@Test
- public void settingSafeMaintenanceWhenNodeAlreadyInMaintenance() throws Exception {
- // Sets up 2 groups: [0, 2, 4] and [1, 3, 5], with 1 being in maintenance
- setUpMusicGroup(6, true);
- assertUnitState(1, "generated", State.MAINTENANCE, "");
+ public void settingSafeMaintenanceWhenNodeDown() throws Exception {
+ // Sets up 2 groups: [0, 2, 4] and [1, 3, 5], with 1 being down
+ setUpMusicGroup(6, " .1.s:d");
+ assertUnitState(1, "generated", State.DOWN, "");
assertUnitState(1, "user", State.UP, "");
assertSetUnitState(1, State.MAINTENANCE, null);
@@ -264,14 +264,14 @@ public class SetNodeStateTest extends StateRestApiTest {
assertSetUnitState(1, State.MAINTENANCE, null); // sanity-check
// Because 2 is in a different group maintenance should be denied
- assertSetUnitStateCausesAlreadyInWantedMaintenance(2, State.MAINTENANCE);
+ assertSetUnitStateCausesAtMostOneGroupError(2, State.MAINTENANCE);
// Because 3 and 5 are in the same group as 1, these should be OK
assertSetUnitState(3, State.MAINTENANCE, null);
assertUnitState(1, "user", State.MAINTENANCE, "whatever reason."); // sanity-check
assertUnitState(3, "user", State.MAINTENANCE, "whatever reason."); // sanity-check
assertSetUnitState(5, State.MAINTENANCE, null);
- assertSetUnitStateCausesAlreadyInWantedMaintenance(2, State.MAINTENANCE); // sanity-check
+ assertSetUnitStateCausesAtMostOneGroupError(2, State.MAINTENANCE); // sanity-check
// Set all to up
assertSetUnitState(1, State.UP, null);
@@ -279,7 +279,7 @@ public class SetNodeStateTest extends StateRestApiTest {
assertSetUnitState(3, State.UP, null);
// Because 1 is in maintenance, even though user wanted state is UP, trying to set 2 to
// maintenance will fail.
- assertSetUnitStateCausesAlreadyInMaintenance(2, State.MAINTENANCE);
+ assertSetUnitStateCausesAnotherNodeHasStateError(2, State.MAINTENANCE);
assertSetUnitState(5, State.UP, null);
}
@@ -306,15 +306,17 @@ public class SetNodeStateTest extends StateRestApiTest {
}
}
- private void assertSetUnitStateCausesAlreadyInWantedMaintenance(int index, State state) throws StateRestApiException {
- assertSetUnitStateCausesAlreadyInMaintenance(index, state, "^Another storage node wants state MAINTENANCE: ([0-9]+)$");
+ private void assertSetUnitStateCausesAtMostOneGroupError(int index, State state) throws StateRestApiException {
+ assertSetUnitStateFails(index, state, "^At most one group can have wanted state: " +
+ "Other storage node ([0-9]+) in group ([0-9]+) has wanted state Maintenance$");
}
- private void assertSetUnitStateCausesAlreadyInMaintenance(int index, State state) throws StateRestApiException {
- assertSetUnitStateCausesAlreadyInMaintenance(index, state, "^Another storage node has state MAINTENANCE: ([0-9]+)$");
+ private void assertSetUnitStateCausesAnotherNodeHasStateError(int index, State state) throws StateRestApiException {
+ assertSetUnitStateFails(index, state, "^At most one group can have wanted state: " +
+ "Other storage node ([0-9]+) in group ([0-9]+) has wanted state Maintenance$");
}
- private void assertSetUnitStateCausesAlreadyInMaintenance(int index, State state, String reasonRegex)
+ private void assertSetUnitStateFails(int index, State state, String reasonRegex)
throws StateRestApiException {
SetResponse setResponse = restAPI.setUnitState(new SetUnitStateRequestImpl("music/storage/" + index)
.setNewState("user", state.toString().toLowerCase(), "whatever reason.")
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/StateRestApiTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/StateRestApiTest.java
index 14eab503885..cc2c100e105 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/StateRestApiTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/StateRestApiTest.java
@@ -92,15 +92,14 @@ public abstract class StateRestApiTest {
}, ccSockets);
}
- protected void setUpMusicGroup(int nodeCount, boolean node1InMaintenance) {
+ protected void setUpMusicGroup(int nodeCount, String node1StateString) {
books = null;
Distribution distribution = new Distribution(Distribution.getSimpleGroupConfig(2, nodeCount));
jsonWriter.setDefaultPathPrefix("/cluster/v2");
ContentCluster cluster = new ContentCluster("music", distribution.getNodes(), distribution);
initializeCluster(cluster, distribution.getNodes());
AnnotatedClusterState baselineState = AnnotatedClusterState
- .withoutAnnotations(ClusterState.stateFromString("distributor:" + nodeCount + " storage:" + nodeCount +
- (node1InMaintenance ? " .1.s:m" : "")));
+ .withoutAnnotations(ClusterState.stateFromString("distributor:" + nodeCount + " storage:" + nodeCount + node1StateString));
Map<String, AnnotatedClusterState> bucketSpaceStates = new HashMap<>();
bucketSpaceStates.put("default", AnnotatedClusterState
.withoutAnnotations(ClusterState.stateFromString("distributor:" + nodeCount + " storage:" + nodeCount)));
diff --git a/component/CMakeLists.txt b/component/CMakeLists.txt
deleted file mode 100644
index 87d0a4989ba..00000000000
--- a/component/CMakeLists.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-install_fat_java_artifact(component)
diff --git a/component/abi-spec.json b/component/abi-spec.json
index 2dfa3e0d71d..0e33fde3c5a 100644
--- a/component/abi-spec.json
+++ b/component/abi-spec.json
@@ -348,5 +348,20 @@
"public bridge synthetic java.lang.Object clone()"
],
"fields": []
+ },
+ "com.yahoo.container.di.componentgraph.Provider": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.yahoo.component.Deconstructable"
+ ],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public abstract java.lang.Object get()"
+ ],
+ "fields": []
}
} \ No newline at end of file
diff --git a/container-di/src/main/java/com/yahoo/container/di/componentgraph/Provider.java b/component/src/main/java/com/yahoo/container/di/componentgraph/Provider.java
index 3fd3195e5dd..3fd3195e5dd 100644
--- a/container-di/src/main/java/com/yahoo/container/di/componentgraph/Provider.java
+++ b/component/src/main/java/com/yahoo/container/di/componentgraph/Provider.java
diff --git a/component/src/main/java/com/yahoo/container/di/componentgraph/package-info.java b/component/src/main/java/com/yahoo/container/di/componentgraph/package-info.java
new file mode 100644
index 00000000000..054abe9cff9
--- /dev/null
+++ b/component/src/main/java/com/yahoo/container/di/componentgraph/package-info.java
@@ -0,0 +1,12 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * Note that this package also exists in another Vespa module, for historical
+ * reasons. All Java classes should be kept in this module, and the other
+ * module's package should remain empty.
+ */
+@ExportPackage
+@PublicApi
+package com.yahoo.container.di.componentgraph;
+
+import com.yahoo.api.annotations.PublicApi;
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/SchemaValidator.java b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/SchemaValidator.java
index b0b1209aa90..03024ab6a53 100644
--- a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/SchemaValidator.java
+++ b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/SchemaValidator.java
@@ -7,6 +7,7 @@ import com.thaiopensource.validate.ValidateProperty;
import com.thaiopensource.validate.ValidationDriver;
import com.thaiopensource.validate.rng.CompactSchemaReader;
import com.yahoo.config.application.api.DeployLogger;
+import com.yahoo.io.IOUtils;
import com.yahoo.io.reader.NamedReader;
import com.yahoo.yolean.Exceptions;
import org.xml.sax.ErrorHandler;
@@ -52,18 +53,20 @@ public class SchemaValidator {
}
public void validate(File file, String fileName) throws IOException {
- validate(ValidationDriver.fileInputSource(file), fileName);
+ validate(IOUtils.createReader(file.getAbsolutePath()), fileName);
}
public void validate(Reader reader) throws IOException {
- validate(new InputSource(reader), null);
+ validate(reader, null);
}
public void validate(NamedReader reader) throws IOException {
- validate(new InputSource(reader), reader.getName());
+ validate(reader, reader.getName());
}
- public void validate(InputSource inputSource, String fileName) throws IOException {
+ @Deprecated
+ /* @deprecated Will not give proper context for errors, use another validate method instead */
+ public void validate(InputSource inputSource, String fileName) throws IOException {
errorHandler.fileName = (fileName == null ? "input" : fileName);
errorHandler.reader = inputSource.getCharacterStream();
try {
@@ -72,8 +75,23 @@ public class SchemaValidator {
throw new RuntimeException("Aborting due to earlier XML errors.");
}
} catch (SAXException e) {
- // This should never happen, as it is handled by the ErrorHandler
- // installed for the driver.
+ // Shouldn't happen, error handler should have thrown
+ throw new IllegalArgumentException("XML error in " + errorHandler.fileName + ": " + Exceptions.toMessageString(e));
+ }
+ }
+
+ private void validate(Reader reader, String fileName) throws IOException {
+ errorHandler.fileName = (fileName == null ? "input" : fileName);
+ // The error handler needs to re-read the input, so read all content into a new reader
+ Reader newReader = new StringReader(IOUtils.readAll(reader));
+ errorHandler.reader = newReader;
+ try {
+ if ( ! driver.validate(new InputSource(newReader))) {
+ // Shouldn't happen, error handler should have thrown
+ throw new RuntimeException("Aborting due to earlier XML errors.");
+ }
+ } catch (SAXException e) {
+ // Shouldn't happen, error handler should have thrown
throw new IllegalArgumentException("XML error in " + errorHandler.fileName + ": " + Exceptions.toMessageString(e));
}
}
@@ -89,7 +107,7 @@ public class SchemaValidator {
volatile Reader reader;
public void warning(SAXParseException e) {
- deployLogger.log(Level.WARNING, message(e));
+ deployLogger.logApplicationPackage(Level.WARNING, message(e));
}
public void error(SAXParseException e) {
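
Note: the new private validate(Reader, String) above buffers the whole input before validating, because the error handler later re-reads the same content to quote the offending line. A minimal sketch of that buffering step using only the JDK (IOUtils.readAll is Vespa's own helper; the class and method names below are illustrative only, not part of this patch):

    import java.io.IOException;
    import java.io.Reader;
    import java.io.StringReader;

    class ReaderBuffering {
        // Read the whole input once so it can be re-read later, e.g. by an error handler
        // that wants to quote the offending line; equivalent in effect to
        // new StringReader(IOUtils.readAll(reader)) in the patch above.
        static StringReader buffer(Reader input) throws IOException {
            StringBuilder sb = new StringBuilder();
            char[] chunk = new char[4096];
            for (int n; (n = input.read(chunk)) != -1; ) sb.append(chunk, 0, n);
            return new StringReader(sb.toString());
        }
    }
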
diff --git a/config-model-api/abi-spec.json b/config-model-api/abi-spec.json
index ca736dd90d8..bdf2b53bc92 100644
--- a/config-model-api/abi-spec.json
+++ b/config-model-api/abi-spec.json
@@ -180,7 +180,8 @@
"abstract"
],
"methods": [
- "public abstract void log(java.util.logging.Level, java.lang.String)"
+ "public abstract void log(java.util.logging.Level, java.lang.String)",
+ "public void logApplicationPackage(java.util.logging.Level, java.lang.String)"
],
"fields": []
},
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/DeployLogger.java b/config-model-api/src/main/java/com/yahoo/config/application/api/DeployLogger.java
index 61cab2f6ce7..7271f3a394e 100644
--- a/config-model-api/src/main/java/com/yahoo/config/application/api/DeployLogger.java
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/DeployLogger.java
@@ -10,6 +10,14 @@ import java.util.logging.Level;
*/
public interface DeployLogger {
+ /** Log a message unrelated to the application package, e.g. internal error/status. */
void log(Level level, String message);
+ /**
+ * Log a message related to the application package. These messages should be actionable by the user, e.g.
+ * to signal usage of invalid or deprecated syntax.
+ */
+ default void logApplicationPackage(Level level, String message) {
+ log(level, message);
+ }
}
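
Note: logApplicationPackage is a default method delegating to log, so existing DeployLogger implementations keep compiling; an implementation can override it to surface user-actionable messages separately. A minimal sketch of such an override (the class and field names are illustrative, not from this patch):

    import com.yahoo.config.application.api.DeployLogger;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.logging.Level;
    import java.util.logging.Logger;

    class CollectingDeployLogger implements DeployLogger {

        private static final Logger log = Logger.getLogger("deploy");
        private final List<String> userMessages = new ArrayList<>();

        @Override
        public void log(Level level, String message) {
            log.log(level, message);                              // internal status/errors: system log only
        }

        @Override
        public void logApplicationPackage(Level level, String message) {
            userMessages.add(level.getName() + ": " + message);   // collected so they can be shown to the deployer
            log.log(level, message);
        }

        List<String> userMessages() { return List.copyOf(userMessages); }
    }
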
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java b/config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java
index 48a675fa182..4bd819b3b6a 100644
--- a/config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java
@@ -418,7 +418,7 @@ public class DeploymentSpecXmlReader {
if ("true".equals(activeValue)) return true;
if ("false".equals(activeValue)) return false;
throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " +
- "to control whether the region should receive production traffic");
+ "to control whether this region should receive traffic from the global endpoint of this application");
}
private static boolean isEmptySpec(Element root) {
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
index f15d700a398..3a2f71e2b8c 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
@@ -90,6 +90,9 @@ public interface ModelContext {
@ModelFeatureFlag(owners = {"tokle"}) default boolean tenantIamRole() { return false; }
@ModelFeatureFlag(owners = {"vekterli"}) default int maxActivationInhibitedOutOfSyncGroups() { return 0; }
@ModelFeatureFlag(owners = {"hmusum"}) default String jvmOmitStackTraceInFastThrowOption(ClusterSpec.Type type) { return ""; }
+ @ModelFeatureFlag(owners = {"bjorncs", "jonmv"}) default boolean enableJdiscHttp2() { return false; }
+ @ModelFeatureFlag(owners = {"tokle", "bjorncs"}) default boolean enableCustomAclMapping() { return false; }
+ @ModelFeatureFlag(owners = {"geirst", "vekterli"}) default int numDistributorStripes() { return 0; }
}
/** Warning: As elsewhere in this package, do not make backwards incompatible changes that will break old config models! */
@@ -126,8 +129,11 @@ public interface ModelContext {
// Note: Used in unit tests (set to false in TestProperties) to avoid needing to deal with implicitly created node for logserver
default boolean useDedicatedNodeForLogserver() { return true; }
+ // TODO: Remove after May 2021
default boolean dedicatedClusterControllerCluster() { return hostedVespa(); }
+ // Allow disabling mTLS for now, harden later
+ default boolean allowDisableMtls() { return true; }
}
@Retention(RetentionPolicy.RUNTIME)
diff --git a/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java b/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java
index 8befbb48016..a77dd65c608 100644
--- a/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java
+++ b/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java
@@ -214,7 +214,7 @@ public class DeployState implements ConfigDefinitionStore {
File importFrom = applicationPackage.getFileReference(ApplicationPackage.MODELS_DIR);
ImportedMlModels importedModels = new ImportedMlModels(importFrom, modelImporters);
for (var entry : importedModels.getSkippedModels().entrySet()) {
- deployLogger.log(Level.WARNING, "Skipping import of model " + entry.getKey() + " as an exception " +
+ deployLogger.logApplicationPackage(Level.WARNING, "Skipping import of model " + entry.getKey() + " as an exception " +
"occurred during import. Error: " + entry.getValue());
}
return importedModels;
diff --git a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
index 1402f728c73..75a1a167446 100644
--- a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
+++ b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
@@ -37,7 +37,6 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
private Zone zone;
private final Set<ContainerEndpoint> endpoints = Collections.emptySet();
private boolean useDedicatedNodeForLogserver = false;
- private boolean dedicatedClusterControllerCluster = true;
private boolean useThreePhaseUpdates = false;
private double defaultTermwiseLimit = 1.0;
private String jvmGCOptions = null;
@@ -61,6 +60,8 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
private int maxActivationInhibitedOutOfSyncGroups = 0;
private List<TenantSecretStore> tenantSecretStores = Collections.emptyList();
private String jvmOmitStackTraceInFastThrowOption;
+ private int numDistributorStripes = 0;
+ private boolean allowDisableMtls = true;
@Override public ModelContext.FeatureFlags featureFlags() { return this; }
@Override public boolean multitenant() { return multitenant; }
@@ -77,7 +78,6 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
@Override public boolean isBootstrap() { return false; }
@Override public boolean isFirstTimeDeployment() { return false; }
@Override public boolean useDedicatedNodeForLogserver() { return useDedicatedNodeForLogserver; }
- @Override public boolean dedicatedClusterControllerCluster() { return hostedVespa && dedicatedClusterControllerCluster; }
@Override public Optional<EndpointCertificateSecrets> endpointCertificateSecrets() { return endpointCertificateSecrets; }
@Override public double defaultTermwiseLimit() { return defaultTermwiseLimit; }
@Override public boolean useThreePhaseUpdates() { return useThreePhaseUpdates; }
@@ -102,6 +102,8 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
@Override public int maxActivationInhibitedOutOfSyncGroups() { return maxActivationInhibitedOutOfSyncGroups; }
@Override public List<TenantSecretStore> tenantSecretStores() { return tenantSecretStores; }
@Override public String jvmOmitStackTraceInFastThrowOption(ClusterSpec.Type type) { return jvmOmitStackTraceInFastThrowOption; }
+ @Override public int numDistributorStripes() { return numDistributorStripes; }
+ @Override public boolean allowDisableMtls() { return allowDisableMtls; }
public TestProperties setFeedConcurrency(double feedConcurrency) {
this.feedConcurrency = feedConcurrency;
@@ -168,11 +170,6 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
return this;
}
- public TestProperties setDedicatedClusterControllerCluster(boolean dedicatedClusterControllerCluster) {
- this.dedicatedClusterControllerCluster = dedicatedClusterControllerCluster;
- return this;
- }
-
public TestProperties setEndpointCertificateSecrets(Optional<EndpointCertificateSecrets> endpointCertificateSecrets) {
this.endpointCertificateSecrets = endpointCertificateSecrets;
return this;
@@ -248,6 +245,16 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
return this;
}
+ public TestProperties setNumDistributorStripes(int value) {
+ this.numDistributorStripes = value;
+ return this;
+ }
+
+ public TestProperties allowDisableMtls(boolean value) {
+ this.allowDisableMtls = value;
+ return this;
+ }
+
public static class Spec implements ConfigServerSpec {
private final String hostName;
diff --git a/config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java b/config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java
index 53df9045dfe..188504edd18 100644
--- a/config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java
+++ b/config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java
@@ -142,7 +142,6 @@ public class InMemoryProvisioner implements HostProvisioner {
public List<HostSpec> prepare(ClusterSpec cluster, ClusterResources requested, boolean required, boolean canFail) {
if (cluster.group().isPresent() && requested.groups() > 1)
throw new IllegalArgumentException("Cannot both be specifying a group and ask for groups to be created");
-
int capacity = failOnOutOfCapacity || required
? requested.nodes()
: Math.min(requested.nodes(), freeNodes.get(defaultResources).size() + totalAllocatedTo(cluster));
@@ -200,14 +199,16 @@ public class InMemoryProvisioner implements HostProvisioner {
for (int i = allocation.size() - 1; i >= 0; i--) {
NodeResources currentResources = allocation.get(0).advertisedResources();
if (currentResources.isUnspecified() || requestedResources.isUnspecified()) continue;
- if ( ! currentResources.compatibleWith(requestedResources)) {
+ if ( (! sharedHosts && ! currentResources.satisfies(requestedResources))
+ ||
+ (sharedHosts && ! currentResources.compatibleWith(requestedResources))) {
HostSpec removed = allocation.remove(i);
freeNodes.put(currentResources, new Host(removed.hostname())); // Return the node back to free pool
}
}
int nextIndex = nextIndexInCluster.getOrDefault(new Pair<>(clusterGroup.type(), clusterGroup.id()), startIndex);
- while (allocation.size() < nodesInGroup) {
+ while (nonRetiredIn(allocation).size() < nodesInGroup) {
// Find the smallest host that can fit the requested resources
Optional<NodeResources> hostResources = freeNodes.keySet().stream()
.sorted(new MemoryDiskCpu())
@@ -232,12 +233,16 @@ public class InMemoryProvisioner implements HostProvisioner {
}
nextIndexInCluster.put(new Pair<>(clusterGroup.type(), clusterGroup.id()), nextIndex);
- while (allocation.size() > nodesInGroup)
+ while (nonRetiredIn(allocation).size() > nodesInGroup)
allocation.remove(0);
return allocation;
}
+ private List<HostSpec> nonRetiredIn(List<HostSpec> hosts) {
+ return hosts.stream().filter(host -> ! retiredHostNames.contains(host.hostname())).collect(Collectors.toList());
+ }
+
private int totalAllocatedTo(ClusterSpec cluster) {
int count = 0;
for (Map.Entry<ClusterSpec, List<HostSpec>> allocation : allocations.entrySet()) {
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/MapEvaluationTypeContext.java b/config-model/src/main/java/com/yahoo/searchdefinition/MapEvaluationTypeContext.java
index bc98f0ab8c5..e8ee5e9ed57 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/MapEvaluationTypeContext.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/MapEvaluationTypeContext.java
@@ -39,6 +39,8 @@ import java.util.stream.Collectors;
*/
public class MapEvaluationTypeContext extends FunctionReferenceContext implements TypeContext<Reference> {
+ private final Optional<MapEvaluationTypeContext> parent;
+
private final Map<Reference, TensorType> featureTypes = new HashMap<>();
private final Map<Reference, TensorType> resolvedTypes = new HashMap<>();
@@ -54,6 +56,7 @@ public class MapEvaluationTypeContext extends FunctionReferenceContext implement
MapEvaluationTypeContext(Collection<ExpressionFunction> functions, Map<Reference, TensorType> featureTypes) {
super(functions);
+ this.parent = Optional.empty();
this.featureTypes.putAll(featureTypes);
this.currentResolutionCallStack = new ArrayDeque<>();
this.queryFeaturesNotDeclared = new TreeSet<>();
@@ -63,12 +66,14 @@ public class MapEvaluationTypeContext extends FunctionReferenceContext implement
private MapEvaluationTypeContext(Map<String, ExpressionFunction> functions,
Map<String, String> bindings,
+ Optional<MapEvaluationTypeContext> parent,
Map<Reference, TensorType> featureTypes,
Deque<Reference> currentResolutionCallStack,
SortedSet<Reference> queryFeaturesNotDeclared,
boolean tensorsAreUsed,
Map<Reference, TensorType> globallyResolvedTypes) {
super(functions, bindings);
+ this.parent = parent;
this.featureTypes.putAll(featureTypes);
this.currentResolutionCallStack = currentResolutionCallStack;
this.queryFeaturesNotDeclared = queryFeaturesNotDeclared;
@@ -124,19 +129,34 @@ public class MapEvaluationTypeContext extends FunctionReferenceContext implement
return resolvedType;
}
+ MapEvaluationTypeContext getParent(String forArgument, String boundTo) {
+ return parent.orElseThrow(
+ () -> new IllegalArgumentException("argument "+forArgument+" is bound to "+boundTo+" but there is no parent context"));
+ }
+
+ String resolveBinding(String argument) {
+ String bound = getBinding(argument);
+ if (bound == null) {
+ return argument;
+ }
+ return getParent(argument, bound).resolveBinding(bound);
+ }
+
private TensorType resolveType(Reference reference) {
if (currentResolutionCallStack.contains(reference))
throw new IllegalArgumentException("Invocation loop: " +
currentResolutionCallStack.stream().map(Reference::toString).collect(Collectors.joining(" -> ")) +
" -> " + reference);
- // Bound to a function argument, and not to a same-named identifier (which would lead to a loop)?
+ // Bound to a function argument?
Optional<String> binding = boundIdentifier(reference);
- if (binding.isPresent() && ! binding.get().equals(reference.toString())) {
+ if (binding.isPresent()) {
try {
// This is not pretty, but changing to bind expressions rather
// than their string values requires deeper changes
- return new RankingExpression(binding.get()).type(this);
+ var expr = new RankingExpression(binding.get());
+ var type = expr.type(getParent(reference.name(), binding.get()));
+ return type;
} catch (ParseException e) {
throw new IllegalArgumentException(e);
}
@@ -149,15 +169,18 @@ public class MapEvaluationTypeContext extends FunctionReferenceContext implement
if (FeatureNames.isSimpleFeature(reference)) {
// The argument may be a local identifier bound to the actual value
String argument = reference.simpleArgument().get();
- String argumentBinding = getBinding(argument);
- reference = Reference.simple(reference.name(), argumentBinding != null ? argumentBinding : argument);
+ String argumentBinding = resolveBinding(argument);
+ reference = Reference.simple(reference.name(), argumentBinding);
return featureTypes.get(reference);
}
// A reference to a function?
Optional<ExpressionFunction> function = functionInvocation(reference);
if (function.isPresent()) {
- return function.get().getBody().type(this.withBindings(bind(function.get().arguments(), reference.arguments())));
+ var body = function.get().getBody();
+ var child = this.withBindings(bind(function.get().arguments(), reference.arguments()));
+ var type = body.type(child);
+ return type;
}
// A reference to an ONNX model?
@@ -297,8 +320,7 @@ public class MapEvaluationTypeContext extends FunctionReferenceContext implement
Map<String, String> bindings = new HashMap<>(formalArguments.size());
for (int i = 0; i < formalArguments.size(); i++) {
String identifier = invocationArguments.expressions().get(i).toString();
- String identifierBinding = super.getBinding(identifier);
- bindings.put(formalArguments.get(i), identifierBinding != null ? identifierBinding : identifier);
+ bindings.put(formalArguments.get(i), identifier);
}
return bindings;
}
@@ -323,6 +345,7 @@ public class MapEvaluationTypeContext extends FunctionReferenceContext implement
public MapEvaluationTypeContext withBindings(Map<String, String> bindings) {
return new MapEvaluationTypeContext(functions(),
bindings,
+ Optional.of(this),
featureTypes,
currentResolutionCallStack,
queryFeaturesNotDeclared,
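
Note: the parent pointer added above lets resolveBinding follow an argument through nested function invocations: each withBindings call creates a child context, and a bound name is resolved further in the parent instead of being flattened when the binding is created. A standalone sketch of that chained lookup, with plain maps standing in for the contexts (not the actual class):

    import java.util.List;
    import java.util.Map;

    class BindingChain {
        // Innermost binding frame first. If the frame binds the name, keep resolving the
        // bound value in the parent frames, mirroring resolveBinding(); otherwise the
        // name itself is the final value.
        static String resolve(String name, List<Map<String, String>> frames) {
            if (frames.isEmpty()) return name;
            String bound = frames.get(0).get(name);
            return bound == null ? name : resolve(bound, frames.subList(1, frames.size()));
        }
    }

    // Example: with frames [{x=y}, {y=attribute(foo)}], resolve("x", frames) yields "attribute(foo)".
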
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/RankProfile.java b/config-model/src/main/java/com/yahoo/searchdefinition/RankProfile.java
index 8bef4c39ba1..b460752d7bd 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/RankProfile.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/RankProfile.java
@@ -103,6 +103,8 @@ public class RankProfile implements Cloneable {
private Map<String, RankingExpressionFunction> functions = new LinkedHashMap<>();
+ private Map<Reference, TensorType> inputFeatures = new LinkedHashMap<>();
+
private Set<String> filterFields = new HashSet<>();
private final RankProfileRegistry rankProfileRegistry;
@@ -578,6 +580,23 @@ public class RankProfile implements Cloneable {
return rankingExpressionFunction;
}
+ /**
+ * Use for rank profiles representing a model evaluation; it will assume
+ * that an input is provided with the declared type (for the purpose of
+ * type resolution).
+ **/
+ public void addInputFeature(String name, TensorType declaredType) {
+ Reference ref = Reference.fromIdentifier(name);
+ if (inputFeatures.containsKey(ref)) {
+ TensorType hadType = inputFeatures.get(ref);
+ if (! declaredType.equals(hadType)) {
+ throw new IllegalArgumentException("Tried to replace input feature "+name+" with different type: "+
+ hadType+" -> "+declaredType);
+ }
+ }
+ inputFeatures.put(ref, declaredType);
+ }
+
public RankingExpressionFunction findFunction(String name) {
RankingExpressionFunction function = functions.get(name);
return ((function == null) && (getInherited() != null))
@@ -677,6 +696,7 @@ public class RankProfile implements Cloneable {
clone.summaryFeatures = summaryFeatures != null ? new LinkedHashSet<>(this.summaryFeatures) : null;
clone.rankFeatures = rankFeatures != null ? new LinkedHashSet<>(this.rankFeatures) : null;
clone.rankProperties = new LinkedHashMap<>(this.rankProperties);
+ clone.inputFeatures = new LinkedHashMap<>(this.inputFeatures);
clone.functions = new LinkedHashMap<>(this.functions);
clone.filterFields = new HashSet<>(this.filterFields);
clone.constants = new HashMap<>(this.constants);
@@ -790,8 +810,12 @@ public class RankProfile implements Cloneable {
return typeContext(queryProfiles, collectFeatureTypes());
}
+ public MapEvaluationTypeContext typeContext() { return typeContext(new QueryProfileRegistry()); }
+
private Map<Reference, TensorType> collectFeatureTypes() {
Map<Reference, TensorType> featureTypes = new HashMap<>();
+ // Add input features
+ inputFeatures.forEach((k, v) -> featureTypes.put(k, v));
// Add attributes
allFields().forEach(field -> addAttributeFeatureTypes(field, featureTypes));
allImportedFields().forEach(field -> addAttributeFeatureTypes(field, featureTypes));
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/SDDocumentTypeOrderer.java b/config-model/src/main/java/com/yahoo/searchdefinition/SDDocumentTypeOrderer.java
index 0d6c517da82..16609355130 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/SDDocumentTypeOrderer.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/SDDocumentTypeOrderer.java
@@ -125,7 +125,7 @@ public class SDDocumentTypeOrderer {
} else if (type instanceof ReferenceDataType) {
//do nothing
} else {
- deployLogger.log(Level.WARNING, "Unknown type : " + type);
+ deployLogger.logApplicationPackage(Level.WARNING, "Unknown type : " + type);
}
}
}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/derived/AttributeFields.java b/config-model/src/main/java/com/yahoo/searchdefinition/derived/AttributeFields.java
index 4a415fccbcc..125b0cf2763 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/derived/AttributeFields.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/derived/AttributeFields.java
@@ -6,6 +6,7 @@ import com.yahoo.document.DataType;
import com.yahoo.document.PositionDataType;
import com.yahoo.searchdefinition.Search;
import com.yahoo.searchdefinition.document.Attribute;
+import com.yahoo.searchdefinition.document.Case;
import com.yahoo.searchdefinition.document.Dictionary;
import com.yahoo.searchdefinition.document.ImmutableSDField;
import com.yahoo.searchdefinition.document.Ranking;
@@ -255,7 +256,9 @@ public class AttributeFields extends Derived implements AttributesConfig.Produce
Dictionary dictionary = attribute.getDictionary();
if (dictionary != null) {
aaB.dictionary.type(convert(dictionary.getType()));
+ aaB.dictionary.match(convert(dictionary.getMatch()));
}
+ aaB.match(convertMatch(attribute.getCase()));
return aaB;
}
@@ -270,6 +273,24 @@ public class AttributeFields extends Derived implements AttributesConfig.Produce
}
return AttributesConfig.Attribute.Dictionary.Type.BTREE;
}
+ private static AttributesConfig.Attribute.Dictionary.Match.Enum convert(Case type) {
+ switch (type) {
+ case CASED:
+ return AttributesConfig.Attribute.Dictionary.Match.CASED;
+ case UNCASED:
+ return AttributesConfig.Attribute.Dictionary.Match.UNCASED;
+ }
+ return AttributesConfig.Attribute.Dictionary.Match.UNCASED;
+ }
+ private static AttributesConfig.Attribute.Match.Enum convertMatch(Case type) {
+ switch (type) {
+ case CASED:
+ return AttributesConfig.Attribute.Match.CASED;
+ case UNCASED:
+ return AttributesConfig.Attribute.Match.UNCASED;
+ }
+ return AttributesConfig.Attribute.Match.UNCASED;
+ }
public void getConfig(AttributesConfig.Builder builder, FieldSet fs) {
for (Attribute attribute : attributes.values()) {
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexInfo.java b/config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexInfo.java
index ae06d34dfb8..f30e2367117 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexInfo.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexInfo.java
@@ -3,12 +3,17 @@ package com.yahoo.searchdefinition.derived;
import com.yahoo.document.CollectionDataType;
import com.yahoo.document.DataType;
+import com.yahoo.document.Field;
+import com.yahoo.document.MapDataType;
import com.yahoo.document.NumericDataType;
import com.yahoo.document.PositionDataType;
+import com.yahoo.document.PrimitiveDataType;
+import com.yahoo.document.StructuredDataType;
import com.yahoo.searchdefinition.Index;
import com.yahoo.searchdefinition.Search;
import com.yahoo.searchdefinition.document.Attribute;
import com.yahoo.searchdefinition.document.BooleanIndexDefinition;
+import com.yahoo.searchdefinition.document.Case;
import com.yahoo.searchdefinition.document.FieldSet;
import com.yahoo.searchdefinition.document.ImmutableSDField;
import com.yahoo.searchdefinition.document.Matching;
@@ -47,9 +52,9 @@ public class IndexInfo extends Derived implements IndexInfoConfig.Producer {
private static final String CMD_PREDICATE_BOUNDS = "predicate-bounds";
private static final String CMD_NUMERICAL = "numerical";
private static final String CMD_PHRASE_SEGMENTING = "phrase-segmenting";
- private Set<IndexCommand> commands = new java.util.LinkedHashSet<>();
- private Map<String, String> aliases = new java.util.LinkedHashMap<>();
- private Map<String, FieldSet> fieldSets;
+ private final Set<IndexCommand> commands = new java.util.LinkedHashSet<>();
+ private final Map<String, String> aliases = new java.util.LinkedHashMap<>();
+ private final Map<String, FieldSet> fieldSets;
private Search search;
public IndexInfo(Search search) {
@@ -132,7 +137,7 @@ public class IndexInfo extends Derived implements IndexInfoConfig.Producer {
addIndexCommand(field, CMD_INDEX); // List the indices
- if (field.doesIndexing() || field.doesLowerCasing()) {
+ if (needLowerCase(field)) {
addIndexCommand(field, CMD_LOWERCASE);
}
@@ -172,6 +177,30 @@ public class IndexInfo extends Derived implements IndexInfoConfig.Producer {
}
+ private static boolean isAnyChildString(DataType dataType) {
+ PrimitiveDataType primitive = dataType.getPrimitiveType();
+ if (primitive == PrimitiveDataType.STRING) return true;
+ if (primitive != null) return false;
+ if (dataType instanceof StructuredDataType) {
+ StructuredDataType structured = (StructuredDataType) dataType;
+ for (Field field : structured.getFields()) {
+ if (isAnyChildString(field.getDataType())) return true;
+ }
+ } else if (dataType instanceof MapDataType) {
+ MapDataType mapType = (MapDataType) dataType;
+ return isAnyChildString(mapType.getKeyType()) || isAnyChildString(mapType.getValueType());
+ }
+ return false;
+ }
+
+ private static boolean needLowerCase(ImmutableSDField field) {
+ return field.doesIndexing()
+ || field.doesLowerCasing()
+ || ((field.doesAttributing() || (field.getAttribute() != null))
+ && isAnyChildString(field.getDataType())
+ && field.getMatching().getCase().equals(Case.UNCASED));
+ }
+
static String stemCmd(ImmutableSDField field, Search search) {
return CMD_STEM + ":" + field.getStemming(search).toStemMode();
}
@@ -316,7 +345,7 @@ public class IndexInfo extends Derived implements IndexInfoConfig.Producer {
if (field.doesAttributing()) {
anyAttributing = true;
}
- if (field.doesIndexing() || field.doesLowerCasing()) {
+ if (needLowerCase(field)) {
anyLowerCasing = true;
}
if (stemming(field)) {
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/derived/SummaryClass.java b/config-model/src/main/java/com/yahoo/searchdefinition/derived/SummaryClass.java
index 78400666c36..b08e9948ecd 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/derived/SummaryClass.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/derived/SummaryClass.java
@@ -75,7 +75,7 @@ public class SummaryClass extends Derived {
if (fields.containsKey(name)) {
SummaryClassField sf = fields.get(name);
if (!SummaryClassField.convertDataType(type, transform).equals(sf.getType())) {
- deployLogger.log(Level.WARNING, "Conflicting definition of field " + name + ". " +
+ deployLogger.logApplicationPackage(Level.WARNING, "Conflicting definition of field " + name + ". " +
"Declared as type " + sf.getType() + " and " + type);
}
} else {
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/document/Attribute.java b/config-model/src/main/java/com/yahoo/searchdefinition/document/Attribute.java
index f230a7c10eb..0cf7a031dbc 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/document/Attribute.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/document/Attribute.java
@@ -78,7 +78,8 @@ public final class Attribute implements Cloneable, Serializable {
/** The aliases for this attribute */
private final Set<String> aliases = new LinkedHashSet<>();
- private Dictionary dictionary = new Dictionary();
+ private Dictionary dictionary = null;
+ private Case casing = Case.UNCASED;
/**
* True if this attribute should be returned during first pass of search.
@@ -104,7 +105,7 @@ public final class Attribute implements Cloneable, Serializable {
private final String myName; // different from what name() returns.
private final String exportAttributeTypeName;
- private Type(String name, String exportAttributeTypeName) {
+ Type(String name, String exportAttributeTypeName) {
this.myName=name;
this.exportAttributeTypeName = exportAttributeTypeName;
}
@@ -126,7 +127,7 @@ public final class Attribute implements Cloneable, Serializable {
private final String name;
- private CollectionType(String name) {
+ CollectionType(String name) {
this.name=name;
}
@@ -211,6 +212,7 @@ public final class Attribute implements Cloneable, Serializable {
public Sorting getSorting() { return sorting; }
public Dictionary getDictionary() { return dictionary; }
+ public Case getCase() { return casing; }
public void setRemoveIfZero(boolean remove) { this.removeIfZero = remove; }
public void setCreateIfNonExistent(boolean create) { this.createIfNonExistent = create; }
@@ -235,6 +237,7 @@ public final class Attribute implements Cloneable, Serializable {
public void setDistanceMetric(DistanceMetric metric) { this.distanceMetric = Optional.of(metric); }
public void setHnswIndexParams(HnswIndexParams params) { this.hnswIndexParams = Optional.of(params); }
public void setDictionary(Dictionary dictionary) { this.dictionary = dictionary; }
+ public void setCase(Case casing) { this.casing = casing; }
public String getName() { return name; }
public Type getType() { return type; }
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/document/Case.java b/config-model/src/main/java/com/yahoo/searchdefinition/document/Case.java
new file mode 100644
index 00000000000..0fbee339b97
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/document/Case.java
@@ -0,0 +1,15 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.searchdefinition.document;
+
+/**
+ * Describes whether items should preserve lower/upper case, or be uncased,
+ * which normally means they are all normalized to lowercase.
+ * @author baldersheim
+ */
+public enum Case {
+ CASED("cased"),
+ UNCASED("uncased");
+ private String name;
+ Case(String name) { this.name = name; }
+ public String getName() { return name;}
+}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/document/Dictionary.java b/config-model/src/main/java/com/yahoo/searchdefinition/document/Dictionary.java
index e492d572f27..5a25953ce9e 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/document/Dictionary.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/document/Dictionary.java
@@ -9,8 +9,27 @@ package com.yahoo.searchdefinition.document;
*/
public class Dictionary {
public enum Type { BTREE, HASH, BTREE_AND_HASH };
- private final Type type;
- public Dictionary() { this(Type.BTREE); }
- public Dictionary(Type type) { this.type = type; }
- public Type getType() { return type; }
+ private Type type = null;
+ private Case casing = null;
+
+ public void updateType(Type type) {
+ if (this.type == null) {
+ this.type = type;
+ } else if ((this.type == Type.BTREE) && (type == Type.HASH)) {
+ this.type = Type.BTREE_AND_HASH;
+ } else if ((this.type == Type.HASH) && (type == Type.BTREE)) {
+ this.type = Type.BTREE_AND_HASH;
+ } else {
+ throw new IllegalArgumentException("Can not combine previous dictionary setting " + this.type +
+ " with current " + type);
+ }
+ }
+ public void updateMatch(Case casing) {
+ if (this.casing != null) {
+ throw new IllegalArgumentException("dictionary match mode has already been set to " + this.casing);
+ }
+ this.casing = casing;
+ }
+ public Type getType() { return (type != null) ? type : Type.BTREE; }
+ public Case getMatch() { return (casing != null) ? casing : Case.UNCASED; }
}
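
Note: Dictionary is now a mutable accumulator: updateType merges BTREE and HASH into BTREE_AND_HASH and rejects any other combination, updateMatch may be set at most once, and the getters fall back to BTREE/UNCASED. A hypothetical usage illustrating those rules (not part of the patch; assumes the Dictionary and Case classes above are on the classpath):

    static Dictionary combinedDictionary() {
        Dictionary d = new Dictionary();
        d.updateType(Dictionary.Type.BTREE);
        d.updateType(Dictionary.Type.HASH);      // merged into BTREE_AND_HASH
        assert d.getType() == Dictionary.Type.BTREE_AND_HASH;
        assert d.getMatch() == Case.UNCASED;     // default when no match mode was set
        // d.updateType(Dictionary.Type.HASH);   // would throw: cannot combine with BTREE_AND_HASH
        // calling d.updateMatch(...) twice would also throw
        return d;
    }
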
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/document/Matching.java b/config-model/src/main/java/com/yahoo/searchdefinition/document/Matching.java
index e10d313bd27..e2c036c2444 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/document/Matching.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/document/Matching.java
@@ -35,6 +35,7 @@ public class Matching implements Cloneable, Serializable {
}
private Type type = Type.TEXT;
+ private Case casing = Case.UNCASED;
/** The basic match algorithm */
private Algorithm algorithm = Algorithm.NORMAL;
@@ -59,12 +60,15 @@ public class Matching implements Cloneable, Serializable {
}
public Type getType() { return type; }
+ public Case getCase() { return casing; }
public void setType(Type type) {
this.type = type;
typeUserSet = true;
}
+ public void setCase(Case casing) { this.casing = casing; }
+
public Integer maxLength() { return maxLength; }
public Matching maxLength(int maxLength) { this.maxLength = maxLength; return this; }
public boolean isTypeUserSet() { return typeUserSet; }
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/document/SDField.java b/config-model/src/main/java/com/yahoo/searchdefinition/document/SDField.java
index 76b707fa19b..cf7bef654b0 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/document/SDField.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/document/SDField.java
@@ -538,15 +538,18 @@ public class SDField extends Field implements TypedKey, FieldOperationContainer,
* Returns Dictionary settings.
*/
public Dictionary getDictionary() { return dictionary; }
-
-
- public void setDictionary(Dictionary dictionary) { this.dictionary=dictionary; }
+ public Dictionary getOrSetDictionary() {
+ if (dictionary == null) {
+ dictionary = new Dictionary();
+ }
+ return dictionary;
+ }
/**
* Set the matching type for this field and all subfields.
*/
// TODO: When this is not the same as getMatching().setthis we have a potential for inconsistency. Find the right
- // Matching object for struct fields as lookup time instead.
+ // Matching object for struct fields at lookup time instead.
public void setMatchingType(Matching.Type type) {
this.getMatching().setType(type);
for (SDField structField : getStructFields()) {
@@ -555,10 +558,21 @@ public class SDField extends Field implements TypedKey, FieldOperationContainer,
}
/**
+ * Set the matching type for this field and all subfields.
+ */
+ // TODO: When this is not the same as getMatching().setthis we have a potential for inconsistency. Find the right
+ // Matching object for struct fields at lookup time instead.
+ public void setMatchingCase(Case casing) {
+ this.getMatching().setCase(casing);
+ for (SDField structField : getStructFields()) {
+ structField.setMatchingCase(casing);
+ }
+ }
+ /**
* Set matching algorithm for this field and all subfields.
*/
// TODO: When this is not the same as getMatching().setthis we have a potential for inconsistency. Find the right
- // Matching object for struct fields as lookup time instead.
+ // Matching object for struct fields at lookup time instead.
public void setMatchingAlgorithm(Matching.Algorithm algorithm) {
this.getMatching().setAlgorithm(algorithm);
for (SDField structField : getStructFields()) {
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/fieldoperation/DictionaryOperation.java b/config-model/src/main/java/com/yahoo/searchdefinition/fieldoperation/DictionaryOperation.java
index ce7c5a71a21..12f0509686a 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/fieldoperation/DictionaryOperation.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/fieldoperation/DictionaryOperation.java
@@ -2,6 +2,7 @@
package com.yahoo.searchdefinition.fieldoperation;
+import com.yahoo.searchdefinition.document.Case;
import com.yahoo.searchdefinition.document.Dictionary;
import com.yahoo.searchdefinition.document.SDField;
@@ -11,25 +12,30 @@ import com.yahoo.searchdefinition.document.SDField;
* @author baldersheim
*/
public class DictionaryOperation implements FieldOperation {
- private final Dictionary.Type type;
+ public enum Operation { HASH, BTREE, CASED, UNCASED }
+ private final Operation operation;
- public DictionaryOperation(Dictionary.Type type) {
- this.type = type;
+ public DictionaryOperation(Operation type) {
+ this.operation = type;
}
@Override
public void apply(SDField field) {
- Dictionary prev = field.getDictionary();
- if (prev == null) {
- field.setDictionary(new Dictionary(type));
- } else if ((prev.getType() == Dictionary.Type.BTREE && type == Dictionary.Type.HASH) ||
- (prev.getType() == Dictionary.Type.HASH && type == Dictionary.Type.BTREE))
- {
- field.setDictionary(new Dictionary(Dictionary.Type.BTREE_AND_HASH));
- } else {
- if (prev.getType() != type) {
- throw new IllegalArgumentException("Can not combine previous dictionary setting " + prev.getType() +
- " with current " + type);
- }
+ Dictionary dictionary = field.getOrSetDictionary();
+ switch (operation) {
+ case HASH:
+ dictionary.updateType(Dictionary.Type.HASH);
+ break;
+ case BTREE:
+ dictionary.updateType(Dictionary.Type.BTREE);
+ break;
+ case CASED:
+ dictionary.updateMatch(Case.CASED);
+ break;
+ case UNCASED:
+ dictionary.updateMatch(Case.UNCASED);
+ break;
+ default:
+ throw new IllegalArgumentException("Unhandled operation " + operation);
}
}
}
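
Note: with getOrSetDictionary(), repeated dictionary settings on a field now accumulate on one Dictionary instance instead of replacing it, so for example hash and cased can both be applied. A hypothetical end-to-end usage (the method name is illustrative; assumes an SDField from the schema being processed):

    static void applyHashCased(SDField field) {
        new DictionaryOperation(DictionaryOperation.Operation.HASH).apply(field);
        new DictionaryOperation(DictionaryOperation.Operation.CASED).apply(field);
        assert field.getDictionary().getType() == Dictionary.Type.HASH;
        assert field.getDictionary().getMatch() == Case.CASED;
    }
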
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/fieldoperation/MatchOperation.java b/config-model/src/main/java/com/yahoo/searchdefinition/fieldoperation/MatchOperation.java
index 5bfd2c38586..eba56bf964a 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/fieldoperation/MatchOperation.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/fieldoperation/MatchOperation.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.searchdefinition.fieldoperation;
+import com.yahoo.searchdefinition.document.Case;
import com.yahoo.searchdefinition.document.Matching;
import com.yahoo.searchdefinition.document.SDField;
@@ -10,6 +11,7 @@ import com.yahoo.searchdefinition.document.SDField;
public class MatchOperation implements FieldOperation {
private Matching.Type matchingType;
+ private Case casing;
private Integer gramSize;
private Integer maxLength;
private Matching.Algorithm matchingAlgorithm;
@@ -34,10 +36,17 @@ public class MatchOperation implements FieldOperation {
this.exactMatchTerminator = exactMatchTerminator;
}
+ public void setCase(Case casing) {
+ this.casing = casing;
+ }
+
public void apply(SDField field) {
if (matchingType != null) {
field.setMatchingType(matchingType);
}
+ if (casing != null) {
+ field.setMatchingCase(casing);
+ }
if (gramSize != null) {
field.getMatching().setGramSize(gramSize);
}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/CreatePositionZCurve.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/CreatePositionZCurve.java
index 8df5443b98f..2da1a7ac95b 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/processing/CreatePositionZCurve.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/CreatePositionZCurve.java
@@ -123,7 +123,7 @@ public class CreatePositionZCurve extends Processor {
summary.addDestination("default");
summary.addDestinations(summaryTo);
} else if (summary.getTransform() != summaryTransform) {
- deployLogger.log(Level.WARNING, "Summary field " + summaryName + " has wrong transform: " + summary.getTransform());
+ deployLogger.logApplicationPackage(Level.WARNING, "Summary field " + summaryName + " has wrong transform: " + summary.getTransform());
return;
}
SummaryField.Source source = new SummaryField.Source(sourceName);
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/DictionaryProcessor.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/DictionaryProcessor.java
index fd567ec2d54..1f07e3159da 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/processing/DictionaryProcessor.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/DictionaryProcessor.java
@@ -3,10 +3,13 @@ package com.yahoo.searchdefinition.processing;
import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.document.NumericDataType;
+import com.yahoo.document.PrimitiveDataType;
import com.yahoo.searchdefinition.RankProfileRegistry;
import com.yahoo.searchdefinition.Search;
import com.yahoo.searchdefinition.document.Attribute;
+import com.yahoo.searchdefinition.document.Case;
import com.yahoo.searchdefinition.document.Dictionary;
+import com.yahoo.searchdefinition.document.Matching;
import com.yahoo.searchdefinition.document.SDField;
import com.yahoo.vespa.model.container.search.QueryProfiles;
@@ -23,18 +26,29 @@ public class DictionaryProcessor extends Processor {
@Override
public void process(boolean validate, boolean documentsOnly) {
for (SDField field : search.allConcreteFields()) {
+ Attribute attribute = field.getAttribute();
+ if (attribute == null) continue;
+ attribute.setCase(field.getMatching().getCase());
Dictionary dictionary = field.getDictionary();
if (dictionary == null) continue;
-
- Attribute attribute = field.getAttribute();
if (attribute.getDataType().getPrimitiveType() instanceof NumericDataType ) {
if (attribute.isFastSearch()) {
attribute.setDictionary(dictionary);
} else {
fail(search, field, "You must specify 'attribute:fast-search' to allow dictionary control");
}
+ } else if (attribute.getDataType().getPrimitiveType() == PrimitiveDataType.STRING) {
+ attribute.setDictionary(dictionary);
+ if (dictionary.getType() == Dictionary.Type.HASH) {
+ if (dictionary.getMatch() != Case.CASED) {
+ fail(search, field, "hash dictionary require cased match");
+ }
+ }
+ if (! dictionary.getMatch().equals(attribute.getCase())) {
+ fail(search, field, "Dictionary casing '" + dictionary.getMatch() + "' does not match field match casing '" + attribute.getCase() + "'");
+ }
} else {
- fail(search, field, "You can only specify 'dictionary:' for numeric fields");
+ fail(search, field, "You can only specify 'dictionary:' for numeric or string fields");
}
}
}
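Condensed, the per-field rules this processor now enforces (a sketch mirroring the hunk above, not additional behaviour):

    // numeric attribute + dictionary -> requires attribute: fast-search
    // string attribute  + dictionary -> allowed, but:
    if (dictionary.getType() == Dictionary.Type.HASH && dictionary.getMatch() != Case.CASED)
        fail(search, field, "hash dictionary requires cased match");
    if ( ! dictionary.getMatch().equals(attribute.getCase()))
        fail(search, field, "dictionary casing must equal the field's match casing");
    // any other field type + dictionary -> rejected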
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/FilterFieldNames.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/FilterFieldNames.java
index adb8ab62aab..1337a0930c9 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/processing/FilterFieldNames.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/FilterFieldNames.java
@@ -63,7 +63,7 @@ public class FilterFieldNames extends Processor {
filterFields.add(fieldName);
}
} else {
- deployLogger.log(Level.WARNING, "For rank profile '" + profile.getName() + "': Cannot apply rank filter setting to unexisting field '" + fieldName + "'");
+ deployLogger.logApplicationPackage(Level.WARNING, "For rank profile '" + profile.getName() + "': Cannot apply rank filter setting to unexisting field '" + fieldName + "'");
}
}
}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/ImplicitSummaries.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/ImplicitSummaries.java
index eddb0adb8d6..9a170a92447 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/processing/ImplicitSummaries.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/ImplicitSummaries.java
@@ -170,7 +170,7 @@ public class ImplicitSummaries extends Processor {
{
// Summary transform attribute may indicate that the ilscript was rewritten to remove summary
// by another search that uses this same field in inheritance.
- deployLogger.log(Level.WARNING, "Ignoring " + summaryField + ": " + sourceField +
+ deployLogger.logApplicationPackage(Level.WARNING, "Ignoring " + summaryField + ": " + sourceField +
" is not creating a summary value in its indexing statement");
return false;
}
@@ -184,7 +184,7 @@ public class ImplicitSummaries extends Processor {
if (summaryField.getDestinations().size() >0) {
destinations = "document summaries " + summaryField.getDestinations();
}
- deployLogger.log(Level.WARNING,
+ deployLogger.logApplicationPackage(Level.WARNING,
"Will fetch the disk summary value of " + sourceField + " in " + destinations +
" since this summary field uses a dynamic summary value (snippet/bolding): Dynamic summaries and bolding " +
"is not supported with summary values fetched from in-memory attributes yet. If you want to see partial updates " +
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/Processor.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/Processor.java
index 61b5e6f2a64..c548c62970b 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/processing/Processor.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/Processor.java
@@ -72,7 +72,7 @@ public abstract class Processor {
protected SDField addField(Search search, SDField field, String suffix, String indexing, String queryCommand) {
SDField implementationField = search.getConcreteField(field.getName() + "_" + suffix);
if (implementationField != null) {
- deployLogger.log(Level.WARNING, "Implementation field " + implementationField + " added twice");
+ deployLogger.logApplicationPackage(Level.WARNING, "Implementation field " + implementationField + " added twice");
} else {
implementationField = new SDField(search.getDocument(), field.getName() + "_" + suffix, DataType.STRING);
}
@@ -130,7 +130,7 @@ public abstract class Processor {
protected void warn(String searchName, String fieldName, String message) {
String fullMsg = formatError(searchName, fieldName, message);
- deployLogger.log(Level.WARNING, fullMsg);
+ deployLogger.logApplicationPackage(Level.WARNING, fullMsg);
}
protected void warn(Search search, Field field, String message) {
@@ -139,11 +139,11 @@ public abstract class Processor {
protected void info(String searchName, String fieldName, String message) {
String fullMsg = formatError(searchName, fieldName, message);
- deployLogger.log(Level.INFO, fullMsg);
+ deployLogger.logApplicationPackage(Level.INFO, fullMsg);
}
protected void info(Search search, Field field, String message) {
- warn(search.getName(), field.getName(), message);
+ info(search.getName(), field.getName(), message);
}
}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/RankingExpressionTypeResolver.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/RankingExpressionTypeResolver.java
index 89b8889b4ae..81766659545 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/processing/RankingExpressionTypeResolver.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/RankingExpressionTypeResolver.java
@@ -90,7 +90,7 @@ public class RankingExpressionTypeResolver extends Processor {
if ( context.tensorsAreUsed() &&
! context.queryFeaturesNotDeclared().isEmpty() &&
! warnedAbout.containsAll(context.queryFeaturesNotDeclared())) {
- deployLogger.log(Level.WARNING, "The following query features used in '" + profile.getName() +
+ deployLogger.logApplicationPackage(Level.WARNING, "The following query features used in '" + profile.getName() +
"' are not declared in query profile " +
"types and will be interpreted as scalars, not tensors: " +
context.queryFeaturesNotDeclared());
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/ReservedFunctionNames.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/ReservedFunctionNames.java
index d7099215f17..4f5e6a6edb9 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/processing/ReservedFunctionNames.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/ReservedFunctionNames.java
@@ -34,7 +34,7 @@ public class ReservedFunctionNames extends Processor {
for (RankProfile rp : rankProfileRegistry.all()) {
for (String functionName : rp.getFunctions().keySet()) {
if (reservedNames.contains(functionName)) {
- deployLogger.log(Level.WARNING, "Function '" + functionName + "' " +
+ deployLogger.logApplicationPackage(Level.WARNING, "Function '" + functionName + "' " +
"in rank profile '" + rp.getName() + "' " +
"has a reserved name. This might mean that the function shadows " +
"the built-in function with the same name."
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/SummaryDiskAccessValidator.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/SummaryDiskAccessValidator.java
index f629b046c7d..4ae624d2c35 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/processing/SummaryDiskAccessValidator.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/SummaryDiskAccessValidator.java
@@ -46,7 +46,7 @@ public class SummaryDiskAccessValidator extends Processor {
throw new IllegalArgumentException(summaryField + " in " + summary + " references " +
source + ", but this field does not exist");
if ( ! isInMemory(field, summaryField) && ! summary.isFromDisk()) {
- deployLogger.log(Level.WARNING, summaryField + " in " + summary + " references " +
+ deployLogger.logApplicationPackage(Level.WARNING, summaryField + " in " + summary + " references " +
source + ", which is not an attribute: Using this " +
"summary will cause disk accesses. " +
"Set 'from-disk' on this summary class to silence this warning.");
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/TensorFieldProcessor.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/TensorFieldProcessor.java
index dcf238c71c2..397e1c3deab 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/processing/TensorFieldProcessor.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/TensorFieldProcessor.java
@@ -29,6 +29,7 @@ public class TensorFieldProcessor extends Processor {
if (validate) {
validateIndexingScripsForTensorField(field);
validateAttributeSettingForTensorField(field);
+ validateHnswIndexParametersRequiresIndexing(field);
}
processIndexSettingsForTensorField(field, validate);
}
@@ -84,6 +85,13 @@ public class TensorFieldProcessor extends Processor {
}
}
+ private void validateHnswIndexParametersRequiresIndexing(SDField field) {
+ var index = field.getIndex(field.getName());
+ if (index != null && index.getHnswIndexParams().isPresent() && !field.doesIndexing()) {
+ fail(search, field, "A tensor that specifies hnsw index parameters must also specify 'index' in 'indexing'");
+ }
+ }
+
private void processIndexSettingsForTensorField(SDField field, boolean validate) {
if (!field.doesIndexing()) {
return;
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/multifieldresolver/IndexCommandResolver.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/multifieldresolver/IndexCommandResolver.java
index d20e9f4c253..329f7a9a417 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/processing/multifieldresolver/IndexCommandResolver.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/multifieldresolver/IndexCommandResolver.java
@@ -49,11 +49,11 @@ public class IndexCommandResolver extends MultiFieldResolver {
for (SDField field : fields) {
if (!field.hasQueryCommand(command)) {
if (harmonizedCommands.contains(command)) {
- deployLogger.log(Level.WARNING, command + " must be added to all fields going to the same index (" + indexName + ")" +
+ deployLogger.logApplicationPackage(Level.WARNING, command + " must be added to all fields going to the same index (" + indexName + ")" +
", adding to field " + field.getName());
field.addQueryCommand(command);
} else {
- deployLogger.log(Level.WARNING, "All fields going to the same index should have the same query-commands. Field \'" + field.getName() +
+ deployLogger.logApplicationPackage(Level.WARNING, "All fields going to the same index should have the same query-commands. Field \'" + field.getName() +
"\' doesn't contain command \'" + command+"\'");
}
}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/multifieldresolver/RankTypeResolver.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/multifieldresolver/RankTypeResolver.java
index a448716d755..e6e9de81f26 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/processing/multifieldresolver/RankTypeResolver.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/multifieldresolver/RankTypeResolver.java
@@ -31,7 +31,7 @@ public class RankTypeResolver extends MultiFieldResolver {
rankType = fields.get(0).getRankType();
first = false;
} else if (!field.getRankType().equals(rankType)) {
- deployLogger.log(Level.WARNING, "In field '" + field.getName() + "' " +
+ deployLogger.logApplicationPackage(Level.WARNING, "In field '" + field.getName() + "' " +
field.getRankType() + " for index '" + indexName +
"' conflicts with " + rankType +
" defined for the same index in field '" +
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/multifieldresolver/StemmingResolver.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/multifieldresolver/StemmingResolver.java
index 483fe2105a3..910493e2aad 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/processing/multifieldresolver/StemmingResolver.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/multifieldresolver/StemmingResolver.java
@@ -32,7 +32,7 @@ public class StemmingResolver extends MultiFieldResolver {
stemming = field.getStemming(search);
stemmingField = field;
} else if (stemming != field.getStemming(search)) {
- deployLogger.log(Level.WARNING, "Field '" + field.getName() + "' has " + field.getStemming(search) +
+ deployLogger.logApplicationPackage(Level.WARNING, "Field '" + field.getName() + "' has " + field.getStemming(search) +
", whereas field '" + stemmingField.getName() + "' has " + stemming +
". All fields indexing to the index '" + indexName + "' must have the same stemming." +
" This should be corrected as it will make indexing fail in a few cases.");
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java b/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java
index 6213f0592fd..09bbd446803 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java
@@ -50,10 +50,10 @@ public class HostSystem extends AbstractConfigProducer<Host> {
@SuppressWarnings("unused")
Object ignore = java.net.InetAddress.getByName(hostname);
} catch (UnknownHostException e) {
- deployLogger.log(Level.WARNING, "Unable to lookup IP address of host: " + hostname);
+ deployLogger.logApplicationPackage(Level.WARNING, "Unable to lookup IP address of host: " + hostname);
}
if (! hostname.contains(".")) {
- deployLogger.log(Level.WARNING, "Host named '" + hostname + "' may not receive any config " +
+ deployLogger.logApplicationPackage(Level.WARNING, "Host named '" + hostname + "' may not receive any config " +
"since it is not a canonical hostname. " +
"Disregard this warning when testing in a Docker container.");
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java
index 3f346f20144..efae00096df 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java
@@ -78,7 +78,7 @@ public class Admin extends AbstractConfigProducer<Admin> implements Serializable
private final FileDistributionConfigProducer fileDistribution;
private final boolean multitenant;
- public Admin(AbstractConfigProducer parent,
+ public Admin(AbstractConfigProducer<?> parent,
Monitoring monitoring,
Metrics metrics,
boolean multitenant,
@@ -186,19 +186,6 @@ public class Admin extends AbstractConfigProducer<Admin> implements Serializable
return fileDistribution;
}
- public List<HostResource> getClusterControllerHosts() {
- List<HostResource> hosts = new ArrayList<>();
- if (multitenant) {
- if (logserver != null)
- hosts.add(logserver.getHostResource());
- } else {
- for (Configserver configserver : getConfigservers()) {
- hosts.add(configserver.getHostResource());
- }
- }
- return hosts;
- }
-
/**
* Adds services to all hosts in the system.
*/
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/LogserverContainerCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/LogserverContainerCluster.java
index 43b394b7f42..787bc322eb5 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/LogserverContainerCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/LogserverContainerCluster.java
@@ -30,8 +30,7 @@ public class LogserverContainerCluster extends ContainerCluster<LogserverContain
@Override
public void getConfig(QrStartConfig.Builder builder) {
super.getConfig(builder);
- builder.jvm.heapsize(128)
- .verbosegc(true);
+ builder.jvm.heapsize(128);
}
protected boolean messageBusEnabled() { return false; }
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/clustercontroller/ClusterControllerContainer.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/clustercontroller/ClusterControllerContainer.java
index 60991a59a75..ed1dc80d71d 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/clustercontroller/ClusterControllerContainer.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/clustercontroller/ClusterControllerContainer.java
@@ -40,6 +40,9 @@ public class ClusterControllerContainer extends Container implements
private static final ComponentSpecification CLUSTERCONTROLLER_BUNDLE = new ComponentSpecification("clustercontroller-apps");
private static final ComponentSpecification ZOOKEEPER_SERVER_BUNDLE = new ComponentSpecification("zookeeper-server");
private static final ComponentSpecification REINDEXING_CONTROLLER_BUNDLE = new ComponentSpecification("clustercontroller-reindexer");
+ // The below adjustments to default netty settings reduces default chunkSize from 16M to 1M
+ private static final int DEFAULT_NETTY_PAGE_SIZE = 4096; // Reduced from nettys default of 8192
+ private static final int DEFAULT_NETTY_MAX_ORDER = 8; // Reduced from nettys default of 11
private final Set<String> bundles = new TreeSet<>();
@@ -58,9 +61,9 @@ public class ClusterControllerContainer extends Container implements
"/cluster/v2/*",
CLUSTERCONTROLLER_BUNDLE);
addComponent(new AccessLogComponent(containerCluster().orElse(null), AccessLogComponent.AccessLogType.jsonAccessLog,
- AccessLogComponent.CompressionType.GZIP,
- "controller",
- deployState.isHosted()));
+ AccessLogComponent.CompressionType.GZIP,
+ "controller",
+ deployState.isHosted()));
// TODO: Why are bundles added here instead of in the cluster?
addFileBundle("clustercontroller-apps");
@@ -69,6 +72,12 @@ public class ClusterControllerContainer extends Container implements
addFileBundle("zookeeper-server");
configureReindexing();
configureZooKeeperServer(runStandaloneZooKeeper);
+ prependJvmOptions(defaultNettyBufferSize(DEFAULT_NETTY_PAGE_SIZE, DEFAULT_NETTY_MAX_ORDER));
+ }
+
+ private static String defaultNettyBufferSize(int pageSize, int maxOrder) {
+ return new StringBuffer("-Dio.netty.allocator.pageSize=").append(pageSize)
+ .append(" -Dio.netty.allocator.maxOrder=").append(maxOrder).toString();
}
@Override
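The chunk-size arithmetic behind the constants above: Netty sizes an allocator chunk as pageSize × 2^maxOrder, so the stock defaults give 8192 × 2^11 = 16 MiB per chunk, while the values set here give 4096 × 2^8 = 1 MiB. A worked example of the resulting JVM options string (no new behaviour):

    // defaultNettyBufferSize(4096, 8) returns
    // "-Dio.netty.allocator.pageSize=4096 -Dio.netty.allocator.maxOrder=8"
    int chunkSize = 4096 << 8;   // 1_048_576 bytes (1 MiB); previously 8192 << 11 = 16 MiB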
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/clustercontroller/ClusterControllerContainerCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/clustercontroller/ClusterControllerContainerCluster.java
index c4bd7198e11..89993780869 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/clustercontroller/ClusterControllerContainerCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/clustercontroller/ClusterControllerContainerCluster.java
@@ -41,7 +41,6 @@ public class ClusterControllerContainerCluster extends ContainerCluster<ClusterC
super.getConfig(builder);
builder.jvm
- .verbosegc(true)
.heapsize(featureFlags.clusterControllerMaxHeapSizeInMb());
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java
index 986fd0b77ba..234892c5cc3 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java
@@ -159,7 +159,6 @@ public class MetricsProxyContainer extends Container implements
if (clusterMembership.isPresent()) {
int maxHeapSize = featureFlags.metricsProxyMaxHeapSizeInMb(clusterMembership.get().cluster().type());
builder.jvm
- .verbosegc(true)
.gcopts(jvmGCOptions)
.heapsize(maxHeapSize);
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ComponentValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ComponentValidator.java
index 7485bedf788..6d2295684dc 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ComponentValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ComponentValidator.java
@@ -81,7 +81,7 @@ public class ComponentValidator extends Validator {
}
if (attributes.getValue("Bundle-Version").endsWith(".SNAPSHOT")) {
- deployLogger.log(Level.WARNING, "Deploying snapshot bundle " + jarFile.getName() +
+ deployLogger.logApplicationPackage(Level.WARNING, "Deploying snapshot bundle " + jarFile.getName() +
".\nTo use this bundle, you must include the qualifier 'SNAPSHOT' in the version specification in services.xml.");
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/RankSetupValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/RankSetupValidator.java
index f218c06754a..776ee675ffd 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/RankSetupValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/RankSetupValidator.java
@@ -193,7 +193,7 @@ public class RankSetupValidator extends Validator {
private void validateWarn(Exception e, DeployLogger deployLogger) {
String msg = "Unable to execute '"+ binaryName + "', validation of rank expressions will only take place when you start Vespa: " +
Exceptions.toMessageString(e);
- deployLogger.log(Level.WARNING, msg);
+ deployLogger.logApplicationPackage(Level.WARNING, msg);
}
private void validateFail(String output, SearchCluster sc, String sdName, DeployLogger deployLogger) {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/StreamingValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/StreamingValidator.java
index 39478730982..0afebbb6e40 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/StreamingValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/StreamingValidator.java
@@ -40,7 +40,7 @@ public class StreamingValidator extends Validator {
if (sc.getSdConfig() != null) {
for (ImmutableSDField sd : sc.getSdConfig().getSearch().allConcreteFields()) {
if (sd.getMatching().getType().equals(Matching.Type.GRAM)) {
- logger.log(Level.WARNING, "For streaming search cluster '" + sc.getClusterName() +
+ logger.logApplicationPackage(Level.WARNING, "For streaming search cluster '" + sc.getClusterName() +
"', SD field '" + sd.getName() + "': n-gram matching is not supported for streaming search.");
}
}
@@ -67,7 +67,7 @@ public class StreamingValidator extends Validator {
// If the field is numeric, we can't print this, because we may have converted the field to
// attribute indexing ourselves (IntegerIndex2Attribute)
if (sd.getDataType() instanceof NumericDataType) return;
- logger.log(Level.WARNING, "For streaming search cluster '" + sc.getClusterName() +
+ logger.logApplicationPackage(Level.WARNING, "For streaming search cluster '" + sc.getClusterName() +
"', SD field '" + sd.getName() + "': 'attribute' has same match semantics as 'index'.");
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/UriBindingsValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/UriBindingsValidator.java
index e7191cc4bb9..78ee4f4f1c8 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/UriBindingsValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/UriBindingsValidator.java
@@ -49,7 +49,7 @@ class UriBindingsValidator extends Validator {
if (binding.scheme().equals("https")) {
String message = createErrorMessage(
binding, "'https' bindings are deprecated, use 'http' instead to bind to both http and https traffic.");
- deployState.getDeployLogger().log(Level.WARNING, message);
+ deployState.getDeployLogger().logApplicationPackage(Level.WARNING, message);
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ConfigValueChangeValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ConfigValueChangeValidator.java
index a11628bb0ea..021055b8867 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ConfigValueChangeValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ConfigValueChangeValidator.java
@@ -119,7 +119,7 @@ public class ConfigValueChangeValidator implements ChangeValidator {
AbstractConfigProducerRoot currentModel, AbstractConfigProducerRoot nextModel) {
if (!hasConfigFieldsFlaggedWithRestart(configClass, service.getClass())) {
- logger.log(Level.FINE, String.format("%s is listed in the annotation for %s, " +
+ logger.logApplicationPackage(Level.FINE, String.format("%s is listed in the annotation for %s, " +
"but does not have any restart flags in its config definition.",
configClass.getSimpleName(), service.getClass().getSimpleName()));
return Optional.empty();
@@ -127,7 +127,7 @@ public class ConfigValueChangeValidator implements ChangeValidator {
Optional<ConfigInstance> nextConfig = getConfigFromModel(nextModel, configClass, service.getConfigId());
if (!nextConfig.isPresent()) {
- logger.log(Level.FINE, String.format(
+ logger.logApplicationPackage(Level.FINE, String.format(
"%s is listed as restart config for %s, but the config does not exist in the new model.",
configClass.getSimpleName(), service.getClass().getSimpleName()));
return Optional.empty();
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/VespaRestartAction.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/VespaRestartAction.java
index ca3a408b2e0..69cd022d939 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/VespaRestartAction.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/VespaRestartAction.java
@@ -6,7 +6,6 @@ import com.yahoo.config.model.api.ServiceInfo;
import com.yahoo.config.provision.ClusterSpec;
import java.util.List;
-import java.util.Optional;
/**
* Represents an action to restart services in order to handle a config change.
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidator.java
index 0aee0675ea7..7a17fe5e80a 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidator.java
@@ -6,6 +6,8 @@ import com.yahoo.documentmodel.NewDocumentType;
import com.yahoo.searchdefinition.derived.AttributeFields;
import com.yahoo.searchdefinition.derived.IndexSchema;
import com.yahoo.searchdefinition.document.Attribute;
+import com.yahoo.searchdefinition.document.Case;
+import com.yahoo.searchdefinition.document.Dictionary;
import com.yahoo.searchdefinition.document.HnswIndexParams;
import com.yahoo.vespa.model.application.validation.change.VespaConfigChangeAction;
import com.yahoo.vespa.model.application.validation.change.VespaRestartAction;
@@ -83,6 +85,16 @@ public class AttributeChangeValidator {
return attribute.hnswIndexParams().isPresent();
}
+ private static Dictionary.Type extractDictionaryType(Attribute attr) {
+ Dictionary dict = attr.getDictionary();
+ return dict != null ? dict.getType() : Dictionary.Type.BTREE;
+ }
+
+ private static Case extractDictionaryCase(Attribute attr) {
+ Dictionary dict = attr.getDictionary();
+ return dict != null ? dict.getMatch() : Case.UNCASED;
+ }
+
private List<VespaConfigChangeAction> validateAttributeSettings() {
List<VespaConfigChangeAction> result = new ArrayList<>();
for (Attribute nextAttr : nextFields.attributes()) {
@@ -90,11 +102,14 @@ public class AttributeChangeValidator {
if (currAttr != null) {
validateAttributeSetting(id, currAttr, nextAttr, Attribute::isFastSearch, "fast-search", result);
validateAttributeSetting(id, currAttr, nextAttr, Attribute::isFastAccess, "fast-access", result);
+ validateAttributeSetting(id, currAttr, nextAttr, AttributeChangeValidator::extractDictionaryType, "dictionary: btree/hash", result);
+ validateAttributeSetting(id, currAttr, nextAttr, AttributeChangeValidator::extractDictionaryCase, "dictionary: cased/uncased", result);
validateAttributeSetting(id, currAttr, nextAttr, Attribute::isHuge, "huge", result);
validateAttributeSetting(id, currAttr, nextAttr, Attribute::densePostingListThreshold, "dense-posting-list-threshold", result);
validateAttributeSetting(id, currAttr, nextAttr, Attribute::isEnabledOnlyBitVector, "rank: filter", result);
- validateAttributeSetting(id, currAttr, nextAttr, AttributeChangeValidator::hasHnswIndex, "indexing: index", result);
validateAttributeSetting(id, currAttr, nextAttr, Attribute::distanceMetric, "distance-metric", result);
+
+ validateAttributeSetting(id, currAttr, nextAttr, AttributeChangeValidator::hasHnswIndex, "indexing: index", result);
if (hasHnswIndex(currAttr) && hasHnswIndex(nextAttr)) {
validateAttributeHnswIndexSetting(id, currAttr, nextAttr, HnswIndexParams::maxLinksPerNode, "max-links-per-node", result);
validateAttributeHnswIndexSetting(id, currAttr, nextAttr, HnswIndexParams::neighborsToExploreAtInsert, "neighbors-to-explore-at-insert", result);
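Why the extractors default to BTREE and UNCASED: an attribute with no dictionary block then compares equal to one that spells out the defaults, so only a real change of dictionary type or casing is reported. A small illustrative sketch (attr is a hypothetical Attribute without a configured dictionary):

    // Sketch only:
    assert extractDictionaryType(attr) == Dictionary.Type.BTREE;
    assert extractDictionaryCase(attr) == Case.UNCASED;
    // hence only switching to HASH or CASED shows up as a restart-requiring change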
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/UserConfigBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/UserConfigBuilder.java
index be85861ddb6..3390eea21bc 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/UserConfigBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/UserConfigBuilder.java
@@ -40,13 +40,13 @@ public class UserConfigBuilder {
Optional<ConfigDefinition> def = configDefinitionStore.getConfigDefinition(key);
if ( ! def.isPresent()) { // TODO: Fail instead of warn
- logger.log(Level.WARNING, "Unable to find config definition '" + key.asFileName() +
+ logger.logApplicationPackage(Level.WARNING, "Unable to find config definition '" + key.asFileName() +
"'. Please ensure that the name is spelled correctly, and that the def file is included in a bundle.");
}
ConfigPayloadBuilder payloadBuilder = new DomConfigPayloadBuilder(def.orElse(null)).build(element);
ConfigPayloadBuilder old = builderMap.get(key);
if (old != null) {
- logger.log(Level.WARNING, "Multiple overrides for " + key + " found. Applying in the order they are discovered");
+ logger.logApplicationPackage(Level.WARNING, "Multiple overrides for " + key + " found. Applying in the order they are discovered");
old.override(payloadBuilder);
} else {
builderMap.put(key, payloadBuilder);
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV2Builder.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV2Builder.java
index e150e5791c0..d6e419cf63f 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV2Builder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV2Builder.java
@@ -52,7 +52,7 @@ public class DomAdminV2Builder extends DomAdminBuilderBase {
addLogForwarders(adminElement.child("logforwarding"), admin);
if (adminElement.child("filedistribution") != null) {
- deployState.getDeployLogger().log(Level.WARNING, "'filedistribution' element is deprecated and ignored");
+ deployState.getDeployLogger().logApplicationPackage(Level.WARNING, "'filedistribution' element is deprecated and ignored");
}
}
@@ -65,7 +65,7 @@ public class DomAdminV2Builder extends DomAdminBuilderBase {
}
int count = configservers.size();
if (count % 2 == 0) {
- deployState.getDeployLogger().log(Level.WARNING, "An even number (" + count + ") of config servers have been configured. " +
+ deployState.getDeployLogger().logApplicationPackage(Level.WARNING, "An even number (" + count + ") of config servers have been configured. " +
"This is discouraged, see doc for configuration server ");
}
return configservers;
@@ -116,7 +116,7 @@ public class DomAdminV2Builder extends DomAdminBuilderBase {
if (configserverE == null) {
configserverE = XML.getChild(adminE, "adminserver");
} else {
- deployState.getDeployLogger().log(Level.INFO, "Specifying configserver without parent element configservers in services.xml is deprecated");
+ deployState.getDeployLogger().logApplicationPackage(Level.INFO, "Specifying configserver without parent element configservers in services.xml is deprecated");
}
Configserver cfgs0 = new ConfigserverBuilder(0, configServerSpecs).build(deployState, configServers, configserverE);
cfgs0.setProp("index", 0);
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV4Builder.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV4Builder.java
index 2e04007e6d2..f7b838de911 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV4Builder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV4Builder.java
@@ -84,7 +84,7 @@ public class DomAdminV4Builder extends DomAdminBuilderBase {
createLogserver(deployState.getDeployLogger(), admin, hosts);
} else {
- context.getDeployLogger().log(Level.INFO, "No container host available to use for running logserver");
+ context.getDeployLogger().logApplicationPackage(Level.INFO, "No container host available to use for running logserver");
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomHandlerBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomHandlerBuilder.java
index 71ff129ffbe..0d253f9e7a8 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomHandlerBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomHandlerBuilder.java
@@ -82,7 +82,7 @@ public class DomHandlerBuilder extends VespaDomBuilder.DomConfigProducerBuilder<
for (BindingPattern serverBinding : handler.getServerBindings()) {
if (serverBinding.hasSamePattern(binding)) {
handler.removeServerBinding(serverBinding);
- log.log(INFO, "Binding '" + binding.patternString() + "' was already in use by handler '" +
+ log.logApplicationPackage(INFO, "Binding '" + binding.patternString() + "' was already in use by handler '" +
handler.getComponentId() + "', but will now be taken over by handler: " + newHandler.getComponentId());
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainer.java b/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainer.java
index 523002afa25..02a0f606603 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainer.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainer.java
@@ -8,11 +8,7 @@ import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.model.producer.AbstractConfigProducer;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.NodeResources;
-import com.yahoo.container.bundle.BundleInstantiationSpecification;
-import com.yahoo.osgi.provider.model.ComponentModel;
-import com.yahoo.prelude.fastsearch.FS4ResourcePool;
import com.yahoo.search.config.QrStartConfig;
-import com.yahoo.vespa.model.container.component.Component;
/**
* A container that is typically used by container clusters set up from the user application.
@@ -34,14 +30,6 @@ public final class ApplicationContainer extends Container implements
public ApplicationContainer(AbstractConfigProducer<?> parent, String name, boolean retired, int index, DeployState deployState) {
super(parent, name, retired, index, deployState);
this.isHostedVespa = deployState.isHosted();
-
- addComponent(getFS4ResourcePool()); // TODO Remove when FS4 based search protocol is gone
- }
-
- private static Component<?, ComponentModel> getFS4ResourcePool() {
- BundleInstantiationSpecification spec = BundleInstantiationSpecification.
- getInternalSearcherSpecificationFromStrings(FS4ResourcePool.class.getName(), null);
- return new Component<>(new ComponentModel(spec));
}
@Override
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/Container.java b/config-model/src/main/java/com/yahoo/vespa/model/container/Container.java
index cd596038137..6ef29269bc1 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/Container.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/Container.java
@@ -79,6 +79,7 @@ public abstract class Container extends AbstractService implements
private final ComponentGroup<Component<?, ?>> components = new ComponentGroup<>(this, "components");
private final JettyHttpServer defaultHttpServer;
+ private final boolean enableJdiscHttp2;
protected Container(AbstractConfigProducer<?> parent, String name, int index, DeployState deployState) {
this(parent, name, false, index, deployState);
@@ -99,6 +100,8 @@ public abstract class Container extends AbstractService implements
addChild(new SimpleComponent("com.yahoo.container.jdisc.ConfiguredApplication$ApplicationContext"));
appendJvmOptions(jvmOmitStackTraceInFastThrowOption(deployState.featureFlags()));
+
+ this.enableJdiscHttp2 = deployState.featureFlags().enableJdiscHttp2();
}
protected String jvmOmitStackTraceInFastThrowOption(ModelContext.FeatureFlags featureFlags) {
@@ -180,7 +183,7 @@ public abstract class Container extends AbstractService implements
}
private void initDefaultJettyConnector() {
- defaultHttpServer.addConnector(new ConnectorFactory.Builder("SearchServer", getSearchPort()).build());
+ defaultHttpServer.addConnector(new ConnectorFactory.Builder("SearchServer", getSearchPort()).enableHttp2(enableJdiscHttp2).build());
}
private ContainerServiceType myServiceType = null;
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/component/AccessLogComponent.java b/config-model/src/main/java/com/yahoo/vespa/model/container/component/AccessLogComponent.java
index 2905471b02e..f18f10644ca 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/component/AccessLogComponent.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/component/AccessLogComponent.java
@@ -6,6 +6,7 @@ import com.yahoo.container.core.AccessLogConfig.FileHandler.CompressionFormat;
import com.yahoo.container.logging.JSONAccessLog;
import com.yahoo.container.logging.VespaAccessLog;
import com.yahoo.osgi.provider.model.ComponentModel;
+import com.yahoo.vespa.model.container.ApplicationContainerCluster;
import com.yahoo.vespa.model.container.ContainerCluster;
import java.util.OptionalInt;
@@ -27,6 +28,7 @@ public final class AccessLogComponent extends SimpleComponent implements AccessL
private final String symlinkName;
private final CompressionType compressionType;
private final int queueSize;
+ private final Integer bufferSize;
public AccessLogComponent(ContainerCluster<?> cluster, AccessLogType logType, CompressionType compressionType, String clusterName, boolean isHostedVespa)
{
@@ -57,6 +59,9 @@ public final class AccessLogComponent extends SimpleComponent implements AccessL
this.symlinkName = symlinkName;
this.compressionType = compressionType;
this.queueSize = queueSize(cluster).orElse(-1);
+ bufferSize = (cluster instanceof ApplicationContainerCluster)
+ ? 4*1024*1024
+ : null;
if (fileNamePattern == null)
throw new RuntimeException("File name pattern required when configuring access log.");
@@ -101,6 +106,9 @@ public final class AccessLogComponent extends SimpleComponent implements AccessL
if (queueSize >= 0) {
builder.queueSize(queueSize);
}
+ if (bufferSize != null) {
+ builder.bufferSize(bufferSize);
+ }
switch (compressionType) {
case GZIP:
builder.compressionFormat(CompressionFormat.GZIP);
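For scale, 4 * 1024 * 1024 is 4 MiB, and the buffer is only configured for application container clusters; elsewhere bufferSize stays null, no bufferSize(...) call is made, and the handler keeps its default. A one-line restatement of the rule above:

    // ApplicationContainerCluster -> builder.bufferSize(4_194_304); other clusters -> handler default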
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/http/ConnectorFactory.java b/config-model/src/main/java/com/yahoo/vespa/model/container/http/ConnectorFactory.java
index 989f911fc5e..9b0075c79c3 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/http/ConnectorFactory.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/http/ConnectorFactory.java
@@ -2,7 +2,6 @@
package com.yahoo.vespa.model.container.http;
import com.yahoo.component.ComponentId;
-import com.yahoo.container.bundle.BundleInstantiationSpecification;
import com.yahoo.jdisc.http.ConnectorConfig;
import com.yahoo.osgi.provider.model.ComponentModel;
import com.yahoo.vespa.model.container.component.SimpleComponent;
@@ -11,8 +10,6 @@ import com.yahoo.vespa.model.container.http.ssl.SslProvider;
import java.util.Optional;
-import static com.yahoo.component.ComponentSpecification.fromString;
-
/**
* @author Einar M R Rosenvinge
* @author bjorncs
@@ -23,6 +20,7 @@ public class ConnectorFactory extends SimpleComponent implements ConnectorConfig
private final String name;
private final int listenPort;
private final SslProvider sslProviderComponent;
+ private final boolean enableHttp2;
private volatile ComponentId defaultRequestFilterChain;
private volatile ComponentId defaultResponseFilterChain;
@@ -35,6 +33,7 @@ public class ConnectorFactory extends SimpleComponent implements ConnectorConfig
this.sslProviderComponent = builder.sslProvider != null ? builder.sslProvider : new DefaultSslProvider(name);
this.defaultRequestFilterChain = builder.defaultRequestFilterChain;
this.defaultResponseFilterChain = builder.defaultResponseFilterChain;
+ this.enableHttp2 = builder.enableHttp2 != null ? builder.enableHttp2 : false;
addChild(sslProviderComponent);
inject(sslProviderComponent);
}
@@ -43,6 +42,7 @@ public class ConnectorFactory extends SimpleComponent implements ConnectorConfig
public void getConfig(ConnectorConfig.Builder connectorBuilder) {
connectorBuilder.listenPort(listenPort);
connectorBuilder.name(name);
+ connectorBuilder.http2Enabled(enableHttp2);
sslProviderComponent.amendConnectorConfig(connectorBuilder);
}
@@ -69,6 +69,7 @@ public class ConnectorFactory extends SimpleComponent implements ConnectorConfig
private SslProvider sslProvider;
private ComponentId defaultRequestFilterChain;
private ComponentId defaultResponseFilterChain;
+ private Boolean enableHttp2;
public Builder(String name, int listenPort) {
this.name = name;
@@ -87,6 +88,8 @@ public class ConnectorFactory extends SimpleComponent implements ConnectorConfig
this.defaultResponseFilterChain = filterChain; return this;
}
+ public Builder enableHttp2(boolean enabled) { this.enableHttp2 = enabled; return this; }
+
public ConnectorFactory build() {
return new ConnectorFactory(this);
}
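A hypothetical usage sketch of the new builder flag; connector name and port are illustrative, and the flag defaults to false when unset, per the constructor above.

    // Sketch only:
    ConnectorFactory connector = new ConnectorFactory.Builder("default", 8080)
            .enableHttp2(true)
            .build();
    // getConfig(...) then emits connectorBuilder.http2Enabled(true)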
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/http/ssl/HostedSslConnectorFactory.java b/config-model/src/main/java/com/yahoo/vespa/model/container/http/ssl/HostedSslConnectorFactory.java
index 30ebb843aa7..9d715073f77 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/http/ssl/HostedSslConnectorFactory.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/http/ssl/HostedSslConnectorFactory.java
@@ -29,27 +29,32 @@ public class HostedSslConnectorFactory extends ConnectorFactory {
* Create connector factory that uses a certificate provided by the config-model / configserver and default hosted Vespa truststore.
*/
public static HostedSslConnectorFactory withProvidedCertificate(
- String serverName, EndpointCertificateSecrets endpointCertificateSecrets, boolean enforceHandshakeClientAuth) {
- return new HostedSslConnectorFactory(createConfiguredDirectSslProvider(serverName, endpointCertificateSecrets, DEFAULT_HOSTED_TRUSTSTORE, /*tlsCaCertificates*/null, enforceHandshakeClientAuth), false, enforceHandshakeClientAuth);
+ String serverName, EndpointCertificateSecrets endpointCertificateSecrets, boolean enforceHandshakeClientAuth, boolean enableHttp2) {
+ ConfiguredDirectSslProvider sslProvider = createConfiguredDirectSslProvider(
+ serverName, endpointCertificateSecrets, DEFAULT_HOSTED_TRUSTSTORE, /*tlsCaCertificates*/null, enforceHandshakeClientAuth);
+ return new HostedSslConnectorFactory(sslProvider, false, enforceHandshakeClientAuth, enableHttp2);
}
/**
* Create connector factory that uses a certificate provided by the config-model / configserver and a truststore configured by the application.
*/
public static HostedSslConnectorFactory withProvidedCertificateAndTruststore(
- String serverName, EndpointCertificateSecrets endpointCertificateSecrets, String tlsCaCertificates) {
- return new HostedSslConnectorFactory(createConfiguredDirectSslProvider(serverName, endpointCertificateSecrets, /*tlsCaCertificatesPath*/null, tlsCaCertificates, false), true, false);
+ String serverName, EndpointCertificateSecrets endpointCertificateSecrets, String tlsCaCertificates, boolean enableHttp2) {
+ ConfiguredDirectSslProvider sslProvider = createConfiguredDirectSslProvider(
+ serverName, endpointCertificateSecrets, /*tlsCaCertificatesPath*/null, tlsCaCertificates, false);
+ return new HostedSslConnectorFactory(sslProvider, true, false, enableHttp2);
}
/**
* Create connector factory that uses the default certificate and truststore provided by Vespa (through Vespa-global TLS configuration).
*/
- public static HostedSslConnectorFactory withDefaultCertificateAndTruststore(String serverName) {
- return new HostedSslConnectorFactory(new DefaultSslProvider(serverName), true, false);
+ public static HostedSslConnectorFactory withDefaultCertificateAndTruststore(String serverName, boolean enableHttp2) {
+ return new HostedSslConnectorFactory(new DefaultSslProvider(serverName), true, false, enableHttp2);
}
- private HostedSslConnectorFactory(SslProvider sslProvider, boolean enforceClientAuth, boolean enforceHandshakeClientAuth) {
- super(new Builder("tls4443", 4443).sslProvider(sslProvider));
+ private HostedSslConnectorFactory(SslProvider sslProvider, boolean enforceClientAuth,
+ boolean enforceHandshakeClientAuth, boolean enableHttp2) {
+ super(new Builder("tls4443", 4443).sslProvider(sslProvider).enableHttp2(enableHttp2));
this.enforceClientAuth = enforceClientAuth;
this.enforceHandshakeClientAuth = enforceHandshakeClientAuth;
}
@@ -85,7 +90,7 @@ public class HostedSslConnectorFactory extends ConnectorFactory {
connectorBuilder
.proxyProtocol(new ConnectorConfig.ProxyProtocol.Builder().enabled(true).mixedMode(true))
- .idleTimeout(Duration.ofMinutes(3).toSeconds())
+ .idleTimeout(Duration.ofSeconds(30).toSeconds())
.maxConnectionLife(Duration.ofMinutes(10).toSeconds());
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/http/xml/HttpBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/http/xml/HttpBuilder.java
index 5417a522d6a..f506a242a5b 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/http/xml/HttpBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/http/xml/HttpBuilder.java
@@ -78,13 +78,16 @@ public class HttpBuilder extends VespaDomBuilder.DomConfigProducerBuilder<Http>
readAttr -> builder.readEnabled(Boolean.valueOf(readAttr)));
XmlHelper.getOptionalAttribute(accessControlElem, "write").ifPresent(
writeAttr -> builder.writeEnabled(Boolean.valueOf(writeAttr)));
- builder.clientAuthentication(
+
+ AccessControl.ClientAuthentication clientAuth =
XmlHelper.getOptionalAttribute(accessControlElem, "tls-handshake-client-auth")
- .map(value -> "want".equals(value)
- ? AccessControl.ClientAuthentication.want
- : AccessControl.ClientAuthentication.need)
- .orElse(AccessControl.ClientAuthentication.need)
- );
+ .filter("want"::equals)
+ .map(value -> AccessControl.ClientAuthentication.want)
+ .orElse(AccessControl.ClientAuthentication.need);
+ if (! deployState.getProperties().allowDisableMtls() && clientAuth == AccessControl.ClientAuthentication.want) {
+ throw new IllegalArgumentException("Overriding 'tls-handshake-client-auth' for application is not allowed.");
+ }
+ builder.clientAuthentication(clientAuth);
Element excludeElem = XML.getChild(accessControlElem, "exclude");
if (excludeElem != null) {
@@ -105,14 +108,14 @@ public class HttpBuilder extends VespaDomBuilder.DomConfigProducerBuilder<Http>
if (explicitDomain == null) {
throw new IllegalStateException("No Athenz domain provided for 'access-control'");
}
- deployState.getDeployLogger().log(Level.WARNING, "Athenz tenant is not provided by deploy call. This will soon be handled as failure.");
+ deployState.getDeployLogger().logApplicationPackage(Level.WARNING, "Athenz tenant is not provided by deploy call. This will soon be handled as failure.");
}
if (explicitDomain != null) {
if (tenantDomain != null && !explicitDomain.equals(tenantDomain)) {
throw new IllegalArgumentException(
String.format("Domain in access-control ('%s') does not match tenant domain ('%s')", explicitDomain.value(), tenantDomain.value()));
}
- deployState.getDeployLogger().log(Level.WARNING, "Domain in 'access-control' is deprecated and will be removed soon");
+ deployState.getDeployLogger().logApplicationPackage(Level.WARNING, "Domain in 'access-control' is deprecated and will be removed soon");
}
return tenantDomain != null ? tenantDomain : explicitDomain;
}
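Spelled out, the new resolution of tls-handshake-client-auth: the value "want" maps to ClientAuthentication.want, anything else (including an absent attribute) maps to need, and want is only accepted when the deployment is allowed to disable mTLS. A condensed sketch of the same logic, where attrValue is a hypothetical String holding the attribute value (possibly null):

    // Sketch only:
    AccessControl.ClientAuthentication clientAuth =
            "want".equals(attrValue) ? AccessControl.ClientAuthentication.want
                                     : AccessControl.ClientAuthentication.need;
    if ( ! deployState.getProperties().allowDisableMtls() && clientAuth == AccessControl.ClientAuthentication.want)
        throw new IllegalArgumentException("Overriding 'tls-handshake-client-auth' for application is not allowed.");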
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/http/xml/JettyConnectorBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/http/xml/JettyConnectorBuilder.java
index c3f6264a8cc..3b616c34a03 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/http/xml/JettyConnectorBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/http/xml/JettyConnectorBuilder.java
@@ -28,7 +28,7 @@ import static java.util.stream.Collectors.toList;
public class JettyConnectorBuilder extends VespaDomBuilder.DomConfigProducerBuilder<ConnectorFactory> {
@Override
- protected ConnectorFactory doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element serverSpec) {
+ protected ConnectorFactory doBuild(DeployState deployState, AbstractConfigProducer<?> ancestor, Element serverSpec) {
String name = XmlHelper.getIdString(serverSpec);
int port = HttpBuilder.readPort(new ModelElement(serverSpec), deployState.isHosted(), deployState.getDeployLogger());
ConnectorFactory.Builder builder = new ConnectorFactory.Builder(name, port);
@@ -39,7 +39,7 @@ public class JettyConnectorBuilder extends VespaDomBuilder.DomConfigProducerBuil
.map(ComponentId::new)
.ifPresent(builder::defaultResponseFilterChain);
SslProvider sslProviderComponent = getSslConfigComponents(name, serverSpec);
- return builder.sslProvider(sslProviderComponent).build();
+ return builder.sslProvider(sslProviderComponent).enableHttp2(deployState.featureFlags().enableJdiscHttp2()).build();
}
SslProvider getSslConfigComponents(String serverName, Element serverSpec) {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/search/QueryProfiles.java b/config-model/src/main/java/com/yahoo/vespa/model/container/search/QueryProfiles.java
index a001d66bc7b..3227646041a 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/search/QueryProfiles.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/search/QueryProfiles.java
@@ -64,7 +64,7 @@ public class QueryProfiles implements Serializable, QueryProfilesConfig.Producer
}
if ( registry.getTypeRegistry().hasApplicationTypes() && registry.allComponents().isEmpty()) {
- logger.log(Level.WARNING, "This application define query profile types, but has " +
+ logger.logApplicationPackage(Level.WARNING, "This application define query profile types, but has " +
"no query profiles referencing them so they have no effect. " +
(tensorFields.isEmpty()
? ""
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
index d7ef7fbb143..87406b8bc9a 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
@@ -431,6 +431,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
// If the deployment contains certificate/private key reference, setup TLS port
HostedSslConnectorFactory connectorFactory;
+ boolean enableHttp2 = deployState.featureFlags().enableJdiscHttp2();
if (deployState.endpointCertificateSecrets().isPresent()) {
boolean authorizeClient = deployState.zone().system().isPublic();
if (authorizeClient && deployState.tlsClientAuthority().isEmpty()) {
@@ -444,10 +445,10 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
.orElse(false);
connectorFactory = authorizeClient
- ? HostedSslConnectorFactory.withProvidedCertificateAndTruststore(serverName, endpointCertificateSecrets, deployState.tlsClientAuthority().get())
- : HostedSslConnectorFactory.withProvidedCertificate(serverName, endpointCertificateSecrets, enforceHandshakeClientAuth);
+ ? HostedSslConnectorFactory.withProvidedCertificateAndTruststore(serverName, endpointCertificateSecrets, deployState.tlsClientAuthority().get(), enableHttp2)
+ : HostedSslConnectorFactory.withProvidedCertificate(serverName, endpointCertificateSecrets, enforceHandshakeClientAuth, enableHttp2);
} else {
- connectorFactory = HostedSslConnectorFactory.withDefaultCertificateAndTruststore(serverName);
+ connectorFactory = HostedSslConnectorFactory.withDefaultCertificateAndTruststore(serverName, enableHttp2);
}
cluster.getHttp().getAccessControl().ifPresent(accessControl -> accessControl.configureHostedConnector(connectorFactory));
server.addConnector(connectorFactory);
@@ -593,7 +594,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
private void checkTagName(Element spec, DeployLogger logger) {
if (spec.getTagName().equals(DEPRECATED_CONTAINER_TAG)) {
- logger.log(WARNING, "'" + DEPRECATED_CONTAINER_TAG + "' is deprecated as tag name. Use '" + CONTAINER_TAG + "' instead.");
+ logger.logApplicationPackage(WARNING, "'" + DEPRECATED_CONTAINER_TAG + "' is deprecated as tag name. Use '" + CONTAINER_TAG + "' instead.");
}
}
@@ -636,7 +637,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
} else {
jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
if (incompatibleGCOptions(jvmOptions)) {
- deployLogger.log(WARNING, "You need to move out your GC related options from 'jvmargs' to 'jvm-gc-options'");
+ deployLogger.logApplicationPackage(WARNING, "You need to move out your GC related options from 'jvmargs' to 'jvm-gc-options'");
cluster.setJvmGCOptions(ContainerCluster.G1GC);
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
index 6f7709efc24..1dd5074aedb 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
@@ -44,6 +44,7 @@ import static java.util.stream.Collectors.toList;
public class ContentSearchCluster extends AbstractConfigProducer<SearchCluster> implements ProtonConfig.Producer, DispatchConfig.Producer {
private final boolean flushOnShutdown;
+ private final Boolean syncTransactionLog;
/** If this is set up for streaming search, it is modelled as one search cluster per search definition */
private final Map<String, AbstractSearchCluster> clusters = new TreeMap<>();
@@ -97,13 +98,15 @@ public class ContentSearchCluster extends AbstractConfigProducer<SearchCluster>
ModelElement clusterElem = new ModelElement(producerSpec);
String clusterName = ContentCluster.getClusterId(clusterElem);
Boolean flushOnShutdownElem = clusterElem.childAsBoolean("engine.proton.flush-on-shutdown");
+ Boolean syncTransactionLog = clusterElem.childAsBoolean("engine.proton.sync-transactionlog");
ContentSearchCluster search = new ContentSearchCluster(ancestor,
clusterName,
deployState.getProperties().featureFlags(),
documentDefinitions,
globallyDistributedDocuments,
- getFlushOnShutdown(flushOnShutdownElem, deployState),
+ getFlushOnShutdown(flushOnShutdownElem),
+ syncTransactionLog,
combined);
ModelElement tuning = clusterElem.childByPath("engine.proton.tuning");
@@ -117,11 +120,11 @@ public class ContentSearchCluster extends AbstractConfigProducer<SearchCluster>
return search;
}
- private boolean getFlushOnShutdown(Boolean flushOnShutdownElem, DeployState deployState) {
+ private boolean getFlushOnShutdown(Boolean flushOnShutdownElem) {
if (flushOnShutdownElem != null) {
return flushOnShutdownElem;
}
- return ! stateIsHosted(deployState);
+ return true;
}
private Double getQueryTimeout(ModelElement clusterElem) {
@@ -197,6 +200,7 @@ public class ContentSearchCluster extends AbstractConfigProducer<SearchCluster>
Map<String, NewDocumentType> documentDefinitions,
Set<NewDocumentType> globallyDistributedDocuments,
boolean flushOnShutdown,
+ Boolean syncTransactionLog,
boolean combined)
{
super(parent, "search");
@@ -204,6 +208,8 @@ public class ContentSearchCluster extends AbstractConfigProducer<SearchCluster>
this.documentDefinitions = documentDefinitions;
this.globallyDistributedDocuments = globallyDistributedDocuments;
this.flushOnShutdown = flushOnShutdown;
+ this.syncTransactionLog = syncTransactionLog;
+
this.combined = combined;
maxPendingMoveOps = featureFlags.maxPendingMoveOps();
feedSequencerType = convertFeedSequencerType(featureFlags.feedSequencerType());
@@ -278,12 +284,12 @@ public class ContentSearchCluster extends AbstractConfigProducer<SearchCluster>
searchNode.setHostResource(node.getHostResource());
searchNode.initService(deployState.getDeployLogger());
- tls = new TransactionLogServer(searchNode, clusterName);
+ tls = new TransactionLogServer(searchNode, clusterName, syncTransactionLog);
tls.setHostResource(searchNode.getHostResource());
tls.initService(deployState.getDeployLogger());
} else {
searchNode = new SearchNode.Builder(""+node.getDistributionKey(), spec, clusterName, node, flushOnShutdown, tuning, resourceLimits, combined).build(deployState, parent, element.getXml());
- tls = new TransactionLogServer.Builder(clusterName).build(deployState, searchNode, element.getXml());
+ tls = new TransactionLogServer.Builder(clusterName, syncTransactionLog).build(deployState, searchNode, element.getXml());
}
searchNode.setTls(tls);
if (hasIndexedCluster()) {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/DistributorCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/DistributorCluster.java
index 0e3fee5a749..390c939c4b4 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/DistributorCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/DistributorCluster.java
@@ -43,6 +43,7 @@ public class DistributorCluster extends AbstractConfigProducer<Distributor> impl
private final boolean hasIndexedDocumentType;
private final boolean useThreePhaseUpdates;
private final int maxActivationInhibitedOutOfSyncGroups;
+ private final int numDistributorStripes;
public static class Builder extends VespaDomBuilder.DomConfigProducerBuilder<DistributorCluster> {
@@ -105,18 +106,20 @@ public class DistributorCluster extends AbstractConfigProducer<Distributor> impl
final boolean hasIndexedDocumentType = clusterContainsIndexedDocumentType(documentsNode);
boolean useThreePhaseUpdates = deployState.getProperties().featureFlags().useThreePhaseUpdates();
int maxInhibitedGroups = deployState.getProperties().featureFlags().maxActivationInhibitedOutOfSyncGroups();
+ int numDistributorStripes = deployState.getProperties().featureFlags().numDistributorStripes();
return new DistributorCluster(parent,
new BucketSplitting.Builder().build(new ModelElement(producerSpec)), gc,
hasIndexedDocumentType, useThreePhaseUpdates,
- maxInhibitedGroups);
+ maxInhibitedGroups, numDistributorStripes);
}
}
private DistributorCluster(ContentCluster parent, BucketSplitting bucketSplitting,
GcOptions gc, boolean hasIndexedDocumentType,
boolean useThreePhaseUpdates,
- int maxActivationInhibitedOutOfSyncGroups)
+ int maxActivationInhibitedOutOfSyncGroups,
+ int numDistributorStripes)
{
super(parent, "distributor");
this.parent = parent;
@@ -125,6 +128,7 @@ public class DistributorCluster extends AbstractConfigProducer<Distributor> impl
this.hasIndexedDocumentType = hasIndexedDocumentType;
this.useThreePhaseUpdates = useThreePhaseUpdates;
this.maxActivationInhibitedOutOfSyncGroups = maxActivationInhibitedOutOfSyncGroups;
+ this.numDistributorStripes = numDistributorStripes;
}
@Override
@@ -138,6 +142,7 @@ public class DistributorCluster extends AbstractConfigProducer<Distributor> impl
builder.disable_bucket_activation(hasIndexedDocumentType == false);
builder.enable_metadata_only_fetch_phase_for_inconsistent_updates(useThreePhaseUpdates);
builder.max_activation_inhibited_out_of_sync_groups(maxActivationInhibitedOutOfSyncGroups);
+ builder.num_distributor_stripes(numDistributorStripes);
bucketSplitting.getConfig(builder);
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/Redundancy.java b/config-model/src/main/java/com/yahoo/vespa/model/content/Redundancy.java
index 655ee1d1d57..ba968411393 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/Redundancy.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/Redundancy.java
@@ -11,8 +11,8 @@ import com.yahoo.vespa.config.search.core.ProtonConfig;
*/
public class Redundancy implements StorDistributionConfig.Producer, ProtonConfig.Producer {
- // This numbers are all per group as wanted numbers.
- private final int initialRedundancy ;
+ // These numbers are all per group as wanted numbers.
+ private final int initialRedundancy;
private final int finalRedundancy;
private final int readyCopies;
@@ -29,12 +29,16 @@ public class Redundancy implements StorDistributionConfig.Producer, ProtonConfig
this.totalNodes = totalNodes;
}
+ /** Returns the final redundancy per group */
public int finalRedundancy() { return effectiveFinalRedundancy()/groups; }
+
public int readyCopies() { return effectiveReadyCopies()/groups; }
public int groups() { return groups; }
public int totalNodes() { return totalNodes; }
public int effectiveInitialRedundancy() { return Math.min(totalNodes, initialRedundancy * groups); }
+
+ /** Returns the final redundancy over all groups */
public int effectiveFinalRedundancy() { return Math.min(totalNodes, finalRedundancy * groups); }
public int effectiveReadyCopies() { return Math.min(totalNodes, readyCopies * groups); }
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/StorageGroup.java b/config-model/src/main/java/com/yahoo/vespa/model/content/StorageGroup.java
index 3c0f8996c22..f106b1f7bd5 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/StorageGroup.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/StorageGroup.java
@@ -143,10 +143,10 @@ public class StorageGroup {
}
public int getNumberOfLeafGroups() {
- int count = subgroups.isEmpty() ? 1 : 0;
- for (StorageGroup g : subgroups) {
+ if (subgroups.isEmpty()) return 1;
+ int count = 0;
+ for (StorageGroup g : subgroups)
count += g.getNumberOfLeafGroups();
- }
return count;
}
@@ -203,19 +203,25 @@ public class StorageGroup {
}
public StorageGroup buildRootGroup(DeployState deployState, RedundancyBuilder redundancyBuilder, ContentCluster owner) {
+ Optional<Integer> maxRedundancy = Optional.empty();
+ if (owner.isHosted())
+ maxRedundancy = validateRedundancyAndGroups();
+
Optional<ModelElement> group = Optional.ofNullable(clusterElement.child("group"));
Optional<ModelElement> nodes = getNodes(clusterElement);
if (group.isPresent() && nodes.isPresent())
throw new IllegalStateException("Both group and nodes exists, only one of these tags is legal");
if (group.isPresent() && (group.get().stringAttribute("name") != null || group.get().integerAttribute("distribution-key") != null))
- deployState.getDeployLogger().log(Level.INFO, "'distribution-key' attribute on a content cluster's root group is ignored");
+ deployState.getDeployLogger().logApplicationPackage(Level.INFO, "'distribution-key' attribute on a content cluster's root group is ignored");
GroupBuilder groupBuilder = collectGroup(owner.isHosted(), group, nodes, null, null);
StorageGroup storageGroup = (owner.isHosted())
? groupBuilder.buildHosted(deployState, owner, Optional.empty())
: groupBuilder.buildNonHosted(deployState, owner, Optional.empty());
- Redundancy redundancy = redundancyBuilder.build(owner.getName(), owner.isHosted(), storageGroup.subgroups.size(), storageGroup.getNumberOfLeafGroups(), storageGroup.countNodes());
+ Redundancy redundancy = redundancyBuilder.build(owner.getName(), owner.isHosted(), storageGroup.subgroups.size(),
+ storageGroup.getNumberOfLeafGroups(), storageGroup.countNodes(),
+ maxRedundancy);
owner.setRedundancy(redundancy);
if (storageGroup.partitions.isEmpty() && (redundancy.groups() > 1)) {
storageGroup.partitions = Optional.of(computePartitions(redundancy.finalRedundancy(), redundancy.groups()));
@@ -223,6 +229,29 @@ public class StorageGroup {
return storageGroup;
}
+ private Optional<Integer> validateRedundancyAndGroups() {
+ var redundancyElement = clusterElement.child("redundancy");
+ if (redundancyElement == null) return Optional.empty();
+ long redundancy = redundancyElement.asLong();
+
+ var nodesElement = clusterElement.child("nodes");
+ if (nodesElement == null) return Optional.empty();
+ var nodesSpec = NodesSpecification.from(nodesElement, context);
+
+ int minNodesPerGroup = (int)Math.ceil((double)nodesSpec.minResources().nodes() / nodesSpec.minResources().groups());
+
+ if (minNodesPerGroup < redundancy) { // TODO: Fail on this on Vespa 8, and simplify
+ context.getDeployLogger().logApplicationPackage(Level.WARNING,
+ "Cluster '" + clusterElement.stringAttribute("id") + "' " +
+ "specifies redundancy " + redundancy + " but cannot be higher than " +
+ "the minimum nodes per group, which is " + minNodesPerGroup);
+ return Optional.of(minNodesPerGroup);
+ }
+ else {
+ return Optional.empty();
+ }
+ }
+
/** This returns a partition string which specifies equal distribution between all groups */
// TODO: Make a partitions object
static private String computePartitions(int redundancyPerGroup, int numGroups) {
@@ -393,7 +422,8 @@ public class StorageGroup {
childAsString(groupElement, VespaDomBuilder.VESPAMALLOC_DEBUG),
childAsString(groupElement, VespaDomBuilder.VESPAMALLOC_DEBUG_STACKTRACE));
- List<GroupBuilder> subGroups = groupElement.isPresent() ? collectSubGroups(isHosted, group, groupElement.get()) : Collections.emptyList();
+ List<GroupBuilder> subGroups = groupElement.isPresent() ? collectSubGroups(isHosted, group, groupElement.get())
+ : List.of();
List<XmlNodeBuilder> explicitNodes = new ArrayList<>();
explicitNodes.addAll(collectExplicitNodes(groupElement));
@@ -407,7 +437,7 @@ public class StorageGroup {
nodeRequirement = Optional.of(NodesSpecification.from(nodesElement.get(), context));
else if (nodesElement.isPresent() && context.getDeployState().isHosted() && context.getDeployState().zone().environment().isManuallyDeployed() ) // default to 1 node
nodeRequirement = Optional.of(NodesSpecification.from(nodesElement.get(), context));
- else if (! nodesElement.isPresent() && subGroups.isEmpty() && context.getDeployState().isHosted()) // request one node
+ else if (nodesElement.isEmpty() && subGroups.isEmpty() && context.getDeployState().isHosted()) // request one node
nodeRequirement = Optional.of(NodesSpecification.nonDedicated(1, context));
else // Nodes or groups explicitly listed - resolve in GroupBuilder
nodeRequirement = Optional.empty();
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
index 79ce9343560..d9bd413f053 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
@@ -25,6 +25,7 @@ import com.yahoo.vespa.config.content.core.BucketspacesConfig;
import com.yahoo.vespa.config.content.core.StorDistributormanagerConfig;
import com.yahoo.vespa.model.HostResource;
import com.yahoo.vespa.model.admin.Admin;
+import com.yahoo.vespa.model.admin.Configserver;
import com.yahoo.vespa.model.admin.clustercontroller.ClusterControllerCluster;
import com.yahoo.vespa.model.admin.clustercontroller.ClusterControllerComponent;
import com.yahoo.vespa.model.admin.clustercontroller.ClusterControllerConfigurer;
@@ -170,7 +171,7 @@ public class ContentCluster extends AbstractConfigProducer implements
if (context.getParentProducer().getRoot() == null) return c;
- addClusterControllers(context, c.rootGroup, contentElement, c.clusterId, c, deployState);
+ addClusterControllers(context, contentElement, c, deployState);
return c;
}
@@ -284,47 +285,42 @@ public class ContentCluster extends AbstractConfigProducer implements
}
private void addClusterControllers(ConfigModelContext context,
- StorageGroup rootGroup,
ModelElement contentElement,
- String contentClusterName,
ContentCluster contentCluster,
DeployState deployState) {
if (admin == null) return; // only in tests
if (contentCluster.getPersistence() == null) return;
-
ClusterControllerContainerCluster clusterControllers;
-
- ContentCluster overlappingCluster = findOverlappingCluster(context.getParentProducer().getRoot(), contentCluster);
-
- if (overlappingCluster != null && overlappingCluster.getClusterControllers() != null) {
- // Borrow the cluster controllers of the other cluster in this case.
- // This condition only occurs on non-hosted systems with a shared config server,
- // a combination which only exists in system tests.
- clusterControllers = overlappingCluster.getClusterControllers();
+ if (context.properties().hostedVespa()) {
+ clusterControllers = getDedicatedSharedControllers(contentElement, admin, context, deployState);
}
- else if (admin.multitenant()) {
- if (context.properties().dedicatedClusterControllerCluster())
- clusterControllers = getDedicatedSharedControllers(contentElement, admin, context, deployState);
- else {
- clusterControllers = createClusterControllers(new ClusterControllerCluster(contentCluster, "standalone", deployState),
- drawControllerHosts(3, rootGroup),
- contentClusterName + "-controllers",
- true,
- context.getDeployState());
- contentCluster.clusterControllers = clusterControllers;
+ else if (admin.multitenant()) { // system tests: Put on logserver
+ if (admin.getClusterControllers() == null) {
+ // TODO: logserver == null happens only in unit tests; disallow it
+ List<HostResource> host = admin.getLogserver() == null ? List.of() : List.of(admin.getLogserver().getHostResource());
+ admin.setClusterControllers(createClusterControllers(new ClusterControllerCluster(admin, "standalone", deployState),
+ host,
+ "cluster-controllers",
+ true,
+ deployState));
}
- }
- else {
clusterControllers = admin.getClusterControllers();
- if (clusterControllers == null) {
- List<HostResource> hosts = admin.getClusterControllerHosts();
+ }
+ else { // self hosted: Put on config servers or explicit cluster controllers
+ if (admin.getClusterControllers() == null) {
+ var hosts = admin.getConfigservers().stream().map(s -> s.getHostResource()).collect(toList());
if (hosts.size() > 1) {
- context.getDeployState().getDeployLogger().log(Level.INFO,
- "When having content cluster(s) and more than 1 config server it is recommended to configure cluster controllers explicitly.");
+ var message = "When having content clusters and more than 1 config server " +
+ "it is recommended to configure cluster controllers explicitly.";
+ deployState.getDeployLogger().logApplicationPackage(Level.INFO, message);
}
- clusterControllers = createClusterControllers(admin, hosts, "cluster-controllers", false, context.getDeployState());
- admin.setClusterControllers(clusterControllers);
+ admin.setClusterControllers(createClusterControllers(admin,
+ hosts,
+ "cluster-controllers",
+ false,
+ deployState));
}
+ clusterControllers = admin.getClusterControllers();
}
addClusterControllerComponentsForThisCluster(clusterControllers, contentCluster);
@@ -334,21 +330,6 @@ public class ContentCluster extends AbstractConfigProducer implements
}
}
- /** Returns any other content cluster which shares nodes with this, or null if none are built */
- private ContentCluster findOverlappingCluster(AbstractConfigProducerRoot root, ContentCluster contentCluster) {
- for (ContentCluster otherContentCluster : root.getChildrenByTypeRecursive(ContentCluster.class)) {
- if (otherContentCluster != contentCluster && overlaps(contentCluster, otherContentCluster))
- return otherContentCluster;
- }
- return null;
- }
-
- private boolean overlaps(ContentCluster c1, ContentCluster c2) {
- Set<HostResource> c1Hosts = c1.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet());
- Set<HostResource> c2Hosts = c2.getRootGroup().recursiveGetNodes().stream().map(StorageNode::getHostResource).collect(Collectors.toSet());
- return ! Sets.intersection(c1Hosts, c2Hosts).isEmpty();
- }
-
public static final NodeResources clusterControllerResources = new NodeResources(0.5, 2, 10, 0.3, NodeResources.DiskSpeed.any, NodeResources.StorageType.any);
private ClusterControllerContainerCluster getDedicatedSharedControllers(ModelElement contentElement, Admin admin,
@@ -375,65 +356,19 @@ public class ContentCluster extends AbstractConfigProducer implements
return admin.getClusterControllers();
}
- private List<HostResource> drawControllerHosts(int count, StorageGroup rootGroup) {
- List<HostResource> hosts = drawControllerHosts(count, false, rootGroup);
- List<HostResource> retiredHosts = drawControllerHosts(count, true, rootGroup);
-
- // preserve the cluster state in case all pre-existing controllers are on retired nodes
- List<HostResource> all = new ArrayList<>(hosts);
- all.addAll(retiredHosts);
- return all;
- }
-
- private List<HostResource> drawControllerHosts(int count, boolean retired, StorageGroup rootGroup) {
- List<HostResource> hosts = drawContentHostsRecursively(count, retired, rootGroup);
- if (hosts.size() % 2 == 0 && ! hosts.isEmpty()) // ZK clusters of even sizes are less available (even in the size=2 case)
- hosts = hosts.subList(0, hosts.size()-1);
- return hosts;
- }
-
- /**
- * Draw <code>count</code> nodes from as many different content groups below this as possible.
- * This will only achieve maximum spread in the case where the groups are balanced and never on the same
- * physical node. It will not achieve maximum spread over all levels in a multilevel group hierarchy.
- */
- // Note: This method cannot be changed to draw different nodes without ensuring that it will draw nodes
- // which overlaps with previously drawn nodes as that will prevent rolling upgrade
- private List<HostResource> drawContentHostsRecursively(int count, boolean retired, StorageGroup group) {
- Set<HostResource> hosts = new HashSet<>();
- if (group.getNodes().isEmpty()) {
- int hostsPerSubgroup = (int)Math.ceil((double)count / group.getSubgroups().size());
- for (StorageGroup subgroup : group.getSubgroups())
- hosts.addAll(drawContentHostsRecursively(hostsPerSubgroup, retired, subgroup));
- }
- else {
- hosts.addAll(group.getNodes().stream()
- .filter(node -> node.isRetired() == retired)
- .map(StorageNode::getHostResource).collect(toList()));
- }
-
- List<HostResource> sortedHosts = new ArrayList<>(hosts);
- sortedHosts.sort(HostResource::comparePrimarilyByIndexTo);
- sortedHosts = sortedHosts.subList(0, Math.min(count, hosts.size()));
- return sortedHosts;
- }
-
private ClusterControllerContainerCluster createClusterControllers(AbstractConfigProducer<?> parent,
Collection<HostResource> hosts,
String name,
- boolean multitenant,
+ boolean runStandaloneZooKeeper,
DeployState deployState) {
var clusterControllers = new ClusterControllerContainerCluster(parent, name, name, deployState);
List<ClusterControllerContainer> containers = new ArrayList<>();
- // Add a cluster controller on each config server (there is always at least one).
if (clusterControllers.getContainers().isEmpty()) {
int index = 0;
for (HostResource host : hosts) {
- int ccIndex = deployState.getProperties().dedicatedClusterControllerCluster()
- ? host.spec().membership().map(ClusterMembership::index).orElse(index)
- : index;
+ int ccIndex = host.spec().membership().map(ClusterMembership::index).orElse(index);
boolean retired = host.spec().membership().map(ClusterMembership::retired).orElse(false);
- var clusterControllerContainer = new ClusterControllerContainer(clusterControllers, ccIndex, multitenant, deployState, retired);
+ var clusterControllerContainer = new ClusterControllerContainer(clusterControllers, ccIndex, runStandaloneZooKeeper, deployState, retired);
clusterControllerContainer.setHostResource(host);
clusterControllerContainer.initService(deployState.getDeployLogger());
clusterControllerContainer.setProp("clustertype", "admin");
@@ -511,11 +446,7 @@ public class ContentCluster extends AbstractConfigProducer implements
public PersistenceEngine.PersistenceFactory getPersistence() { return persistenceFactory; }
- /**
- * The list of documentdefinitions declared at the cluster level.
- *
- * @return the set of documenttype names
- */
+ /** Returns the document types declared at the cluster level. */
public Map<String, NewDocumentType> getDocumentDefinitions() { return documentDefinitions; }
public boolean isGloballyDistributed(NewDocumentType docType) {
@@ -753,6 +684,7 @@ public class ContentCluster extends AbstractConfigProducer implements
* a previous generation of it only by restarting the consuming processes.
*/
public void setDeferChangesUntilRestart(boolean deferChangesUntilRestart) {
+ // TODO
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomTuningDispatchBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomTuningDispatchBuilder.java
index d599a1a1aca..64911acae1f 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomTuningDispatchBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomTuningDispatchBuilder.java
@@ -29,7 +29,7 @@ public class DomTuningDispatchBuilder {
builder.setMinActiveDocsCoverage(dispatchElement.childAsDouble("min-active-docs-coverage"));
if (dispatchElement.child("use-local-node") != null)
- logger.log(Level.WARNING, "Attribute 'use-local-node' is deprecated and ignored: " +
+ logger.logApplicationPackage(Level.WARNING, "Attribute 'use-local-node' is deprecated and ignored: " +
"The local node will automatically be preferred when appropriate.");
return builder.build();
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/RedundancyBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/RedundancyBuilder.java
index 2e97cdabd73..669d4ff6a1d 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/RedundancyBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/RedundancyBuilder.java
@@ -5,6 +5,8 @@ import com.yahoo.vespa.model.builder.xml.dom.ModelElement;
import com.yahoo.vespa.model.content.IndexedHierarchicDistributionValidator;
import com.yahoo.vespa.model.content.Redundancy;
+import java.util.Optional;
+
/**
* Builds redundancy config for a content cluster.
*/
@@ -37,7 +39,13 @@ public class RedundancyBuilder {
}
}
}
- public Redundancy build(String clusterName, boolean isHosted, int subGroups, int leafGroups, int totalNodes) {
+ public Redundancy build(String clusterName, boolean isHosted, int subGroups, int leafGroups, int totalNodes,
+ Optional<Integer> maxRedundancy) {
+ if (maxRedundancy.isPresent()) {
+ initialRedundancy = Math.min(initialRedundancy, maxRedundancy.get());
+ finalRedundancy = Math.min(finalRedundancy, maxRedundancy.get());
+ readyCopies = Math.min(readyCopies, maxRedundancy.get());
+ }
if (isHosted) {
return new Redundancy(initialRedundancy, finalRedundancy, readyCopies, leafGroups, totalNodes);
} else {
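For illustration, the capping introduced above (validateRedundancyAndGroups computing the minimum nodes per group, and RedundancyBuilder.build applying Math.min) works out as follows; the numbers are hypothetical and not taken from this change:

    // Hypothetical values: 4 nodes spread over 2 groups, redundancy 3 requested in services.xml
    int nodes = 4, groups = 2;
    long requestedRedundancy = 3;
    int minNodesPerGroup = (int) Math.ceil((double) nodes / groups);                 // ceil(4/2) = 2
    long cappedRedundancy = Math.min(requestedRedundancy, minNodesPerGroup);         // 3 is capped to 2
    // A WARNING is logged to the application package, and 2 is used as the effective maximum.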
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/ml/ConvertedModel.java b/config-model/src/main/java/com/yahoo/vespa/model/ml/ConvertedModel.java
index 5ee6ed02e61..9086ca9f40e 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/ml/ConvertedModel.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/ml/ConvertedModel.java
@@ -210,6 +210,9 @@ public class ConvertedModel {
Map<String, ExpressionFunction> expressions = new HashMap<>();
for (ImportedMlFunction outputFunction : model.outputExpressions()) {
ExpressionFunction expression = asExpressionFunction(outputFunction);
+ for (Map.Entry<String, TensorType> input : expression.argumentTypes().entrySet()) {
+ profile.addInputFeature(input.getKey(), input.getValue());
+ }
addExpression(expression, expression.getName(),
constantsReplacedByFunctions,
model, store, profile, queryProfiles,
@@ -238,7 +241,7 @@ public class ConvertedModel {
function.returnType().map(TensorType::fromSpec));
}
catch (ParseException e) {
- throw new IllegalArgumentException("Gor an illegal argument from importing " + function.name(), e);
+ throw new IllegalArgumentException("Got an illegal argument from importing " + function.name(), e);
}
}
@@ -251,24 +254,45 @@ public class ConvertedModel {
QueryProfileRegistry queryProfiles,
Map<String, ExpressionFunction> expressions) {
expression = expression.withBody(replaceConstantsByFunctions(expression.getBody(), constantsReplacedByFunctions));
+ if (expression.returnType().isEmpty()) {
+ TensorType type = expression.getBody().type(profile.typeContext(queryProfiles));
+ if (type != null) {
+ expression = expression.withReturnType(type);
+ }
+ }
store.writeExpression(expressionName, expression);
expressions.put(expressionName, expression);
}
private static Map<String, ExpressionFunction> convertStored(ModelStore store, RankProfile profile) {
- for (Pair<String, Tensor> constant : store.readSmallConstants())
+ for (Pair<String, Tensor> constant : store.readSmallConstants()) {
profile.addConstant(constant.getFirst(), asValue(constant.getSecond()));
+ }
for (RankingConstant constant : store.readLargeConstants()) {
- if ( ! profile.rankingConstants().asMap().containsKey(constant.getName()))
+ if ( ! profile.rankingConstants().asMap().containsKey(constant.getName())) {
profile.rankingConstants().add(constant);
+ }
}
for (Pair<String, RankingExpression> function : store.readFunctions()) {
addGeneratedFunctionToProfile(profile, function.getFirst(), function.getSecond());
}
- return store.readExpressions();
+ Map<String, ExpressionFunction> expressions = new HashMap<>();
+ for (Pair<String, ExpressionFunction> output : store.readExpressions()) {
+ String name = output.getFirst();
+ ExpressionFunction expression = output.getSecond();
+ for (Map.Entry<String, TensorType> input : expression.argumentTypes().entrySet()) {
+ profile.addInputFeature(input.getKey(), input.getValue());
+ }
+ TensorType type = expression.getBody().type(profile.typeContext());
+ if (type != null) {
+ expression = expression.withReturnType(type);
+ }
+ expressions.put(name, expression);
+ }
+ return expressions;
}
private static void transformSmallConstant(ModelStore store, RankProfile profile, String constantName,
@@ -320,7 +344,9 @@ public class ConvertedModel {
"\nwant to add " + expression + "\n");
return;
}
- profile.addFunction(new ExpressionFunction(functionName, expression), false); // TODO: Inline if only used once
+ ExpressionFunction function = new ExpressionFunction(functionName, expression);
+ // XXX should we resolve type here?
+ profile.addFunction(function, false); // TODO: Inline if only used once
}
/**
@@ -463,14 +489,14 @@ public class ConvertedModel {
application.getFile(modelFiles.expressionPath(name)).writeFile(new StringReader(b.toString()));
}
- Map<String, ExpressionFunction> readExpressions() {
- Map<String, ExpressionFunction> expressions = new HashMap<>();
+ List<Pair<String, ExpressionFunction>> readExpressions() {
+ List<Pair<String, ExpressionFunction>> expressions = new ArrayList<>();
ApplicationFile expressionPath = application.getFile(modelFiles.expressionsPath());
- if ( ! expressionPath.exists() || ! expressionPath.isDirectory()) return Collections.emptyMap();
+ if ( ! expressionPath.exists() || ! expressionPath.isDirectory()) return Collections.emptyList();
for (ApplicationFile expressionFile : expressionPath.listFiles()) {
- try (BufferedReader reader = new BufferedReader(expressionFile.createReader())){
+ try (BufferedReader reader = new BufferedReader(expressionFile.createReader())) {
String name = expressionFile.getPath().getName();
- expressions.put(name, readExpression(name, reader));
+ expressions.add(new Pair<>(name, readExpression(name, reader)));
}
catch (IOException e) {
throw new UncheckedIOException("Failed reading " + expressionFile.getPath(), e);
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/search/SearchNode.java b/config-model/src/main/java/com/yahoo/vespa/model/search/SearchNode.java
index 9f129f65281..ab8c36ae99d 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/search/SearchNode.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/search/SearchNode.java
@@ -2,9 +2,9 @@
package com.yahoo.vespa.model.search;
import com.yahoo.cloud.config.filedistribution.FiledistributorrpcConfig;
-import com.yahoo.config.model.api.ModelContext;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.model.producer.AbstractConfigProducer;
+import com.yahoo.config.provision.NodeResources;
import com.yahoo.metrics.MetricsmanagerConfig;
import com.yahoo.searchlib.TranslogserverConfig;
import com.yahoo.vespa.config.content.LoadTypeConfig;
@@ -213,6 +213,12 @@ public class SearchNode extends AbstractService implements
@Override
public void getConfig(TranslogserverConfig.Builder builder) {
+ Optional<NodeResources> nodeResources = getSpecifiedNodeResources();
+ if (nodeResources.isPresent()) {
+ if (nodeResources.get().storageType() == NodeResources.StorageType.remote) {
+ builder.usefsync(false);
+ }
+ }
tls.getConfig(builder);
}
@@ -272,8 +278,9 @@ public class SearchNode extends AbstractService implements
// to make sure the node failer has done its work
builder.pruneremoveddocumentsage(4 * 24 * 3600 + 3600 + 60);
}
- if (getHostResource() != null && ! getHostResource().realResources().isUnspecified()) {
- var nodeResourcesTuning = new NodeResourcesTuning(getHostResource().realResources(),
+ Optional<NodeResources> nodeResources = getSpecifiedNodeResources();
+ if (nodeResources.isPresent()) {
+ var nodeResourcesTuning = new NodeResourcesTuning(nodeResources.get(),
tuning.map(Tuning::threadsPerSearch).orElse(1),
combined);
nodeResourcesTuning.getConfig(builder);
@@ -283,6 +290,10 @@ public class SearchNode extends AbstractService implements
}
}
+ private Optional<NodeResources> getSpecifiedNodeResources() {
+ return (getHostResource() != null) ? getHostResource().realResources().asOptional() : Optional.empty();
+ }
+
@Override
public HashMap<String, String> getDefaultMetricDimensions() {
HashMap<String, String> dimensions = new HashMap<>();
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/search/TransactionLogServer.java b/config-model/src/main/java/com/yahoo/vespa/model/search/TransactionLogServer.java
index 7c3f9bc1001..ed12a161805 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/search/TransactionLogServer.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/search/TransactionLogServer.java
@@ -25,9 +25,11 @@ public class TransactionLogServer extends AbstractService {
}
}
- public TransactionLogServer(AbstractConfigProducer searchNode, String clusterName) {
+ private final Boolean useFsync;
+ public TransactionLogServer(AbstractConfigProducer searchNode, String clusterName, Boolean useFsync) {
super(searchNode, "transactionlogserver");
portsMeta.on(0).tag("tls");
+ this.useFsync = useFsync;
setProp("clustername", clusterName);
setProp("clustertype", "search");
}
@@ -35,13 +37,15 @@ public class TransactionLogServer extends AbstractService {
public static class Builder extends VespaDomBuilder.DomConfigProducerBuilder<TransactionLogServer> {
private final String clusterName;
- public Builder(String clusterName) {
+ private final Boolean useFsync;
+ public Builder(String clusterName, Boolean useFsync) {
this.clusterName = clusterName;
+ this.useFsync = useFsync;
}
@Override
protected TransactionLogServer doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) {
- return new TransactionLogServer(ancestor, clusterName);
+ return new TransactionLogServer(ancestor, clusterName, useFsync);
}
}
@@ -75,8 +79,11 @@ public class TransactionLogServer extends AbstractService {
}
public void getConfig(TranslogserverConfig.Builder builder) {
- builder.listenport(getTlsPort()).basedir(getTlsDir());
-
+ builder.listenport(getTlsPort())
+ .basedir(getTlsDir());
+ if (useFsync != null) {
+ builder.usefsync(useFsync);
+ }
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java b/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java
index 7cc9fa7ae02..5e7ac0cabec 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java
@@ -88,7 +88,7 @@ public class FileSender implements Serializable {
ConfigDefinition configDefinition = builder.getConfigDefinition();
if (configDefinition == null) {
// TODO: throw new IllegalArgumentException("Not able to find config definition for " + builder);
- logger.log(Level.FINE, "Not able to find config definition for " + key + ". Will not send files for this config");
+ logger.logApplicationPackage(Level.FINE, "Not able to find config definition for " + key + ". Will not send files for this config");
return;
}
// Inspect fields at this level
diff --git a/config-model/src/main/javacc/SDParser.jj b/config-model/src/main/javacc/SDParser.jj
index f8a648006e9..88c45a18e4e 100644
--- a/config-model/src/main/javacc/SDParser.jj
+++ b/config-model/src/main/javacc/SDParser.jj
@@ -270,6 +270,8 @@ TOKEN :
| < NORMALIZING: "normalizing" >
| < HASH: "hash" >
| < BTREE: "btree" >
+| < CASED: "cased" >
+| < UNCASED: "uncased" >
| < BOLDING: "bolding" >
| < BODY: "body" >
| < HEADER: "header" >
@@ -587,7 +589,7 @@ void bodycfg(SDDocumentType document) : { }
*/
void compression(SDDocumentType document, String name) :
{
- deployLogger.log(Level.WARNING, "'compression' for a document is deprecated and ignored");
+ deployLogger.logApplicationPackage(Level.WARNING, "'compression' for a document is deprecated and ignored");
CompressionConfig cfg = new CompressionConfig(CompressionType.LZ4);
}
{
@@ -875,7 +877,7 @@ DataType dataType() :
}
if (isArrayOldStyle) {
- deployLogger.log(Level.WARNING, "Data type syntax '" + typeName + "[]' is deprecated, use 'array<" + typeName + ">' instead.");
+ deployLogger.logApplicationPackage(Level.WARNING, "Data type syntax '" + typeName + "[]' is deprecated, use 'array<" + typeName + ">' instead.");
type = DataType.getArray(type);
}
if ("tag".equalsIgnoreCase(typeName) && type instanceof WeightedSetDataType) ((WeightedSetDataType)type).setTag(true);
@@ -1257,7 +1259,7 @@ Object attributeTensorType(AttributeOperation attribute) :
tensorType = tensorType("For attribute field '" + attribute.getName() + "'")
{
// TODO: Remove on Vespa 8
- deployLogger.log(Level.WARNING, "In field '" + attribute.getName() + "': Specifying tensor type on the attribute is deprecated and has no effect.");
+ deployLogger.logApplicationPackage(Level.WARNING, "In field '" + attribute.getName() + "': Specifying tensor type on the attribute is deprecated and has no effect.");
}
{ return null; }
}
@@ -1530,24 +1532,25 @@ void bolding(FieldOperationContainer field) :
*/
void dictionary(FieldOperationContainer field) :
{
- Dictionary.Type type;
}
{
- <DICTIONARY> <COLON> type = dictionaryType()
+ <DICTIONARY>
+ ( (<COLON> dictionarySetting(field))
+ | (lbrace() (dictionarySetting(field) (<NL>)*)* <RBRACE>))
{
- field.addOperation(new DictionaryOperation(type));
}
}
-Dictionary.Type dictionaryType() :
+void dictionarySetting(FieldOperationContainer field) :
{
Dictionary.Type type;
}
{
- ( <HASH> { type = Dictionary.Type.HASH; }
- | <BTREE> { type = Dictionary.Type.BTREE; } )
+ ( <HASH> { field.addOperation(new DictionaryOperation(DictionaryOperation.Operation.HASH)); }
+ | <BTREE> { field.addOperation(new DictionaryOperation(DictionaryOperation.Operation.BTREE)); }
+ | <CASED> { field.addOperation(new DictionaryOperation(DictionaryOperation.Operation.CASED)); }
+ | <UNCASED> { field.addOperation(new DictionaryOperation(DictionaryOperation.Operation.UNCASED)); })
{
- return type;
}
}
@@ -1560,7 +1563,7 @@ void body(SDField field) : { }
{
<BODY>
{
- deployLogger.log(Level.WARNING, field + ": 'header/body' is deprecated and has no effect.");
+ deployLogger.logApplicationPackage(Level.WARNING, field + ": 'header/body' is deprecated and has no effect.");
}
}
@@ -1573,7 +1576,7 @@ void header(SDField field) : { }
{
<HEADER>
{
- deployLogger.log(Level.WARNING, field + ": 'header/body' is deprecated and has no effect.");
+ deployLogger.logApplicationPackage(Level.WARNING, field + ": 'header/body' is deprecated and has no effect.");
}
}
@@ -1624,19 +1627,21 @@ Object matchItem(FieldOperationContainer field) : { }
Object matchType(FieldOperationContainer container) :
{
- MatchOperation field = new MatchOperation();
+ MatchOperation matchOp = new MatchOperation();
}
{
- ( <MTOKEN> { field.setMatchingType(Matching.Type.TEXT); } // Deprecated synonym to TEXT
- | <TEXT> { field.setMatchingType(Matching.Type.TEXT); }
- | <WORD> { field.setMatchingType(Matching.Type.WORD); }
- | <EXACT> { field.setMatchingType(Matching.Type.EXACT); }
- | <GRAM> { field.setMatchingType(Matching.Type.GRAM); }
- | <PREFIX> { field.setMatchingAlgorithm(Matching.Algorithm.PREFIX); }
- | <SUBSTRING> { field.setMatchingAlgorithm(Matching.Algorithm.SUBSTRING); }
- | <SUFFIX> { field.setMatchingAlgorithm(Matching.Algorithm.SUFFIX); } )
+ ( <MTOKEN> { matchOp.setMatchingType(Matching.Type.TEXT); } // Deprecated synonym to TEXT
+ | <TEXT> { matchOp.setMatchingType(Matching.Type.TEXT); }
+ | <WORD> { matchOp.setMatchingType(Matching.Type.WORD); }
+ | <EXACT> { matchOp.setMatchingType(Matching.Type.EXACT); }
+ | <GRAM> { matchOp.setMatchingType(Matching.Type.GRAM); }
+ | <CASED> { matchOp.setCase(Case.CASED); }
+ | <UNCASED> { matchOp.setCase(Case.UNCASED); }
+ | <PREFIX> { matchOp.setMatchingAlgorithm(Matching.Algorithm.PREFIX); }
+ | <SUBSTRING> { matchOp.setMatchingAlgorithm(Matching.Algorithm.SUBSTRING); }
+ | <SUFFIX> { matchOp.setMatchingAlgorithm(Matching.Algorithm.SUFFIX); } )
{
- container.addOperation(field);
+ container.addOperation(matchOp);
return null;
}
}
@@ -2427,7 +2432,7 @@ void rankDegradationBinSize() :
}
{
<RPBINSIZE> <COLON> freq = consumeFloat()
- { deployLogger.log(Level.WARNING, "Specifying 'doc-frequency' in 'rank-degradation' is deprecated and has no effect."); }
+ { deployLogger.logApplicationPackage(Level.WARNING, "Specifying 'doc-frequency' in 'rank-degradation' is deprecated and has no effect."); }
}
@@ -2440,7 +2445,7 @@ void rankDegradationBinLow() :
}
{
<RPBINLOW> <COLON> n = integer()
- { deployLogger.log(Level.WARNING, "Specifying 'min-fullrank-docs' in 'rank-degradation' is deprecated and has no effect."); }
+ { deployLogger.logApplicationPackage(Level.WARNING, "Specifying 'min-fullrank-docs' in 'rank-degradation' is deprecated and has no effect."); }
}
@@ -2453,7 +2458,7 @@ void rankDegradationPosbinSize() :
}
{
<RPPOSBINSIZE> <COLON> avgOcc = consumeFloat()
- { deployLogger.log(Level.WARNING, "Specifying 'occurrences-per-doc' in 'rank-degradation' is deprecated and has no effect."); }
+ { deployLogger.logApplicationPackage(Level.WARNING, "Specifying 'occurrences-per-doc' in 'rank-degradation' is deprecated and has no effect."); }
}
@@ -2481,7 +2486,7 @@ Object rankDegradation(RankProfile profile) :
}
{
( <RANKDEGRADATIONFREQ> <COLON> freq = consumeFloat()
- { deployLogger.log(Level.WARNING, "Specifying 'rank-degradation-frequency' in 'rank-profile' is deprecated and has no effect."); }
+ { deployLogger.logApplicationPackage(Level.WARNING, "Specifying 'rank-degradation-frequency' in 'rank-profile' is deprecated and has no effect."); }
| <RANKDEGRADATION> lbrace() ( rankDegradationItem() (<NL>)*)+ <RBRACE>
)
{
@@ -2642,6 +2647,7 @@ String identifier() : { }
| <BODY>
| <BOLDING>
| <BTREE>
+ | <CASED>
| <COMPRESSION>
| <COMPRESSIONLEVEL>
| <COMPRESSIONTHRESHOLD>
@@ -2747,6 +2753,7 @@ String identifier() : { }
| <TRUE>
| <TYPE>
| <UCA>
+ | <UNCASED>
| <URI>
| <UPPERBOUND>
| <USEDOCUMENT>
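Going by the grammar additions above, a schema field could plausibly use the new cased/uncased settings as sketched below; the field name and surrounding definition are made up for illustration and may need further settings (such as fast-search) to be meaningful in practice:

    field title type string {
        indexing: attribute
        match {
            word
            cased      # new token accepted by matchType
        }
        dictionary {   # block form; 'dictionary: hash' remains valid as a single setting
            hash
            cased
        }
    }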
diff --git a/config-model/src/main/perl/vespa-deploy b/config-model/src/main/perl/vespa-deploy
index a128e4a8d4c..d66295d4c36 100755
--- a/config-model/src/main/perl/vespa-deploy
+++ b/config-model/src/main/perl/vespa-deploy
@@ -460,7 +460,7 @@ sub http_prepare {
my $url = $source . $pathPrefix . "/$session_id/prepared";
$url = add_url_property_from_flag($url, $opt_f, "ignoreValidationErrors");
- $url = add_url_property_from_flag($url, $opt_n, "dryrun");
+ $url = add_url_property_from_flag($url, $opt_n, "dryRun");
$url = add_url_property_from_flag($url, $opt_v, "verbose");
$url = add_url_property_from_flag($url, $opt_H, "hostedVespa");
$url = add_url_property_from_option($url, $opt_a, "applicationName");
diff --git a/config-model/src/main/resources/schema/common.rnc b/config-model/src/main/resources/schema/common.rnc
index 878faabfec1..fa9a6c28ce2 100644
--- a/config-model/src/main/resources/schema/common.rnc
+++ b/config-model/src/main/resources/schema/common.rnc
@@ -45,7 +45,7 @@ GenericConfig = element config {
attribute name { text },
attribute namespace { text }?, # TODO: Remove in Vespa 8
attribute version { text }?,
- anyElement +
+ anyElement*
}
ComponentSpec =
diff --git a/config-model/src/main/resources/schema/content.rnc b/config-model/src/main/resources/schema/content.rnc
index a48d38b9f2c..7f52eae6da8 100644
--- a/config-model/src/main/resources/schema/content.rnc
+++ b/config-model/src/main/resources/schema/content.rnc
@@ -167,6 +167,7 @@ Engine = element engine {
Proton = element proton {
element flush-on-shutdown { xsd:string }? &
+ element sync-transactionlog { xsd:string }? &
element visibility-delay { xsd:double { minInclusive = "0.0" } }? &
element query-timeout { xsd:double { minInclusive = "0.0" } }? &
element searchable-copies { xsd:integer { minInclusive = "0" } }? &
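To illustrate the new element, a content cluster could plausibly disable transaction log fsync as sketched below; the cluster id, document type and node list are made-up values, and only sync-transactionlog comes from this change (it ends up as the usefsync setting of the transaction log server):

    <content version="1.0" id="music">
      <redundancy>2</redundancy>
      <engine>
        <proton>
          <!-- new optional element added by this change -->
          <sync-transactionlog>false</sync-transactionlog>
        </proton>
      </engine>
      <documents>
        <document type="music" mode="index"/>
      </documents>
      <nodes>
        <node hostalias="node1" distribution-key="0"/>
      </nodes>
    </content>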
diff --git a/config-model/src/test/cfg/application/invalid-services-syntax/services.xml b/config-model/src/test/cfg/application/invalid-services-syntax/services.xml
new file mode 100644
index 00000000000..77b6fc0e33a
--- /dev/null
+++ b/config-model/src/test/cfg/application/invalid-services-syntax/services.xml
@@ -0,0 +1,11 @@
+<?xml version='1.0' encoding='utf-8' ?>
+<services>
+ <config name='standard'>
+ <basicStruct>
+ <stringVal>default</stringVal>
+ </basicStruct>
+ </confih>
+ <admin version='2.0'>
+ <adminserver hostalias='node1'>
+ </admin>
+</services>
diff --git a/config-model/src/test/derived/advanced/attributes.cfg b/config-model/src/test/derived/advanced/attributes.cfg
index bc95cf86071..63bd7d980a6 100644
--- a/config-model/src/test/derived/advanced/attributes.cfg
+++ b/config-model/src/test/derived/advanced/attributes.cfg
@@ -2,7 +2,8 @@ attribute[].name "location_zcurve"
attribute[].datatype INT64
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch true
diff --git a/config-model/src/test/derived/array_of_struct_attribute/attributes.cfg b/config-model/src/test/derived/array_of_struct_attribute/attributes.cfg
index 18bf8f12b32..eff0f7bd7a1 100644
--- a/config-model/src/test/derived/array_of_struct_attribute/attributes.cfg
+++ b/config-model/src/test/derived/array_of_struct_attribute/attributes.cfg
@@ -2,7 +2,8 @@ attribute[].name "elem_array.name"
attribute[].datatype STRING
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch true
@@ -31,7 +32,8 @@ attribute[].name "elem_array.weight"
attribute[].datatype INT32
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
diff --git a/config-model/src/test/derived/array_of_struct_attribute/index-info.cfg b/config-model/src/test/derived/array_of_struct_attribute/index-info.cfg
index de7b744a95b..28026b8a683 100644
--- a/config-model/src/test/derived/array_of_struct_attribute/index-info.cfg
+++ b/config-model/src/test/derived/array_of_struct_attribute/index-info.cfg
@@ -6,6 +6,8 @@ indexinfo[].command[].command "word"
indexinfo[].command[].indexname "elem_array.name"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "elem_array.name"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "elem_array.name"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "elem_array.name"
indexinfo[].command[].command "fast-search"
diff --git a/config-model/src/test/derived/attributeprefetch/attributes.cfg b/config-model/src/test/derived/attributeprefetch/attributes.cfg
index acdd50db696..43f8b813139 100644
--- a/config-model/src/test/derived/attributeprefetch/attributes.cfg
+++ b/config-model/src/test/derived/attributeprefetch/attributes.cfg
@@ -2,7 +2,8 @@ attribute[].name "singlebyte"
attribute[].datatype INT8
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -31,7 +32,8 @@ attribute[].name "multibyte"
attribute[].datatype INT8
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -60,7 +62,8 @@ attribute[].name "wsbyte"
attribute[].datatype INT8
attribute[].collectiontype WEIGHTEDSET
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -89,7 +92,8 @@ attribute[].name "singleint"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -118,7 +122,8 @@ attribute[].name "multiint"
attribute[].datatype INT32
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -147,7 +152,8 @@ attribute[].name "wsint"
attribute[].datatype INT32
attribute[].collectiontype WEIGHTEDSET
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -176,7 +182,8 @@ attribute[].name "singlelong"
attribute[].datatype INT64
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -205,7 +212,8 @@ attribute[].name "multilong"
attribute[].datatype INT64
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -234,7 +242,8 @@ attribute[].name "wslong"
attribute[].datatype INT64
attribute[].collectiontype WEIGHTEDSET
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -263,7 +272,8 @@ attribute[].name "singlefloat"
attribute[].datatype FLOAT
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -292,7 +302,8 @@ attribute[].name "multifloat"
attribute[].datatype FLOAT
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -321,7 +332,8 @@ attribute[].name "wsfloat"
attribute[].datatype FLOAT
attribute[].collectiontype WEIGHTEDSET
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -350,7 +362,8 @@ attribute[].name "singledouble"
attribute[].datatype DOUBLE
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -379,7 +392,8 @@ attribute[].name "multidouble"
attribute[].datatype DOUBLE
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -408,7 +422,8 @@ attribute[].name "wsdouble"
attribute[].datatype DOUBLE
attribute[].collectiontype WEIGHTEDSET
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -437,7 +452,8 @@ attribute[].name "singlestring"
attribute[].datatype STRING
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -466,7 +482,8 @@ attribute[].name "multistring"
attribute[].datatype STRING
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -495,7 +512,8 @@ attribute[].name "wsstring"
attribute[].datatype STRING
attribute[].collectiontype WEIGHTEDSET
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
diff --git a/config-model/src/test/derived/attributeprefetch/index-info.cfg b/config-model/src/test/derived/attributeprefetch/index-info.cfg
index dfce12f9b9f..e1b20b33e17 100644
--- a/config-model/src/test/derived/attributeprefetch/index-info.cfg
+++ b/config-model/src/test/derived/attributeprefetch/index-info.cfg
@@ -146,12 +146,16 @@ indexinfo[].command[].command "type WeightedSet<double>"
indexinfo[].command[].indexname "singlestring"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "singlestring"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "singlestring"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "singlestring"
indexinfo[].command[].command "type string"
indexinfo[].command[].indexname "multistring"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "multistring"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "multistring"
indexinfo[].command[].command "multivalue"
indexinfo[].command[].indexname "multistring"
indexinfo[].command[].command "attribute"
@@ -160,6 +164,8 @@ indexinfo[].command[].command "type Array<string>"
indexinfo[].command[].indexname "wsstring"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "wsstring"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "wsstring"
indexinfo[].command[].command "multivalue"
indexinfo[].command[].indexname "wsstring"
indexinfo[].command[].command "attribute"
diff --git a/config-model/src/test/derived/attributes/attributes.cfg b/config-model/src/test/derived/attributes/attributes.cfg
index c5a6ff372ad..82d1f7c99dc 100644
--- a/config-model/src/test/derived/attributes/attributes.cfg
+++ b/config-model/src/test/derived/attributes/attributes.cfg
@@ -2,7 +2,8 @@ attribute[].name "a1"
attribute[].datatype STRING
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -31,7 +32,8 @@ attribute[].name "a2"
attribute[].datatype STRING
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -60,7 +62,8 @@ attribute[].name "a3"
attribute[].datatype STRING
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -89,7 +92,8 @@ attribute[].name "a5"
attribute[].datatype STRING
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -118,7 +122,8 @@ attribute[].name "a6"
attribute[].datatype STRING
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -147,7 +152,8 @@ attribute[].name "b1"
attribute[].datatype STRING
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -176,7 +182,8 @@ attribute[].name "b2"
attribute[].datatype STRING
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -205,7 +212,8 @@ attribute[].name "b3"
attribute[].datatype STRING
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -234,7 +242,8 @@ attribute[].name "b4"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -263,7 +272,8 @@ attribute[].name "b5"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -292,7 +302,8 @@ attribute[].name "b6"
attribute[].datatype INT64
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -321,7 +332,8 @@ attribute[].name "b7"
attribute[].datatype DOUBLE
attribute[].collectiontype WEIGHTEDSET
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -350,7 +362,8 @@ attribute[].name "a9"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -379,7 +392,8 @@ attribute[].name "a10"
attribute[].datatype INT32
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch true
@@ -408,7 +422,8 @@ attribute[].name "a11"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -437,7 +452,8 @@ attribute[].name "a12"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -466,7 +482,8 @@ attribute[].name "a7_arr"
attribute[].datatype STRING
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -495,7 +512,8 @@ attribute[].name "a8_arr"
attribute[].datatype STRING
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
diff --git a/config-model/src/test/derived/attributes/index-info.cfg b/config-model/src/test/derived/attributes/index-info.cfg
index 77a52fa47ba..aa400c7de0a 100644
--- a/config-model/src/test/derived/attributes/index-info.cfg
+++ b/config-model/src/test/derived/attributes/index-info.cfg
@@ -6,18 +6,24 @@ indexinfo[].command[].command "word"
indexinfo[].command[].indexname "a1"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "a1"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "a1"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "a1"
indexinfo[].command[].command "type string"
indexinfo[].command[].indexname "a2"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "a2"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "a2"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "a2"
indexinfo[].command[].command "type string"
indexinfo[].command[].indexname "a3"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "a3"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "a3"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "a3"
indexinfo[].command[].command "type string"
@@ -60,6 +66,8 @@ indexinfo[].command[].command "type string"
indexinfo[].command[].indexname "b1"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "b1"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "b1"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "b1"
indexinfo[].command[].command "type string"
@@ -164,6 +172,8 @@ indexinfo[].command[].command "type int"
indexinfo[].command[].indexname "a7_arr"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "a7_arr"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "a7_arr"
indexinfo[].command[].command "multivalue"
indexinfo[].command[].indexname "a7_arr"
indexinfo[].command[].command "attribute"
@@ -174,6 +184,8 @@ indexinfo[].command[].command "word"
indexinfo[].command[].indexname "a8_arr"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "a8_arr"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "a8_arr"
indexinfo[].command[].command "multivalue"
indexinfo[].command[].indexname "a8_arr"
indexinfo[].command[].command "attribute"
diff --git a/config-model/src/test/derived/combinedattributeandindexsearch/index-info.cfg b/config-model/src/test/derived/combinedattributeandindexsearch/index-info.cfg
index e7bf410250f..a43c8a0b992 100644
--- a/config-model/src/test/derived/combinedattributeandindexsearch/index-info.cfg
+++ b/config-model/src/test/derived/combinedattributeandindexsearch/index-info.cfg
@@ -30,12 +30,16 @@ indexinfo[].command[].command "type string"
indexinfo[].command[].indexname "attribute1"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "attribute1"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "attribute1"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "attribute1"
indexinfo[].command[].command "type string"
indexinfo[].command[].indexname "attribute2"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "attribute2"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "attribute2"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "attribute2"
indexinfo[].command[].command "type string"
diff --git a/config-model/src/test/derived/complex/attributes.cfg b/config-model/src/test/derived/complex/attributes.cfg
index 6ca16458334..3c0f10722de 100644
--- a/config-model/src/test/derived/complex/attributes.cfg
+++ b/config-model/src/test/derived/complex/attributes.cfg
@@ -2,7 +2,8 @@ attribute[].name "prefixenabled"
attribute[].datatype STRING
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -31,7 +32,8 @@ attribute[].name "fleeting"
attribute[].datatype FLOAT
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -60,7 +62,8 @@ attribute[].name "fleeting2"
attribute[].datatype FLOAT
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -89,7 +92,8 @@ attribute[].name "foundat"
attribute[].datatype INT64
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -118,7 +122,8 @@ attribute[].name "collapseby"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -147,7 +152,8 @@ attribute[].name "ts"
attribute[].datatype INT64
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -176,7 +182,8 @@ attribute[].name "combineda"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -205,7 +212,8 @@ attribute[].name "year_arr"
attribute[].datatype INT32
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -234,7 +242,8 @@ attribute[].name "year_sub"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
diff --git a/config-model/src/test/derived/exactmatch/index-info.cfg b/config-model/src/test/derived/exactmatch/index-info.cfg
index cdf38849a24..f5d928a8fd0 100644
--- a/config-model/src/test/derived/exactmatch/index-info.cfg
+++ b/config-model/src/test/derived/exactmatch/index-info.cfg
@@ -22,6 +22,8 @@ indexinfo[].command[].command "exact *!!!*"
indexinfo[].command[].indexname "string_map.key"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "string_map.key"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "string_map.key"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "string_map.key"
indexinfo[].command[].command "type string"
@@ -44,6 +46,8 @@ indexinfo[].command[].command "type string"
indexinfo[].command[].indexname "elem_map.value.name"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "elem_map.value.name"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "elem_map.value.name"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "elem_map.value.name"
indexinfo[].command[].command "type string"
@@ -68,6 +72,8 @@ indexinfo[].command[].command "type Map<string,elem>"
indexinfo[].command[].indexname "elem_array.name"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "elem_array.name"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "elem_array.name"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "elem_array.name"
indexinfo[].command[].command "type string"
diff --git a/config-model/src/test/derived/hnsw_index/attributes.cfg b/config-model/src/test/derived/hnsw_index/attributes.cfg
index ec8eaab7dd4..f7f040bf182 100644
--- a/config-model/src/test/derived/hnsw_index/attributes.cfg
+++ b/config-model/src/test/derived/hnsw_index/attributes.cfg
@@ -2,7 +2,8 @@ attribute[].name "t1"
attribute[].datatype TENSOR
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -31,7 +32,8 @@ attribute[].name "t2"
attribute[].datatype TENSOR
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
diff --git a/config-model/src/test/derived/imported_fields_inherited_reference/attributes.cfg b/config-model/src/test/derived/imported_fields_inherited_reference/attributes.cfg
index 340ff11d0b2..26019842732 100644
--- a/config-model/src/test/derived/imported_fields_inherited_reference/attributes.cfg
+++ b/config-model/src/test/derived/imported_fields_inherited_reference/attributes.cfg
@@ -2,7 +2,8 @@ attribute[].name "ref_from_a"
attribute[].datatype REFERENCE
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -31,7 +32,8 @@ attribute[].name "ref_from_b"
attribute[].datatype REFERENCE
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -60,7 +62,8 @@ attribute[].name "from_a_int_field"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -89,7 +92,8 @@ attribute[].name "from_b_int_field"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
diff --git a/config-model/src/test/derived/imported_position_field/attributes.cfg b/config-model/src/test/derived/imported_position_field/attributes.cfg
index f8d3a37c00b..70b6ed14c55 100644
--- a/config-model/src/test/derived/imported_position_field/attributes.cfg
+++ b/config-model/src/test/derived/imported_position_field/attributes.cfg
@@ -2,7 +2,8 @@ attribute[].name "parent_ref"
attribute[].datatype REFERENCE
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -31,7 +32,8 @@ attribute[].name "my_pos_zcurve"
attribute[].datatype INT64
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch true
diff --git a/config-model/src/test/derived/imported_struct_fields/attributes.cfg b/config-model/src/test/derived/imported_struct_fields/attributes.cfg
index cfeff008f0d..e06d0f1a220 100644
--- a/config-model/src/test/derived/imported_struct_fields/attributes.cfg
+++ b/config-model/src/test/derived/imported_struct_fields/attributes.cfg
@@ -2,7 +2,8 @@ attribute[].name "parent_ref"
attribute[].datatype REFERENCE
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -31,7 +32,8 @@ attribute[].name "my_elem_array.name"
attribute[].datatype STRING
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch true
@@ -60,7 +62,8 @@ attribute[].name "my_elem_array.weight"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -89,7 +92,8 @@ attribute[].name "my_elem_map.key"
attribute[].datatype STRING
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch true
@@ -118,7 +122,8 @@ attribute[].name "my_elem_map.value.name"
attribute[].datatype STRING
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch true
@@ -147,7 +152,8 @@ attribute[].name "my_elem_map.value.weight"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -176,7 +182,8 @@ attribute[].name "my_str_int_map.key"
attribute[].datatype STRING
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch true
@@ -205,7 +212,8 @@ attribute[].name "my_str_int_map.value"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
diff --git a/config-model/src/test/derived/imported_struct_fields/index-info.cfg b/config-model/src/test/derived/imported_struct_fields/index-info.cfg
index a4cef79f861..71ab3502f59 100644
--- a/config-model/src/test/derived/imported_struct_fields/index-info.cfg
+++ b/config-model/src/test/derived/imported_struct_fields/index-info.cfg
@@ -18,6 +18,8 @@ indexinfo[].command[].command "type string"
indexinfo[].command[].indexname "my_elem_array.name"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "my_elem_array.name"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "my_elem_array.name"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "my_elem_array.name"
indexinfo[].command[].command "fast-search"
@@ -42,6 +44,8 @@ indexinfo[].command[].command "type Array<elem>"
indexinfo[].command[].indexname "my_elem_map.value.name"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "my_elem_map.value.name"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "my_elem_map.value.name"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "my_elem_map.value.name"
indexinfo[].command[].command "fast-search"
@@ -64,6 +68,8 @@ indexinfo[].command[].command "type elem"
indexinfo[].command[].indexname "my_elem_map.key"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "my_elem_map.key"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "my_elem_map.key"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "my_elem_map.key"
indexinfo[].command[].command "fast-search"
@@ -80,6 +86,8 @@ indexinfo[].command[].command "type Map<string,elem>"
indexinfo[].command[].indexname "my_str_int_map.key"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "my_str_int_map.key"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "my_str_int_map.key"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "my_str_int_map.key"
indexinfo[].command[].command "fast-search"
diff --git a/config-model/src/test/derived/importedfields/attributes.cfg b/config-model/src/test/derived/importedfields/attributes.cfg
index e86581ecae5..57dd2408869 100644
--- a/config-model/src/test/derived/importedfields/attributes.cfg
+++ b/config-model/src/test/derived/importedfields/attributes.cfg
@@ -2,7 +2,8 @@ attribute[].name "a_ref"
attribute[].datatype REFERENCE
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -31,7 +32,8 @@ attribute[].name "b_ref"
attribute[].datatype REFERENCE
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -60,7 +62,8 @@ attribute[].name "b_ref_with_summary"
attribute[].datatype REFERENCE
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -89,7 +92,8 @@ attribute[].name "my_int_field"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -118,7 +122,8 @@ attribute[].name "my_string_field"
attribute[].datatype STRING
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -147,7 +152,8 @@ attribute[].name "my_int_array_field"
attribute[].datatype INT32
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -176,7 +182,8 @@ attribute[].name "my_int_wset_field"
attribute[].datatype INT32
attribute[].collectiontype WEIGHTEDSET
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -205,7 +212,8 @@ attribute[].name "my_ancient_int_field"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
diff --git a/config-model/src/test/derived/importedfields/index-info.cfg b/config-model/src/test/derived/importedfields/index-info.cfg
index ec44e4f1b11..af67f710ced 100644
--- a/config-model/src/test/derived/importedfields/index-info.cfg
+++ b/config-model/src/test/derived/importedfields/index-info.cfg
@@ -38,6 +38,8 @@ indexinfo[].command[].command "type int"
indexinfo[].command[].indexname "my_string_field"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "my_string_field"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "my_string_field"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "my_string_field"
indexinfo[].command[].command "type string"
@@ -72,6 +74,8 @@ indexinfo[].command[].command "numerical"
indexinfo[].command[].indexname "my_ancient_int_field"
indexinfo[].command[].command "type int"
indexinfo[].command[].indexname "myfieldset"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "myfieldset"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "myfieldset"
indexinfo[].command[].command "index"
diff --git a/config-model/src/test/derived/indexinfo_lowercase/index-info.cfg b/config-model/src/test/derived/indexinfo_lowercase/index-info.cfg
index ac640c09e8c..dc62fc1c101 100644
--- a/config-model/src/test/derived/indexinfo_lowercase/index-info.cfg
+++ b/config-model/src/test/derived/indexinfo_lowercase/index-info.cfg
@@ -18,6 +18,8 @@ indexinfo[].command[].command "type string"
indexinfo[].command[].indexname "nc_attribute"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "nc_attribute"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "nc_attribute"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "nc_attribute"
indexinfo[].command[].command "type string"
@@ -130,6 +132,8 @@ indexinfo[].command[].command "stem:BEST"
indexinfo[].command[].indexname "lc_set7"
indexinfo[].command[].command "normalize"
indexinfo[].command[].indexname "nc_set1"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "nc_set1"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "nc_set1"
indexinfo[].command[].command "index"
@@ -160,6 +164,8 @@ indexinfo[].command[].command "stem:BEST"
indexinfo[].command[].indexname "nc_set4"
indexinfo[].command[].command "normalize"
indexinfo[].command[].indexname "nc_set5"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "nc_set5"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "nc_set5"
indexinfo[].command[].command "index"
diff --git a/config-model/src/test/derived/indexschema/index-info.cfg b/config-model/src/test/derived/indexschema/index-info.cfg
index 8a28038a18d..fd58423b868 100644
--- a/config-model/src/test/derived/indexschema/index-info.cfg
+++ b/config-model/src/test/derived/indexschema/index-info.cfg
@@ -76,6 +76,8 @@ indexinfo[].command[].command "type position"
indexinfo[].command[].indexname "se"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "se"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "se"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "se"
indexinfo[].command[].command "type string"
@@ -112,11 +114,11 @@ indexinfo[].command[].command "type WeightedSet<string>"
indexinfo[].command[].indexname "sh"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "sh"
-indexinfo[].command[].command "lowercase"
-indexinfo[].command[].indexname "sh"
indexinfo[].command[].command "plain-tokens"
indexinfo[].command[].indexname "sh"
indexinfo[].command[].command "fullurl"
+indexinfo[].command[].indexname "sh"
+indexinfo[].command[].command "lowercase"
indexinfo[].command[].indexname "sh.sh"
indexinfo[].command[].command "fullurl"
indexinfo[].command[].indexname "sh.sh"
@@ -368,6 +370,8 @@ indexinfo[].command[].command "stem:BEST"
indexinfo[].command[].indexname "fs4"
indexinfo[].command[].command "normalize"
indexinfo[].command[].indexname "onlyattrib"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "onlyattrib"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "onlyattrib"
indexinfo[].command[].command "index"
diff --git a/config-model/src/test/derived/inheritance/attributes.cfg b/config-model/src/test/derived/inheritance/attributes.cfg
index f20462ef647..9f5588b0000 100644
--- a/config-model/src/test/derived/inheritance/attributes.cfg
+++ b/config-model/src/test/derived/inheritance/attributes.cfg
@@ -2,7 +2,8 @@ attribute[].name "onlygrandparent"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -31,7 +32,8 @@ attribute[].name "overridden"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -60,7 +62,8 @@ attribute[].name "onlymother"
attribute[].datatype STRING
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
diff --git a/config-model/src/test/derived/inheritfromparent/attributes.cfg b/config-model/src/test/derived/inheritfromparent/attributes.cfg
index c6cda880b84..0a1cf0b9f0e 100644
--- a/config-model/src/test/derived/inheritfromparent/attributes.cfg
+++ b/config-model/src/test/derived/inheritfromparent/attributes.cfg
@@ -2,7 +2,8 @@ attribute[].name "weight"
attribute[].datatype FLOAT
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
diff --git a/config-model/src/test/derived/map_attribute/attributes.cfg b/config-model/src/test/derived/map_attribute/attributes.cfg
index 2416cd3b128..ebcca4fde78 100644
--- a/config-model/src/test/derived/map_attribute/attributes.cfg
+++ b/config-model/src/test/derived/map_attribute/attributes.cfg
@@ -2,7 +2,8 @@ attribute[].name "str_map.key"
attribute[].datatype STRING
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch true
@@ -31,7 +32,8 @@ attribute[].name "str_map.value"
attribute[].datatype STRING
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -60,7 +62,8 @@ attribute[].name "int_map.key"
attribute[].datatype INT32
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
diff --git a/config-model/src/test/derived/map_attribute/index-info.cfg b/config-model/src/test/derived/map_attribute/index-info.cfg
index 51ab1f07a43..07e0482f436 100644
--- a/config-model/src/test/derived/map_attribute/index-info.cfg
+++ b/config-model/src/test/derived/map_attribute/index-info.cfg
@@ -6,6 +6,8 @@ indexinfo[].command[].command "word"
indexinfo[].command[].indexname "str_map.key"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "str_map.key"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "str_map.key"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "str_map.key"
indexinfo[].command[].command "fast-search"
@@ -16,6 +18,8 @@ indexinfo[].command[].command "word"
indexinfo[].command[].indexname "str_map.value"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "str_map.value"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "str_map.value"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "str_map.value"
indexinfo[].command[].command "type string"
diff --git a/config-model/src/test/derived/map_of_struct_attribute/attributes.cfg b/config-model/src/test/derived/map_of_struct_attribute/attributes.cfg
index 45f1b6c036e..bc7a17ebd62 100644
--- a/config-model/src/test/derived/map_of_struct_attribute/attributes.cfg
+++ b/config-model/src/test/derived/map_of_struct_attribute/attributes.cfg
@@ -2,7 +2,8 @@ attribute[].name "str_elem_map.key"
attribute[].datatype STRING
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch true
@@ -31,7 +32,8 @@ attribute[].name "str_elem_map.value.name"
attribute[].datatype STRING
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -60,7 +62,8 @@ attribute[].name "str_elem_map.value.weight"
attribute[].datatype INT32
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -89,7 +92,8 @@ attribute[].name "int_elem_map.key"
attribute[].datatype INT32
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -118,7 +122,8 @@ attribute[].name "int_elem_map.value.name"
attribute[].datatype STRING
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch true
diff --git a/config-model/src/test/derived/map_of_struct_attribute/index-info.cfg b/config-model/src/test/derived/map_of_struct_attribute/index-info.cfg
index 606012c17e2..cee154645da 100644
--- a/config-model/src/test/derived/map_of_struct_attribute/index-info.cfg
+++ b/config-model/src/test/derived/map_of_struct_attribute/index-info.cfg
@@ -6,6 +6,8 @@ indexinfo[].command[].command "word"
indexinfo[].command[].indexname "str_elem_map.key"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "str_elem_map.key"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "str_elem_map.key"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "str_elem_map.key"
indexinfo[].command[].command "fast-search"
@@ -16,6 +18,8 @@ indexinfo[].command[].command "word"
indexinfo[].command[].indexname "str_elem_map.value.name"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "str_elem_map.value.name"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "str_elem_map.value.name"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "str_elem_map.value.name"
indexinfo[].command[].command "type string"
@@ -50,6 +54,8 @@ indexinfo[].command[].command "type int"
indexinfo[].command[].indexname "int_elem_map.value.name"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "int_elem_map.value.name"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "int_elem_map.value.name"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "int_elem_map.value.name"
indexinfo[].command[].command "fast-search"
diff --git a/config-model/src/test/derived/music/attributes.cfg b/config-model/src/test/derived/music/attributes.cfg
index fbfd3b15281..bbe95047caa 100644
--- a/config-model/src/test/derived/music/attributes.cfg
+++ b/config-model/src/test/derived/music/attributes.cfg
@@ -2,7 +2,8 @@ attribute[].name "sales"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -31,7 +32,8 @@ attribute[].name "pto"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -60,7 +62,8 @@ attribute[].name "mid"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -89,7 +92,8 @@ attribute[].name "weight"
attribute[].datatype FLOAT
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -118,7 +122,8 @@ attribute[].name "bgnpfrom"
attribute[].datatype FLOAT
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -147,7 +152,8 @@ attribute[].name "newestedition"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -176,7 +182,8 @@ attribute[].name "year"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -205,7 +212,8 @@ attribute[].name "did"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -234,7 +242,8 @@ attribute[].name "cbid"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -263,7 +272,8 @@ attribute[].name "hiphopvalue_arr"
attribute[].datatype STRING
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -292,7 +302,8 @@ attribute[].name "metalvalue_arr"
attribute[].datatype STRING
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
diff --git a/config-model/src/test/derived/music/index-info.cfg b/config-model/src/test/derived/music/index-info.cfg
index 4d44bc7acbe..306aa157623 100644
--- a/config-model/src/test/derived/music/index-info.cfg
+++ b/config-model/src/test/derived/music/index-info.cfg
@@ -268,6 +268,8 @@ indexinfo[].command[].command "type string"
indexinfo[].command[].indexname "hiphopvalue_arr"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "hiphopvalue_arr"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "hiphopvalue_arr"
indexinfo[].command[].command "multivalue"
indexinfo[].command[].indexname "hiphopvalue_arr"
indexinfo[].command[].command "attribute"
@@ -278,6 +280,8 @@ indexinfo[].command[].command "word"
indexinfo[].command[].indexname "metalvalue_arr"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "metalvalue_arr"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "metalvalue_arr"
indexinfo[].command[].command "multivalue"
indexinfo[].command[].indexname "metalvalue_arr"
indexinfo[].command[].command "attribute"
diff --git a/config-model/src/test/derived/newrank/attributes.cfg b/config-model/src/test/derived/newrank/attributes.cfg
index 84c7f6cd099..a690b075f22 100644
--- a/config-model/src/test/derived/newrank/attributes.cfg
+++ b/config-model/src/test/derived/newrank/attributes.cfg
@@ -2,7 +2,8 @@ attribute[].name "sales"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -31,7 +32,8 @@ attribute[].name "pto"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -60,7 +62,8 @@ attribute[].name "mid"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -89,7 +92,8 @@ attribute[].name "weight"
attribute[].datatype FLOAT
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -118,7 +122,8 @@ attribute[].name "bgnpfrom"
attribute[].datatype FLOAT
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -147,7 +152,8 @@ attribute[].name "newestedition"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -176,7 +182,8 @@ attribute[].name "year"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -205,7 +212,8 @@ attribute[].name "did"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -234,7 +242,8 @@ attribute[].name "scorekey"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -263,7 +272,8 @@ attribute[].name "cbid"
attribute[].datatype INT32
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
diff --git a/config-model/src/test/derived/predicate_attribute/attributes.cfg b/config-model/src/test/derived/predicate_attribute/attributes.cfg
index f0e600b8875..1d6e0c9de32 100644
--- a/config-model/src/test/derived/predicate_attribute/attributes.cfg
+++ b/config-model/src/test/derived/predicate_attribute/attributes.cfg
@@ -2,7 +2,8 @@ attribute[].name "some_predicate_field"
attribute[].datatype PREDICATE
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
diff --git a/config-model/src/test/derived/prefixexactattribute/attributes.cfg b/config-model/src/test/derived/prefixexactattribute/attributes.cfg
index 4b39c7caa2c..bb6f61a6f04 100644
--- a/config-model/src/test/derived/prefixexactattribute/attributes.cfg
+++ b/config-model/src/test/derived/prefixexactattribute/attributes.cfg
@@ -2,7 +2,8 @@ attribute[].name "attributefield1"
attribute[].datatype STRING
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -31,7 +32,8 @@ attribute[].name "attributefield2"
attribute[].datatype STRING
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
diff --git a/config-model/src/test/derived/prefixexactattribute/index-info.cfg b/config-model/src/test/derived/prefixexactattribute/index-info.cfg
index 941c5b598cf..0203919140f 100644
--- a/config-model/src/test/derived/prefixexactattribute/index-info.cfg
+++ b/config-model/src/test/derived/prefixexactattribute/index-info.cfg
@@ -18,6 +18,8 @@ indexinfo[].command[].command "type string"
indexinfo[].command[].indexname "attributefield1"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "attributefield1"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "attributefield1"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "attributefield1"
indexinfo[].command[].command "type string"
@@ -26,6 +28,8 @@ indexinfo[].command[].command "exact @"
indexinfo[].command[].indexname "attributefield2"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "attributefield2"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "attributefield2"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "attributefield2"
indexinfo[].command[].command "type string"
diff --git a/config-model/src/test/derived/reference_fields/attributes.cfg b/config-model/src/test/derived/reference_fields/attributes.cfg
index 9f5b80dbe4c..cc10e657011 100644
--- a/config-model/src/test/derived/reference_fields/attributes.cfg
+++ b/config-model/src/test/derived/reference_fields/attributes.cfg
@@ -2,7 +2,8 @@ attribute[].name "campaign_ref"
attribute[].datatype REFERENCE
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -31,7 +32,8 @@ attribute[].name "other_ref"
attribute[].datatype REFERENCE
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -60,7 +62,8 @@ attribute[].name "yet_another_ref"
attribute[].datatype REFERENCE
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
diff --git a/config-model/src/test/derived/sorting/attributes.cfg b/config-model/src/test/derived/sorting/attributes.cfg
index 69c3510df97..5ee9d323041 100644
--- a/config-model/src/test/derived/sorting/attributes.cfg
+++ b/config-model/src/test/derived/sorting/attributes.cfg
@@ -2,7 +2,8 @@ attribute[].name "syntaxcheck"
attribute[].datatype STRING
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -31,7 +32,8 @@ attribute[].name "syntaxcheck2"
attribute[].datatype STRING
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -60,7 +62,8 @@ attribute[].name "infieldonly"
attribute[].datatype STRING
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
diff --git a/config-model/src/test/derived/tensor/attributes.cfg b/config-model/src/test/derived/tensor/attributes.cfg
index 29eff247aed..79f4d2a8a69 100644
--- a/config-model/src/test/derived/tensor/attributes.cfg
+++ b/config-model/src/test/derived/tensor/attributes.cfg
@@ -2,7 +2,8 @@ attribute[].name "f2"
attribute[].datatype TENSOR
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -31,7 +32,8 @@ attribute[].name "f3"
attribute[].datatype TENSOR
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -60,7 +62,8 @@ attribute[].name "f4"
attribute[].datatype TENSOR
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -89,7 +92,8 @@ attribute[].name "f5"
attribute[].datatype TENSOR
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -118,7 +122,8 @@ attribute[].name "f6"
attribute[].datatype FLOAT
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
diff --git a/config-model/src/test/derived/types/attributes.cfg b/config-model/src/test/derived/types/attributes.cfg
index 082752caa43..175dd083f60 100644
--- a/config-model/src/test/derived/types/attributes.cfg
+++ b/config-model/src/test/derived/types/attributes.cfg
@@ -2,7 +2,8 @@ attribute[].name "abyte"
attribute[].datatype INT8
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -31,7 +32,8 @@ attribute[].name "along"
attribute[].datatype INT64
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -60,7 +62,8 @@ attribute[].name "abool"
attribute[].datatype BOOL
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -89,7 +92,8 @@ attribute[].name "ashortfloat"
attribute[].datatype FLOAT16
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -118,7 +122,8 @@ attribute[].name "arrayfield"
attribute[].datatype INT32
attribute[].collectiontype ARRAY
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -147,7 +152,8 @@ attribute[].name "setfield"
attribute[].datatype STRING
attribute[].collectiontype WEIGHTEDSET
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -176,7 +182,8 @@ attribute[].name "setfield2"
attribute[].datatype STRING
attribute[].collectiontype WEIGHTEDSET
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero true
attribute[].createifnonexistent true
attribute[].fastsearch false
@@ -205,7 +212,8 @@ attribute[].name "setfield3"
attribute[].datatype STRING
attribute[].collectiontype WEIGHTEDSET
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero true
attribute[].createifnonexistent false
attribute[].fastsearch false
@@ -234,7 +242,8 @@ attribute[].name "setfield4"
attribute[].datatype STRING
attribute[].collectiontype WEIGHTEDSET
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent true
attribute[].fastsearch false
@@ -263,7 +272,8 @@ attribute[].name "tagfield"
attribute[].datatype STRING
attribute[].collectiontype WEIGHTEDSET
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero true
attribute[].createifnonexistent true
attribute[].fastsearch false
@@ -292,7 +302,8 @@ attribute[].name "juletre"
attribute[].datatype INT64
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch true
@@ -321,7 +332,8 @@ attribute[].name "album1"
attribute[].datatype STRING
attribute[].collectiontype WEIGHTEDSET
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero true
attribute[].createifnonexistent true
attribute[].fastsearch false
@@ -350,7 +362,8 @@ attribute[].name "other"
attribute[].datatype INT64
attribute[].collectiontype SINGLE
attribute[].dictionary.type BTREE
-attribute[].dictionary.match CASE_INSENSITIVE
+attribute[].dictionary.match UNCASED
+attribute[].match UNCASED
attribute[].removeifzero false
attribute[].createifnonexistent false
attribute[].fastsearch false
diff --git a/config-model/src/test/derived/types/index-info.cfg b/config-model/src/test/derived/types/index-info.cfg
index 3bcf43060fc..2db4ead180b 100644
--- a/config-model/src/test/derived/types/index-info.cfg
+++ b/config-model/src/test/derived/types/index-info.cfg
@@ -48,6 +48,8 @@ indexinfo[].command[].command "type Array<int>"
indexinfo[].command[].indexname "setfield"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "setfield"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "setfield"
indexinfo[].command[].command "multivalue"
indexinfo[].command[].indexname "setfield"
indexinfo[].command[].command "attribute"
@@ -56,6 +58,8 @@ indexinfo[].command[].command "type WeightedSet<string>"
indexinfo[].command[].indexname "setfield2"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "setfield2"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "setfield2"
indexinfo[].command[].command "multivalue"
indexinfo[].command[].indexname "setfield2"
indexinfo[].command[].command "attribute"
@@ -66,6 +70,8 @@ indexinfo[].command[].command "word"
indexinfo[].command[].indexname "setfield3"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "setfield3"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "setfield3"
indexinfo[].command[].command "multivalue"
indexinfo[].command[].indexname "setfield3"
indexinfo[].command[].command "attribute"
@@ -74,6 +80,8 @@ indexinfo[].command[].command "type WeightedSet<string>"
indexinfo[].command[].indexname "setfield4"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "setfield4"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "setfield4"
indexinfo[].command[].command "multivalue"
indexinfo[].command[].indexname "setfield4"
indexinfo[].command[].command "attribute"
@@ -82,6 +90,8 @@ indexinfo[].command[].command "type WeightedSet<string>"
indexinfo[].command[].indexname "tagfield"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "tagfield"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "tagfield"
indexinfo[].command[].command "multivalue"
indexinfo[].command[].indexname "tagfield"
indexinfo[].command[].command "attribute"
@@ -672,6 +682,8 @@ indexinfo[].command[].command "type WeightedSet<string>"
indexinfo[].command[].indexname "album1"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "album1"
+indexinfo[].command[].command "lowercase"
+indexinfo[].command[].indexname "album1"
indexinfo[].command[].command "multivalue"
indexinfo[].command[].indexname "album1"
indexinfo[].command[].command "attribute"
diff --git a/config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java b/config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java
index 9780e9b503a..75cb41be13f 100644
--- a/config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java
+++ b/config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.model;
import com.google.common.io.Files;
@@ -197,13 +197,21 @@ public class ApplicationDeployTest {
@Test
public void testThatAppWithInvalidParallelDeploymentFails() throws IOException {
+ String expectedMessage = "4: <staging/>\n" +
+ "5: <prod global-service-id=\"query\">\n" +
+ "6: <parallel>\n" +
+ "7: <instance id=\"hello\" />\n" +
+ "8: </parallel>\n" +
+ "9: </prod>\n" +
+ "10:</deployment>\n";
File tmpDir = tmpFolder.getRoot();
IOUtils.copyDirectory(new File(TESTDIR, "invalid_parallel_deployment_xml"), tmpDir);
try {
ApplicationPackageTester.create(tmpDir.getAbsolutePath());
fail("Expected exception");
} catch (IllegalArgumentException e) {
- assertEquals("Invalid XML according to XML schema, error in deployment.xml: element \"instance\" not allowed here; expected the element end-tag or element \"delay\", \"region\", \"steps\" or \"test\" [7:30], input:\n", e.getMessage());
+ assertEquals("Invalid XML according to XML schema, error in deployment.xml: element \"instance\" not allowed here; expected the element end-tag or element \"delay\", \"region\", \"steps\" or \"test\" [7:30], input:\n" + expectedMessage,
+ e.getMessage());
}
}
diff --git a/config-model/src/test/java/com/yahoo/config/model/application/provider/SchemaValidatorTest.java b/config-model/src/test/java/com/yahoo/config/model/application/provider/SchemaValidatorTest.java
index c2938746443..3849e9e03fd 100644
--- a/config-model/src/test/java/com/yahoo/config/model/application/provider/SchemaValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/config/model/application/provider/SchemaValidatorTest.java
@@ -2,12 +2,14 @@
package com.yahoo.config.model.application.provider;
import com.yahoo.component.Version;
+import com.yahoo.io.IOUtils;
import com.yahoo.vespa.config.VespaVersion;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.xml.sax.InputSource;
+import java.io.File;
import java.io.IOException;
import java.io.StringReader;
@@ -47,15 +49,15 @@ public class SchemaValidatorTest {
@Test
public void testXMLParse() throws IOException {
SchemaValidator validator = createValidator();
- validator.validate(new InputSource(new StringReader(okServices)), "services.xml");
+ validator.validate(new StringReader(okServices));
}
@Test
public void testXMLParseError() throws IOException {
SchemaValidator validator = createValidator();
expectedException.expect(RuntimeException.class);
- expectedException.expectMessage(expectedErrorMessage("services.xml"));
- validator.validate(new InputSource(new StringReader(invalidServices)), "services.xml");
+ expectedException.expectMessage(expectedErrorMessage("input"));
+ validator.validate(new StringReader(invalidServices));
}
@Test
@@ -72,6 +74,14 @@ public class SchemaValidatorTest {
validator.validate(new StringReader(invalidServices));
}
+ @Test
+ public void testXMLParseErrorFromFile() throws IOException {
+ SchemaValidator validator = createValidator();
+ expectedException.expect(IllegalArgumentException.class);
+ expectedException.expectMessage(expectedErrorMessage("services.xml"));
+ validator.validate(new File("src/test/cfg/application/invalid-services-syntax/services.xml"));
+ }
+
private SchemaValidator createValidator() {
return new SchemaValidators(new Version(VespaVersion.major)).servicesXmlValidator();
}
diff --git a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
index 5a06379a1c2..86668fe3098 100644
--- a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
+++ b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
@@ -264,8 +264,7 @@ public class ModelProvisioningTest {
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
- tester.dedicatedClusterControllerCluster(false);
- tester.addHosts(2);
+ tester.addHosts(5);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size());
@@ -330,8 +329,7 @@ public class ModelProvisioningTest {
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
- tester.dedicatedClusterControllerCluster(false);
- tester.addHosts(2);
+ tester.addHosts(5);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
@@ -583,9 +581,8 @@ public class ModelProvisioningTest {
" </content>" +
"</services>";
- int numberOfHosts = 64;
+ int numberOfHosts = 67;
VespaModelTester tester = new VespaModelTester();
- tester.dedicatedClusterControllerCluster(false);
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
@@ -604,23 +601,17 @@ public class ModelProvisioningTest {
assertTrue("Slobroks are assigned from container nodes", containerHosts.containsAll(slobrokHosts));
assertTrue("Logserver is assigned from container nodes", containerHosts.contains(admin.getLogserver().getHost()));
assertEquals("No in-cluster config servers in a hosted environment", 0, admin.getConfigservers().size());
- assertEquals("No admin cluster controller when multitenant", null, admin.getClusterControllers());
+ assertEquals(3, admin.getClusterControllers().getContainers().size());
// Check content clusters
ContentCluster cluster = model.getContentClusters().get("bar");
- ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers();
- assertEquals(3, clusterControllers.getContainers().size());
- assertEquals("bar-controllers", clusterControllers.getName());
- assertEquals("node-1-3-10-54", clusterControllers.getContainers().get(0).getHostName());
- assertEquals("node-1-3-10-51", clusterControllers.getContainers().get(1).getHostName());
- assertEquals("node-1-3-10-48", clusterControllers.getContainers().get(2).getHostName());
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(9, cluster.getRootGroup().getSubgroups().size());
assertEquals("0", cluster.getRootGroup().getSubgroups().get(0).getIndex());
assertEquals(3, cluster.getRootGroup().getSubgroups().get(0).getNodes().size());
assertEquals(0, cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey());
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0"));
- assertEquals("node-1-3-10-54", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
+ assertEquals("node-1-3-10-57", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getDistributionKey(), is(2));
@@ -629,13 +620,13 @@ public class ModelProvisioningTest {
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/3"));
- assertEquals("node-1-3-10-51", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
+ assertEquals("node-1-3-10-54", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(4));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/4"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getDistributionKey(), is(5));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getConfigId(), is("bar/storage/5"));
// ...
- assertEquals("node-1-3-10-48", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
+ assertEquals("node-1-3-10-51", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
// ...
assertThat(cluster.getRootGroup().getSubgroups().get(8).getIndex(), is("8"));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().size(), is(3));
@@ -647,12 +638,6 @@ public class ModelProvisioningTest {
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getConfigId(), is("bar/storage/26"));
cluster = model.getContentClusters().get("baz");
- clusterControllers = cluster.getClusterControllers();
- assertEquals(3, clusterControllers.getContainers().size());
- assertEquals("baz-controllers", clusterControllers.getName());
- assertEquals("node-1-3-10-27", clusterControllers.getContainers().get(0).getHostName());
- assertEquals("node-1-3-10-26", clusterControllers.getContainers().get(1).getHostName());
- assertEquals("node-1-3-10-25", clusterControllers.getContainers().get(2).getHostName());
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(27, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
@@ -732,41 +717,6 @@ public class ModelProvisioningTest {
}
@Test
- public void testClusterControllersWithGroupSize2() {
- String services =
- "<?xml version='1.0' encoding='utf-8' ?>\n" +
- "<services>" +
- " <admin version='4.0'/>" +
- " <container version='1.0' id='foo'>" +
- " <nodes count='10'/>" +
- " </container>" +
- " <content version='1.0' id='bar'>" +
- " <redundancy>2</redundancy>" +
- " <documents>" +
- " <document type='type1' mode='index'/>" +
- " </documents>" +
- " <nodes count='8' groups='4'/>" +
- " </content>" +
- "</services>";
-
- int numberOfHosts = 18;
- VespaModelTester tester = new VespaModelTester();
- tester.dedicatedClusterControllerCluster(false);
- tester.addHosts(numberOfHosts);
- VespaModel model = tester.createModel(services, true);
- assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts));
-
- // Check content clusters
- ContentCluster cluster = model.getContentClusters().get("bar");
- ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers();
- assertEquals("We get the closest odd number", 3, clusterControllers.getContainers().size());
- assertEquals("bar-controllers", clusterControllers.getName());
- assertEquals("node-1-3-10-08", clusterControllers.getContainers().get(0).getHostName());
- assertEquals("node-1-3-10-06", clusterControllers.getContainers().get(1).getHostName());
- assertEquals("node-1-3-10-04", clusterControllers.getContainers().get(2).getHostName());
- }
-
- @Test
public void testSlobroksClustersAreExpandedToIncludeRetiredNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
@@ -777,7 +727,7 @@ public class ModelProvisioningTest {
" </container>" +
"</services>";
- int numberOfHosts = 10;
+ int numberOfHosts = 11;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true, "node-1-3-10-09");
@@ -785,9 +735,9 @@ public class ModelProvisioningTest {
// Check slobroks clusters
assertEquals("Includes retired node", 1+3, model.getAdmin().getSlobroks().size());
- assertEquals("node-1-3-10-10", model.getAdmin().getSlobroks().get(0).getHostName());
- assertEquals("node-1-3-10-08", model.getAdmin().getSlobroks().get(1).getHostName());
- assertEquals("node-1-3-10-07", model.getAdmin().getSlobroks().get(2).getHostName());
+ assertEquals("node-1-3-10-11", model.getAdmin().getSlobroks().get(0).getHostName());
+ assertEquals("node-1-3-10-10", model.getAdmin().getSlobroks().get(1).getHostName());
+ assertEquals("node-1-3-10-08", model.getAdmin().getSlobroks().get(2).getHostName());
assertEquals("Included in addition because it is retired", "node-1-3-10-09", model.getAdmin().getSlobroks().get(3).getHostName());
}
@@ -802,19 +752,19 @@ public class ModelProvisioningTest {
" </container>" +
"</services>";
- int numberOfHosts = 10;
+ int numberOfHosts = 12;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
- VespaModel model = tester.createModel(services, true, "node-1-3-10-01", "node-1-3-10-02");
- assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
+ VespaModel model = tester.createModel(services, true, "node-1-3-10-03", "node-1-3-10-04");
+ assertEquals(10+2, model.getRoot().hostSystem().getHosts().size());
// Check slobroks clusters
assertEquals("Includes retired node", 3+2, model.getAdmin().getSlobroks().size());
- assertEquals("node-1-3-10-10", model.getAdmin().getSlobroks().get(0).getHostName());
- assertEquals("node-1-3-10-09", model.getAdmin().getSlobroks().get(1).getHostName());
- assertEquals("node-1-3-10-08", model.getAdmin().getSlobroks().get(2).getHostName());
- assertEquals("Included in addition because it is retired", "node-1-3-10-02", model.getAdmin().getSlobroks().get(3).getHostName());
- assertEquals("Included in addition because it is retired", "node-1-3-10-01", model.getAdmin().getSlobroks().get(4).getHostName());
+ assertEquals("node-1-3-10-12", model.getAdmin().getSlobroks().get(0).getHostName());
+ assertEquals("node-1-3-10-11", model.getAdmin().getSlobroks().get(1).getHostName());
+ assertEquals("node-1-3-10-10", model.getAdmin().getSlobroks().get(2).getHostName());
+ assertEquals("Included in addition because it is retired", "node-1-3-10-04", model.getAdmin().getSlobroks().get(3).getHostName());
+ assertEquals("Included in addition because it is retired", "node-1-3-10-03", model.getAdmin().getSlobroks().get(4).getHostName());
}
@Test
@@ -831,48 +781,22 @@ public class ModelProvisioningTest {
" </container>" +
"</services>";
- int numberOfHosts = 13;
+ int numberOfHosts = 16;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
- VespaModel model = tester.createModel(services, true, "node-1-3-10-12", "node-1-3-10-03", "node-1-3-10-02");
+ VespaModel model = tester.createModel(services, true, "node-1-3-10-15", "node-1-3-10-05", "node-1-3-10-04");
assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts));
// Check slobroks clusters
// ... from cluster default
- assertEquals("Includes retired node", 3+3, model.getAdmin().getSlobroks().size());
- assertEquals("node-1-3-10-13", model.getAdmin().getSlobroks().get(0).getHostName());
- assertEquals("node-1-3-10-11", model.getAdmin().getSlobroks().get(1).getHostName());
- assertEquals("Included in addition because it is retired", "node-1-3-10-12", model.getAdmin().getSlobroks().get(2).getHostName());
+ assertEquals("Includes retired node", 7, model.getAdmin().getSlobroks().size());
+ assertEquals("node-1-3-10-16", model.getAdmin().getSlobroks().get(0).getHostName());
+ assertEquals("node-1-3-10-14", model.getAdmin().getSlobroks().get(1).getHostName());
+ assertEquals("Included in addition because it is retired", "node-1-3-10-15", model.getAdmin().getSlobroks().get(2).getHostName());
// ... from cluster bar
- assertEquals("node-1-3-10-01", model.getAdmin().getSlobroks().get(3).getHostName());
- assertEquals("Included in addition because it is retired", "node-1-3-10-03", model.getAdmin().getSlobroks().get(4).getHostName());
- assertEquals("Included in addition because it is retired", "node-1-3-10-02", model.getAdmin().getSlobroks().get(5).getHostName());
- }
-
- @Test
- public void test2ContentNodesProduces1ClusterController() {
- String services =
- "<?xml version='1.0' encoding='utf-8' ?>\n" +
- "<services>" +
- " <content version='1.0' id='bar'>" +
- " <redundancy>2</redundancy>" +
- " <documents>" +
- " <document type='type1' mode='index'/>" +
- " </documents>" +
- " <nodes count='2'/>" +
- " </content>" +
- "</services>";
-
- int numberOfHosts = 2;
- VespaModelTester tester = new VespaModelTester();
- tester.dedicatedClusterControllerCluster(false);
- tester.addHosts(numberOfHosts);
- VespaModel model = tester.createModel(services, true);
- assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts));
-
- ContentCluster cluster = model.getContentClusters().get("bar");
- ContainerCluster clusterControllers = cluster.getClusterControllers();
- assertEquals(1, clusterControllers.getContainers().size());
+ assertEquals("node-1-3-10-03", model.getAdmin().getSlobroks().get(3).getHostName());
+ assertEquals("Included in addition because it is retired", "node-1-3-10-05", model.getAdmin().getSlobroks().get(5).getHostName());
+ assertEquals("Included in addition because it is retired", "node-1-3-10-04", model.getAdmin().getSlobroks().get(6).getHostName());
}
@Test
@@ -967,7 +891,7 @@ public class ModelProvisioningTest {
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false);
- assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts));
+ assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
ContentCluster cluster = model.getContentClusters().get("bar");
assertEquals(2*3, cluster.redundancy().effectiveInitialRedundancy()); // Reduced from 3*3
@@ -997,6 +921,72 @@ public class ModelProvisioningTest {
}
@Test
+ public void testRedundancyWithGroupsTooHighRedundancyAndOneRetiredNode() {
+ String services =
+ "<?xml version='1.0' encoding='utf-8' ?>" +
+ "<services>" +
+ " <content version='1.0' id='bar'>" +
+ " <redundancy>2</redundancy>" + // Should have been illegal since we only have 1 node per group
+ " <documents>" +
+ " <document type='type1' mode='index'/>" +
+ " </documents>" +
+ " <nodes count='2' groups='2'/>" +
+ " </content>" +
+ "</services>";
+
+ int numberOfHosts = 3;
+ VespaModelTester tester = new VespaModelTester();
+ tester.addHosts(numberOfHosts);
+ VespaModel model = tester.createModel(services, false, "node-1-3-10-03");
+ assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
+
+ ContentCluster cluster = model.getContentClusters().get("bar");
+ assertEquals(2, cluster.redundancy().effectiveInitialRedundancy());
+ assertEquals(2, cluster.redundancy().effectiveFinalRedundancy());
+ assertEquals(2, cluster.redundancy().effectiveReadyCopies());
+ assertEquals("1|*", cluster.getRootGroup().getPartitions().get());
+ assertEquals(0, cluster.getRootGroup().getNodes().size());
+ assertEquals(2, cluster.getRootGroup().getSubgroups().size());
+ System.out.println("Nodes in group 0: ");
+ cluster.getRootGroup().getSubgroups().get(0).getNodes().forEach(n -> System.out.println(" " + n));
+ System.out.println("Nodes in group 1: ");
+ cluster.getRootGroup().getSubgroups().get(1).getNodes().forEach(n -> System.out.println(" " + n));
+ }
+
+ @Test
+ public void testRedundancyWithGroupsAndThreeRetiredNodes() {
+ String services =
+ "<?xml version='1.0' encoding='utf-8' ?>" +
+ "<services>" +
+ " <content version='1.0' id='bar'>" +
+ " <redundancy>1</redundancy>" +
+ " <documents>" +
+ " <document type='type1' mode='index'/>" +
+ " </documents>" +
+ " <nodes count='2' groups='2'/>" +
+ " </content>" +
+ "</services>";
+
+ int numberOfHosts = 5;
+ VespaModelTester tester = new VespaModelTester();
+ tester.addHosts(numberOfHosts);
+ VespaModel model = tester.createModel(services, false, "node-1-3-10-05", "node-1-3-10-04", "node-1-3-10-03");
+ assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
+
+ ContentCluster cluster = model.getContentClusters().get("bar");
+ assertEquals(2, cluster.redundancy().effectiveInitialRedundancy());
+ assertEquals(2, cluster.redundancy().effectiveFinalRedundancy());
+ assertEquals(2, cluster.redundancy().effectiveReadyCopies());
+ assertEquals("1|*", cluster.getRootGroup().getPartitions().get());
+ assertEquals(0, cluster.getRootGroup().getNodes().size());
+ assertEquals(2, cluster.getRootGroup().getSubgroups().size());
+ System.out.println("Nodes in group 0: ");
+ cluster.getRootGroup().getSubgroups().get(0).getNodes().forEach(n -> System.out.println(" " + n));
+ System.out.println("Nodes in group 1: ");
+ cluster.getRootGroup().getSubgroups().get(1).getNodes().forEach(n -> System.out.println(" " + n));
+ }
+
+ @Test
public void testUsingNodesCountAttributesAndGettingTooFewNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
@@ -1065,16 +1055,11 @@ public class ModelProvisioningTest {
int numberOfHosts = 1; // We only have 1 content node -> 1 groups with redundancy 1
VespaModelTester tester = new VespaModelTester();
- tester.dedicatedClusterControllerCluster(false);
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false);
assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
- ClusterControllerContainerCluster clusterControllers = cluster.getClusterControllers();
- assertEquals(1, clusterControllers.getContainers().size());
- assertEquals("bar-controllers", clusterControllers.getName());
- assertEquals("node-1-3-10-01", clusterControllers.getContainers().get(0).getHostName());
assertEquals(1, cluster.redundancy().effectiveInitialRedundancy()); // Reduced from 3*3
assertEquals(1, cluster.redundancy().effectiveFinalRedundancy()); // Reduced from 3*4
assertEquals(1, cluster.redundancy().effectiveReadyCopies()); // Reduced from 3*3
@@ -1229,14 +1214,14 @@ public class ModelProvisioningTest {
" </content>" +
"</services>";
- int totalHosts = 18;
+ int totalHosts = 21;
VespaModelTester tester = new VespaModelTester();
- tester.dedicatedClusterControllerCluster(false);
tester.addHosts(new NodeResources(0.1, 0.2, 300, 0.3, NodeResources.DiskSpeed.slow), 1);// Logserver
tester.addHosts(new NodeResources(0.1, 0.3, 1, 0.5), 2); // Slobrok
tester.addHosts(new NodeResources(12, 10, 30, 0.3), 4); // Container
tester.addHosts(new NodeResources(8, 200, 1000000, 0.3), 5); // Content-foo
tester.addHosts(new NodeResources(10, 64, 200, 0.3), 6); // Content-bar
+ tester.addHosts(new NodeResources(0.5, 2, 10, 0.3), 6); // Cluster-controller
VespaModel model = tester.createModel(services, true, 0);
assertEquals(totalHosts, model.getRoot().hostSystem().getHosts().size());
}
@@ -1263,11 +1248,11 @@ public class ModelProvisioningTest {
int totalHosts = 10;
VespaModelTester tester = new VespaModelTester();
- tester.dedicatedClusterControllerCluster(false);
- tester.addHosts(new NodeResources(11.5, 10, 30, 0.3), 6);
- tester.addHosts(new NodeResources(85, 200, 1000_000_000, 0.3), 20);
+ tester.addHosts(new NodeResources(11.5, 10, 30, 0.3), 6);
+ tester.addHosts(new NodeResources(85, 200, 1000_000_000, 0.3), 20);
+ tester.addHosts(new NodeResources( 0.5, 2, 10, 0.3), 3);
VespaModel model = tester.createModel(services, true);
- assertEquals(totalHosts, model.getRoot().hostSystem().getHosts().size());
+ assertEquals(totalHosts + 3, model.getRoot().hostSystem().getHosts().size());
}
@Test
@@ -1290,11 +1275,11 @@ public class ModelProvisioningTest {
" </content>" +
"</services>";
- int totalHosts = 26;
+ int totalHosts = 29;
VespaModelTester tester = new VespaModelTester();
- tester.dedicatedClusterControllerCluster(false);
- tester.addHosts(new NodeResources(13.5, 100, 1000, 0.3), 6);
- tester.addHosts(new NodeResources(85, 200, 1000_000_000, 0.3), 20);
+ tester.addHosts(new NodeResources(13.5, 100, 1000, 0.3), 6);
+ tester.addHosts(new NodeResources(85, 200, 1000_000_000, 0.3), 20);
+ tester.addHosts(new NodeResources( 0.5, 2, 10, 0.3), 3);
VespaModel model = tester.createModel(services, true, true);
assertEquals(totalHosts, model.getRoot().hostSystem().getHosts().size());
}
@@ -1515,10 +1500,9 @@ public class ModelProvisioningTest {
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
- tester.dedicatedClusterControllerCluster(false);
- tester.addHosts(3);
+ tester.addHosts(6);
VespaModel model = tester.createModel(services, true);
- assertEquals(3, model.getRoot().hostSystem().getHosts().size());
+ assertEquals(6, model.getRoot().hostSystem().getHosts().size());
assertEquals(2, model.getAdmin().getSlobroks().size());
assertEquals(2, model.getContainerClusters().get("foo").getContainers().size());
assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes());
@@ -1633,7 +1617,7 @@ public class ModelProvisioningTest {
assertEquals(1, model.getRoot().hostSystem().getHosts().size());
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(2, content.getRootGroup().getNodes().size());
- ContainerCluster controller = content.getClusterControllers();
+ ContainerCluster<?> controller = model.getAdmin().getClusterControllers();
assertEquals(1, controller.getContainers().size());
}
@@ -1679,7 +1663,7 @@ public class ModelProvisioningTest {
assertThat(model.getRoot().hostSystem().getHosts().size(), is(1));
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(1, content.getRootGroup().getNodes().size());
- ContainerCluster controller = content.getClusterControllers();
+ ContainerCluster<?> controller = model.getAdmin().getClusterControllers();
assertEquals(1, controller.getContainers().size());
}
@@ -1788,10 +1772,10 @@ public class ModelProvisioningTest {
" </services>";
VespaModel model = createNonProvisionedMultitenantModel(services);
- assertThat(model.getRoot().hostSystem().getHosts().size(), is(1));
+ assertEquals(1, model.getRoot().hostSystem().getHosts().size());
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(2, content.getRootGroup().getNodes().size());
- ContainerCluster controller = content.getClusterControllers();
+ ContainerCluster<?> controller = model.getAdmin().getClusterControllers();
assertEquals(1, controller.getContainers().size());
}
@@ -1816,8 +1800,7 @@ public class ModelProvisioningTest {
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
- tester.dedicatedClusterControllerCluster(false);
- tester.addHosts(6);
+ tester.addHosts(9);
VespaModel model = tester.createModel(servicesXml, true);
Map<String, Boolean> tests = Map.of("qrs", false,
@@ -1841,12 +1824,12 @@ public class ModelProvisioningTest {
"<services>" +
" <container version='1.0' id='zk'>" +
" <zookeeper/>" +
- " <nodes count='4'/>" + // (3 + 1 retired)
+ " <nodes count='3'/>" +
" </container>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(4);
- VespaModel model = tester.createModel(servicesXml, true, "node-1-3-10-01");
+ VespaModel model = tester.createModel(servicesXml, true, "node-1-3-10-04");
ApplicationContainerCluster cluster = model.getContainerClusters().get("zk");
assertEquals(1, cluster.getContainers().stream().filter(Container::isRetired).count());
assertEquals(3, cluster.getContainers().stream().filter(c -> !c.isRetired()).count());
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/RankingExpressionShadowingTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/RankingExpressionShadowingTestCase.java
index 8fe4a8fb022..d665b7f20f0 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/RankingExpressionShadowingTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/RankingExpressionShadowingTestCase.java
@@ -159,7 +159,7 @@ public class RankingExpressionShadowingTestCase extends SchemaTestCase {
public void testNeuralNetworkSetup() throws ParseException {
// Note: the type assigned to query profile and constant tensors here is not the correct type
RankProfileRegistry rankProfileRegistry = new RankProfileRegistry();
- QueryProfileRegistry queryProfiles = queryProfileWith("query(q)", "tensor(x[1])");
+ QueryProfileRegistry queryProfiles = queryProfileWith("query(q)", "tensor(input[1])");
SearchBuilder builder = new SearchBuilder(rankProfileRegistry, queryProfiles);
builder.importString(
"search test {\n" +
@@ -184,19 +184,19 @@ public class RankingExpressionShadowingTestCase extends SchemaTestCase {
" }\n" +
" }\n" +
" constant W_hidden {\n" +
- " type: tensor(x[1])\n" +
+ " type: tensor(hidden[1])\n" +
" file: ignored.json\n" +
" }\n" +
" constant b_input {\n" +
- " type: tensor(x[1])\n" +
+ " type: tensor(hidden[1])\n" +
" file: ignored.json\n" +
" }\n" +
" constant W_final {\n" +
- " type: tensor(x[1])\n" +
+ " type: tensor(final[1])\n" +
" file: ignored.json\n" +
" }\n" +
" constant b_final {\n" +
- " type: tensor(x[1])\n" +
+ " type: tensor(final[1])\n" +
" file: ignored.json\n" +
" }\n" +
"}\n");
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/DictionaryTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/DictionaryTestCase.java
index ba51caca0f7..df0d58d3c8a 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/DictionaryTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/DictionaryTestCase.java
@@ -6,12 +6,16 @@ import com.yahoo.config.model.test.TestUtil;
import com.yahoo.searchdefinition.Search;
import com.yahoo.searchdefinition.SearchBuilder;
import com.yahoo.searchdefinition.derived.AttributeFields;
+import com.yahoo.searchdefinition.document.Case;
import com.yahoo.searchdefinition.document.Dictionary;
+import com.yahoo.searchdefinition.document.ImmutableSDField;
+import com.yahoo.searchdefinition.document.Matching;
import com.yahoo.searchdefinition.parser.ParseException;
import com.yahoo.vespa.config.search.AttributesConfig;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
import static org.junit.Assert.fail;
/**
@@ -44,14 +48,19 @@ public class DictionaryTestCase {
" }",
"}");
Search search = createSearch(def);
- assertEquals(Dictionary.Type.BTREE, search.getAttribute("s1").getDictionary().getType());
- assertEquals(Dictionary.Type.BTREE, search.getAttribute("n1").getDictionary().getType());
+ assertNull(search.getAttribute("s1").getDictionary());
+ assertNull(search.getAttribute("n1").getDictionary());
+ assertEquals(AttributesConfig.Attribute.Dictionary.Type.BTREE,
+ getConfig(search).attribute().get(0).dictionary().type());
+ assertEquals(AttributesConfig.Attribute.Dictionary.Type.BTREE,
+ getConfig(search).attribute().get(1).dictionary().type());
+ assertEquals(AttributesConfig.Attribute.Dictionary.Match.UNCASED,
+ getConfig(search).attribute().get(0).dictionary().match());
+ assertEquals(AttributesConfig.Attribute.Dictionary.Match.UNCASED,
+ getConfig(search).attribute().get(1).dictionary().match());
}
- void verifyNumericDictionaryControl(Dictionary.Type expected,
- AttributesConfig.Attribute.Dictionary.Type.Enum expectedConfig,
- String type,
- String ... cfg) throws ParseException
+ Search verifyDictionaryControl(Dictionary.Type expected, String type, String ... cfg) throws ParseException
{
String def = TestUtil.joinLines(
"search test {",
@@ -64,75 +73,147 @@ public class DictionaryTestCase {
" }",
"}");
Search search = createSearch(def);
+ AttributesConfig.Attribute.Dictionary.Type.Enum expectedConfig = toCfg(expected);
assertEquals(expected, search.getAttribute("n1").getDictionary().getType());
- assertEquals(expectedConfig,
- getConfig(search).attribute().get(0).dictionary().type());
+ assertEquals(expectedConfig, getConfig(search).attribute().get(0).dictionary().type());
+ return search;
+ }
+
+ AttributesConfig.Attribute.Dictionary.Type.Enum toCfg(Dictionary.Type v) {
+ return (v == Dictionary.Type.HASH)
+ ? AttributesConfig.Attribute.Dictionary.Type.Enum.HASH
+ : (v == Dictionary.Type.BTREE)
+ ? AttributesConfig.Attribute.Dictionary.Type.Enum.BTREE
+ : AttributesConfig.Attribute.Dictionary.Type.Enum.BTREE_AND_HASH;
+ }
+ AttributesConfig.Attribute.Dictionary.Match.Enum toCfg(Case v) {
+ return (v == Case.CASED)
+ ? AttributesConfig.Attribute.Dictionary.Match.Enum.CASED
+ : AttributesConfig.Attribute.Dictionary.Match.Enum.UNCASED;
+ }
+
+ void verifyStringDictionaryControl(Dictionary.Type expectedType, Case expectedCase, Case matchCasing,
+ String ... cfg) throws ParseException
+ {
+ Search search = verifyDictionaryControl(expectedType, "string", cfg);
+ ImmutableSDField f = search.getField("n1");
+ AttributesConfig.Attribute.Dictionary.Match.Enum expectedCaseCfg = toCfg(expectedCase);
+ assertEquals(matchCasing, f.getMatching().getCase());
+ assertEquals(expectedCase, search.getAttribute("n1").getDictionary().getMatch());
+ assertEquals(expectedCaseCfg, getConfig(search).attribute().get(0).dictionary().match());
+ }
+
+ @Test
+ public void testCasedBtreeSettings() throws ParseException {
+ verifyDictionaryControl(Dictionary.Type.BTREE, "int", "dictionary:cased");
}
@Test
public void testNumericBtreeSettings() throws ParseException {
- verifyNumericDictionaryControl(Dictionary.Type.BTREE,
- AttributesConfig.Attribute.Dictionary.Type.BTREE,
- "int",
- "dictionary:btree");
+ verifyDictionaryControl(Dictionary.Type.BTREE, "int", "dictionary:btree");
}
@Test
public void testNumericHashSettings() throws ParseException {
- verifyNumericDictionaryControl(Dictionary.Type.HASH,
- AttributesConfig.Attribute.Dictionary.Type.HASH,
- "int",
- "dictionary:hash");
+ verifyDictionaryControl(Dictionary.Type.HASH, "int", "dictionary:hash");
}
@Test
public void testNumericBtreeAndHashSettings() throws ParseException {
- verifyNumericDictionaryControl(Dictionary.Type.BTREE_AND_HASH,
- AttributesConfig.Attribute.Dictionary.Type.BTREE_AND_HASH,
- "int",
- "dictionary:btree", "dictionary:hash");
+ verifyDictionaryControl(Dictionary.Type.BTREE_AND_HASH, "int", "dictionary:btree", "dictionary:hash");
}
@Test
public void testNumericArrayBtreeAndHashSettings() throws ParseException {
- verifyNumericDictionaryControl(Dictionary.Type.BTREE_AND_HASH,
- AttributesConfig.Attribute.Dictionary.Type.BTREE_AND_HASH,
- "array<int>",
- "dictionary:btree", "dictionary:hash");
+ verifyDictionaryControl(Dictionary.Type.BTREE_AND_HASH, "array<int>", "dictionary:btree", "dictionary:hash");
}
@Test
public void testNumericWSetBtreeAndHashSettings() throws ParseException {
- verifyNumericDictionaryControl(Dictionary.Type.BTREE_AND_HASH,
- AttributesConfig.Attribute.Dictionary.Type.BTREE_AND_HASH,
- "weightedset<int>",
- "dictionary:btree", "dictionary:hash");
+ verifyDictionaryControl(Dictionary.Type.BTREE_AND_HASH, "weightedset<int>", "dictionary:btree", "dictionary:hash");
+ }
+ @Test
+ public void testStringBtreeSettings() throws ParseException {
+ verifyStringDictionaryControl(Dictionary.Type.BTREE, Case.UNCASED, Case.UNCASED, "dictionary:btree");
+ }
+ @Test
+ public void testStringBtreeUnCasedSettings() throws ParseException {
+ verifyStringDictionaryControl(Dictionary.Type.BTREE, Case.UNCASED, Case.UNCASED, "dictionary { btree\nuncased\n}");
+ }
+ @Test
+ public void testStringBtreeCasedSettings() throws ParseException {
+ verifyStringDictionaryControl(Dictionary.Type.BTREE, Case.CASED, Case.CASED, "dictionary { btree\ncased\n}", "match:cased");
+ }
+ @Test
+ public void testStringHashSettings() throws ParseException {
+ try {
+ verifyStringDictionaryControl(Dictionary.Type.HASH, Case.UNCASED, Case.UNCASED, "dictionary:hash");
+ } catch (IllegalArgumentException e) {
+ assertEquals("For search 'test', field 'n1': hash dictionary require cased match", e.getMessage());
+ }
+ }
+ @Test
+ public void testStringHashUnCasedSettings() throws ParseException {
+ try {
+ verifyStringDictionaryControl(Dictionary.Type.HASH, Case.UNCASED, Case.UNCASED, "dictionary { hash\nuncased\n}");
+ } catch (IllegalArgumentException e) {
+ assertEquals("For search 'test', field 'n1': hash dictionary require cased match", e.getMessage());
+ }
+ }
+ @Test
+ public void testStringHashBothCasedSettings() throws ParseException {
+ verifyStringDictionaryControl(Dictionary.Type.HASH, Case.CASED, Case.CASED, "dictionary { hash\ncased\n}", "match:cased");
+ }
+ @Test
+ public void testStringHashCasedSettings() throws ParseException {
+ try {
+ verifyStringDictionaryControl(Dictionary.Type.HASH, Case.CASED, Case.CASED, "dictionary { hash\ncased\n}");
+ fail();
+ } catch (IllegalArgumentException e) {
+ assertEquals("For search 'test', field 'n1': Dictionary casing 'CASED' does not match field match casing 'UNCASED'", e.getMessage());
+ }
+ }
+ @Test
+ public void testStringBtreeHashSettings() throws ParseException {
+ verifyStringDictionaryControl(Dictionary.Type.BTREE_AND_HASH, Case.UNCASED, Case.UNCASED, "dictionary{hash\nbtree\n}");
+ }
+ @Test
+ public void testStringBtreeHashUnCasedSettings() throws ParseException {
+ verifyStringDictionaryControl(Dictionary.Type.BTREE_AND_HASH, Case.UNCASED, Case.UNCASED, "dictionary { hash\nbtree\nuncased\n}");
+ }
+ @Test
+ public void testStringBtreeHashCasedSettings() throws ParseException {
+ try {
+ verifyStringDictionaryControl(Dictionary.Type.BTREE_AND_HASH, Case.CASED, Case.CASED, "dictionary { btree\nhash\ncased\n}");
+ } catch (IllegalArgumentException e) {
+ assertEquals("For search 'test', field 'n1': Dictionary casing 'CASED' does not match field match casing 'UNCASED'", e.getMessage());
+ }
}
@Test
public void testNonNumericFieldsFailsDictionaryControl() throws ParseException {
- String def =
- "search test {\n" +
- " document test {\n" +
- " field n1 type string {\n" +
- " indexing: summary | attribute\n" +
- " dictionary:btree\n" +
- " }\n" +
- " }\n" +
- "}\n";
+ String def = TestUtil.joinLines(
+ "search test {",
+ " document test {",
+ " field n1 type bool {",
+ " indexing: summary | attribute",
+ " dictionary:btree",
+ " }",
+ " }",
+ "}");
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-numeric fields are not yet supported.");
} catch (IllegalArgumentException e) {
- assertEquals("For search 'test', field 'n1': You can only specify 'dictionary:' for numeric fields", e.getMessage());
+ assertEquals("For search 'test', field 'n1': You can only specify 'dictionary:' for numeric or string fields", e.getMessage());
}
}
@Test
- public void testNonFastSearchFieldsFailsDictionaryControl() throws ParseException {
- String def =
- "search test {\n" +
- " document test {\n" +
- " field n1 type int {\n" +
- " indexing: summary | attribute\n" +
- " dictionary:btree\n" +
- " }\n" +
- " }\n" +
- "}\n";
+ public void testNonFastSearchNumericFieldsFailsDictionaryControl() throws ParseException {
+ String def = TestUtil.joinLines(
+ "search test {",
+ " document test {",
+ " field n1 type int {",
+ " indexing: summary | attribute",
+ " dictionary:btree",
+ " }",
+ " }",
+ "}");
try {
SearchBuilder sb = SearchBuilder.createFromString(def);
fail("Controlling dictionary for non-fast-search fields are not allowed.");
@@ -140,4 +221,31 @@ public class DictionaryTestCase {
assertEquals("For search 'test', field 'n1': You must specify 'attribute:fast-search' to allow dictionary control", e.getMessage());
}
}
+
+ @Test
+ public void testCasingForNonFastSearch() throws ParseException {
+ String def = TestUtil.joinLines(
+ "search test {",
+ " document test {",
+ " field s1 type string {",
+ " indexing: attribute | summary",
+ " }",
+ " field s2 type string {",
+ " indexing: attribute | summary",
+ " match:uncased",
+ " }",
+ " field s3 type string {",
+ " indexing: attribute | summary",
+ " match:cased",
+ " }",
+ " }",
+ "}");
+ Search search = createSearch(def);
+ assertEquals(Case.UNCASED, search.getAttribute("s1").getCase());
+ assertEquals(Case.UNCASED, search.getAttribute("s2").getCase());
+ assertEquals(Case.CASED, search.getAttribute("s3").getCase());
+ assertEquals(AttributesConfig.Attribute.Match.UNCASED, getConfig(search).attribute().get(0).match());
+ assertEquals(AttributesConfig.Attribute.Match.UNCASED, getConfig(search).attribute().get(1).match());
+ assertEquals(AttributesConfig.Attribute.Match.CASED, getConfig(search).attribute().get(2).match());
+ }
}
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionTypeResolverTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionTypeResolverTestCase.java
index 96f12a47a2f..b149dafab95 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionTypeResolverTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionTypeResolverTestCase.java
@@ -196,6 +196,112 @@ public class RankingExpressionTypeResolverTestCase {
}
@Test
+ public void testAttributeInvocationViaBoundIdentifier() throws Exception {
+ SearchBuilder builder = new SearchBuilder();
+ builder.importString(joinLines(
+ "search newsarticle {",
+ " document newsarticle {",
+ " field title type string {",
+ " indexing {",
+ " input title | index",
+ " }",
+ " weight: 30",
+ " }",
+ " field usstaticrank type int {",
+ " indexing: summary | attribute",
+ " }",
+ " field eustaticrank type int {",
+ " indexing: summary | attribute",
+ " }",
+ " }",
+ " rank-profile default {",
+ " macro newsboost() { ",
+ " expression: 200 * matches(title)",
+ " }",
+ " macro commonboost(mystaticrank) { ",
+ " expression: attribute(mystaticrank) + newsboost",
+ " }",
+ " macro commonfirstphase(mystaticrank) { ",
+ " expression: nativeFieldMatch(title) + commonboost(mystaticrank) ",
+ " }",
+ " first-phase { expression: commonfirstphase(usstaticrank) }",
+ " }",
+ " rank-profile eurank inherits default {",
+ " first-phase { expression: commonfirstphase(eustaticrank) }",
+ " }",
+ "}"));
+ builder.build();
+ RankProfile profile = builder.getRankProfileRegistry().get(builder.getSearch(), "eurank");
+ }
+
+ @Test
+ public void testTensorFunctionInvocationTypes_NestedSameName() throws Exception {
+ SearchBuilder builder = new SearchBuilder();
+ builder.importString(joinLines(
+ "search test {",
+ " document test { ",
+ " field a type tensor(x[10],y[1]) {",
+ " indexing: attribute",
+ " }",
+ " field b type tensor(z[10]) {",
+ " indexing: attribute",
+ " }",
+ " }",
+ " rank-profile my_rank_profile {",
+ " function return_a() {",
+ " expression: return_first(attribute(a), attribute(b))",
+ " }",
+ " function return_b() {",
+ " expression: return_second(attribute(a), attribute(b))",
+ " }",
+ " function return_first(e1, e2) {",
+ " expression: just_return(e1)",
+ " }",
+ " function just_return(e1) {",
+ " expression: e1",
+ " }",
+ " function return_second(e1, e2) {",
+ " expression: return_first(e2+0, e1)",
+ " }",
+ " summary-features {",
+ " return_a",
+ " return_b",
+ " }",
+ " }",
+ "}"
+ ));
+ builder.build();
+ RankProfile profile =
+ builder.getRankProfileRegistry().get(builder.getSearch(), "my_rank_profile");
+ assertEquals(TensorType.fromSpec("tensor(x[10],y[1])"),
+ summaryFeatures(profile).get("return_a").type(profile.typeContext(builder.getQueryProfileRegistry())));
+ assertEquals(TensorType.fromSpec("tensor(z[10])"),
+ summaryFeatures(profile).get("return_b").type(profile.typeContext(builder.getQueryProfileRegistry())));
+ }
+
+ @Test
+ public void testTensorFunctionInvocationTypes_viaFuncWithExpr() throws Exception {
+ SearchBuilder builder = new SearchBuilder();
+ builder.importString(joinLines(
+ "search test {",
+ " document test {",
+ " field t1 type tensor<float>(y{}) { indexing: attribute | summary }",
+ " field t2 type tensor<float>(x{}) { indexing: attribute | summary }",
+ " }",
+ " rank-profile test {",
+ " function my_func(t) { expression: sum(t, x) + 1 }",
+ " function test_func_via_func_with_expr() { expression: call_func_with_expr( attribute(t1), attribute(t2) ) }",
+ " function call_func_with_expr(a, b) { expression: my_func( a * b ) }",
+ " summary-features { test_func_via_func_with_expr }",
+ " }",
+ "}"));
+ builder.build();
+ RankProfile profile = builder.getRankProfileRegistry().get(builder.getSearch(), "test");
+ assertEquals(TensorType.fromSpec("tensor<float>(y{})"),
+ summaryFeatures(profile).get("test_func_via_func_with_expr").type(profile.typeContext(builder.getQueryProfileRegistry())));
+ }
+
+ @Test
public void importedFieldsAreAvailable() throws Exception {
SearchBuilder builder = new SearchBuilder();
builder.importString(joinLines(
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/TensorFieldTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/TensorFieldTestCase.java
index 7241b7ca5e7..84ddf4f2d51 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/TensorFieldTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/TensorFieldTestCase.java
@@ -103,6 +103,25 @@ public class TensorFieldTestCase {
}
@Test
+ public void tensor_with_hnsw_index_parameters_must_be_an_index() throws ParseException {
+ try {
+ createFromString(getSd(joinLines(
+ "field t1 type tensor(x[64]) {",
+ " indexing: attribute ",
+ " index {",
+ " hnsw { max-links-per-node: 32 }",
+ " }",
+ "}")));
+ fail("Expected exception");
+ }
+ catch (IllegalArgumentException e) {
+ assertEquals("For search 'test', field 't1': " +
+ "A tensor that specifies hnsw index parameters must also specify 'index' in 'indexing'",
+ e.getMessage());
+ }
+ }
+
+ @Test
public void tensors_with_at_least_one_mapped_dimension_can_be_direct() throws ParseException {
assertTrue(getAttributeFromSd(
"field t1 type tensor(x{}) { indexing: attribute \n attribute: fast-search }", "t1").isFastSearch());
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/admin/ClusterControllerTestCase.java b/config-model/src/test/java/com/yahoo/vespa/model/admin/ClusterControllerTestCase.java
index 4ce52aaf4d3..989ae87913d 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/admin/ClusterControllerTestCase.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/admin/ClusterControllerTestCase.java
@@ -291,6 +291,10 @@ public class ClusterControllerTestCase extends DomBuilderTest {
assertThat(cfg.index(), is(0));
assertThat(cfg.fleet_controller_count(), is(1));
assertThat(cfg.init_progress_time(), is(34567000));
+
+ Service cc = model.getService("admin/cluster-controllers/0").get();
+ assertTrue(cc instanceof ClusterControllerContainer);
+ assertEquals("-Dio.netty.allocator.pageSize=4096 -Dio.netty.allocator.maxOrder=8", cc.getJvmOptions());
}
private boolean existsHostsWithClusterControllerConfigId(VespaModel model) {
@@ -392,7 +396,7 @@ public class ClusterControllerTestCase extends DomBuilderTest {
assertEquals(256, qrStartConfig.jvm().heapsize());
assertEquals(0, qrStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory());
assertEquals(2, qrStartConfig.jvm().availableProcessors());
- assertTrue(qrStartConfig.jvm().verbosegc());
+ assertFalse(qrStartConfig.jvm().verbosegc());
assertEquals("-XX:+UseG1GC -XX:MaxTenuringThreshold=15", qrStartConfig.jvm().gcopts());
assertEquals(512, qrStartConfig.jvm().stacksize());
assertEquals(0, qrStartConfig.jvm().directMemorySizeCache());
@@ -437,7 +441,7 @@ public class ClusterControllerTestCase extends DomBuilderTest {
assertEquals(32, qrStartConfig.jvm().minHeapsize());
// Overridden values from ClusterControllerContainerCluster
assertEquals(256, qrStartConfig.jvm().heapsize());
- assertTrue(qrStartConfig.jvm().verbosegc());
+ assertFalse(qrStartConfig.jvm().verbosegc());
}
@Test
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerTest.java b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerTest.java
index a11d23dbfbb..3f211a595b9 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerTest.java
@@ -144,7 +144,7 @@ public class MetricsProxyContainerTest {
@Test
public void vespa_services_config_has_all_services() {
VespaServicesConfig vespaServicesConfig = getVespaServicesConfig(servicesWithContent());
- assertEquals(6, vespaServicesConfig.service().size());
+ assertEquals(7, vespaServicesConfig.service().size());
for (var service : vespaServicesConfig.service()) {
if (service.configId().equals("admin/cluster-controllers/0")) {
@@ -185,7 +185,7 @@ public class MetricsProxyContainerTest {
private static String servicesWithContent() {
return String.join("\n",
"<services>",
- " <admin version='4.0'>",
+ " <admin version='2.0'>",
" <adminserver hostalias='node1'/>",
" </admin>",
" <content version='1.0' id='my-content'>",
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/QuotaValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/QuotaValidatorTest.java
index 6ad74231cae..d92ace2939a 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/QuotaValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/QuotaValidatorTest.java
@@ -25,13 +25,13 @@ public class QuotaValidatorTest {
@Test
public void test_deploy_under_quota() {
- var tester = new ValidationTester(5, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(publicZone));
+ var tester = new ValidationTester(8, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(publicZone));
tester.deploy(null, getServices("testCluster", 5), Environment.prod, null);
}
@Test
public void test_deploy_above_quota_clustersize() {
- var tester = new ValidationTester(11, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(publicZone));
+ var tester = new ValidationTester(14, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(publicZone));
try {
tester.deploy(null, getServices("testCluster", 11), Environment.prod, null);
fail();
@@ -42,32 +42,32 @@ public class QuotaValidatorTest {
@Test
public void test_deploy_above_quota_budget() {
- var tester = new ValidationTester(10, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(publicZone));
+ var tester = new ValidationTester(13, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(publicZone));
try {
tester.deploy(null, getServices("testCluster", 10), Environment.prod, null);
fail();
} catch (RuntimeException e) {
assertEquals("Please free up some capacity! This deployment's quota use ($-.--) exceeds reserved quota ($-.--)!",
- ValidationTester.censorNumbers(e.getMessage()));
+ ValidationTester.censorNumbers(e.getMessage()));
}
}
@Test
public void test_deploy_above_quota_budget_in_publiccd() {
- var tester = new ValidationTester(10, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(publicCdZone));
+ var tester = new ValidationTester(13, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(publicCdZone));
try {
tester.deploy(null, getServices("testCluster", 10), Environment.prod, null);
fail();
} catch (RuntimeException e) {
assertEquals("publiccd: Please free up some capacity! This deployment's quota use ($-.--) exceeds reserved quota ($-.--)!",
- ValidationTester.censorNumbers(e.getMessage()));
+ ValidationTester.censorNumbers(e.getMessage()));
}
}
@Test
public void test_deploy_with_negative_budget() {
var quota = Quota.unlimited().withBudget(BigDecimal.valueOf(-1));
- var tester = new ValidationTester(10, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(publicZone));
+ var tester = new ValidationTester(13, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(publicZone));
try {
tester.deploy(null, getServices("testCluster", 10), Environment.prod, null);
fail();
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ValidationTester.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ValidationTester.java
index b7fa72d8a64..02f1195d50e 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ValidationTester.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ValidationTester.java
@@ -34,9 +34,9 @@ public class ValidationTester {
private final TestProperties properties;
private final InMemoryProvisioner hostProvisioner;
- /** Creates a validation tester with 1 node available */
+ /** Creates a validation tester with 1 node available (in addition to cluster controllers) */
public ValidationTester() {
- this(1);
+ this(4);
}
/** Creates a validation tester with number of nodes available and the given test properties */
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ClusterSizeReductionValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ClusterSizeReductionValidatorTest.java
index bade5a746f7..f90762e8fc0 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ClusterSizeReductionValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ClusterSizeReductionValidatorTest.java
@@ -19,7 +19,7 @@ public class ClusterSizeReductionValidatorTest {
@Test
public void testSizeReductionValidation() {
- ValidationTester tester = new ValidationTester(30);
+ ValidationTester tester = new ValidationTester(33);
VespaModel previous = tester.deploy(null, getServices(30), Environment.prod, null).getFirst();
try {
@@ -45,7 +45,7 @@ public class ClusterSizeReductionValidatorTest {
@Test
public void testOverridingSizereductionValidation() {
- ValidationTester tester = new ValidationTester(30);
+ ValidationTester tester = new ValidationTester(33);
VespaModel previous = tester.deploy(null, getServices(30), Environment.prod, null).getFirst();
tester.deploy(previous, getServices(14), Environment.prod, sizeReductionOverride); // Allowed due to override
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ConfigChangeTestUtils.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ConfigChangeTestUtils.java
index 2157839ef5c..7f92d2f409c 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ConfigChangeTestUtils.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ConfigChangeTestUtils.java
@@ -6,7 +6,6 @@ import com.yahoo.config.model.api.ConfigChangeAction;
import com.yahoo.config.model.api.ServiceInfo;
import com.yahoo.config.provision.ClusterSpec;
-import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ContentClusterRemovalValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ContentClusterRemovalValidatorTest.java
index 392c37f50f3..30dab17635a 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ContentClusterRemovalValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ContentClusterRemovalValidatorTest.java
@@ -17,7 +17,7 @@ import static org.junit.Assert.fail;
*/
public class ContentClusterRemovalValidatorTest {
- private final ValidationTester tester = new ValidationTester(2);
+ private final ValidationTester tester = new ValidationTester(5);
@Test
public void testContentRemovalValidation() {
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidatorTest.java
index e89f0c0a9cd..2cf8069c988 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/AttributeChangeValidatorTest.java
@@ -69,6 +69,22 @@ public class AttributeChangeValidatorTest {
}
@Test
+ public void changing_btree2hash_require_restart() throws Exception {
+ new Fixture("field f1 type long { indexing: attribute\n attribute: fast-search\n dictionary: btree}",
+ "field f1 type long { indexing: attribute\n attribute: fast-search\n dictionary: hash }").
+ assertValidation(newRestartAction(ClusterSpec.Id.from("test"),
+ "Field 'f1' changed: change property 'dictionary: btree/hash' from 'BTREE' to 'HASH'"));
+ }
+
+ @Test
+ public void changing_hash2btree_require_restart() throws Exception {
+ new Fixture("field f1 type long { indexing: attribute\n attribute: fast-search\n dictionary: hash}",
+ "field f1 type long { indexing: attribute\n attribute: fast-search\n dictionary: btree }").
+ assertValidation(newRestartAction(ClusterSpec.Id.from("test"),
+ "Field 'f1' changed: change property 'dictionary: btree/hash' from 'HASH' to 'BTREE'"));
+ }
+
+ @Test
public void changing_fast_access_require_restart() throws Exception {
new Fixture("field f1 type string { indexing: attribute \n attribute: fast-access }",
"field f1 type string { indexing: attribute }").
@@ -77,6 +93,14 @@ public class AttributeChangeValidatorTest {
}
@Test
+ public void changing_cased2uncased_require_restart() throws Exception {
+ new Fixture("field f1 type string { indexing: attribute\n attribute: fast-search\n dictionary { btree\ncased}\nmatch:cased}",
+ "field f1 type string { indexing: attribute\n attribute: fast-search\n dictionary{ btree\nuncased}\nmatch:uncased }").
+ assertValidation(newRestartAction(ClusterSpec.Id.from("test"),
+ "Field 'f1' changed: change property 'dictionary: cased/uncased' from 'CASED' to 'UNCASED'"));
+ }
+
+ @Test
public void changing_huge_require_restart() throws Exception {
new Fixture("field f1 type string { indexing: attribute }",
"field f1 type string { indexing: attribute \n attribute: huge }").
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV2BuilderTest.java b/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV2BuilderTest.java
index 18063bff16b..69a0c8d656c 100755
--- a/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV2BuilderTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV2BuilderTest.java
@@ -122,12 +122,10 @@ public class DomAdminV2BuilderTest extends DomBuilderTest {
Admin admin = buildAdmin(servicesMultitenantAdminOnly(), true, configServerSpecs);
assertThat(admin.getConfigservers().size(), is(3));
assertThat(admin.getSlobroks().size(), is(1));
- assertThat(admin.getClusterControllerHosts().size(), is(1));
assertNotNull(admin.hostSystem().getHostByHostname("test1"));
for (Configserver configserver : admin.getConfigservers()) {
- assertThat(configserver.getHostName(), is(not(admin.getClusterControllerHosts().get(0).getHost().getHostname())));
for (Slobrok slobrok : admin.getSlobroks()) {
- assertThat(slobrok.getHostName(), is(not(configserver.getHostName())));
+ assertThat(slobrok.getHostName(), is(not(configserver.getHostName())));
}
}
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/AccessControlTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/AccessControlTest.java
index 4993a51ab74..39b5cc139d9 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/AccessControlTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/AccessControlTest.java
@@ -33,6 +33,7 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
/**
* @author gjoranv
@@ -290,17 +291,47 @@ public class AccessControlTest extends ContainerModelBuilderTestBase {
@Test
public void access_control_client_auth_can_be_overridden() {
- Http http = createModelAndGetHttp(
- " <http>",
- " <filtering>",
- " <access-control tls-handshake-client-auth=\"want\"/>",
- " </filtering>",
- " </http>");
+ AthenzDomain tenantDomain = AthenzDomain.from("my-tenant-domain");
+ DeployState state = new DeployState.Builder().properties(
+ new TestProperties()
+ .setAthenzDomain(tenantDomain)
+ .setHostedVespa(true)
+ .allowDisableMtls(true))
+ .build();
+ Http http = createModelAndGetHttp(state,
+ " <http>",
+ " <filtering>",
+ " <access-control tls-handshake-client-auth=\"want\"/>",
+ " </filtering>",
+ " </http>");
assertTrue(http.getAccessControl().isPresent());
assertEquals(AccessControl.ClientAuthentication.want, http.getAccessControl().get().clientAuthentication);
}
@Test
+ public void access_control_client_auth_cannot_be_overridden_when_disabled() {
+ AthenzDomain tenantDomain = AthenzDomain.from("my-tenant-domain");
+ DeployState state = new DeployState.Builder().properties(
+ new TestProperties()
+ .setAthenzDomain(tenantDomain)
+ .setHostedVespa(true)
+ .allowDisableMtls(false))
+ .build();
+
+ try {
+ Http http = createModelAndGetHttp(state,
+ " <http>",
+ " <filtering>",
+ " <access-control tls-handshake-client-auth=\"want\"/>",
+ " </filtering>",
+ " </http>");
+ fail("Overriding tls-handshake-client-auth allowed, but should have failed");
+ } catch (IllegalArgumentException e) {
+ assertEquals("Overriding 'tls-handshake-client-auth' for application is not allowed.", e.getMessage());
+ }
+ }
+
+ @Test
public void local_connector_has_default_chain() {
Http http = createModelAndGetHttp(
" <http>",
@@ -323,17 +354,20 @@ public class AccessControlTest extends ContainerModelBuilderTestBase {
}
private Http createModelAndGetHttp(String... httpElement) {
- List<String> servicesXml = new ArrayList<>();
- servicesXml.add("<container version='1.0'>");
- servicesXml.addAll(List.of(httpElement));
- servicesXml.add("</container>");
-
AthenzDomain tenantDomain = AthenzDomain.from("my-tenant-domain");
DeployState state = new DeployState.Builder().properties(
new TestProperties()
.setAthenzDomain(tenantDomain)
.setHostedVespa(true))
.build();
+ return createModelAndGetHttp(state, httpElement);
+ }
+ private Http createModelAndGetHttp(DeployState state, String... httpElement) {
+ List<String> servicesXml = new ArrayList<>();
+ servicesXml.add("<container version='1.0'>");
+ servicesXml.addAll(List.of(httpElement));
+ servicesXml.add("</container>");
+
createModel(root, state, null, DomBuilderTest.parse(servicesXml.toArray(String[]::new)));
return ((ApplicationContainer) root.getProducer("container/container.0")).getHttp();
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/AccessLogTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/AccessLogTest.java
index f3f3b2b1076..f3199f6a46f 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/AccessLogTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/AccessLogTest.java
@@ -86,6 +86,7 @@ public class AccessLogTest extends ContainerModelBuilderTestBase {
assertEquals("pattern", fileHandlerConfig.pattern());
assertEquals("interval", fileHandlerConfig.rotation());
assertEquals(10000, fileHandlerConfig.queueSize());
+ assertEquals(4*1024*1024, fileHandlerConfig.bufferSize());
}
{ // json
@@ -97,6 +98,7 @@ public class AccessLogTest extends ContainerModelBuilderTestBase {
assertEquals("pattern", fileHandlerConfig.pattern());
assertEquals("interval", fileHandlerConfig.rotation());
assertEquals(10000, fileHandlerConfig.queueSize());
+ assertEquals(4*1024*1024, fileHandlerConfig.bufferSize());
}
}
@@ -116,6 +118,7 @@ public class AccessLogTest extends ContainerModelBuilderTestBase {
ConnectionLogConfig config = root.getConfig(ConnectionLogConfig.class, "default/component/com.yahoo.container.logging.FileConnectionLog");
assertEquals("default", config.cluster());
assertEquals(10000, config.queueSize());
+ assertEquals(256*1024, config.bufferSize());
}
@Test
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java
index 811e789752e..4aadc0e3f05 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java
@@ -790,8 +790,8 @@ public class ContentClusterTest extends ContentBaseTest {
}
@Test
- public void flush_on_shutdown_is_default_off_for_hosted() throws Exception {
- assertNoPreShutdownCommand(createOneNodeCluster(true));
+ public void flush_on_shutdown_is_default_on_for_hosted() throws Exception {
+ assertPrepareRestartCommand(createOneNodeCluster(true));
}
@Test
@@ -1021,34 +1021,30 @@ public class ContentClusterTest extends ContentBaseTest {
assertEquals(0.1, resolveMaxDeadBytesRatio(0.1), 1e-5);
}
- void assertZookeeperServerImplementation(String expectedClassName) {
- VespaModel model = createEnd2EndOneNode(new TestProperties().setMultitenant(true));
-
- ContentCluster cc = model.getContentClusters().get("storage");
- for (ClusterControllerContainer c : cc.getClusterControllers().getContainers()) {
+ void assertZookeeperServerImplementation(String expectedClassName,
+ ClusterControllerContainerCluster clusterControllerCluster) {
+ for (ClusterControllerContainer c : clusterControllerCluster.getContainers()) {
var builder = new ComponentsConfig.Builder();
c.getConfig(builder);
assertEquals(1, new ComponentsConfig(builder).components().stream()
- .filter(component -> component.classId().equals(expectedClassName))
- .count());
+ .filter(component -> component.classId().equals(expectedClassName))
+ .count());
}
}
- @Test
- public void reconfigurableZookeeperServerComponentsForClusterController() {
- assertZookeeperServerImplementation("com.yahoo.vespa.zookeeper.ReconfigurableVespaZooKeeperServer");
- assertZookeeperServerImplementation("com.yahoo.vespa.zookeeper.Reconfigurer");
- assertZookeeperServerImplementation("com.yahoo.vespa.zookeeper.VespaZooKeeperAdminImpl");
- }
-
- private int resolveMaxInhibitedGroupsConfigWithFeatureFlag(int maxGroups) {
- VespaModel model = createEnd2EndOneNode(new TestProperties().maxActivationInhibitedOutOfSyncGroups(maxGroups));
+ private StorDistributormanagerConfig resolveStorDistributormanagerConfig(TestProperties props) {
+ VespaModel model = createEnd2EndOneNode(props);
ContentCluster cc = model.getContentClusters().get("storage");
var builder = new StorDistributormanagerConfig.Builder();
cc.getDistributorNodes().getConfig(builder);
- return (new StorDistributormanagerConfig(builder)).max_activation_inhibited_out_of_sync_groups();
+ return (new StorDistributormanagerConfig(builder));
+ }
+
+ private int resolveMaxInhibitedGroupsConfigWithFeatureFlag(int maxGroups) {
+ var cfg = resolveStorDistributormanagerConfig(new TestProperties().maxActivationInhibitedOutOfSyncGroups(maxGroups));
+ return cfg.max_activation_inhibited_out_of_sync_groups();
}
@Test
@@ -1057,6 +1053,22 @@ public class ContentClusterTest extends ContentBaseTest {
assertEquals(2, resolveMaxInhibitedGroupsConfigWithFeatureFlag(2));
}
+ private int resolveNumDistributorStripesConfigWithFeatureFlag(TestProperties props) {
+ var cfg = resolveStorDistributormanagerConfig(props);
+ return cfg.num_distributor_stripes();
+ }
+
+ private int resolveNumDistributorStripesConfigWithFeatureFlag(int numStripes) {
+ return resolveNumDistributorStripesConfigWithFeatureFlag(new TestProperties().setNumDistributorStripes(numStripes));
+ }
+
+ @Test
+ public void num_distributor_stripes_config_controlled_by_properties() {
+ assertEquals(0, resolveNumDistributorStripesConfigWithFeatureFlag(new TestProperties()));
+ assertEquals(0, resolveNumDistributorStripesConfigWithFeatureFlag(0));
+ assertEquals(1, resolveNumDistributorStripesConfigWithFeatureFlag(1));
+ }
+
@Test
public void testDedicatedClusterControllers() {
VespaModel noContentModel = createEnd2EndOneNode(new TestProperties().setHostedVespa(true)
@@ -1116,17 +1128,7 @@ public class ContentClusterTest extends ContentBaseTest {
assertNull("No own cluster controller for content", twoContentModel.getContentClusters().get("dev-null").getClusterControllers());
assertNotNull("Shared cluster controller with content", twoContentModel.getAdmin().getClusterControllers());
- Map<String, ContentCluster> clustersWithOwnCCC = createEnd2EndOneNode(new TestProperties().setMultitenant(true), twoContentServices).getContentClusters();
ClusterControllerContainerCluster clusterControllers = twoContentModel.getAdmin().getClusterControllers();
- assertEquals("Union of components in own clusters is equal to those in shared cluster",
- clusterControllers.getAllComponents().stream()
- .map(Component::getComponentId)
- .collect(toList()),
- clustersWithOwnCCC.values().stream()
- .flatMap(cluster -> Optional.ofNullable(cluster.getClusterControllers()).stream()
- .flatMap(c -> c.getAllComponents().stream()))
- .map(Component::getComponentId)
- .collect(toList()));
assertEquals(1, clusterControllers.reindexingContext().documentTypesForCluster("storage").size());
assertEquals(1, clusterControllers.reindexingContext().documentTypesForCluster("dev-null").size());
@@ -1136,6 +1138,13 @@ public class ContentClusterTest extends ContentBaseTest {
twoContentModel.getConfig(devNullBuilder, "admin/standalone/cluster-controllers/0/components/clustercontroller-dev-null-configurer");
assertEquals(0.618, storageBuilder.build().min_distributor_up_ratio(), 1e-9);
assertEquals(0.418, devNullBuilder.build().min_distributor_up_ratio(), 1e-9);
+
+ assertZookeeperServerImplementation("com.yahoo.vespa.zookeeper.ReconfigurableVespaZooKeeperServer",
+ clusterControllers);
+ assertZookeeperServerImplementation("com.yahoo.vespa.zookeeper.Reconfigurer",
+ clusterControllers);
+ assertZookeeperServerImplementation("com.yahoo.vespa.zookeeper.VespaZooKeeperAdminImpl",
+ clusterControllers);
}
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/ContentSearchClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/ContentSearchClusterTest.java
index 5fd4885a1f2..46bd005deb6 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/ContentSearchClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/ContentSearchClusterTest.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.model.content;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.model.deploy.TestProperties;
+import com.yahoo.searchlib.TranslogserverConfig;
import com.yahoo.vespa.config.content.FleetcontrollerConfig;
import com.yahoo.vespa.config.content.AllClustersBucketSpacesConfig;
import com.yahoo.vespa.config.content.core.BucketspacesConfig;
@@ -246,4 +247,16 @@ public class ContentSearchClusterTest {
assertFalse(getFleetcontrollerConfig(cluster).cluster_has_global_document_types());
}
+ TranslogserverConfig getTlsConfig(ContentCluster cluster) {
+ TranslogserverConfig.Builder tlsBuilder = new TranslogserverConfig.Builder();
+ cluster.getSearch().getSearchNodes().get(0).getConfig(tlsBuilder);
+ return tlsBuilder.build();
+ }
+
+ @Test
+ public void fsync_is_controllable() throws Exception {
+ assertTrue(getTlsConfig(createCluster(new ContentClusterBuilder().getXml())).usefsync());
+ assertTrue(getTlsConfig(createCluster(new ContentClusterBuilder().syncTransactionLog(true).getXml())).usefsync());
+ assertFalse(getTlsConfig(createCluster(new ContentClusterBuilder().syncTransactionLog(false).getXml())).usefsync());
+ }
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/utils/ContentClusterBuilder.java b/config-model/src/test/java/com/yahoo/vespa/model/content/utils/ContentClusterBuilder.java
index 491326fdc9c..d97aeffb107 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/utils/ContentClusterBuilder.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/utils/ContentClusterBuilder.java
@@ -28,6 +28,7 @@ public class ContentClusterBuilder {
private Optional<Double> protonMemoryLimit = Optional.empty();
private Optional<Double> clusterControllerDiskLimit = Optional.empty();
private Optional<Double> clusterControllerMemoryLimit = Optional.empty();
+ private Optional<Boolean> syncTransactionLog = Optional.empty();
public ContentClusterBuilder() {
}
@@ -42,6 +43,11 @@ public class ContentClusterBuilder {
return this;
}
+ public ContentClusterBuilder syncTransactionLog(boolean syncTransactionLog) {
+ this.syncTransactionLog = Optional.of(syncTransactionLog);
+ return this;
+ }
+
public ContentClusterBuilder searchableCopies(int searchableCopies) {
this.searchableCopies = searchableCopies;
return this;
@@ -101,6 +107,7 @@ public class ContentClusterBuilder {
" <proton>",
" <searchable-copies>" + searchableCopies + "</searchable-copies>",
getProtonResourceLimitsXml(" "),
+ getTransactionLogSyncXml(" "),
" </proton>",
" </engine>");
if (dispatchXml.isPresent()) {
@@ -138,7 +145,11 @@ public class ContentClusterBuilder {
return "";
}
- private static String getXmlLine(String tag, Optional<Double> value, String indent) {
+ private String getTransactionLogSyncXml(String indent) {
+ return getXmlLine("sync-transactionlog", syncTransactionLog, indent);
+ }
+
+ private static <T> String getXmlLine(String tag, Optional<T> value, String indent) {
if (value.isPresent()) {
return indent + "<" + tag + ">" + value.get() + "</" + tag + ">\n";
}
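
The builder change above makes getXmlLine generic so the new Optional<Boolean> syncTransactionLog value can be rendered with the same helper as the existing Optional<Double> limits. A minimal, self-contained sketch of that helper under the same signature (the tag names used in main below are illustrative, not taken from the real builder):

import java.util.Optional;

class XmlLineSketch {
    // Generic helper: emits "<tag>value</tag>" only when a value is present.
    private static <T> String getXmlLine(String tag, Optional<T> value, String indent) {
        if (value.isPresent()) {
            return indent + "<" + tag + ">" + value.get() + "</" + tag + ">\n";
        }
        return "";
    }

    public static void main(String[] args) {
        // Boolean and Double values now share the same helper.
        System.out.print(getXmlLine("sync-transactionlog", Optional.of(false), "    "));
        System.out.print(getXmlLine("memory-limit", Optional.of(0.8), "    "));
        System.out.print(getXmlLine("disk-limit", Optional.empty(), "    ")); // prints nothing
    }
}
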
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/ml/ModelEvaluationTest.java b/config-model/src/test/java/com/yahoo/vespa/model/ml/ModelEvaluationTest.java
index 4c1c24c9790..1aaa1669377 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/ml/ModelEvaluationTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/ml/ModelEvaluationTest.java
@@ -145,6 +145,7 @@ public class ModelEvaluationTest {
private final String profile =
"rankingExpression(imported_ml_function_small_constants_and_functions_exp_output).rankingScript: map(input, f(a)(exp(a)))\n" +
+ "rankingExpression(imported_ml_function_small_constants_and_functions_exp_output).type: tensor<float>(d0[3])\n" +
"rankingExpression(default.output).rankingScript: join(rankingExpression(imported_ml_function_small_constants_and_functions_exp_output), reduce(join(join(reduce(rankingExpression(imported_ml_function_small_constants_and_functions_exp_output), sum, d0), tensor<float>(d0[1])(1.0), f(a,b)(a * b)), 9.999999974752427E-7, f(a,b)(a + b)), sum, d0), f(a,b)(a / b))\n" +
"rankingExpression(default.output).input.type: tensor<float>(d0[3])\n" +
"rankingExpression(default.output).type: tensor<float>(d0[3])\n";
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/search/test/SearchNodeTest.java b/config-model/src/test/java/com/yahoo/vespa/model/search/test/SearchNodeTest.java
index e270c81fe78..620b5883d29 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/search/test/SearchNodeTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/search/test/SearchNodeTest.java
@@ -38,9 +38,9 @@ public class SearchNodeTest {
assertEquals(expected, cfg.basedir());
}
- private void prepare(MockRoot root, SearchNode node) {
+ private void prepare(MockRoot root, SearchNode node, Boolean useFsync) {
Host host = new Host(root, "mockhost");
- TransactionLogServer tls = new TransactionLogServer(root, "mycluster");
+ TransactionLogServer tls = new TransactionLogServer(root, "mycluster", useFsync);
tls.setHostResource(new HostResource(host));
tls.setBasePort(100);
tls.initService(root.deployLogger());
@@ -61,10 +61,17 @@ public class SearchNodeTest {
}
@Test
+ public void requireThatSyncIsHonoured() {
+ assertTrue(getTlsConfig(new TestProperties(), null).usefsync());
+ assertTrue(getTlsConfig(new TestProperties(), true).usefsync());
+ assertFalse(getTlsConfig(new TestProperties(), false).usefsync());
+ }
+
+ @Test
public void requireThatBasedirIsCorrectForElasticMode() {
MockRoot root = new MockRoot("");
SearchNode node = createSearchNode(root, "mynode", 3, new NodeSpec(7, 5), false, root.getDeployState().isHosted(), false);
- prepare(root, node);
+ prepare(root, node, true);
assertBaseDir(Defaults.getDefaults().underVespaHome("var/db/vespa/search/cluster.mycluster/n3"), node);
}
@@ -92,10 +99,10 @@ public class SearchNodeTest {
return new MockRoot("", new DeployState.Builder().properties(properties).build());
}
- private TranslogserverConfig getTlsConfig(ModelContext.Properties properties) {
+ private TranslogserverConfig getTlsConfig(ModelContext.Properties properties, Boolean useFsync) {
MockRoot root = createRoot(properties);
SearchNode node = createSearchNode(root);
- prepare(root, node);
+ prepare(root, node, useFsync);
TranslogserverConfig.Builder tlsBuilder = new TranslogserverConfig.Builder();
node.getConfig(tlsBuilder);
return tlsBuilder.build();
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java b/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java
index eafbca09009..b72ae088484 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java
@@ -52,7 +52,6 @@ public class VespaModelTester {
private final Map<NodeResources, Collection<Host>> hostsByResources = new HashMap<>();
private ApplicationId applicationId = ApplicationId.defaultId();
private boolean useDedicatedNodeForLogserver = false;
- private boolean dedicatedClusterControllerCluster = true;
public VespaModelTester() {
this(new NullConfigModelRegistry());
@@ -102,10 +101,6 @@ public class VespaModelTester {
this.useDedicatedNodeForLogserver = useDedicatedNodeForLogserver;
}
- public void dedicatedClusterControllerCluster(boolean dedicatedClusterControllerCluster) {
- this.dedicatedClusterControllerCluster = dedicatedClusterControllerCluster;
- }
-
/** Creates a model which uses 0 as start index and fails on out of capacity */
public VespaModel createModel(String services, String ... retiredHostNames) {
return createModel(Zone.defaultZone(), services, true, retiredHostNames);
@@ -175,8 +170,7 @@ public class VespaModelTester {
.setMultitenant(true)
.setHostedVespa(hosted)
.setApplicationId(applicationId)
- .setUseDedicatedNodeForLogserver(useDedicatedNodeForLogserver)
- .setDedicatedClusterControllerCluster(dedicatedClusterControllerCluster);
+ .setUseDedicatedNodeForLogserver(useDedicatedNodeForLogserver);
DeployState.Builder deployState = deployStatebuilder
.applicationPackage(appPkg)
diff --git a/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigRequester.java b/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigRequester.java
index 51266f21846..f846231ac98 100644
--- a/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigRequester.java
+++ b/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigRequester.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.subscription.impl;
import com.yahoo.config.ConfigInstance;
@@ -47,7 +47,6 @@ public class JRTConfigRequester implements RequestWaiter {
private int fatalFailures = 0; // independent of transientFailures
private int transientFailures = 0; // independent of fatalFailures
private final ScheduledThreadPoolExecutor scheduler;
- private Instant suspendWarningLogged = Instant.MIN;
private Instant noApplicationWarningLogged = Instant.MIN;
private static final Duration delayBetweenWarnings = Duration.ofSeconds(60);
private final ConnectionPool connectionPool;
@@ -196,11 +195,8 @@ public class JRTConfigRequester implements RequestWaiter {
long delay,
Connection connection) {
transientFailures++;
- if (suspendWarningLogged.isBefore(Instant.now().minus(delayBetweenWarnings))) {
- log.log(INFO, "Connection to " + connection.getAddress() +
- " failed or timed out, clients will keep existing config, will keep trying.");
- suspendWarningLogged = Instant.now();
- }
+ log.log(INFO, "Connection to " + connection.getAddress() +
+ " failed or timed out, clients will keep existing config, will keep trying.");
if (sub.getState() != ConfigSubscription.State.OPEN) return;
scheduleNextRequest(jrtReq, sub, delay, calculateErrorTimeout());
}
@@ -237,7 +233,6 @@ public class JRTConfigRequester implements RequestWaiter {
// Reset counters pertaining to error handling here
fatalFailures = 0;
transientFailures = 0;
- suspendWarningLogged = Instant.MIN;
noApplicationWarningLogged = Instant.MIN;
connection.setSuccess();
sub.setLastCallBackOKTS(Instant.now());
@@ -289,7 +284,6 @@ public class JRTConfigRequester implements RequestWaiter {
public void close() {
// Fake that we have logged to avoid printing warnings after this
- suspendWarningLogged = Instant.now();
noApplicationWarningLogged = Instant.now();
if (configSourceSet != null) {
managedPool.release(configSourceSet);
diff --git a/config/src/main/java/com/yahoo/config/subscription/impl/MockConnection.java b/config/src/main/java/com/yahoo/config/subscription/impl/MockConnection.java
index 58eed7f9e78..e6b81adf787 100644
--- a/config/src/main/java/com/yahoo/config/subscription/impl/MockConnection.java
+++ b/config/src/main/java/com/yahoo/config/subscription/impl/MockConnection.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.subscription.impl;
import com.yahoo.jrt.Request;
@@ -16,7 +16,7 @@ import com.yahoo.vespa.config.util.ConfigUtils;
*
* @author hmusum
*/
-public class MockConnection implements ConnectionPool, com.yahoo.vespa.config.Connection {
+public class MockConnection implements ConnectionPool, Connection {
private Request lastRequest;
private final ResponseHandler responseHandler;
@@ -87,9 +87,7 @@ public class MockConnection implements ConnectionPool, com.yahoo.vespa.config.Co
}
@Override
- public Connection setNewCurrentConnection() {
- return this;
- }
+ public Connection switchConnection() { return this; }
@Override
public int getSize() {
diff --git a/config/src/main/java/com/yahoo/vespa/config/ConnectionPool.java b/config/src/main/java/com/yahoo/vespa/config/ConnectionPool.java
index c6a93348d39..786dfa975f4 100644
--- a/config/src/main/java/com/yahoo/vespa/config/ConnectionPool.java
+++ b/config/src/main/java/com/yahoo/vespa/config/ConnectionPool.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config;
import com.yahoo.jrt.Supervisor;
@@ -10,13 +10,35 @@ public interface ConnectionPool extends AutoCloseable {
void close();
+ /**
+ * Sets the supplied Connection to have an error; implementations are expected to call
+ * {@link #switchConnection()} after setting state for the supplied Connection.
+ */
void setError(Connection connection, int i);
Connection getCurrent();
- Connection setNewCurrentConnection();
+ /**
+ * Switches to another (healthy, if one exists) Connection instance.
+ * Returns the resulting Connection. See also {@link #setError(Connection, int)}
+ *
+ * @return a Connection
+ */
+ Connection switchConnection();
+
+ /**
+ * Sets the current JRTConnection instance by randomly choosing
+ * from the available sources and returns the result.
+ *
+ * @return a Connection
+ */
+ @Deprecated
+ default Connection setNewCurrentConnection() { return switchConnection(); }
int getSize();
+ // TODO: Exposes implementation, try to remove
Supervisor getSupervisor();
+
}
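
The reworked ConnectionPool keeps the old setNewCurrentConnection() as a deprecated default method that simply delegates to switchConnection(), and documents that setError is expected to switch connections after recording the failure. A small sketch of that contract, with illustrative names rather than the real Vespa types:

// Illustrative sketch only; PoolSketch/ConnSketch are not the Vespa interfaces.
interface PoolSketch {
    ConnSketch getCurrent();
    ConnSketch switchConnection();

    /** Old name kept for compatibility; callers can migrate at their own pace. */
    @Deprecated
    default ConnSketch setNewCurrentConnection() { return switchConnection(); }

    /** Records the failure, then switches, as the new javadoc prescribes. */
    default void setError(ConnSketch connection, int errorCode) {
        connection.setError(errorCode);
        switchConnection();
    }

    interface ConnSketch { void setError(int errorCode); }
}
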
diff --git a/config/src/main/java/com/yahoo/vespa/config/JRTConnection.java b/config/src/main/java/com/yahoo/vespa/config/JRTConnection.java
index cde46eb9de0..0d5f483ad2c 100644
--- a/config/src/main/java/com/yahoo/vespa/config/JRTConnection.java
+++ b/config/src/main/java/com/yahoo/vespa/config/JRTConnection.java
@@ -1,36 +1,33 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config;
-import com.yahoo.jrt.*;
+import com.yahoo.jrt.Request;
+import com.yahoo.jrt.RequestWaiter;
+import com.yahoo.jrt.Spec;
+import com.yahoo.jrt.Supervisor;
+import com.yahoo.jrt.Target;
-import java.text.SimpleDateFormat;
-import java.util.TimeZone;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.logging.Level;
import java.util.logging.Logger;
/**
* A JRT connection to a config server or config proxy.
*
* @author Gunnar Gauslaa Bergem
+ * @author hmusum
*/
public class JRTConnection implements Connection {
- public final static Logger logger = Logger.getLogger(JRTConnection.class.getPackage().getName());
+ private final static Logger logger = Logger.getLogger(JRTConnection.class.getPackage().getName());
private final String address;
private final Supervisor supervisor;
private Target target;
- private long lastConnectionAttempt = 0; // Timestamp for last connection attempt
- private long lastSuccess = 0;
- private long lastFailure = 0;
-
- private static final long delayBetweenConnectionMessage = 30000; //ms
-
- private static SimpleDateFormat yyyyMMddz;
- static {
- yyyyMMddz = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss z");
- yyyyMMddz.setTimeZone(TimeZone.getTimeZone("GMT"));
- }
-
+ private Instant lastConnected = Instant.EPOCH.plus(Duration.ofSeconds(1)); // to be healthy initially, see isHealthy()
+ private Instant lastSuccess = Instant.EPOCH;
+ private Instant lastFailure = Instant.EPOCH;
public JRTConnection(String address, Supervisor supervisor) {
this.address = address;
@@ -59,39 +56,40 @@ public class JRTConnection implements Connection {
*/
public synchronized Target getTarget() {
if (target == null || !target.isValid()) {
- if ((System.currentTimeMillis() - lastConnectionAttempt) > delayBetweenConnectionMessage) {
- logger.fine("Connecting to " + address);
- }
- lastConnectionAttempt = System.currentTimeMillis();
+ logger.log(Level.INFO, "Connecting to " + address);
target = supervisor.connect(new Spec(address));
+ lastConnected = Instant.now();
}
return target;
}
@Override
public synchronized void setError(int errorCode) {
- lastFailure = System.currentTimeMillis();
+ lastFailure = Instant.now();
}
@Override
public synchronized void setSuccess() {
- lastSuccess = System.currentTimeMillis();
+ lastSuccess = Instant.now();
+ }
+
+ public synchronized boolean isHealthy() {
+ return lastSuccess.isAfter(lastFailure) || lastConnected.isAfter(lastFailure);
}
public String toString() {
StringBuilder sb = new StringBuilder();
- sb.append("Address: ");
sb.append(address);
- if (lastSuccess > 0) {
- sb.append("\n");
- sb.append("Last success: ");
- sb.append(yyyyMMddz.format(lastSuccess));
+ sb.append(", ").append(isHealthy() ? "healthy" : "unhealthy");
+ if (lastSuccess.isAfter(Instant.EPOCH)) {
+ sb.append(", last success: ");
+ sb.append(lastSuccess);
}
- if (lastFailure > 0) {
- sb.append("\n");
- sb.append("Last failure: ");
- sb.append(yyyyMMddz.format(lastFailure));
+ if (lastFailure.isAfter(Instant.EPOCH)) {
+ sb.append(", last failure: ");
+ sb.append(lastFailure);
}
return sb.toString();
}
+
}
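
JRTConnection now tracks Instant timestamps instead of raw millis, and isHealthy() treats a connection as healthy when its last success (or, before any request has completed, its last connect) is more recent than its last failure; lastConnected starts one second after EPOCH so a fresh connection counts as healthy. A small self-contained sketch of just that rule, using the same field names and initial values as the diff:

import java.time.Duration;
import java.time.Instant;

class ConnectionHealthSketch {
    // Same initial values as in the diff: healthy until a failure is recorded.
    private Instant lastConnected = Instant.EPOCH.plus(Duration.ofSeconds(1));
    private Instant lastSuccess = Instant.EPOCH;
    private Instant lastFailure = Instant.EPOCH;

    synchronized void setSuccess() { lastSuccess = Instant.now(); }
    synchronized void setError()   { lastFailure = Instant.now(); }
    synchronized void connected()  { lastConnected = Instant.now(); }

    synchronized boolean isHealthy() {
        return lastSuccess.isAfter(lastFailure) || lastConnected.isAfter(lastFailure);
    }

    public static void main(String[] args) {
        ConnectionHealthSketch c = new ConnectionHealthSketch();
        System.out.println(c.isHealthy()); // true: never failed
        c.setError();
        System.out.println(c.isHealthy()); // false: the failure is the newest event
        c.connected();
        System.out.println(c.isHealthy()); // true again: reconnect is newer than the failure
    }
}
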
diff --git a/config/src/main/java/com/yahoo/vespa/config/JRTConnectionPool.java b/config/src/main/java/com/yahoo/vespa/config/JRTConnectionPool.java
index 05558d3ee5c..019103c7015 100644
--- a/config/src/main/java/com/yahoo/vespa/config/JRTConnectionPool.java
+++ b/config/src/main/java/com/yahoo/vespa/config/JRTConnectionPool.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config;
import com.yahoo.config.subscription.ConfigSourceSet;
@@ -10,17 +10,16 @@ import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ThreadLocalRandom;
+import java.util.logging.Level;
import java.util.logging.Logger;
-
-import static java.util.logging.Level.FINE;
+import java.util.stream.Collectors;
/**
* A pool of JRT connections to a config source (either a config server or a config proxy).
- * The current connection is chosen randomly when calling {#link #setNewCurrentConnection}
- * (since the connection is chosen randomly, it might end up using the same connection again,
- * and it will always do so if there is only one source).
+ * The current connection is chosen randomly when calling {@link #switchConnection()}
+ * (it will continue to use the same connection if there is only one source).
* The current connection is available with {@link #getCurrent()}.
- * When calling {@link #setError(Connection, int)}, {#link #setNewCurrentConnection} will always be called.
+ * When calling {@link #setError(Connection, int)}, {@link #switchConnection()} will always be called.
*
* @author Gunnar Gauslaa Bergem
* @author hmusum
@@ -54,7 +53,7 @@ public class JRTConnectionPool implements ConnectionPool {
connections.put(address, new JRTConnection(address, supervisor));
}
}
- setNewCurrentConnection();
+ currentConnection = initialize();
}
/**
@@ -66,18 +65,30 @@ public class JRTConnectionPool implements ConnectionPool {
return currentConnection;
}
- /**
- * Returns and set the current JRTConnection instance by randomly choosing
- * from the available sources (this means that you might end up using
- * the same connection).
- *
- * @return a JRTConnection
- */
- public synchronized JRTConnection setNewCurrentConnection() {
+ @Override
+ public synchronized JRTConnection switchConnection() {
List<JRTConnection> sources = getSources();
- currentConnection = sources.get(ThreadLocalRandom.current().nextInt(0, sources.size()));
- log.log(FINE, () -> "Choosing new connection: " + currentConnection);
- return currentConnection;
+ if (sources.size() <= 1) return currentConnection;
+
+ List<JRTConnection> sourceCandidates = sources.stream()
+ .filter(JRTConnection::isHealthy)
+ .collect(Collectors.toList());
+ JRTConnection newConnection;
+ if (sourceCandidates.size() == 0) {
+ sourceCandidates = getSources();
+ sourceCandidates.remove(currentConnection);
+ }
+ newConnection = pickNewConnectionRandomly(sourceCandidates);
+ log.log(Level.INFO, () -> "Switching from " + currentConnection + " to " + newConnection);
+ return currentConnection = newConnection;
+ }
+
+ public synchronized JRTConnection initialize() {
+ return pickNewConnectionRandomly(getSources());
+ }
+
+ private JRTConnection pickNewConnectionRandomly(List<JRTConnection> sources) {
+ return sources.get(ThreadLocalRandom.current().nextInt(0, sources.size()));
}
List<JRTConnection> getSources() {
@@ -95,7 +106,7 @@ public class JRTConnectionPool implements ConnectionPool {
@Override
public void setError(Connection connection, int errorCode) {
connection.setError(errorCode);
- setNewCurrentConnection();
+ switchConnection();
}
public JRTConnectionPool updateSources(List<String> addresses) {
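
switchConnection() now prefers healthy sources: it keeps the current connection when there is only one source, otherwise picks randomly among healthy connections, and if none are healthy it picks randomly among all sources except the current one. A standalone sketch of that selection logic (Conn is a stand-in for JRTConnection, not the real class):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import java.util.stream.Collectors;

class SwitchConnectionSketch {
    record Conn(String address, boolean healthy) { }

    static Conn switchConnection(List<Conn> sources, Conn current) {
        if (sources.size() <= 1) return current;            // nothing to switch to

        List<Conn> candidates = sources.stream().filter(Conn::healthy).collect(Collectors.toList());
        if (candidates.isEmpty()) {                          // no healthy source: at least move off the current one
            candidates = new ArrayList<>(sources);
            candidates.remove(current);
        }
        return candidates.get(ThreadLocalRandom.current().nextInt(candidates.size()));
    }

    public static void main(String[] args) {
        Conn c0 = new Conn("host0", false), c1 = new Conn("host1", true), c2 = new Conn("host2", true);
        System.out.println(switchConnection(List.of(c0, c1, c2), c0)); // host1 or host2, never host0
    }
}
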
diff --git a/config/src/test/java/com/yahoo/vespa/config/JRTConnectionPoolTest.java b/config/src/test/java/com/yahoo/vespa/config/JRTConnectionPoolTest.java
index 6604fe46d3f..08f9d0ad31d 100644
--- a/config/src/test/java/com/yahoo/vespa/config/JRTConnectionPoolTest.java
+++ b/config/src/test/java/com/yahoo/vespa/config/JRTConnectionPoolTest.java
@@ -1,14 +1,24 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config;
import com.yahoo.config.subscription.ConfigSourceSet;
import org.junit.Test;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.not;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
/**
* Tests for the JRTConnectionPool class.
@@ -25,11 +35,12 @@ public class JRTConnectionPoolTest {
@Test
public void test_random_selection_of_sourceBasicHashBasedSelection() {
JRTConnectionPool sourcePool = new JRTConnectionPool(sources);
- assertThat(sourcePool.toString(), is("Address: host0\nAddress: host1\nAddress: host2\n"));
+ assertEquals("host0,host1,host2",
+ sourcePool.getSources().stream().map(JRTConnection::getAddress).collect(Collectors.joining(",")));
Map<String, Integer> sourceOccurrences = new HashMap<>();
for (int i = 0; i < 1000; i++) {
- final String address = sourcePool.setNewCurrentConnection().getAddress();
+ final String address = sourcePool.switchConnection().getAddress();
if (sourceOccurrences.containsKey(address)) {
sourceOccurrences.put(address, sourceOccurrences.get(address) + 1);
} else {
@@ -57,7 +68,7 @@ public class JRTConnectionPoolTest {
int count = 1000;
for (int i = 0; i < count; i++) {
- String address = sourcePool.setNewCurrentConnection().getAddress();
+ String address = sourcePool.switchConnection().getAddress();
if (timesUsed.containsKey(address)) {
int times = timesUsed.get(address);
timesUsed.put(address, times + 1);
@@ -66,6 +77,7 @@ public class JRTConnectionPoolTest {
}
}
assertConnectionDistributionIsFair(timesUsed);
+ sourcePool.close();
}
// Tests that the number of times each connection is used is close to equal
@@ -120,5 +132,42 @@ public class JRTConnectionPoolTest {
assertThat(newSourceSet2.getSources().size(), is(1));
assertThat(newSourceSet2, is(not(newSourceSet)));
assertTrue(newSourceSet2.getSources().contains("host4"));
+
+ sourcePool.close();
}
+
+ @Test
+ public void testFailingSources() {
+ List<String> sources = new ArrayList<>();
+
+ sources.add("host0");
+ sources.add("host1");
+ sources.add("host2");
+ JRTConnectionPool sourcePool = new JRTConnectionPool(sources);
+
+ Connection firstConnection = sourcePool.getCurrent();
+
+ // Should change connection away from first connection
+ sourcePool.setError(firstConnection, 123);
+ JRTConnection secondConnection = sourcePool.getCurrent();
+ assertNotEquals(secondConnection, firstConnection);
+
+ // Should change connection away from first AND second connection
+ sourcePool.setError(secondConnection, 123);
+ JRTConnection thirdConnection = sourcePool.getCurrent();
+ assertNotEquals(sourcePool.getCurrent(), firstConnection);
+ assertNotEquals(sourcePool.getCurrent(), secondConnection);
+
+ // Should change connection away from third connection
+ sourcePool.setError(thirdConnection, 123);
+ JRTConnection currentConnection = sourcePool.getCurrent();
+ assertNotEquals(sourcePool.getCurrent(), thirdConnection);
+
+ // Should change connection from current connection
+ sourcePool.setError(thirdConnection, 123);
+ assertNotEquals(sourcePool.getCurrent(), currentConnection);
+
+ sourcePool.close();
+ }
+
}
diff --git a/configdefinitions/src/vespa/attributes.def b/configdefinitions/src/vespa/attributes.def
index 453e7283f2c..46e1674e9b9 100644
--- a/configdefinitions/src/vespa/attributes.def
+++ b/configdefinitions/src/vespa/attributes.def
@@ -5,7 +5,8 @@ attribute[].name string
attribute[].datatype enum { STRING, BOOL, UINT2, UINT4, INT8, INT16, INT32, INT64, FLOAT16, FLOAT, DOUBLE, PREDICATE, TENSOR, REFERENCE, NONE } default=NONE
attribute[].collectiontype enum { SINGLE, ARRAY, WEIGHTEDSET } default=SINGLE
attribute[].dictionary.type enum { BTREE, HASH, BTREE_AND_HASH } default = BTREE
-attribute[].dictionary.match enum { CASE_SENSITIVE, CASE_INSENSITIVE, CASED, UNCASED } default=CASE_INSENSITIVE
+attribute[].dictionary.match enum { CASE_SENSITIVE, CASE_INSENSITIVE, CASED, UNCASED } default=UNCASED
+attribute[].match enum { CASED, UNCASED } default=UNCASED
attribute[].removeifzero bool default=false
attribute[].createifnonexistent bool default=false
attribute[].fastsearch bool default=false
diff --git a/configdefinitions/src/vespa/configserver.def b/configdefinitions/src/vespa/configserver.def
index 4dcc83a1c80..f8f4d6b0d92 100644
--- a/configdefinitions/src/vespa/configserver.def
+++ b/configdefinitions/src/vespa/configserver.def
@@ -28,6 +28,7 @@ numDelayedResponseThreads int default=1
serverId string default="localhost"
hostedVespa bool default=false
numParallelTenantLoaders int default=4
+numRedeploymentThreads int default=4
# Configserver app
applicationDirectory string default="conf/configserver-app"
diff --git a/configserver-client/pom.xml b/configserver-client/pom.xml
index cee6f6c067f..0a29ba003f4 100644
--- a/configserver-client/pom.xml
+++ b/configserver-client/pom.xml
@@ -55,13 +55,7 @@
<dependency>
<groupId>org.junit.jupiter</groupId>
- <artifactId>junit-jupiter-engine</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>testutil</artifactId>
- <version>${project.version}</version>
+ <artifactId>junit-jupiter</artifactId>
<scope>test</scope>
</dependency>
<dependency>
diff --git a/configserver-client/src/main/java/ai/vespa/hosted/client/AbstractConfigServerClient.java b/configserver-client/src/main/java/ai/vespa/hosted/client/AbstractConfigServerClient.java
index 0ee9e320259..2e5e445d63a 100644
--- a/configserver-client/src/main/java/ai/vespa/hosted/client/AbstractConfigServerClient.java
+++ b/configserver-client/src/main/java/ai/vespa/hosted/client/AbstractConfigServerClient.java
@@ -1,8 +1,6 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.hosted.client;
-import com.yahoo.slime.Inspector;
-import com.yahoo.slime.SlimeUtils;
import org.apache.hc.client5.http.classic.methods.ClassicHttpRequests;
import org.apache.hc.client5.http.config.RequestConfig;
import org.apache.hc.client5.http.protocol.HttpClientContext;
@@ -10,14 +8,13 @@ import org.apache.hc.core5.http.ClassicHttpRequest;
import org.apache.hc.core5.http.ClassicHttpResponse;
import org.apache.hc.core5.http.ContentType;
import org.apache.hc.core5.http.HttpEntity;
-import org.apache.hc.core5.http.HttpStatus;
import org.apache.hc.core5.http.Method;
+import org.apache.hc.core5.http.ParseException;
+import org.apache.hc.core5.http.io.entity.EntityUtils;
import org.apache.hc.core5.http.io.entity.HttpEntities;
import org.apache.hc.core5.net.URIBuilder;
-import org.apache.hc.core5.util.Timeout;
import java.io.IOException;
-import java.io.InputStream;
import java.io.UncheckedIOException;
import java.net.URI;
import java.net.URISyntaxException;
@@ -26,11 +23,10 @@ import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.function.BiFunction;
+import java.util.function.Consumer;
import java.util.function.Function;
import java.util.logging.Logger;
-import java.util.stream.Stream;
-import static ai.vespa.hosted.client.ConfigServerClient.ConfigServerException.ErrorCode.INCOMPLETE_RESPONSE;
import static java.util.Objects.requireNonNull;
import static java.util.logging.Level.FINE;
import static java.util.logging.Level.WARNING;
@@ -40,19 +36,15 @@ import static java.util.logging.Level.WARNING;
*/
public abstract class AbstractConfigServerClient implements ConfigServerClient {
- static final RequestConfig defaultRequestConfig = RequestConfig.custom()
- .setConnectionRequestTimeout(Timeout.ofSeconds(5))
- .setConnectTimeout(Timeout.ofSeconds(5))
- .setRedirectsEnabled(false)
- .build();
-
private static final Logger log = Logger.getLogger(AbstractConfigServerClient.class.getName());
/** Executes the request with the given context. The caller must close the response. */
- protected abstract ClassicHttpResponse execute(ClassicHttpRequest request, HttpClientContext context) throws IOException;
+ abstract ClassicHttpResponse execute(ClassicHttpRequest request, HttpClientContext context) throws IOException;
/** Executes the given request with response/error handling and retries. */
- private <T> T execute(RequestBuilder builder, BiFunction<ClassicHttpResponse, IOException, T> handler) {
+ private <T> T execute(RequestBuilder builder,
+ BiFunction<ClassicHttpResponse, ClassicHttpRequest, T> handler,
+ Consumer<IOException> catcher) {
HttpClientContext context = HttpClientContext.create();
context.setRequestConfig(builder.config);
@@ -62,10 +54,11 @@ public abstract class AbstractConfigServerClient implements ConfigServerClient {
request.setEntity(builder.entity);
try {
try {
- return handler.apply(execute(request, context), null);
+ return handler.apply(execute(request, context), request);
}
catch (IOException e) {
- return handler.apply(null, e);
+ catcher.accept(e);
+ throw new UncheckedIOException(e); // Throw unchecked if catcher doesn't throw.
}
}
catch (RetryException e) {
@@ -90,7 +83,7 @@ public abstract class AbstractConfigServerClient implements ConfigServerClient {
throw new IllegalStateException("Illegal retry cause: " + thrown.getClass(), thrown);
}
- throw new IllegalArgumentException("No hosts to perform the request against");
+ throw new IllegalStateException("No hosts to perform the request against");
}
/** Append path to the given host, which may already contain a root path. */
@@ -102,8 +95,8 @@ public abstract class AbstractConfigServerClient implements ConfigServerClient {
pathSegments.addAll(pathAndQuery.getPathSegments());
try {
return builder.setPathSegments(pathSegments)
- .setParameters(pathAndQuery.getQueryParams())
- .build();
+ .setParameters(pathAndQuery.getQueryParams())
+ .build();
}
catch (URISyntaxException e) {
throw new IllegalArgumentException("URISyntaxException should not be possible here", e);
@@ -111,7 +104,7 @@ public abstract class AbstractConfigServerClient implements ConfigServerClient {
}
@Override
- public RequestBuilder send(HostStrategy hosts, Method method) {
+ public ConfigServerClient.RequestBuilder send(HostStrategy hosts, Method method) {
return new RequestBuilder(hosts, method);
}
@@ -121,8 +114,11 @@ public abstract class AbstractConfigServerClient implements ConfigServerClient {
private final Method method;
private final HostStrategy hosts;
private final URIBuilder uriBuilder = new URIBuilder();
+ private final List<String> pathSegments = new ArrayList<>();
private HttpEntity entity;
- private RequestConfig config = defaultRequestConfig;
+ private RequestConfig config = ConfigServerClient.defaultRequestConfig;
+ private ResponseVerifier verifier = ConfigServerClient.throwOnError;
+ private Consumer<IOException> catcher = ConfigServerClient.retryAll;
private RequestBuilder(HostStrategy hosts, Method method) {
if ( ! hosts.iterator().hasNext())
@@ -133,8 +129,8 @@ public abstract class AbstractConfigServerClient implements ConfigServerClient {
}
@Override
- public RequestBuilder at(String... pathSegments) {
- uriBuilder.setPathSegments(requireNonNull(pathSegments));
+ public RequestBuilder at(List<String> pathSegments) {
+ this.pathSegments.addAll(pathSegments);
return this;
}
@@ -150,19 +146,30 @@ public abstract class AbstractConfigServerClient implements ConfigServerClient {
}
@Override
- public RequestBuilder parameters(String... pairs) {
- if (pairs.length % 2 != 0)
+ public ConfigServerClient.RequestBuilder emptyParameters(List<String> keys) {
+ for (String key : keys)
+ uriBuilder.setParameter(key, null);
+
+ return this;
+ }
+
+ @Override
+ public RequestBuilder parameters(List<String> pairs) {
+ if (pairs.size() % 2 != 0)
throw new IllegalArgumentException("Must supply parameter key/values in pairs");
- for (int i = 0; i < pairs.length; )
- uriBuilder.setParameter(pairs[i++], pairs[i++]);
+ for (int i = 0; i < pairs.size(); ) {
+ String key = pairs.get(i++), value = pairs.get(i++);
+ if (value != null)
+ uriBuilder.setParameter(key, value);
+ }
return this;
}
@Override
public RequestBuilder timeout(Duration timeout) {
- return config(RequestConfig.copy(defaultRequestConfig)
+ return config(RequestConfig.copy(config)
.setResponseTimeout(timeout.toMillis(), TimeUnit.MILLISECONDS)
.build());
}
@@ -170,94 +177,90 @@ public abstract class AbstractConfigServerClient implements ConfigServerClient {
@Override
public RequestBuilder config(RequestConfig config) {
this.config = requireNonNull(config);
+ return this;
+ }
+ @Override
+ public RequestBuilder catching(Consumer<IOException> catcher) {
+ this.catcher = requireNonNull(catcher);
return this;
}
@Override
- public <T> T handle(BiFunction<ClassicHttpResponse, IOException, T> handler) throws UncheckedIOException {
- return execute(this, requireNonNull(handler));
+ public RequestBuilder throwing(ResponseVerifier verifier) {
+ this.verifier = requireNonNull(verifier);
+ return this;
}
@Override
- public <T> T read(Function<byte[], T> mapper) throws UncheckedIOException, ConfigServerException {
- return mapIfSuccess(input -> {
- try (input) {
- return mapper.apply(input.readAllBytes());
+ public String read() {
+ return handle((response, __) -> {
+ try (response) {
+ return response.getEntity() == null ? "" : EntityUtils.toString(response.getEntity());
}
- catch (IOException e) {
- throw new RetryException(e);
+ catch (ParseException e) {
+ throw new IllegalStateException(e); // This isn't actually thrown by apache >_<
}
});
}
@Override
- public void discard() throws UncheckedIOException, ConfigServerException {
- mapIfSuccess(input -> {
- try (input) {
- return null;
+ public <T> T read(Function<byte[], T> mapper) {
+ return handle((response, __) -> {
+ try (response) {
+ return mapper.apply(response.getEntity() == null ? new byte[0] : EntityUtils.toByteArray(response.getEntity()));
}
- catch (IOException e) {
- throw new RetryException(e);
+ });
+ }
+
+ @Override
+ public void discard() throws UncheckedIOException, ResponseException {
+ handle((response, __) -> {
+ try (response) {
+ return null;
}
});
}
@Override
- public InputStream stream() throws UncheckedIOException, ConfigServerException {
- return mapIfSuccess(input -> input);
+ public HttpInputStream stream() throws UncheckedIOException, ResponseException {
+ return handle((response, __) -> new HttpInputStream(response));
}
- /** Returns the mapped body, if successful, retrying any IOException. The caller must close the body stream. */
- private <T> T mapIfSuccess(Function<InputStream, T> mapper) {
- return handle((response, ioException) -> {
- if (response != null) {
- try {
- InputStream body = response.getEntity() != null ? response.getEntity().getContent()
- : InputStream.nullInputStream();
- if (response.getCode() >= HttpStatus.SC_REDIRECTION)
- throw readException(body.readAllBytes());
-
- return mapper.apply(new ForwardingInputStream(body) {
- @Override
- public void close() throws IOException {
- super.close();
- response.close();
- }
- });
- }
- catch (IOException | RuntimeException | Error e) {
- try {
- response.close();
- }
- catch (IOException f) {
- e.addSuppressed(f);
- }
- if (e instanceof IOException)
- ioException = (IOException) e;
- else
- sneakyThrow(e);
- }
- }
- throw new RetryException(ioException);
- });
+ @Override
+ public <T> T handle(ResponseHandler<T> handler) {
+ uriBuilder.setPathSegments(pathSegments);
+ return execute(this,
+ (response, request) -> {
+ try {
+ verifier.verify(response, request); // This throws on unacceptable responses.
+ return handler.handle(response, request);
+ }
+ catch (IOException | RuntimeException | Error e) {
+ try {
+ response.close();
+ }
+ catch (IOException f) {
+ e.addSuppressed(f);
+ }
+ if (e instanceof IOException) {
+ catcher.accept((IOException) e);
+ throw new UncheckedIOException((IOException) e);
+ }
+ else
+ sneakyThrow(e); // e is a runtime exception or an error, so this is fine.
+ throw new AssertionError("Should not happen");
+ }
+ },
+ catcher);
}
}
+
@SuppressWarnings("unchecked")
private static <T extends Throwable> void sneakyThrow(Throwable t) throws T {
throw (T) t;
}
- private static ConfigServerException readException(byte[] serialised) {
- Inspector root = SlimeUtils.jsonToSlime(serialised).get();
- String codeName = root.field("error-code").asString();
- ConfigServerException.ErrorCode code = Stream.of(ConfigServerException.ErrorCode.values())
- .filter(value -> value.name().equals(codeName))
- .findAny().orElse(INCOMPLETE_RESPONSE);
- String message = root.field("message").valid() ? root.field("message").asString() : "(no message)";
- return new ConfigServerException(code, message, "");
- }
-
} \ No newline at end of file
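For reference, a minimal standalone sketch (illustration only, class and method names are made up) of the key/value rule the new parameters(List&lt;String&gt;) overload applies above: keys and values alternate, odd-sized lists are rejected, and pairs whose value is null are dropped.

    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    class ParameterPairsSketch {
        // Mirrors the builder's behaviour: odd-sized lists are rejected, null values are skipped.
        static Map<String, String> toParameterMap(List<String> pairs) {
            if (pairs.size() % 2 != 0)
                throw new IllegalArgumentException("Must supply parameter key/values in pairs");
            Map<String, String> parameters = new LinkedHashMap<>();
            for (int i = 0; i < pairs.size(); ) {
                String key = pairs.get(i++), value = pairs.get(i++);
                if (value != null)
                    parameters.put(key, value);
            }
            return parameters;
        }
    }

For example, toParameterMap(Arrays.asList("debug", "true", "dryRun", null)) yields {debug=true}.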
diff --git a/configserver-client/src/main/java/ai/vespa/hosted/client/ConfigServerClient.java b/configserver-client/src/main/java/ai/vespa/hosted/client/ConfigServerClient.java
index 234dbe9ee06..c92acd7cd0b 100644
--- a/configserver-client/src/main/java/ai/vespa/hosted/client/ConfigServerClient.java
+++ b/configserver-client/src/main/java/ai/vespa/hosted/client/ConfigServerClient.java
@@ -2,29 +2,60 @@
package ai.vespa.hosted.client;
import org.apache.hc.client5.http.config.RequestConfig;
+import org.apache.hc.core5.http.ClassicHttpRequest;
import org.apache.hc.core5.http.ClassicHttpResponse;
import org.apache.hc.core5.http.HttpEntity;
+import org.apache.hc.core5.http.HttpStatus;
import org.apache.hc.core5.http.Method;
+import org.apache.hc.core5.http.io.entity.EntityUtils;
+import org.apache.hc.core5.util.Timeout;
+import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.io.UncheckedIOException;
import java.net.URI;
import java.time.Duration;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collections;
import java.util.List;
-import java.util.function.BiFunction;
+import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.IntStream;
+import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Objects.requireNonNull;
import static java.util.stream.Collectors.toUnmodifiableList;
/**
* @author jonmv
*/
-public interface ConfigServerClient extends AutoCloseable {
+public interface ConfigServerClient extends Closeable {
+
+ RequestConfig defaultRequestConfig = RequestConfig.custom()
+ .setConnectionRequestTimeout(Timeout.ofSeconds(5))
+ .setConnectTimeout(Timeout.ofSeconds(5))
+ .setRedirectsEnabled(false)
+ .build();
+
+ /** Wraps with a {@link RetryException} and rethrows. */
+ Consumer<IOException> retryAll = (e) -> {
+ throw new RetryException(e);
+ };
+
+ /** Throws a {@link RetryException} if {@code statusCode == 503}, or a {@link ResponseException} unless {@code 200 <= statusCode < 300}. */
+ ResponseVerifier throwOnError = new DefaultResponseVerifier() { };
+
+ /** Reads the response body, returning {@code null} if there is none, and throwing an {@link UncheckedIOException} if reading fails. */
+ static byte[] getBytes(ClassicHttpResponse response) {
+ try {
+ return response.getEntity() == null ? null : EntityUtils.toByteArray(response.getEntity());
+ }
+ catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
+ }
/** Creates a builder for sending the given method, using the specified host strategy. */
RequestBuilder send(HostStrategy hosts, Method method);
@@ -32,8 +63,11 @@ public interface ConfigServerClient extends AutoCloseable {
/** Builder for a request against a given set of hosts, using this config server client. */
interface RequestBuilder {
- /** Sets the request path. */
- RequestBuilder at(String... pathSegments);
+ /** Appends to the request path. */
+ default RequestBuilder at(String... pathSegments) { return at(List.of(pathSegments)); }
+
+ /** Appends to the request path. */
+ RequestBuilder at(List<String> pathSegments);
/** Sets the request body as UTF-8 application/json. */
RequestBuilder body(byte[] json);
@@ -41,50 +75,134 @@ public interface ConfigServerClient extends AutoCloseable {
/** Sets the request body. */
RequestBuilder body(HttpEntity entity);
- /** Sets the parameter key/values for the request. Number of arguments must be even. */
- RequestBuilder parameters(String... pairs);
+ /** Sets query parameters without a value, like {@code ?debug&recursive}. */
+ default RequestBuilder emptyParameters(String... keys) {
+ return emptyParameters(Arrays.asList(keys));
+ }
+
+ /** Sets query parameters without a value, like {@code ?debug&recursive}. */
+ RequestBuilder emptyParameters(List<String> keys);
+
+ /** Sets the parameter key/values for the request. Number of arguments must be even. Null values are omitted. */
+ default RequestBuilder parameters(String... pairs) {
+ return parameters(Arrays.asList(pairs));
+ }
+
+ /** Sets the parameter key/values for the request. The number of list entries must be even. Null values are omitted. */
+ RequestBuilder parameters(List<String> pairs);
/** Overrides the default socket read timeout of the request. {@code Duration.ZERO} gives infinite timeout. */
RequestBuilder timeout(Duration timeout);
- /** Overrides the default socket read timeout of the request. {@code null} allows infinite timeout. */
+ /** Overrides the default request config for this request. */
RequestBuilder config(RequestConfig config);
/**
- * Sets custom retry/failure logic for this.
- * <p>
- * Exactly one of the arguments (response, exception) are non-null.
- * Return a value to have that returned to the caller;
- * throw a {@link RetryException} to have the request retried; or
- * throw any other unchecked exception to have this propagate out to the caller.
- * The caller must close the provided response, if any.
+ * Sets the catch clause for {@link IOException}s during execution of this.
+ * The default is to wrap the IOException in a {@link RetryException} and rethrow it;
+ * this makes the client retry the request, as long as there are remaining entries in the {@link HostStrategy}.
+ * If the catcher returns normally, the {@link IOException} is instead rethrown wrapped in an {@link UncheckedIOException}.
*/
- <T> T handle(BiFunction<ClassicHttpResponse, IOException, T> handler) throws UncheckedIOException;
+ RequestBuilder catching(Consumer<IOException> catcher);
- /** Sets the response body mapper for this, for successful requests. */
- <T> T read(Function<byte[], T> mapper) throws UncheckedIOException, ConfigServerException;
+ /**
+ * Sets the response verifier for this request; the default is {@link #throwOnError}.
+ * When the verifier returns normally, the response is treated as a success and is passed on to the response mapper.
+ */
+ RequestBuilder throwing(ResponseVerifier handler);
+
+ /** Reads the response as a {@link String}, or throws if unsuccessful. */
+ String read();
+
+ /** Reads and maps the response, or throws if unsuccessful. */
+ <T> T read(Function<byte[], T> mapper);
- /** Discards the response, but throws if the response is unsuccessful. */
- void discard() throws UncheckedIOException, ConfigServerException;
+ /** Discards the response, but throws if unsuccessful. */
+ void discard();
- /** Returns the raw input stream of the response, if successful. The caller must close the returned stream. */
- InputStream stream() throws UncheckedIOException, ConfigServerException;
+ /** Returns the raw response input stream, or throws if unsuccessful. The caller must close the returned stream. */
+ HttpInputStream stream();
+
+ /** Maps the response and request to a result, if the response is successful. */
+ <T> T handle(ResponseHandler<T> handler);
}
- /** Exception wrapper that signals retries should be attempted. */
- final class RetryException extends RuntimeException {
- public RetryException(IOException cause) {
- super(requireNonNull(cause));
+ class HttpInputStream extends ForwardingInputStream {
+
+ private final ClassicHttpResponse response;
+
+ protected HttpInputStream(ClassicHttpResponse response) throws IOException {
+ super(response.getEntity() != null ? response.getEntity().getContent()
+ : InputStream.nullInputStream());
+ this.response = response;
}
- public RetryException(RuntimeException cause) {
- super(requireNonNull(cause));
+ public int statusCode() { return response.getCode(); }
+
+ public String contentType() { return response.getEntity().getContentType(); }
+
+ @Override
+ public void close() throws IOException {
+ super.close();
+ response.close();
}
}
+
+ /** Reads a successful response and request to compute a result. */
+ @FunctionalInterface
+ interface ResponseHandler<T> {
+
+ /** Called with successful responses, as per {@link ResponseVerifier}. The caller must close the response. */
+ T handle(ClassicHttpResponse response, ClassicHttpRequest request) throws IOException;
+
+ }
+
+
+ /** Verifies a response, throwing on error responses, possibly indicating retries. */
+ @FunctionalInterface
+ interface ResponseVerifier {
+
+ /** Whether this status code means the response is an error response. */
+ default boolean isError(int statusCode) {
+ return statusCode < HttpStatus.SC_OK || HttpStatus.SC_REDIRECTION <= statusCode;
+ }
+
+ /** Whether this status code means we should retry. Has no effect if this is not also an error. */
+ default boolean shouldRetry(int statusCode) {
+ return statusCode == HttpStatus.SC_SERVICE_UNAVAILABLE;
+ }
+
+ /** Verifies the given response, consuming it and throwing if it is an error, and otherwise leaving it untouched for the handler. */
+ default void verify(ClassicHttpResponse response, ClassicHttpRequest request) throws IOException {
+ if (isError(response.getCode())) {
+ try (response) {
+ byte[] body = response.getEntity() == null ? new byte[0] : EntityUtils.toByteArray(response.getEntity());
+ RuntimeException exception = toException(response.getCode(), body, request);
+ throw shouldRetry(response.getCode()) ? new RetryException(exception) : exception;
+ }
+ }
+ }
+
+ /** Throws the appropriate exception, for the given status code and body. */
+ RuntimeException toException(int statusCode, byte[] body, ClassicHttpRequest request);
+
+ }
+
+
+ interface DefaultResponseVerifier extends ResponseVerifier {
+
+ @Override
+ default RuntimeException toException(int statusCode, byte[] body, ClassicHttpRequest request) {
+ return new ResponseException(request + " failed with status " + statusCode + " and body '" + new String(body, UTF_8) + "'");
+ }
+
+ }
+
+
/** What host(s) to try for a request, in what order. A host may be specified multiple times, for retries. */
@FunctionalInterface
interface HostStrategy extends Iterable<URI> {
@@ -110,37 +228,26 @@ public interface ConfigServerClient extends AutoCloseable {
}
- /** An exception due to server error, a bad request, or similar. */
- class ConfigServerException extends RuntimeException {
-
- private final ErrorCode errorId;
- private final String message;
-
- public ConfigServerException(ErrorCode errorId, String message, String context) {
- super(context + ": " + message);
- this.errorId = errorId;
- this.message = message;
- }
-
- public ErrorCode errorId() { return errorId; }
-
- public String message() { return message; }
-
- public enum ErrorCode {
- APPLICATION_LOCK_FAILURE,
- BAD_REQUEST,
- ACTIVATION_CONFLICT,
- INTERNAL_SERVER_ERROR,
- INVALID_APPLICATION_PACKAGE,
- METHOD_NOT_ALLOWED,
- NOT_FOUND,
- OUT_OF_CAPACITY,
- REQUEST_TIMEOUT,
- UNKNOWN_VESPA_VERSION,
- PARENT_HOST_NOT_READY,
- CERTIFICATE_NOT_READY,
- LOAD_BALANCER_NOT_READY,
- INCOMPLETE_RESPONSE
+
+ /** Exception wrapper that signals retries should be attempted. */
+ final class RetryException extends RuntimeException {
+
+ public RetryException(IOException cause) {
+ super(requireNonNull(cause));
+ }
+
+ public RetryException(RuntimeException cause) {
+ super(requireNonNull(cause));
+ }
+
+ }
+
+
+ /** An exception due to server error, a bad request, or similar, which resulted in a non-OK HTTP response. */
+ class ResponseException extends RuntimeException {
+
+ public ResponseException(String message) {
+ super(message);
}
}
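To make the reworked builder contract above concrete, a minimal usage sketch follows. It assumes the types declared in this file and an already constructed client; the path and parameter names are made up for illustration.

    import ai.vespa.hosted.client.ConfigServerClient;
    import ai.vespa.hosted.client.ConfigServerClient.HostStrategy;
    import org.apache.hc.core5.http.Method;
    import java.net.URI;
    import java.time.Duration;

    class RequestBuilderUsageSketch {
        static String fetchStatus(ConfigServerClient client, URI configServer) {
            return client.send(HostStrategy.repeating(configServer, 3), Method.GET) // try the host up to 3 times
                         .at("status", "v1")                                        // request path /status/v1 (illustrative)
                         .parameters("verbose", "true", "unused", null)             // null values are omitted
                         .timeout(Duration.ofSeconds(10))                           // overrides the response timeout
                         .read();                                                   // body as a String; throws on error responses
        }
    }

With the default verifier (throwOnError), a 503 becomes a RetryException and other non-2xx codes become a ResponseException; IOExceptions are retried via the default catcher (retryAll) while hosts remain in the strategy.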
diff --git a/configserver-client/src/main/java/ai/vespa/hosted/client/HttpConfigServerClient.java b/configserver-client/src/main/java/ai/vespa/hosted/client/HttpConfigServerClient.java
index c5b07eceaf5..3835bb31e0b 100644
--- a/configserver-client/src/main/java/ai/vespa/hosted/client/HttpConfigServerClient.java
+++ b/configserver-client/src/main/java/ai/vespa/hosted/client/HttpConfigServerClient.java
@@ -13,6 +13,8 @@ import org.apache.hc.core5.http.io.SocketConfig;
import org.apache.hc.core5.util.TimeValue;
import org.apache.hc.core5.util.Timeout;
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.SSLSession;
import java.io.IOException;
import java.util.Collection;
import java.util.Set;
@@ -44,17 +46,20 @@ public class HttpConfigServerClient extends AbstractConfigServerClient {
private static CloseableHttpClient createClient(Collection<AthenzIdentity> serverIdentities, String userAgent) {
return VespaHttpClientBuilder.create(socketFactories -> {
var manager = new PoolingHttpClientConnectionManager(socketFactories);
- manager.setMaxTotal(256);
- manager.setDefaultMaxPerRoute(8);
+ manager.setMaxTotal(1024);
+ manager.setDefaultMaxPerRoute(128);
manager.setDefaultSocketConfig(SocketConfig.custom().setSoTimeout(Timeout.ofSeconds(5)).build());
manager.setValidateAfterInactivity(TimeValue.ofSeconds(10));
return manager;
},
- new AthenzIdentityVerifier(Set.copyOf(serverIdentities)),
+ new AthenzIdentityVerifier(Set.copyOf(serverIdentities)) {
+ @Override public boolean verify(String hostname, SSLSession session) {
+ return super.verify(hostname, session) || "localhost".equals(hostname);
+ }
+ },
false)
.disableAutomaticRetries()
.setUserAgent(userAgent)
- .setDefaultRequestConfig(defaultRequestConfig)
.build();
}
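The hunk above wraps the AthenzIdentityVerifier so that "localhost" is also accepted. A standalone sketch of that delegate-or-localhost pattern, expressed with the plain javax.net.ssl interface (class name is illustrative):

    import javax.net.ssl.HostnameVerifier;
    import javax.net.ssl.SSLSession;

    class LocalhostAllowingVerifier implements HostnameVerifier {
        private final HostnameVerifier delegate;
        LocalhostAllowingVerifier(HostnameVerifier delegate) { this.delegate = delegate; }

        @Override
        public boolean verify(String hostname, SSLSession session) {
            // Accept whatever the delegate accepts, plus connections to localhost.
            return delegate.verify(hostname, session) || "localhost".equals(hostname);
        }
    }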
diff --git a/configserver-client/src/test/java/ai/vespa/hosted/client/HttpConfigServerClientTest.java b/configserver-client/src/test/java/ai/vespa/hosted/client/HttpConfigServerClientTest.java
index cbf38f46f6f..b2f17f43f5c 100644
--- a/configserver-client/src/test/java/ai/vespa/hosted/client/HttpConfigServerClientTest.java
+++ b/configserver-client/src/test/java/ai/vespa/hosted/client/HttpConfigServerClientTest.java
@@ -1,7 +1,7 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.hosted.client;
-import ai.vespa.hosted.client.ConfigServerClient.ConfigServerException;
+import ai.vespa.hosted.client.ConfigServerClient.ResponseException;
import ai.vespa.hosted.client.ConfigServerClient.HostStrategy;
import com.github.tomakehurst.wiremock.http.Fault;
import com.yahoo.vespa.athenz.api.AthenzService;
@@ -35,7 +35,7 @@ class HttpConfigServerClientTest {
@RegisterExtension
final WireMockExtension server = new WireMockExtension();
- final HttpConfigServerClient client = new HttpConfigServerClient(List.of(new AthenzService("mydomain", "yourservice")), "user");
+ final ConfigServerClient client = new HttpConfigServerClient(List.of(new AthenzService("mydomain", "yourservice")), "user");
@Test
void testRetries() {
@@ -70,29 +70,28 @@ class HttpConfigServerClientTest {
server.resetRequests();
// Successful attempt returns.
- server.stubFor(get("/root/boot"))
+ server.stubFor(get("/root/boot/toot"))
.setResponse(okJson("{}").build());
assertEquals("{}",
client.send(HostStrategy.repeating(URI.create("http://localhost:" + server.port()), 10),
Method.GET)
.at("root", "boot")
+ .at("toot")
.read(String::new));
- server.verify(1, getRequestedFor(urlEqualTo("/root/boot")));
+ server.verify(1, getRequestedFor(urlEqualTo("/root/boot/toot")));
server.verify(1, anyRequestedFor(anyUrl()));
server.resetRequests();
- // ConfigServerException is not retried.
+ // ResponseException is not retried.
server.stubFor(get("/"))
- .setResponse(aResponse().withStatus(409).withBody("{\"error-code\":\"ACTIVATION_CONFLICT\",\"message\":\"hi\"}").build());
- ConfigServerException thrown = assertThrows(ConfigServerException.class,
- () -> client.send(HostStrategy.repeating(URI.create("http://localhost:" + server.port()), 10),
- Method.GET)
- .read(String::new));
- assertEquals(ConfigServerException.ErrorCode.ACTIVATION_CONFLICT, thrown.errorId());
- assertEquals("hi", thrown.message());
+ .setResponse(aResponse().withStatus(409).withBody("hi").build());
+ ResponseException thrown = assertThrows(ResponseException.class,
+ () -> client.send(HostStrategy.repeating(URI.create("http://localhost:" + server.port()), 10),
+ Method.GET)
+ .read(String::new));
+ assertEquals("GET / failed with status 409 and body 'hi'", thrown.getMessage());
server.verify(1, getRequestedFor(urlEqualTo("/")));
server.verify(1, anyRequestedFor(anyUrl()));
-
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
index cc9d88f3fc9..536d8894526 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
@@ -43,7 +43,6 @@ import com.yahoo.vespa.config.server.application.ConfigConvergenceChecker;
import com.yahoo.vespa.config.server.application.DefaultClusterReindexingStatusClient;
import com.yahoo.vespa.config.server.application.FileDistributionStatus;
import com.yahoo.vespa.config.server.application.HttpProxy;
-import com.yahoo.vespa.config.server.http.SecretStoreValidator;
import com.yahoo.vespa.config.server.application.TenantApplications;
import com.yahoo.vespa.config.server.configchange.ConfigChangeActions;
import com.yahoo.vespa.config.server.configchange.RefeedActions;
@@ -54,6 +53,7 @@ import com.yahoo.vespa.config.server.deploy.Deployment;
import com.yahoo.vespa.config.server.deploy.InfraDeployerProvider;
import com.yahoo.vespa.config.server.http.InternalServerException;
import com.yahoo.vespa.config.server.http.LogRetriever;
+import com.yahoo.vespa.config.server.http.SecretStoreValidator;
import com.yahoo.vespa.config.server.http.SimpleHttpFetcher;
import com.yahoo.vespa.config.server.http.TesterClient;
import com.yahoo.vespa.config.server.http.v2.DeploymentMetricsResponse;
@@ -78,9 +78,7 @@ import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.curator.stats.LockStats;
import com.yahoo.vespa.curator.stats.ThreadLockStats;
import com.yahoo.vespa.defaults.Defaults;
-import com.yahoo.vespa.flags.BooleanFlag;
import com.yahoo.vespa.flags.FlagSource;
-import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.orchestrator.Orchestrator;
@@ -146,7 +144,6 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye
private final Metric metric;
private final SecretStoreValidator secretStoreValidator;
private final ClusterReindexingStatusClient clusterReindexingStatusClient;
- private final BooleanFlag waitForAllConfigServersWhenDeletingApplication;
@Inject
public ApplicationRepository(TenantRepository tenantRepository,
@@ -203,7 +200,6 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye
this.metric = Objects.requireNonNull(metric);
this.secretStoreValidator = Objects.requireNonNull(secretStoreValidator);
this.clusterReindexingStatusClient = clusterReindexingStatusClient;
- this.waitForAllConfigServersWhenDeletingApplication = Flags.WAIT_FOR_ALL_CONFIG_SERVERS_WHEN_DELETING_APPLICATION.bindTo(flagSource);
}
public static class Builder {
@@ -524,9 +520,7 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye
}
Curator curator = tenantRepository.getCurator();
- Optional<CompletionWaiter> waiter = Optional.empty();
- if (waitForAllConfigServersWhenDeletingApplication.value())
- waiter = Optional.of(tenantApplications.createRemoveApplicationWaiter(applicationId));
+ CompletionWaiter waiter = tenantApplications.createRemoveApplicationWaiter(applicationId);
transaction.add(new ContainerEndpointsCache(tenant.getPath(), curator).delete(applicationId)); // TODO: Not unit tested
// Delete any application roles
transaction.add(new ApplicationRolesStore(curator, tenant.getPath()).delete(applicationId));
@@ -545,7 +539,7 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye
}
// Wait for app being removed on other servers
- waiter.ifPresent(w -> w.awaitCompletion(Duration.ofSeconds(30)));
+ waiter.awaitCompletion(Duration.ofSeconds(30));
return true;
} finally {
@@ -656,10 +650,12 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye
try {
Tenant tenant = getTenant(applicationId);
if (tenant == null) throw new NotFoundException("Tenant '" + applicationId.tenant() + "' not found");
- long sessionId = getSessionIdForApplication(tenant, applicationId);
- RemoteSession session = getRemoteSession(tenant, sessionId);
- SessionRepository sessionRepository = tenant.getSessionRepository();
- return sessionRepository.ensureApplicationLoaded(session).getForVersionOrLatest(version, clock.instant());
+
+ Optional<ApplicationSet> activeApplicationSet = tenant.getSessionRepository().getActiveApplicationSet(applicationId);
+ if (activeApplicationSet.isPresent())
+ return activeApplicationSet.get().getForVersionOrLatest(version, clock.instant());
+ else
+ throw new NotFoundException("Unknown application id '" + applicationId + "'");
} catch (NotFoundException e) {
log.log(Level.WARNING, "Failed getting application for '" + applicationId + "': " + e.getMessage());
throw e;
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java b/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java
index 24926f51b15..8a7738bc672 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.server;
import com.google.inject.Inject;
@@ -228,14 +228,17 @@ public class ConfigServerBootstrap extends AbstractComponent implements Runnable
// Returns the set of applications that failed to redeploy
private List<ApplicationId> redeployApplications(List<ApplicationId> applicationIds) throws InterruptedException {
- ExecutorService executor = Executors.newFixedThreadPool(configserverConfig.numParallelTenantLoaders(),
- new DaemonThreadFactory("redeploy apps"));
+ ExecutorService executor = Executors.newFixedThreadPool(configserverConfig.numRedeploymentThreads(),
+ new DaemonThreadFactory("redeploy-apps-"));
// Keep track of deployment per application
Map<ApplicationId, Future<?>> deployments = new HashMap<>();
log.log(Level.INFO, () -> "Redeploying " + applicationIds);
- applicationIds.forEach(appId -> deployments.put(appId,
- executor.submit(() -> applicationRepository.deployFromLocalActive(appId, true /* bootstrap */)
- .ifPresent(Deployment::activate))));
+ applicationIds.forEach(appId -> deployments.put(appId, executor.submit(() -> {
+ log.log(Level.INFO, () -> "Starting redeployment of " + appId);
+ applicationRepository.deployFromLocalActive(appId, true /* bootstrap */)
+ .ifPresent(Deployment::activate);
+ log.log(Level.INFO, () -> appId + " redeployed");
+ })));
List<ApplicationId> failedDeployments =
deployments.entrySet().stream()
@@ -253,7 +256,6 @@ public class ConfigServerBootstrap extends AbstractComponent implements Runnable
private Optional<ApplicationId> checkDeployment(ApplicationId applicationId, Future<?> future) {
try {
future.get();
- log.log(Level.INFO, () -> applicationId + " redeployed");
} catch (ExecutionException | InterruptedException e) {
if (e.getCause() instanceof TransientException) {
log.log(Level.INFO, "Redeploying " + applicationId +
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/application/ConfigConvergenceChecker.java b/configserver/src/main/java/com/yahoo/vespa/config/server/application/ConfigConvergenceChecker.java
index 523334dda7f..2dd655c91d8 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/application/ConfigConvergenceChecker.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/application/ConfigConvergenceChecker.java
@@ -20,8 +20,10 @@ import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient;
import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder;
import org.apache.hc.core5.concurrent.FutureCallback;
import org.apache.hc.core5.http.HttpStatus;
+import org.apache.hc.core5.http2.HttpVersionPolicy;
import org.apache.hc.core5.net.URIBuilder;
import org.apache.hc.core5.reactor.IOReactorConfig;
+import org.apache.hc.core5.util.TimeValue;
import org.apache.hc.core5.util.Timeout;
import java.io.IOException;
@@ -151,7 +153,6 @@ public class ConfigConvergenceChecker extends AbstractComponent {
/** Get service generation of service at given URL */
private CompletableFuture<Long> getServiceGeneration(CloseableHttpAsyncClient client, URI serviceUrl, Duration timeout) {
SimpleHttpRequest request = SimpleHttpRequests.get(createApiUri(serviceUrl));
- request.setHeader("Connection", "close");
request.setConfig(createRequestConfig(timeout));
// Ignoring returned Future object as we want to use the more flexible CompletableFuture instead
@@ -239,13 +240,13 @@ public class ConfigConvergenceChecker extends AbstractComponent {
PoolingAsyncClientConnectionManagerBuilder.create()
.setMaxConnTotal(100)
.setMaxConnPerRoute(10)
+ .setConnectionTimeToLive(TimeValue.ofMilliseconds(1))
.setTlsStrategy(tlsStrategy)
.build())
.setIOReactorConfig(IOReactorConfig.custom()
.setSoTimeout(Timeout.ofSeconds(2))
.build())
.setUserAgent("config-convergence-checker")
- .setConnectionReuseStrategy((request, response, context) -> false) // Disable connection reuse
.build();
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/DeployHandlerLogger.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/DeployHandlerLogger.java
index effa8cc510c..9900557f000 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/DeployHandlerLogger.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/DeployHandlerLogger.java
@@ -37,16 +37,31 @@ public class DeployHandlerLogger implements DeployLogger {
@Override
public void log(Level level, String message) {
- if ((level == Level.FINE || level == LogLevel.DEBUG || level == LogLevel.SPAM) && !verbose)
+ if (level.intValue() <= LogLevel.DEBUG.intValue() && !verbose)
return;
- String fullMsg = prefix + message;
+ logJson(level, message);
+ // Also tee to a normal log, Vespa log for example, but use level fine
+ log.log(Level.FINE, () -> prefix + message);
+ }
+
+ @Override
+ public void logApplicationPackage(Level level, String message) {
+ if (level.intValue() <= LogLevel.DEBUG.intValue() && !verbose)
+ return;
+
+ Cursor entry = logJson(level, message);
+ entry.setBool("applicationPackage", true);
+ // Also tee to a normal log, Vespa log for example, but use level fine
+ log.log(Level.FINE, () -> prefix + message);
+ }
+
+ private Cursor logJson(Level level, String message) {
Cursor entry = logroot.addObject();
entry.setLong("time", System.currentTimeMillis());
entry.setString("level", level.getName());
- entry.setString("message", fullMsg);
- // Also tee to a normal log, Vespa log for example, but use level fine
- log.log(Level.FINE, fullMsg);
+ entry.setString("message", message);
+ return entry;
}
public Slime slime() {
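A small sketch of the JSON entry shape that logJson and logApplicationPackage build above, using the same Slime calls seen in this file; the surrounding array, timestamp and message are assumed values for illustration.

    import com.yahoo.slime.Cursor;
    import com.yahoo.slime.JsonFormat;
    import com.yahoo.slime.Slime;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;

    class LogEntrySketch {
        static String exampleEntry() throws IOException {
            Slime slime = new Slime();
            Cursor entry = slime.setArray().addObject();      // stand-in for the logger's logroot
            entry.setLong("time", 1557306075000L);            // System.currentTimeMillis() in the real code
            entry.setString("level", "WARNING");
            entry.setString("message", "unused field 'foo'"); // made-up message
            entry.setBool("applicationPackage", true);        // only set by logApplicationPackage
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            new JsonFormat(false).encode(out, slime);         // same encoding call the removed render() used
            return out.toString();
        }
    }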
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
index b4dd81ad7aa..8c2be6a5b07 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
@@ -28,6 +28,7 @@ import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.Zone;
import com.yahoo.container.jdisc.secretstore.SecretStore;
import com.yahoo.vespa.config.server.tenant.SecretStoreExternalIdRetriever;
+import com.yahoo.vespa.flags.BooleanFlag;
import com.yahoo.vespa.flags.FetchVector;
import com.yahoo.vespa.flags.FlagSource;
import com.yahoo.vespa.flags.Flags;
@@ -178,6 +179,9 @@ public class ModelContextImpl implements ModelContext {
private final boolean tenantIamRole;
private final int maxActivationInhibitedOutOfSyncGroups;
private final ToIntFunction<ClusterSpec.Type> jvmOmitStackTraceInFastThrow;
+ private final boolean enableJdiscHttp2;
+ private final boolean enableCustomAclMapping;
+ private final int numDistributorStripes;
public FeatureFlags(FlagSource source, ApplicationId appId) {
this.dedicatedClusterControllerFlavor = parseDedicatedClusterControllerFlavor(flagValue(source, appId, Flags.DEDICATED_CLUSTER_CONTROLLER_FLAVOR));
@@ -202,6 +206,9 @@ public class ModelContextImpl implements ModelContext {
this.tenantIamRole = flagValue(source, appId.tenant(), Flags.TENANT_IAM_ROLE);
this.maxActivationInhibitedOutOfSyncGroups = flagValue(source, appId, Flags.MAX_ACTIVATION_INHIBITED_OUT_OF_SYNC_GROUPS);
this.jvmOmitStackTraceInFastThrow = type -> flagValueAsInt(source, appId, type, PermanentFlags.JVM_OMIT_STACK_TRACE_IN_FAST_THROW);
+ this.enableJdiscHttp2 = flagValue(source, appId, Flags.ENABLE_JDISC_HTTP2);
+ this.enableCustomAclMapping = flagValue(source, appId, Flags.ENABLE_CUSTOM_ACL_MAPPING);
+ this.numDistributorStripes = flagValue(source, appId, Flags.NUM_DISTRIBUTOR_STRIPES);
}
@Override public Optional<NodeResources> dedicatedClusterControllerFlavor() { return Optional.ofNullable(dedicatedClusterControllerFlavor); }
@@ -228,6 +235,9 @@ public class ModelContextImpl implements ModelContext {
@Override public String jvmOmitStackTraceInFastThrowOption(ClusterSpec.Type type) {
return translateJvmOmitStackTraceInFastThrowIntToString(jvmOmitStackTraceInFastThrow, type);
}
+ @Override public boolean enableJdiscHttp2() { return enableJdiscHttp2; }
+ @Override public boolean enableCustomAclMapping() { return enableCustomAclMapping; }
+ @Override public int numDistributorStripes() { return numDistributorStripes; }
private static <V> V flagValue(FlagSource source, ApplicationId appId, UnboundFlag<? extends V, ?, ?> flag) {
return flag.bindTo(source)
@@ -286,6 +296,7 @@ public class ModelContextImpl implements ModelContext {
private final List<TenantSecretStore> tenantSecretStores;
private final SecretStore secretStore;
private final StringFlag jvmGCOptionsFlag;
+ private final boolean allowDisableMtls;
public Properties(ApplicationId applicationId,
ConfigserverConfig configserverConfig,
@@ -320,6 +331,8 @@ public class ModelContextImpl implements ModelContext {
this.secretStore = secretStore;
this.jvmGCOptionsFlag = PermanentFlags.JVM_GC_OPTIONS.bindTo(flagSource)
.with(FetchVector.Dimension.APPLICATION_ID, applicationId.serializedForm());
+ this.allowDisableMtls = PermanentFlags.ALLOW_DISABLE_MTLS.bindTo(flagSource)
+ .with(FetchVector.Dimension.APPLICATION_ID, applicationId.serializedForm()).value();
}
@Override public ModelContext.FeatureFlags featureFlags() { return featureFlags; }
@@ -383,6 +396,10 @@ public class ModelContextImpl implements ModelContext {
return flagValueForClusterType(jvmGCOptionsFlag, clusterType);
}
+ @Override
+ public boolean allowDisableMtls() {
+ return allowDisableMtls;
+ }
public String flagValueForClusterType(StringFlag flag, Optional<ClusterSpec.Type> clusterType) {
return clusterType.map(type -> flag.with(CLUSTER_TYPE, type.name()))
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionUtil.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionUtil.java
index 289a03db3e7..8dc9272b1b7 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionUtil.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionUtil.java
@@ -79,7 +79,7 @@ public class FileDistributionUtil {
public Connection getCurrent() { return null; }
@Override
- public Connection setNewCurrentConnection() { return null; }
+ public Connection switchConnection() { return null; }
@Override
public int getSize() { return 0; }
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/LogRetriever.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/LogRetriever.java
index ee041ed3490..047953e4d4e 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/LogRetriever.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/LogRetriever.java
@@ -4,8 +4,9 @@ package com.yahoo.vespa.config.server.http;
import ai.vespa.util.http.hc4.VespaHttpClientBuilder;
import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.yolean.Exceptions;
-import org.apache.http.client.HttpClient;
+import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
+import org.apache.http.impl.client.CloseableHttpClient;
import java.io.IOException;
@@ -14,7 +15,7 @@ import java.io.IOException;
*/
public class LogRetriever {
- private final HttpClient httpClient = VespaHttpClientBuilder.create().build();
+ private final CloseableHttpClient httpClient = VespaHttpClientBuilder.create().build();
public HttpResponse getLogs(String logServerHostname) {
HttpGet get = new HttpGet(logServerHostname);
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/ProxyResponse.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/ProxyResponse.java
index 9dc26f3a601..120790da3af 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/ProxyResponse.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/ProxyResponse.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.config.server.http;
import com.yahoo.container.jdisc.HttpResponse;
import org.apache.http.Header;
+import org.apache.http.client.methods.CloseableHttpResponse;
import java.io.IOException;
import java.io.OutputStream;
@@ -16,9 +17,9 @@ import java.util.Optional;
*/
class ProxyResponse extends HttpResponse {
- private final org.apache.http.HttpResponse clientResponse;
+ private final CloseableHttpResponse clientResponse;
- ProxyResponse(org.apache.http.HttpResponse clientResponse) {
+ ProxyResponse(CloseableHttpResponse clientResponse) {
super(clientResponse.getStatusLine().getStatusCode());
this.clientResponse = clientResponse;
}
@@ -32,6 +33,9 @@ class ProxyResponse extends HttpResponse {
@Override
public void render(OutputStream outputStream) throws IOException {
- clientResponse.getEntity().writeTo(outputStream);
+ try (clientResponse) {
+ clientResponse.getEntity().writeTo(outputStream);
+ }
}
+
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/SecretStoreValidator.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/SecretStoreValidator.java
index 796c581b3c2..6b888014051 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/SecretStoreValidator.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/SecretStoreValidator.java
@@ -12,6 +12,7 @@ import com.yahoo.slime.SlimeUtils;
import com.yahoo.vespa.config.server.application.Application;
import com.yahoo.vespa.config.server.tenant.SecretStoreExternalIdRetriever;
import com.yahoo.yolean.Exceptions;
+import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.impl.client.CloseableHttpClient;
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/TesterClient.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/TesterClient.java
index 26765615233..4292a0806c2 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/TesterClient.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/TesterClient.java
@@ -4,12 +4,13 @@ package com.yahoo.vespa.config.server.http;
import ai.vespa.util.http.hc4.VespaHttpClientBuilder;
import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.yolean.Exceptions;
-import org.apache.http.client.HttpClient;
+import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.entity.ByteArrayEntity;
+import org.apache.http.impl.client.CloseableHttpClient;
import java.io.IOException;
import java.net.URI;
@@ -22,7 +23,7 @@ import java.util.logging.Logger;
*/
public class TesterClient {
- private final HttpClient httpClient = VespaHttpClientBuilder.create().build();
+ private final CloseableHttpClient httpClient = VespaHttpClientBuilder.create().build();
private static final Logger logger = Logger.getLogger(TesterClient.class.getName());
public HttpResponse getStatus(String testerHostname, int port) {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/DeploymentMetricsResponse.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/DeploymentMetricsResponse.java
index c503b60b3a3..8244a486f1c 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/DeploymentMetricsResponse.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/DeploymentMetricsResponse.java
@@ -2,28 +2,19 @@
package com.yahoo.vespa.config.server.http.v2;
import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.container.jdisc.HttpResponse;
+import com.yahoo.restapi.SlimeJsonResponse;
import com.yahoo.slime.Cursor;
-import com.yahoo.slime.JsonFormat;
-import com.yahoo.slime.Slime;
-import com.yahoo.vespa.config.server.http.HttpConfigResponse;
import com.yahoo.vespa.config.server.metrics.ClusterInfo;
import com.yahoo.vespa.config.server.metrics.DeploymentMetricsAggregator;
-import java.io.IOException;
-import java.io.OutputStream;
import java.util.Map;
/**
* @author olaa
*/
-public class DeploymentMetricsResponse extends HttpResponse {
-
- private final Slime slime = new Slime();
-
- public DeploymentMetricsResponse(int status, ApplicationId applicationId, Map<ClusterInfo, DeploymentMetricsAggregator> aggregatedMetrics) {
- super(status);
+public class DeploymentMetricsResponse extends SlimeJsonResponse {
+ public DeploymentMetricsResponse(ApplicationId applicationId, Map<ClusterInfo, DeploymentMetricsAggregator> aggregatedMetrics) {
Cursor application = slime.setObject();
application.setString("applicationId", applicationId.serializedForm());
@@ -41,16 +32,7 @@ public class DeploymentMetricsResponse extends HttpResponse {
aggregator.aggregateDocumentCount().ifPresent(documentCount -> metrics.setDouble("documentCount", documentCount));
aggregator.aggregateQueryLatency().ifPresent(queryLatency -> metrics.setDouble("queryLatency",queryLatency));
aggregator.aggregateFeedLatency().ifPresent(feedLatency -> metrics.setDouble("feedLatency", feedLatency));
+ aggregator.feedingBlocked().ifPresent(feedingBlocked -> metrics.setDouble("feedingBlocked", feedingBlocked));
}
}
-
- @Override
- public void render(OutputStream outputStream) throws IOException {
- new JsonFormat(false).encode(outputStream, slime);
- }
-
- @Override
- public String getContentType() {
- return HttpConfigResponse.JSON_CONTENT_TYPE;
- }
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ProtonMetricsResponse.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ProtonMetricsResponse.java
index 7f95d7e30d0..99b95f9244c 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ProtonMetricsResponse.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ProtonMetricsResponse.java
@@ -2,26 +2,17 @@
package com.yahoo.vespa.config.server.http.v2;
import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.container.jdisc.HttpResponse;
+import com.yahoo.restapi.SlimeJsonResponse;
import com.yahoo.slime.Cursor;
-import com.yahoo.slime.JsonFormat;
-import com.yahoo.slime.Slime;
-import com.yahoo.vespa.config.server.http.HttpConfigResponse;
import com.yahoo.vespa.config.server.metrics.ProtonMetricsAggregator;
-import java.io.IOException;
-import java.io.OutputStream;
import java.util.Map;
-public class ProtonMetricsResponse extends HttpResponse {
-
- private final Slime slime = new Slime();
-
- /**
- * @author akvalsvik
- */
- public ProtonMetricsResponse(int status, ApplicationId applicationId, Map<String, ProtonMetricsAggregator> aggregatedProtonMetrics) {
- super(status);
+/**
+ * @author akvalsvik
+ */
+public class ProtonMetricsResponse extends SlimeJsonResponse {
+ public ProtonMetricsResponse(ApplicationId applicationId, Map<String, ProtonMetricsAggregator> aggregatedProtonMetrics) {
Cursor application = slime.setObject();
application.setString("applicationId", applicationId.serializedForm());
@@ -41,12 +32,4 @@ public class ProtonMetricsResponse extends HttpResponse {
metrics.setDouble("resourceMemoryUsageAverage", aggregator.aggregateResourceMemoryUsageAverage());
}
}
-
- @Override
- public void render(OutputStream outputStream) throws IOException {
- new JsonFormat(false).encode(outputStream, slime);
- }
-
- @Override
- public String getContentType() { return HttpConfigResponse.JSON_CONTENT_TYPE; }
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java
index 067be8102b8..2903f0fadcc 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java
@@ -46,7 +46,8 @@ public class ClusterDeploymentMetricsRetriever {
private static final String VESPA_CONTAINER = "vespa.container";
private static final String VESPA_QRSERVER = "vespa.qrserver";
private static final String VESPA_DISTRIBUTOR = "vespa.distributor";
- private static final List<String> WANTED_METRIC_SERVICES = List.of(VESPA_CONTAINER, VESPA_QRSERVER, VESPA_DISTRIBUTOR);
+ private static final String VESPA_SEARCHNODE = "vespa.searchnode";
+ private static final List<String> WANTED_METRIC_SERVICES = List.of(VESPA_CONTAINER, VESPA_QRSERVER, VESPA_DISTRIBUTOR, VESPA_SEARCHNODE);
private static final ExecutorService executor = Executors.newFixedThreadPool(10, new DaemonThreadFactory("cluster-deployment-metrics-retriever-"));
@@ -121,7 +122,7 @@ public class ClusterDeploymentMetricsRetriever {
DeploymentMetricsAggregator deploymentMetricsAggregator = clusterMetricsMap.computeIfAbsent(clusterInfo, c -> new DeploymentMetricsAggregator());
switch (serviceName) {
- case "vespa.container":
+ case VESPA_CONTAINER:
deploymentMetricsAggregator.addContainerLatency(
values.field("query_latency.sum").asDouble(),
values.field("query_latency.count").asDouble());
@@ -129,14 +130,17 @@ public class ClusterDeploymentMetricsRetriever {
values.field("feed.latency.sum").asDouble(),
values.field("feed.latency.count").asDouble());
break;
- case "vespa.qrserver":
+ case VESPA_QRSERVER:
deploymentMetricsAggregator.addQrLatency(
values.field("query_latency.sum").asDouble(),
values.field("query_latency.count").asDouble());
break;
- case "vespa.distributor":
+ case VESPA_DISTRIBUTOR:
deploymentMetricsAggregator.addDocumentCount(values.field("vds.distributor.docsstored.average").asDouble());
break;
+ case VESPA_SEARCHNODE:
+ deploymentMetricsAggregator.addFeedingBlocked((int) values.field("content.proton.resource_usage.feeding_blocked.last").asLong());
+ break;
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java
index a4066fc212d..916f5ff5613 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java
@@ -13,6 +13,7 @@ public class DeploymentMetricsAggregator {
private LatencyMetrics qr;
private LatencyMetrics container;
private Double documentCount;
+ private Integer feedingBlocked;
public synchronized DeploymentMetricsAggregator addFeedLatency(double sum, double count) {
this.feed = combineLatency(this.feed, sum, count);
@@ -34,9 +35,13 @@ public class DeploymentMetricsAggregator {
return this;
}
+ public synchronized DeploymentMetricsAggregator addFeedingBlocked(int feedingBlocked) {
+ this.feedingBlocked = Math.max(Optional.ofNullable(this.feedingBlocked).orElse(0), feedingBlocked);
+ return this;
+ }
+
public Optional<Double> aggregateFeedLatency() {
return Optional.ofNullable(feed).map(m -> m.latencySum / m.latencyCount).filter(num -> !num.isNaN());
-
}
public Optional<Double> aggregateFeedRate() {
@@ -61,6 +66,10 @@ public class DeploymentMetricsAggregator {
return Optional.ofNullable(documentCount);
}
+ public Optional<Integer> feedingBlocked() {
+ return Optional.ofNullable(feedingBlocked);
+ }
+
private LatencyMetrics combineLatency(LatencyMetrics metricsOrNull, double sum, double count) {
var metrics = Optional.ofNullable(metricsOrNull).orElseGet(LatencyMetrics::new);
metrics.latencyCount += count;
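The new feedingBlocked aggregation above keeps the maximum value seen across search nodes, so the aggregate is 1 as soon as any node reports blocked feeding. A tiny standalone sketch of that rule (class name is illustrative):

    import java.util.Optional;

    class FeedingBlockedSketch {
        private Integer feedingBlocked;

        FeedingBlockedSketch add(int value) {
            // Max-aggregation: once any node reports 1, the aggregate stays 1.
            feedingBlocked = Math.max(Optional.ofNullable(feedingBlocked).orElse(0), value);
            return this;
        }

        Optional<Integer> feedingBlocked() { return Optional.ofNullable(feedingBlocked); }
    }

For example, new FeedingBlockedSketch().add(0).add(1).add(0).feedingBlocked() yields Optional[1].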
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsRetriever.java b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsRetriever.java
index 43847cd9c3d..7fc2c47c06c 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsRetriever.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsRetriever.java
@@ -32,7 +32,7 @@ public class DeploymentMetricsRetriever {
public DeploymentMetricsResponse getMetrics(Application application) {
var hosts = getHostsOfApplication(application);
var clusterMetrics = metricsRetriever.requestMetricsGroupedByCluster(hosts);
- return new DeploymentMetricsResponse(200, application.getId(), clusterMetrics);
+ return new DeploymentMetricsResponse(application.getId(), clusterMetrics);
}
private static Collection<URI> getHostsOfApplication(Application application) {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ProtonMetricsRetriever.java b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ProtonMetricsRetriever.java
index ae70225e8a4..5078fba8b38 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ProtonMetricsRetriever.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ProtonMetricsRetriever.java
@@ -24,7 +24,7 @@ public class ProtonMetricsRetriever {
public ProtonMetricsResponse getMetrics(Application application) {
var hosts = getHostsOfApplication(application);
var clusterMetrics = metricsRetriever.requestMetricsGroupedByCluster(hosts);
- return new ProtonMetricsResponse(200, application.getId(), clusterMetrics);
+ return new ProtonMetricsResponse(application.getId(), clusterMetrics);
}
private static Collection<URI> getHostsOfApplication(Application application) {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java
index 1734b84ec43..181ed880fd7 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java
@@ -621,7 +621,7 @@ public class SessionRepository {
sessionZKClient.createNewSession(clock.instant());
CompletionWaiter waiter = sessionZKClient.getUploadWaiter();
LocalSession session = new LocalSession(tenantName, sessionId, app, sessionZKClient);
- waiter.awaitCompletion(Duration.ofSeconds(Math.min(60, timeoutBudget.timeLeft().getSeconds())));
+ waiter.awaitCompletion(Duration.ofSeconds(Math.min(120, timeoutBudget.timeLeft().getSeconds())));
addLocalSession(session);
return session;
} catch (Exception e) {
diff --git a/configserver/src/main/resources/configserver-app/services.xml b/configserver/src/main/resources/configserver-app/services.xml
index 8c792089810..fd8bda8f305 100644
--- a/configserver/src/main/resources/configserver-app/services.xml
+++ b/configserver/src/main/resources/configserver-app/services.xml
@@ -63,31 +63,26 @@
<component id="com.yahoo.vespa.orchestrator.controller.RetryingClusterControllerClientFactory" bundle="orchestrator" />
<component id="com.yahoo.vespa.orchestrator.OrchestratorImpl" bundle="orchestrator" />
- <rest-api path="orchestrator/v1/suspensions/applications" jersey2="true">
- <components bundle="orchestrator">
- <package>com.yahoo.vespa.orchestrator.resources.appsuspension</package>
- </components>
- </rest-api>
- <rest-api path="orchestrator/v1/health" jersey2="true">
- <components bundle="orchestrator">
- <package>com.yahoo.vespa.orchestrator.resources.health</package>
- </components>
- </rest-api>
- <rest-api path="orchestrator/v1/hosts" jersey2="true">
- <components bundle="orchestrator">
- <package>com.yahoo.vespa.orchestrator.resources.host</package>
- </components>
- </rest-api>
- <handler id="com.yahoo.vespa.orchestrator.resources.HostSuspensionHandler" bundle="orchestrator">
+ <handler id="com.yahoo.vespa.orchestrator.resources.ApplicationSuspensionRequestHandler" bundle="orchestrator">
+ <binding>http://*/orchestrator/v1/suspensions/applications</binding>
+ <binding>http://*/orchestrator/v1/suspensions/applications/*</binding>
+ </handler>
+ <handler id="com.yahoo.vespa.orchestrator.resources.HealthRequestHandler" bundle="orchestrator">
+ <binding>http://*/orchestrator/v1/health</binding>
+ <binding>http://*/orchestrator/v1/health/*</binding>
+ </handler>
+ <handler id="com.yahoo.vespa.orchestrator.resources.HostRequestHandler" bundle="orchestrator">
+ <binding>http://*/orchestrator/v1/hosts</binding>
+ <binding>http://*/orchestrator/v1/hosts/*</binding>
+ </handler>
+ <handler id="com.yahoo.vespa.orchestrator.resources.HostSuspensionRequestHandler" bundle="orchestrator">
<binding>http://*/orchestrator/v1/suspensions/hosts</binding>
<binding>http://*/orchestrator/v1/suspensions/hosts/*</binding>
</handler>
- <rest-api path="orchestrator/v1/instances" jersey2="true">
- <components bundle="orchestrator">
- <package>com.yahoo.vespa.orchestrator.resources.instance</package>
- </components>
- </rest-api>
-
+ <handler id="com.yahoo.vespa.orchestrator.resources.InstanceRequestHandler" bundle="orchestrator">
+ <binding>http://*/orchestrator/v1/instances</binding>
+ <binding>http://*/orchestrator/v1/instances/*</binding>
+ </handler>
<handler id="com.yahoo.vespa.serviceview.StateRequestHandler" bundle="configserver">
<binding>http://*/serviceview/v1</binding>
<binding>http://*/serviceview/v1/*</binding>
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java
index 8df0e066cf4..5aa3e196222 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java
@@ -60,7 +60,7 @@ public class ClusterDeploymentMetricsRetrieverTest {
Map<ClusterInfo, DeploymentMetricsAggregator> aggregatorMap = new ClusterDeploymentMetricsRetriever().requestMetricsGroupedByCluster(hosts);
compareAggregators(
- new DeploymentMetricsAggregator().addDocumentCount(6000.0),
+ new DeploymentMetricsAggregator().addDocumentCount(6000.0).addFeedingBlocked(0),
aggregatorMap.get(expectedContentCluster)
);
@@ -77,11 +77,11 @@ public class ClusterDeploymentMetricsRetrieverTest {
}
private String containerMetrics() throws IOException {
- return Files.readString(Path.of("src/test/resources/metrics/container_metrics"));
+ return Files.readString(Path.of("src/test/resources/metrics/container_metrics.json"));
}
private String contentMetrics() throws IOException {
- return Files.readString(Path.of("src/test/resources/metrics/content_metrics"));
+ return Files.readString(Path.of("src/test/resources/metrics/content_metrics.json"));
}
// Same tolerance value as used internally in MetricsAggregator.isZero
@@ -95,6 +95,7 @@ public class ClusterDeploymentMetricsRetrieverTest {
compareOptionals(expected.aggregateFeedRate(), actual.aggregateFeedRate(), assertDoubles);
compareOptionals(expected.aggregateQueryLatency(), actual.aggregateQueryLatency(), assertDoubles);
compareOptionals(expected.aggregateFeedLatency(), actual.aggregateFeedLatency(), assertDoubles);
+ assertEquals(expected.feedingBlocked(), actual.feedingBlocked());
}
@SuppressWarnings("OptionalUsedAsFieldOrParameterType")
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterProtonMetricsRetrieverTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterProtonMetricsRetrieverTest.java
index 3c53d85c56e..6834fbf29a1 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterProtonMetricsRetrieverTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterProtonMetricsRetrieverTest.java
@@ -72,7 +72,7 @@ public class ClusterProtonMetricsRetrieverTest {
}
private String nodeMetrics(String extension) throws IOException {
- return Files.readString(Path.of("src/test/resources/metrics/node_metrics" + extension));
+ return Files.readString(Path.of("src/test/resources/metrics/node_metrics" + extension + ".json"));
}
// Same tolerance value as used internally in MetricsAggregator.isZero
diff --git a/configserver/src/test/resources/metrics/container_metrics b/configserver/src/test/resources/metrics/container_metrics
deleted file mode 100644
index 48e00f69897..00000000000
--- a/configserver/src/test/resources/metrics/container_metrics
+++ /dev/null
@@ -1,53 +0,0 @@
-{
- "services": [
- {
- "name":"vespa.container",
- "timestamp": 1557306075,
- "metrics": [
- {
- "values": {
- "queries.rate": 23.0,
- "query_latency.sum": 2000,
- "document.count": 300000,
- "feed.rate": 23.0,
- "write_latency.sum": 2000
- },
- "dimensions": {
- "clustertype": "container",
- "clusterid": "container_cluster_id"
- }
- },
- {
- "values": {
- "query_latency.count": 43.0,
- "query_latency.sum": 3000,
- "feed.latency.count": 43.0,
- "feed.latency.sum": 3000
-
- },
- "dimensions": {
- "clustertype": "container",
- "clusterid": "container_cluster_id"
- }
- }
- ]
- },
-
- {
- "name":"vespa.qrserver",
- "timestamp": 1557306075,
- "metrics": [
- {
- "values": {
- "query_latency.count": 43.0,
- "query_latency.sum": 3000
- },
- "dimensions": {
- "clustertype": "container",
- "clusterid": "container_cluster_id"
- }
- }
- ]
- }
- ]
-} \ No newline at end of file
diff --git a/configserver/src/test/resources/metrics/container_metrics.json b/configserver/src/test/resources/metrics/container_metrics.json
new file mode 100644
index 00000000000..f8ea5591c24
--- /dev/null
+++ b/configserver/src/test/resources/metrics/container_metrics.json
@@ -0,0 +1,51 @@
+{
+ "services": [
+ {
+ "name": "vespa.container",
+ "timestamp": 1557306075,
+ "metrics": [
+ {
+ "values": {
+ "queries.rate": 23.0,
+ "query_latency.sum": 2000,
+ "document.count": 300000,
+ "feed.rate": 23.0,
+ "write_latency.sum": 2000
+ },
+ "dimensions": {
+ "clustertype": "container",
+ "clusterid": "container_cluster_id"
+ }
+ },
+ {
+ "values": {
+ "query_latency.count": 43.0,
+ "query_latency.sum": 3000,
+ "feed.latency.count": 43.0,
+ "feed.latency.sum": 3000
+ },
+ "dimensions": {
+ "clustertype": "container",
+ "clusterid": "container_cluster_id"
+ }
+ }
+ ]
+ },
+ {
+ "name": "vespa.qrserver",
+ "timestamp": 1557306075,
+ "metrics": [
+ {
+ "values": {
+ "query_latency.count": 43.0,
+ "query_latency.sum": 3000
+ },
+ "dimensions": {
+ "clustertype": "container",
+ "clusterid": "container_cluster_id"
+ }
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/configserver/src/test/resources/metrics/content_metrics b/configserver/src/test/resources/metrics/content_metrics
deleted file mode 100644
index 4a1deced181..00000000000
--- a/configserver/src/test/resources/metrics/content_metrics
+++ /dev/null
@@ -1,20 +0,0 @@
-{
- "services": [
- {
- "name":"vespa.distributor",
- "timestamp": 1557306075,
- "metrics": [
- {
- "values": {
- "vds.distributor.docsstored.average": 3000
-
- },
- "dimensions": {
- "clustertype": "content",
- "clusterid": "content_cluster_id"
- }
- }
- ]
- }
- ]
-}
\ No newline at end of file
diff --git a/configserver/src/test/resources/metrics/content_metrics.json b/configserver/src/test/resources/metrics/content_metrics.json
new file mode 100644
index 00000000000..2f88dc91709
--- /dev/null
+++ b/configserver/src/test/resources/metrics/content_metrics.json
@@ -0,0 +1,36 @@
+{
+ "services": [
+ {
+ "name": "vespa.distributor",
+ "timestamp": 1557306075,
+ "metrics": [
+ {
+ "values": {
+ "vds.distributor.docsstored.average": 3000
+ },
+ "dimensions": {
+ "clustertype": "content",
+ "clusterid": "content_cluster_id"
+ }
+ }
+ ]
+ },
+ {
+ "name": "vespa.searchnode",
+ "timestamp": 1557306075,
+ "metrics": [
+ {
+ "values": {
+ "content.proton.resource_usage.disk.average": 0.6716298061329,
+ "content.proton.resource_usage.memory.average": 0.7172795340244,
+ "content.proton.resource_usage.feeding_blocked.last": 0
+ },
+ "dimensions": {
+ "clustertype": "content",
+ "clusterid": "content_cluster_id"
+ }
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/configserver/src/test/resources/metrics/node_metrics_1 b/configserver/src/test/resources/metrics/node_metrics_1.json
index 4c75f0c4c02..4c75f0c4c02 100644
--- a/configserver/src/test/resources/metrics/node_metrics_1
+++ b/configserver/src/test/resources/metrics/node_metrics_1.json
diff --git a/configserver/src/test/resources/metrics/node_metrics_2 b/configserver/src/test/resources/metrics/node_metrics_2.json
index d916cec54fd..d916cec54fd 100644
--- a/configserver/src/test/resources/metrics/node_metrics_2
+++ b/configserver/src/test/resources/metrics/node_metrics_2.json
diff --git a/container-core/OWNERS b/container-core/OWNERS
index ba027370393..c16e87d4c9e 100644
--- a/container-core/OWNERS
+++ b/container-core/OWNERS
@@ -1,3 +1,4 @@
+arnej27959
bjorncs
bratseth
gjoranv
diff --git a/container-core/abi-spec.json b/container-core/abi-spec.json
index da8ed609dfc..5f9185ebef6 100644
--- a/container-core/abi-spec.json
+++ b/container-core/abi-spec.json
@@ -478,6 +478,37 @@
"public static final java.lang.String FULL_COVERAGE"
]
},
+ "com.yahoo.container.jdisc.AclMapping$Action": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public static com.yahoo.container.jdisc.AclMapping$Action custom(java.lang.String)",
+ "public java.lang.String name()",
+ "public boolean equals(java.lang.Object)",
+ "public int hashCode()",
+ "public java.lang.String toString()"
+ ],
+ "fields": [
+ "public static final com.yahoo.container.jdisc.AclMapping$Action READ",
+ "public static final com.yahoo.container.jdisc.AclMapping$Action WRITE"
+ ]
+ },
+ "com.yahoo.container.jdisc.AclMapping": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public abstract com.yahoo.container.jdisc.AclMapping$Action get(com.yahoo.container.jdisc.RequestView)"
+ ],
+ "fields": []
+ },
"com.yahoo.container.jdisc.AsyncHttpResponse": {
"superClass": "com.yahoo.container.jdisc.HttpResponse",
"interfaces": [],
@@ -545,6 +576,33 @@
],
"fields": []
},
+ "com.yahoo.container.jdisc.HttpMethodAclMapping$Builder": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public com.yahoo.container.jdisc.HttpMethodAclMapping$Builder override(com.yahoo.jdisc.http.HttpRequest$Method, com.yahoo.container.jdisc.AclMapping$Action)",
+ "public com.yahoo.container.jdisc.HttpMethodAclMapping build()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.container.jdisc.HttpMethodAclMapping": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.yahoo.container.jdisc.AclMapping"
+ ],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public com.yahoo.container.jdisc.AclMapping$Action get(com.yahoo.container.jdisc.RequestView)",
+ "public static com.yahoo.container.jdisc.HttpMethodAclMapping$Builder standard()"
+ ],
+ "fields": []
+ },
"com.yahoo.container.jdisc.HttpRequest$Builder": {
"superClass": "java.lang.Object",
"interfaces": [],
@@ -596,6 +654,38 @@
],
"fields": []
},
+ "com.yahoo.container.jdisc.HttpRequestBuilder": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public static com.yahoo.container.jdisc.HttpRequestBuilder create(com.yahoo.jdisc.http.HttpRequest$Method, java.lang.String)",
+ "public com.yahoo.container.jdisc.HttpRequestBuilder withQueryParameter(java.lang.String, java.lang.String)",
+ "public com.yahoo.container.jdisc.HttpRequestBuilder withHeader(java.lang.String, java.lang.String)",
+ "public com.yahoo.container.jdisc.HttpRequestBuilder withRequestContent(java.io.InputStream)",
+ "public com.yahoo.container.jdisc.HttpRequestBuilder withScheme(java.lang.String)",
+ "public com.yahoo.container.jdisc.HttpRequestBuilder withHostname(java.lang.String)",
+ "public com.yahoo.container.jdisc.HttpRequest build()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.container.jdisc.HttpRequestHandler": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.yahoo.jdisc.handler.RequestHandler"
+ ],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public com.yahoo.container.jdisc.RequestHandlerSpec requestHandlerSpec()"
+ ],
+ "fields": []
+ },
"com.yahoo.container.jdisc.HttpResponse": {
"superClass": "java.lang.Object",
"interfaces": [],
@@ -606,6 +696,7 @@
"methods": [
"public void <init>(int)",
"public abstract void render(java.io.OutputStream)",
+ "public long maxPendingBytes()",
"public int getStatus()",
"public void setStatus(int)",
"public com.yahoo.jdisc.HeaderFields headers()",
@@ -688,6 +779,34 @@
],
"fields": []
},
+ "com.yahoo.container.jdisc.RequestHandlerSpec$Builder": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public com.yahoo.container.jdisc.RequestHandlerSpec$Builder withAclMapping(com.yahoo.container.jdisc.AclMapping)",
+ "public com.yahoo.container.jdisc.RequestHandlerSpec build()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.container.jdisc.RequestHandlerSpec": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public com.yahoo.container.jdisc.AclMapping aclMapping()",
+ "public static com.yahoo.container.jdisc.RequestHandlerSpec$Builder builder()"
+ ],
+ "fields": [
+ "public static final java.lang.String ATTRIBUTE_NAME",
+ "public static final com.yahoo.container.jdisc.RequestHandlerSpec DEFAULT_INSTANCE"
+ ]
+ },
"com.yahoo.container.jdisc.RequestHandlerTestDriver$MockResponseHandler": {
"superClass": "java.lang.Object",
"interfaces": [
@@ -733,6 +852,20 @@
],
"fields": []
},
+ "com.yahoo.container.jdisc.RequestView": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public abstract com.yahoo.jdisc.http.HttpRequest$Method method()",
+ "public abstract java.net.URI uri()"
+ ],
+ "fields": []
+ },
"com.yahoo.container.jdisc.ThreadedHttpRequestHandler$LazyContentChannel": {
"superClass": "java.lang.Object",
"interfaces": [
@@ -751,7 +884,9 @@
},
"com.yahoo.container.jdisc.ThreadedHttpRequestHandler": {
"superClass": "com.yahoo.container.jdisc.ThreadedRequestHandler",
- "interfaces": [],
+ "interfaces": [
+ "com.yahoo.container.jdisc.HttpRequestHandler"
+ ],
"attributes": [
"public",
"abstract"
@@ -907,6 +1042,7 @@
"public com.yahoo.jdisc.http.ConnectorConfig$Builder secureRedirect(com.yahoo.jdisc.http.ConnectorConfig$SecureRedirect$Builder)",
"public com.yahoo.jdisc.http.ConnectorConfig$Builder maxRequestsPerConnection(int)",
"public com.yahoo.jdisc.http.ConnectorConfig$Builder maxConnectionLife(double)",
+ "public com.yahoo.jdisc.http.ConnectorConfig$Builder http2Enabled(boolean)",
"public final boolean dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)",
"public final java.lang.String getDefMd5()",
"public final java.lang.String getDefName()",
@@ -1228,7 +1364,8 @@
"public com.yahoo.jdisc.http.ConnectorConfig$ProxyProtocol proxyProtocol()",
"public com.yahoo.jdisc.http.ConnectorConfig$SecureRedirect secureRedirect()",
"public int maxRequestsPerConnection()",
- "public double maxConnectionLife()"
+ "public double maxConnectionLife()",
+ "public boolean http2Enabled()"
],
"fields": [
"public static final java.lang.String CONFIG_DEF_MD5",
@@ -1463,7 +1600,8 @@
],
"fields": [
"public static final enum com.yahoo.jdisc.http.HttpRequest$Version HTTP_1_0",
- "public static final enum com.yahoo.jdisc.http.HttpRequest$Version HTTP_1_1"
+ "public static final enum com.yahoo.jdisc.http.HttpRequest$Version HTTP_1_1",
+ "public static final enum com.yahoo.jdisc.http.HttpRequest$Version HTTP_2_0"
]
},
"com.yahoo.jdisc.http.HttpRequest": {
@@ -1498,6 +1636,7 @@
"public boolean isChunked()",
"public boolean hasChunkedResponse()",
"public boolean isKeepAlive()",
+ "public long relativeCreatedAtNanoTime()",
"public java.security.Principal getUserPrincipal()",
"public void setUserPrincipal(java.security.Principal)",
"public static com.yahoo.jdisc.http.HttpRequest newServerRequest(com.yahoo.jdisc.service.CurrentContainer, java.net.URI)",
@@ -2015,6 +2154,7 @@
"public abstract void setHeaders(java.lang.String, java.lang.String)",
"public abstract void setHeaders(java.lang.String, java.util.List)",
"public int getIntHeader(java.lang.String)",
+ "public com.yahoo.container.jdisc.RequestView asRequestView()",
"public java.util.List getCookies()",
"public void setCookies(java.util.List)",
"public long getConnectedAt(java.util.concurrent.TimeUnit)",
@@ -2340,6 +2480,308 @@
],
"fields": []
},
+ "com.yahoo.metrics.simple.Bucket": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public void <init>(long, long)",
+ "public java.util.Set entrySet()",
+ "public java.util.Collection getAllMetricNames()",
+ "public java.util.Collection getValuesForMetric(java.lang.String)",
+ "public java.util.Map getMapForMetric(java.lang.String)",
+ "public java.util.Map getValuesByMetricName()",
+ "public java.lang.String toString()",
+ "public long getFromMillis()",
+ "public long getToMillis()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.metrics.simple.Counter": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void add()",
+ "public void add(long)",
+ "public void add(com.yahoo.metrics.simple.Point)",
+ "public void add(long, com.yahoo.metrics.simple.Point)",
+ "public com.yahoo.metrics.simple.PointBuilder builder()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.metrics.simple.Gauge": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void sample(double)",
+ "public void sample(double, com.yahoo.metrics.simple.Point)",
+ "public com.yahoo.metrics.simple.PointBuilder builder()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.metrics.simple.Identifier": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(java.lang.String, com.yahoo.metrics.simple.Point)",
+ "public int hashCode()",
+ "public boolean equals(java.lang.Object)",
+ "public java.lang.String toString()",
+ "public java.lang.String getName()",
+ "public com.yahoo.metrics.simple.Point getLocation()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.metrics.simple.Measurement": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(java.lang.Number)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.metrics.simple.MetricManager": {
+ "superClass": "com.yahoo.component.AbstractComponent",
+ "interfaces": [
+ "com.yahoo.container.di.componentgraph.Provider"
+ ],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(com.yahoo.metrics.ManagerConfig)",
+ "public void deconstruct()",
+ "public com.yahoo.metrics.simple.MetricReceiver get()",
+ "public bridge synthetic java.lang.Object get()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.metrics.simple.MetricReceiver$MockReceiver": {
+ "superClass": "com.yahoo.metrics.simple.MetricReceiver",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public com.yahoo.metrics.simple.Bucket getSnapshot()",
+ "public com.yahoo.metrics.simple.Point point(java.lang.String, java.lang.String)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.metrics.simple.MetricReceiver": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(com.yahoo.concurrent.ThreadLocalDirectory, java.util.concurrent.atomic.AtomicReference)",
+ "public void update(com.yahoo.metrics.simple.Sample)",
+ "public com.yahoo.metrics.simple.Counter declareCounter(java.lang.String)",
+ "public com.yahoo.metrics.simple.Counter declareCounter(java.lang.String, com.yahoo.metrics.simple.Point)",
+ "public com.yahoo.metrics.simple.Gauge declareGauge(java.lang.String)",
+ "public com.yahoo.metrics.simple.Gauge declareGauge(java.lang.String, com.yahoo.metrics.simple.Point)",
+ "public com.yahoo.metrics.simple.Gauge declareGauge(java.lang.String, java.util.Optional, com.yahoo.metrics.simple.MetricSettings)",
+ "public com.yahoo.metrics.simple.PointBuilder pointBuilder()",
+ "public com.yahoo.metrics.simple.Bucket getSnapshot()"
+ ],
+ "fields": [
+ "public static final com.yahoo.metrics.simple.MetricReceiver nullImplementation"
+ ]
+ },
+ "com.yahoo.metrics.simple.MetricSettings$Builder": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public com.yahoo.metrics.simple.MetricSettings$Builder histogram(boolean)",
+ "public com.yahoo.metrics.simple.MetricSettings build()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.metrics.simple.MetricSettings": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [],
+ "fields": []
+ },
+ "com.yahoo.metrics.simple.Point": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.yahoo.jdisc.Metric$Context"
+ ],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [
+ "public void <init>(java.util.Map)",
+ "public static com.yahoo.metrics.simple.Point emptyPoint()",
+ "public boolean equals(java.lang.Object)",
+ "public int hashCode()",
+ "public java.lang.String toString()",
+ "public java.util.List location()",
+ "public java.util.List dimensions()",
+ "public int dimensionality()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.metrics.simple.PointBuilder$Discriminator": {
+ "superClass": "java.lang.Enum",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "final",
+ "enum"
+ ],
+ "methods": [
+ "public static com.yahoo.metrics.simple.PointBuilder$Discriminator[] values()",
+ "public static com.yahoo.metrics.simple.PointBuilder$Discriminator valueOf(java.lang.String)"
+ ],
+ "fields": [
+ "public static final enum com.yahoo.metrics.simple.PointBuilder$Discriminator LONG",
+ "public static final enum com.yahoo.metrics.simple.PointBuilder$Discriminator DOUBLE",
+ "public static final enum com.yahoo.metrics.simple.PointBuilder$Discriminator STRING"
+ ]
+ },
+ "com.yahoo.metrics.simple.PointBuilder": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [
+ "public com.yahoo.metrics.simple.PointBuilder set(java.lang.String, long)",
+ "public com.yahoo.metrics.simple.PointBuilder set(java.lang.String, double)",
+ "public com.yahoo.metrics.simple.PointBuilder set(java.lang.String, java.lang.String)",
+ "public com.yahoo.metrics.simple.Point build()",
+ "public java.lang.String toString()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.metrics.simple.Sample": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(com.yahoo.metrics.simple.Measurement, com.yahoo.metrics.simple.Identifier, com.yahoo.metrics.simple.UntypedMetric$AssumedType)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.metrics.simple.UnitTestSetup": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public com.yahoo.metrics.simple.Bucket getUpdatedSnapshot()",
+ "public com.yahoo.metrics.simple.MetricReceiver getReceiver()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.metrics.simple.UntypedMetric$AssumedType": {
+ "superClass": "java.lang.Enum",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "final",
+ "enum"
+ ],
+ "methods": [
+ "public static com.yahoo.metrics.simple.UntypedMetric$AssumedType[] values()",
+ "public static com.yahoo.metrics.simple.UntypedMetric$AssumedType valueOf(java.lang.String)"
+ ],
+ "fields": [
+ "public static final enum com.yahoo.metrics.simple.UntypedMetric$AssumedType NONE",
+ "public static final enum com.yahoo.metrics.simple.UntypedMetric$AssumedType GAUGE",
+ "public static final enum com.yahoo.metrics.simple.UntypedMetric$AssumedType COUNTER"
+ ]
+ },
+ "com.yahoo.metrics.simple.UntypedMetric": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public boolean isCounter()",
+ "public long getCount()",
+ "public double getLast()",
+ "public double getMax()",
+ "public double getMin()",
+ "public double getSum()",
+ "public org.HdrHistogram.DoubleHistogram getHistogram()",
+ "public java.lang.String toString()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.metrics.simple.Value$Discriminator": {
+ "superClass": "java.lang.Enum",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "final",
+ "enum"
+ ],
+ "methods": [
+ "public static com.yahoo.metrics.simple.Value$Discriminator[] values()",
+ "public static com.yahoo.metrics.simple.Value$Discriminator valueOf(java.lang.String)"
+ ],
+ "fields": [
+ "public static final enum com.yahoo.metrics.simple.Value$Discriminator LONG",
+ "public static final enum com.yahoo.metrics.simple.Value$Discriminator DOUBLE",
+ "public static final enum com.yahoo.metrics.simple.Value$Discriminator STRING"
+ ]
+ },
+ "com.yahoo.metrics.simple.Value": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "abstract"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public long longValue()",
+ "public double doubleValue()",
+ "public java.lang.String stringValue()",
+ "public abstract com.yahoo.metrics.simple.Value$Discriminator getType()",
+ "public static com.yahoo.metrics.simple.Value of(long)",
+ "public static com.yahoo.metrics.simple.Value of(double)",
+ "public static com.yahoo.metrics.simple.Value of(java.lang.String)"
+ ],
+ "fields": []
+ },
"com.yahoo.processing.IllegalInputException": {
"superClass": "java.lang.IllegalArgumentException",
"interfaces": [],
diff --git a/container-core/pom.xml b/container-core/pom.xml
index d0722a081d1..2b87d79daa4 100644
--- a/container-core/pom.xml
+++ b/container-core/pom.xml
@@ -24,11 +24,6 @@
</dependency>
<dependency>
<groupId>com.yahoo.vespa</groupId>
- <artifactId>container-di</artifactId>
- <version>${project.version}</version>
- </dependency>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
<artifactId>container-documentapi</artifactId>
<version>${project.version}</version>
</dependency>
@@ -149,14 +144,8 @@
<artifactId>httpclient</artifactId>
</dependency>
<dependency>
- <groupId>org.junit.jupiter</groupId>
- <artifactId>junit-jupiter</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.junit.vintage</groupId>
- <artifactId>junit-vintage-engine</artifactId>
- <scope>test</scope>
+ <groupId>org.hdrhistogram</groupId>
+ <artifactId>HdrHistogram</artifactId>
</dependency>
<dependency>
<!-- TODO Vespa 8: stop providing org.json:json -->
@@ -254,11 +243,6 @@
<artifactId>jaxb-api</artifactId>
<scope>provided</scope>
</dependency>
- <dependency>
- <groupId>org.eclipse.jetty</groupId>
- <artifactId>jetty-servlet</artifactId>
- <scope>provided</scope>
- </dependency>
<!-- TEST scope -->
<dependency>
@@ -319,25 +303,20 @@
<scope>test</scope>
</dependency>
<dependency>
- <groupId>org.springframework</groupId>
- <artifactId>spring-test</artifactId>
+ <groupId>org.apache.httpcomponents.client5</groupId>
+ <artifactId>httpclient5</artifactId>
<scope>test</scope>
+ <exclusions>
+ <exclusion>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ </exclusion>
+ </exclusions>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-jar-plugin</artifactId>
- <configuration>
- <archive>
- <manifestEntries>
- <Bundle-SymbolicName>${project.artifactId}</Bundle-SymbolicName>
- </manifestEntries>
- </archive>
- </configuration>
- </plugin>
- <plugin>
<groupId>com.yahoo.vespa</groupId>
<artifactId>config-class-plugin</artifactId>
<version>${project.version}</version>
diff --git a/container-di/src/main/java/com/yahoo/container/bundle/BundleInstantiationSpecification.java b/container-core/src/main/java/com/yahoo/container/bundle/BundleInstantiationSpecification.java
index 0fb8a99a957..0fb8a99a957 100644
--- a/container-di/src/main/java/com/yahoo/container/bundle/BundleInstantiationSpecification.java
+++ b/container-core/src/main/java/com/yahoo/container/bundle/BundleInstantiationSpecification.java
diff --git a/container-di/src/main/java/com/yahoo/container/bundle/MockBundle.java b/container-core/src/main/java/com/yahoo/container/bundle/MockBundle.java
index a6524b41886..a6524b41886 100644
--- a/container-di/src/main/java/com/yahoo/container/bundle/MockBundle.java
+++ b/container-core/src/main/java/com/yahoo/container/bundle/MockBundle.java
diff --git a/container-di/src/main/java/com/yahoo/container/bundle/package-info.java b/container-core/src/main/java/com/yahoo/container/bundle/package-info.java
index c9707371626..c9707371626 100644
--- a/container-di/src/main/java/com/yahoo/container/bundle/package-info.java
+++ b/container-core/src/main/java/com/yahoo/container/bundle/package-info.java
diff --git a/container-di/src/main/java/com/yahoo/container/di/CloudSubscriberFactory.java b/container-core/src/main/java/com/yahoo/container/di/CloudSubscriberFactory.java
index 065733a719a..065733a719a 100644
--- a/container-di/src/main/java/com/yahoo/container/di/CloudSubscriberFactory.java
+++ b/container-core/src/main/java/com/yahoo/container/di/CloudSubscriberFactory.java
diff --git a/container-di/src/main/java/com/yahoo/container/di/ComponentDeconstructor.java b/container-core/src/main/java/com/yahoo/container/di/ComponentDeconstructor.java
index 4e3881a6fe6..4e3881a6fe6 100644
--- a/container-di/src/main/java/com/yahoo/container/di/ComponentDeconstructor.java
+++ b/container-core/src/main/java/com/yahoo/container/di/ComponentDeconstructor.java
diff --git a/container-di/src/main/java/com/yahoo/container/di/ConfigRetriever.java b/container-core/src/main/java/com/yahoo/container/di/ConfigRetriever.java
index a7ff6c46a8b..a7ff6c46a8b 100644
--- a/container-di/src/main/java/com/yahoo/container/di/ConfigRetriever.java
+++ b/container-core/src/main/java/com/yahoo/container/di/ConfigRetriever.java
diff --git a/container-di/src/main/java/com/yahoo/container/di/Container.java b/container-core/src/main/java/com/yahoo/container/di/Container.java
index 82c7f65bc2a..82c7f65bc2a 100644
--- a/container-di/src/main/java/com/yahoo/container/di/Container.java
+++ b/container-core/src/main/java/com/yahoo/container/di/Container.java
diff --git a/container-di/src/main/java/com/yahoo/container/di/Osgi.java b/container-core/src/main/java/com/yahoo/container/di/Osgi.java
index 940986e2f38..940986e2f38 100644
--- a/container-di/src/main/java/com/yahoo/container/di/Osgi.java
+++ b/container-core/src/main/java/com/yahoo/container/di/Osgi.java
diff --git a/container-di/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentGraph.java b/container-core/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentGraph.java
index fef2809f236..71d0e539b5a 100644
--- a/container-di/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentGraph.java
+++ b/container-core/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentGraph.java
@@ -15,9 +15,6 @@ import com.yahoo.config.ConfigInstance;
import com.yahoo.container.di.componentgraph.Provider;
import com.yahoo.container.di.componentgraph.cycle.CycleFinder;
import com.yahoo.container.di.componentgraph.cycle.Graph;
-
-import java.util.Collections;
-import java.util.logging.Level;
import com.yahoo.vespa.config.ConfigKey;
import java.lang.annotation.Annotation;
@@ -28,12 +25,14 @@ import java.lang.reflect.WildcardType;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
+import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
+import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
@@ -259,7 +258,7 @@ public class ComponentGraph {
if (component.isEmpty()) {
Object instance;
try {
- log.log(Level.FINE, "Trying the fallback injector to create" + messageForNoGlobalComponent(clazz, node));
+ log.log(Level.INFO, "Trying the fallback injector to create" + messageForNoGlobalComponent(clazz, node));
instance = fallbackInjector.getInstance(key);
} catch (ConfigurationException e) {
throw removeStackTrace(new IllegalStateException(
diff --git a/container-di/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentNode.java b/container-core/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentNode.java
index b6fa4241e26..b6fa4241e26 100644
--- a/container-di/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentNode.java
+++ b/container-core/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentNode.java
diff --git a/container-di/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentRegistryNode.java b/container-core/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentRegistryNode.java
index 429052c0039..429052c0039 100644
--- a/container-di/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentRegistryNode.java
+++ b/container-core/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentRegistryNode.java
diff --git a/container-di/src/main/java/com/yahoo/container/di/componentgraph/core/Exceptions.java b/container-core/src/main/java/com/yahoo/container/di/componentgraph/core/Exceptions.java
index b0d9d1f3921..b0d9d1f3921 100644
--- a/container-di/src/main/java/com/yahoo/container/di/componentgraph/core/Exceptions.java
+++ b/container-core/src/main/java/com/yahoo/container/di/componentgraph/core/Exceptions.java
diff --git a/container-di/src/main/java/com/yahoo/container/di/componentgraph/core/GuiceNode.java b/container-core/src/main/java/com/yahoo/container/di/componentgraph/core/GuiceNode.java
index 61d0d9bba8d..61d0d9bba8d 100644
--- a/container-di/src/main/java/com/yahoo/container/di/componentgraph/core/GuiceNode.java
+++ b/container-core/src/main/java/com/yahoo/container/di/componentgraph/core/GuiceNode.java
diff --git a/container-di/src/main/java/com/yahoo/container/di/componentgraph/core/JerseyNode.java b/container-core/src/main/java/com/yahoo/container/di/componentgraph/core/JerseyNode.java
index 0f8aa678934..0f8aa678934 100644
--- a/container-di/src/main/java/com/yahoo/container/di/componentgraph/core/JerseyNode.java
+++ b/container-core/src/main/java/com/yahoo/container/di/componentgraph/core/JerseyNode.java
diff --git a/container-di/src/main/java/com/yahoo/container/di/componentgraph/core/Keys.java b/container-core/src/main/java/com/yahoo/container/di/componentgraph/core/Keys.java
index be80fc1616d..be80fc1616d 100644
--- a/container-di/src/main/java/com/yahoo/container/di/componentgraph/core/Keys.java
+++ b/container-core/src/main/java/com/yahoo/container/di/componentgraph/core/Keys.java
diff --git a/container-di/src/main/java/com/yahoo/container/di/componentgraph/core/Node.java b/container-core/src/main/java/com/yahoo/container/di/componentgraph/core/Node.java
index 3afc8bb817c..3afc8bb817c 100644
--- a/container-di/src/main/java/com/yahoo/container/di/componentgraph/core/Node.java
+++ b/container-core/src/main/java/com/yahoo/container/di/componentgraph/core/Node.java
diff --git a/container-di/src/main/java/com/yahoo/container/di/componentgraph/core/package-info.java b/container-core/src/main/java/com/yahoo/container/di/componentgraph/core/package-info.java
index e9b5b14d5d8..e9b5b14d5d8 100644
--- a/container-di/src/main/java/com/yahoo/container/di/componentgraph/core/package-info.java
+++ b/container-core/src/main/java/com/yahoo/container/di/componentgraph/core/package-info.java
diff --git a/container-di/src/main/java/com/yahoo/container/di/componentgraph/cycle/CycleFinder.java b/container-core/src/main/java/com/yahoo/container/di/componentgraph/cycle/CycleFinder.java
index 327949bb8d0..327949bb8d0 100644
--- a/container-di/src/main/java/com/yahoo/container/di/componentgraph/cycle/CycleFinder.java
+++ b/container-core/src/main/java/com/yahoo/container/di/componentgraph/cycle/CycleFinder.java
diff --git a/container-di/src/main/java/com/yahoo/container/di/componentgraph/cycle/Graph.java b/container-core/src/main/java/com/yahoo/container/di/componentgraph/cycle/Graph.java
index 946330668bd..946330668bd 100644
--- a/container-di/src/main/java/com/yahoo/container/di/componentgraph/cycle/Graph.java
+++ b/container-core/src/main/java/com/yahoo/container/di/componentgraph/cycle/Graph.java
diff --git a/container-di/src/main/java/com/yahoo/container/di/config/ResolveDependencyException.java b/container-core/src/main/java/com/yahoo/container/di/config/ResolveDependencyException.java
index c88f851909c..c88f851909c 100644
--- a/container-di/src/main/java/com/yahoo/container/di/config/ResolveDependencyException.java
+++ b/container-core/src/main/java/com/yahoo/container/di/config/ResolveDependencyException.java
diff --git a/container-di/src/main/java/com/yahoo/container/di/config/RestApiContext.java b/container-core/src/main/java/com/yahoo/container/di/config/RestApiContext.java
index bfb9a8f9160..bfb9a8f9160 100644
--- a/container-di/src/main/java/com/yahoo/container/di/config/RestApiContext.java
+++ b/container-core/src/main/java/com/yahoo/container/di/config/RestApiContext.java
diff --git a/container-di/src/main/java/com/yahoo/container/di/config/Subscriber.java b/container-core/src/main/java/com/yahoo/container/di/config/Subscriber.java
index 60207447bfd..60207447bfd 100644
--- a/container-di/src/main/java/com/yahoo/container/di/config/Subscriber.java
+++ b/container-core/src/main/java/com/yahoo/container/di/config/Subscriber.java
diff --git a/container-di/src/main/java/com/yahoo/container/di/config/SubscriberFactory.java b/container-core/src/main/java/com/yahoo/container/di/config/SubscriberFactory.java
index c1c36a1b3de..c1c36a1b3de 100644
--- a/container-di/src/main/java/com/yahoo/container/di/config/SubscriberFactory.java
+++ b/container-core/src/main/java/com/yahoo/container/di/config/SubscriberFactory.java
diff --git a/container-di/src/main/java/com/yahoo/container/di/config/package-info.java b/container-core/src/main/java/com/yahoo/container/di/config/package-info.java
index b8f65b1c3c8..b8f65b1c3c8 100644
--- a/container-di/src/main/java/com/yahoo/container/di/config/package-info.java
+++ b/container-core/src/main/java/com/yahoo/container/di/config/package-info.java
diff --git a/container-di/src/main/java/com/yahoo/container/di/osgi/BundleClasses.java b/container-core/src/main/java/com/yahoo/container/di/osgi/BundleClasses.java
index bca3ed73d0b..bca3ed73d0b 100644
--- a/container-di/src/main/java/com/yahoo/container/di/osgi/BundleClasses.java
+++ b/container-core/src/main/java/com/yahoo/container/di/osgi/BundleClasses.java
diff --git a/container-di/src/main/java/com/yahoo/container/di/osgi/OsgiUtil.java b/container-core/src/main/java/com/yahoo/container/di/osgi/OsgiUtil.java
index e1854155e5b..e1854155e5b 100644
--- a/container-di/src/main/java/com/yahoo/container/di/osgi/OsgiUtil.java
+++ b/container-core/src/main/java/com/yahoo/container/di/osgi/OsgiUtil.java
diff --git a/container-di/src/main/java/com/yahoo/container/di/osgi/package-info.java b/container-core/src/main/java/com/yahoo/container/di/osgi/package-info.java
index 9685cf571bd..9685cf571bd 100644
--- a/container-di/src/main/java/com/yahoo/container/di/osgi/package-info.java
+++ b/container-core/src/main/java/com/yahoo/container/di/osgi/package-info.java
diff --git a/container-core/src/main/java/com/yahoo/container/handler/LogHandler.java b/container-core/src/main/java/com/yahoo/container/handler/LogHandler.java
index 991cd83ffa8..f1ba68ff3c8 100644
--- a/container-core/src/main/java/com/yahoo/container/handler/LogHandler.java
+++ b/container-core/src/main/java/com/yahoo/container/handler/LogHandler.java
@@ -11,6 +11,7 @@ import com.yahoo.jdisc.handler.CompletionHandler;
import com.yahoo.jdisc.handler.ContentChannel;
import java.io.IOException;
+import java.io.InterruptedIOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.time.Instant;
@@ -23,7 +24,7 @@ import java.util.logging.Level;
public class LogHandler extends ThreadedHttpRequestHandler {
private final LogReader logReader;
- private static final long MB = 1024*1024;
+ private static final long MB = 1024 * 1024;
@Inject
public LogHandler(Executor executor, LogHandlerConfig config) {
@@ -45,11 +46,11 @@ public class LogHandler extends ThreadedHttpRequestHandler {
return new AsyncHttpResponse(200) {
@Override
+ public long maxPendingBytes() { return MB; }
+ @Override
public void render(OutputStream output, ContentChannel networkChannel, CompletionHandler handler) {
- try {
- OutputStream blockingOutput = new MaxPendingContentChannelOutputStream(networkChannel, 1*MB);
- logReader.writeLogs(blockingOutput, from, to, hostname);
- blockingOutput.close();
+ try (output) {
+ logReader.writeLogs(output, from, to, hostname);
}
catch (Throwable t) {
log.log(Level.WARNING, "Failed reading logs from " + from + " to " + to, t);
@@ -62,74 +63,5 @@ public class LogHandler extends ThreadedHttpRequestHandler {
}
- private static class MaxPendingContentChannelOutputStream extends ContentChannelOutputStream {
- private final long maxPending;
- private final AtomicLong sent = new AtomicLong(0);
- private final AtomicLong acked = new AtomicLong(0);
-
- public MaxPendingContentChannelOutputStream(ContentChannel endpoint, long maxPending) {
- super(endpoint);
- this.maxPending = maxPending;
- }
-
- private long pendingBytes() {
- return sent.get() - acked.get();
- }
-
- private class TrackCompletition implements CompletionHandler {
- private final long written;
- private final AtomicBoolean replied = new AtomicBoolean(false);
- TrackCompletition(long written) {
- this.written = written;
- sent.addAndGet(written);
- }
- @Override
- public void completed() {
- if (!replied.getAndSet(true)) {
- acked.addAndGet(written);
- }
- }
-
- @Override
- public void failed(Throwable t) {
- if (!replied.getAndSet(true)) {
- acked.addAndGet(written);
- }
- }
- }
- @Override
- public void send(ByteBuffer src) throws IOException {
- try {
- stallWhilePendingAbove(maxPending);
- } catch (InterruptedException ignored) {
- throw new IOException("Interrupted waiting for IO");
- }
- CompletionHandler pendingTracker = new TrackCompletition(src.remaining());
- try {
- send(src, pendingTracker);
- } catch (Throwable throwable) {
- pendingTracker.failed(throwable);
- throw throwable;
- }
- }
-
- private void stallWhilePendingAbove(long pending) throws InterruptedException {
- while (pendingBytes() > pending) {
- Thread.sleep(1);
- }
- }
-
- @Override
- public void flush() throws IOException {
- super.flush();
- try {
- stallWhilePendingAbove(0);
- }
- catch (InterruptedException e) {
- throw new IOException("Interrupted waiting for IO");
- }
- }
-
- }
}
diff --git a/container-core/src/main/java/com/yahoo/container/jdisc/AclMapping.java b/container-core/src/main/java/com/yahoo/container/jdisc/AclMapping.java
new file mode 100644
index 00000000000..e7c3d71ba44
--- /dev/null
+++ b/container-core/src/main/java/com/yahoo/container/jdisc/AclMapping.java
@@ -0,0 +1,50 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.container.jdisc;
+
+import java.util.Objects;
+
+/**
+ * Mapping from request to action
+ *
+ * @author mortent
+ */
+public interface AclMapping {
+ class Action {
+ public static final Action READ = new Action("read");
+ public static final Action WRITE = new Action("write");
+ private final String name;
+ public static Action custom(String name) {
+ return new Action(name);
+ }
+ private Action(String name) {
+            if (Objects.requireNonNull(name).isBlank()) {
+ throw new IllegalArgumentException("Name cannot be blank");
+ }
+ this.name = Objects.requireNonNull(name);
+ }
+ public String name() { return name; }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ Action action = (Action) o;
+ return Objects.equals(name, action.name);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(name);
+ }
+
+ @Override
+ public String toString() {
+ return "Action{" +
+ "name='" + name + '\'' +
+ '}';
+ }
+ }
+
+ Action get(RequestView requestView);
+}
diff --git a/container-core/src/main/java/com/yahoo/container/jdisc/HttpMethodAclMapping.java b/container-core/src/main/java/com/yahoo/container/jdisc/HttpMethodAclMapping.java
new file mode 100644
index 00000000000..2ca19f689ee
--- /dev/null
+++ b/container-core/src/main/java/com/yahoo/container/jdisc/HttpMethodAclMapping.java
@@ -0,0 +1,71 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.container.jdisc;
+
+import com.yahoo.jdisc.http.HttpRequest;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+
+import static com.yahoo.jdisc.http.HttpRequest.Method.CONNECT;
+import static com.yahoo.jdisc.http.HttpRequest.Method.DELETE;
+import static com.yahoo.jdisc.http.HttpRequest.Method.GET;
+import static com.yahoo.jdisc.http.HttpRequest.Method.HEAD;
+import static com.yahoo.jdisc.http.HttpRequest.Method.OPTIONS;
+import static com.yahoo.jdisc.http.HttpRequest.Method.PATCH;
+import static com.yahoo.jdisc.http.HttpRequest.Method.POST;
+import static com.yahoo.jdisc.http.HttpRequest.Method.PUT;
+import static com.yahoo.jdisc.http.HttpRequest.Method.TRACE;
+
+/**
+ * Acl Mapping based on http method.
+ * Defaults to:<br>
+ * {GET, HEAD, OPTIONS} -&gt; READ<br>
+ * {POST, DELETE, PUT, PATCH, CONNECT, TRACE} -&gt; WRITE
+ * @author mortent
+ */
+public class HttpMethodAclMapping implements AclMapping {
+
+ private final Map<HttpRequest.Method, Action> mappings;
+
+ private HttpMethodAclMapping(Map<HttpRequest.Method, Action> overrides) {
+ HashMap<HttpRequest.Method, Action> tmp = new HashMap<>(defaultMappings());
+ tmp.putAll(overrides);
+ mappings = Map.copyOf(tmp);
+ }
+
+ private static Map<HttpRequest.Method, Action> defaultMappings() {
+ return Map.of(GET, Action.READ,
+ HEAD, Action.READ,
+ OPTIONS, Action.READ,
+ POST, Action.WRITE,
+ DELETE, Action.WRITE,
+ PUT, Action.WRITE,
+ PATCH, Action.WRITE,
+ CONNECT, Action.WRITE,
+ TRACE, Action.WRITE);
+ }
+
+ @Override
+ public Action get(RequestView requestView) {
+ return Optional.ofNullable(mappings.get(requestView.method()))
+ .orElseThrow(() -> new IllegalArgumentException("Illegal request method: " + requestView.method()));
+ }
+
+ public static HttpMethodAclMapping.Builder standard() {
+ return new HttpMethodAclMapping.Builder();
+ }
+
+ public static class Builder {
+ private final Map<com.yahoo.jdisc.http.HttpRequest.Method, Action> overrides = new HashMap<>();
+ public HttpMethodAclMapping.Builder override(HttpRequest.Method method, Action action) {
+ overrides.put(method, action);
+ return this;
+ }
+ public HttpMethodAclMapping build() {
+ return new HttpMethodAclMapping(overrides);
+ }
+ }
+}
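
Taken together, the two files above define a small ACL-mapping API: Action is a value type with the built-in READ and WRITE constants plus custom(...) names, and HttpMethodAclMapping supplies the method-based defaults with per-method overrides. A minimal sketch of how it might be wired up (the "audit" action name is hypothetical; the types and signatures are the ones added in this diff):

import com.yahoo.container.jdisc.AclMapping;
import com.yahoo.container.jdisc.HttpMethodAclMapping;
import com.yahoo.jdisc.http.HttpRequest;

class AclMappingExample {

    static AclMapping mapping() {
        // Hypothetical custom action; GET/HEAD/OPTIONS keep the default READ
        // mapping, the remaining methods keep WRITE, and DELETE is overridden.
        AclMapping.Action audit = AclMapping.Action.custom("audit");
        return HttpMethodAclMapping.standard()
                .override(HttpRequest.Method.DELETE, audit)
                .build();
    }
}
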
diff --git a/container-core/src/main/java/com/yahoo/container/jdisc/HttpRequestBuilder.java b/container-core/src/main/java/com/yahoo/container/jdisc/HttpRequestBuilder.java
new file mode 100644
index 00000000000..3f70f4b75bb
--- /dev/null
+++ b/container-core/src/main/java/com/yahoo/container/jdisc/HttpRequestBuilder.java
@@ -0,0 +1,71 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.container.jdisc;
+
+import com.yahoo.jdisc.http.HttpRequest.Method;
+
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+/**
+ * Builder for creating a {@link HttpRequest} to be used in a test context
+ *
+ * @author bjorncs
+ */
+public class HttpRequestBuilder {
+ private final Method method;
+ private final String path;
+ private final Map<String, List<String>> queryParameters = new TreeMap<>();
+ private final Map<String, String> headers = new TreeMap<>();
+ private String scheme;
+ private String hostname;
+ private InputStream content;
+
+ private HttpRequestBuilder(Method method, String path) {
+ this.method = method;
+ this.path = path;
+ }
+
+ public static HttpRequestBuilder create(Method method, String path) { return new HttpRequestBuilder(method, path); }
+
+ public HttpRequestBuilder withQueryParameter(String name, String value) {
+ this.queryParameters.computeIfAbsent(name, ignored -> new ArrayList<>()).add(value);
+ return this;
+ }
+
+ public HttpRequestBuilder withHeader(String name, String value) { this.headers.put(name, value); return this; }
+
+ public HttpRequestBuilder withRequestContent(InputStream content) { this.content = content; return this; }
+
+ public HttpRequestBuilder withScheme(String scheme) { this.scheme = scheme; return this; }
+
+ public HttpRequestBuilder withHostname(String hostname) { this.hostname = hostname; return this; }
+
+ public HttpRequest build() {
+ String scheme = this.scheme != null ? this.scheme : "http";
+ String hostname = this.hostname != null ? this.hostname : "localhost";
+ StringBuilder uriBuilder = new StringBuilder(scheme).append("://").append(hostname).append(path);
+ if (queryParameters.size() > 0) {
+ uriBuilder.append('?');
+ queryParameters.forEach((name, values) -> {
+ for (String value : values) {
+ uriBuilder.append(name).append('=').append(value).append('&');
+ }
+ });
+ int lastIndex = uriBuilder.length() - 1;
+ if (uriBuilder.charAt(lastIndex) == '&') {
+ uriBuilder.setLength(lastIndex);
+ }
+ }
+ HttpRequest request;
+ if (content != null) {
+ request = HttpRequest.createTestRequest(uriBuilder.toString(), method, content);
+ } else {
+ request = HttpRequest.createTestRequest(uriBuilder.toString(), method);
+ }
+ headers.forEach((name, value) -> request.getJDiscRequest().headers().put(name, value));
+ return request;
+ }
+}
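
A sketch of the kind of test-request construction this builder is meant for; the path, parameter and header values below are invented for illustration, while the builder calls are the ones defined above:

import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpRequestBuilder;
import com.yahoo.jdisc.http.HttpRequest.Method;

class HttpRequestBuilderExample {

    static HttpRequest exampleRequest() {
        // Defaults to scheme "http" and hostname "localhost", so this builds
        // http://localhost/state/v1/metrics?format=json with one header set.
        return HttpRequestBuilder.create(Method.GET, "/state/v1/metrics")
                .withQueryParameter("format", "json")
                .withHeader("X-Example", "test")
                .build();
    }
}
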
diff --git a/container-core/src/main/java/com/yahoo/container/jdisc/HttpRequestHandler.java b/container-core/src/main/java/com/yahoo/container/jdisc/HttpRequestHandler.java
new file mode 100644
index 00000000000..f322c9c5b6f
--- /dev/null
+++ b/container-core/src/main/java/com/yahoo/container/jdisc/HttpRequestHandler.java
@@ -0,0 +1,20 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.container.jdisc;
+
+import com.yahoo.jdisc.handler.RequestHandler;
+
+/**
+ * Extends a request handler with an HTTP-specific {@link RequestHandlerSpec}.
+ *
+ * @author mortent
+ */
+public interface HttpRequestHandler extends RequestHandler {
+
+ /**
+ * @return handler specification
+ */
+ default RequestHandlerSpec requestHandlerSpec() {
+ return RequestHandlerSpec.DEFAULT_INSTANCE;
+ }
+}
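
Since ThreadedHttpRequestHandler is changed further down to implement this interface, a concrete handler can advertise a non-default spec simply by overriding requestHandlerSpec(). A hypothetical handler sketch, assuming only the APIs introduced in this diff:

import com.yahoo.container.jdisc.AclMapping;
import com.yahoo.container.jdisc.HttpMethodAclMapping;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.container.jdisc.RequestHandlerSpec;
import com.yahoo.container.jdisc.ThreadedHttpRequestHandler;
import com.yahoo.jdisc.http.HttpRequest.Method;

import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.Executor;

class ExampleHandler extends ThreadedHttpRequestHandler {

    ExampleHandler(Executor executor) { super(executor); }

    @Override
    public HttpResponse handle(HttpRequest request) {
        return new HttpResponse(200) {
            @Override
            public void render(OutputStream out) throws IOException {
                out.write("ok\n".getBytes(StandardCharsets.UTF_8));
            }
        };
    }

    @Override
    public RequestHandlerSpec requestHandlerSpec() {
        // Hypothetical policy: POST to this handler only requires READ access.
        return RequestHandlerSpec.builder()
                .withAclMapping(HttpMethodAclMapping.standard()
                        .override(Method.POST, AclMapping.Action.READ)
                        .build())
                .build();
    }
}
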
diff --git a/container-core/src/main/java/com/yahoo/container/jdisc/HttpResponse.java b/container-core/src/main/java/com/yahoo/container/jdisc/HttpResponse.java
index a6042c541c0..5df40a90fe6 100644
--- a/container-core/src/main/java/com/yahoo/container/jdisc/HttpResponse.java
+++ b/container-core/src/main/java/com/yahoo/container/jdisc/HttpResponse.java
@@ -40,6 +40,9 @@ public abstract class HttpResponse {
/** Marshals this response to the network layer. The caller is responsible for flushing and closing outputStream. */
public abstract void render(OutputStream outputStream) throws IOException;
+    /** The number of content bytes this response may have in flight (if positive) before response rendering blocks. */
+ public long maxPendingBytes() { return -1; }
+
/**
* Returns the numeric HTTP status code, e.g. 200, 404 and so on.
*
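
The LogHandler change earlier in this diff is the first user of the new hook: the response declares a positive limit, and ThreadedHttpRequestHandler (changed below) then wraps the content channel in a stream that blocks rendering while more than that many bytes are unacknowledged by the network layer. A minimal sketch of a response opting in, with the 1 MiB limit chosen arbitrarily:

import com.yahoo.container.jdisc.HttpResponse;

import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

class BoundedResponse extends HttpResponse {

    BoundedResponse() { super(200); }

    @Override
    public long maxPendingBytes() { return 1024 * 1024; } // allow at most 1 MiB in flight

    @Override
    public void render(OutputStream out) throws IOException {
        // With a positive maxPendingBytes(), writes block instead of buffering
        // an unbounded amount of data when the client reads slowly.
        out.write("large streaming body ...".getBytes(StandardCharsets.UTF_8));
    }
}
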
diff --git a/container-core/src/main/java/com/yahoo/container/jdisc/RequestHandlerSpec.java b/container-core/src/main/java/com/yahoo/container/jdisc/RequestHandlerSpec.java
new file mode 100644
index 00000000000..0ebb0bb99d9
--- /dev/null
+++ b/container-core/src/main/java/com/yahoo/container/jdisc/RequestHandlerSpec.java
@@ -0,0 +1,46 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.container.jdisc;
+
+import java.util.Objects;
+
+/**
+ * A specification provided by a request handler.
+ * Available through the request context attribute named by {@link #ATTRIBUTE_NAME}.
+ *
+ * @author mortent
+ */
+public class RequestHandlerSpec {
+
+ public static final String ATTRIBUTE_NAME = RequestHandlerSpec.class.getName();
+ public static final RequestHandlerSpec DEFAULT_INSTANCE = RequestHandlerSpec.builder().build();
+
+ private final AclMapping aclMapping;
+
+ private RequestHandlerSpec(AclMapping aclMapping) {
+ this.aclMapping = aclMapping;
+ }
+
+ public AclMapping aclMapping() {
+ return aclMapping;
+ }
+
+    public static Builder builder() {
+ return new Builder();
+ }
+
+ public static class Builder {
+
+ private AclMapping aclMapping = HttpMethodAclMapping.standard().build();
+
+ public Builder withAclMapping(AclMapping aclMapping) {
+ this.aclMapping = Objects.requireNonNull(aclMapping);
+ return this;
+ }
+
+ public RequestHandlerSpec build() {
+ return new RequestHandlerSpec(aclMapping);
+ }
+ }
+}
+
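
How the spec reaches its consumers is not shown in this part of the diff; assuming it is published in the jdisc request context under ATTRIBUTE_NAME, as the constant suggests, a filter or security module could look it up roughly like this (the fallback to DEFAULT_INSTANCE is an assumption as well):

import com.yahoo.container.jdisc.AclMapping;
import com.yahoo.container.jdisc.RequestHandlerSpec;
import com.yahoo.container.jdisc.RequestView;
import com.yahoo.jdisc.Request;

class RequestHandlerSpecLookup {

    static AclMapping.Action actionFor(Request request, RequestView view) {
        // Assumption: the container stores the handler's spec in the request
        // context map under RequestHandlerSpec.ATTRIBUTE_NAME.
        Object attribute = request.context().get(RequestHandlerSpec.ATTRIBUTE_NAME);
        RequestHandlerSpec spec = attribute instanceof RequestHandlerSpec
                ? (RequestHandlerSpec) attribute
                : RequestHandlerSpec.DEFAULT_INSTANCE;
        return spec.aclMapping().get(view);
    }
}
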
diff --git a/container-core/src/main/java/com/yahoo/container/jdisc/RequestView.java b/container-core/src/main/java/com/yahoo/container/jdisc/RequestView.java
new file mode 100644
index 00000000000..51a5fdc8959
--- /dev/null
+++ b/container-core/src/main/java/com/yahoo/container/jdisc/RequestView.java
@@ -0,0 +1,18 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.container.jdisc;
+
+import com.yahoo.jdisc.http.HttpRequest;
+
+import java.net.URI;
+
+/**
+ * Read-only view of the request
+ *
+ * @author mortent
+ */
+public interface RequestView {
+ HttpRequest.Method method();
+
+ URI uri();
+}
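
Method-based mapping is only the default; a custom AclMapping can use both accessors of this view. A hypothetical path-based rule:

import com.yahoo.container.jdisc.AclMapping;
import com.yahoo.container.jdisc.RequestView;
import com.yahoo.jdisc.http.HttpRequest;

class PathBasedAclMapping implements AclMapping {

    @Override
    public Action get(RequestView request) {
        // Hypothetical rule: everything under /status is a read, regardless of method.
        if (request.uri().getPath().startsWith("/status")) return Action.READ;
        return request.method() == HttpRequest.Method.GET ? Action.READ : Action.WRITE;
    }
}
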
diff --git a/container-core/src/main/java/com/yahoo/container/jdisc/ThreadedHttpRequestHandler.java b/container-core/src/main/java/com/yahoo/container/jdisc/ThreadedHttpRequestHandler.java
index 9687697d6f6..be708f2fc94 100644
--- a/container-core/src/main/java/com/yahoo/container/jdisc/ThreadedHttpRequestHandler.java
+++ b/container-core/src/main/java/com/yahoo/container/jdisc/ThreadedHttpRequestHandler.java
@@ -9,6 +9,10 @@ import com.yahoo.jdisc.handler.CompletionHandler;
import com.yahoo.jdisc.handler.ContentChannel;
import com.yahoo.jdisc.handler.UnsafeContentInputStream;
import com.yahoo.jdisc.handler.ResponseHandler;
+
+import java.io.InterruptedIOException;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
import java.util.logging.Level;
import java.io.IOException;
@@ -28,7 +32,7 @@ import java.util.logging.Logger;
* @author Steinar Knutsen
* @author bratseth
*/
-public abstract class ThreadedHttpRequestHandler extends ThreadedRequestHandler {
+public abstract class ThreadedHttpRequestHandler extends ThreadedRequestHandler implements HttpRequestHandler {
public static final String CONTENT_TYPE = "Content-Type";
private static final String RENDERING_ERRORS = "rendering_errors";
@@ -97,7 +101,8 @@ public abstract class ThreadedHttpRequestHandler extends ThreadedRequestHandler
LoggingCompletionHandler logOnCompletion = null;
ContentChannelOutputStream output = null;
try {
- output = new ContentChannelOutputStream(channel);
+ output = httpResponse.maxPendingBytes() > 0 ? new MaxPendingContentChannelOutputStream(channel, httpResponse.maxPendingBytes())
+ : new ContentChannelOutputStream(channel);
logOnCompletion = createLoggingCompletionHandler(startTime, System.currentTimeMillis(),
httpResponse, request, output);
@@ -247,4 +252,82 @@ public abstract class ThreadedHttpRequestHandler extends ThreadedRequestHandler
return (com.yahoo.jdisc.http.HttpRequest) request;
}
+
+ /**
+ * @author baldersheim
+ */
+ static class MaxPendingContentChannelOutputStream extends ContentChannelOutputStream {
+ private final long maxPending;
+ private final AtomicLong sent = new AtomicLong(0);
+ private final AtomicLong acked = new AtomicLong(0);
+
+ public MaxPendingContentChannelOutputStream(ContentChannel endpoint, long maxPending) {
+ super(endpoint);
+ this.maxPending = maxPending;
+ }
+
+ private long pendingBytes() {
+ return sent.get() - acked.get();
+ }
+
+ private class TrackCompletion implements CompletionHandler {
+
+ private final long written;
+ private final AtomicBoolean replied = new AtomicBoolean(false);
+
+ TrackCompletion(long written) {
+ this.written = written;
+ sent.addAndGet(written);
+ }
+
+ @Override
+ public void completed() {
+ if ( ! replied.getAndSet(true)) {
+ acked.addAndGet(written);
+ }
+ }
+
+ @Override
+ public void failed(Throwable t) {
+ if ( ! replied.getAndSet(true)) {
+ acked.addAndGet(written);
+ }
+ }
+ }
+
+ @Override
+ public void send(ByteBuffer src) throws IOException {
+ try {
+ stallWhilePendingAbove(maxPending);
+ } catch (InterruptedException ignored) {
+ throw new InterruptedIOException("Interrupted waiting for IO");
+ }
+ CompletionHandler pendingTracker = new TrackCompletion(src.remaining());
+ try {
+ send(src, pendingTracker);
+ } catch (Throwable throwable) {
+ pendingTracker.failed(throwable);
+ throw throwable;
+ }
+ }
+
+ private void stallWhilePendingAbove(long pending) throws InterruptedException {
+ while (pendingBytes() > pending) {
+ Thread.sleep(1);
+ }
+ }
+
+ @Override
+ public void flush() throws IOException {
+ super.flush();
+ try {
+ stallWhilePendingAbove(0);
+ }
+ catch (InterruptedException e) {
+ throw new InterruptedIOException("Interrupted waiting for IO");
+ }
+ }
+
+ }
+
}
diff --git a/container-core/src/main/java/com/yahoo/container/logging/AccessLogHandler.java b/container-core/src/main/java/com/yahoo/container/logging/AccessLogHandler.java
index 89aab1513ee..f14479899f5 100644
--- a/container-core/src/main/java/com/yahoo/container/logging/AccessLogHandler.java
+++ b/container-core/src/main/java/com/yahoo/container/logging/AccessLogHandler.java
@@ -12,7 +12,7 @@ class AccessLogHandler {
AccessLogHandler(AccessLogConfig.FileHandler config, LogWriter<RequestLogEntry> logWriter) {
logFileHandler = new LogFileHandler<>(
- toCompression(config), config.pattern(), config.rotation(),
+ toCompression(config), config.bufferSize(), config.pattern(), config.rotation(),
config.symlink(), config.queueSize(), "request-logger", logWriter);
}
diff --git a/container-core/src/main/java/com/yahoo/container/logging/ConnectionLogEntry.java b/container-core/src/main/java/com/yahoo/container/logging/ConnectionLogEntry.java
index 6afe3b74329..5b30ce5963d 100644
--- a/container-core/src/main/java/com/yahoo/container/logging/ConnectionLogEntry.java
+++ b/container-core/src/main/java/com/yahoo/container/logging/ConnectionLogEntry.java
@@ -33,6 +33,8 @@ public class ConnectionLogEntry {
private final Instant sslPeerNotAfter;
private final String sslSniServerName;
private final SslHandshakeFailure sslHandshakeFailure;
+ private final String httpProtocol;
+ private final String proxyProtocolVersion;
private ConnectionLogEntry(Builder builder) {
@@ -57,6 +59,8 @@ public class ConnectionLogEntry {
this.sslPeerNotAfter = builder.sslPeerNotAfter;
this.sslSniServerName = builder.sslSniServerName;
this.sslHandshakeFailure = builder.sslHandshakeFailure;
+ this.httpProtocol = builder.httpProtocol;
+ this.proxyProtocolVersion = builder.proxyProtocolVersion;
}
public static Builder builder(UUID id, Instant timestamp) {
@@ -84,6 +88,8 @@ public class ConnectionLogEntry {
public Optional<Instant> sslPeerNotAfter() { return Optional.ofNullable(sslPeerNotAfter); }
public Optional<String> sslSniServerName() { return Optional.ofNullable(sslSniServerName); }
public Optional<SslHandshakeFailure> sslHandshakeFailure() { return Optional.ofNullable(sslHandshakeFailure); }
+ public Optional<String> httpProtocol() { return Optional.ofNullable(httpProtocol); }
+ public Optional<String> proxyProtocolVersion() { return Optional.ofNullable(proxyProtocolVersion); }
public static class SslHandshakeFailure {
private final String type;
@@ -133,6 +139,8 @@ public class ConnectionLogEntry {
private Instant sslPeerNotAfter;
private String sslSniServerName;
private SslHandshakeFailure sslHandshakeFailure;
+ private String httpProtocol;
+ private String proxyProtocolVersion;
Builder(UUID id, Instant timestamp) {
@@ -217,9 +225,18 @@ public class ConnectionLogEntry {
this.sslHandshakeFailure = sslHandshakeFailure;
return this;
}
+ public Builder withHttpProtocol(String protocol) {
+ this.httpProtocol = protocol;
+ return this;
+ }
+ public Builder withProxyProtocolVersion(String version) {
+ this.proxyProtocolVersion = version;
+ return this;
+ }
public ConnectionLogEntry build(){
return new ConnectionLogEntry(this);
}
+
}
}
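
For reference, a sketch of how the two new builder fields might be populated when a connection log entry is assembled; the concrete values are invented, and only the builder methods added in this diff are assumed:

import com.yahoo.container.logging.ConnectionLogEntry;

import java.time.Instant;
import java.util.UUID;

class ConnectionLogEntryExample {

    static ConnectionLogEntry entry() {
        return ConnectionLogEntry.builder(UUID.randomUUID(), Instant.now())
                .withHttpProtocol("HTTP/2.0")        // illustrative value
                .withProxyProtocolVersion("v2")      // illustrative value
                .build();
    }
}
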
diff --git a/container-core/src/main/java/com/yahoo/container/logging/ConnectionLogHandler.java b/container-core/src/main/java/com/yahoo/container/logging/ConnectionLogHandler.java
index 7a0e8aca95e..7b130884667 100644
--- a/container-core/src/main/java/com/yahoo/container/logging/ConnectionLogHandler.java
+++ b/container-core/src/main/java/com/yahoo/container/logging/ConnectionLogHandler.java
@@ -8,9 +8,11 @@ package com.yahoo.container.logging;
class ConnectionLogHandler {
private final LogFileHandler<ConnectionLogEntry> logFileHandler;
- public ConnectionLogHandler(String logDirectoryName, String clusterName, int queueSize, LogWriter<ConnectionLogEntry> logWriter) {
+ public ConnectionLogHandler(String logDirectoryName, int bufferSize, String clusterName,
+ int queueSize, LogWriter<ConnectionLogEntry> logWriter) {
logFileHandler = new LogFileHandler<>(
LogFileHandler.Compression.ZSTD,
+ bufferSize,
String.format("logs/vespa/%s/ConnectionLog.%s.%s", logDirectoryName, clusterName, "%Y%m%d%H%M%S"),
"0 60 ...",
String.format("ConnectionLog.%s", clusterName),
diff --git a/container-core/src/main/java/com/yahoo/container/logging/FileConnectionLog.java b/container-core/src/main/java/com/yahoo/container/logging/FileConnectionLog.java
index 7432c313286..749426d3da9 100644
--- a/container-core/src/main/java/com/yahoo/container/logging/FileConnectionLog.java
+++ b/container-core/src/main/java/com/yahoo/container/logging/FileConnectionLog.java
@@ -14,7 +14,7 @@ public class FileConnectionLog extends AbstractComponent implements ConnectionLo
@Inject
public FileConnectionLog(ConnectionLogConfig config) {
- logHandler = new ConnectionLogHandler(config.logDirectoryName(), config.cluster(), config.queueSize(), new JsonConnectionLogWriter());
+ logHandler = new ConnectionLogHandler(config.logDirectoryName(), config.bufferSize(), config.cluster(), config.queueSize(), new JsonConnectionLogWriter());
}
@Override
diff --git a/container-core/src/main/java/com/yahoo/container/logging/JsonConnectionLogWriter.java b/container-core/src/main/java/com/yahoo/container/logging/JsonConnectionLogWriter.java
index 158d2ec4ea6..dfdc5f1b55a 100644
--- a/container-core/src/main/java/com/yahoo/container/logging/JsonConnectionLogWriter.java
+++ b/container-core/src/main/java/com/yahoo/container/logging/JsonConnectionLogWriter.java
@@ -33,12 +33,32 @@ class JsonConnectionLogWriter implements LogWriter<ConnectionLogEntry> {
writeOptionalInteger(generator, "peerPort", unwrap(record.peerPort()));
writeOptionalString(generator, "localAddress", unwrap(record.localAddress()));
writeOptionalInteger(generator, "localPort", unwrap(record.localPort()));
- writeOptionalString(generator, "remoteAddress", unwrap(record.remoteAddress()));
- writeOptionalInteger(generator, "remotePort", unwrap(record.remotePort()));
- writeOptionalLong(generator, "httpBytesReceived", unwrap(record.httpBytesReceived()));
- writeOptionalLong(generator, "httpBytesSent", unwrap(record.httpBytesSent()));
- writeOptionalLong(generator, "requests", unwrap(record.requests()));
- writeOptionalLong(generator, "responses", unwrap(record.responses()));
+
+ String proxyProtocolVersion = unwrap(record.proxyProtocolVersion());
+ String proxyProtocolRemoteAddress = unwrap(record.remoteAddress());
+ Integer proxyProtocolRemotePort = unwrap(record.remotePort());
+ if (isAnyValuePresent(proxyProtocolVersion, proxyProtocolRemoteAddress, proxyProtocolRemotePort)) {
+ generator.writeObjectFieldStart("proxyProtocol");
+ writeOptionalString(generator, "version", proxyProtocolVersion);
+ writeOptionalString(generator, "remoteAddress", proxyProtocolRemoteAddress);
+ writeOptionalInteger(generator, "remotePort", proxyProtocolRemotePort);
+ generator.writeEndObject();
+ }
+
+ String httpVersion = unwrap(record.httpProtocol());
+ Long httpBytesReceived = unwrap(record.httpBytesReceived());
+ Long httpBytesSent = unwrap(record.httpBytesSent());
+ Long httpRequests = unwrap(record.requests());
+ Long httpResponses = unwrap(record.responses());
+ if (isAnyValuePresent(httpVersion, httpBytesReceived, httpBytesSent, httpRequests, httpResponses)) {
+ generator.writeObjectFieldStart("http");
+ writeOptionalString(generator, "version", httpVersion);
+ writeOptionalLong(generator, "bytesReceived", httpBytesReceived);
+ writeOptionalLong(generator, "responses", httpResponses);
+ writeOptionalLong(generator, "bytesSent", httpBytesSent);
+ writeOptionalLong(generator, "requests", httpRequests);
+ generator.writeEndObject();
+ }
String sslProtocol = unwrap(record.sslProtocol());
String sslSessionId = unwrap(record.sslSessionId());
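The writer now groups the proxy-protocol and HTTP fields into nested JSON objects, and only emits a group when at least one of its values is present. A self-contained sketch of the same nesting technique with Jackson's JsonGenerator, using made-up values:

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import java.io.StringWriter;

class NestedGroupSketch {
    public static void main(String[] args) throws Exception {
        StringWriter out = new StringWriter();
        try (JsonGenerator g = new JsonFactory().createGenerator(out)) {
            g.writeStartObject();
            g.writeObjectFieldStart("proxyProtocol");      // written only when a proxy-protocol value is known
            g.writeStringField("version", "v2");
            g.writeStringField("remoteAddress", "10.0.0.1");
            g.writeNumberField("remotePort", 43211);
            g.writeEndObject();
            g.writeObjectFieldStart("http");               // written only when an HTTP-level value is known
            g.writeStringField("version", "HTTP/2.0");
            g.writeNumberField("bytesReceived", 1234L);
            g.writeNumberField("bytesSent", 5678L);
            g.writeNumberField("requests", 3L);
            g.writeNumberField("responses", 3L);
            g.writeEndObject();
            g.writeEndObject();
        }
        System.out.println(out);   // {"proxyProtocol":{...},"http":{...}}
    }
}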
diff --git a/container-core/src/main/java/com/yahoo/container/logging/LogFileHandler.java b/container-core/src/main/java/com/yahoo/container/logging/LogFileHandler.java
index 0f2a9e42eb8..85c211c0e3a 100644
--- a/container-core/src/main/java/com/yahoo/container/logging/LogFileHandler.java
+++ b/container-core/src/main/java/com/yahoo/container/logging/LogFileHandler.java
@@ -47,21 +47,15 @@ class LogFileHandler <LOGTYPE> {
@FunctionalInterface private interface Pollable<T> { Operation<T> poll() throws InterruptedException; }
- LogFileHandler(Compression compression, String filePattern, String rotationTimes, String symlinkName, int queueSize,
- String threadName, LogWriter<LOGTYPE> logWriter) {
- this(compression, filePattern, calcTimesMinutes(rotationTimes), symlinkName, queueSize, threadName, logWriter);
+ LogFileHandler(Compression compression, int bufferSize, String filePattern, String rotationTimes, String symlinkName,
+ int queueSize, String threadName, LogWriter<LOGTYPE> logWriter) {
+ this(compression, bufferSize, filePattern, calcTimesMinutes(rotationTimes), symlinkName, queueSize, threadName, logWriter);
}
- LogFileHandler(
- Compression compression,
- String filePattern,
- long[] rotationTimes,
- String symlinkName,
- int queueSize,
- String threadName,
- LogWriter<LOGTYPE> logWriter) {
+ LogFileHandler(Compression compression, int bufferSize, String filePattern, long[] rotationTimes, String symlinkName,
+ int queueSize, String threadName, LogWriter<LOGTYPE> logWriter) {
this.logQueue = new LinkedBlockingQueue<>(queueSize);
- this.logThread = new LogThread<>(logWriter, filePattern, compression, rotationTimes, symlinkName, threadName, this::poll);
+ this.logThread = new LogThread<>(logWriter, filePattern, compression, bufferSize, rotationTimes, symlinkName, threadName, this::poll);
this.logThread.start();
}
@@ -197,6 +191,7 @@ class LogFileHandler <LOGTYPE> {
private volatile String fileName;
private final LogWriter<LOGTYPE> logWriter;
private final Compression compression;
+ private final int bufferSize;
private final long[] rotationTimes;
private final String symlinkName;
private final ExecutorService executor = createCompressionTaskExecutor();
@@ -206,6 +201,7 @@ class LogFileHandler <LOGTYPE> {
LogThread(LogWriter<LOGTYPE> logWriter,
String filePattern,
Compression compression,
+ int bufferSize,
long[] rotationTimes,
String symlinkName,
String threadName,
@@ -215,6 +211,7 @@ class LogFileHandler <LOGTYPE> {
this.logWriter = logWriter;
this.filePattern = filePattern;
this.compression = compression;
+ this.bufferSize = bufferSize;
this.rotationTimes = rotationTimes;
this.symlinkName = (symlinkName != null && !symlinkName.isBlank()) ? symlinkName : null;
this.operationProvider = operationProvider;
@@ -360,7 +357,7 @@ class LogFileHandler <LOGTYPE> {
internalClose();
try {
checkAndCreateDir(fileName);
- fileOutput = new PageCacheFriendlyFileOutputStream(nativeIO, Paths.get(fileName), 4 * 1024 * 1024);
+ fileOutput = new PageCacheFriendlyFileOutputStream(nativeIO, Paths.get(fileName), bufferSize);
LogFileDb.nowLoggingTo(fileName);
} catch (IOException e) {
throw new RuntimeException("Couldn't open log file '" + fileName + "'", e);
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/HttpRequest.java b/container-core/src/main/java/com/yahoo/jdisc/http/HttpRequest.java
index 118c34245c0..0b5e9ddde58 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/HttpRequest.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/HttpRequest.java
@@ -47,7 +47,8 @@ public class HttpRequest extends Request implements ServletOrJdiscHttpRequest {
public enum Version {
HTTP_1_0("HTTP/1.0"),
- HTTP_1_1("HTTP/1.1");
+ HTTP_1_1("HTTP/1.1"),
+ HTTP_2_0("HTTP/2.0");
private final String str;
@@ -70,6 +71,7 @@ public class HttpRequest extends Request implements ServletOrJdiscHttpRequest {
}
}
+ private final long jvmRelativeCreatedAt = System.nanoTime();
private final HeaderFields trailers = new HeaderFields();
private final Map<String, List<String>> parameters = new HashMap<>();
private Principal principal;
@@ -296,6 +298,11 @@ public class HttpRequest extends Request implements ServletOrJdiscHttpRequest {
return version == Version.HTTP_1_1;
}
+ /**
+     * @return the relative created timestamp (using {@link System#nanoTime()})
+ */
+ public long relativeCreatedAtNanoTime() { return jvmRelativeCreatedAt; }
+
public Principal getUserPrincipal() {
return principal;
}
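Unlike a wall-clock creation timestamp, the nanoTime-based value is monotonic and only meaningful as a difference. A minimal sketch of measuring how long a request has been in flight; the helper class is hypothetical, not part of this patch:

import com.yahoo.jdisc.http.HttpRequest;

class RequestAgeSketch {
    // Elapsed time since the HttpRequest instance was created, in milliseconds.
    static double elapsedMillisSinceCreated(HttpRequest request) {
        long elapsedNanos = System.nanoTime() - request.relativeCreatedAtNanoTime();
        return elapsedNanos / 1_000_000.0;
    }
}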
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterRequest.java b/container-core/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterRequest.java
index f7ab399574c..72068bd2dd5 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterRequest.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterRequest.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.jdisc.http.filter;
+import com.yahoo.container.jdisc.RequestView;
import com.yahoo.jdisc.HeaderFields;
import com.yahoo.jdisc.http.Cookie;
import com.yahoo.jdisc.http.HttpHeaders;
@@ -254,6 +255,19 @@ public abstract class DiscFilterRequest {
}
}
+ public RequestView asRequestView() {
+ return new RequestView() {
+ @Override
+ public HttpRequest.Method method() {
+ return HttpRequest.Method.valueOf(getMethod());
+ }
+
+ @Override
+ public URI uri() {
+ return getUri();
+ }
+ };
+ }
public List<Cookie> getCookies() {
return parent.decodeCookieHeader();
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/AccessLogRequestLog.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/AccessLogRequestLog.java
index 2f9fc0d07b2..11898381f0a 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/AccessLogRequestLog.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/AccessLogRequestLog.java
@@ -24,7 +24,7 @@ import java.util.function.BiConsumer;
import java.util.logging.Level;
import java.util.logging.Logger;
-import static com.yahoo.jdisc.http.server.jetty.HttpServletRequestUtils.getConnectorLocalPort;
+import static com.yahoo.jdisc.http.server.jetty.RequestUtils.getConnectorLocalPort;
/**
* This class is a bridge between Jetty's {@link org.eclipse.jetty.server.handler.RequestLogHandler}
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/AccessLoggingRequestHandler.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/AccessLoggingRequestHandler.java
index 842ab75a312..5b628d73ab8 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/AccessLoggingRequestHandler.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/AccessLoggingRequestHandler.java
@@ -6,6 +6,7 @@ import com.yahoo.container.logging.AccessLogEntry;
import com.yahoo.jdisc.Request;
import com.yahoo.jdisc.handler.AbstractRequestHandler;
import com.yahoo.jdisc.handler.ContentChannel;
+import com.yahoo.jdisc.handler.DelegatedRequestHandler;
import com.yahoo.jdisc.handler.RequestHandler;
import com.yahoo.jdisc.handler.ResponseHandler;
import com.yahoo.jdisc.http.HttpRequest;
@@ -23,7 +24,7 @@ import java.util.Optional;
*
* @author bakksjo
*/
-public class AccessLoggingRequestHandler extends AbstractRequestHandler {
+public class AccessLoggingRequestHandler extends AbstractRequestHandler implements DelegatedRequestHandler {
public static final String CONTEXT_KEY_ACCESS_LOG_ENTRY
= AccessLoggingRequestHandler.class.getName() + "_access-log-entry";
@@ -56,4 +57,8 @@ public class AccessLoggingRequestHandler extends AbstractRequestHandler {
}
+ @Override
+ public RequestHandler getDelegate() {
+ return delegate;
+ }
}
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactory.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactory.java
index d7ad12a5c64..71c0b3a0225 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactory.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactory.java
@@ -7,6 +7,8 @@ import com.yahoo.jdisc.http.ConnectorConfig;
import com.yahoo.jdisc.http.ssl.SslContextFactoryProvider;
import com.yahoo.security.tls.MixedMode;
import com.yahoo.security.tls.TransportSecurityUtils;
+import org.eclipse.jetty.alpn.server.ALPNServerConnectionFactory;
+import org.eclipse.jetty.http2.server.HTTP2ServerConnectionFactory;
import org.eclipse.jetty.server.ConnectionFactory;
import org.eclipse.jetty.server.DetectorConnectionFactory;
import org.eclipse.jetty.server.HttpConfiguration;
@@ -18,6 +20,7 @@ import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.server.SslConnectionFactory;
import org.eclipse.jetty.util.ssl.SslContextFactory;
+import java.util.Collection;
import java.util.List;
/**
@@ -76,41 +79,68 @@ public class ConnectorFactory {
}
private List<ConnectionFactory> createConnectionFactories(Metric metric) {
- HttpConnectionFactory httpFactory = newHttpConnectionFactory();
if (!isSslEffectivelyEnabled(connectorConfig)) {
- return List.of(httpFactory);
+ return List.of(newHttp1ConnectionFactory());
} else if (connectorConfig.ssl().enabled()) {
- return connectionFactoriesForHttps(metric, httpFactory);
+ return connectionFactoriesForHttps(metric);
} else if (TransportSecurityUtils.isTransportSecurityEnabled()) {
switch (TransportSecurityUtils.getInsecureMixedMode()) {
case TLS_CLIENT_MIXED_SERVER:
case PLAINTEXT_CLIENT_MIXED_SERVER:
- return List.of(new DetectorConnectionFactory(newSslConnectionFactory(metric, httpFactory)), httpFactory);
+ return connectionFactoriesForHttpsMixedMode(metric);
case DISABLED:
- return connectionFactoriesForHttps(metric, httpFactory);
+ return connectionFactoriesForHttps(metric);
default:
throw new IllegalStateException();
}
} else {
- return List.of(httpFactory);
+ return List.of(newHttp1ConnectionFactory());
}
}
- private List<ConnectionFactory> connectionFactoriesForHttps(Metric metric, HttpConnectionFactory httpFactory) {
+ private List<ConnectionFactory> connectionFactoriesForHttps(Metric metric) {
ConnectorConfig.ProxyProtocol proxyProtocolConfig = connectorConfig.proxyProtocol();
- SslConnectionFactory sslFactory = newSslConnectionFactory(metric, httpFactory);
- if (proxyProtocolConfig.enabled()) {
- if (proxyProtocolConfig.mixedMode()) {
- return List.of(new DetectorConnectionFactory(sslFactory, new ProxyConnectionFactory(sslFactory.getProtocol())), sslFactory, httpFactory);
+ HttpConnectionFactory http1Factory = newHttp1ConnectionFactory();
+ if (connectorConfig.http2Enabled()) {
+ HTTP2ServerConnectionFactory http2Factory = newHttp2ConnectionFactory();
+ ALPNServerConnectionFactory alpnFactory = newAlpnConnectionFactory(List.of(http1Factory, http2Factory), http1Factory);
+ SslConnectionFactory sslFactory = newSslConnectionFactory(metric, alpnFactory);
+ if (proxyProtocolConfig.enabled()) {
+ ProxyConnectionFactory proxyProtocolFactory = newProxyProtocolConnectionFactory(sslFactory);
+ if (proxyProtocolConfig.mixedMode()) {
+ DetectorConnectionFactory detectorFactory = newDetectorConnectionFactory(sslFactory);
+ return List.of(detectorFactory, proxyProtocolFactory, sslFactory, alpnFactory, http1Factory, http2Factory);
+ } else {
+ return List.of(proxyProtocolFactory, sslFactory, alpnFactory, http1Factory, http2Factory);
+ }
} else {
- return List.of(new ProxyConnectionFactory(sslFactory.getProtocol()), sslFactory, httpFactory);
+ return List.of(sslFactory, alpnFactory, http1Factory, http2Factory);
}
} else {
- return List.of(sslFactory, httpFactory);
+ SslConnectionFactory sslFactory = newSslConnectionFactory(metric, http1Factory);
+ if (proxyProtocolConfig.enabled()) {
+ ProxyConnectionFactory proxyProtocolFactory = newProxyProtocolConnectionFactory(sslFactory);
+ if (proxyProtocolConfig.mixedMode()) {
+ DetectorConnectionFactory detectorFactory = newDetectorConnectionFactory(sslFactory);
+ return List.of(detectorFactory, proxyProtocolFactory, sslFactory, http1Factory);
+ } else {
+ return List.of(proxyProtocolFactory, sslFactory, http1Factory);
+ }
+ } else {
+ return List.of(sslFactory, http1Factory);
+ }
}
}
- private HttpConnectionFactory newHttpConnectionFactory() {
+ private List<ConnectionFactory> connectionFactoriesForHttpsMixedMode(Metric metric) {
+    // Proxy protocol and HTTP/2 are not supported when the connector accepts both plain HTTP and TLS (mixed mode)
+ HttpConnectionFactory httpFactory = newHttp1ConnectionFactory();
+ SslConnectionFactory sslFactory = newSslConnectionFactory(metric, httpFactory);
+ DetectorConnectionFactory detectorFactory = newDetectorConnectionFactory(sslFactory);
+ return List.of(detectorFactory, httpFactory, sslFactory);
+ }
+
+ private HttpConfiguration newHttpConfiguration() {
HttpConfiguration httpConfig = new HttpConfiguration();
httpConfig.setSendDateHeader(true);
httpConfig.setSendServerVersion(false);
@@ -122,16 +152,41 @@ public class ConnectorFactory {
if (isSslEffectivelyEnabled(connectorConfig)) {
httpConfig.addCustomizer(new SecureRequestCustomizer());
}
- return new HttpConnectionFactory(httpConfig);
+ return httpConfig;
+ }
+
+ private HttpConnectionFactory newHttp1ConnectionFactory() {
+ return new HttpConnectionFactory(newHttpConfiguration());
}
- private SslConnectionFactory newSslConnectionFactory(Metric metric, HttpConnectionFactory httpFactory) {
+ private HTTP2ServerConnectionFactory newHttp2ConnectionFactory() {
+ return new HTTP2ServerConnectionFactory(newHttpConfiguration());
+ }
+
+ private SslConnectionFactory newSslConnectionFactory(Metric metric, ConnectionFactory wrappedFactory) {
SslContextFactory ctxFactory = sslContextFactoryProvider.getInstance(connectorConfig.name(), connectorConfig.listenPort());
- SslConnectionFactory connectionFactory = new SslConnectionFactory(ctxFactory, httpFactory.getProtocol());
+ SslConnectionFactory connectionFactory = new SslConnectionFactory(ctxFactory, wrappedFactory.getProtocol());
connectionFactory.addBean(new SslHandshakeFailedListener(metric, connectorConfig.name(), connectorConfig.listenPort()));
return connectionFactory;
}
+ private ALPNServerConnectionFactory newAlpnConnectionFactory(Collection<ConnectionFactory> alternatives,
+ ConnectionFactory defaultFactory) {
+ String[] protocols = alternatives.stream().map(ConnectionFactory::getProtocol).toArray(String[]::new);
+ ALPNServerConnectionFactory factory = new ALPNServerConnectionFactory(protocols);
+ factory.setDefaultProtocol(defaultFactory.getProtocol());
+ return factory;
+ }
+
+ private DetectorConnectionFactory newDetectorConnectionFactory(ConnectionFactory.Detecting... alternatives) {
+        // Note: a detector connection factory with a single alternative falls back to the next protocol in the connection factory list
+ return new DetectorConnectionFactory(alternatives);
+ }
+
+ private ProxyConnectionFactory newProxyProtocolConnectionFactory(ConnectionFactory wrappedFactory) {
+ return new ProxyConnectionFactory(wrappedFactory.getProtocol());
+ }
+
private static boolean isSslEffectivelyEnabled(ConnectorConfig config) {
return config.ssl().enabled()
|| (config.implicitTlsEnabled() && TransportSecurityUtils.isTransportSecurityEnabled());
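The essence of the HTTP/2 branch above is the ordering of Jetty connection factories: TLS decrypts first, ALPN negotiates the application protocol, and the negotiated h2 or http/1.1 factory handles the stream; proxy-protocol and detector factories, when enabled, are simply prepended. A stand-alone sketch of that chain, assuming Jetty 9.4-style APIs and no proxy protocol:

import org.eclipse.jetty.alpn.server.ALPNServerConnectionFactory;
import org.eclipse.jetty.http2.server.HTTP2ServerConnectionFactory;
import org.eclipse.jetty.server.HttpConfiguration;
import org.eclipse.jetty.server.HttpConnectionFactory;
import org.eclipse.jetty.server.SecureRequestCustomizer;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.server.SslConnectionFactory;
import org.eclipse.jetty.util.ssl.SslContextFactory;

class Http2ConnectorSketch {
    static ServerConnector newTlsHttp2Connector(Server server, SslContextFactory.Server sslContextFactory) {
        HttpConfiguration config = new HttpConfiguration();
        config.addCustomizer(new SecureRequestCustomizer());
        HttpConnectionFactory http1 = new HttpConnectionFactory(config);
        HTTP2ServerConnectionFactory http2 = new HTTP2ServerConnectionFactory(config);
        ALPNServerConnectionFactory alpn =
                new ALPNServerConnectionFactory(http1.getProtocol(), http2.getProtocol());
        alpn.setDefaultProtocol(http1.getProtocol());   // clients without ALPN fall back to HTTP/1.1
        SslConnectionFactory tls = new SslConnectionFactory(sslContextFactory, alpn.getProtocol());
        // Order matters: each factory hands the connection to the next one by protocol name.
        return new ServerConnector(server, tls, alpn, http1, http2);
    }
}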
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FilterResolver.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FilterResolver.java
index 1e2686aa184..a9639ba4da7 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FilterResolver.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FilterResolver.java
@@ -11,13 +11,13 @@ import com.yahoo.jdisc.http.HttpRequest;
import com.yahoo.jdisc.http.filter.RequestFilter;
import com.yahoo.jdisc.http.filter.ResponseFilter;
import com.yahoo.jdisc.http.servlet.ServletRequest;
+import org.eclipse.jetty.server.Request;
-import javax.servlet.http.HttpServletRequest;
import java.net.URI;
import java.util.Map;
import java.util.Optional;
-import static com.yahoo.jdisc.http.server.jetty.JDiscHttpServlet.getConnector;
+import static com.yahoo.jdisc.http.server.jetty.RequestUtils.getConnector;
/**
* Resolve request/response filter (chain) based on {@link FilterBindings}.
@@ -36,38 +36,38 @@ class FilterResolver {
this.strictFiltering = strictFiltering;
}
- Optional<RequestFilter> resolveRequestFilter(HttpServletRequest servletRequest, URI jdiscUri) {
- Optional<String> maybeFilterId = bindings.resolveRequestFilter(jdiscUri, getConnector(servletRequest).listenPort());
+ Optional<RequestFilter> resolveRequestFilter(Request request, URI jdiscUri) {
+ Optional<String> maybeFilterId = bindings.resolveRequestFilter(jdiscUri, getConnector(request).listenPort());
if (maybeFilterId.isPresent()) {
- metric.add(MetricDefinitions.FILTERING_REQUEST_HANDLED, 1L, createMetricContext(servletRequest, maybeFilterId.get()));
- servletRequest.setAttribute(ServletRequest.JDISC_REQUEST_CHAIN, maybeFilterId.get());
+ metric.add(MetricDefinitions.FILTERING_REQUEST_HANDLED, 1L, createMetricContext(request, maybeFilterId.get()));
+ request.setAttribute(ServletRequest.JDISC_REQUEST_CHAIN, maybeFilterId.get());
} else if (!strictFiltering) {
- metric.add(MetricDefinitions.FILTERING_REQUEST_UNHANDLED, 1L, createMetricContext(servletRequest, null));
+ metric.add(MetricDefinitions.FILTERING_REQUEST_UNHANDLED, 1L, createMetricContext(request, null));
} else {
String syntheticFilterId = RejectingRequestFilter.SYNTHETIC_FILTER_CHAIN_ID;
- metric.add(MetricDefinitions.FILTERING_REQUEST_HANDLED, 1L, createMetricContext(servletRequest, syntheticFilterId));
- servletRequest.setAttribute(ServletRequest.JDISC_REQUEST_CHAIN, syntheticFilterId);
+ metric.add(MetricDefinitions.FILTERING_REQUEST_HANDLED, 1L, createMetricContext(request, syntheticFilterId));
+ request.setAttribute(ServletRequest.JDISC_REQUEST_CHAIN, syntheticFilterId);
return Optional.of(RejectingRequestFilter.INSTANCE);
}
return maybeFilterId.map(bindings::getRequestFilter);
}
- Optional<ResponseFilter> resolveResponseFilter(HttpServletRequest servletRequest, URI jdiscUri) {
- Optional<String> maybeFilterId = bindings.resolveResponseFilter(jdiscUri, getConnector(servletRequest).listenPort());
+ Optional<ResponseFilter> resolveResponseFilter(Request request, URI jdiscUri) {
+ Optional<String> maybeFilterId = bindings.resolveResponseFilter(jdiscUri, getConnector(request).listenPort());
if (maybeFilterId.isPresent()) {
- metric.add(MetricDefinitions.FILTERING_RESPONSE_HANDLED, 1L, createMetricContext(servletRequest, maybeFilterId.get()));
- servletRequest.setAttribute(ServletRequest.JDISC_RESPONSE_CHAIN, maybeFilterId.get());
+ metric.add(MetricDefinitions.FILTERING_RESPONSE_HANDLED, 1L, createMetricContext(request, maybeFilterId.get()));
+ request.setAttribute(ServletRequest.JDISC_RESPONSE_CHAIN, maybeFilterId.get());
} else {
- metric.add(MetricDefinitions.FILTERING_RESPONSE_UNHANDLED, 1L, createMetricContext(servletRequest, null));
+ metric.add(MetricDefinitions.FILTERING_RESPONSE_UNHANDLED, 1L, createMetricContext(request, null));
}
return maybeFilterId.map(bindings::getResponseFilter);
}
- private Metric.Context createMetricContext(HttpServletRequest request, String filterId) {
+ private Metric.Context createMetricContext(Request request, String filterId) {
Map<String, String> extraDimensions = filterId != null
? Map.of(MetricDefinitions.FILTER_CHAIN_ID_DIMENSION, filterId)
: Map.of();
- return JDiscHttpServlet.getConnector(request).createRequestMetricContext(request, extraDimensions);
+ return getConnector(request).createRequestMetricContext(request, extraDimensions);
}
private static class RejectingRequestFilter extends NoopSharedResource implements RequestFilter {
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FilteringRequestHandler.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FilteringRequestHandler.java
index de768f979a1..43acbb9b096 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FilteringRequestHandler.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FilteringRequestHandler.java
@@ -2,12 +2,15 @@
package com.yahoo.jdisc.http.server.jetty;
import com.google.common.base.Preconditions;
+import com.yahoo.container.jdisc.RequestHandlerSpec;
+import com.yahoo.container.jdisc.HttpRequestHandler;
import com.yahoo.jdisc.Request;
import com.yahoo.jdisc.Response;
import com.yahoo.jdisc.handler.AbstractRequestHandler;
import com.yahoo.jdisc.handler.BindingNotFoundException;
import com.yahoo.jdisc.handler.CompletionHandler;
import com.yahoo.jdisc.handler.ContentChannel;
+import com.yahoo.jdisc.handler.DelegatedRequestHandler;
import com.yahoo.jdisc.handler.RequestDeniedException;
import com.yahoo.jdisc.handler.RequestHandler;
import com.yahoo.jdisc.handler.ResponseHandler;
@@ -15,9 +18,9 @@ import com.yahoo.jdisc.http.HttpRequest;
import com.yahoo.jdisc.http.filter.RequestFilter;
import com.yahoo.jdisc.http.filter.ResponseFilter;
-import javax.servlet.http.HttpServletRequest;
import java.nio.ByteBuffer;
import java.util.Objects;
+import java.util.Optional;
import java.util.concurrent.atomic.AtomicBoolean;
/**
@@ -42,11 +45,11 @@ class FilteringRequestHandler extends AbstractRequestHandler {
};
private final FilterResolver filterResolver;
- private final HttpServletRequest servletRequest;
+ private final org.eclipse.jetty.server.Request jettyRequest;
- public FilteringRequestHandler(FilterResolver filterResolver, HttpServletRequest servletRequest) {
+ public FilteringRequestHandler(FilterResolver filterResolver, org.eclipse.jetty.server.Request jettyRequest) {
this.filterResolver = filterResolver;
- this.servletRequest = servletRequest;
+ this.jettyRequest = jettyRequest;
}
@Override
@@ -54,9 +57,9 @@ class FilteringRequestHandler extends AbstractRequestHandler {
Preconditions.checkArgument(request instanceof HttpRequest, "Expected HttpRequest, got " + request);
Objects.requireNonNull(originalResponseHandler, "responseHandler");
- RequestFilter requestFilter = filterResolver.resolveRequestFilter(servletRequest, request.getUri())
+ RequestFilter requestFilter = filterResolver.resolveRequestFilter(jettyRequest, request.getUri())
.orElse(null);
- ResponseFilter responseFilter = filterResolver.resolveResponseFilter(servletRequest, request.getUri())
+ ResponseFilter responseFilter = filterResolver.resolveResponseFilter(jettyRequest, request.getUri())
.orElse(null);
// Not using request.connect() here - it adds logic for error handling that we'd rather leave to the framework.
@@ -66,6 +69,9 @@ class FilteringRequestHandler extends AbstractRequestHandler {
throw new BindingNotFoundException(request.getUri());
}
+ getRequestHandlerSpec(resolvedRequestHandler)
+ .ifPresent(requestHandlerSpec -> request.context().put(RequestHandlerSpec.ATTRIBUTE_NAME, requestHandlerSpec));
+
RequestHandler requestHandler = new ReferenceCountingRequestHandler(resolvedRequestHandler);
ResponseHandler responseHandler;
@@ -90,6 +96,18 @@ class FilteringRequestHandler extends AbstractRequestHandler {
return contentChannel;
}
+ private Optional<RequestHandlerSpec> getRequestHandlerSpec(RequestHandler resolvedRequestHandler) {
+ RequestHandler delegate = resolvedRequestHandler;
+ if (delegate instanceof DelegatedRequestHandler) {
+ delegate = ((DelegatedRequestHandler) delegate).getDelegateRecursive();
+ }
+        if (delegate instanceof HttpRequestHandler) {
+ return Optional.ofNullable(((HttpRequestHandler) delegate).requestHandlerSpec());
+ } else {
+ return Optional.empty();
+ }
+ }
+
private static class FilteringResponseHandler implements ResponseHandler {
private final ResponseHandler delegate;
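The wrapper handlers touched by this patch (AccessLoggingRequestHandler, FormPostRequestHandler, ReferenceCountingRequestHandler) now implement DelegatedRequestHandler so that getRequestHandlerSpec above can unwrap the chain and reach the underlying HttpRequestHandler. A minimal sketch of the kind of recursive unwrapping this relies on; the real getDelegateRecursive lives in com.yahoo.jdisc.handler and may differ in detail:

import com.yahoo.jdisc.handler.DelegatedRequestHandler;
import com.yahoo.jdisc.handler.RequestHandler;

class UnwrapSketch {
    // Follow the delegate chain until a non-wrapping handler is reached.
    static RequestHandler unwrap(RequestHandler handler) {
        while (handler instanceof DelegatedRequestHandler) {
            handler = ((DelegatedRequestHandler) handler).getDelegate();
        }
        return handler;
    }
}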
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FormPostRequestHandler.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FormPostRequestHandler.java
index 38f84438526..57fb32f89f0 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FormPostRequestHandler.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/FormPostRequestHandler.java
@@ -7,6 +7,7 @@ import com.yahoo.jdisc.ResourceReference;
import com.yahoo.jdisc.handler.AbstractRequestHandler;
import com.yahoo.jdisc.handler.CompletionHandler;
import com.yahoo.jdisc.handler.ContentChannel;
+import com.yahoo.jdisc.handler.DelegatedRequestHandler;
import com.yahoo.jdisc.handler.RequestHandler;
import com.yahoo.jdisc.handler.ResponseHandler;
import com.yahoo.jdisc.http.HttpRequest;
@@ -38,7 +39,7 @@ import static com.yahoo.jdisc.http.server.jetty.CompletionHandlerUtils.NOOP_COMP
* @author bakksjo
* $Id$
*/
-class FormPostRequestHandler extends AbstractRequestHandler implements ContentChannel {
+class FormPostRequestHandler extends AbstractRequestHandler implements ContentChannel, DelegatedRequestHandler {
private final ByteArrayOutputStream accumulatedRequestContent = new ByteArrayOutputStream();
private final RequestHandler delegateHandler;
@@ -185,4 +186,9 @@ class FormPostRequestHandler extends AbstractRequestHandler implements ContentCh
}
}
}
+
+ @Override
+ public RequestHandler getDelegate() {
+ return delegateHandler;
+ }
}
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HealthCheckProxyHandler.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HealthCheckProxyHandler.java
index 0f7ce77e4cd..8b6192bb455 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HealthCheckProxyHandler.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HealthCheckProxyHandler.java
@@ -40,7 +40,7 @@ import java.util.concurrent.Executors;
import java.util.logging.Level;
import java.util.logging.Logger;
-import static com.yahoo.jdisc.http.server.jetty.HttpServletRequestUtils.getConnectorLocalPort;
+import static com.yahoo.jdisc.http.server.jetty.RequestUtils.getConnectorLocalPort;
/**
* A handler that proxies status.html health checks
@@ -91,7 +91,7 @@ class HealthCheckProxyHandler extends HandlerWrapper {
@Override
public void handle(String target, Request request, HttpServletRequest servletRequest, HttpServletResponse servletResponse) throws IOException, ServletException {
- int localPort = getConnectorLocalPort(servletRequest);
+ int localPort = getConnectorLocalPort(request);
ProxyTarget proxyTarget = portToProxyTargetMapping.get(localPort);
if (proxyTarget != null) {
AsyncContext asyncContext = servletRequest.startAsync();
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestDispatch.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestDispatch.java
index 05715b13d10..7828751df5a 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestDispatch.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestDispatch.java
@@ -14,7 +14,6 @@ import com.yahoo.jdisc.http.ConnectorConfig;
import com.yahoo.jdisc.http.HttpHeaders;
import com.yahoo.jdisc.http.HttpRequest;
import org.eclipse.jetty.io.EofException;
-import org.eclipse.jetty.server.HttpConnection;
import org.eclipse.jetty.server.Request;
import javax.servlet.AsyncContext;
@@ -34,8 +33,8 @@ import java.util.logging.Level;
import java.util.logging.Logger;
import static com.yahoo.jdisc.http.HttpHeaders.Values.APPLICATION_X_WWW_FORM_URLENCODED;
-import static com.yahoo.jdisc.http.server.jetty.HttpServletRequestUtils.getConnection;
-import static com.yahoo.jdisc.http.server.jetty.JDiscHttpServlet.getConnector;
+import static com.yahoo.jdisc.http.server.jetty.RequestUtils.getConnector;
+import static com.yahoo.jdisc.http.server.jetty.RequestUtils.getHttp1Connection;
import static com.yahoo.yolean.Exceptions.throwUnchecked;
/**
@@ -72,7 +71,7 @@ class HttpRequestDispatch {
jDiscContext.janitor,
metricReporter,
jDiscContext.developerMode());
- markConnectionAsNonPersistentIfThresholdReached(servletRequest);
+ markHttp1ConnectionAsNonPersistentIfThresholdReached(jettyRequest);
this.async = servletRequest.startAsync();
async.setTimeout(0);
metricReporter.uriLength(jettyRequest.getOriginalURI().length());
@@ -139,22 +138,24 @@ class HttpRequestDispatch {
};
}
- private static void markConnectionAsNonPersistentIfThresholdReached(HttpServletRequest request) {
+ private static void markHttp1ConnectionAsNonPersistentIfThresholdReached(Request request) {
ConnectorConfig connectorConfig = getConnector(request).connectorConfig();
int maxRequestsPerConnection = connectorConfig.maxRequestsPerConnection();
if (maxRequestsPerConnection > 0) {
- HttpConnection connection = getConnection(request);
- if (connection.getMessagesIn() >= maxRequestsPerConnection) {
- connection.getGenerator().setPersistent(false);
- }
+ getHttp1Connection(request).ifPresent(connection -> {
+ if (connection.getMessagesIn() >= maxRequestsPerConnection) {
+ connection.getGenerator().setPersistent(false);
+ }
+ });
}
double maxConnectionLifeInSeconds = connectorConfig.maxConnectionLife();
if (maxConnectionLifeInSeconds > 0) {
- HttpConnection connection = getConnection(request);
- Instant expireAt = Instant.ofEpochMilli((long)(connection.getCreatedTimeStamp() + maxConnectionLifeInSeconds * 1000));
- if (Instant.now().isAfter(expireAt)) {
- connection.getGenerator().setPersistent(false);
- }
+ getHttp1Connection(request).ifPresent(connection -> {
+ Instant expireAt = Instant.ofEpochMilli((long) (connection.getCreatedTimeStamp() + maxConnectionLifeInSeconds * 1000));
+ if (Instant.now().isAfter(expireAt)) {
+ connection.getGenerator().setPersistent(false);
+ }
+ });
}
}
@@ -212,7 +213,7 @@ class HttpRequestDispatch {
AccessLogEntry accessLogEntry,
HttpServletRequest servletRequest) {
RequestHandler requestHandler = wrapHandlerIfFormPost(
- new FilteringRequestHandler(context.filterResolver, servletRequest),
+ new FilteringRequestHandler(context.filterResolver, (Request)servletRequest),
servletRequest, context.serverConfig.removeRawPostBodyForWwwUrlEncodedPost());
return new AccessLoggingRequestHandler(requestHandler, accessLogEntry);
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestFactory.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestFactory.java
index e8d37cfadb5..8b223c45827 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestFactory.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestFactory.java
@@ -4,6 +4,7 @@ package com.yahoo.jdisc.http.server.jetty;
import com.yahoo.jdisc.http.HttpRequest;
import com.yahoo.jdisc.http.servlet.ServletRequest;
import com.yahoo.jdisc.service.CurrentContainer;
+import org.eclipse.jetty.server.Request;
import org.eclipse.jetty.util.Utf8Appendable;
import javax.servlet.http.HttpServletRequest;
@@ -13,8 +14,8 @@ import java.security.cert.X509Certificate;
import java.util.Enumeration;
import static com.yahoo.jdisc.Response.Status.BAD_REQUEST;
-import static com.yahoo.jdisc.http.server.jetty.HttpServletRequestUtils.getConnection;
-import static com.yahoo.jdisc.http.server.jetty.HttpServletRequestUtils.getConnectorLocalPort;
+import static com.yahoo.jdisc.http.server.jetty.RequestUtils.getConnection;
+import static com.yahoo.jdisc.http.server.jetty.RequestUtils.getConnectorLocalPort;
/**
* @author Simon Thoresen Hult
@@ -30,7 +31,7 @@ class HttpRequestFactory {
HttpRequest.Method.valueOf(servletRequest.getMethod()),
HttpRequest.Version.fromString(servletRequest.getProtocol()),
new InetSocketAddress(servletRequest.getRemoteAddr(), servletRequest.getRemotePort()),
- getConnection(servletRequest).getCreatedTimeStamp());
+ getConnection((Request) servletRequest).getCreatedTimeStamp());
httpRequest.context().put(ServletRequest.JDISC_REQUEST_X509CERT, getCertChain(servletRequest));
return httpRequest;
} catch (Utf8Appendable.NotUtf8Exception e) {
@@ -43,7 +44,7 @@ class HttpRequestFactory {
try {
String scheme = servletRequest.getScheme();
String host = servletRequest.getServerName();
- int port = getConnectorLocalPort(servletRequest);
+ int port = getConnectorLocalPort((Request) servletRequest);
String path = servletRequest.getRequestURI();
String query = servletRequest.getQueryString();
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscFilterInvokerFilter.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscFilterInvokerFilter.java
index a89c115a1c2..2904d79ad41 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscFilterInvokerFilter.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscFilterInvokerFilter.java
@@ -4,6 +4,7 @@ package com.yahoo.jdisc.http.server.jetty;
import com.yahoo.container.logging.AccessLogEntry;
import com.yahoo.jdisc.handler.ResponseHandler;
import com.yahoo.jdisc.http.filter.RequestFilter;
+import org.eclipse.jetty.server.Request;
import javax.servlet.AsyncContext;
import javax.servlet.AsyncListener;
@@ -26,7 +27,7 @@ import java.util.Map;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicReference;
-import static com.yahoo.jdisc.http.server.jetty.JDiscHttpServlet.getConnector;
+import static com.yahoo.jdisc.http.server.jetty.RequestUtils.getConnector;
import static com.yahoo.yolean.Exceptions.throwUnchecked;
/**
@@ -77,7 +78,7 @@ class JDiscFilterInvokerFilter implements Filter {
private void runChainAndResponseFilters(URI uri, HttpServletRequest request, HttpServletResponse response, FilterChain chain) throws IOException, ServletException {
Optional<OneTimeRunnable> responseFilterInvoker =
- jDiscContext.filterResolver.resolveResponseFilter(request, uri)
+ jDiscContext.filterResolver.resolveResponseFilter(toJettyRequest(request), uri)
.map(responseFilter ->
new OneTimeRunnable(() ->
filterInvoker.invokeResponseFilterChain(responseFilter, uri, request, response)));
@@ -107,7 +108,7 @@ class JDiscFilterInvokerFilter implements Filter {
private HttpServletRequest runRequestFilterWithMatchingBinding(AtomicReference<Boolean> responseReturned, URI uri, HttpServletRequest request, HttpServletResponse response) throws IOException {
try {
- RequestFilter requestFilter = jDiscContext.filterResolver.resolveRequestFilter(request, uri).orElse(null);
+ RequestFilter requestFilter = jDiscContext.filterResolver.resolveRequestFilter(toJettyRequest(request), uri).orElse(null);
if (requestFilter == null)
return request;
@@ -134,13 +135,20 @@ class JDiscFilterInvokerFilter implements Filter {
final AccessLogEntry accessLogEntry = null; // Not used in this context.
return new HttpRequestDispatch(jDiscContext,
accessLogEntry,
- getConnector(request).createRequestMetricContext(request, Map.of()),
+ getConnector(toJettyRequest(request)).createRequestMetricContext(request, Map.of()),
request, response);
} catch (IOException e) {
throw throwUnchecked(e);
}
}
+ private static Request toJettyRequest(HttpServletRequest request) {
+ if (request instanceof com.yahoo.jdisc.http.servlet.ServletRequest) {
+ return (Request) ((com.yahoo.jdisc.http.servlet.ServletRequest)request).getRequest();
+ }
+ return (Request) request;
+ }
+
@Override
public void destroy() {}
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscHttpServlet.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscHttpServlet.java
index 41a1ffc2709..7e1445ffa4f 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscHttpServlet.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscHttpServlet.java
@@ -5,6 +5,7 @@ import com.yahoo.container.logging.AccessLogEntry;
import com.yahoo.jdisc.Metric;
import com.yahoo.jdisc.handler.OverloadException;
import com.yahoo.jdisc.http.HttpRequest.Method;
+import org.eclipse.jetty.server.Request;
import javax.servlet.ServletException;
import javax.servlet.annotation.WebServlet;
@@ -20,7 +21,7 @@ import java.util.logging.Logger;
import java.util.stream.Collectors;
import java.util.stream.Stream;
-import static com.yahoo.jdisc.http.server.jetty.HttpServletRequestUtils.getConnection;
+import static com.yahoo.jdisc.http.server.jetty.RequestUtils.getConnector;
/**
* @author Simon Thoresen Hult
@@ -85,7 +86,7 @@ class JDiscHttpServlet extends HttpServlet {
@Override
protected void service(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
- request.setAttribute(JDiscServerConnector.REQUEST_ATTRIBUTE, getConnector(request));
+ request.setAttribute(JDiscServerConnector.REQUEST_ATTRIBUTE, getConnector((Request) request));
Metric.Context metricContext = getMetricContext(request);
context.metric.add(MetricDefinitions.NUM_REQUESTS, 1, metricContext);
@@ -103,10 +104,6 @@ class JDiscHttpServlet extends HttpServlet {
}
}
- static JDiscServerConnector getConnector(HttpServletRequest request) {
- return (JDiscServerConnector)getConnection(request).getConnector();
- }
-
private void dispatchHttpRequest(HttpServletRequest request, HttpServletResponse response) throws IOException {
AccessLogEntry accessLogEntry = new AccessLogEntry();
request.setAttribute(ATTRIBUTE_NAME_ACCESS_LOG_ENTRY, accessLogEntry);
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyConnectionLogger.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyConnectionLogger.java
index cd1ca490f61..e7cdb13425f 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyConnectionLogger.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyConnectionLogger.java
@@ -6,6 +6,7 @@ import com.yahoo.container.logging.ConnectionLogEntry;
import com.yahoo.container.logging.ConnectionLogEntry.SslHandshakeFailure.ExceptionEntry;
import com.yahoo.io.HexDump;
import com.yahoo.jdisc.http.ServerConfig;
+import org.eclipse.jetty.http2.server.HTTP2ServerConnection;
import org.eclipse.jetty.io.Connection;
import org.eclipse.jetty.io.EndPoint;
import org.eclipse.jetty.io.SocketChannelEndPoint;
@@ -94,9 +95,18 @@ class JettyConnectionLogger extends AbstractLifeCycle implements Connection.List
info = ConnectionInfo.from(endpoint);
connectionInfo.put(IdentityKey.of(endpoint), info);
}
+        String connectionClassName = connection.getClass().getSimpleName(); // Some Connection implementations (e.g. the proxy-protocol connections) are not public, so match on the class name
if (connection instanceof SslConnection) {
SSLEngine sslEngine = ((SslConnection) connection).getSSLEngine();
sslToConnectionInfo.put(IdentityKey.of(sslEngine), info);
+ } else if (connection instanceof HttpConnection) {
+ info.setHttpProtocol("HTTP/1.1");
+ } else if (connection instanceof HTTP2ServerConnection) {
+ info.setHttpProtocol("HTTP/2.0");
+ } else if (connectionClassName.endsWith("ProxyProtocolV1Connection")) {
+ info.setProxyProtocolVersion("v1");
+ } else if (connectionClassName.endsWith("ProxyProtocolV2Connection")) {
+ info.setProxyProtocolVersion("v2");
}
if (connection.getEndPoint() instanceof ProxyConnectionFactory.ProxyEndPoint) {
InetSocketAddress remoteAddress = connection.getEndPoint().getRemoteAddress();
@@ -227,6 +237,8 @@ class JettyConnectionLogger extends AbstractLifeCycle implements Connection.List
private Date sslPeerNotAfter;
private List<SNIServerName> sslSniServerNames;
private SSLHandshakeException sslHandshakeException;
+ private String proxyProtocolVersion;
+ private String httpProtocol;
private ConnectionInfo(UUID uuid, long createdAt, InetSocketAddress localAddress, InetSocketAddress peerAddress) {
this.uuid = uuid;
@@ -290,6 +302,10 @@ class JettyConnectionLogger extends AbstractLifeCycle implements Connection.List
return this;
}
+ synchronized ConnectionInfo setHttpProtocol(String protocol) { this.httpProtocol = protocol; return this; }
+
+ synchronized ConnectionInfo setProxyProtocolVersion(String version) { this.proxyProtocolVersion = version; return this; }
+
synchronized ConnectionLogEntry toLogEntry() {
ConnectionLogEntry.Builder builder = ConnectionLogEntry.builder(uuid, Instant.ofEpochMilli(createdAt));
if (closedAt > 0) {
@@ -348,6 +364,12 @@ class JettyConnectionLogger extends AbstractLifeCycle implements Connection.List
.orElse("UNKNOWN");
builder.withSslHandshakeFailure(new ConnectionLogEntry.SslHandshakeFailure(type, exceptionChain));
}
+ if (httpProtocol != null) {
+ builder.withHttpProtocol(httpProtocol);
+ }
+ if (proxyProtocolVersion != null) {
+ builder.withProxyProtocolVersion(proxyProtocolVersion);
+ }
return builder.build();
}
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ReferenceCountingRequestHandler.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ReferenceCountingRequestHandler.java
index f2bf5b56d5c..71cca62ce9c 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ReferenceCountingRequestHandler.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ReferenceCountingRequestHandler.java
@@ -7,6 +7,7 @@ import com.yahoo.jdisc.Response;
import com.yahoo.jdisc.SharedResource;
import com.yahoo.jdisc.handler.CompletionHandler;
import com.yahoo.jdisc.handler.ContentChannel;
+import com.yahoo.jdisc.handler.DelegatedRequestHandler;
import com.yahoo.jdisc.handler.NullContent;
import com.yahoo.jdisc.handler.RequestHandler;
import com.yahoo.jdisc.handler.ResponseHandler;
@@ -26,7 +27,7 @@ import java.util.logging.Logger;
* @author bakksjo
*/
@SuppressWarnings("try")
-class ReferenceCountingRequestHandler implements RequestHandler {
+class ReferenceCountingRequestHandler implements DelegatedRequestHandler {
private static final Logger log = Logger.getLogger(ReferenceCountingRequestHandler.class.getName());
@@ -79,6 +80,11 @@ class ReferenceCountingRequestHandler implements RequestHandler {
return delegate.toString();
}
+ @Override
+ public RequestHandler getDelegate() {
+ return delegate;
+ }
+
private static class ReferenceCountingResponseHandler implements ResponseHandler {
final SharedResource request;
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpServletRequestUtils.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/RequestUtils.java
index e7b9f459d2e..5fca7a8d778 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpServletRequestUtils.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/RequestUtils.java
@@ -1,26 +1,39 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.jdisc.http.server.jetty;
+import org.eclipse.jetty.io.Connection;
import org.eclipse.jetty.server.HttpConnection;
+import org.eclipse.jetty.server.Request;
import javax.servlet.http.HttpServletRequest;
+import java.util.Optional;
/**
* @author bjorncs
*/
-public class HttpServletRequestUtils {
- private HttpServletRequestUtils() {}
+public class RequestUtils {
+ private RequestUtils() {}
- public static HttpConnection getConnection(HttpServletRequest request) {
- return (HttpConnection)request.getAttribute("org.eclipse.jetty.server.HttpConnection");
+ public static Connection getConnection(Request request) {
+ return request.getHttpChannel().getConnection();
+ }
+
+ public static Optional<HttpConnection> getHttp1Connection(Request request) {
+ Connection connection = getConnection(request);
+ if (connection instanceof HttpConnection) return Optional.of((HttpConnection) connection);
+ return Optional.empty();
+ }
+
+ public static JDiscServerConnector getConnector(Request request) {
+ return (JDiscServerConnector) request.getHttpChannel().getConnector();
}
/**
* Note: {@link HttpServletRequest#getLocalPort()} may return the local port of the load balancer / reverse proxy if proxy-protocol is enabled.
* @return the actual local port of the underlying Jetty connector
*/
- public static int getConnectorLocalPort(HttpServletRequest request) {
- JDiscServerConnector connector = (JDiscServerConnector) getConnection(request).getConnector();
+ public static int getConnectorLocalPort(Request request) {
+ JDiscServerConnector connector = getConnector(request);
int actualLocalPort = connector.getLocalPort();
int localPortIfConnectorUnopened = -1;
int localPortIfConnectorClosed = -2;
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/SecuredRedirectHandler.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/SecuredRedirectHandler.java
index e32c9d46deb..dad274ae520 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/SecuredRedirectHandler.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/SecuredRedirectHandler.java
@@ -14,7 +14,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import static com.yahoo.jdisc.http.server.jetty.HttpServletRequestUtils.getConnectorLocalPort;
+import static com.yahoo.jdisc.http.server.jetty.RequestUtils.getConnectorLocalPort;
/**
* A secure redirect handler inspired by {@link org.eclipse.jetty.server.handler.SecuredRedirectHandler}.
@@ -33,7 +33,7 @@ class SecuredRedirectHandler extends HandlerWrapper {
@Override
public void handle(String target, Request request, HttpServletRequest servletRequest, HttpServletResponse servletResponse) throws IOException, ServletException {
- int localPort = getConnectorLocalPort(servletRequest);
+ int localPort = getConnectorLocalPort(request);
if (!redirectMap.containsKey(localPort)) {
_handler.handle(target, request, servletRequest, servletResponse);
return;
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/TlsClientAuthenticationEnforcer.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/TlsClientAuthenticationEnforcer.java
index 10a6c4702b5..7299ab4b500 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/TlsClientAuthenticationEnforcer.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/TlsClientAuthenticationEnforcer.java
@@ -16,7 +16,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import static com.yahoo.jdisc.http.server.jetty.HttpServletRequestUtils.getConnectorLocalPort;
+import static com.yahoo.jdisc.http.server.jetty.RequestUtils.getConnectorLocalPort;
/**
* A Jetty handler that enforces TLS client authentication with configurable white list.
@@ -34,7 +34,7 @@ class TlsClientAuthenticationEnforcer extends HandlerWrapper {
@Override
public void handle(String target, Request request, HttpServletRequest servletRequest, HttpServletResponse servletResponse) throws IOException, ServletException {
if (isHttpsRequest(request)
- && !isRequestToWhitelistedBinding(servletRequest)
+ && !isRequestToWhitelistedBinding(request)
&& !isClientAuthenticated(servletRequest)) {
servletResponse.sendError(
Response.Status.UNAUTHORIZED,
@@ -60,14 +60,14 @@ class TlsClientAuthenticationEnforcer extends HandlerWrapper {
return request.getDispatcherType() == DispatcherType.REQUEST && request.getScheme().equalsIgnoreCase("https");
}
- private boolean isRequestToWhitelistedBinding(HttpServletRequest servletRequest) {
- int localPort = getConnectorLocalPort(servletRequest);
+ private boolean isRequestToWhitelistedBinding(Request jettyRequest) {
+ int localPort = getConnectorLocalPort(jettyRequest);
List<String> whiteListedPaths = getWhitelistedPathsForPort(localPort);
if (whiteListedPaths == null) {
return true; // enforcer not enabled
}
// Note: Same path definition as HttpRequestFactory.getUri()
- return whiteListedPaths.contains(servletRequest.getRequestURI());
+ return whiteListedPaths.contains(jettyRequest.getRequestURI());
}
private List<String> getWhitelistedPathsForPort(int localPort) {
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/guiceModules/ConnectorFactoryRegistryModule.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/testutils/ConnectorFactoryRegistryModule.java
index cc2a00c08c6..9d475309955 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/guiceModules/ConnectorFactoryRegistryModule.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/testutils/ConnectorFactoryRegistryModule.java
@@ -1,5 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.jdisc.http.guiceModules;
+package com.yahoo.jdisc.http.server.jetty.testutils;
import com.google.inject.Binder;
import com.google.inject.Module;
@@ -7,7 +7,6 @@ import com.google.inject.Provides;
import com.yahoo.component.ComponentId;
import com.yahoo.component.provider.ComponentRegistry;
import com.yahoo.jdisc.http.ConnectorConfig;
-import com.yahoo.jdisc.http.ConnectorConfig.Builder;
import com.yahoo.jdisc.http.server.jetty.ConnectorFactory;
import com.yahoo.jdisc.http.ssl.impl.ConfiguredSslContextFactoryProvider;
@@ -19,29 +18,28 @@ import com.yahoo.jdisc.http.ssl.impl.ConfiguredSslContextFactoryProvider;
*/
public class ConnectorFactoryRegistryModule implements Module {
- private final Builder connectorConfigBuilder;
+ private final ConnectorConfig config;
- public ConnectorFactoryRegistryModule(Builder connectorConfigBuilder) {
- this.connectorConfigBuilder = connectorConfigBuilder;
+ public ConnectorFactoryRegistryModule(ConnectorConfig config) {
+ this.config = config;
}
public ConnectorFactoryRegistryModule() {
- this(new Builder());
+ this(new ConnectorConfig(new ConnectorConfig.Builder()));
}
+ @SuppressWarnings("unused")
@Provides
public ComponentRegistry<ConnectorFactory> connectorFactoryComponentRegistry() {
ComponentRegistry<ConnectorFactory> registry = new ComponentRegistry<>();
registry.register(ComponentId.createAnonymousComponentId("connector-factory"),
- new StaticKeyDbConnectorFactory(new ConnectorConfig(connectorConfigBuilder)));
+ new StaticKeyDbConnectorFactory(config));
registry.freeze();
return registry;
}
- @Override
- public void configure(Binder binder) {
- }
+ @Override public void configure(Binder binder) {}
private static class StaticKeyDbConnectorFactory extends ConnectorFactory {
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/guiceModules/ServletModule.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/testutils/ServletModule.java
index dd6511d1f88..a507255c9b7 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/guiceModules/ServletModule.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/testutils/ServletModule.java
@@ -1,5 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.jdisc.http.guiceModules;
+package com.yahoo.jdisc.http.server.jetty.testutils;
import com.google.inject.Binder;
import com.google.inject.Module;
@@ -12,13 +12,12 @@ import org.eclipse.jetty.servlet.ServletHolder;
* @author Tony Vaagenes
*/
public class ServletModule implements Module {
- @Override
- public void configure(Binder binder) {
- }
+ @SuppressWarnings("unused")
@Provides
public ComponentRegistry<ServletHolder> servletHolderComponentRegistry() {
return new ComponentRegistry<>();
}
+ @Override public void configure(Binder binder) { }
}
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/testutils/TestDriver.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/testutils/TestDriver.java
new file mode 100644
index 00000000000..7f3d54f1d34
--- /dev/null
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/testutils/TestDriver.java
@@ -0,0 +1,122 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.jdisc.http.server.jetty.testutils;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.Module;
+import com.google.inject.util.Modules;
+import com.yahoo.container.logging.ConnectionLog;
+import com.yahoo.container.logging.RequestLog;
+import com.yahoo.jdisc.application.ContainerBuilder;
+import com.yahoo.jdisc.handler.RequestHandler;
+import com.yahoo.jdisc.http.ConnectorConfig;
+import com.yahoo.jdisc.http.ServerConfig;
+import com.yahoo.jdisc.http.ServletPathsConfig;
+import com.yahoo.jdisc.http.server.jetty.FilterBindings;
+import com.yahoo.jdisc.http.server.jetty.JettyHttpServer;
+import com.yahoo.jdisc.http.server.jetty.VoidConnectionLog;
+import com.yahoo.jdisc.http.server.jetty.VoidRequestLog;
+import com.yahoo.security.SslContextBuilder;
+
+import javax.net.ssl.SSLContext;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+/**
+ * A {@link com.yahoo.jdisc.test.TestDriver} that is configured with {@link JettyHttpServer}.
+ *
+ * @author bjorncs
+ */
+public class TestDriver implements AutoCloseable {
+
+ private final com.yahoo.jdisc.test.TestDriver jdiscCoreTestDriver;
+ private final JettyHttpServer server;
+ private final SSLContext sslContext;
+
+ private TestDriver(Builder builder) {
+ ServerConfig serverConfig =
+ builder.serverConfig != null ? builder.serverConfig : new ServerConfig(new ServerConfig.Builder());
+ ConnectorConfig connectorConfig =
+ builder.connectorConfig != null ? builder.connectorConfig : new ConnectorConfig(new ConnectorConfig.Builder());
+ Module baseModule = createBaseModule(serverConfig, connectorConfig);
+ Module combinedModule =
+ builder.extraGuiceModules.isEmpty() ? baseModule : Modules.override(baseModule).with(builder.extraGuiceModules);
+ com.yahoo.jdisc.test.TestDriver jdiscCoreTestDriver =
+ com.yahoo.jdisc.test.TestDriver.newSimpleApplicationInstance(combinedModule);
+ ContainerBuilder containerBuilder = jdiscCoreTestDriver.newContainerBuilder();
+ JettyHttpServer server = containerBuilder.getInstance(JettyHttpServer.class);
+ containerBuilder.serverProviders().install(server);
+ builder.handlers.forEach((binding, handler) -> containerBuilder.serverBindings().bind(binding, handler));
+ jdiscCoreTestDriver.activateContainer(containerBuilder);
+ server.start();
+ this.jdiscCoreTestDriver = jdiscCoreTestDriver;
+ this.server = server;
+ this.sslContext = newSslContext(containerBuilder);
+ }
+
+ public static Builder newBuilder() { return new Builder(); }
+
+ public SSLContext sslContext() { return sslContext; }
+ public JettyHttpServer server() { return server; }
+
+ @Override public void close() { shutdown(); }
+
+ public boolean shutdown() {
+ server.close();
+ server.release();
+ return jdiscCoreTestDriver.close();
+ }
+
+ private static SSLContext newSslContext(ContainerBuilder builder) {
+ ConnectorConfig.Ssl sslConfig = builder.getInstance(ConnectorConfig.class).ssl();
+ if (!sslConfig.enabled()) return null;
+
+ return new SslContextBuilder()
+ .withKeyStore(Paths.get(sslConfig.privateKeyFile()), Paths.get(sslConfig.certificateFile()))
+ .withTrustStore(Paths.get(sslConfig.caCertificateFile()))
+ .build();
+ }
+
+ private static Module createBaseModule(ServerConfig serverConfig, ConnectorConfig connectorConfig) {
+ return Modules.combine(
+ new AbstractModule() {
+ @Override
+ protected void configure() {
+ bind(ServletPathsConfig.class).toInstance(new ServletPathsConfig(new ServletPathsConfig.Builder()));
+ bind(ServerConfig.class).toInstance(serverConfig);
+ bind(ConnectorConfig.class).toInstance(connectorConfig);
+ bind(FilterBindings.class).toInstance(new FilterBindings.Builder().build());
+ bind(ConnectionLog.class).toInstance(new VoidConnectionLog());
+ bind(RequestLog.class).toInstance(new VoidRequestLog());
+ }
+ },
+ new ConnectorFactoryRegistryModule(connectorConfig),
+ new ServletModule());
+ }
+
+ public static class Builder {
+ private final SortedMap<String, RequestHandler> handlers = new TreeMap<>();
+ private final List<Module> extraGuiceModules = new ArrayList<>();
+ private ServerConfig serverConfig;
+ private ConnectorConfig connectorConfig;
+
+ private Builder() {}
+
+ public Builder withRequestHandler(String binding, RequestHandler handler) {
+ this.handlers.put(binding, handler); return this;
+ }
+
+ public Builder withRequestHandler(RequestHandler handler) { return withRequestHandler("http://*/*", handler); }
+
+ public Builder withServerConfig(ServerConfig config) { this.serverConfig = config; return this; }
+
+ public Builder withConnectorConfig(ConnectorConfig config) { this.connectorConfig = config; return this; }
+
+ public Builder withGuiceModule(Module module) { this.extraGuiceModules.add(module); return this; }
+
+ public TestDriver build() { return new TestDriver(this); }
+
+ }
+}
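The builder above gives a self-contained way to boot a JettyHttpServer around one or more request handlers in a test. A minimal usage sketch, assuming a placeholder handler class (EchoRequestHandler is not part of this change):

// Hypothetical test fragment: start the server, talk to it over its port, shut it down.
try (TestDriver driver = TestDriver.newBuilder()
        .withRequestHandler(new EchoRequestHandler())   // placeholder handler, bound to "http://*/*"
        .build()) {
    int port = driver.server().getListenPort();         // port assigned to the connector
    // ... issue HTTP requests against localhost:port and assert on the responses ...
}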
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/servlet/ServletRequest.java b/container-core/src/main/java/com/yahoo/jdisc/http/servlet/ServletRequest.java
index c945dc6d8b6..bb78511a17f 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/servlet/ServletRequest.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/servlet/ServletRequest.java
@@ -6,6 +6,7 @@ import com.yahoo.jdisc.HeaderFields;
import com.yahoo.jdisc.http.Cookie;
import com.yahoo.jdisc.http.HttpHeaders;
import com.yahoo.jdisc.http.HttpRequest;
+import org.eclipse.jetty.server.Request;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
@@ -24,7 +25,7 @@ import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
-import static com.yahoo.jdisc.http.server.jetty.HttpServletRequestUtils.getConnection;
+import static com.yahoo.jdisc.http.server.jetty.RequestUtils.getConnection;
/**
* Mutable wrapper to use a {@link javax.servlet.http.HttpServletRequest}
@@ -68,7 +69,7 @@ public class ServletRequest extends HttpServletRequestWrapper implements Servlet
remoteHostAddress = request.getRemoteAddr();
remoteHostName = request.getRemoteHost();
remotePort = request.getRemotePort();
- connectedAt = getConnection(request).getCreatedTimeStamp();
+ connectedAt = getConnection((Request) request).getCreatedTimeStamp();
headerFields = new HeaderFields();
Enumeration<String> parentHeaders = request.getHeaderNames();
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/Bucket.java b/container-core/src/main/java/com/yahoo/metrics/simple/Bucket.java
index b75a0529a03..b75a0529a03 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/Bucket.java
+++ b/container-core/src/main/java/com/yahoo/metrics/simple/Bucket.java
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/Counter.java b/container-core/src/main/java/com/yahoo/metrics/simple/Counter.java
index 21cdbd3c219..21cdbd3c219 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/Counter.java
+++ b/container-core/src/main/java/com/yahoo/metrics/simple/Counter.java
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/DimensionCache.java b/container-core/src/main/java/com/yahoo/metrics/simple/DimensionCache.java
index 8893a88d94c..8893a88d94c 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/DimensionCache.java
+++ b/container-core/src/main/java/com/yahoo/metrics/simple/DimensionCache.java
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/Gauge.java b/container-core/src/main/java/com/yahoo/metrics/simple/Gauge.java
index 1edefd0ae5a..1edefd0ae5a 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/Gauge.java
+++ b/container-core/src/main/java/com/yahoo/metrics/simple/Gauge.java
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/Identifier.java b/container-core/src/main/java/com/yahoo/metrics/simple/Identifier.java
index 4d0f470534a..4d0f470534a 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/Identifier.java
+++ b/container-core/src/main/java/com/yahoo/metrics/simple/Identifier.java
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/Measurement.java b/container-core/src/main/java/com/yahoo/metrics/simple/Measurement.java
index 4098ac1bdea..4098ac1bdea 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/Measurement.java
+++ b/container-core/src/main/java/com/yahoo/metrics/simple/Measurement.java
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/MetricAggregator.java b/container-core/src/main/java/com/yahoo/metrics/simple/MetricAggregator.java
index 7168eb49676..7168eb49676 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/MetricAggregator.java
+++ b/container-core/src/main/java/com/yahoo/metrics/simple/MetricAggregator.java
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/MetricManager.java b/container-core/src/main/java/com/yahoo/metrics/simple/MetricManager.java
index 1956783b4c0..1956783b4c0 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/MetricManager.java
+++ b/container-core/src/main/java/com/yahoo/metrics/simple/MetricManager.java
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/MetricReceiver.java b/container-core/src/main/java/com/yahoo/metrics/simple/MetricReceiver.java
index e0e3469e257..e0e3469e257 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/MetricReceiver.java
+++ b/container-core/src/main/java/com/yahoo/metrics/simple/MetricReceiver.java
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/MetricSettings.java b/container-core/src/main/java/com/yahoo/metrics/simple/MetricSettings.java
index 924e311015b..924e311015b 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/MetricSettings.java
+++ b/container-core/src/main/java/com/yahoo/metrics/simple/MetricSettings.java
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/MetricUpdater.java b/container-core/src/main/java/com/yahoo/metrics/simple/MetricUpdater.java
index 848132c9bea..848132c9bea 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/MetricUpdater.java
+++ b/container-core/src/main/java/com/yahoo/metrics/simple/MetricUpdater.java
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/Point.java b/container-core/src/main/java/com/yahoo/metrics/simple/Point.java
index 672d05c1874..672d05c1874 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/Point.java
+++ b/container-core/src/main/java/com/yahoo/metrics/simple/Point.java
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/PointBuilder.java b/container-core/src/main/java/com/yahoo/metrics/simple/PointBuilder.java
index f613aab26a2..f613aab26a2 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/PointBuilder.java
+++ b/container-core/src/main/java/com/yahoo/metrics/simple/PointBuilder.java
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/Sample.java b/container-core/src/main/java/com/yahoo/metrics/simple/Sample.java
index 0d2144deeb4..0d2144deeb4 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/Sample.java
+++ b/container-core/src/main/java/com/yahoo/metrics/simple/Sample.java
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/UnitTestSetup.java b/container-core/src/main/java/com/yahoo/metrics/simple/UnitTestSetup.java
index e6856ee2970..e6856ee2970 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/UnitTestSetup.java
+++ b/container-core/src/main/java/com/yahoo/metrics/simple/UnitTestSetup.java
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/UntypedMetric.java b/container-core/src/main/java/com/yahoo/metrics/simple/UntypedMetric.java
index f757ab15022..f757ab15022 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/UntypedMetric.java
+++ b/container-core/src/main/java/com/yahoo/metrics/simple/UntypedMetric.java
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/Value.java b/container-core/src/main/java/com/yahoo/metrics/simple/Value.java
index fd4113a5e22..fd4113a5e22 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/Value.java
+++ b/container-core/src/main/java/com/yahoo/metrics/simple/Value.java
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/jdisc/JdiscMetricsFactory.java b/container-core/src/main/java/com/yahoo/metrics/simple/jdisc/JdiscMetricsFactory.java
index 30102c43919..30102c43919 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/jdisc/JdiscMetricsFactory.java
+++ b/container-core/src/main/java/com/yahoo/metrics/simple/jdisc/JdiscMetricsFactory.java
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/jdisc/SimpleMetricConsumer.java b/container-core/src/main/java/com/yahoo/metrics/simple/jdisc/SimpleMetricConsumer.java
index ee5f18e78d3..ee5f18e78d3 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/jdisc/SimpleMetricConsumer.java
+++ b/container-core/src/main/java/com/yahoo/metrics/simple/jdisc/SimpleMetricConsumer.java
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/jdisc/SnapshotConverter.java b/container-core/src/main/java/com/yahoo/metrics/simple/jdisc/SnapshotConverter.java
index 495062e38f8..495062e38f8 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/jdisc/SnapshotConverter.java
+++ b/container-core/src/main/java/com/yahoo/metrics/simple/jdisc/SnapshotConverter.java
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/jdisc/package-info.java b/container-core/src/main/java/com/yahoo/metrics/simple/jdisc/package-info.java
index d191a5764c0..d191a5764c0 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/jdisc/package-info.java
+++ b/container-core/src/main/java/com/yahoo/metrics/simple/jdisc/package-info.java
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/package-info.java b/container-core/src/main/java/com/yahoo/metrics/simple/package-info.java
index 9306c7c59db..9306c7c59db 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/package-info.java
+++ b/container-core/src/main/java/com/yahoo/metrics/simple/package-info.java
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/runtime/MetricProperties.java b/container-core/src/main/java/com/yahoo/metrics/simple/runtime/MetricProperties.java
index 9c3ecec10fc..9c3ecec10fc 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/runtime/MetricProperties.java
+++ b/container-core/src/main/java/com/yahoo/metrics/simple/runtime/MetricProperties.java
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/runtime/package-info.java b/container-core/src/main/java/com/yahoo/metrics/simple/runtime/package-info.java
index e7c7cd166eb..e7c7cd166eb 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/runtime/package-info.java
+++ b/container-core/src/main/java/com/yahoo/metrics/simple/runtime/package-info.java
diff --git a/container-di/src/main/java/com/yahoo/osgi/provider/model/ComponentModel.java b/container-core/src/main/java/com/yahoo/osgi/provider/model/ComponentModel.java
index 8c501963db3..8c501963db3 100644
--- a/container-di/src/main/java/com/yahoo/osgi/provider/model/ComponentModel.java
+++ b/container-core/src/main/java/com/yahoo/osgi/provider/model/ComponentModel.java
diff --git a/container-di/src/main/java/com/yahoo/osgi/provider/model/package-info.java b/container-core/src/main/java/com/yahoo/osgi/provider/model/package-info.java
index f930f56ae4a..f930f56ae4a 100644
--- a/container-di/src/main/java/com/yahoo/osgi/provider/model/package-info.java
+++ b/container-core/src/main/java/com/yahoo/osgi/provider/model/package-info.java
diff --git a/container-core/src/main/java/com/yahoo/processing/test/ProcessorLibrary.java b/container-core/src/main/java/com/yahoo/processing/test/ProcessorLibrary.java
index 5f6201c6f2d..fc8904bee7f 100644
--- a/container-core/src/main/java/com/yahoo/processing/test/ProcessorLibrary.java
+++ b/container-core/src/main/java/com/yahoo/processing/test/ProcessorLibrary.java
@@ -169,7 +169,6 @@ public class ProcessorLibrary {
List<FutureResponse> futureResponses = new ArrayList<>(chains.size());
for (Chain<? extends Processor> chain : chains) {
-
futureResponses.add(new AsyncExecution(chain, execution).process(request.clone()));
}
AsyncExecution.waitForAll(futureResponses, 1000);
diff --git a/container-core/src/main/java/com/yahoo/restapi/ErrorResponse.java b/container-core/src/main/java/com/yahoo/restapi/ErrorResponse.java
index d3e81a10720..1885a0c970c 100644
--- a/container-core/src/main/java/com/yahoo/restapi/ErrorResponse.java
+++ b/container-core/src/main/java/com/yahoo/restapi/ErrorResponse.java
@@ -5,6 +5,7 @@ import com.yahoo.slime.Cursor;
import com.yahoo.slime.Slime;
import static com.yahoo.jdisc.Response.Status.BAD_REQUEST;
+import static com.yahoo.jdisc.Response.Status.CONFLICT;
import static com.yahoo.jdisc.Response.Status.FORBIDDEN;
import static com.yahoo.jdisc.Response.Status.INTERNAL_SERVER_ERROR;
import static com.yahoo.jdisc.Response.Status.METHOD_NOT_ALLOWED;
@@ -24,7 +25,8 @@ public class ErrorResponse extends SlimeJsonResponse {
FORBIDDEN,
METHOD_NOT_ALLOWED,
INTERNAL_SERVER_ERROR,
- UNAUTHORIZED
+ UNAUTHORIZED,
+ CONFLICT
}
public ErrorResponse(int statusCode, String errorType, String message) {
@@ -63,4 +65,8 @@ public class ErrorResponse extends SlimeJsonResponse {
return new ErrorResponse(METHOD_NOT_ALLOWED, errorCodes.METHOD_NOT_ALLOWED.name(), message);
}
+ public static ErrorResponse conflict(String message) {
+ return new ErrorResponse(CONFLICT, errorCodes.CONFLICT.name(), message);
+ }
+
}
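CONFLICT joins the allowed error codes, and conflict(String) mirrors the existing factory methods. A small hedged sketch of how a handler might use it (the surrounding handler and checks are placeholders):

// Hypothetical handler fragment: answer with a 409 CONFLICT error body when an update races.
HttpResponse handleUpdate(HttpRequest request) {
    if (revisionHasChanged(request))                     // placeholder check
        return ErrorResponse.conflict("resource was modified concurrently");
    return applyUpdate(request);                         // placeholder success path
}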
diff --git a/container-core/src/main/java/com/yahoo/restapi/RestApi.java b/container-core/src/main/java/com/yahoo/restapi/RestApi.java
index df05723ac14..6f5bf298de3 100644
--- a/container-core/src/main/java/com/yahoo/restapi/RestApi.java
+++ b/container-core/src/main/java/com/yahoo/restapi/RestApi.java
@@ -22,6 +22,7 @@ public interface RestApi {
static RouteBuilder route(String pathPattern) { return new RestApiImpl.RouteBuilderImpl(pathPattern); }
HttpResponse handleRequest(HttpRequest request);
+ ObjectMapper jacksonJsonMapper();
interface Builder {
Builder setObjectMapper(ObjectMapper mapper);
diff --git a/container-core/src/main/java/com/yahoo/restapi/RestApiException.java b/container-core/src/main/java/com/yahoo/restapi/RestApiException.java
index ac3aa647b87..d9da320499f 100644
--- a/container-core/src/main/java/com/yahoo/restapi/RestApiException.java
+++ b/container-core/src/main/java/com/yahoo/restapi/RestApiException.java
@@ -40,8 +40,11 @@ public class RestApiException extends RuntimeException {
public int statusCode() { return statusCode; }
public HttpResponse response() { return response; }
- public static class NotFoundException extends RestApiException {
- public NotFoundException() { super(ErrorResponse::notFoundError, "Not Found", null); }
+ public static class NotFound extends RestApiException {
+ public NotFound() { this(null, null); }
+ public NotFound(Throwable cause) { this(cause.getMessage(), cause); }
+ public NotFound(String message) { this(message, null); }
+ public NotFound(String message, Throwable cause) { super(ErrorResponse::notFoundError, message, cause); }
}
public static class MethodNotAllowed extends RestApiException {
@@ -52,12 +55,14 @@ public class RestApiException extends RuntimeException {
}
public static class BadRequest extends RestApiException {
- public BadRequest(String message) { super(ErrorResponse::badRequest, message, null); }
+ public BadRequest(String message) { this(message, null); }
+ public BadRequest(Throwable cause) { this(cause.getMessage(), cause); }
public BadRequest(String message, Throwable cause) { super(ErrorResponse::badRequest, message, cause); }
}
public static class InternalServerError extends RestApiException {
- public InternalServerError(String message) { super(ErrorResponse::internalServerError, message, null); }
+ public InternalServerError(String message) { this(message, null); }
+ public InternalServerError(Throwable cause) { this(cause.getMessage(), cause); }
public InternalServerError(String message, Throwable cause) { super(ErrorResponse::internalServerError, message, cause); }
}
@@ -65,4 +70,10 @@ public class RestApiException extends RuntimeException {
public Forbidden(String message) { super(ErrorResponse::forbidden, message, null); }
public Forbidden(String message, Throwable cause) { super(ErrorResponse::forbidden, message, cause); }
}
+
+ public static class Conflict extends RestApiException {
+ public Conflict() { this("Conflict", null); }
+ public Conflict(String message) { this(message, null); }
+ public Conflict(String message, Throwable cause) { super(ErrorResponse::conflict, message, cause); }
+ }
}
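The rename of NotFoundException to NotFound and the new cause-only constructors make it cheap to wrap lower-level exceptions without losing the stack trace, while Conflict maps to ErrorResponse::conflict (HTTP 409). An illustrative fragment; the wrapped operation and the exception types caught are placeholders:

// Illustrative: translate lower-level failures into REST API exceptions.
try {
    storeEntity(request);                                // placeholder operation
} catch (java.util.ConcurrentModificationException e) {
    throw new RestApiException.Conflict(e.getMessage(), e);
} catch (IllegalArgumentException e) {
    throw new RestApiException.BadRequest(e);            // new cause-only constructor
}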
diff --git a/container-core/src/main/java/com/yahoo/restapi/RestApiImpl.java b/container-core/src/main/java/com/yahoo/restapi/RestApiImpl.java
index e6c6d7ccb62..8ba94f9aca9 100644
--- a/container-core/src/main/java/com/yahoo/restapi/RestApiImpl.java
+++ b/container-core/src/main/java/com/yahoo/restapi/RestApiImpl.java
@@ -70,6 +70,8 @@ class RestApiImpl implements RestApi {
}
}
+ @Override public ObjectMapper jacksonJsonMapper() { return jacksonJsonMapper; }
+
private HttpResponse dispatchToRoute(Route route, RequestContextImpl context) {
HandlerHolder<?> resolvedHandler = resolveHandler(context, route);
RequestMapperHolder<?> resolvedRequestMapper = resolveRequestMapper(resolvedHandler);
@@ -142,7 +144,7 @@ class RestApiImpl implements RestApi {
private static Route createDefaultRoute() {
RouteBuilder routeBuilder = new RouteBuilderImpl("{*}")
.defaultHandler(context -> {
- throw new RestApiException.NotFoundException();
+ throw new RestApiException.NotFound();
});
return ((RouteBuilderImpl)routeBuilder).build();
}
@@ -347,7 +349,10 @@ class RestApiImpl implements RestApi {
@Override public ObjectMapper jacksonJsonMapper() { return jacksonJsonMapper; }
@Override public UriBuilder uriBuilder() {
URI uri = request.getUri();
- return new UriBuilder(uri.getScheme() + "://" + uri.getHost() + ':' + uri.getPort());
+ int uriPort = uri.getPort();
+ return uriPort != -1
+ ? new UriBuilder(uri.getScheme() + "://" + uri.getHost() + ':' + uriPort)
+ : new UriBuilder(uri.getScheme() + "://" + uri.getHost());
}
private class PathParametersImpl implements RestApi.RequestContext.PathParameters {
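The uriBuilder() change guards against URIs without an explicit port: URI.getPort() returns -1 in that case, so the old concatenation would have produced a "host:-1" authority. A tiny sketch of the JDK behaviour the guard relies on:

// Plain JDK behaviour: no explicit port in the URI means getPort() == -1.
URI withPort = URI.create("http://localhost:8080/foo");
URI withoutPort = URI.create("https://example.com/foo");
assert withPort.getPort() == 8080;
assert withoutPort.getPort() == -1;   // previously rendered as "https://example.com:-1"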
diff --git a/container-core/src/main/java/com/yahoo/restapi/RestApiRequestHandler.java b/container-core/src/main/java/com/yahoo/restapi/RestApiRequestHandler.java
index 9fe813903dd..c501ad8c804 100644
--- a/container-core/src/main/java/com/yahoo/restapi/RestApiRequestHandler.java
+++ b/container-core/src/main/java/com/yahoo/restapi/RestApiRequestHandler.java
@@ -4,6 +4,9 @@ package com.yahoo.restapi;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.container.jdisc.LoggingRequestHandler;
+import com.yahoo.jdisc.Metric;
+
+import java.util.concurrent.Executor;
/**
* @author bjorncs
@@ -25,12 +28,26 @@ public abstract class RestApiRequestHandler<T extends RestApiRequestHandler<T>>
this.restApi = provider.createRestApi((T)this);
}
+ /**
+ * @see #RestApiRequestHandler(Context, RestApiProvider)
+ */
+ @SuppressWarnings("unchecked")
+ protected RestApiRequestHandler(Executor executor, Metric metric, RestApiProvider<T> provider) {
+ super(executor, metric);
+ this.restApi = provider.createRestApi((T)this);
+ }
+
protected RestApiRequestHandler(LoggingRequestHandler.Context context, RestApi restApi) {
super(context);
this.restApi = restApi;
}
+ protected RestApiRequestHandler(Executor executor, Metric metric, RestApi restApi) {
+ super(executor, metric);
+ this.restApi = restApi;
+ }
+
@Override public final HttpResponse handle(HttpRequest request) { return restApi.handleRequest(request); }
- protected RestApi restApi() { return restApi; }
+ public RestApi restApi() { return restApi; }
}
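The new constructors let a handler be wired from an Executor and a Metric directly, without a LoggingRequestHandler.Context. A hedged sketch of a subclass using the RestApi-instance variant (the subclass itself is hypothetical):

// Hypothetical subclass: constructed from an Executor and a Metric, no Context needed.
public class MyRestHandler extends RestApiRequestHandler<MyRestHandler> {
    public MyRestHandler(Executor executor, Metric metric, RestApi restApi) {
        super(executor, metric, restApi);
    }
}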
diff --git a/container-core/src/main/java/com/yahoo/restapi/RestApiTestDriver.java b/container-core/src/main/java/com/yahoo/restapi/RestApiTestDriver.java
new file mode 100644
index 00000000000..7dc5b710bbe
--- /dev/null
+++ b/container-core/src/main/java/com/yahoo/restapi/RestApiTestDriver.java
@@ -0,0 +1,96 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.restapi;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.yahoo.container.jdisc.HttpRequest;
+import com.yahoo.container.jdisc.HttpResponse;
+import com.yahoo.container.jdisc.LoggingRequestHandler;
+import com.yahoo.jdisc.http.server.jetty.testutils.TestDriver;
+import com.yahoo.jdisc.test.MockMetric;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.InputStream;
+import java.util.OptionalInt;
+import java.util.concurrent.Executors;
+
+import static com.yahoo.yolean.Exceptions.uncheck;
+
+/**
+ * Test driver for {@link RestApi}
+ *
+ * @author bjorncs
+ */
+public class RestApiTestDriver implements AutoCloseable {
+
+ private final RestApiRequestHandler<?> handler;
+ private final TestDriver testDriver;
+
+ private RestApiTestDriver(Builder builder) {
+ this.handler = builder.handler;
+ this.testDriver = builder.jdiscHttpServer ? TestDriver.newBuilder().withRequestHandler(builder.handler).build() : null;
+ }
+
+ public static Builder newBuilder(RestApiRequestHandler<?> handler) { return new Builder(handler); }
+
+ @FunctionalInterface public interface RestApiRequestHandlerFactory { RestApiRequestHandler<?> create(LoggingRequestHandler.Context context); }
+ public static Builder newBuilder(RestApiRequestHandlerFactory factory) { return new Builder(factory); }
+
+ public static LoggingRequestHandler.Context createHandlerTestContext() {
+ return new LoggingRequestHandler.Context(Executors.newSingleThreadExecutor(), new MockMetric());
+ }
+
+ public OptionalInt listenPort() {
+ return testDriver != null ? OptionalInt.of(testDriver.server().getListenPort()) : OptionalInt.empty();
+ }
+
+ public RestApiRequestHandler<?> handler() { return handler; }
+ public RestApi restApi() { return handler.restApi(); }
+ public ObjectMapper jacksonJsonMapper() { return handler.restApi().jacksonJsonMapper(); }
+
+ public HttpResponse executeRequest(HttpRequest request) { return handler.handle(request); }
+
+ public InputStream requestContentOf(Object jacksonEntity) {
+ ByteArrayOutputStream out = new ByteArrayOutputStream();
+ uncheck(() -> handler.restApi().jacksonJsonMapper().writeValue(out, jacksonEntity));
+ return new ByteArrayInputStream(out.toByteArray());
+ }
+
+ public <T> T parseJacksonResponseContent(HttpResponse response, TypeReference<T> type) {
+ return uncheck(() -> handler.restApi().jacksonJsonMapper().readValue(responseData(response), type));
+ }
+
+ public <T> T parseJacksonResponseContent(HttpResponse response, Class<T> type) {
+ return uncheck(() -> handler.restApi().jacksonJsonMapper().readValue(responseData(response), type));
+ }
+
+ private static byte[] responseData(HttpResponse response) {
+ ByteArrayOutputStream out = new ByteArrayOutputStream();
+ uncheck(() -> response.render(out));
+ return out.toByteArray();
+ }
+
+ @Override
+ public void close() throws Exception {
+ if (testDriver != null) {
+ testDriver.close();
+ }
+ }
+
+ public static class Builder {
+ private final RestApiRequestHandler<?> handler;
+ private boolean jdiscHttpServer = false;
+
+ private Builder(RestApiRequestHandler<?> handler) {
+ this.handler = handler;
+ }
+
+ private Builder(RestApiRequestHandlerFactory factory) { this(factory.create(createHandlerTestContext())); }
+
+ public Builder withJdiscHttpServer() { this.jdiscHttpServer = true; return this; }
+
+ public RestApiTestDriver build() { return new RestApiTestDriver(this); }
+ }
+
+}
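A hedged usage sketch of the new driver, exercising a handler in-process and decoding the JSON response through the driver's Jackson mapper (the handler instance and MyStatusDto are placeholders):

// Hypothetical test fragment; myHandler is the RestApiRequestHandler under test, constructed
// elsewhere. No Jetty server is started unless withJdiscHttpServer() is used on the builder.
try (RestApiTestDriver driver = RestApiTestDriver.newBuilder(myHandler).build()) {
    HttpResponse response = driver.executeRequest(
            HttpRequest.createTestRequest("http://localhost/status",
                                          com.yahoo.jdisc.http.HttpRequest.Method.GET));
    MyStatusDto dto = driver.parseJacksonResponseContent(response, MyStatusDto.class);  // placeholder DTO
    // ... assertions on dto ...
}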
diff --git a/container-di/src/main/resources/configdefinitions/application-bundles.def b/container-core/src/main/resources/configdefinitions/application-bundles.def
index 7e03b1e3ac8..7e03b1e3ac8 100644
--- a/container-di/src/main/resources/configdefinitions/application-bundles.def
+++ b/container-core/src/main/resources/configdefinitions/application-bundles.def
diff --git a/container-di/src/main/resources/configdefinitions/container.components.def b/container-core/src/main/resources/configdefinitions/container.components.def
index f27abc2fa5a..f27abc2fa5a 100644
--- a/container-di/src/main/resources/configdefinitions/container.components.def
+++ b/container-core/src/main/resources/configdefinitions/container.components.def
diff --git a/container-core/src/main/resources/configdefinitions/container.core.access-log.def b/container-core/src/main/resources/configdefinitions/container.core.access-log.def
index 69058b3d8da..e6052b7068c 100644
--- a/container-core/src/main/resources/configdefinitions/container.core.access-log.def
+++ b/container-core/src/main/resources/configdefinitions/container.core.access-log.def
@@ -21,3 +21,6 @@ fileHandler.compressionFormat enum {GZIP, ZSTD} default=GZIP
# Max queue length of file handler
fileHandler.queueSize int default=10000
+
+# Buffer size for the output stream; the default is 256k (262144 bytes)
+fileHandler.bufferSize int default=262144
diff --git a/container-di/src/main/resources/configdefinitions/container.di.config.jersey-bundles.def b/container-core/src/main/resources/configdefinitions/container.di.config.jersey-bundles.def
index a226420274d..a226420274d 100644
--- a/container-di/src/main/resources/configdefinitions/container.di.config.jersey-bundles.def
+++ b/container-core/src/main/resources/configdefinitions/container.di.config.jersey-bundles.def
diff --git a/container-di/src/main/resources/configdefinitions/container.di.config.jersey-injection.def b/container-core/src/main/resources/configdefinitions/container.di.config.jersey-injection.def
index 9f5be59abbd..9f5be59abbd 100644
--- a/container-di/src/main/resources/configdefinitions/container.di.config.jersey-injection.def
+++ b/container-core/src/main/resources/configdefinitions/container.di.config.jersey-injection.def
diff --git a/container-core/src/main/resources/configdefinitions/container.logging.connection-log.def b/container-core/src/main/resources/configdefinitions/container.logging.connection-log.def
index 65b632c9008..cb2145cd01c 100644
--- a/container-core/src/main/resources/configdefinitions/container.logging.connection-log.def
+++ b/container-core/src/main/resources/configdefinitions/container.logging.connection-log.def
@@ -8,4 +8,7 @@ cluster string
logDirectoryName string default="qrs"
# Max queue length of file handler
-queueSize int default=10000
\ No newline at end of file
+queueSize int default=10000
+
+# Buffer size for the output stream; the default is 256k (262144 bytes)
+bufferSize int default=262144
diff --git a/container-core/src/main/resources/configdefinitions/jdisc.http.jdisc.http.connector.def b/container-core/src/main/resources/configdefinitions/jdisc.http.jdisc.http.connector.def
index 055e5ad62d2..cb1e366f843 100644
--- a/container-core/src/main/resources/configdefinitions/jdisc.http.jdisc.http.connector.def
+++ b/container-core/src/main/resources/configdefinitions/jdisc.http.jdisc.http.connector.def
@@ -125,3 +125,6 @@ maxRequestsPerConnection int default=0
# Maximum number of seconds a connection can live before it's marked as non-persistent. Set to '0' to disable.
maxConnectionLife double default=0.0
+
+# Enable HTTP/2 (in addition to HTTP/1.1 using ALPN)
+http2Enabled bool default=false
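The flag defaults to off; a hedged sketch of flipping it for a test connector, assuming the generated ConnectorConfig.Builder exposes a same-named setter (the usual convention for config definitions):

// Assumption: the config field generates a same-named builder setter.
ConnectorConfig connectorConfig = new ConnectorConfig(new ConnectorConfig.Builder()
        .listenPort(0)          // ephemeral port
        .http2Enabled(true));   // enable HTTP/2 alongside HTTP/1.1 via ALPN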
diff --git a/simplemetrics/src/main/resources/configdefinitions/metrics.manager.def b/container-core/src/main/resources/configdefinitions/metrics.manager.def
index 6446e0df8b6..6446e0df8b6 100644
--- a/simplemetrics/src/main/resources/configdefinitions/metrics.manager.def
+++ b/container-core/src/main/resources/configdefinitions/metrics.manager.def
diff --git a/container-di/src/main/resources/configdefinitions/platform-bundles.def b/container-core/src/main/resources/configdefinitions/platform-bundles.def
index a30a846b565..a30a846b565 100644
--- a/container-di/src/main/resources/configdefinitions/platform-bundles.def
+++ b/container-core/src/main/resources/configdefinitions/platform-bundles.def
diff --git a/container-di/src/test/java/com/yahoo/component/ComponentSpecTestCase.java b/container-core/src/test/java/com/yahoo/component/ComponentSpecTestCase.java
index 6fe58e99fda..6fe58e99fda 100644
--- a/container-di/src/test/java/com/yahoo/component/ComponentSpecTestCase.java
+++ b/container-core/src/test/java/com/yahoo/component/ComponentSpecTestCase.java
diff --git a/container-di/src/test/java/com/yahoo/component/provider/test/ComponentRegistryTestCase.java b/container-core/src/test/java/com/yahoo/component/provider/test/ComponentRegistryTestCase.java
index 69eec95b746..69eec95b746 100644
--- a/container-di/src/test/java/com/yahoo/component/provider/test/ComponentRegistryTestCase.java
+++ b/container-core/src/test/java/com/yahoo/component/provider/test/ComponentRegistryTestCase.java
diff --git a/container-di/src/test/java/com/yahoo/component/test/ComponentIdTestCase.java b/container-core/src/test/java/com/yahoo/component/test/ComponentIdTestCase.java
index 0842ee4a797..0842ee4a797 100644
--- a/container-di/src/test/java/com/yahoo/component/test/ComponentIdTestCase.java
+++ b/container-core/src/test/java/com/yahoo/component/test/ComponentIdTestCase.java
diff --git a/container-di/src/test/java/com/yahoo/container/di/ConfigRetrieverTest.java b/container-core/src/test/java/com/yahoo/container/di/ConfigRetrieverTest.java
index 290836d7842..290836d7842 100644
--- a/container-di/src/test/java/com/yahoo/container/di/ConfigRetrieverTest.java
+++ b/container-core/src/test/java/com/yahoo/container/di/ConfigRetrieverTest.java
diff --git a/container-di/src/test/java/com/yahoo/container/di/ContainerTest.java b/container-core/src/test/java/com/yahoo/container/di/ContainerTest.java
index b596246a43d..b596246a43d 100644
--- a/container-di/src/test/java/com/yahoo/container/di/ContainerTest.java
+++ b/container-core/src/test/java/com/yahoo/container/di/ContainerTest.java
diff --git a/container-di/src/test/java/com/yahoo/container/di/ContainerTestBase.java b/container-core/src/test/java/com/yahoo/container/di/ContainerTestBase.java
index 2106a1f3671..2106a1f3671 100644
--- a/container-di/src/test/java/com/yahoo/container/di/ContainerTestBase.java
+++ b/container-core/src/test/java/com/yahoo/container/di/ContainerTestBase.java
diff --git a/container-di/src/test/java/com/yahoo/container/di/DirConfigSource.java b/container-core/src/test/java/com/yahoo/container/di/DirConfigSource.java
index ec937a1a4ef..ec937a1a4ef 100644
--- a/container-di/src/test/java/com/yahoo/container/di/DirConfigSource.java
+++ b/container-core/src/test/java/com/yahoo/container/di/DirConfigSource.java
diff --git a/container-di/src/test/java/com/yahoo/container/di/componentgraph/core/ComponentGraphTest.java b/container-core/src/test/java/com/yahoo/container/di/componentgraph/core/ComponentGraphTest.java
index 70dc4c8665c..70dc4c8665c 100644
--- a/container-di/src/test/java/com/yahoo/container/di/componentgraph/core/ComponentGraphTest.java
+++ b/container-core/src/test/java/com/yahoo/container/di/componentgraph/core/ComponentGraphTest.java
diff --git a/container-di/src/test/java/com/yahoo/container/di/componentgraph/core/FallbackToGuiceInjectorTest.java b/container-core/src/test/java/com/yahoo/container/di/componentgraph/core/FallbackToGuiceInjectorTest.java
index 7c517d67960..7c517d67960 100644
--- a/container-di/src/test/java/com/yahoo/container/di/componentgraph/core/FallbackToGuiceInjectorTest.java
+++ b/container-core/src/test/java/com/yahoo/container/di/componentgraph/core/FallbackToGuiceInjectorTest.java
diff --git a/container-di/src/test/java/com/yahoo/container/di/componentgraph/core/JerseyNodeTest.java b/container-core/src/test/java/com/yahoo/container/di/componentgraph/core/JerseyNodeTest.java
index f30f9260830..f30f9260830 100644
--- a/container-di/src/test/java/com/yahoo/container/di/componentgraph/core/JerseyNodeTest.java
+++ b/container-core/src/test/java/com/yahoo/container/di/componentgraph/core/JerseyNodeTest.java
diff --git a/container-di/src/test/java/com/yahoo/container/di/componentgraph/core/ReuseComponentsTest.java b/container-core/src/test/java/com/yahoo/container/di/componentgraph/core/ReuseComponentsTest.java
index e61e90cd718..e61e90cd718 100644
--- a/container-di/src/test/java/com/yahoo/container/di/componentgraph/core/ReuseComponentsTest.java
+++ b/container-core/src/test/java/com/yahoo/container/di/componentgraph/core/ReuseComponentsTest.java
diff --git a/container-di/src/test/java/com/yahoo/container/di/componentgraph/cycle/CycleFinderTest.java b/container-core/src/test/java/com/yahoo/container/di/componentgraph/cycle/CycleFinderTest.java
index 174ca301c59..174ca301c59 100644
--- a/container-di/src/test/java/com/yahoo/container/di/componentgraph/cycle/CycleFinderTest.java
+++ b/container-core/src/test/java/com/yahoo/container/di/componentgraph/cycle/CycleFinderTest.java
diff --git a/container-di/src/test/java/com/yahoo/container/di/componentgraph/cycle/GraphTest.java b/container-core/src/test/java/com/yahoo/container/di/componentgraph/cycle/GraphTest.java
index 069f72ad8e7..069f72ad8e7 100644
--- a/container-di/src/test/java/com/yahoo/container/di/componentgraph/cycle/GraphTest.java
+++ b/container-core/src/test/java/com/yahoo/container/di/componentgraph/cycle/GraphTest.java
diff --git a/container-core/src/test/java/com/yahoo/container/handler/LogHandlerTest.java b/container-core/src/test/java/com/yahoo/container/handler/LogHandlerTest.java
index afe57579a97..38683c75375 100644
--- a/container-core/src/test/java/com/yahoo/container/handler/LogHandlerTest.java
+++ b/container-core/src/test/java/com/yahoo/container/handler/LogHandlerTest.java
@@ -2,6 +2,7 @@
package com.yahoo.container.handler;
import com.yahoo.container.jdisc.AsyncHttpResponse;
+import com.yahoo.container.jdisc.ContentChannelOutputStream;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.jdisc.handler.ReadableContentChannel;
import com.yahoo.yolean.Exceptions;
@@ -28,7 +29,7 @@ public class LogHandlerTest {
String uri = "http://myhost.com:1111/logs?from=1000&to=2000";
AsyncHttpResponse response = logHandler.handle(HttpRequest.createTestRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.GET));
ReadableContentChannel out = new ReadableContentChannel();
- new Thread(() -> Exceptions.uncheck(() -> response.render(null, out, null))).start();
+ new Thread(() -> Exceptions.uncheck(() -> response.render(new ContentChannelOutputStream(out), out, null))).start();
String expectedResponse = "newer log";
assertEquals(expectedResponse, new String(out.toStream().readAllBytes(), UTF_8));
}
@@ -37,7 +38,7 @@ public class LogHandlerTest {
String uri = "http://myhost.com:1111/logs?from=0&to=1000";
AsyncHttpResponse response = logHandler.handle(HttpRequest.createTestRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.GET));
ReadableContentChannel out = new ReadableContentChannel();
- new Thread(() -> Exceptions.uncheck(() -> response.render(null, out, null))).start();
+ new Thread(() -> Exceptions.uncheck(() -> response.render(new ContentChannelOutputStream(out), out, null))).start();
String expectedResponse = "older log";
assertEquals(expectedResponse, new String(out.toStream().readAllBytes(), UTF_8));
}
diff --git a/container-core/src/test/java/com/yahoo/container/jdisc/ThreadedHttpRequestHandlerTest.java b/container-core/src/test/java/com/yahoo/container/jdisc/ThreadedHttpRequestHandlerTest.java
index 07dba21e5b6..3d02f3e08f2 100644
--- a/container-core/src/test/java/com/yahoo/container/jdisc/ThreadedHttpRequestHandlerTest.java
+++ b/container-core/src/test/java/com/yahoo/container/jdisc/ThreadedHttpRequestHandlerTest.java
@@ -21,7 +21,7 @@ public class ThreadedHttpRequestHandlerTest {
ThreadedHttpRequestHandlerThrowingException handler = new ThreadedHttpRequestHandlerThrowingException(metricMock);
RequestHandlerTestDriver driver = new RequestHandlerTestDriver(handler);
- driver.sendRequest("http://localhost/myhandler");
+ driver.sendRequest("http://localhost/myhandler").readAll();
String expectedMetricName = "jdisc.http.handler.unhandled_exceptions";
assertThat(metricMock.addInvocations)
.containsKey(expectedMetricName);
@@ -54,4 +54,4 @@ public class ThreadedHttpRequestHandlerTest {
}
private static class DummyException extends RuntimeException {}
-}
\ No newline at end of file
+}
diff --git a/container-core/src/test/java/com/yahoo/container/jdisc/ThreadedRequestHandlerTestCase.java b/container-core/src/test/java/com/yahoo/container/jdisc/ThreadedRequestHandlerTestCase.java
index 331c536a531..cfea0f5c38b 100644
--- a/container-core/src/test/java/com/yahoo/container/jdisc/ThreadedRequestHandlerTestCase.java
+++ b/container-core/src/test/java/com/yahoo/container/jdisc/ThreadedRequestHandlerTestCase.java
@@ -1,22 +1,30 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.container.jdisc;
+import com.yahoo.container.jdisc.ThreadedHttpRequestHandler.MaxPendingContentChannelOutputStream;
import com.yahoo.jdisc.Request;
import com.yahoo.jdisc.Response;
import com.yahoo.jdisc.application.ContainerBuilder;
import com.yahoo.jdisc.handler.*;
import com.yahoo.jdisc.test.TestDriver;
-import org.junit.Ignore;
+import com.yahoo.yolean.Exceptions;
import org.junit.Test;
+import java.io.IOException;
import java.net.URI;
import java.nio.ByteBuffer;
import java.time.Duration;
import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.Phaser;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicInteger;
import static org.junit.Assert.*;
@@ -353,4 +361,44 @@ public class ThreadedRequestHandlerTestCase {
latch.countDown();
}
}
+
+ @Test
+ public void testMaxPendingOutputStream() throws IOException, ExecutionException, InterruptedException {
+ ReadableContentChannel buffer = new ReadableContentChannel();
+ MaxPendingContentChannelOutputStream limited = new MaxPendingContentChannelOutputStream(buffer, 2);
+
+ ExecutorService executor = Executors.newSingleThreadExecutor();
+
+ limited.send(ByteBuffer.allocate(2));
+ limited.send(ByteBuffer.allocate(1)); // 2 is not > 2, so OK.
+
+ // Next write will block.
+ Future<?> future = executor.submit(() -> Exceptions.uncheck(() -> limited.send(ByteBuffer.allocate(0))));
+ try {
+ future.get(100, TimeUnit.MILLISECONDS);
+ fail("Should not be able to write now");
+ }
+ catch (TimeoutException expected) { }
+
+ // Free buffer capacity, so write completes, then drain buffer.
+ assertEquals(2, buffer.read().capacity());
+ future.get();
+ buffer.close(null);
+ assertEquals(1, buffer.read().capacity());
+ assertEquals(0, buffer.read().capacity());
+ assertNull(buffer.read());
+
+ // Buffer is closed, so further writes fail. This does not count towards pending bytes.
+ try {
+ limited.send(ByteBuffer.allocate(3));
+ fail("Should throw");
+ }
+ catch (IOException expected) { }
+ try {
+ limited.send(ByteBuffer.allocate(3));
+ fail("Should throw");
+ }
+ catch (IOException expected) { }
+ }
+
}
diff --git a/container-core/src/test/java/com/yahoo/container/logging/LogFileHandlerTestCase.java b/container-core/src/test/java/com/yahoo/container/logging/LogFileHandlerTestCase.java
index dad8f5e3f90..d43e3dcebbe 100644
--- a/container-core/src/test/java/com/yahoo/container/logging/LogFileHandlerTestCase.java
+++ b/container-core/src/test/java/com/yahoo/container/logging/LogFileHandlerTestCase.java
@@ -33,6 +33,7 @@ import static org.junit.Assert.assertNotEquals;
* @author bjorncs
*/
public class LogFileHandlerTestCase {
+ private static final int BUFFER_SIZE = 0x10000;
@Rule
public TemporaryFolder temporaryFolder = new TemporaryFolder();
@@ -43,7 +44,7 @@ public class LogFileHandlerTestCase {
String pattern = root.getAbsolutePath() + "/logfilehandlertest.%Y%m%d%H%M%S";
long[] rTimes = {1000, 2000, 10000};
- LogFileHandler<String> h = new LogFileHandler<>(Compression.NONE, pattern, rTimes, null, 2048, "thread-name", new StringLogWriter());
+ LogFileHandler<String> h = new LogFileHandler<>(Compression.NONE, BUFFER_SIZE, pattern, rTimes, null, 2048, "thread-name", new StringLogWriter());
long now = System.currentTimeMillis();
long millisPerDay = 60*60*24*1000;
long tomorrowDays = (now / millisPerDay) +1;
@@ -65,7 +66,7 @@ public class LogFileHandlerTestCase {
File logFile = temporaryFolder.newFile("testLogFileG1.txt");
//create logfilehandler
- LogFileHandler<String> h = new LogFileHandler<>(Compression.NONE, logFile.getAbsolutePath(), "0 5 ...", null, 2048, "thread-name", new StringLogWriter());
+ LogFileHandler<String> h = new LogFileHandler<>(Compression.NONE, BUFFER_SIZE, logFile.getAbsolutePath(), "0 5 ...", null, 2048, "thread-name", new StringLogWriter());
//write log
h.publish("testDeleteFileFirst1");
@@ -78,7 +79,7 @@ public class LogFileHandlerTestCase {
File logFile = temporaryFolder.newFile("testLogFileG2.txt");
//create logfilehandler
- LogFileHandler<String> h = new LogFileHandler<>(Compression.NONE, logFile.getAbsolutePath(), "0 5 ...", null, 2048, "thread-name", new StringLogWriter());
+ LogFileHandler<String> h = new LogFileHandler<>(Compression.NONE, BUFFER_SIZE, logFile.getAbsolutePath(), "0 5 ...", null, 2048, "thread-name", new StringLogWriter());
//write log
h.publish("testDeleteFileDuringLogging1");
@@ -104,7 +105,7 @@ public class LogFileHandlerTestCase {
}
};
LogFileHandler<String> handler = new LogFileHandler<>(
- Compression.NONE, root.getAbsolutePath() + "/logfilehandlertest.%Y%m%d%H%M%S%s", new long[]{0}, "symlink", 2048, "thread-name", new StringLogWriter());
+ Compression.NONE, BUFFER_SIZE, root.getAbsolutePath() + "/logfilehandlertest.%Y%m%d%H%M%S%s", new long[]{0}, "symlink", 2048, "thread-name", new StringLogWriter());
String message = formatter.format(new LogRecord(Level.INFO, "test"));
handler.publishAndWait(message);
@@ -128,7 +129,7 @@ public class LogFileHandlerTestCase {
public void compresses_previous_log_file() throws InterruptedException, IOException {
File root = temporaryFolder.newFolder("compressespreviouslogfile");
LogFileHandler<String> firstHandler = new LogFileHandler<>(
- Compression.ZSTD, root.getAbsolutePath() + "/compressespreviouslogfile.%Y%m%d%H%M%S%s", new long[]{0}, "symlink", 2048, "thread-name", new StringLogWriter());
+ Compression.ZSTD, BUFFER_SIZE, root.getAbsolutePath() + "/compressespreviouslogfile.%Y%m%d%H%M%S%s", new long[]{0}, "symlink", 2048, "thread-name", new StringLogWriter());
firstHandler.publishAndWait("test");
firstHandler.shutdown();
@@ -136,7 +137,7 @@ public class LogFileHandlerTestCase {
assertThat(root.toPath().resolve("symlink").toRealPath().toString()).isEqualTo(firstHandler.getFileName());
LogFileHandler<String> secondHandler = new LogFileHandler<>(
- Compression.ZSTD, root.getAbsolutePath() + "/compressespreviouslogfile.%Y%m%d%H%M%S%s", new long[]{0}, "symlink", 2048, "thread-name", new StringLogWriter());
+ Compression.ZSTD, BUFFER_SIZE, root.getAbsolutePath() + "/compressespreviouslogfile.%Y%m%d%H%M%S%s", new long[]{0}, "symlink", 2048, "thread-name", new StringLogWriter());
secondHandler.publishAndWait("test");
secondHandler.rotateNow();
@@ -174,7 +175,7 @@ public class LogFileHandlerTestCase {
File root = temporaryFolder.newFolder("testcompression" + compression.name());
LogFileHandler<String> h = new LogFileHandler<>(
- compression, root.getAbsolutePath() + "/logfilehandlertest.%Y%m%d%H%M%S%s", new long[]{0}, null, 2048, "thread-name", new StringLogWriter());
+ compression, BUFFER_SIZE, root.getAbsolutePath() + "/logfilehandlertest.%Y%m%d%H%M%S%s", new long[]{0}, null, 2048, "thread-name", new StringLogWriter());
int logEntries = 10000;
for (int i = 0; i < logEntries; i++) {
h.publish("test");
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/filter/ServletFilterRequestTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/filter/ServletFilterRequestTest.java
index 3052902f174..ed4c9b66068 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/filter/ServletFilterRequestTest.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/filter/ServletFilterRequestTest.java
@@ -3,12 +3,11 @@ package com.yahoo.jdisc.http.filter;
import com.yahoo.jdisc.http.Cookie;
import com.yahoo.jdisc.http.HttpHeaders;
+import com.yahoo.jdisc.http.server.jetty.JettyMockRequestBuilder;
import com.yahoo.jdisc.http.servlet.ServletRequest;
-import org.eclipse.jetty.server.HttpConnection;
+import org.eclipse.jetty.server.Request;
import org.junit.Before;
import org.junit.Test;
-import org.mockito.Mockito;
-import org.springframework.mock.web.MockHttpServletRequest;
import java.net.URI;
import java.util.Arrays;
@@ -18,7 +17,6 @@ import java.util.List;
import static com.yahoo.jdisc.http.HttpRequest.Version;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.when;
/**
* Test the parts of the DiscFilterRequest API that are implemented
@@ -26,7 +24,6 @@ import static org.mockito.Mockito.when;
* {@link com.yahoo.jdisc.http.servlet.ServletRequest}.
*
* @author gjoranv
- * @since 5.27
*/
public class ServletFilterRequestTest {
@@ -54,18 +51,14 @@ public class ServletFilterRequestTest {
parentRequest = ((ServletFilterRequest)filterRequest).getServletRequest();
}
- private ServletRequest newServletRequest() throws Exception {
- MockHttpServletRequest parent = new MockHttpServletRequest("GET", uri.toString());
- parent.setProtocol(Version.HTTP_1_1.toString());
- parent.setRemoteHost(host);
- parent.setRemotePort(port);
- parent.setParameter(paramName, paramValue);
- parent.setParameter(listParamName, listParamValue);
- parent.addHeader(headerName, headerValue);
- parent.setAttribute(attributeName, attributeValue);
- HttpConnection connection = Mockito.mock(HttpConnection.class);
- when(connection.getCreatedTimeStamp()).thenReturn(System.currentTimeMillis());
- parent.setAttribute("org.eclipse.jetty.server.HttpConnection", connection);
+ private ServletRequest newServletRequest() {
+ Request parent = JettyMockRequestBuilder.newBuilder()
+ .remote("1.2.3.4", host, port)
+ .header(headerName, List.of(headerValue))
+ .parameter(paramName, List.of(paramValue))
+ .parameter(listParamName, List.of(listParamValue))
+ .attribute(attributeName, attributeValue)
+ .build();
return new ServletRequest(parent, uri);
}
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/AccessLogRequestLogTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/AccessLogRequestLogTest.java
index e472f954afc..c45d17a4ff8 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/AccessLogRequestLogTest.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/AccessLogRequestLogTest.java
@@ -4,12 +4,7 @@ package com.yahoo.jdisc.http.server.jetty;
import com.yahoo.container.logging.AccessLogEntry;
import com.yahoo.container.logging.RequestLog;
import com.yahoo.container.logging.RequestLogEntry;
-import com.yahoo.jdisc.http.ConnectorConfig;
import com.yahoo.jdisc.http.ServerConfig;
-import org.eclipse.jetty.http.MetaData;
-import org.eclipse.jetty.server.HttpChannel;
-import org.eclipse.jetty.server.HttpConnection;
-import org.eclipse.jetty.server.HttpInput;
import org.eclipse.jetty.server.Request;
import org.eclipse.jetty.server.Response;
import org.junit.Test;
@@ -23,8 +18,6 @@ import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
/**
* @author Oyvind Bakksjo
@@ -33,9 +26,9 @@ import static org.mockito.Mockito.when;
public class AccessLogRequestLogTest {
@Test
public void requireThatQueryWithUnquotedSpecialCharactersIsHandled() {
- final Request jettyRequest = createRequestMock();
- when(jettyRequest.getRequestURI()).thenReturn("/search/");
- when(jettyRequest.getQueryString()).thenReturn("query=year:>2010");
+ Request jettyRequest = createRequestBuilder()
+ .uri("http", "localhost", 12345, "/search/", "query=year:>2010")
+ .build();
InMemoryRequestLog requestLog = new InMemoryRequestLog();
doAccessLoggingOfRequest(requestLog, jettyRequest);
@@ -47,11 +40,11 @@ public class AccessLogRequestLogTest {
@Test
public void requireThatDoubleQuotingIsNotPerformed() {
- final Request jettyRequest = createRequestMock();
- final String path = "/search/";
- when(jettyRequest.getRequestURI()).thenReturn(path);
- final String query = "query=year%252010+%3B&customParameter=something";
- when(jettyRequest.getQueryString()).thenReturn(query);
+ String path = "/search/";
+ String query = "query=year%252010+%3B&customParameter=something";
+ Request jettyRequest = createRequestBuilder()
+ .uri("http", "localhost", 12345, path, query)
+ .build();
InMemoryRequestLog requestLog = new InMemoryRequestLog();
doAccessLoggingOfRequest(requestLog, jettyRequest);
@@ -64,11 +57,11 @@ public class AccessLogRequestLogTest {
@Test
public void raw_path_and_query_are_set_from_request() {
- Request jettyRequest = createRequestMock();
String rawPath = "//search/";
- when(jettyRequest.getRequestURI()).thenReturn(rawPath);
String rawQuery = "q=%%2";
- when(jettyRequest.getQueryString()).thenReturn(rawQuery);
+ Request jettyRequest = createRequestBuilder()
+ .uri("http", "localhost", 12345, rawPath, rawQuery)
+ .build();
InMemoryRequestLog requestLog = new InMemoryRequestLog();
doAccessLoggingOfRequest(requestLog, jettyRequest);
@@ -81,11 +74,11 @@ public class AccessLogRequestLogTest {
@Test
public void verify_x_forwarded_for_precedence () {
- Request jettyRequest = createRequestMock();
- when(jettyRequest.getRequestURI()).thenReturn("//search/");
- when(jettyRequest.getQueryString()).thenReturn("q=%%2");
- when(jettyRequest.getHeader("x-forwarded-for")).thenReturn("1.2.3.4");
- when(jettyRequest.getHeader("y-ra")).thenReturn("2.3.4.5");
+ Request jettyRequest = createRequestBuilder()
+ .uri("http", "localhost", 12345, "//search/", "q=%%2")
+ .header("x-forwarded-for", List.of("1.2.3.4"))
+ .header("y-ra", List.of("2.3.4.5"))
+ .build();
InMemoryRequestLog requestLog = new InMemoryRequestLog();
doAccessLoggingOfRequest(requestLog, jettyRequest);
@@ -95,11 +88,11 @@ public class AccessLogRequestLogTest {
@Test
public void verify_x_forwarded_port_precedence () {
- Request jettyRequest = createRequestMock();
- when(jettyRequest.getRequestURI()).thenReturn("//search/");
- when(jettyRequest.getQueryString()).thenReturn("q=%%2");
- when(jettyRequest.getHeader("X-Forwarded-Port")).thenReturn("80");
- when(jettyRequest.getHeader("y-rp")).thenReturn("8080");
+ Request jettyRequest = createRequestBuilder()
+ .uri("http", "localhost", 12345, "//search/", "q=%%2")
+ .header("X-Forwarded-Port", List.of("80"))
+ .header("y-rp", List.of("8080"))
+ .build();
InMemoryRequestLog requestLog = new InMemoryRequestLog();
doAccessLoggingOfRequest(requestLog, jettyRequest);
@@ -109,10 +102,12 @@ public class AccessLogRequestLogTest {
@Test
public void defaults_to_peer_port_if_remote_port_header_is_invalid() {
- final Request jettyRequest = createRequestMock();
- when(jettyRequest.getRequestURI()).thenReturn("/search/");
- when(jettyRequest.getHeader("X-Forwarded-Port")).thenReturn("8o8o");
- when(jettyRequest.getRemotePort()).thenReturn(80);
+ Request jettyRequest = createRequestBuilder()
+ .uri("http", "localhost", 12345, "/search/", null)
+ .header("X-Forwarded-Port", List.of("8o8o"))
+ .header("y-rp", List.of("8o8o"))
+ .remote("2.3.4.5", "localhost", 80)
+ .build();
InMemoryRequestLog requestLog = new InMemoryRequestLog();
doAccessLoggingOfRequest(requestLog, jettyRequest);
@@ -129,32 +124,14 @@ public class AccessLogRequestLogTest {
new AccessLogRequestLog(requestLog, config).log(jettyRequest, createResponseMock());
}
- private static Request createRequestMock() {
- JDiscServerConnector serverConnector = mock(JDiscServerConnector.class);
- int localPort = 1234;
- when(serverConnector.connectorConfig()).thenReturn(new ConnectorConfig(new ConnectorConfig.Builder().listenPort(localPort)));
- when(serverConnector.getLocalPort()).thenReturn(localPort);
- HttpConnection httpConnection = mock(HttpConnection.class);
- when(httpConnection.getConnector()).thenReturn(serverConnector);
- Request request = mock(Request.class);
- when(request.getMethod()).thenReturn("GET");
- when(request.getRemoteAddr()).thenReturn("localhost");
- when(request.getRemotePort()).thenReturn(12345);
- when(request.getProtocol()).thenReturn("HTTP/1.1");
- when(request.getScheme()).thenReturn("http");
- when(request.getTimeStamp()).thenReturn(0L);
- when(request.getAttribute(JDiscHttpServlet.ATTRIBUTE_NAME_ACCESS_LOG_ENTRY)).thenReturn(new AccessLogEntry());
- when(request.getAttribute("org.eclipse.jetty.server.HttpConnection")).thenReturn(httpConnection);
- HttpInput httpInput = mock(HttpInput.class);
- when(httpInput.getContentReceived()).thenReturn(2345L);
- when(request.getHttpInput()).thenReturn(httpInput);
- return request;
+ private static JettyMockRequestBuilder createRequestBuilder() {
+ return JettyMockRequestBuilder.newBuilder()
+ .attribute(JDiscHttpServlet.ATTRIBUTE_NAME_ACCESS_LOG_ENTRY, new AccessLogEntry())
+ .remote("2.3.4.5", "localhost", 12345)
+ .localPort(1234);
}
private Response createResponseMock() {
- Response response = mock(Response.class);
- when(response.getHttpChannel()).thenReturn(mock(HttpChannel.class));
- when(response.getCommittedMetaData()).thenReturn(mock(MetaData.Response.class));
- return response;
+ return JettyMockResponseBuilder.newBuilder().build();
}
}
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/FilterTestCase.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/FilterTestCase.java
index a67656dd5ca..43f722df3c9 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/FilterTestCase.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/FilterTestCase.java
@@ -3,6 +3,8 @@ package com.yahoo.jdisc.http.server.jetty;
import com.google.inject.AbstractModule;
import com.google.inject.util.Modules;
+import com.yahoo.container.jdisc.HttpRequestHandler;
+import com.yahoo.container.jdisc.RequestHandlerSpec;
import com.yahoo.container.logging.ConnectionLog;
import com.yahoo.container.logging.RequestLog;
import com.yahoo.jdisc.AbstractResource;
@@ -12,6 +14,8 @@ import com.yahoo.jdisc.Response;
import com.yahoo.jdisc.handler.AbstractRequestHandler;
import com.yahoo.jdisc.handler.CompletionHandler;
import com.yahoo.jdisc.handler.ContentChannel;
+import com.yahoo.jdisc.handler.DelegatedRequestHandler;
+import com.yahoo.jdisc.handler.RequestHandler;
import com.yahoo.jdisc.handler.ResponseDispatch;
import com.yahoo.jdisc.handler.ResponseHandler;
import com.yahoo.jdisc.http.ConnectorConfig;
@@ -24,7 +28,8 @@ import com.yahoo.jdisc.http.filter.ResponseFilter;
import com.yahoo.jdisc.http.filter.ResponseHeaderFilter;
import com.yahoo.jdisc.http.filter.chain.RequestFilterChain;
import com.yahoo.jdisc.http.filter.chain.ResponseFilterChain;
-import com.yahoo.jdisc.http.guiceModules.ConnectorFactoryRegistryModule;
+import com.yahoo.jdisc.http.server.jetty.testutils.ConnectorFactoryRegistryModule;
+import org.assertj.core.api.Assertions;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
@@ -61,7 +66,7 @@ public class FilterTestCase {
.addRequestFilterBinding("my-request-filter", "http://*/filtered/*")
.build();
final MyRequestHandler requestHandler = new MyRequestHandler();
- final TestDriver testDriver = newDriver(requestHandler, filterBindings);
+ final JettyTestDriver testDriver = newDriver(requestHandler, filterBindings);
testDriver.client().get("/status.html");
@@ -79,7 +84,7 @@ public class FilterTestCase {
.addRequestFilterBinding("my-request-filter", "http://*/filtered/*")
.build();
final MyRequestHandler requestHandler = new MyRequestHandler();
- final TestDriver testDriver = newDriver(requestHandler, filterBindings);
+ final JettyTestDriver testDriver = newDriver(requestHandler, filterBindings);
testDriver.client().get("/filtered/status.html");
@@ -97,9 +102,9 @@ public class FilterTestCase {
.addRequestFilterBinding("my-request-filter", "http://*/*")
.build();
final MyRequestHandler requestHandler = new MyRequestHandler();
- final TestDriver testDriver = newDriver(requestHandler, filterBindings);
+ final JettyTestDriver testDriver = newDriver(requestHandler, filterBindings);
- testDriver.client().get("status.html");
+ testDriver.client().get("/status.html");
assertThat(requestHandler.awaitInvocation(), is(true));
assertThat(requestHandler.getHeaderMap().get("foo").get(0), is("bar"));
@@ -114,7 +119,7 @@ public class FilterTestCase {
.addRequestFilterBinding("my-request-filter", "http://*/*")
.build();
final MyRequestHandler requestHandler = new MyRequestHandler();
- final TestDriver testDriver = newDriver(requestHandler, filterBindings);
+ final JettyTestDriver testDriver = newDriver(requestHandler, filterBindings);
testDriver.client().get("/status.html").expectStatusCode(is(Response.Status.FORBIDDEN));
@@ -132,7 +137,7 @@ public class FilterTestCase {
.addRequestFilterBinding("my-request-filter", "http://*/*")
.build();
final MyRequestHandler requestHandler = new MyRequestHandler();
- final TestDriver testDriver = newDriver(requestHandler, filterBindings);
+ final JettyTestDriver testDriver = newDriver(requestHandler, filterBindings);
testDriver.client().get("/status.html")
.expectStatusCode(is(responseStatus))
@@ -150,7 +155,7 @@ public class FilterTestCase {
.addRequestFilterBinding("my-request-filter", "http://*/*")
.build();
final MyRequestHandler requestHandler = new MyRequestHandler();
- final TestDriver testDriver = newDriver(requestHandler, filterBindings);
+ final JettyTestDriver testDriver = newDriver(requestHandler, filterBindings);
testDriver.client().get("/status.html").expectStatusCode(is(Response.Status.INTERNAL_SERVER_ERROR));
@@ -167,7 +172,7 @@ public class FilterTestCase {
.addResponseFilterBinding("my-response-filter", "http://*/filtered/*")
.build();
final MyRequestHandler requestHandler = new MyRequestHandler();
- final TestDriver testDriver = newDriver(requestHandler, filterBindings);
+ final JettyTestDriver testDriver = newDriver(requestHandler, filterBindings);
testDriver.client().get("/status.html");
@@ -185,7 +190,7 @@ public class FilterTestCase {
.addResponseFilterBinding("my-response-filter", "http://*/filtered/*")
.build();
final MyRequestHandler requestHandler = new MyRequestHandler();
- final TestDriver testDriver = newDriver(requestHandler, filterBindings);
+ final JettyTestDriver testDriver = newDriver(requestHandler, filterBindings);
testDriver.client().get("/filtered/status.html");
@@ -202,7 +207,7 @@ public class FilterTestCase {
.addResponseFilterBinding("my-response-filter", "http://*/*")
.build();
final MyRequestHandler requestHandler = new MyRequestHandler();
- final TestDriver testDriver = newDriver(requestHandler, filterBindings);
+ final JettyTestDriver testDriver = newDriver(requestHandler, filterBindings);
testDriver.client().get("/status.html")
.expectHeader("foo", is("bar"));
@@ -219,7 +224,7 @@ public class FilterTestCase {
.addResponseFilterBinding("my-response-filter", "http://*/*")
.build();
final MyRequestHandler requestHandler = new MyRequestHandler();
- final TestDriver testDriver = newDriver(requestHandler, filterBindings);
+ final JettyTestDriver testDriver = newDriver(requestHandler, filterBindings);
testDriver.client().get("/status.html").expectStatusCode(is(Response.Status.INTERNAL_SERVER_ERROR));
@@ -240,7 +245,7 @@ public class FilterTestCase {
.addResponseFilterBinding("my-response-filter", uriPattern)
.build();
final MyRequestHandler requestHandler = new MyRequestHandler();
- final TestDriver testDriver = newDriver(requestHandler, filterBindings);
+ final JettyTestDriver testDriver = newDriver(requestHandler, filterBindings);
testDriver.client().get("/status.html");
@@ -260,7 +265,7 @@ public class FilterTestCase {
.addResponseFilterBinding("my-response-filter", "http://*/*")
.build();
final MyRequestHandler requestHandler = new MyRequestHandler();
- final TestDriver testDriver = newDriver(requestHandler, filterBindings);
+ final JettyTestDriver testDriver = newDriver(requestHandler, filterBindings);
testDriver.client().get("/status.html")
.expectStatusCode(is(Response.Status.FORBIDDEN))
@@ -410,7 +415,7 @@ public class FilterTestCase {
.setRequestFilterDefaultForPort(defaultFilterId, 0)
.build();
MyRequestHandler requestHandler = new MyRequestHandler();
- TestDriver testDriver = newDriver(requestHandler, filterBindings);
+ JettyTestDriver testDriver = newDriver(requestHandler, filterBindings);
testDriver.client().get("/status.html");
@@ -433,7 +438,7 @@ public class FilterTestCase {
.setResponseFilterDefaultForPort(defaultFilterId, 0)
.build();
MyRequestHandler requestHandler = new MyRequestHandler();
- TestDriver testDriver = newDriver(requestHandler, filterBindings);
+ JettyTestDriver testDriver = newDriver(requestHandler, filterBindings);
testDriver.client().get("/status.html");
@@ -456,7 +461,7 @@ public class FilterTestCase {
.setRequestFilterDefaultForPort(defaultFilterId, 0)
.build();
MyRequestHandler requestHandler = new MyRequestHandler();
- TestDriver testDriver = newDriver(requestHandler, filterBindings);
+ JettyTestDriver testDriver = newDriver(requestHandler, filterBindings);
testDriver.client().get("/filtered/status.html");
@@ -479,7 +484,7 @@ public class FilterTestCase {
.setResponseFilterDefaultForPort(defaultFilterId, 0)
.build();
MyRequestHandler requestHandler = new MyRequestHandler();
- TestDriver testDriver = newDriver(requestHandler, filterBindings);
+ JettyTestDriver testDriver = newDriver(requestHandler, filterBindings);
testDriver.client().get("/filtered/status.html");
@@ -498,7 +503,7 @@ public class FilterTestCase {
.build();
MetricConsumerMock metricConsumerMock = new MetricConsumerMock();
MyRequestHandler requestHandler = new MyRequestHandler();
- TestDriver testDriver = newDriver(requestHandler, filterBindings, metricConsumerMock, false);
+ JettyTestDriver testDriver = newDriver(requestHandler, filterBindings, metricConsumerMock, false);
testDriver.client().get("/status.html");
assertThat(requestHandler.awaitInvocation(), is(true));
@@ -521,7 +526,7 @@ public class FilterTestCase {
.addRequestFilterBinding("my-request-filter", "http://*/filtered/*")
.build();
MyRequestHandler requestHandler = new MyRequestHandler();
- TestDriver testDriver = newDriver(requestHandler, filterBindings, new MetricConsumerMock(), true);
+ JettyTestDriver testDriver = newDriver(requestHandler, filterBindings, new MetricConsumerMock(), true);
testDriver.client().get("/unfiltered/")
.expectStatusCode(is(Response.Status.FORBIDDEN))
@@ -530,17 +535,46 @@ public class FilterTestCase {
assertThat(testDriver.close(), is(true));
}
- private static TestDriver newDriver(MyRequestHandler requestHandler, FilterBindings filterBindings) {
+ @Test
+ public void requireThatRequestHandlerSpecIsAvailableThroughDelegate() throws IOException, InterruptedException {
+ MyRequestHandler requestHandler = new MyHttpRequestHandler();
+ MyDelegatedHandler delegateHandler1 = new MyDelegatedHandler(requestHandler);
+ MyDelegatedHandler delegateHandler2 = new MyDelegatedHandler(delegateHandler1);
+ requestHandlerSpecTest(delegateHandler2);
+ }
+
+ @Test
+ public void requireThatRequestHandlerSpecIsAvailable() throws IOException, InterruptedException {
+ MyRequestHandler requestHandler = new MyHttpRequestHandler();
+ requestHandlerSpecTest(requestHandler);
+ }
+
+ private void requestHandlerSpecTest(MyRequestHandler requestHandler) throws IOException, InterruptedException {
+ RequestFilter filter = mock(RequestFilter.class);
+ FilterBindings filterBindings = new FilterBindings.Builder()
+ .addRequestFilter("my-request-filter", filter)
+ .addRequestFilterBinding("my-request-filter", "http://*/filtered/*")
+ .build();
+
+ JettyTestDriver testDriver = newDriver(requestHandler, filterBindings, new MetricConsumerMock(), true);
+
+ testDriver.client().get("/filtered/")
+ .expectStatusCode(is(Response.Status.OK));
+ ArgumentCaptor<HttpRequest> requestArgumentCaptor = ArgumentCaptor.forClass(HttpRequest.class);
+ verify(filter).filter(requestArgumentCaptor.capture(), any(ResponseHandler.class));
+ Assertions.assertThat(requestArgumentCaptor.getValue().context()).containsKey(RequestHandlerSpec.ATTRIBUTE_NAME);
+ }
+
+ private static JettyTestDriver newDriver(MyRequestHandler requestHandler, FilterBindings filterBindings) {
return newDriver(requestHandler, filterBindings, new MetricConsumerMock(), false);
}
- private static TestDriver newDriver(
+ private static JettyTestDriver newDriver(
MyRequestHandler requestHandler,
FilterBindings filterBindings,
MetricConsumerMock metricConsumer,
boolean strictFiltering) {
- return TestDriver.newInstance(
- JettyHttpServer.class,
+ return JettyTestDriver.newInstance(
requestHandler,
newFilterModule(filterBindings, metricConsumer, strictFiltering));
}
@@ -664,4 +698,33 @@ public class FilterTestCase {
channel.close(null);
}
}
+
+ private static class MyDelegatedHandler extends MyRequestHandler implements DelegatedRequestHandler {
+
+ private final RequestHandler delegate;
+
+ public MyDelegatedHandler(RequestHandler delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ public RequestHandler getDelegate() {
+ return delegate;
+ }
+ @Override
+ public ContentChannel handleRequest(Request request, ResponseHandler handler) {
+ return delegate.handleRequest(request, handler);
+ }
+ @Override
+ public void handleTimeout(Request request, ResponseHandler handler) {
+ delegate.handleTimeout(request, handler);
+ }
+ }
+
+ private static class MyHttpRequestHandler extends MyRequestHandler implements HttpRequestHandler {
+ @Override
+ public RequestHandlerSpec requestHandlerSpec() {
+ return RequestHandlerSpec.DEFAULT_INSTANCE;
+ }
+ }
}
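Note: the new requestHandlerSpec tests exercise handlers wrapped in DelegatedRequestHandler. A minimal sketch (an assumed helper, not part of this change) of how nested delegates can be unwrapped to reach the underlying HttpRequestHandler:

    static RequestHandler unwrap(RequestHandler handler) {
        // Follow getDelegate() until the innermost handler is reached
        while (handler instanceof DelegatedRequestHandler) {
            handler = ((DelegatedRequestHandler) handler).getDelegate();
        }
        return handler;
    }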
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpRequestFactoryTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpRequestFactoryTest.java
index 9c1348004ee..fbbf3074839 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpRequestFactoryTest.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpRequestFactoryTest.java
@@ -7,10 +7,8 @@ import com.yahoo.jdisc.References;
import com.yahoo.jdisc.ResourceReference;
import com.yahoo.jdisc.Response;
import com.yahoo.jdisc.handler.RequestHandler;
-import com.yahoo.jdisc.http.ConnectorConfig;
import com.yahoo.jdisc.http.HttpRequest;
import com.yahoo.jdisc.service.CurrentContainer;
-import org.eclipse.jetty.server.HttpConnection;
import org.junit.Test;
import javax.servlet.http.HttpServletRequest;
@@ -22,8 +20,6 @@ import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.fail;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
/**
* @author Steinar Knutsen
@@ -141,27 +137,15 @@ public class HttpRequestFactoryTest {
assertEquals(LOCAL_PORT, request.getUri().getPort());
}
- private static HttpServletRequest createMockRequest(String scheme, String serverName, String path, String queryString) {
- HttpServletRequest request = mock(HttpServletRequest.class);
- HttpConnection connection = mock(HttpConnection.class);
- JDiscServerConnector connector = mock(JDiscServerConnector.class);
- when(connector.connectorConfig()).thenReturn(new ConnectorConfig(new ConnectorConfig.Builder().listenPort(LOCAL_PORT)));
- when(connector.getLocalPort()).thenReturn(LOCAL_PORT);
- when(connection.getCreatedTimeStamp()).thenReturn(System.currentTimeMillis());
- when(connection.getConnector()).thenReturn(connector);
- when(request.getAttribute("org.eclipse.jetty.server.HttpConnection")).thenReturn(connection);
- when(request.getProtocol()).thenReturn("HTTP/1.1");
- when(request.getScheme()).thenReturn(scheme);
- when(request.getServerName()).thenReturn(serverName);
- when(request.getRemoteAddr()).thenReturn("127.0.0.1");
- when(request.getRemotePort()).thenReturn(1234);
- when(request.getLocalPort()).thenReturn(LOCAL_PORT);
- when(request.getMethod()).thenReturn("GET");
- when(request.getQueryString()).thenReturn(queryString);
- when(request.getRequestURI()).thenReturn(path);
- return request;
+ private HttpServletRequest createMockRequest(String scheme, String host, String path, String query) {
+ return JettyMockRequestBuilder.newBuilder()
+ .uri(scheme, host, LOCAL_PORT, path, query)
+ .remote("127.0.0.1", "localhost", 1234)
+ .localPort(LOCAL_PORT)
+ .build();
}
+
private static final class MockContainer implements CurrentContainer {
@Override
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerConformanceTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerConformanceTest.java
index 5659dfc2d3c..825e3eba110 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerConformanceTest.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerConformanceTest.java
@@ -8,7 +8,7 @@ import com.yahoo.container.logging.ConnectionLog;
import com.yahoo.container.logging.RequestLog;
import com.yahoo.jdisc.http.ServerConfig;
import com.yahoo.jdisc.http.ServletPathsConfig;
-import com.yahoo.jdisc.http.guiceModules.ConnectorFactoryRegistryModule;
+import com.yahoo.jdisc.http.server.jetty.testutils.ConnectorFactoryRegistryModule;
import com.yahoo.jdisc.test.ServerProviderConformanceTest;
import org.apache.http.HttpResponse;
import org.apache.http.HttpVersion;
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java
index d8e94d13813..a5804dc9b86 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java
@@ -26,7 +26,7 @@ import com.yahoo.jdisc.http.Cookie;
import com.yahoo.jdisc.http.HttpRequest;
import com.yahoo.jdisc.http.HttpResponse;
import com.yahoo.jdisc.http.ServerConfig;
-import com.yahoo.jdisc.http.server.jetty.TestDrivers.TlsClientAuth;
+import com.yahoo.jdisc.http.server.jetty.JettyTestDriver.TlsClientAuth;
import com.yahoo.jdisc.service.BindingSetNotFoundException;
import com.yahoo.security.KeyUtils;
import com.yahoo.security.Pkcs10Csr;
@@ -35,10 +35,19 @@ import com.yahoo.security.SslContextBuilder;
import com.yahoo.security.X509CertificateBuilder;
import com.yahoo.security.X509CertificateUtils;
import com.yahoo.security.tls.TlsContext;
-import org.apache.http.conn.ssl.NoopHostnameVerifier;
-import org.apache.http.entity.ContentType;
-import org.apache.http.entity.mime.FormBodyPart;
-import org.apache.http.entity.mime.content.StringBody;
+import org.apache.hc.client5.http.async.methods.SimpleHttpRequests;
+import org.apache.hc.client5.http.async.methods.SimpleHttpResponse;
+import org.apache.hc.client5.http.entity.mime.FormBodyPart;
+import org.apache.hc.client5.http.entity.mime.FormBodyPartBuilder;
+import org.apache.hc.client5.http.entity.mime.StringBody;
+import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient;
+import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder;
+import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder;
+import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder;
+import org.apache.hc.client5.http.ssl.NoopHostnameVerifier;
+import org.apache.hc.core5.http.ContentType;
+import org.apache.hc.core5.http.nio.ssl.TlsStrategy;
+import org.apache.hc.core5.http2.HttpVersionPolicy;
import org.assertj.core.api.Assertions;
import org.eclipse.jetty.client.HttpClient;
import org.eclipse.jetty.client.ProxyProtocolClientConnectionFactory.V1;
@@ -107,6 +116,7 @@ import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.anyOf;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeTrue;
@@ -129,16 +139,16 @@ public class HttpServerTest {
@Test
public void requireThatServerCanListenToRandomPort() throws Exception {
- final TestDriver driver = TestDrivers.newInstance(mockRequestHandler());
+ final JettyTestDriver driver = JettyTestDriver.newInstance(mockRequestHandler());
assertNotEquals(0, driver.server().getListenPort());
assertTrue(driver.close());
}
@Test
public void requireThatServerCanNotListenToBoundPort() throws Exception {
- final TestDriver driver = TestDrivers.newInstance(mockRequestHandler());
+ final JettyTestDriver driver = JettyTestDriver.newInstance(mockRequestHandler());
try {
- TestDrivers.newConfiguredInstance(
+ JettyTestDriver.newConfiguredInstance(
mockRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder()
@@ -152,7 +162,7 @@ public class HttpServerTest {
@Test
public void requireThatBindingSetNotFoundReturns404() throws Exception {
- final TestDriver driver = TestDrivers.newConfiguredInstance(
+ final JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
mockRequestHandler(),
new ServerConfig.Builder()
.developerMode(true),
@@ -169,7 +179,7 @@ public class HttpServerTest {
@Test
public void requireThatTooLongInitLineReturns414() throws Exception {
- final TestDriver driver = TestDrivers.newConfiguredInstance(
+ final JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
mockRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder()
@@ -182,7 +192,7 @@ public class HttpServerTest {
@Test
public void requireThatAccessLogIsCalledForRequestRejectedByJetty() throws Exception {
BlockingQueueRequestLog requestLogMock = new BlockingQueueRequestLog();
- final TestDriver driver = TestDrivers.newConfiguredInstance(
+ final JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
mockRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder().requestHeaderSize(1),
@@ -196,7 +206,7 @@ public class HttpServerTest {
@Test
public void requireThatServerCanEcho() throws Exception {
- final TestDriver driver = TestDrivers.newInstance(new EchoRequestHandler());
+ final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
driver.client().get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
@@ -204,7 +214,7 @@ public class HttpServerTest {
@Test
public void requireThatServerCanEchoCompressed() throws Exception {
- final TestDriver driver = TestDrivers.newInstance(new EchoRequestHandler());
+ final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
SimpleHttpClient client = driver.newClient(true);
client.get("/status.html")
.expectStatusCode(is(OK));
@@ -213,7 +223,7 @@ public class HttpServerTest {
@Test
public void requireThatServerCanHandleMultipleRequests() throws Exception {
- final TestDriver driver = TestDrivers.newInstance(new EchoRequestHandler());
+ final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
driver.client().get("/status.html")
.expectStatusCode(is(OK));
driver.client().get("/status.html")
@@ -223,7 +233,7 @@ public class HttpServerTest {
@Test
public void requireThatFormPostWorks() throws Exception {
- final TestDriver driver = TestDrivers.newInstance(new ParameterPrinterRequestHandler());
+ final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final String requestContent = generateContent('a', 30);
final ResponseValidator response =
driver.client().newPost("/status.html")
@@ -237,7 +247,7 @@ public class HttpServerTest {
@Test
public void requireThatFormPostDoesNotRemoveContentByDefault() throws Exception {
- final TestDriver driver = TestDrivers.newInstance(new ParameterPrinterRequestHandler());
+ final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
@@ -250,7 +260,7 @@ public class HttpServerTest {
@Test
public void requireThatFormPostKeepsContentWhenConfiguredTo() throws Exception {
- final TestDriver driver = newDriverWithFormPostContentRemoved(new ParameterPrinterRequestHandler(), false);
+ final JettyTestDriver driver = newDriverWithFormPostContentRemoved(new ParameterPrinterRequestHandler(), false);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
@@ -263,7 +273,7 @@ public class HttpServerTest {
@Test
public void requireThatFormPostRemovesContentWhenConfiguredTo() throws Exception {
- final TestDriver driver = newDriverWithFormPostContentRemoved(new ParameterPrinterRequestHandler(), true);
+ final JettyTestDriver driver = newDriverWithFormPostContentRemoved(new ParameterPrinterRequestHandler(), true);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
@@ -276,7 +286,7 @@ public class HttpServerTest {
@Test
public void requireThatFormPostWithCharsetSpecifiedWorks() throws Exception {
- final TestDriver driver = TestDrivers.newInstance(new ParameterPrinterRequestHandler());
+ final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final String requestContent = generateContent('a', 30);
final ResponseValidator response =
driver.client().newPost("/status.html")
@@ -291,7 +301,7 @@ public class HttpServerTest {
@Test
public void requireThatEmptyFormPostWorks() throws Exception {
- final TestDriver driver = TestDrivers.newInstance(new ParameterPrinterRequestHandler());
+ final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
@@ -303,7 +313,7 @@ public class HttpServerTest {
@Test
public void requireThatFormParametersAreParsed() throws Exception {
- final TestDriver driver = TestDrivers.newInstance(new ParameterPrinterRequestHandler());
+ final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
@@ -316,7 +326,7 @@ public class HttpServerTest {
@Test
public void requireThatUriParametersAreParsed() throws Exception {
- final TestDriver driver = TestDrivers.newInstance(new ParameterPrinterRequestHandler());
+ final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html?a=b&c=d")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
@@ -328,7 +338,7 @@ public class HttpServerTest {
@Test
public void requireThatFormAndUriParametersAreMerged() throws Exception {
- final TestDriver driver = TestDrivers.newInstance(new ParameterPrinterRequestHandler());
+ final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html?a=b&c=d1")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
@@ -341,7 +351,7 @@ public class HttpServerTest {
@Test
public void requireThatFormCharsetIsHonored() throws Exception {
- final TestDriver driver = newDriverWithFormPostContentRemoved(new ParameterPrinterRequestHandler(), true);
+ final JettyTestDriver driver = newDriverWithFormPostContentRemoved(new ParameterPrinterRequestHandler(), true);
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED + ";charset=ISO-8859-1")
@@ -354,7 +364,7 @@ public class HttpServerTest {
@Test
public void requireThatUnknownFormCharsetIsTreatedAsBadRequest() throws Exception {
- final TestDriver driver = TestDrivers.newInstance(new ParameterPrinterRequestHandler());
+ final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED + ";charset=FLARBA-GARBA-7")
@@ -366,7 +376,7 @@ public class HttpServerTest {
@Test
public void requireThatFormPostWithPercentEncodedContentIsDecoded() throws Exception {
- final TestDriver driver = TestDrivers.newInstance(new ParameterPrinterRequestHandler());
+ final JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
@@ -379,7 +389,7 @@ public class HttpServerTest {
@Test
public void requireThatFormPostWithThrowingHandlerIsExceptionSafe() throws Exception {
- final TestDriver driver = TestDrivers.newInstance(new ThrowingHandler());
+ final JettyTestDriver driver = JettyTestDriver.newInstance(new ThrowingHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
@@ -396,12 +406,12 @@ public class HttpServerTest {
final String updaterConfContent
= "identifier = updater\n"
+ "server_type = gds\n";
- final TestDriver driver = TestDrivers.newInstance(new EchoRequestHandler());
+ final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.setMultipartContent(
- newFileBody("", "start.txt", startTxtContent),
- newFileBody("", "updater.conf", updaterConfContent))
+ newFileBody("start.txt", startTxtContent),
+ newFileBody("updater.conf", updaterConfContent))
.execute();
response.expectStatusCode(is(OK))
.expectContent(containsString(startTxtContent))
@@ -410,7 +420,7 @@ public class HttpServerTest {
@Test
public void requireThatRequestCookiesAreReceived() throws Exception {
- final TestDriver driver = TestDrivers.newInstance(new CookiePrinterRequestHandler());
+ final JettyTestDriver driver = JettyTestDriver.newInstance(new CookiePrinterRequestHandler());
final ResponseValidator response =
driver.client().newPost("/status.html")
.addHeader(COOKIE, "foo=bar")
@@ -422,7 +432,7 @@ public class HttpServerTest {
@Test
public void requireThatSetCookieHeaderIsCorrect() throws Exception {
- final TestDriver driver = TestDrivers.newInstance(new CookieSetterRequestHandler(
+ final JettyTestDriver driver = JettyTestDriver.newInstance(new CookieSetterRequestHandler(
new Cookie("foo", "bar")
.setDomain(".localhost")
.setHttpOnly(true)
@@ -438,7 +448,7 @@ public class HttpServerTest {
@Test
public void requireThatTimeoutWorks() throws Exception {
final UnresponsiveHandler requestHandler = new UnresponsiveHandler();
- final TestDriver driver = TestDrivers.newInstance(requestHandler);
+ final JettyTestDriver driver = JettyTestDriver.newInstance(requestHandler);
driver.client().get("/status.html")
.expectStatusCode(is(GATEWAY_TIMEOUT));
ResponseDispatch.newInstance(OK).dispatch(requestHandler.responseHandler);
@@ -449,7 +459,7 @@ public class HttpServerTest {
// Details in https://github.com/eclipse/jetty.project/issues/1116
@Test
public void requireThatHeaderWithNullValueIsOmitted() throws Exception {
- final TestDriver driver = TestDrivers.newInstance(new EchoWithHeaderRequestHandler("X-Foo", null));
+ final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoWithHeaderRequestHandler("X-Foo", null));
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectNoHeader("X-Foo");
@@ -460,7 +470,7 @@ public class HttpServerTest {
// Details in https://github.com/eclipse/jetty.project/issues/1116
@Test
public void requireThatHeaderWithEmptyValueIsAllowed() throws Exception {
- final TestDriver driver = TestDrivers.newInstance(new EchoWithHeaderRequestHandler("X-Foo", ""));
+ final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoWithHeaderRequestHandler("X-Foo", ""));
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectHeader("X-Foo", is(""));
@@ -469,7 +479,7 @@ public class HttpServerTest {
@Test
public void requireThatNoConnectionHeaderMeansKeepAliveInHttp11KeepAliveDisabled() throws Exception {
- final TestDriver driver = TestDrivers.newInstance(new EchoWithHeaderRequestHandler(CONNECTION, CLOSE));
+ final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoWithHeaderRequestHandler(CONNECTION, CLOSE));
driver.client().get("/status.html")
.expectHeader(CONNECTION, is(CLOSE));
assertThat(driver.close(), is(true));
@@ -478,7 +488,7 @@ public class HttpServerTest {
@Test
public void requireThatConnectionIsClosedAfterXRequests() throws Exception {
final int MAX_KEEPALIVE_REQUESTS = 100;
- final TestDriver driver = TestDrivers.newConfiguredInstance(new EchoRequestHandler(),
+ final JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(new EchoRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder().maxRequestsPerConnection(MAX_KEEPALIVE_REQUESTS));
for (int i = 0; i < MAX_KEEPALIVE_REQUESTS - 1; i++) {
@@ -498,18 +508,38 @@ public class HttpServerTest {
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
- final TestDriver driver = TestDrivers.newInstanceWithSsl(new EchoRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.WANT);
+ final JettyTestDriver driver = JettyTestDriver.newInstanceWithSsl(new EchoRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.WANT);
driver.client().get("/status.html")
.expectStatusCode(is(OK));
assertTrue(driver.close());
}
@Test
+ public void requireThatServerCanRespondToHttp2Request() throws Exception {
+ Path privateKeyFile = tmpFolder.newFile().toPath();
+ Path certificateFile = tmpFolder.newFile().toPath();
+ generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
+
+ MetricConsumerMock metricConsumer = new MetricConsumerMock();
+ InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
+ JettyTestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
+ try (CloseableHttpAsyncClient client = createHttp2Client(certificateFile, privateKeyFile)) {
+ String uri = "https://localhost:" + driver.server().getListenPort() + "/status.html";
+ SimpleHttpResponse response = client.execute(SimpleHttpRequests.get(uri), null).get();
+ assertNull(response.getBodyText());
+ assertEquals(OK, response.getCode());
+ }
+ assertTrue(driver.close());
+ ConnectionLogEntry entry = connectionLog.logEntries().get(0);
+ assertEquals("HTTP/2.0", entry.httpProtocol().get());
+ }
+
+ @Test
public void requireThatTlsClientAuthenticationEnforcerRejectsRequestsForNonWhitelistedPaths() throws IOException {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
- TestDriver driver = TestDrivers.newInstanceWithSsl(new EchoRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.WANT);
+ JettyTestDriver driver = createSslWithTlsClientAuthenticationEnforcer(certificateFile, privateKeyFile);
SSLContext trustStoreOnlyCtx = new SslContextBuilder()
.withTrustStore(certificateFile)
@@ -527,7 +557,7 @@ public class HttpServerTest {
Path privateKeyFile = tmpFolder.newFile().toPath();
Path certificateFile = tmpFolder.newFile().toPath();
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
- TestDriver driver = TestDrivers.newInstanceWithSsl(new EchoRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.WANT);
+ JettyTestDriver driver = JettyTestDriver.newInstanceWithSsl(new EchoRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.WANT);
SSLContext trustStoreOnlyCtx = new SslContextBuilder()
.withTrustStore(certificateFile)
@@ -542,7 +572,7 @@ public class HttpServerTest {
@Test
public void requireThatConnectedAtReturnsNonZero() throws Exception {
- final TestDriver driver = TestDrivers.newInstance(new ConnectedAtRequestHandler());
+ final JettyTestDriver driver = JettyTestDriver.newInstance(new ConnectedAtRequestHandler());
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectContent(matchesPattern("\\d{13,}"));
@@ -551,7 +581,7 @@ public class HttpServerTest {
@Test
public void requireThatGzipEncodingRequestsAreAutomaticallyDecompressed() throws Exception {
- TestDriver driver = TestDrivers.newInstance(new ParameterPrinterRequestHandler());
+ JettyTestDriver driver = JettyTestDriver.newInstance(new ParameterPrinterRequestHandler());
String requestContent = generateContent('a', 30);
ResponseValidator response = driver.client().newPost("/status.html")
.addHeader(CONTENT_TYPE, APPLICATION_X_WWW_FORM_URLENCODED)
@@ -565,7 +595,7 @@ public class HttpServerTest {
@Test
public void requireThatResponseStatsAreCollected() throws Exception {
RequestTypeHandler handler = new RequestTypeHandler();
- TestDriver driver = TestDrivers.newInstance(handler);
+ JettyTestDriver driver = JettyTestDriver.newInstance(handler);
HttpResponseStatisticsCollector statisticsCollector = ((AbstractHandlerContainer) driver.server().server().getHandler())
.getChildHandlerByClass(HttpResponseStatisticsCollector.class);
@@ -620,7 +650,7 @@ public class HttpServerTest {
@Test
public void requireThatConnectionThrottleDoesNotBlockConnectionsBelowThreshold() throws Exception {
- TestDriver driver = TestDrivers.newConfiguredInstance(
+ JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
new EchoRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder()
@@ -641,7 +671,7 @@ public class HttpServerTest {
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
var metricConsumer = new MetricConsumerMock();
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
- TestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
+ JettyTestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
SSLContext clientCtx = new SslContextBuilder()
.withTrustStore(certificateFile)
@@ -663,7 +693,7 @@ public class HttpServerTest {
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
var metricConsumer = new MetricConsumerMock();
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
- TestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
+ JettyTestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
SSLContext clientCtx = new SslContextBuilder()
.withTrustStore(certificateFile)
@@ -689,7 +719,7 @@ public class HttpServerTest {
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
var metricConsumer = new MetricConsumerMock();
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
- TestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
+ JettyTestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
SSLContext clientCtx = new SslContextBuilder()
.withTrustStore(certificateFile)
@@ -713,7 +743,7 @@ public class HttpServerTest {
generatePrivateKeyAndCertificate(serverPrivateKeyFile, serverCertificateFile);
var metricConsumer = new MetricConsumerMock();
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
- TestDriver driver = createSslTestDriver(serverCertificateFile, serverPrivateKeyFile, metricConsumer, connectionLog);
+ JettyTestDriver driver = createSslTestDriver(serverCertificateFile, serverPrivateKeyFile, metricConsumer, connectionLog);
Path clientPrivateKeyFile = tmpFolder.newFile().toPath();
Path clientCertificateFile = tmpFolder.newFile().toPath();
@@ -744,7 +774,7 @@ public class HttpServerTest {
generatePrivateKeyAndCertificate(rootPrivateKeyFile, rootCertificateFile, privateKeyFile, certificateFile, notAfter);
var metricConsumer = new MetricConsumerMock();
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
- TestDriver driver = createSslTestDriver(rootCertificateFile, rootPrivateKeyFile, metricConsumer, connectionLog);
+ JettyTestDriver driver = createSslTestDriver(rootCertificateFile, rootPrivateKeyFile, metricConsumer, connectionLog);
SSLContext clientCtx = new SslContextBuilder()
.withTrustStore(rootCertificateFile)
@@ -767,7 +797,7 @@ public class HttpServerTest {
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
InMemoryRequestLog requestLogMock = new InMemoryRequestLog();
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
- TestDriver driver = createSslWithProxyProtocolTestDriver(certificateFile, privateKeyFile, requestLogMock, /*mixedMode*/connectionLog, false);
+ JettyTestDriver driver = createSslWithProxyProtocolTestDriver(certificateFile, privateKeyFile, requestLogMock, /*mixedMode*/connectionLog, false);
String proxiedRemoteAddress = "192.168.0.100";
int proxiedRemotePort = 12345;
@@ -780,7 +810,9 @@ public class HttpServerTest {
assertLogEntryHasRemote(requestLogMock.entries().get(1), proxiedRemoteAddress, proxiedRemotePort);
Assertions.assertThat(connectionLog.logEntries()).hasSize(2);
assertLogEntryHasRemote(connectionLog.logEntries().get(0), proxiedRemoteAddress, proxiedRemotePort);
+ assertEquals("v1", connectionLog.logEntries().get(0).proxyProtocolVersion().get());
assertLogEntryHasRemote(connectionLog.logEntries().get(1), proxiedRemoteAddress, proxiedRemotePort);
+ assertEquals("v2", connectionLog.logEntries().get(1).proxyProtocolVersion().get());
}
@Test
@@ -790,19 +822,22 @@ public class HttpServerTest {
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
InMemoryRequestLog requestLogMock = new InMemoryRequestLog();
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
- TestDriver driver = createSslWithProxyProtocolTestDriver(certificateFile, privateKeyFile, requestLogMock, /*mixedMode*/connectionLog, true);
+ JettyTestDriver driver = createSslWithProxyProtocolTestDriver(certificateFile, privateKeyFile, requestLogMock, /*mixedMode*/connectionLog, true);
String proxiedRemoteAddress = "192.168.0.100";
sendJettyClientRequest(driver, certificateFile, null);
+ sendJettyClientRequest(driver, certificateFile, new V1.Tag(proxiedRemoteAddress, 12345));
sendJettyClientRequest(driver, certificateFile, new V2.Tag(proxiedRemoteAddress, 12345));
assertTrue(driver.close());
- assertEquals(2, requestLogMock.entries().size());
+ assertEquals(3, requestLogMock.entries().size());
assertLogEntryHasRemote(requestLogMock.entries().get(0), "127.0.0.1", 0);
assertLogEntryHasRemote(requestLogMock.entries().get(1), proxiedRemoteAddress, 0);
- Assertions.assertThat(connectionLog.logEntries()).hasSize(2);
+ assertLogEntryHasRemote(requestLogMock.entries().get(2), proxiedRemoteAddress, 0);
+ Assertions.assertThat(connectionLog.logEntries()).hasSize(3);
assertLogEntryHasRemote(connectionLog.logEntries().get(0), null, 0);
assertLogEntryHasRemote(connectionLog.logEntries().get(1), proxiedRemoteAddress, 12345);
+ assertLogEntryHasRemote(connectionLog.logEntries().get(2), proxiedRemoteAddress, 12345);
}
@Test
@@ -812,7 +847,7 @@ public class HttpServerTest {
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
InMemoryRequestLog requestLogMock = new InMemoryRequestLog();
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
- TestDriver driver = createSslWithProxyProtocolTestDriver(certificateFile, privateKeyFile, requestLogMock, connectionLog, /*mixedMode*/false);
+ JettyTestDriver driver = createSslWithProxyProtocolTestDriver(certificateFile, privateKeyFile, requestLogMock, connectionLog, /*mixedMode*/false);
String proxiedRemoteAddress = "192.168.0.100";
int proxiedRemotePort = 12345;
@@ -835,7 +870,7 @@ public class HttpServerTest {
generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
Module overrideModule = binder -> binder.bind(ConnectionLog.class).toInstance(connectionLog);
- TestDriver driver = TestDrivers.newInstanceWithSsl(new OkRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.NEED, overrideModule);
+ JettyTestDriver driver = JettyTestDriver.newInstanceWithSsl(new OkRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.NEED, overrideModule);
int listenPort = driver.server().getListenPort();
StringBuilder builder = new StringBuilder();
for (int i = 0; i < 1000; i++) {
@@ -870,7 +905,7 @@ public class HttpServerTest {
@Test
public void requireThatRequestIsTrackedInAccessLog() throws IOException, InterruptedException {
BlockingQueueRequestLog requestLogMock = new BlockingQueueRequestLog();
- TestDriver driver = TestDrivers.newConfiguredInstance(
+ JettyTestDriver driver = JettyTestDriver.newConfiguredInstance(
new EchoRequestHandler(),
new ServerConfig.Builder(),
new ConnectorConfig.Builder(),
@@ -882,7 +917,7 @@ public class HttpServerTest {
assertThat(driver.close(), is(true));
}
- private ContentResponse sendJettyClientRequest(TestDriver testDriver, Path certificateFile, Object tag)
+ private ContentResponse sendJettyClientRequest(JettyTestDriver testDriver, Path certificateFile, Object tag)
throws Exception {
HttpClient client = createJettyHttpClient(certificateFile);
try {
@@ -918,6 +953,21 @@ public class HttpServerTest {
return client;
}
+ private static CloseableHttpAsyncClient createHttp2Client(Path certificateFile, Path privateKeyFile) {
+ JettyTestDriver driver = JettyTestDriver.newInstanceWithSsl(new EchoRequestHandler(), certificateFile, privateKeyFile, TlsClientAuth.WANT);
+ TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create()
+ .setSslContext(driver.sslContext())
+ .build();
+ var client = HttpAsyncClientBuilder.create()
+ .setVersionPolicy(HttpVersionPolicy.FORCE_HTTP_2)
+ .disableConnectionState()
+ .disableAutomaticRetries()
+ .setConnectionManager(PoolingAsyncClientConnectionManagerBuilder.create().setTlsStrategy(tlsStrategy).build())
+ .build();
+ client.start();
+ return client;
+ }
+
private static void assertLogEntryHasRemote(RequestLogEntry entry, String expectedAddress, int expectedPort) {
assertEquals(expectedAddress, entry.peerAddress().get());
if (expectedPort > 0) {
@@ -947,10 +997,11 @@ public class HttpServerTest {
assertEquals(expectedException.getName(), exceptionEntry.name());
}
- private static TestDriver createSslWithProxyProtocolTestDriver(
+ private static JettyTestDriver createSslWithProxyProtocolTestDriver(
Path certificateFile, Path privateKeyFile, RequestLog requestLog,
ConnectionLog connectionLog, boolean mixedMode) {
ConnectorConfig.Builder connectorConfig = new ConnectorConfig.Builder()
+ .http2Enabled(true)
.proxyProtocol(new ConnectorConfig.ProxyProtocol.Builder()
.enabled(true)
.mixedMode(mixedMode))
@@ -959,7 +1010,7 @@ public class HttpServerTest {
.privateKeyFile(privateKeyFile.toString())
.certificateFile(certificateFile.toString())
.caCertificateFile(certificateFile.toString()));
- return TestDrivers.newConfiguredInstance(
+ return JettyTestDriver.newConfiguredInstance(
new EchoRequestHandler(),
new ServerConfig.Builder().connectionLog(new ServerConfig.ConnectionLog.Builder().enabled(true)),
connectorConfig,
@@ -969,18 +1020,37 @@ public class HttpServerTest {
});
}
- private static TestDriver createSslTestDriver(
+ private static JettyTestDriver createSslWithTlsClientAuthenticationEnforcer(Path certificateFile, Path privateKeyFile) {
+ ConnectorConfig.Builder connectorConfig = new ConnectorConfig.Builder()
+ .tlsClientAuthEnforcer(
+ new ConnectorConfig.TlsClientAuthEnforcer.Builder()
+ .enable(true)
+ .pathWhitelist("/status.html"))
+ .ssl(new ConnectorConfig.Ssl.Builder()
+ .enabled(true)
+ .clientAuth(ConnectorConfig.Ssl.ClientAuth.Enum.WANT_AUTH)
+ .privateKeyFile(privateKeyFile.toString())
+ .certificateFile(certificateFile.toString())
+ .caCertificateFile(certificateFile.toString()));
+ return JettyTestDriver.newConfiguredInstance(
+ new EchoRequestHandler(),
+ new ServerConfig.Builder().connectionLog(new ServerConfig.ConnectionLog.Builder().enabled(true)),
+ connectorConfig,
+ binder -> {});
+ }
+
+ private static JettyTestDriver createSslTestDriver(
Path serverCertificateFile, Path serverPrivateKeyFile, MetricConsumerMock metricConsumer, InMemoryConnectionLog connectionLog) throws IOException {
Module extraModule = binder -> {
binder.bind(MetricConsumer.class).toInstance(metricConsumer.mockitoMock());
binder.bind(ConnectionLog.class).toInstance(connectionLog);
};
- return TestDrivers.newInstanceWithSsl(
+ return JettyTestDriver.newInstanceWithSsl(
new EchoRequestHandler(), serverCertificateFile, serverPrivateKeyFile, TlsClientAuth.NEED, extraModule);
}
private static void assertHttpsRequestTriggersSslHandshakeException(
- TestDriver testDriver,
+ JettyTestDriver testDriver,
SSLContext sslContext,
String protocolOverride,
String cipherOverride,
@@ -1040,39 +1110,25 @@ public class HttpServerTest {
return ret.toString();
}
- private static TestDriver newDriverWithFormPostContentRemoved(RequestHandler requestHandler,
- boolean removeFormPostBody) throws Exception {
- return TestDrivers.newConfiguredInstance(
+ private static JettyTestDriver newDriverWithFormPostContentRemoved(RequestHandler requestHandler,
+ boolean removeFormPostBody) throws Exception {
+ return JettyTestDriver.newConfiguredInstance(
requestHandler,
new ServerConfig.Builder()
.removeRawPostBodyForWwwUrlEncodedPost(removeFormPostBody),
new ConnectorConfig.Builder());
}
- private static FormBodyPart newFileBody(final String parameterName, final String fileName, final String fileContent) {
- return new FormBodyPart(
- parameterName,
- new StringBody(fileContent, ContentType.TEXT_PLAIN) {
- @Override
- public String getFilename() {
- return fileName;
- }
-
- @Override
- public String getTransferEncoding() {
- return "binary";
- }
-
- @Override
- public String getMimeType() {
- return "";
- }
-
- @Override
- public String getCharset() {
- return null;
- }
- });
+ private static FormBodyPart newFileBody(final String fileName, final String fileContent) {
+ return FormBodyPartBuilder.create()
+ .setBody(
+ new StringBody(fileContent, ContentType.TEXT_PLAIN) {
+ @Override public String getFilename() { return fileName; }
+ @Override public String getMimeType() { return ""; }
+ @Override public String getCharset() { return null; }
+ })
+ .setName(fileName)
+ .build();
}
private static class ConnectedAtRequestHandler extends AbstractRequestHandler {
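Note: the rewritten newFileBody helper now produces an HttpClient 5 FormBodyPart. A usage sketch mirroring the multipart test above (the "/upload" path and file contents are illustrative):

    JettyTestDriver driver = JettyTestDriver.newInstance(new EchoRequestHandler());
    driver.client().newPost("/upload")
            .setMultipartContent(
                    newFileBody("start.txt", "start of machine"),
                    newFileBody("updater.conf", "identifier = updater\n"))
            .execute()
            .expectStatusCode(is(OK));
    assertTrue(driver.close());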
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/JDiscHttpServletTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/JDiscHttpServletTest.java
index 230f59cbb34..d46531ad844 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/JDiscHttpServletTest.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/JDiscHttpServletTest.java
@@ -7,15 +7,15 @@ import com.yahoo.jdisc.handler.AbstractRequestHandler;
import com.yahoo.jdisc.handler.ContentChannel;
import com.yahoo.jdisc.handler.RequestHandler;
import com.yahoo.jdisc.handler.ResponseHandler;
-import org.apache.http.client.methods.HttpDelete;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.client.methods.HttpHead;
-import org.apache.http.client.methods.HttpOptions;
-import org.apache.http.client.methods.HttpPatch;
-import org.apache.http.client.methods.HttpPost;
-import org.apache.http.client.methods.HttpPut;
-import org.apache.http.client.methods.HttpRequestBase;
-import org.apache.http.client.methods.HttpTrace;
+import org.apache.hc.client5.http.classic.methods.HttpDelete;
+import org.apache.hc.client5.http.classic.methods.HttpGet;
+import org.apache.hc.client5.http.classic.methods.HttpHead;
+import org.apache.hc.client5.http.classic.methods.HttpOptions;
+import org.apache.hc.client5.http.classic.methods.HttpPatch;
+import org.apache.hc.client5.http.classic.methods.HttpPost;
+import org.apache.hc.client5.http.classic.methods.HttpPut;
+import org.apache.hc.client5.http.classic.methods.HttpTrace;
+import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase;
import org.junit.Test;
import java.io.IOException;
@@ -33,7 +33,7 @@ public class JDiscHttpServletTest {
@Test
public void requireThatServerRespondsToAllMethods() throws Exception {
- final TestDriver driver = TestDrivers.newInstance(newEchoHandler());
+ final JettyTestDriver driver = JettyTestDriver.newInstance(newEchoHandler());
final URI uri = driver.client().newUri("/status.html");
driver.client().execute(new HttpGet(uri))
.expectStatusCode(is(OK));
@@ -56,7 +56,7 @@ public class JDiscHttpServletTest {
@Test
public void requireThatServerResponds405ToUnknownMethods() throws IOException {
- TestDriver driver = TestDrivers.newInstance(newEchoHandler());
+ JettyTestDriver driver = JettyTestDriver.newInstance(newEchoHandler());
final URI uri = driver.client().newUri("/status.html");
driver.client().execute(new UnknownMethodHttpRequest(uri))
.expectStatusCode(is(METHOD_NOT_ALLOWED));
@@ -73,8 +73,7 @@ public class JDiscHttpServletTest {
};
}
- private static class UnknownMethodHttpRequest extends HttpRequestBase {
- UnknownMethodHttpRequest(URI uri) { setURI(uri); }
- @Override public String getMethod() { return "UNKNOWN_METHOD"; }
+ private static class UnknownMethodHttpRequest extends HttpUriRequestBase {
+ UnknownMethodHttpRequest(URI uri) { super("UNKNOWN_METHOD", uri); }
}
}
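Note: with HttpClient 5 a request with a non-standard method no longer needs to override getMethod(); the method name is passed straight to the HttpUriRequestBase constructor. Sketch (URI value is illustrative):

    URI uri = URI.create("http://localhost:8080/status.html");
    HttpUriRequestBase request = new HttpUriRequestBase("UNKNOWN_METHOD", uri);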
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/JettyMockRequestBuilder.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/JettyMockRequestBuilder.java
new file mode 100644
index 00000000000..4bf6afeb3f1
--- /dev/null
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/JettyMockRequestBuilder.java
@@ -0,0 +1,176 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.jdisc.http.server.jetty;
+
+import com.yahoo.jdisc.http.ConnectorConfig;
+import org.eclipse.jetty.server.HttpChannel;
+import org.eclipse.jetty.server.HttpConnection;
+import org.eclipse.jetty.server.HttpInput;
+import org.eclipse.jetty.server.Request;
+import org.mockito.stubbing.Answer;
+
+import java.io.UnsupportedEncodingException;
+import java.util.Collections;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * Builder for creating a mock instance of Jetty's {@link Request} type.
+ *
+ * @author bjorncs
+ */
+public class JettyMockRequestBuilder {
+
+ private final Map<String, List<String>> parameters = new HashMap<>();
+ private final Map<String, List<String>> headers = new HashMap<>();
+ private final Map<String, Object> attributes = new HashMap<>();
+ private Integer localPort;
+ private String uriScheme;
+ private String uriServerName;
+ private Integer uriPort;
+ private String uriPath;
+ private String uriQuery;
+ private String remoteAddress;
+ private String remoteHost;
+ private Integer remotePort;
+
+ private JettyMockRequestBuilder() {}
+
+ public static JettyMockRequestBuilder newBuilder() { return new JettyMockRequestBuilder(); }
+
+ public JettyMockRequestBuilder localPort(int localPort) { this.localPort = localPort; return this; }
+
+ public JettyMockRequestBuilder remote(String address, String host, int port) {
+ this.remoteAddress = address;
+ this.remoteHost = host;
+ this.remotePort = port;
+ return this;
+ }
+
+ public JettyMockRequestBuilder uri(String scheme, String serverName, int port, String path, String query) {
+ this.uriScheme = scheme;
+ this.uriServerName = serverName;
+ this.uriPort = port;
+ this.uriPath = path;
+ this.uriQuery = query;
+ return this;
+ }
+
+ public JettyMockRequestBuilder parameter(String name, List<String> values) { this.parameters.put(name, List.copyOf(values)); return this; }
+
+ public JettyMockRequestBuilder header(String name, List<String> values) { this.headers.put(name, List.copyOf(values)); return this; }
+
+ public JettyMockRequestBuilder attribute(String name, Object value) { this.attributes.put(name, value); return this; }
+
+ public Request build() {
+ int localPort = this.localPort != null ? this.localPort : 8080;
+ String scheme = this.uriScheme != null ? this.uriScheme : "http";
+ String serverName = this.uriServerName != null ? this.uriServerName : "localhost";
+ int uriPort = this.uriPort != null ? this.uriPort : 8080;
+ String path = this.uriPath;
+ String query = this.uriQuery;
+ String remoteAddress = this.remoteAddress != null ? this.remoteAddress : "1.2.3.4";
+ String remoteHost = this.remoteHost != null ? this.remoteHost : "remotehost";
+ Integer remotePort = this.remotePort != null ? this.remotePort : 12345;
+
+ HttpChannel channel = mock(HttpChannel.class);
+ HttpConnection connection = mock(HttpConnection.class);
+ JDiscServerConnector connector = mock(JDiscServerConnector.class);
+ when(connector.connectorConfig()).thenReturn(new ConnectorConfig(new ConnectorConfig.Builder().listenPort(localPort)));
+ when(connector.getLocalPort()).thenReturn(localPort);
+ when(connection.getCreatedTimeStamp()).thenReturn(System.currentTimeMillis());
+ when(connection.getConnector()).thenReturn(connector);
+ when(connection.getHttpChannel()).thenReturn(channel);
+ when(channel.getConnector()).thenReturn(connector);
+ when(channel.getConnection()).thenReturn(connection);
+
+ HttpInput httpInput = mock(HttpInput.class);
+ when(httpInput.getContentReceived()).thenReturn(2345L);
+
+ Request request = mock(Request.class);
+ when(request.getHttpChannel()).thenReturn(channel);
+ when(request.getHttpInput()).thenReturn(httpInput);
+ when(request.getProtocol()).thenReturn("HTTP/1.1");
+ when(request.getScheme()).thenReturn(scheme);
+ when(request.getServerName()).thenReturn(serverName);
+ when(request.getRemoteAddr()).thenReturn(remoteAddress);
+ when(request.getRemotePort()).thenReturn(remotePort);
+ when(request.getRemoteHost()).thenReturn(remoteHost);
+ when(request.getLocalPort()).thenReturn(uriPort);
+ when(request.getMethod()).thenReturn("GET");
+ when(request.getQueryString()).thenReturn(query);
+ when(request.getRequestURI()).thenReturn(path);
+
+ mockCharacterEncodingHandling(request);
+ mockHeaderHandling(request);
+ mockParameterHandling(request);
+ mockAttributeHandling(request);
+
+ return request;
+ }
+
+ private void mockCharacterEncodingHandling(Request request) {
+ try {
+ AtomicReference<String> characterEncoding = new AtomicReference<>("");
+ when(request.getCharacterEncoding()).thenAnswer((Answer<String>) ignored -> characterEncoding.get());
+ doAnswer((Answer<Void>) invocation -> {
+ String value = invocation.getArgument(0);
+ characterEncoding.set(value);
+ return null;
+ }).when(request).setCharacterEncoding(anyString());
+ } catch (UnsupportedEncodingException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ private void mockHeaderHandling(Request request) {
+ Map<String, List<String>> headers = new ConcurrentHashMap<>(this.headers);
+ when(request.getHeaderNames()).thenReturn(Collections.enumeration(headers.keySet()));
+ when(request.getHeaders(anyString())).thenAnswer((Answer<Enumeration<String>>) invocation -> {
+ String key = invocation.getArgument(0);
+ List<String> values = headers.get(key);
+ return values != null ? Collections.enumeration(values) : Collections.enumeration(List.of());
+ });
+ when(request.getHeader(anyString())).thenAnswer((Answer<String>) invocation -> {
+ String name = invocation.getArgument(0);
+ List<String> values = headers.get(name);
+ if (values == null || values.isEmpty()) return null;
+ return values.get(0);
+ });
+ }
+
+ private void mockParameterHandling(Request request) {
+ Map<String, String[]> parameters = new ConcurrentHashMap<>();
+ this.parameters.forEach((key, values) -> parameters.put(key, values.toArray(String[]::new)));
+ when(request.getParameterMap()).thenReturn(parameters);
+ }
+
+ private void mockAttributeHandling(Request request) {
+ Map<String, Object> attributes = new ConcurrentHashMap<>(this.attributes);
+
+ when(request.getAttribute(any())).thenAnswer(invocation -> {
+ String attributeName = invocation.getArgument(0);
+ return attributes.get(attributeName);
+ });
+ doAnswer((Answer<Void>) invocation -> {
+ String attributeName = invocation.getArgument(0);
+ Object attributeValue = invocation.getArgument(1);
+ attributes.put(attributeName, attributeValue);
+ return null;
+ }).when(request).setAttribute(anyString(), any());
+ doAnswer((Answer<Void>) invocation -> {
+ String attributeName = invocation.getArgument(0);
+ attributes.remove(attributeName);
+ return null;
+ }).when(request).removeAttribute(anyString());
+ }
+}
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/JettyMockResponseBuilder.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/JettyMockResponseBuilder.java
new file mode 100644
index 00000000000..6addb966208
--- /dev/null
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/JettyMockResponseBuilder.java
@@ -0,0 +1,29 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.jdisc.http.server.jetty;
+
+import org.eclipse.jetty.http.MetaData;
+import org.eclipse.jetty.server.HttpChannel;
+import org.eclipse.jetty.server.Response;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * Builder for creating a mock instance of Jetty's {@link Response} type.
+ *
+ * @author bjorncs
+ */
+public class JettyMockResponseBuilder {
+
+ private JettyMockResponseBuilder() {}
+
+ public static JettyMockResponseBuilder newBuilder() { return new JettyMockResponseBuilder(); }
+
+ public Response build() {
+ Response response = mock(Response.class);
+ when(response.getHttpChannel()).thenReturn(mock(HttpChannel.class));
+ when(response.getCommittedMetaData()).thenReturn(mock(MetaData.Response.class));
+ return response;
+ }
+
+}
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/JettyTestDriver.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/JettyTestDriver.java
new file mode 100644
index 00000000000..57438cbe207
--- /dev/null
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/JettyTestDriver.java
@@ -0,0 +1,90 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.jdisc.http.server.jetty;
+
+import com.google.inject.Module;
+import com.yahoo.jdisc.handler.RequestHandler;
+import com.yahoo.jdisc.http.ConnectorConfig;
+import com.yahoo.jdisc.http.ServerConfig;
+import com.yahoo.jdisc.http.server.jetty.testutils.TestDriver;
+
+import javax.net.ssl.SSLContext;
+import java.nio.file.Path;
+import java.util.Collection;
+import java.util.List;
+
+import static com.yahoo.yolean.Exceptions.uncheck;
+
+/**
+ * Provides functionality for setting up a jdisc container with an HTTP server, handlers and a simple http client.
+ *
+ * @author bjorncs
+ * @author Simon Thoresen Hult
+ * @author bakksjo
+ */
+public class JettyTestDriver {
+
+ public enum TlsClientAuth { NEED, WANT }
+
+ private final TestDriver driver;
+ private final SimpleHttpClient client;
+
+ private JettyTestDriver(RequestHandler requestHandler,
+ ServerConfig serverConfig,
+ ConnectorConfig connectorConfig,
+ Collection<Module> guiceModules) {
+ var builder = TestDriver.newBuilder()
+ .withRequestHandler(requestHandler)
+ .withServerConfig(serverConfig)
+ .withConnectorConfig(connectorConfig);
+ guiceModules.forEach(builder::withGuiceModule);
+ this.driver = builder.build();
+ this.client = new SimpleHttpClient(driver.sslContext(), driver.server().getListenPort(), false);
+ }
+
+ public boolean close() {
+ uncheck(client::close);
+ return driver.shutdown();
+ }
+
+ public JettyHttpServer server() { return driver.server(); }
+ public SimpleHttpClient client() { return client; }
+ public SSLContext sslContext() { return driver.sslContext(); }
+
+ public SimpleHttpClient newClient(boolean useCompression) {
+ return new SimpleHttpClient(driver.sslContext(), driver.server().getListenPort(), useCompression);
+ }
+
+ public static JettyTestDriver newConfiguredInstance(RequestHandler requestHandler,
+ ServerConfig.Builder serverConfig,
+ ConnectorConfig.Builder connectorConfig,
+ Module... guiceModules) {
+ return new JettyTestDriver(requestHandler, serverConfig.build(), connectorConfig.build(), List.of(guiceModules));
+ }
+
+ public static JettyTestDriver newInstance(RequestHandler requestHandler, Module... guiceModules) {
+ return newConfiguredInstance(requestHandler, new ServerConfig.Builder(), new ConnectorConfig.Builder(), guiceModules);
+ }
+
+
+ public static JettyTestDriver newInstanceWithSsl(RequestHandler requestHandler,
+ Path certificateFile,
+ Path privateKeyFile,
+ TlsClientAuth tlsClientAuth,
+ Module... guiceModules) {
+ return newConfiguredInstance(
+ requestHandler,
+ new ServerConfig.Builder().connectionLog(new ServerConfig.ConnectionLog.Builder().enabled(true)),
+ new ConnectorConfig.Builder()
+ .http2Enabled(true)
+ .ssl(new ConnectorConfig.Ssl.Builder()
+ .enabled(true)
+ .clientAuth(tlsClientAuth == TlsClientAuth.NEED
+ ? ConnectorConfig.Ssl.ClientAuth.Enum.NEED_AUTH
+ : ConnectorConfig.Ssl.ClientAuth.Enum.WANT_AUTH)
+ .privateKeyFile(privateKeyFile.toString())
+ .certificateFile(certificateFile.toString())
+ .caCertificateFile(certificateFile.toString())),
+ guiceModules);
+ }
+
+}
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/SimpleHttpClient.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/SimpleHttpClient.java
index eea8d7e3072..161f48d847d 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/SimpleHttpClient.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/SimpleHttpClient.java
@@ -1,33 +1,36 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.jdisc.http.server.jetty;
-import org.apache.http.Header;
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpResponse;
-import org.apache.http.client.entity.GzipCompressingEntity;
-import org.apache.http.client.methods.CloseableHttpResponse;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.client.methods.HttpPost;
-import org.apache.http.client.methods.HttpUriRequest;
-import org.apache.http.config.Registry;
-import org.apache.http.config.RegistryBuilder;
-import org.apache.http.conn.socket.ConnectionSocketFactory;
-import org.apache.http.conn.ssl.DefaultHostnameVerifier;
-import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
-import org.apache.http.entity.ByteArrayEntity;
-import org.apache.http.entity.StringEntity;
-import org.apache.http.entity.mime.FormBodyPart;
-import org.apache.http.entity.mime.MultipartEntityBuilder;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClientBuilder;
-import org.apache.http.impl.conn.BasicHttpClientConnectionManager;
-import org.apache.http.util.EntityUtils;
+import org.apache.hc.client5.http.SystemDefaultDnsResolver;
+import org.apache.hc.client5.http.classic.methods.HttpGet;
+import org.apache.hc.client5.http.classic.methods.HttpPost;
+import org.apache.hc.client5.http.classic.methods.HttpUriRequest;
+import org.apache.hc.client5.http.entity.GzipCompressingEntity;
+import org.apache.hc.client5.http.entity.mime.FormBodyPart;
+import org.apache.hc.client5.http.entity.mime.MultipartEntityBuilder;
+import org.apache.hc.client5.http.impl.classic.CloseableHttpClient;
+import org.apache.hc.client5.http.impl.classic.CloseableHttpResponse;
+import org.apache.hc.client5.http.impl.classic.HttpClientBuilder;
+import org.apache.hc.client5.http.impl.io.PoolingHttpClientConnectionManager;
+import org.apache.hc.client5.http.impl.io.PoolingHttpClientConnectionManagerBuilder;
+import org.apache.hc.client5.http.ssl.DefaultHostnameVerifier;
+import org.apache.hc.client5.http.ssl.SSLConnectionSocketFactory;
+import org.apache.hc.core5.http.ContentType;
+import org.apache.hc.core5.http.Header;
+import org.apache.hc.core5.http.HttpEntity;
+import org.apache.hc.core5.http.HttpResponse;
+import org.apache.hc.core5.http.ParseException;
+import org.apache.hc.core5.http.io.entity.ByteArrayEntity;
+import org.apache.hc.core5.http.io.entity.EntityUtils;
+import org.apache.hc.core5.http.io.entity.StringEntity;
import org.hamcrest.Matcher;
import org.hamcrest.MatcherAssert;
import javax.net.ssl.SSLContext;
import java.io.IOException;
+import java.net.InetAddress;
import java.net.URI;
+import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;
@@ -55,8 +58,9 @@ public class SimpleHttpClient implements AutoCloseable {
public SimpleHttpClient(SSLContext sslContext, List<String> enabledProtocols, List<String> enabledCiphers,
int listenPort, boolean useCompression) {
- HttpClientBuilder builder = HttpClientBuilder.create();
- builder.disableConnectionState(); // Reuse SSL connection when client authentication is enabled
+ HttpClientBuilder builder = HttpClientBuilder.create()
+ .disableAutomaticRetries()
+ .disableConnectionState(); // Reuse SSL connection when client authentication is enabled
if (!useCompression) {
builder.disableContentCompression();
}
@@ -66,12 +70,17 @@ public class SimpleHttpClient implements AutoCloseable {
toArray(enabledProtocols),
toArray(enabledCiphers),
new DefaultHostnameVerifier());
- builder.setSSLSocketFactory(sslConnectionFactory);
-
- Registry<ConnectionSocketFactory> registry = RegistryBuilder.<ConnectionSocketFactory>create()
- .register("https", sslConnectionFactory)
+ PoolingHttpClientConnectionManager connManager = PoolingHttpClientConnectionManagerBuilder.create()
+ .setSSLSocketFactory(sslConnectionFactory)
+ .setDnsResolver(new SystemDefaultDnsResolver() {
+ @Override
+ public InetAddress[] resolve(String host) throws UnknownHostException {
+ // Returns single address instead of multiple (to avoid multiple connection attempts)
+ return new InetAddress[] { InetAddress.getByName(host) };
+ }
+ })
.build();
- builder.setConnectionManager(new BasicHttpClientConnectionManager(registry));
+ builder.setConnectionManager(connManager);
scheme = "https";
} else {
scheme = "http";
@@ -139,7 +148,7 @@ public class SimpleHttpClient implements AutoCloseable {
}
public RequestExecutor setBinaryContent(final byte[] content) {
- this.entity = new ByteArrayEntity(content);
+ this.entity = new ByteArrayEntity(content, ContentType.DEFAULT_BINARY);
return this;
}
@@ -152,7 +161,7 @@ public class SimpleHttpClient implements AutoCloseable {
public ResponseValidator execute() throws IOException {
if (entity != null) {
- ((HttpPost)request).setEntity(entity);
+ request.setEntity(entity);
}
try (CloseableHttpResponse response = delegate.execute(request)){
return new ResponseValidator(response);
@@ -165,15 +174,19 @@ public class SimpleHttpClient implements AutoCloseable {
private final HttpResponse response;
private final String content;
- public ResponseValidator(HttpResponse response) throws IOException {
- this.response = response;
+ public ResponseValidator(CloseableHttpResponse response) throws IOException {
+ try {
+ this.response = response;
- HttpEntity entity = response.getEntity();
- this.content = entity == null ? null : EntityUtils.toString(entity, StandardCharsets.UTF_8);
+ HttpEntity entity = response.getEntity();
+ this.content = entity == null ? null : EntityUtils.toString(entity, StandardCharsets.UTF_8);
+ } catch (ParseException e) {
+ throw new IOException(e);
+ }
}
public ResponseValidator expectStatusCode(Matcher<Integer> matcher) {
- MatcherAssert.assertThat(response.getStatusLine().getStatusCode(), matcher);
+ MatcherAssert.assertThat(response.getCode(), matcher);
return this;
}
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/TestDriver.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/TestDriver.java
deleted file mode 100644
index 49928a5679e..00000000000
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/TestDriver.java
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.jdisc.http.server.jetty;
-
-import com.google.inject.Module;
-import com.yahoo.jdisc.application.ContainerBuilder;
-import com.yahoo.jdisc.handler.RequestHandler;
-import com.yahoo.jdisc.http.ConnectorConfig;
-import com.yahoo.security.SslContextBuilder;
-
-import javax.net.ssl.SSLContext;
-import java.nio.file.Paths;
-
-import static com.yahoo.yolean.Exceptions.uncheck;
-
-/**
- * Provides functionality for setting up a jdisc container with an HTTP server and handlers.
- *
- * @author Simon Thoresen Hult
- * @author bakksjo
- */
-public class TestDriver {
-
- private final com.yahoo.jdisc.test.TestDriver driver;
- private final JettyHttpServer server;
- private final SimpleHttpClient client;
-
- private TestDriver(com.yahoo.jdisc.test.TestDriver driver, JettyHttpServer server, SimpleHttpClient client) {
- this.driver = driver;
- this.server = server;
- this.client = client;
- }
-
- public static TestDriver newInstance(Class<? extends JettyHttpServer> serverClass,
- RequestHandler requestHandler,
- Module testConfig) {
- com.yahoo.jdisc.test.TestDriver driver =
- com.yahoo.jdisc.test.TestDriver.newSimpleApplicationInstance(testConfig);
- ContainerBuilder builder = driver.newContainerBuilder();
- JettyHttpServer server = builder.getInstance(serverClass);
- builder.serverProviders().install(server);
- builder.serverBindings().bind("http://*/*", requestHandler);
- driver.activateContainer(builder);
- server.start();
-
- SimpleHttpClient client = new SimpleHttpClient(newSslContext(builder), server.getListenPort(), false);
- return new TestDriver(driver, server, client);
- }
-
- public boolean close() {
- server.close();
- server.release();
- uncheck(client::close);
- return driver.close();
- }
-
- public JettyHttpServer server() { return server; }
-
- public SimpleHttpClient client() { return client; }
-
- public SimpleHttpClient newClient(final boolean useCompression) {
- return new SimpleHttpClient(newSslContext(), server.getListenPort(), useCompression);
- }
-
- public SSLContext newSslContext() {
- return newSslContext(driver.newContainerBuilder());
- }
-
- private static SSLContext newSslContext(ContainerBuilder builder) {
- ConnectorConfig.Ssl sslConfig = builder.getInstance(ConnectorConfig.class).ssl();
- if (!sslConfig.enabled()) return null;
-
- return new SslContextBuilder()
- .withKeyStore(Paths.get(sslConfig.privateKeyFile()), Paths.get(sslConfig.certificateFile()))
- .withTrustStore(Paths.get(sslConfig.caCertificateFile()))
- .build();
- }
-
-}
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/TestDrivers.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/TestDrivers.java
deleted file mode 100644
index 7d7530c32e0..00000000000
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/TestDrivers.java
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.jdisc.http.server.jetty;
-
-import com.google.inject.AbstractModule;
-import com.google.inject.Module;
-import com.google.inject.util.Modules;
-import com.yahoo.container.logging.ConnectionLog;
-import com.yahoo.container.logging.RequestLog;
-import com.yahoo.jdisc.handler.RequestHandler;
-import com.yahoo.jdisc.http.ConnectorConfig;
-import com.yahoo.jdisc.http.ServerConfig;
-import com.yahoo.jdisc.http.ServletPathsConfig;
-import com.yahoo.jdisc.http.guiceModules.ConnectorFactoryRegistryModule;
-import com.yahoo.jdisc.http.guiceModules.ServletModule;
-
-import java.nio.file.Path;
-
-/**
- * @author Simon Thoresen Hult
- * @author bjorncs
- */
-public class TestDrivers {
-
- public static TestDriver newConfiguredInstance(RequestHandler requestHandler,
- ServerConfig.Builder serverConfig,
- ConnectorConfig.Builder connectorConfig,
- Module... guiceModules) {
- return TestDriver.newInstance(
- JettyHttpServer.class,
- requestHandler,
- newConfigModule(serverConfig, connectorConfig, guiceModules));
- }
-
- public static TestDriver newInstance(RequestHandler requestHandler, Module... guiceModules) {
- return TestDriver.newInstance(
- JettyHttpServer.class,
- requestHandler,
- newConfigModule(
- new ServerConfig.Builder(),
- new ConnectorConfig.Builder(),
- guiceModules
- ));
- }
-
- public enum TlsClientAuth { NEED, WANT }
-
- public static TestDriver newInstanceWithSsl(RequestHandler requestHandler,
- Path certificateFile,
- Path privateKeyFile,
- TlsClientAuth tlsClientAuth,
- Module... guiceModules) {
- return TestDriver.newInstance(
- JettyHttpServer.class,
- requestHandler,
- newConfigModule(
- new ServerConfig.Builder().connectionLog(new ServerConfig.ConnectionLog.Builder().enabled(true)),
- new ConnectorConfig.Builder()
- .tlsClientAuthEnforcer(
- new ConnectorConfig.TlsClientAuthEnforcer.Builder()
- .enable(true)
- .pathWhitelist("/status.html"))
- .ssl(new ConnectorConfig.Ssl.Builder()
- .enabled(true)
- .clientAuth(tlsClientAuth == TlsClientAuth.NEED
- ? ConnectorConfig.Ssl.ClientAuth.Enum.NEED_AUTH
- : ConnectorConfig.Ssl.ClientAuth.Enum.WANT_AUTH)
- .privateKeyFile(privateKeyFile.toString())
- .certificateFile(certificateFile.toString())
- .caCertificateFile(certificateFile.toString())),
- guiceModules));
- }
-
- private static Module newConfigModule(ServerConfig.Builder serverConfig,
- ConnectorConfig.Builder connectorConfigBuilder,
- Module... guiceModules) {
- return Modules.override(
- Modules.combine(
- new AbstractModule() {
- @Override
- protected void configure() {
- bind(ServletPathsConfig.class).toInstance(new ServletPathsConfig(new ServletPathsConfig.Builder()));
- bind(ServerConfig.class).toInstance(new ServerConfig(serverConfig));
- bind(ConnectorConfig.class).toInstance(new ConnectorConfig(connectorConfigBuilder));
- bind(FilterBindings.class).toInstance(new FilterBindings.Builder().build());
- bind(ConnectionLog.class).toInstance(new VoidConnectionLog());
- bind(RequestLog.class).toInstance(new VoidRequestLog());
- }
- },
- new ConnectorFactoryRegistryModule(connectorConfigBuilder),
- new ServletModule()))
- .with(guiceModules);
- }
-
-}
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/servlet/JDiscFilterForServletTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/servlet/JDiscFilterForServletTest.java
index 16969a47b84..0479374854c 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/servlet/JDiscFilterForServletTest.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/servlet/JDiscFilterForServletTest.java
@@ -15,8 +15,7 @@ import com.yahoo.jdisc.http.filter.ResponseFilter;
import com.yahoo.jdisc.http.server.jetty.FilterBindings;
import com.yahoo.jdisc.http.server.jetty.FilterInvoker;
import com.yahoo.jdisc.http.server.jetty.SimpleHttpClient.ResponseValidator;
-import com.yahoo.jdisc.http.server.jetty.TestDriver;
-import com.yahoo.jdisc.http.server.jetty.TestDrivers;
+import com.yahoo.jdisc.http.server.jetty.JettyTestDriver;
import org.junit.Test;
import javax.servlet.http.HttpServletRequest;
@@ -36,7 +35,7 @@ import static org.hamcrest.CoreMatchers.is;
public class JDiscFilterForServletTest extends ServletTestBase {
@Test
public void request_filter_can_return_response() throws IOException, InterruptedException {
- TestDriver testDriver = requestFilterTestDriver();
+ JettyTestDriver testDriver = requestFilterTestDriver();
ResponseValidator response = httpGet(testDriver, TestServlet.PATH).execute();
response.expectContent(containsString(TestRequestFilter.responseContent));
@@ -44,7 +43,7 @@ public class JDiscFilterForServletTest extends ServletTestBase {
@Test
public void request_can_be_forwarded_through_request_filter_to_servlet() throws IOException {
- TestDriver testDriver = requestFilterTestDriver();
+ JettyTestDriver testDriver = requestFilterTestDriver();
ResponseValidator response = httpGet(testDriver, TestServlet.PATH).
addHeader(TestRequestFilter.BYPASS_FILTER_HEADER, Boolean.TRUE.toString()).
execute();
@@ -54,7 +53,7 @@ public class JDiscFilterForServletTest extends ServletTestBase {
@Test
public void response_filter_can_modify_response() throws IOException {
- TestDriver testDriver = responseFilterTestDriver();
+ JettyTestDriver testDriver = responseFilterTestDriver();
ResponseValidator response = httpGet(testDriver, TestServlet.PATH).execute();
response.expectHeader(TestResponseFilter.INVOKED_HEADER, is(Boolean.TRUE.toString()));
@@ -62,7 +61,7 @@ public class JDiscFilterForServletTest extends ServletTestBase {
@Test
public void response_filter_is_run_on_empty_sync_response() throws IOException {
- TestDriver testDriver = responseFilterTestDriver();
+ JettyTestDriver testDriver = responseFilterTestDriver();
ResponseValidator response = httpGet(testDriver, NoContentTestServlet.PATH).execute();
response.expectHeader(TestResponseFilter.INVOKED_HEADER, is(Boolean.TRUE.toString()));
@@ -70,7 +69,7 @@ public class JDiscFilterForServletTest extends ServletTestBase {
@Test
public void response_filter_is_run_on_empty_async_response() throws IOException {
- TestDriver testDriver = responseFilterTestDriver();
+ JettyTestDriver testDriver = responseFilterTestDriver();
ResponseValidator response = httpGet(testDriver, NoContentTestServlet.PATH).
addHeader(NoContentTestServlet.HEADER_ASYNC, Boolean.TRUE.toString()).
execute();
@@ -78,20 +77,20 @@ public class JDiscFilterForServletTest extends ServletTestBase {
response.expectHeader(TestResponseFilter.INVOKED_HEADER, is(Boolean.TRUE.toString()));
}
- private TestDriver requestFilterTestDriver() throws IOException {
+ private JettyTestDriver requestFilterTestDriver() throws IOException {
FilterBindings filterBindings = new FilterBindings.Builder()
.addRequestFilter("my-request-filter", new TestRequestFilter())
.addRequestFilterBinding("my-request-filter", "http://*/*")
.build();
- return TestDrivers.newInstance(dummyRequestHandler, bindings(filterBindings));
+ return JettyTestDriver.newInstance(dummyRequestHandler, bindings(filterBindings));
}
- private TestDriver responseFilterTestDriver() throws IOException {
+ private JettyTestDriver responseFilterTestDriver() throws IOException {
FilterBindings filterBindings = new FilterBindings.Builder()
.addResponseFilter("my-response-filter", new TestResponseFilter())
.addResponseFilterBinding("my-response-filter", "http://*/*")
.build();
- return TestDrivers.newInstance(dummyRequestHandler, bindings(filterBindings));
+ return JettyTestDriver.newInstance(dummyRequestHandler, bindings(filterBindings));
}
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/servlet/ServletAccessLoggingTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/servlet/ServletAccessLoggingTest.java
index a533a447f6a..39d28465b17 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/servlet/ServletAccessLoggingTest.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/servlet/ServletAccessLoggingTest.java
@@ -7,8 +7,7 @@ import com.google.inject.util.Modules;
import com.yahoo.container.logging.AccessLog;
import com.yahoo.container.logging.RequestLog;
import com.yahoo.container.logging.RequestLogEntry;
-import com.yahoo.jdisc.http.server.jetty.TestDriver;
-import com.yahoo.jdisc.http.server.jetty.TestDrivers;
+import com.yahoo.jdisc.http.server.jetty.JettyTestDriver;
import org.junit.Test;
import org.mockito.verification.VerificationMode;
@@ -30,7 +29,7 @@ public class ServletAccessLoggingTest extends ServletTestBase {
@Test
public void accessLogIsInvokedForNonJDiscServlet() throws Exception {
final AccessLog accessLog = mock(AccessLog.class);
- final TestDriver testDriver = newTestDriver(accessLog);
+ final JettyTestDriver testDriver = newTestDriver(accessLog);
httpGet(testDriver, TestServlet.PATH).execute();
verifyCallsLog(accessLog, timeout(MAX_LOG_WAIT_TIME_MILLIS).times(1));
}
@@ -38,7 +37,7 @@ public class ServletAccessLoggingTest extends ServletTestBase {
@Test
public void accessLogIsInvokedForJDiscServlet() throws Exception {
final AccessLog accessLog = mock(AccessLog.class);
- final TestDriver testDriver = newTestDriver(accessLog);
+ final JettyTestDriver testDriver = newTestDriver(accessLog);
testDriver.client().newGet("/status.html").execute();
verifyCallsLog(accessLog, timeout(MAX_LOG_WAIT_TIME_MILLIS).times(1));
}
@@ -47,8 +46,8 @@ public class ServletAccessLoggingTest extends ServletTestBase {
verify(requestLog, verificationMode).log(any(RequestLogEntry.class));
}
- private TestDriver newTestDriver(RequestLog requestLog) throws IOException {
- return TestDrivers.newInstance(dummyRequestHandler, bindings(requestLog));
+ private JettyTestDriver newTestDriver(RequestLog requestLog) throws IOException {
+ return JettyTestDriver.newInstance(dummyRequestHandler, bindings(requestLog));
}
private Module bindings(RequestLog requestLog) {
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/servlet/ServletTestBase.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/servlet/ServletTestBase.java
index 54bfe8c026d..e1f1a818ab0 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/servlet/ServletTestBase.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/servlet/ServletTestBase.java
@@ -14,7 +14,7 @@ import com.yahoo.jdisc.handler.ResponseHandler;
import com.yahoo.jdisc.http.ServletPathsConfig;
import com.yahoo.jdisc.http.ServletPathsConfig.Servlets.Builder;
import com.yahoo.jdisc.http.server.jetty.SimpleHttpClient.RequestExecutor;
-import com.yahoo.jdisc.http.server.jetty.TestDriver;
+import com.yahoo.jdisc.http.server.jetty.JettyTestDriver;
import org.eclipse.jetty.servlet.ServletHolder;
import javax.servlet.ServletException;
@@ -46,7 +46,7 @@ public class ServletTestBase {
new ServletInstance(TestServlet.ID, TestServlet.PATH, new TestServlet()),
new ServletInstance(NoContentTestServlet.ID, NoContentTestServlet.PATH, new NoContentTestServlet()));
- protected RequestExecutor httpGet(TestDriver testDriver, String path) {
+ protected RequestExecutor httpGet(JettyTestDriver testDriver, String path) {
return testDriver.client().newGet("/" + path);
}
diff --git a/simplemetrics/src/test/java/com/yahoo/metrics/simple/BucketTest.java b/container-core/src/test/java/com/yahoo/metrics/simple/BucketTest.java
index b33da4bd531..b33da4bd531 100644
--- a/simplemetrics/src/test/java/com/yahoo/metrics/simple/BucketTest.java
+++ b/container-core/src/test/java/com/yahoo/metrics/simple/BucketTest.java
diff --git a/simplemetrics/src/test/java/com/yahoo/metrics/simple/CounterTest.java b/container-core/src/test/java/com/yahoo/metrics/simple/CounterTest.java
index dc097f71a6b..dc097f71a6b 100644
--- a/simplemetrics/src/test/java/com/yahoo/metrics/simple/CounterTest.java
+++ b/container-core/src/test/java/com/yahoo/metrics/simple/CounterTest.java
diff --git a/simplemetrics/src/test/java/com/yahoo/metrics/simple/DimensionsCacheTest.java b/container-core/src/test/java/com/yahoo/metrics/simple/DimensionsCacheTest.java
index 0fde3bcf588..0fde3bcf588 100644
--- a/simplemetrics/src/test/java/com/yahoo/metrics/simple/DimensionsCacheTest.java
+++ b/container-core/src/test/java/com/yahoo/metrics/simple/DimensionsCacheTest.java
diff --git a/simplemetrics/src/test/java/com/yahoo/metrics/simple/GaugeTest.java b/container-core/src/test/java/com/yahoo/metrics/simple/GaugeTest.java
index fef56c27114..fef56c27114 100644
--- a/simplemetrics/src/test/java/com/yahoo/metrics/simple/GaugeTest.java
+++ b/container-core/src/test/java/com/yahoo/metrics/simple/GaugeTest.java
diff --git a/simplemetrics/src/test/java/com/yahoo/metrics/simple/MetricsTest.java b/container-core/src/test/java/com/yahoo/metrics/simple/MetricsTest.java
index 0450e5db5f5..0450e5db5f5 100644
--- a/simplemetrics/src/test/java/com/yahoo/metrics/simple/MetricsTest.java
+++ b/container-core/src/test/java/com/yahoo/metrics/simple/MetricsTest.java
diff --git a/simplemetrics/src/test/java/com/yahoo/metrics/simple/PointTest.java b/container-core/src/test/java/com/yahoo/metrics/simple/PointTest.java
index 802bea3c463..802bea3c463 100644
--- a/simplemetrics/src/test/java/com/yahoo/metrics/simple/PointTest.java
+++ b/container-core/src/test/java/com/yahoo/metrics/simple/PointTest.java
diff --git a/simplemetrics/src/test/java/com/yahoo/metrics/simple/jdisc/SnapshotConverterTest.java b/container-core/src/test/java/com/yahoo/metrics/simple/jdisc/SnapshotConverterTest.java
index 13f7ba55e61..13f7ba55e61 100644
--- a/simplemetrics/src/test/java/com/yahoo/metrics/simple/jdisc/SnapshotConverterTest.java
+++ b/container-core/src/test/java/com/yahoo/metrics/simple/jdisc/SnapshotConverterTest.java
diff --git a/container-core/src/test/java/com/yahoo/restapi/RestApiImplTest.java b/container-core/src/test/java/com/yahoo/restapi/RestApiImplTest.java
index 1de8184ce22..06fc6d80741 100644
--- a/container-core/src/test/java/com/yahoo/restapi/RestApiImplTest.java
+++ b/container-core/src/test/java/com/yahoo/restapi/RestApiImplTest.java
@@ -102,13 +102,25 @@ class RestApiImplTest {
verifyJsonResponse(restApi, Method.POST, "/api", rawJson, 200, rawJson);
}
+ @Test
+ public void uri_builder_creates_valid_uri_prefix() {
+ RestApi restApi = RestApi.builder()
+ .addRoute(route("/test").get(ctx -> new MessageResponse(ctx.uriBuilder().toString())))
+ .build();
+ verifyJsonResponse(restApi, Method.GET, "/test", null, 200, "{\"message\":\"http://localhost\"}");
+ }
+
private static void verifyJsonResponse(RestApi restApi, Method method, String path, String requestContent, int expectedStatusCode, String expectedJson) {
- HttpRequest testRequest = requestContent != null ?
- HttpRequest.createTestRequest(
- path, method,
- new ByteArrayInputStream(requestContent.getBytes(StandardCharsets.UTF_8)),
- Map.of("Content-Type", "application/json")) :
- HttpRequest.createTestRequest(path, method);
+ HttpRequest testRequest;
+ String uri = "http://localhost" + path;
+ if (requestContent != null) {
+ testRequest = HttpRequest.createTestRequest(
+ uri, method,
+ new ByteArrayInputStream(requestContent.getBytes(StandardCharsets.UTF_8)),
+ Map.of("Content-Type", "application/json"));
+ } else {
+ testRequest = HttpRequest.createTestRequest(uri, method);
+ }
HttpResponse response = restApi.handleRequest(testRequest);
assertEquals(expectedStatusCode, response.getStatus());
if (expectedJson != null) {
diff --git a/container-di/src/test/vespa-configdef/config.di.int.def b/container-core/src/test/vespa-configdef/config.di.int.def
index a34539c4a0f..a34539c4a0f 100644
--- a/container-di/src/test/vespa-configdef/config.di.int.def
+++ b/container-core/src/test/vespa-configdef/config.di.int.def
diff --git a/container-di/src/test/vespa-configdef/config.di.string.def b/container-core/src/test/vespa-configdef/config.di.string.def
index 396afe54f3f..396afe54f3f 100644
--- a/container-di/src/test/vespa-configdef/config.di.string.def
+++ b/container-core/src/test/vespa-configdef/config.di.string.def
diff --git a/container-di/src/test/vespa-configdef/config.test.bootstrap1.def b/container-core/src/test/vespa-configdef/config.test.bootstrap1.def
index bdee16d99ea..bdee16d99ea 100644
--- a/container-di/src/test/vespa-configdef/config.test.bootstrap1.def
+++ b/container-core/src/test/vespa-configdef/config.test.bootstrap1.def
diff --git a/container-di/src/test/vespa-configdef/config.test.bootstrap2.def b/container-core/src/test/vespa-configdef/config.test.bootstrap2.def
index b4fbffd8ae6..b4fbffd8ae6 100644
--- a/container-di/src/test/vespa-configdef/config.test.bootstrap2.def
+++ b/container-core/src/test/vespa-configdef/config.test.bootstrap2.def
diff --git a/container-di/src/test/vespa-configdef/config.test.components1.def b/container-core/src/test/vespa-configdef/config.test.components1.def
index bdee16d99ea..bdee16d99ea 100644
--- a/container-di/src/test/vespa-configdef/config.test.components1.def
+++ b/container-core/src/test/vespa-configdef/config.test.components1.def
diff --git a/container-di/src/test/vespa-configdef/config.test.test.def b/container-core/src/test/vespa-configdef/config.test.test.def
index d3e0ed17748..d3e0ed17748 100644
--- a/container-di/src/test/vespa-configdef/config.test.test.def
+++ b/container-core/src/test/vespa-configdef/config.test.test.def
diff --git a/container-di/src/test/vespa-configdef/config.test.test2.def b/container-core/src/test/vespa-configdef/config.test.test2.def
index d3e0ed17748..d3e0ed17748 100644
--- a/container-di/src/test/vespa-configdef/config.test.test2.def
+++ b/container-core/src/test/vespa-configdef/config.test.test2.def
diff --git a/container-di/src/test/vespa-configdef/config.test.thread-pool.def b/container-core/src/test/vespa-configdef/config.test.thread-pool.def
index 9e6b6694e84..9e6b6694e84 100644
--- a/container-di/src/test/vespa-configdef/config.test.thread-pool.def
+++ b/container-core/src/test/vespa-configdef/config.test.thread-pool.def
diff --git a/container-dependency-versions/pom.xml b/container-dependency-versions/pom.xml
index 04ce3405d83..5e4a45074ae 100644
--- a/container-dependency-versions/pom.xml
+++ b/container-dependency-versions/pom.xml
@@ -22,12 +22,6 @@
<url>https://github.com/vespa-engine</url>
</developer>
</developers>
- <distributionManagement>
- <repository>
- <id>bintray-vespa-repo</id>
- <url>https://api.bintray.com/maven/yahoo/maven/vespa;publish=1</url>
- </repository>
- </distributionManagement>
<scm>
<connection>scm:git:git@github.com:vespa-engine/vespa.git</connection>
<developerConnection>scm:git:git@github.com:vespa-engine/vespa.git</developerConnection>
@@ -360,66 +354,6 @@
<artifactId>xml-apis</artifactId>
<version>${xml-apis.version}</version>
</dependency>
-
- <!-- NOTE: The dependencies below are either not provided from the jdisc container runtime, or should
- not be leaked as maven dependency via the 'container' artifact. Still, they had to be moved
- here from 'parent' because factorylib reads the text in parent/pom.xml and this pom file to
- build a pom model used to bootstrap the maven cache on factory. Hence all deps using properties
- declared in this pom also have to reside in this pom.
- See factorylib:com.yahoo.vespa.dependencies.pom.list.Main.-->
- <!-- TODO: move these back to parent/pom.xml when the above does not hold anymore. -->
-
- <dependency>
- <!-- NOT provided from jdisc runtime -->
- <groupId>com.fasterxml.jackson.jaxrs</groupId>
- <artifactId>jackson-jaxrs-xml-provider</artifactId>
- <version>${jackson2.version}</version>
- </dependency>
- <dependency>
- <!-- NOT provided from jdisc runtime -->
- <groupId>com.fasterxml.jackson.dataformat</groupId>
- <artifactId>jackson-dataformat-xml</artifactId>
- <version>${jackson2.version}</version>
- </dependency>
- <dependency>
- <!-- NOT provided from jdisc runtime -->
- <groupId>com.google.guava</groupId>
- <artifactId>guava-testlib</artifactId>
- <version>${guava.version}</version>
- </dependency>
- <dependency>
- <!-- Installed in jdisc runtime, but should only be used internally and not leaked as maven dep to users -->
- <groupId>org.eclipse.jetty</groupId>
- <artifactId>jetty-continuation</artifactId>
- <version>${jetty.version}</version>
- </dependency>
- <dependency>
- <!-- Installed in jdisc runtime, but should only be used internally and not leaked as maven dep to users -->
- <groupId>org.eclipse.jetty</groupId>
- <artifactId>jetty-server</artifactId>
- <version>${jetty.version}</version>
- </dependency>
- <dependency>
- <!-- Installed in jdisc runtime, but should only be used internally and not leaked as maven dep to users -->
- <groupId>org.eclipse.jetty</groupId>
- <artifactId>jetty-servlet</artifactId>
- <version>${jetty.version}</version>
- </dependency>
- <dependency>
- <!-- Installed in jdisc runtime, but should only be used internally and not leaked as maven dep to users -->
- <groupId>org.eclipse.jetty</groupId>
- <artifactId>jetty-servlets</artifactId>
- <version>${jetty.version}</version>
- </dependency>
- <dependency>
- <!-- Installed in jdisc runtime, but should only be used internally and not leaked as maven dep to users -->
- <groupId>org.eclipse.jetty</groupId>
- <artifactId>jetty-jmx</artifactId>
- <version>${jetty.version}</version>
- </dependency>
-
- <!-- Please don't add deps here, but instead above the NOTE. -->
-
</dependencies>
</dependencyManagement>
@@ -458,7 +392,8 @@
<javax.inject.version>1</javax.inject.version>
<javax.servlet-api.version>3.1.0</javax.servlet-api.version>
<jaxb.version>2.3.0</jaxb.version>
- <jetty.version>9.4.38.v20210224</jetty.version>
+ <jetty.version>9.4.40.v20210413</jetty.version>
+ <jetty-alpn.version>1.1.3.v20160715</jetty-alpn.version>
<org.lz4.version>1.7.1</org.lz4.version>
<org.json.version>20090211</org.json.version>
<slf4j.version>1.7.30</slf4j.version>
diff --git a/container-dev/pom.xml b/container-dev/pom.xml
index 2dcb03bba58..cfe9c0a6d8e 100644
--- a/container-dev/pom.xml
+++ b/container-dev/pom.xml
@@ -36,17 +36,6 @@
</dependency>
<dependency>
<groupId>com.yahoo.vespa</groupId>
- <artifactId>simplemetrics</artifactId>
- <version>${project.version}</version>
- <exclusions>
- <exclusion>
- <groupId>org.hdrhistogram</groupId>
- <artifactId>HdrHistogram</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
<artifactId>config-lib</artifactId>
<version>${project.version}</version>
</dependency>
@@ -99,6 +88,10 @@
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
</exclusion>
+ <exclusion>
+ <groupId>org.hdrhistogram</groupId>
+ <artifactId>HdrHistogram</artifactId>
+ </exclusion>
</exclusions>
</dependency>
<dependency>
diff --git a/container-di/.gitignore b/container-di/.gitignore
deleted file mode 100644
index 3cc25b51fc4..00000000000
--- a/container-di/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/pom.xml.build
-/target
diff --git a/container-di/CMakeLists.txt b/container-di/CMakeLists.txt
deleted file mode 100644
index 7bb25e6a420..00000000000
--- a/container-di/CMakeLists.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-install_config_definitions()
diff --git a/container-di/OWNERS b/container-di/OWNERS
deleted file mode 100644
index 3b2ba1ede81..00000000000
--- a/container-di/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-gjoranv
diff --git a/container-di/README.md b/container-di/README.md
deleted file mode 100644
index 0eeb9d7280f..00000000000
--- a/container-di/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-<!-- Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
-# Container DI
-
-JDisc's dependency injection framework.
diff --git a/container-di/abi-spec.json b/container-di/abi-spec.json
deleted file mode 100644
index 02cc29cd07f..00000000000
--- a/container-di/abi-spec.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
- "com.yahoo.container.di.componentgraph.Provider": {
- "superClass": "java.lang.Object",
- "interfaces": [
- "com.yahoo.component.Deconstructable"
- ],
- "attributes": [
- "public",
- "interface",
- "abstract"
- ],
- "methods": [
- "public abstract java.lang.Object get()"
- ],
- "fields": []
- }
-}
\ No newline at end of file
diff --git a/container-di/benchmarks/src/test/java/com/yahoo/component/ComponentIdBenchmark.java b/container-di/benchmarks/src/test/java/com/yahoo/component/ComponentIdBenchmark.java
deleted file mode 100644
index bcdaff110e1..00000000000
--- a/container-di/benchmarks/src/test/java/com/yahoo/component/ComponentIdBenchmark.java
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.component;
-
-/**
- * @author baldersheim
- */
-public class ComponentIdBenchmark {
- public void run() {
- boolean result=true;
- String strings[] = createStrings(1000);
- // Warm-up
- out("Warming up...");
- for (int i=0; i<30*1000; i++)
- result = result ^ createComponentId(strings);
-
- long startTime=System.currentTimeMillis();
- out("Running...");
- for (int i=0; i<100*1000; i++)
- result = result ^ createComponentId(strings);
- out("Ignore this: " + result); // Make sure we are not fooled by optimization by creating an observable result
- long endTime=System.currentTimeMillis();
- out("Create anonymous component ids of 1000 strings 100.000 times took " + (endTime-startTime) + " ms");
- }
-
- private final String [] createStrings(int num) {
- String strings [] = new String [num];
- for(int i=0; i < strings.length; i++) {
- strings[i] = "this.is.a.short.compound.name." + i;
- }
- return strings;
- }
-
- private final boolean createComponentId(String [] strings) {
- boolean retval = true;
- for (int i=0; i < strings.length; i++) {
- ComponentId n = ComponentId.createAnonymousComponentId(strings[i]);
- retval = retval ^ n.isAnonymous();
- }
- return retval;
- }
-
- private void out(String string) {
- System.out.println(string);
- }
-
- public static void main(String[] args) {
- new ComponentIdBenchmark().run();
- }
-
-}
diff --git a/container-di/pom.xml b/container-di/pom.xml
deleted file mode 100644
index f0778f7d45e..00000000000
--- a/container-di/pom.xml
+++ /dev/null
@@ -1,127 +0,0 @@
-<?xml version="1.0"?>
-<!-- Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
- http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>parent</artifactId>
- <version>7-SNAPSHOT</version>
- <relativePath>../parent/pom.xml</relativePath>
- </parent>
- <artifactId>container-di</artifactId>
- <version>7-SNAPSHOT</version>
- <packaging>container-plugin</packaging>
- <dependencies>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>annotations</artifactId>
- <version>${project.version}</version>
- </dependency>
- <dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-library</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>jdisc_core</artifactId>
- <version>${project.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>vespajlib</artifactId>
- <version>${project.version}</version>
- <exclusions>
- <exclusion>
- <groupId>log4j</groupId>
- <artifactId>log4j</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>config-bundle</artifactId>
- <version>${project.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>config</artifactId>
- <version>${project.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>com.google.inject</groupId>
- <artifactId>guice</artifactId>
- <classifier>no_aop</classifier>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>component</artifactId>
- <version>${project.version}</version>
- <scope>provided</scope>
- </dependency>
- </dependencies>
- <build>
- <plugins>
- <plugin>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>bundle-plugin</artifactId>
- <extensions>true</extensions>
- </plugin>
- <plugin>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>config-class-plugin</artifactId>
- <version>${project.version}</version>
- <executions>
- <execution>
- <goals>
- <goal>config-gen</goal>
- </goals>
- </execution>
- <execution>
- <id>configgen-test-defs</id>
- <phase>generate-test-sources</phase>
- <goals>
- <goal>config-gen</goal>
- </goals>
- <configuration>
- <defFilesDirectories>src/test/vespa-configdef</defFilesDirectories>
- <outputDirectory>target/generated-test-sources/vespa-configgen-plugin</outputDirectory>
- <testConfig>true</testConfig>
- </configuration>
- </execution>
- </executions>
- </plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-javadoc-plugin</artifactId>
- </plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-surefire-plugin</artifactId>
- <configuration>
- <forkMode>once</forkMode>
- </configuration>
- </plugin>
- <plugin>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>abi-check-plugin</artifactId>
- </plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-compiler-plugin</artifactId>
- </plugin>
- </plugins>
- </build>
-</project>
diff --git a/container-di/src/main/java/com/yahoo/container/di/componentgraph/package-info.java b/container-di/src/main/java/com/yahoo/container/di/componentgraph/package-info.java
deleted file mode 100644
index 0c11cfb5ba4..00000000000
--- a/container-di/src/main/java/com/yahoo/container/di/componentgraph/package-info.java
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-@ExportPackage
-@PublicApi
-package com.yahoo.container.di.componentgraph;
-
-import com.yahoo.api.annotations.PublicApi;
-import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-disc/pom.xml b/container-disc/pom.xml
index a78657b7f09..e537b8f1c35 100644
--- a/container-disc/pom.xml
+++ b/container-disc/pom.xml
@@ -56,17 +56,6 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>component</artifactId>
<version>${project.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>javax.servlet</groupId>
- <artifactId>javax.servlet-api</artifactId>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>org.eclipse.jetty</groupId>
- <artifactId>jetty-servlet</artifactId>
- <scope>provided</scope>
</dependency>
<dependency>
<groupId>com.yahoo.vespa</groupId>
@@ -109,22 +98,27 @@
<!-- WARNING: These are only here to make bundlification work -->
<dependency>
<groupId>com.yahoo.vespa</groupId>
- <artifactId>simplemetrics</artifactId>
+ <artifactId>config-bundle</artifactId>
<version>${project.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.yahoo.vespa</groupId>
- <artifactId>config-bundle</artifactId>
+ <artifactId>configdefinitions</artifactId>
<version>${project.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.yahoo.vespa</groupId>
- <artifactId>configdefinitions</artifactId>
+ <artifactId>jdisc_jetty</artifactId>
<version>${project.version}</version>
<scope>provided</scope>
</dependency>
+ <dependency>
+ <groupId>javax.servlet</groupId>
+ <artifactId>javax.servlet-api</artifactId>
+ <scope>provided</scope>
+ </dependency>
<!-- end WARNING -->
<!-- ensure that transitive Jackson dependencies are not included in compile scope -->
@@ -167,7 +161,6 @@
<buildLegacyVespaPlatformBundle>true</buildLegacyVespaPlatformBundle>
<discPreInstallBundle>
<!-- Vespa bundles -->
- component-jar-with-dependencies.jar,
configgen.jar,
config-bundle-jar-with-dependencies.jar,
configdefinitions-jar-with-dependencies.jar,
@@ -182,12 +175,16 @@
vespaclient-container-plugin-jar-with-dependencies.jar,
vespa-athenz-jar-with-dependencies.jar,
security-utils-jar-with-dependencies.jar,
- simplemetrics-jar-with-dependencies.jar,
defaults-jar-with-dependencies.jar,
- component-jar-with-dependencies.jar,
zkfacade-jar-with-dependencies.jar,
zookeeper-server-jar-with-dependencies.jar,
<!-- Jetty -->
+ alpn-api-${jetty-alpn.version}.jar,
+ http2-server-${jetty.version}.jar,
+ http2-common-${jetty.version}.jar,
+ http2-hpack-${jetty.version}.jar,
+ jetty-alpn-java-server-${jetty.version}.jar,
+ jetty-alpn-server-${jetty.version}.jar,
jetty-continuation-${jetty.version}.jar,
jetty-http-${jetty.version}.jar,
jetty-io-${jetty.version}.jar,
@@ -198,6 +195,14 @@
jetty-servlets-${jetty.version}.jar,
jetty-util-${jetty.version}.jar,
jetty-util-ajax-${jetty.version}.jar,
+ <!-- Spifly (required for OSGi service loader used by Jetty) -->
+ org.apache.aries.spifly.dynamic.bundle-${spifly.version}.jar,
+ asm-${asm.version}.jar,
+ asm-analysis-${asm.version}.jar,
+ asm-commons-${asm.version}.jar,
+ asm-tree-${asm.version}.jar,
+ asm-util-${asm.version}.jar,
+ <!-- Spifly end -->
<!-- Misc 3rd party bundles -->
bcpkix-jdk15on-${bouncycastle.version}.jar,
bcprov-jdk15on-${bouncycastle.version}.jar,
diff --git a/container-jersey2/pom.xml b/container-jersey2/pom.xml
index e94e2a46bba..8f24b5edcb7 100644
--- a/container-jersey2/pom.xml
+++ b/container-jersey2/pom.xml
@@ -38,7 +38,7 @@
</dependency>
<dependency>
<groupId>com.yahoo.vespa</groupId>
- <artifactId>container-di</artifactId>
+ <artifactId>container-core</artifactId>
<version>${project.version}</version>
<scope>provided</scope>
</dependency>
diff --git a/container-messagebus/pom.xml b/container-messagebus/pom.xml
index f98607015f9..adaf64a0a03 100644
--- a/container-messagebus/pom.xml
+++ b/container-messagebus/pom.xml
@@ -60,12 +60,6 @@
</dependency>
<dependency>
<groupId>com.yahoo.vespa</groupId>
- <artifactId>container-di</artifactId>
- <version>${project.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
<artifactId>jdisc_core</artifactId>
<version>${project.version}</version>
<scope>provided</scope>
diff --git a/container-search-and-docproc/pom.xml b/container-search-and-docproc/pom.xml
index 01516cfdd99..2decbd5fd1e 100644
--- a/container-search-and-docproc/pom.xml
+++ b/container-search-and-docproc/pom.xml
@@ -128,12 +128,6 @@
</dependency>
<dependency>
<groupId>com.yahoo.vespa</groupId>
- <artifactId>simplemetrics</artifactId>
- <version>${project.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
<artifactId>provided-dependencies</artifactId>
<version>${project.version}</version>
<scope>provided</scope>
diff --git a/container-search/abi-spec.json b/container-search/abi-spec.json
index 75b8814ecb0..b5933936adf 100644
--- a/container-search/abi-spec.json
+++ b/container-search/abi-spec.json
@@ -1642,13 +1642,13 @@
"final"
],
"methods": [
+ "public void <init>()",
+ "public void <init>(int)",
+ "public void <init>(java.lang.String, int)",
"public com.yahoo.prelude.query.Item$ItemType getItemType()",
"public java.lang.String getName()",
- "public void <init>(java.lang.String, int)",
- "public void <init>(int)",
"public void setIndexName(java.lang.String)",
"public java.lang.String getIndexName()",
- "public void <init>()",
"public int getN()",
"public void setN(int)",
"public int getScoreThreshold()",
@@ -4235,7 +4235,8 @@
"public com.yahoo.search.Result searchAndFill(com.yahoo.search.Query, com.yahoo.component.chain.Chain)",
"public com.yahoo.processing.rendering.Renderer getRendererCopy(com.yahoo.component.ComponentSpecification)",
"public com.yahoo.search.searchchain.SearchChainRegistry getSearchChainRegistry()",
- "public void createRequestMapping(com.yahoo.slime.Inspector, java.util.Map, java.lang.String)"
+ "public void createRequestMapping(com.yahoo.slime.Inspector, java.util.Map, java.lang.String)",
+ "public com.yahoo.container.jdisc.RequestHandlerSpec requestHandlerSpec()"
],
"fields": [
"public static final java.lang.String defaultSearchChainName"
diff --git a/container-search/pom.xml b/container-search/pom.xml
index c1a7156235e..822e6971bed 100644
--- a/container-search/pom.xml
+++ b/container-search/pom.xml
@@ -61,12 +61,6 @@
<scope>provided</scope>
</dependency>
<dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>simplemetrics</artifactId>
- <version>${project.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
<groupId>org.json</groupId>
<artifactId>json</artifactId>
<scope>provided</scope>
diff --git a/container-search/src/main/antlr4/com/yahoo/search/yql/yqlplus.g4 b/container-search/src/main/antlr4/com/yahoo/search/yql/yqlplus.g4
index b7d443ea56c..c0cf293f2ea 100644
--- a/container-search/src/main/antlr4/com/yahoo/search/yql/yqlplus.g4
+++ b/container-search/src/main/antlr4/com/yahoo/search/yql/yqlplus.g4
@@ -18,22 +18,9 @@ options {
protected Stack<expression_scope> expression_stack = new Stack();
}
-// tokens for command syntax
- CREATE : 'create';
+// tokens
+
SELECT : 'select';
- INSERT : 'insert';
- UPDATE : 'update';
- SET : 'set';
- VIEW : 'view';
- TABLE : 'table';
- DELETE : 'delete';
- INTO : 'into';
- VALUES : 'values';
- IMPORT : 'import';
- NEXT : 'next';
- PAGED : 'paged';
- FALLBACK : 'fallback';
- IMPORT_FROM :;
LIMIT : 'limit';
OFFSET : 'offset';
@@ -44,36 +31,11 @@ options {
FROM : 'from';
SOURCES : 'sources';
AS : 'as';
- MERGE : 'merge';
- LEFT : 'left';
- JOIN : 'join';
-
- ON : 'on';
+
COMMA : ',';
OUTPUT : 'output';
COUNT : 'count';
- RETURNING : 'returning';
- APPLY : 'apply';
- CAST : 'cast';
-
- BEGIN : 'begin';
- END : 'end';
-
- // type-related
- TYPE_BYTE : 'byte';
- TYPE_INT16 : 'int16';
- TYPE_INT32 : 'int32';
- TYPE_INT64 : 'int64';
- TYPE_STRING : 'string';
- TYPE_DOUBLE : 'double';
- TYPE_TIMESTAMP : 'timestamp';
- TYPE_BOOLEAN : 'boolean';
- TYPE_ARRAY : 'array';
- TYPE_MAP : 'map';
-
- // READ_FIELD;
-
- // token literals
+
TRUE : 'true';
FALSE : 'false';
@@ -123,7 +85,6 @@ options {
// statement delimiter
SEMI : ';';
- PROGRAM : 'program';
TIMEOUT : 'timeout';
@@ -149,7 +110,6 @@ FLOAT
fragment
EXPONENT : ('e'|'E') ('+'|'-')? ('0'..'9')+ ;
-
fragment
DIGIT : '0'..'9'
;
@@ -163,7 +123,6 @@ STRING : '"' ( ESC_SEQ | ~('\\'| '"') )* '"'
| '\'' ( ESC_SEQ | ~('\\' | '\'') )* '\''
;
-/////////////////////////////
fragment
HEX_DIGIT : ('0'..'9'|'a'..'f'|'A'..'F') ;
@@ -183,13 +142,11 @@ WS : ( ' '
| '\t'
| '\r'
| '\n'
- // ) {$channel=HIDDEN;}
) -> channel(HIDDEN)
;
COMMENT
: ( ('//') ~('\n'|'\r')* '\r'? '\n'?
- // | '/*' ( options {greedy=false;} : . )* '*/'
| '/*' .*? '*/'
)
-> channel(HIDDEN)
@@ -208,39 +165,20 @@ VESPA_GROUPING_ARG
(')' | ']' | '>')
;
-/*------------------------------------------------------------------
- * PARSER RULES
- *------------------------------------------------------------------*/
+// --------- parser rules ------------
ident
: keyword_as_ident //{addChild(new TerminalNodeImpl(keyword_as_ident.getText()));}
- //{return ID<IDNode>[$keyword_as_ident.text];}
| ID
;
keyword_as_ident
- : SELECT | TABLE | DELETE | INTO | VALUES | LIMIT | OFFSET | WHERE | 'order' | 'by' | DESC | MERGE | LEFT | JOIN
- | ON | OUTPUT | COUNT | BEGIN | END | APPLY | TYPE_BYTE | TYPE_INT16 | TYPE_INT32 | TYPE_INT64 | TYPE_BOOLEAN | TYPE_TIMESTAMP | TYPE_DOUBLE | TYPE_STRING | TYPE_ARRAY | TYPE_MAP
- | VIEW | CREATE | IMPORT | PROGRAM | NEXT | PAGED | SOURCES | SET | MATCHES | LIKE | CAST
+ : SELECT | LIMIT | OFFSET | WHERE | 'order' | 'by' | DESC | OUTPUT | COUNT | SOURCES | MATCHES | LIKE
;
-program : params? (import_statement SEMI)* (ddl SEMI)* (statement SEMI)* EOF
+program : (statement SEMI)* EOF
;
-params
- : PROGRAM LPAREN program_arglist? RPAREN SEMI
- ;
-
-import_statement
- : IMPORT moduleName AS moduleId
- | IMPORT moduleId
- | FROM moduleName IMPORT import_list
- ;
-
-import_list
- : moduleId (',' moduleId)*
- ;
-
moduleId
: ID
;
@@ -250,46 +188,18 @@ moduleName
| namespaced_name
;
-ddl
- : view
- ;
-
-view : CREATE VIEW ID AS source_statement
- ;
-
-program_arglist
- : procedure_argument (',' procedure_argument)*
- ;
-
-procedure_argument
- :
- AT (ident TYPE_ARRAY LT typename GTEQ (expression[false])? ) {registerParameter($ident.start.getText(), $typename.start.getText());}
- | AT (ident typename ('=' expression[false])? ) {registerParameter($ident.start.getText(), $typename.start.getText());}
- ;
-
statement
: output_statement
- | selectvar_statement
- | next_statement
;
output_statement
- : source_statement paged_clause? output_spec?
- ;
-
-paged_clause
- : PAGED fixed_or_parameter
+ : source_statement output_spec?
;
-next_statement
- : NEXT literalString OUTPUT AS ident
- ;
-
source_statement
: query_statement (PIPE pipeline_step)*
;
-
pipeline_step
: namespaced_name arguments[false]?
| vespa_grouping
@@ -300,50 +210,17 @@ vespa_grouping
| annotation VESPA_GROUPING
;
-selectvar_statement
- : CREATE ('temp' | 'temporary') TABLE ident AS LPAREN source_statement RPAREN
- ;
-
-typename
- : TYPE_BYTE | TYPE_INT16 | TYPE_INT32 | TYPE_INT64 | TYPE_STRING | TYPE_BOOLEAN | TYPE_TIMESTAMP
- | arrayType | mapType | TYPE_DOUBLE
- ;
-
-arrayType
- : TYPE_ARRAY LT typename GT
- ;
-
-mapType
- : TYPE_MAP LT typename GT
- ;
-
output_spec
: (OUTPUT AS ident)
| (OUTPUT COUNT AS ident)
;
query_statement
- : merge_statement
- | select_statement
- | insert_statement
- | delete_statement
- | update_statement
+ : select_statement
;
-// This does not use the UNION / UNION ALL from SQL because the semantics are different than SQL UNION
-// - no set operation is implied (no DISTINCT)
-// - CQL resultsets may be heterogeneous (rows may have heterogenous types)
-merge_statement
- : merge_component (MERGE merge_component)+
- ;
-
-merge_component
- : select_statement
- | LPAREN source_statement RPAREN
- ;
-
select_statement
- : SELECT select_field_spec select_source? where? orderby? limit? offset? timeout? fallback?
+ : SELECT select_field_spec select_source? where? orderby? limit? offset? timeout?
;
select_field_spec
@@ -355,10 +232,6 @@ project_spec
: field_def (COMMA field_def)*
;
-fallback
- : FALLBACK select_statement
- ;
-
timeout
: TIMEOUT fixed_or_parameter
;
@@ -366,7 +239,7 @@ timeout
select_source
: select_source_all
| select_source_multi
- | select_source_join
+ | select_source_from
;
select_source_all
@@ -377,23 +250,14 @@ select_source_multi
: FROM SOURCES source_list
;
-select_source_join
- : FROM source_spec join_expr*
+select_source_from
+ : FROM source_spec
;
source_list
: namespaced_name (COMMA namespaced_name )*
;
-join_expr
- : (join_spec source_spec ON joinExpression)
- ;
-
-join_spec
- : LEFT JOIN
- | 'inner'? JOIN
- ;
-
source_spec
: ( data_source (alias_def { ($data_source.ctx).addChild($alias_def.ctx); })? )
;
@@ -466,21 +330,6 @@ argument[boolean in_select]
: expression[$in_select]
;
-// -------- join expressions ------------
-
-// Limit expression syntax for joins: A single equality test and one field from each source.
-// This means it can always turn the join into a query to one source, collecting all of the
-// keys from the results, and then a query to the other source (or querying the other source inline).
-// Does not support map or index references.
-
-joinExpression
- : joinDereferencedExpression EQ joinDereferencedExpression
- ;
-
-joinDereferencedExpression
- : namespaced_name
- ;
-
// --------- expressions ------------
expression [boolean select]
@@ -587,15 +436,6 @@ indexref[boolean in_select]
propertyref
: DOT nm=ID
;
-operatorCall
-@init{
- boolean in_select = expression_stack.peek().in_select;
-}
- : multOp arguments[in_select]
- | additiveOp arguments[in_select]
- | AND arguments[in_select]
- | OR arguments[in_select]
- ;
primaryExpression
@init {
@@ -671,7 +511,7 @@ array_parameter
;
literal_list
- : LPAREN literal_element (COMMA literal_element)* RPAREN //{return ^(ARRAY_LITERAL literal_element+);}
+ : LPAREN literal_element (COMMA literal_element)* RPAREN
;
literal_element
@@ -683,63 +523,3 @@ fixed_or_parameter
: INT
| parameter
;
-
-// INSERT
-
-insert_statement
- : INSERT insert_source insert_values returning_spec?
- ;
-
-insert_source
- : INTO write_data_source
- ;
-
-write_data_source
- : namespaced_name
- ;
-
-insert_values
- : field_names_spec VALUES field_values_group_spec (COMMA field_values_group_spec)*
- | query_statement
- ;
-
-field_names_spec
- : LPAREN field_def (COMMA field_def)* RPAREN
- ;
-
-field_values_spec
- : LPAREN expression[true] (COMMA expression[true])* RPAREN
- ;
-
-field_values_group_spec
- : LPAREN expression[true] (COMMA expression[true])* RPAREN
- ;
-
-returning_spec
- : RETURNING select_field_spec
- ;
-
-// DELETE
-
-delete_statement
- : DELETE delete_source where? returning_spec?
- ;
-
-delete_source
- : FROM write_data_source
- ;
-
-// UPDATE
-
-update_statement
- : UPDATE update_source SET update_values where? returning_spec?
- ;
-
-update_source
- : write_data_source
- ;
-
-update_values
- : field_names_spec EQ field_values_spec
- | field_def (COMMA field_def)*
- ;
diff --git a/container-search/src/main/java/com/yahoo/prelude/Index.java b/container-search/src/main/java/com/yahoo/prelude/Index.java
index 8915c4b42f0..306c7c80577 100644
--- a/container-search/src/main/java/com/yahoo/prelude/Index.java
+++ b/container-search/src/main/java/com/yahoo/prelude/Index.java
@@ -23,31 +23,11 @@ import java.util.Set;
*/
public class Index {
- public static class Attribute {
-
- private boolean tokenizedContent = false;
- public final String name;
-
- public Attribute(String name) {
- this.name = name;
- }
-
- public boolean isTokenizedContent() {
- return tokenizedContent;
- }
-
- public void setTokenizedContent(boolean tokenizedContent) {
- this.tokenizedContent = tokenizedContent;
- }
- }
-
/** The null index - don't use this for name lookups */
public static final Index nullIndex = new Index("(null)");
private final String name;
- private String type; // TODO: Parse to a type object; do not expose this as a string
-
private final List<String> aliases = new ArrayList<>();
// The state resulting from adding commands to this (using addCommand)
diff --git a/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterSearcher.java b/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterSearcher.java
index 37b0fd7ebfb..ccb6e1248b4 100644
--- a/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterSearcher.java
+++ b/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterSearcher.java
@@ -4,12 +4,12 @@ package com.yahoo.prelude.cluster;
import com.yahoo.component.ComponentId;
import com.yahoo.component.chain.dependencies.After;
import com.yahoo.component.provider.ComponentRegistry;
+import com.yahoo.container.QrConfig;
import com.yahoo.container.QrSearchersConfig;
import com.yahoo.container.handler.VipStatus;
import com.yahoo.prelude.IndexFacts;
import com.yahoo.prelude.fastsearch.ClusterParams;
import com.yahoo.prelude.fastsearch.DocumentdbInfoConfig;
-import com.yahoo.prelude.fastsearch.FS4ResourcePool;
import com.yahoo.prelude.fastsearch.FastSearcher;
import com.yahoo.prelude.fastsearch.SummaryParameters;
import com.yahoo.prelude.fastsearch.VespaBackEndSearcher;
@@ -66,7 +66,7 @@ public class ClusterSearcher extends Searcher {
ClusterConfig clusterConfig,
DocumentdbInfoConfig documentDbConfig,
ComponentRegistry<Dispatcher> dispatchers,
- FS4ResourcePool fs4ResourcePool,
+ QrConfig qrConfig,
VipStatus vipStatus) {
super(id);
@@ -92,12 +92,12 @@ public class ClusterSearcher extends Searcher {
}
if (searchClusterConfig.indexingmode() == STREAMING) {
- VdsStreamingSearcher searcher = vdsCluster(fs4ResourcePool.getServerId(), searchClusterIndex,
+ VdsStreamingSearcher searcher = vdsCluster(qrConfig.discriminator(), searchClusterIndex,
searchClusterConfig, docSumParams, documentDbConfig);
addBackendSearcher(searcher);
vipStatus.addToRotation(searcher.getName());
} else {
- FastSearcher searcher = searchDispatch(searchClusterIndex, searchClusterName, fs4ResourcePool.getServerId(),
+ FastSearcher searcher = searchDispatch(searchClusterIndex, searchClusterName, qrConfig.discriminator(),
docSumParams, documentDbConfig, dispatchers);
addBackendSearcher(searcher);
diff --git a/container-search/src/main/java/com/yahoo/prelude/fastsearch/FS4ResourcePool.java b/container-search/src/main/java/com/yahoo/prelude/fastsearch/FS4ResourcePool.java
deleted file mode 100644
index ed9eb72d7dd..00000000000
--- a/container-search/src/main/java/com/yahoo/prelude/fastsearch/FS4ResourcePool.java
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.prelude.fastsearch;
-
-import com.google.inject.Inject;
-import com.yahoo.component.AbstractComponent;
-import com.yahoo.concurrent.ThreadFactoryFactory;
-import com.yahoo.container.QrConfig;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
-/**
- * All users will get the same pool instance.
- *
- * @author baldersheim
- */
-public class FS4ResourcePool extends AbstractComponent {
-
- private static final Logger logger = Logger.getLogger(FS4ResourcePool.class.getName());
- private static final AtomicInteger instanceCounter = new AtomicInteger(0);
- private final String serverId;
- private final int instanceId;
- private final ExecutorService executor;
- private final ScheduledExecutorService scheduledExecutor;
-
- @Inject
- public FS4ResourcePool(QrConfig config) {
- this(config.discriminator());
- }
-
- public FS4ResourcePool(String serverId) {
- this.serverId = serverId;
- instanceId = instanceCounter.getAndIncrement();
- String name = "FS4-" + instanceId;
- executor = Executors.newCachedThreadPool(ThreadFactoryFactory.getDaemonThreadFactory(name));
- scheduledExecutor = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory(name + ".scheduled"));
- }
-
- /** Returns an unique identifier of the server this runs in */
- public String getServerId() { return serverId; }
- public ExecutorService getExecutor() { return executor; }
- public ScheduledExecutorService getScheduledExecutor() { return scheduledExecutor; }
-
- @Override
- public void deconstruct() {
- logger.log(Level.INFO, "Deconstructing FS4ResourcePool with id '" + instanceId + "'.");
- super.deconstruct();
- executor.shutdown();
- scheduledExecutor.shutdown();
- try {
- executor.awaitTermination(10, TimeUnit.SECONDS);
- scheduledExecutor.awaitTermination(10, TimeUnit.SECONDS);
- } catch (InterruptedException e) {
- logger.warning("Executors failed terminating within timeout of 10 seconds : " + e);
- }
- }
-
-}
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/FalseItem.java b/container-search/src/main/java/com/yahoo/prelude/query/FalseItem.java
index 531a89312df..9abc6b2bdaa 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/FalseItem.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/FalseItem.java
@@ -4,7 +4,7 @@ package com.yahoo.prelude.query;
import java.nio.ByteBuffer;
/**
- * A query item which never matches. This is sometimes an useful output of query rewriting.
+ * A query item which never matches. This is sometimes a useful output of query rewriting.
*
* @author bratseth
*/
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/QueryCanonicalizer.java b/container-search/src/main/java/com/yahoo/prelude/query/QueryCanonicalizer.java
index 88bae76b26d..2bf20bf7c5a 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/QueryCanonicalizer.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/QueryCanonicalizer.java
@@ -94,7 +94,7 @@ public class QueryCanonicalizer {
if (composite instanceof RankItem || composite instanceof NotItem) {
collapseLevels(composite, composite.getItemIterator()); // collapse the first item only
}
- else if (composite instanceof AndItem || composite instanceof OrItem) {
+ else if (composite instanceof AndItem || composite instanceof OrItem || composite instanceof WeakAndItem) {
for (ListIterator<Item> i = composite.getItemIterator(); i.hasNext(); )
collapseLevels(composite, i);
}
@@ -106,10 +106,17 @@ public class QueryCanonicalizer {
Item child = i.next();
if (child == null) return;
if (child.getClass() != composite.getClass()) return;
+ if (child instanceof WeakAndItem && !equalWeakAndSettings((WeakAndItem)child, (WeakAndItem)composite)) return;
i.remove();
moveChildren((CompositeItem) child, i);
}
-
+
+ private static boolean equalWeakAndSettings(WeakAndItem a, WeakAndItem b) {
+ if ( ! a.getIndexName().equals(b.getIndexName())) return false;
+ if (a.getN() != b.getN()) return false;
+ return true;
+ }
+
private static void moveChildren(CompositeItem from, ListIterator<Item> toIterator) {
for (ListIterator<Item> i = from.getItemIterator(); i.hasNext(); )
toIterator.add(i.next());
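
The WeakAndItem collapsing added above only flattens a nested WAND into its parent when both have the same index name and the same target hit count N. A minimal standalone sketch of that rule follows; it is not the canonicalizer's actual code path, and it only uses the WeakAndItem accessors visible in this diff.

import com.yahoo.prelude.query.WeakAndItem;

public class WeakAndCollapseSketch {

    // Mirrors equalWeakAndSettings above: a child WAND may be merged into its
    // parent only when index name and N agree.
    static boolean canCollapse(WeakAndItem parent, WeakAndItem child) {
        return parent.getIndexName().equals(child.getIndexName())
                && parent.getN() == child.getN();
    }

    public static void main(String[] args) {
        WeakAndItem parent = new WeakAndItem(100);
        System.out.println(canCollapse(parent, new WeakAndItem(100))); // true
        System.out.println(canCollapse(parent, new WeakAndItem(50)));  // false
    }
}
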
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/SameElementItem.java b/container-search/src/main/java/com/yahoo/prelude/query/SameElementItem.java
index ac4e8b98b03..58bbcd7315c 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/SameElementItem.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/SameElementItem.java
@@ -50,6 +50,7 @@ public class SameElementItem extends NonReducibleCompositeItem {
super.adding(item);
//TODO See if we can require only SimpleIndexedItem instead of TermItem
Validator.ensureInstanceOf("Child item", item, TermItem.class);
+ Validator.ensureNotInstanceOf("Child item", item, WordAlternativesItem.class);
TermItem asTerm = (TermItem) item;
Validator.ensureNonEmpty("Struct fieldname", asTerm.getIndexName());
Validator.ensureNonEmpty("Query term", asTerm.getIndexedString());
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/WandItem.java b/container-search/src/main/java/com/yahoo/prelude/query/WandItem.java
index 8cce8fb5720..c5679e113f1 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/WandItem.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/WandItem.java
@@ -99,13 +99,10 @@ public class WandItem extends WeightedSetItem {
protected void appendHeadingString(StringBuilder buffer) {
buffer.append(getName());
buffer.append("(");
- buffer.append(targetNumHits);
- buffer.append(",");
- buffer.append(scoreThreshold);
- buffer.append(",");
+ buffer.append(targetNumHits).append(",");
+ buffer.append(scoreThreshold).append(",");
buffer.append(thresholdBoostFactor);
- buffer.append(")");
- buffer.append(" ");
+ buffer.append(") ");
}
@Override
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/WeakAndItem.java b/container-search/src/main/java/com/yahoo/prelude/query/WeakAndItem.java
index 4fa2ed8b214..e8817a44133 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/WeakAndItem.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/WeakAndItem.java
@@ -18,16 +18,20 @@ import java.nio.ByteBuffer;
*/
public final class WeakAndItem extends NonReducibleCompositeItem {
+ /** The default N used if none is specified: 100 */
+ public static final int defaultN = 100;
+
private int n;
private String index;
private int scoreThreshold = 0;
- public ItemType getItemType() {
- return ItemType.WEAK_AND;
+ /** Creates a WAND item with default N */
+ public WeakAndItem() {
+ this(defaultN);
}
- public String getName() {
- return "WAND";
+ public WeakAndItem(int N) {
+ this("", N);
}
/**
@@ -42,50 +46,37 @@ public final class WeakAndItem extends NonReducibleCompositeItem {
this.n = n;
this.index = (index == null) ? "" : index;
}
- public WeakAndItem(int N) {
- this("", N);
- }
- /** Sets the index name of all subitems of this */
+ @Override
+ public ItemType getItemType() { return ItemType.WEAK_AND; }
+
+ @Override
+ public String getName() { return "WEAKAND"; }
+
+ @Override
public void setIndexName(String index) {
String toSet = (index == null) ? "" : index;
super.setIndexName(toSet);
this.index = toSet;
}
- public String getIndexName() {
- return index;
- }
+ public String getIndexName() { return index; }
/** Appends the heading of this string - <code>[getName()]([limit]) </code> */
+ @Override
protected void appendHeadingString(StringBuilder buffer) {
buffer.append(getName());
buffer.append("(");
buffer.append(n);
- buffer.append(")");
- buffer.append(" ");
+ buffer.append(") ");
}
- /** The default N used if none is specified: 100 */
- public static final int defaultN = 100;
+ public int getN() { return n; }
- /** Creates a WAND item with default N */
- public WeakAndItem() {
- this(defaultN);
- }
-
- public int getN() {
- return n;
- }
-
- public void setN(int N) {
- this.n = N;
- }
+ public void setN(int N) { this.n = N; }
@Deprecated // TODO: Remove on Vespa 8
- public int getScoreThreshold() {
- return scoreThreshold;
- }
+ public int getScoreThreshold() { return scoreThreshold; }
/**
* Noop.
@@ -93,9 +84,7 @@ public final class WeakAndItem extends NonReducibleCompositeItem {
* @deprecated has no effect
*/
@Deprecated // TODO: Remove on Vespa 8
- public void setScoreThreshold(int scoreThreshold) {
- this.scoreThreshold = scoreThreshold;
- }
+ public void setScoreThreshold(int scoreThreshold) { this.scoreThreshold = scoreThreshold; }
@Override
protected void encodeThis(ByteBuffer buffer) {
@@ -111,9 +100,7 @@ public final class WeakAndItem extends NonReducibleCompositeItem {
}
@Override
- public int hashCode() {
- return super.hashCode() + 31 * n;
- }
+ public int hashCode() { return super.hashCode() + 31 * n; }
/** Returns whether this item is of the same class and contains the same state as the given item. */
@Override
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/WordAlternativesItem.java b/container-search/src/main/java/com/yahoo/prelude/query/WordAlternativesItem.java
index 2c017410109..d2df2aa6c89 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/WordAlternativesItem.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/WordAlternativesItem.java
@@ -19,7 +19,6 @@ import com.yahoo.compress.IntegerCompressor;
public class WordAlternativesItem extends TermItem {
private List<Alternative> alternatives;
- private int maxIndex;
public static final class Alternative {
@@ -48,7 +47,6 @@ public class WordAlternativesItem extends TermItem {
public void setAlternatives(Collection<Alternative> terms) {
this.alternatives = uniqueAlternatives(terms);
- setMaxIndex();
}
private static ImmutableList<Alternative> uniqueAlternatives(Collection<Alternative> terms) {
@@ -67,27 +65,6 @@ public class WordAlternativesItem extends TermItem {
return ImmutableList.copyOf(uniqueTerms);
}
- private void setMaxIndex() {
- int maxIndex = 0;
- int currentIndex = 0;
- double maxScore = 0.0d;
- boolean first = true;
- for (Alternative val : this.alternatives) {
- if (first) {
- first = false;
- maxIndex = 0;
- maxScore = val.exactness;
- } else {
- if (val.exactness > maxScore) {
- maxScore = val.exactness;
- maxIndex = currentIndex;
- }
- }
- ++currentIndex;
- }
- this.maxIndex = maxIndex;
- }
-
@Override
public String stringValue() {
StringBuilder builder = new StringBuilder();
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/parser/AdvancedParser.java b/container-search/src/main/java/com/yahoo/prelude/query/parser/AdvancedParser.java
index 74a993b0413..8b878417912 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/parser/AdvancedParser.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/parser/AdvancedParser.java
@@ -147,10 +147,10 @@ public class AdvancedParser extends StructuredParser {
return equiv;
}
return topLevelItem;
- } else if (isTheWord("wand", item)) {
+ } else if (isTheWord("wand", item) || isTheWord("weakand", item)) {
int n = consumeNumericArgument();
if (n == 0)
- n=WeakAndItem.defaultN;
+ n = WeakAndItem.defaultN;
if (topLevelIsClosed || !(topLevelItem instanceof WeakAndItem) || n != ((WeakAndItem)topLevelItem).getN()) {
WeakAndItem wand = new WeakAndItem();
wand.setN(n);
diff --git a/container-search/src/main/java/com/yahoo/prelude/querytransform/QueryRewrite.java b/container-search/src/main/java/com/yahoo/prelude/querytransform/QueryRewrite.java
index 5a936d42ccc..329e886c3d3 100644
--- a/container-search/src/main/java/com/yahoo/prelude/querytransform/QueryRewrite.java
+++ b/container-search/src/main/java/com/yahoo/prelude/querytransform/QueryRewrite.java
@@ -186,6 +186,7 @@ public class QueryRewrite {
} else if ((item instanceof AndItem) || (item instanceof NearItem)) {
return Recall.RECALLS_NOTHING;
} else if (item instanceof RankItem) {
+ if (i == 0) return Recall.RECALLS_NOTHING;
item.removeItem(i);
} else {
throw new UnsupportedOperationException(item.getClass().getName());
diff --git a/container-search/src/main/java/com/yahoo/prelude/statistics/StatisticsSearcher.java b/container-search/src/main/java/com/yahoo/prelude/statistics/StatisticsSearcher.java
index 941015b2fae..24b631f1773 100644
--- a/container-search/src/main/java/com/yahoo/prelude/statistics/StatisticsSearcher.java
+++ b/container-search/src/main/java/com/yahoo/prelude/statistics/StatisticsSearcher.java
@@ -5,9 +5,9 @@ import com.yahoo.component.chain.dependencies.Before;
import com.yahoo.concurrent.CopyOnWriteHashMap;
import com.yahoo.container.protect.Error;
import com.yahoo.jdisc.Metric;
-import java.util.logging.Level;
-import com.yahoo.metrics.simple.MetricSettings;
+import com.yahoo.jdisc.http.HttpRequest;
import com.yahoo.metrics.simple.MetricReceiver;
+import com.yahoo.metrics.simple.MetricSettings;
import com.yahoo.processing.request.CompoundName;
import com.yahoo.search.Query;
import com.yahoo.search.Result;
@@ -29,7 +29,18 @@ import java.util.PriorityQueue;
import java.util.Queue;
import java.util.logging.Level;
-import static com.yahoo.container.protect.Error.*;
+import static com.yahoo.container.protect.Error.BACKEND_COMMUNICATION_ERROR;
+import static com.yahoo.container.protect.Error.EMPTY_DOCUMENTS;
+import static com.yahoo.container.protect.Error.ERROR_IN_PLUGIN;
+import static com.yahoo.container.protect.Error.ILLEGAL_QUERY;
+import static com.yahoo.container.protect.Error.INTERNAL_SERVER_ERROR;
+import static com.yahoo.container.protect.Error.INVALID_QUERY_PARAMETER;
+import static com.yahoo.container.protect.Error.INVALID_QUERY_TRANSFORMATION;
+import static com.yahoo.container.protect.Error.NO_BACKENDS_IN_SERVICE;
+import static com.yahoo.container.protect.Error.RESULT_HAS_ERRORS;
+import static com.yahoo.container.protect.Error.SERVER_IS_MISCONFIGURED;
+import static com.yahoo.container.protect.Error.TIMEOUT;
+import static com.yahoo.container.protect.Error.UNSPECIFIED;
/**
@@ -236,7 +247,7 @@ public class StatisticsSearcher extends Searcher {
incrQueryCount(metricContext);
logQuery(query);
- long start_ns = System.nanoTime(); // Start time, in nanoseconds.
+ long start_ns = getStartNanoTime(query);
qps(metricContext);
Result result;
//handle exceptions thrown below in searchers
@@ -428,5 +439,15 @@ public class StatisticsSearcher extends Searcher {
}
}
+ /**
+ * Returns the relative start time from when the request was received by jdisc.
+ */
+ private static long getStartNanoTime(Query query) {
+ return Optional.ofNullable(query.getHttpRequest())
+ .flatMap(httpRequest -> Optional.ofNullable(httpRequest.getJDiscRequest()))
+ .map(HttpRequest::relativeCreatedAtNanoTime)
+ .orElseGet(System::nanoTime);
+ }
+
}
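
getStartNanoTime above prefers the jdisc request's creation timestamp and falls back to the current time when the query has no HTTP request attached. A minimal sketch of that fallback pattern in isolation; the nullable supplier below is a hypothetical stand-in for query.getHttpRequest().getJDiscRequest().

import java.util.Optional;
import java.util.function.LongSupplier;
import java.util.function.Supplier;

public class StartTimeSketch {

    // Prefer the request's creation time; fall back to "now" when it is absent.
    static long startNanoTime(Supplier<Long> requestCreatedAt, LongSupplier now) {
        return Optional.ofNullable(requestCreatedAt.get())
                       .orElseGet(() -> now.getAsLong());
    }

    public static void main(String[] args) {
        System.out.println(startNanoTime(() -> 1_000_000L, System::nanoTime)); // request time
        System.out.println(startNanoTime(() -> null, System::nanoTime));       // current time
    }
}
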
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/CloseableInvoker.java b/container-search/src/main/java/com/yahoo/search/dispatch/CloseableInvoker.java
index 515d6249fd8..9329f4a6819 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/CloseableInvoker.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/CloseableInvoker.java
@@ -12,6 +12,7 @@ import java.util.function.BiConsumer;
* @author ollivir
*/
public abstract class CloseableInvoker implements Closeable {
+
protected abstract void release();
private BiConsumer<Boolean, Long> teardown = null;
@@ -35,4 +36,5 @@ public abstract class CloseableInvoker implements Closeable {
}
release();
}
+
}
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java b/container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java
index 626cf087aca..9b92a78a7c9 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java
@@ -65,7 +65,7 @@ public class Dispatcher extends AbstractComponent {
/** A model of the search cluster this dispatches to */
private final SearchCluster searchCluster;
- private final ClusterMonitor clusterMonitor;
+ private final ClusterMonitor<Node> clusterMonitor;
private final LoadBalancer loadBalancer;
@@ -108,7 +108,7 @@ public class Dispatcher extends AbstractComponent {
}
/* Protected for simple mocking in tests. Beware that searchCluster is shutdown on in deconstruct() */
- protected Dispatcher(ClusterMonitor clusterMonitor,
+ protected Dispatcher(ClusterMonitor<Node> clusterMonitor,
SearchCluster searchCluster,
DispatchConfig dispatchConfig,
InvokerFactory invokerFactory,
@@ -125,12 +125,7 @@ public class Dispatcher extends AbstractComponent {
this.metricContext = metric.createContext(null);
this.maxHitsPerNode = dispatchConfig.maxHitsPerNode();
searchCluster.addMonitoring(clusterMonitor);
- Thread warmup = new Thread(new Runnable() {
- @Override
- public void run() {
- warmup(dispatchConfig.warmuptime());
- }
- });
+ Thread warmup = new Thread(() -> warmup(dispatchConfig.warmuptime()));
warmup.start();
try {
while ( ! searchCluster.hasInformationAboutAllNodes()) {
@@ -139,20 +134,17 @@ public class Dispatcher extends AbstractComponent {
warmup.join();
} catch (InterruptedException e) {}
- /*
- * No we have information from all nodes and a ping iteration has completed.
- * Instead of waiting until next ping interval to update coverage and group state,
- * we should compute the state ourselves, so that when the dispatcher is ready the state
- * of its groups are also known.
- */
+ // Now we have information from all nodes and a ping iteration has completed.
+ // Instead of waiting until next ping interval to update coverage and group state,
+ // we should compute the state ourselves, so that when the dispatcher is ready the state
+ // of its groups are also known.
searchCluster.pingIterationCompleted();
}
- /*
- Will run important code in order to trigger JIT compilation and avoid cold start issues.
- Currently warms up lz4 compression code.
+ /**
+ * Runs important code in order to trigger JIT compilation and avoid cold start issues.
+ * Currently warms up lz4 compression code.
*/
-
private static long warmup(double seconds) {
return new Compressor().warmup(seconds);
}
@@ -164,7 +156,7 @@ public class Dispatcher extends AbstractComponent {
@Override
public void deconstruct() {
- /* The clustermonitor must be shutdown first as it uses the invokerfactory through the searchCluster. */
+ // The clustermonitor must be shutdown first as it uses the invokerfactory through the searchCluster.
clusterMonitor.shutdown();
invokerFactory.release();
}
@@ -212,7 +204,7 @@ public class Dispatcher extends AbstractComponent {
return invokerFactory.createSearchInvoker(searcher,
query,
OptionalInt.empty(),
- Arrays.asList(node),
+ List.of(node),
true,
maxHitsPerNode)
.orElseThrow(() -> new IllegalStateException("Could not dispatch directly to " + node));
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/FillInvoker.java b/container-search/src/main/java/com/yahoo/search/dispatch/FillInvoker.java
index dd4c4494ac5..8b7714aaf3b 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/FillInvoker.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/FillInvoker.java
@@ -10,7 +10,8 @@ import com.yahoo.search.Result;
* @author ollivir
*/
public abstract class FillInvoker extends CloseableInvoker {
- /** Retrieve document summaries for the unfilled hits in the given {@link Result} */
+
+ /** Retrieves document summaries for the unfilled hits in the given {@link Result} */
public void fill(Result result, String summaryClass) {
sendFillRequest(result, summaryClass);
getFillResults(result, summaryClass);
@@ -19,4 +20,5 @@ public abstract class FillInvoker extends CloseableInvoker {
protected abstract void getFillResults(Result result, String summaryClass);
protected abstract void sendFillRequest(Result result, String summaryClass);
+
}
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/InterleavedSearchInvoker.java b/container-search/src/main/java/com/yahoo/search/dispatch/InterleavedSearchInvoker.java
index 036592dcf23..adf7368faa2 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/InterleavedSearchInvoker.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/InterleavedSearchInvoker.java
@@ -235,17 +235,6 @@ public class InterleavedSearchInvoker extends SearchInvoker implements ResponseM
return nextAdaptive;
}
- private String dbg(LeanHit hit) {
- var buf = new StringBuilder();
- buf.append("LeanHit[");
- if (hit.hasSortData()) buf.append("hasSortData,");
- buf.append("relevance=").append(hit.getRelevance());
- buf.append(",partId=").append(hit.getPartId());
- buf.append(",distributionKey=").append(hit.getDistributionKey());
- buf.append("]");
- return buf.toString();
- }
-
private List<LeanHit> mergeResult(Result result, InvokerResult partialResult, List<LeanHit> current) {
collectCoverage(partialResult.getResult().getCoverage(true));
@@ -382,4 +371,5 @@ public class InterleavedSearchInvoker extends SearchInvoker implements ResponseM
// For testing
Collection<SearchInvoker> invokers() { return invokers; }
+
}
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/InvokerFactory.java b/container-search/src/main/java/com/yahoo/search/dispatch/InvokerFactory.java
index f65e0e43757..1bcb640e3a5 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/InvokerFactory.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/InvokerFactory.java
@@ -35,7 +35,7 @@ public abstract class InvokerFactory {
public abstract FillInvoker createFillInvoker(VespaBackEndSearcher searcher, Result result);
/**
- * Create a {@link SearchInvoker} for a list of content nodes.
+ * Creates a {@link SearchInvoker} for a list of content nodes.
*
* @param searcher the searcher processing the query
* @param query the search query being processed
@@ -79,7 +79,7 @@ public abstract class InvokerFactory {
success.add(node);
}
}
- if ( ! searchCluster.isPartialGroupCoverageSufficient(groupId, success) && !acceptIncompleteCoverage) {
+ if ( ! searchCluster.isPartialGroupCoverageSufficient(success) && !acceptIncompleteCoverage) {
return Optional.empty();
}
if (invokers.size() == 0) {
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/InvokerResult.java b/container-search/src/main/java/com/yahoo/search/dispatch/InvokerResult.java
index 94c347a6927..2723429c0cf 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/InvokerResult.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/InvokerResult.java
@@ -12,14 +12,19 @@ import java.util.List;
/**
* Wraps a Result and a flat, skinny hit list
+ *
+ * @author baldersheim
*/
public class InvokerResult {
+
private final Result result;
private final List<LeanHit> leanHits;
+
public InvokerResult(Result result) {
this.result = result;
this.leanHits = Collections.emptyList();
}
+
public InvokerResult(Query query, int expectedHits) {
result = new Result(query);
leanHits = new ArrayList<>(expectedHits);
@@ -32,6 +37,7 @@ public class InvokerResult {
public List<LeanHit> getLeanHits() {
return leanHits;
}
+
void complete() {
Query query = result.getQuery();
Sorting sorting = query.getRanking().getSorting();
@@ -47,4 +53,5 @@ public class InvokerResult {
}
leanHits.clear();
}
+
}
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/LeanHit.java b/container-search/src/main/java/com/yahoo/search/dispatch/LeanHit.java
index 8a90557fa3b..df8fb2f29fa 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/LeanHit.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/LeanHit.java
@@ -4,7 +4,11 @@ package com.yahoo.search.dispatch;
import java.util.Arrays;
+/**
+ * @author baldersheim
+ */
public class LeanHit implements Comparable<LeanHit> {
+
private final byte [] gid;
private final double relevance;
private final byte [] sortData;
@@ -21,6 +25,7 @@ public class LeanHit implements Comparable<LeanHit> {
this.partId = partId;
this.distributionKey = distributionKey;
}
+
public double getRelevance() { return relevance; }
public byte [] getGid() { return gid; }
public byte [] getSortData() { return sortData; }
@@ -49,4 +54,5 @@ public class LeanHit implements Comparable<LeanHit> {
int vr = (int) right[i] & 0xFF;
return vl - vr;
}
+
}
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/LoadBalancer.java b/container-search/src/main/java/com/yahoo/search/dispatch/LoadBalancer.java
index 05e1ea6e2f9..ebde2ffc611 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/LoadBalancer.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/LoadBalancer.java
@@ -43,8 +43,8 @@ public class LoadBalancer {
}
/**
- * Select and allocate the search cluster group which is to be used for the next search query. Callers <b>must</b> call
- * {@link #releaseGroup} symmetrically for each taken allocation.
+ * Selects and allocates the search cluster group which is to be used for the next search query.
+ * Callers <b>must</b> call {@link #releaseGroup} symmetrically for each taken allocation.
*
* @param rejectedGroups if not null, the load balancer will only return groups with IDs not in the set
* @return the node group to target, or <i>empty</i> if the internal dispatch logic cannot be used
@@ -76,7 +76,7 @@ public class LoadBalancer {
synchronized (this) {
for (GroupStatus sched : scoreboard) {
if (sched.group.id() == group.id()) {
- sched.release(success, (double) searchTimeMs / 1000.0);
+ sched.release(success, searchTimeMs / 1000.0);
break;
}
}
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/ResponseMonitor.java b/container-search/src/main/java/com/yahoo/search/dispatch/ResponseMonitor.java
index c2e81d43677..3ebd21fa18a 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/ResponseMonitor.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/ResponseMonitor.java
@@ -9,5 +9,7 @@ package com.yahoo.search.dispatch;
* @author ollivir
*/
public interface ResponseMonitor<T> {
+
void responseAvailable(T from);
+
}
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/SearchErrorInvoker.java b/container-search/src/main/java/com/yahoo/search/dispatch/SearchErrorInvoker.java
index 256759360f7..7dbc2e98759 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/SearchErrorInvoker.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/SearchErrorInvoker.java
@@ -61,4 +61,5 @@ public class SearchErrorInvoker extends SearchInvoker {
protected void setMonitor(ResponseMonitor<SearchInvoker> monitor) {
this.monitor = monitor;
}
+
}
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/SearchPath.java b/container-search/src/main/java/com/yahoo/search/dispatch/SearchPath.java
index 7937be50813..f6480f80c01 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/SearchPath.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/SearchPath.java
@@ -28,15 +28,11 @@ import java.util.stream.IntStream;
public class SearchPath {
/**
- * Parse the search path and select nodes from the given cluster based on it.
+ * Parses the search path and selects nodes from the given cluster based on it.
*
- * @param searchPath
- * unparsed search path expression (see: model.searchPath in Search
- * API reference)
- * @param cluster
- * the search cluster from which nodes are selected
- * @throws InvalidSearchPathException
- * if the searchPath is malformed
+ * @param searchPath unparsed search path expression (see: model.searchPath in Search API reference)
+ * @param cluster the search cluster from which nodes are selected
+ * @throws InvalidSearchPathException if the searchPath is malformed
* @return list of nodes chosen with the search path, or an empty list in which
* case some other node selection logic should be used
*/
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/TopKEstimator.java b/container-search/src/main/java/com/yahoo/search/dispatch/TopKEstimator.java
index aef1ef2f498..315dfdd4320 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/TopKEstimator.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/TopKEstimator.java
@@ -6,9 +6,11 @@ import org.apache.commons.math3.distribution.TDistribution;
/**
* Use StudentT distribution and estimate how many hits you need from each partition
* to get the globally top-k documents with the desired probability
+ *
* @author baldersheim
*/
public class TopKEstimator {
+
private final TDistribution studentT;
private final double defaultP;
private final boolean estimate;
@@ -19,9 +21,11 @@ public class TopKEstimator {
private static boolean needEstimate(double p) {
return (0.0 < p) && (p < 1.0);
}
+
TopKEstimator(double freedom, double defaultProbability) {
this(freedom, defaultProbability, 0.0);
}
+
public TopKEstimator(double freedom, double defaultProbability, double skewFactor) {
this.studentT = new TDistribution(null, freedom);
defaultP = defaultProbability;
@@ -32,36 +36,44 @@ public class TopKEstimator {
defaultCumulativeProbability[i] = computeCumulativeProbability(i+MIN_N, defaultP);
}
}
+
private double inverseCumulativeProbability(int n, double p) {
if (p == defaultP && (n >= MIN_N) && (n < defaultCumulativeProbability.length + MIN_N)) {
return defaultCumulativeProbability[n - MIN_N];
}
return computeCumulativeProbability(n, p);
}
+
private double computeCumulativeProbability(int n, double p) {
double p_inverse = 1 - (1 - p)/computeN(n);
return studentT.inverseCumulativeProbability(p_inverse);
}
+
private double computeN(double n) {
double p_max = (1 + skewFactor)/n;
return Math.max(1, 1/p_max);
}
+
double estimateExactK(double k, int n_i, double p) {
double n = computeN(n_i);
double variance = k * 1/n * (1 - 1/n);
return k/n + inverseCumulativeProbability(n_i, p) * Math.sqrt(variance);
}
+
double estimateExactK(double k, int n) {
return estimateExactK(k, n, defaultP);
}
+
public int estimateK(int k, int n) {
return (estimate && (n >= MIN_N))
? Math.min(k, (int)Math.ceil(estimateExactK(k, n, defaultP)))
: k;
}
+
public int estimateK(int k, int n, double p) {
return (needEstimate(p) && (n >= MIN_N))
? Math.min(k, (int)Math.ceil(estimateExactK(k, n, p)))
: k;
}
}
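
As a rough illustration of how the estimator above is meant to be used: given k hits wanted globally and n partitions, estimateK returns how many hits to request from each partition so that the global top k is complete with the configured probability. A hedged sketch follows; the degrees-of-freedom, probability and skew values are assumptions rather than what dispatch actually configures, and commons-math3 must be on the classpath.

import com.yahoo.search.dispatch.TopKEstimator;

public class TopKEstimatorSketch {
    public static void main(String[] args) {
        // Assumed parameters: 30 degrees of freedom, 99.99% default probability, no skew.
        TopKEstimator estimator = new TopKEstimator(30.0, 0.9999, 0.0);

        // Ask each of 10 content nodes for enough hits to cover the global top 200.
        int hitsPerNode = estimator.estimateK(200, 10);
        System.out.println("hits to request per node: " + hitsPerNode);
    }
}
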
+
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/rpc/ProtobufSerialization.java b/container-search/src/main/java/com/yahoo/search/dispatch/rpc/ProtobufSerialization.java
index 6dc01f34571..250524fadf2 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/rpc/ProtobufSerialization.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/rpc/ProtobufSerialization.java
@@ -196,7 +196,7 @@ public class ProtobufSerialization {
result.getResult().setTotalHitCount(protobuf.getTotalHitCount());
result.getResult().setCoverage(convertToCoverage(protobuf));
- var haveGrouping = protobuf.getGroupingBlob() != null && !protobuf.getGroupingBlob().isEmpty();
+ var haveGrouping = ! protobuf.getGroupingBlob().isEmpty();
if (haveGrouping) {
BufferSerializer buf = new BufferSerializer(new GrowableByteBuffer(protobuf.getGroupingBlob().asReadOnlyByteBuffer()));
int cnt = buf.getInt(null);
@@ -219,7 +219,7 @@ public class ProtobufSerialization {
}
var slimeTrace = protobuf.getSlimeTrace();
- if (slimeTrace != null && !slimeTrace.isEmpty()) {
+ if ( ! slimeTrace.isEmpty()) {
var traces = new Value.ArrayValue();
traces.add(new SlimeAdapter(BinaryFormat.decode(slimeTrace.toByteArray()).get()));
query.trace(traces, query.getTraceLevel());
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/rpc/RpcPingFactory.java b/container-search/src/main/java/com/yahoo/search/dispatch/rpc/RpcPingFactory.java
index 7d9b3ca1034..01e3ec3ca2b 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/rpc/RpcPingFactory.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/rpc/RpcPingFactory.java
@@ -8,12 +8,16 @@ import com.yahoo.search.dispatch.searchcluster.Pinger;
import com.yahoo.search.dispatch.searchcluster.PongHandler;
public class RpcPingFactory implements PingFactory {
+
private final RpcResourcePool rpcResourcePool;
+
public RpcPingFactory(RpcResourcePool rpcResourcePool) {
this.rpcResourcePool = rpcResourcePool;
}
+
@Override
public Pinger createPinger(Node node, ClusterMonitor<Node> monitor, PongHandler pongHandler) {
return new RpcPing(node, monitor, rpcResourcePool, pongHandler);
}
+
}
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/rpc/RpcProtobufFillInvoker.java b/container-search/src/main/java/com/yahoo/search/dispatch/rpc/RpcProtobufFillInvoker.java
index 341b9b2bce3..8a17be8102e 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/rpc/RpcProtobufFillInvoker.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/rpc/RpcProtobufFillInvoker.java
@@ -38,6 +38,7 @@ import java.util.logging.Logger;
* @author ollivir
*/
public class RpcProtobufFillInvoker extends FillInvoker {
+
private static final String RPC_METHOD = "vespa.searchprotocol.getDocsums";
private static final Logger log = Logger.getLogger(RpcProtobufFillInvoker.class.getName());
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/rpc/RpcResourcePool.java b/container-search/src/main/java/com/yahoo/search/dispatch/rpc/RpcResourcePool.java
index 746461630dd..c3d072b8db6 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/rpc/RpcResourcePool.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/rpc/RpcResourcePool.java
@@ -26,6 +26,7 @@ import java.util.Random;
* @author ollivir
*/
public class RpcResourcePool extends AbstractComponent {
+
/** The compression method which will be used with rpc dispatch. "lz4" (default) and "none" is supported. */
public final static CompoundName dispatchCompression = new CompoundName("dispatch.compression");
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/rpc/RpcSearchInvoker.java b/container-search/src/main/java/com/yahoo/search/dispatch/rpc/RpcSearchInvoker.java
index 4c0b77207d5..20b11efb470 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/rpc/RpcSearchInvoker.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/rpc/RpcSearchInvoker.java
@@ -102,9 +102,7 @@ public class RpcSearchInvoker extends SearchInvoker implements Client.ResponseRe
ProtobufResponse protobufResponse = response.response().get();
CompressionType compression = CompressionType.valueOf(protobufResponse.compression());
byte[] payload = resourcePool.compressor().decompress(protobufResponse.compressedPayload(), compression, protobufResponse.uncompressedSize());
- var result = ProtobufSerialization.deserializeToSearchResult(payload, query, searcher, node.pathIndex(), node.key());
-
- return result;
+ return ProtobufSerialization.deserializeToSearchResult(payload, query, searcher, node.pathIndex(), node.key());
}
@Override
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/Group.java b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/Group.java
index e5066797b06..dca5892e0e7 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/Group.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/Group.java
@@ -92,7 +92,7 @@ public class Group {
}
@Override
- public String toString() { return "search group " + id; }
+ public String toString() { return "group " + id; }
@Override
public int hashCode() { return id; }
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/Node.java b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/Node.java
index 8f465070de4..9807a978647 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/Node.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/Node.java
@@ -71,7 +71,7 @@ public class Node {
}
/** Updates the active documents on this node */
- void setActiveDocuments(long activeDocuments) { this.activeDocuments.set(activeDocuments); }
+ public void setActiveDocuments(long activeDocuments) { this.activeDocuments.set(activeDocuments); }
/** Returns the active documents on this node. If unknown, 0 is returned. */
long getActiveDocuments() { return activeDocuments.get(); }
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/PingFactory.java b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/PingFactory.java
index 2e07d8d61e6..3b9e9573367 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/PingFactory.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/PingFactory.java
@@ -3,7 +3,9 @@ package com.yahoo.search.dispatch.searchcluster;
import com.yahoo.search.cluster.ClusterMonitor;
-
+/**
+ * @author ollivir
+ */
public interface PingFactory {
Pinger createPinger(Node node, ClusterMonitor<Node> monitor, PongHandler pongHandler);
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/Pinger.java b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/Pinger.java
index b4a7ccbf98c..681a7d0af2c 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/Pinger.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/Pinger.java
@@ -8,5 +8,7 @@ package com.yahoo.search.dispatch.searchcluster;
* @author baldersheim
*/
public interface Pinger {
+
void ping();
+
}
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/PongHandler.java b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/PongHandler.java
index 1b39f14fd86..c39426e9d76 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/PongHandler.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/PongHandler.java
@@ -9,5 +9,7 @@ import com.yahoo.prelude.Pong;
* @author baldersheim
*/
public interface PongHandler {
+
void handle(Pong pong);
+
}
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java
index b3b2c23e7dc..ce834b108db 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java
@@ -5,6 +5,7 @@ import com.google.common.collect.ImmutableCollection;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableMultimap;
+import com.google.common.math.Quantiles;
import com.yahoo.container.handler.VipStatus;
import com.yahoo.net.HostName;
import com.yahoo.prelude.Pong;
@@ -13,6 +14,8 @@ import com.yahoo.search.cluster.NodeManager;
import com.yahoo.search.dispatch.TopKEstimator;
import com.yahoo.vespa.config.search.DispatchConfig;
+import java.util.ArrayList;
+import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
@@ -311,33 +314,33 @@ public class SearchCluster implements NodeManager<Node> {
boolean sufficientCoverage = isGroupCoverageSufficient(group.workingNodes(),
group.getActiveDocuments(),
group.getActiveDocuments());
- trackGroupCoverageChanges(0, group, sufficientCoverage, group.getActiveDocuments());
+ trackGroupCoverageChanges(group, sufficientCoverage, group.getActiveDocuments());
}
private void pingIterationCompletedMultipleGroups() {
- int numGroups = orderedGroups().size();
- // Update active documents per group and use it to decide if the group should be active
- long[] activeDocumentsInGroup = new long[numGroups];
- long sumOfActiveDocuments = 0;
- for(int i = 0; i < numGroups; i++) {
- Group group = orderedGroups().get(i);
- group.aggregateNodeValues();
- activeDocumentsInGroup[i] = group.getActiveDocuments();
- sumOfActiveDocuments += activeDocumentsInGroup[i];
- }
-
+ aggregateNodeValues();
+ long medianDocuments = medianDocumentsPerGroup();
boolean anyGroupsSufficientCoverage = false;
- for (int i = 0; i < numGroups; i++) {
- Group group = orderedGroups().get(i);
- long activeDocuments = activeDocumentsInGroup[i];
- long averageDocumentsInOtherGroups = (sumOfActiveDocuments - activeDocuments) / (numGroups - 1);
- boolean sufficientCoverage = isGroupCoverageSufficient(group.workingNodes(), activeDocuments, averageDocumentsInOtherGroups);
+ for (Group group : orderedGroups()) {
+ boolean sufficientCoverage = isGroupCoverageSufficient(group.workingNodes(),
+ group.getActiveDocuments(),
+ medianDocuments);
anyGroupsSufficientCoverage = anyGroupsSufficientCoverage || sufficientCoverage;
updateSufficientCoverage(group, sufficientCoverage);
- trackGroupCoverageChanges(i, group, sufficientCoverage, averageDocumentsInOtherGroups);
+ trackGroupCoverageChanges(group, sufficientCoverage, medianDocuments);
}
}
+ private void aggregateNodeValues() {
+ orderedGroups().forEach(Group::aggregateNodeValues);
+ }
+
+ private long medianDocumentsPerGroup() {
+ if (orderedGroups().isEmpty()) return 0;
+ var activeDocuments = orderedGroups().stream().map(Group::getActiveDocuments).collect(Collectors.toList());
+ return (long)Quantiles.median().compute(activeDocuments);
+ }
+
/**
* Update statistics after a round of issuing pings.
* Note that this doesn't wait for pings to return, so it will typically accumulate data from
@@ -353,10 +356,9 @@ public class SearchCluster implements NodeManager<Node> {
}
}
- private boolean isGroupCoverageSufficient(int workingNodesInGroup, long activeDocuments, long averageDocumentsInOtherGroups) {
- double documentCoverage = 100.0 * (double) activeDocuments / averageDocumentsInOtherGroups;
-
- if (averageDocumentsInOtherGroups > 0 && documentCoverage < dispatchConfig.minActivedocsPercentage())
+ private boolean isGroupCoverageSufficient(int workingNodesInGroup, long activeDocuments, long medianDocuments) {
+ double documentCoverage = 100.0 * (double) activeDocuments / medianDocuments;
+ if (medianDocuments > 0 && documentCoverage < dispatchConfig.minActivedocsPercentage())
return false;
if ( ! isGroupNodeCoverageSufficient(workingNodesInGroup))
@@ -380,56 +382,33 @@ public class SearchCluster implements NodeManager<Node> {
/**
* Calculate whether a subset of nodes in a group has enough coverage
*/
- public boolean isPartialGroupCoverageSufficient(OptionalInt knownGroupId, List<Node> nodes) {
- if (orderedGroups().size() == 1) {
- boolean sufficient = nodes.size() >= wantedGroupSize() - dispatchConfig.maxNodesDownPerGroup();
- return sufficient;
- }
-
- if (knownGroupId.isEmpty()) {
- return false;
- }
- int groupId = knownGroupId.getAsInt();
- Group group = groups().get(groupId);
- if (group == null) {
- return false;
- }
- long sumOfActiveDocuments = 0;
- int otherGroups = 0;
- for (Group g : orderedGroups()) {
- if (g.id() != groupId) {
- sumOfActiveDocuments += g.getActiveDocuments();
- otherGroups++;
- }
- }
- long activeDocuments = 0;
- for (Node n : nodes) {
- activeDocuments += n.getActiveDocuments();
- }
- long averageDocumentsInOtherGroups = sumOfActiveDocuments / otherGroups;
- return isGroupCoverageSufficient(nodes.size(), activeDocuments, averageDocumentsInOtherGroups);
+ public boolean isPartialGroupCoverageSufficient(List<Node> nodes) {
+ if (orderedGroups().size() == 1)
+ return nodes.size() >= wantedGroupSize() - dispatchConfig.maxNodesDownPerGroup();
+ long activeDocuments = nodes.stream().mapToLong(Node::getActiveDocuments).sum();
+ return isGroupCoverageSufficient(nodes.size(), activeDocuments, medianDocumentsPerGroup());
}
- private void trackGroupCoverageChanges(int index, Group group, boolean fullCoverage, long averageDocuments) {
+ private void trackGroupCoverageChanges(Group group, boolean fullCoverage, long medianDocuments) {
if ( ! hasInformationAboutAllNodes()) return; // Be silent until we know what we are talking about.
boolean changed = group.isFullCoverageStatusChanged(fullCoverage);
if (changed || (!fullCoverage && System.currentTimeMillis() > nextLogTime)) {
nextLogTime = System.currentTimeMillis() + 30 * 1000;
int requiredNodes = group.nodes().size() - dispatchConfig.maxNodesDownPerGroup();
if (fullCoverage) {
- log.info(() -> String.format("Cluster %s: Group %d is now good again (%d/%d active docs, coverage %d/%d)",
- clusterId, index, group.getActiveDocuments(), averageDocuments,
- group.workingNodes(), group.nodes().size()));
+ log.info("Cluster " + clusterId + ": " + group + " has full coverage. " +
+ "Active documents: " + group.getActiveDocuments() + "/" + medianDocuments + ", " +
+ "working nodes: " + group.workingNodes() + "/" + group.nodes().size());
} else {
- StringBuilder missing = new StringBuilder();
+ StringBuilder unresponsive = new StringBuilder();
for (var node : group.nodes()) {
- if (node.isWorking() != Boolean.TRUE) {
- missing.append('\n').append(node);
- }
+ if (node.isWorking() != Boolean.TRUE)
+ unresponsive.append('\n').append(node);
}
- log.warning(() -> String.format("Cluster %s: Coverage of group %d is only %d/%d (requires %d) (%d/%d active docs) Failed nodes are:%s",
- clusterId, index, group.workingNodes(), group.nodes().size(), requiredNodes,
- group.getActiveDocuments(), averageDocuments, missing));
+ log.warning("Cluster " + clusterId + ": " + group + " has reduced coverage: " +
+ "Active documents: " + group.getActiveDocuments() + "/" + medianDocuments + ", " +
+ "working nodes: " + group.workingNodes() + "/" + group.nodes().size() + " required " + requiredNodes +
+ ", unresponsive nodes: " + (unresponsive.toString().isEmpty() ? " none" : unresponsive));
}
}
}
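
The coverage change above compares each group's active-document count against the median across groups rather than the average of the other groups. A small standalone sketch of that check, using the same Guava Quantiles call; the document counts and the 97% threshold are hypothetical values, not the cluster's configuration.

import com.google.common.math.Quantiles;
import java.util.List;

public class MedianCoverageSketch {
    public static void main(String[] args) {
        List<Long> activeDocumentsPerGroup = List.of(1_000_000L, 990_000L, 400_000L);
        double medianDocuments = Quantiles.median().compute(activeDocumentsPerGroup);
        double minActivedocsPercentage = 97.0; // assumed dispatch config value

        for (long docs : activeDocumentsPerGroup) {
            double coverage = 100.0 * docs / medianDocuments;
            System.out.printf("group with %d docs: coverage %.1f%%, sufficient=%b%n",
                              docs, coverage, coverage >= minActivedocsPercentage);
        }
    }
}
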
diff --git a/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java b/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java
index 2b27f60ef73..65dc1052a78 100644
--- a/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java
+++ b/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java
@@ -11,14 +11,18 @@ import com.yahoo.container.QrSearchersConfig;
import com.yahoo.container.core.ChainsConfig;
import com.yahoo.container.core.ContainerHttpConfig;
import com.yahoo.container.handler.threadpool.ContainerThreadPool;
+import com.yahoo.container.jdisc.HttpMethodAclMapping;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.container.jdisc.LoggingRequestHandler;
+import com.yahoo.container.jdisc.RequestHandlerSpec;
import com.yahoo.container.jdisc.VespaHeaders;
import com.yahoo.container.logging.AccessLog;
import com.yahoo.io.IOUtils;
import com.yahoo.jdisc.Metric;
import com.yahoo.jdisc.Request;
+import com.yahoo.container.jdisc.AclMapping;
+import com.yahoo.container.jdisc.RequestView;
import com.yahoo.language.Linguistics;
import com.yahoo.net.HostName;
import com.yahoo.net.UriTools;
@@ -103,6 +107,9 @@ public class SearchHandler extends LoggingRequestHandler {
private final AtomicLong numRequestsLeftToTrace;
+ private final static RequestHandlerSpec REQUEST_HANDLER_SPEC = RequestHandlerSpec.builder()
+ .withAclMapping(SearchHandler.aclRequestMapper()).build();
+
private final class MeanConnections implements Callback {
@Override
@@ -631,6 +638,16 @@ public class SearchHandler extends LoggingRequestHandler {
});
}
+ @Override
+ public RequestHandlerSpec requestHandlerSpec() {
+ return REQUEST_HANDLER_SPEC;
+ }
+
+ private static AclMapping aclRequestMapper() {
+ return HttpMethodAclMapping.standard()
+ .override(com.yahoo.jdisc.http.HttpRequest.Method.POST, AclMapping.Action.READ)
+ .build();
+ }
}
diff --git a/container-search/src/main/java/com/yahoo/search/query/parser/ParserEnvironment.java b/container-search/src/main/java/com/yahoo/search/query/parser/ParserEnvironment.java
index 9e53f9d8ea9..94b9bf6ce65 100644
--- a/container-search/src/main/java/com/yahoo/search/query/parser/ParserEnvironment.java
+++ b/container-search/src/main/java/com/yahoo/search/query/parser/ParserEnvironment.java
@@ -4,7 +4,6 @@ package com.yahoo.search.query.parser;
import com.yahoo.language.Linguistics;
import com.yahoo.language.simple.SimpleLinguistics;
import com.yahoo.prelude.IndexFacts;
-import com.yahoo.prelude.query.parser.SpecialTokenRegistry;
import com.yahoo.prelude.query.parser.SpecialTokens;
import com.yahoo.search.Searcher;
import com.yahoo.search.searchchain.Execution;
diff --git a/container-search/src/main/java/com/yahoo/search/querytransform/VespaLowercasingSearcher.java b/container-search/src/main/java/com/yahoo/search/querytransform/VespaLowercasingSearcher.java
index 25488aa7bbc..3aa9e59003d 100644
--- a/container-search/src/main/java/com/yahoo/search/querytransform/VespaLowercasingSearcher.java
+++ b/container-search/src/main/java/com/yahoo/search/querytransform/VespaLowercasingSearcher.java
@@ -31,8 +31,7 @@ public class VespaLowercasingSearcher extends LowercasingSearcher {
public boolean shouldLowercase(WordItem word, IndexFacts.Session indexFacts) {
if (word.isLowercased()) return false;
- Index index = indexFacts.getIndex(word.getIndexName());
- return index.isLowercase() || index.isAttribute();
+ return indexFacts.getIndex(word.getIndexName()).isLowercase();
}
@Override
@@ -41,8 +40,7 @@ public class VespaLowercasingSearcher extends LowercasingSearcher {
StringBuilder sb = new StringBuilder();
sb.append(commonPath).append(".").append(word.getIndexName());
- Index index = indexFacts.getIndex(sb.toString());
- return index.isLowercase() || index.isAttribute();
+ return indexFacts.getIndex(sb.toString()).isLowercase();
}
}
diff --git a/container-search/src/main/java/com/yahoo/search/rendering/JsonRenderer.java b/container-search/src/main/java/com/yahoo/search/rendering/JsonRenderer.java
index c4f850307ae..0a87ad7ec2b 100644
--- a/container-search/src/main/java/com/yahoo/search/rendering/JsonRenderer.java
+++ b/container-search/src/main/java/com/yahoo/search/rendering/JsonRenderer.java
@@ -65,6 +65,8 @@ import java.util.Set;
import java.util.concurrent.Executor;
import java.util.function.LongSupplier;
+import static com.fasterxml.jackson.databind.SerializationFeature.FLUSH_AFTER_WRITE_VALUE;
+
/**
* JSON renderer for search results.
*
@@ -147,7 +149,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
* @return an object mapper for the internal JsonFactory
*/
protected static ObjectMapper createJsonCodec() {
- return new ObjectMapper();
+ return new ObjectMapper().disable(FLUSH_AFTER_WRITE_VALUE);
}
@Override
diff --git a/container-search/src/main/java/com/yahoo/search/yql/ProgramParser.java b/container-search/src/main/java/com/yahoo/search/yql/ProgramParser.java
index bcd5822a6f6..d3f07bae428 100644
--- a/container-search/src/main/java/com/yahoo/search/yql/ProgramParser.java
+++ b/container-search/src/main/java/com/yahoo/search/yql/ProgramParser.java
@@ -14,76 +14,46 @@ import com.yahoo.search.yql.yqlplusParser.AnnotateExpressionContext;
import com.yahoo.search.yql.yqlplusParser.ArgumentContext;
import com.yahoo.search.yql.yqlplusParser.ArgumentsContext;
import com.yahoo.search.yql.yqlplusParser.ArrayLiteralContext;
-import com.yahoo.search.yql.yqlplusParser.ArrayTypeContext;
import com.yahoo.search.yql.yqlplusParser.Call_sourceContext;
import com.yahoo.search.yql.yqlplusParser.ConstantArrayContext;
import com.yahoo.search.yql.yqlplusParser.ConstantExpressionContext;
import com.yahoo.search.yql.yqlplusParser.ConstantMapExpressionContext;
import com.yahoo.search.yql.yqlplusParser.ConstantPropertyNameAndValueContext;
-import com.yahoo.search.yql.yqlplusParser.Delete_statementContext;
import com.yahoo.search.yql.yqlplusParser.DereferencedExpressionContext;
import com.yahoo.search.yql.yqlplusParser.EqualityExpressionContext;
import com.yahoo.search.yql.yqlplusParser.ExpressionContext;
-import com.yahoo.search.yql.yqlplusParser.FallbackContext;
import com.yahoo.search.yql.yqlplusParser.Field_defContext;
-import com.yahoo.search.yql.yqlplusParser.Field_names_specContext;
-import com.yahoo.search.yql.yqlplusParser.Field_values_group_specContext;
-import com.yahoo.search.yql.yqlplusParser.Field_values_specContext;
import com.yahoo.search.yql.yqlplusParser.IdentContext;
-import com.yahoo.search.yql.yqlplusParser.Import_listContext;
-import com.yahoo.search.yql.yqlplusParser.Import_statementContext;
import com.yahoo.search.yql.yqlplusParser.InNotInTargetContext;
-import com.yahoo.search.yql.yqlplusParser.Insert_sourceContext;
-import com.yahoo.search.yql.yqlplusParser.Insert_statementContext;
-import com.yahoo.search.yql.yqlplusParser.Insert_valuesContext;
-import com.yahoo.search.yql.yqlplusParser.JoinExpressionContext;
-import com.yahoo.search.yql.yqlplusParser.Join_exprContext;
import com.yahoo.search.yql.yqlplusParser.LimitContext;
import com.yahoo.search.yql.yqlplusParser.Literal_elementContext;
import com.yahoo.search.yql.yqlplusParser.Literal_listContext;
import com.yahoo.search.yql.yqlplusParser.LogicalANDExpressionContext;
import com.yahoo.search.yql.yqlplusParser.LogicalORExpressionContext;
import com.yahoo.search.yql.yqlplusParser.MapExpressionContext;
-import com.yahoo.search.yql.yqlplusParser.MapTypeContext;
-import com.yahoo.search.yql.yqlplusParser.Merge_componentContext;
-import com.yahoo.search.yql.yqlplusParser.Merge_statementContext;
-import com.yahoo.search.yql.yqlplusParser.ModuleIdContext;
-import com.yahoo.search.yql.yqlplusParser.ModuleNameContext;
import com.yahoo.search.yql.yqlplusParser.MultiplicativeExpressionContext;
import com.yahoo.search.yql.yqlplusParser.Namespaced_nameContext;
-import com.yahoo.search.yql.yqlplusParser.Next_statementContext;
import com.yahoo.search.yql.yqlplusParser.OffsetContext;
import com.yahoo.search.yql.yqlplusParser.OrderbyContext;
import com.yahoo.search.yql.yqlplusParser.Orderby_fieldContext;
import com.yahoo.search.yql.yqlplusParser.Output_specContext;
-import com.yahoo.search.yql.yqlplusParser.Paged_clauseContext;
-import com.yahoo.search.yql.yqlplusParser.ParamsContext;
import com.yahoo.search.yql.yqlplusParser.Pipeline_stepContext;
-import com.yahoo.search.yql.yqlplusParser.Procedure_argumentContext;
-import com.yahoo.search.yql.yqlplusParser.Program_arglistContext;
import com.yahoo.search.yql.yqlplusParser.Project_specContext;
import com.yahoo.search.yql.yqlplusParser.ProgramContext;
import com.yahoo.search.yql.yqlplusParser.PropertyNameAndValueContext;
import com.yahoo.search.yql.yqlplusParser.Query_statementContext;
import com.yahoo.search.yql.yqlplusParser.RelationalExpressionContext;
import com.yahoo.search.yql.yqlplusParser.RelationalOpContext;
-import com.yahoo.search.yql.yqlplusParser.Returning_specContext;
import com.yahoo.search.yql.yqlplusParser.Scalar_literalContext;
-import com.yahoo.search.yql.yqlplusParser.Select_source_joinContext;
import com.yahoo.search.yql.yqlplusParser.Select_source_multiContext;
import com.yahoo.search.yql.yqlplusParser.Select_statementContext;
-import com.yahoo.search.yql.yqlplusParser.Selectvar_statementContext;
import com.yahoo.search.yql.yqlplusParser.Sequence_sourceContext;
import com.yahoo.search.yql.yqlplusParser.Source_listContext;
import com.yahoo.search.yql.yqlplusParser.Source_specContext;
import com.yahoo.search.yql.yqlplusParser.Source_statementContext;
import com.yahoo.search.yql.yqlplusParser.StatementContext;
import com.yahoo.search.yql.yqlplusParser.TimeoutContext;
-import com.yahoo.search.yql.yqlplusParser.TypenameContext;
import com.yahoo.search.yql.yqlplusParser.UnaryExpressionContext;
-import com.yahoo.search.yql.yqlplusParser.Update_statementContext;
-import com.yahoo.search.yql.yqlplusParser.Update_valuesContext;
-import com.yahoo.search.yql.yqlplusParser.ViewContext;
import com.yahoo.search.yql.yqlplusParser.WhereContext;
import org.antlr.v4.runtime.BaseErrorListener;
@@ -126,7 +96,6 @@ final class ProgramParser {
return prepareParser(file.getAbsoluteFile().toString(), new CaseInsensitiveFileStream(file.getAbsolutePath()));
}
-
private yqlplusParser prepareParser(String programName, CharStream input) {
yqlplusLexer lexer = new yqlplusLexer(input);
lexer.removeErrorListeners();
@@ -168,41 +137,18 @@ final class ProgramParser {
try {
return parser.program();
} catch (RecognitionException e) {
- //Retry parsing using full LL mode
+ // Retry parsing using full LL mode
parser.reset();
parser.getInterpreter().setPredictionMode(PredictionMode.LL);
return parser.program();
}
}
- public OperatorNode<StatementOperator> parse(String programName, InputStream program) throws IOException, RecognitionException {
- yqlplusParser parser = prepareParser(programName, program);
- return convertProgram(parseProgram(parser), parser, programName);
- }
-
public OperatorNode<StatementOperator> parse(String programName, String program) throws IOException, RecognitionException {
yqlplusParser parser = prepareParser(programName, program);
return convertProgram(parseProgram(parser), parser, programName);
}
- public OperatorNode<StatementOperator> parse(File input) throws IOException, RecognitionException {
- yqlplusParser parser = prepareParser(input);
- return convertProgram(parseProgram(parser), parser, input.getAbsoluteFile().toString());
- }
-
- public OperatorNode<ExpressionOperator> parseExpression(String input) throws IOException, RecognitionException {
- return convertExpr(prepareParser("<expression>", input).expression(false).getRuleContext(), new Scope());
- }
-
- public OperatorNode<ExpressionOperator> parseExpression(String input, Set<String> visibleAliases) throws IOException, RecognitionException {
- Scope scope = new Scope();
- final Location loc = new Location("<expression>", -1, -1);
- for (String alias : visibleAliases) {
- scope.defineDataSource(loc, alias);
- }
- return convertExpr(prepareParser("<expression>", input).expression(false).getRuleContext(), scope);
- }
-
private Location toLocation(Scope scope, ParseTree node) {
Token start;
if (node instanceof ParserRuleContext) {
@@ -212,8 +158,7 @@ final class ProgramParser {
} else {
throw new ProgramCompileException("Location is not available for type " + node.getClass());
}
- Location location = new Location(scope != null? scope.programName: "<string>", start.getLine(), start.getCharPositionInLine());
- return location;
+ return new Location(scope != null? scope.programName: "<string>", start.getLine(), start.getCharPositionInLine());
}
private List<String> readName(Namespaced_nameContext node) {
@@ -230,14 +175,6 @@ final class ProgramParser {
private final List<String> binding;
- Binding(String moduleName, String exportName) {
- this.binding = ImmutableList.of(moduleName, exportName);
- }
-
- Binding(String moduleName) {
- this.binding = ImmutableList.of(moduleName);
- }
-
Binding(List<String> binding) {
this.binding = binding;
}
@@ -263,13 +200,6 @@ final class ProgramParser {
final yqlplusParser parser;
final String programName;
- Scope() {
- this.parser = null;
- this.programName = null;
- this.root = this;
- this.parent = null;
- }
-
Scope(yqlplusParser parser, String programName) {
this.parser = parser;
this.programName = programName;
@@ -288,15 +218,10 @@ final class ProgramParser {
return parser;
}
- public String getProgramName() {
- return programName;
- }
-
public Set<String> getCursors() {
return cursors;
}
-
boolean isBound(String name) {
// bindings live only in the 'root' node
return root.bindings.containsKey(name);
@@ -329,13 +254,6 @@ final class ProgramParser {
root.bindings.put(symbolName, new Binding(binding));
}
- public void bindModuleSymbol(Location loc, List<String> moduleName, String exportName, String symbolName) {
- ImmutableList.Builder<String> builder = ImmutableList.builder();
- builder.addAll(moduleName);
- builder.add(exportName);
- bindModule(loc, builder.build(), symbolName);
- }
-
public void defineDataSource(Location loc, String name) {
if (isCursor(name)) {
throw new ProgramCompileException(loc, "Alias '%s' is already used.", name);
@@ -376,13 +294,12 @@ final class ProgramParser {
}
}
- private OperatorNode<SequenceOperator> convertSelectOrInsertOrUpdateOrDelete(ParseTree node, Scope scopeParent) {
+ private OperatorNode<SequenceOperator> convertSelect(ParseTree node, Scope scopeParent) {
- Preconditions.checkArgument(node instanceof Select_statementContext || node instanceof Insert_statementContext ||
- node instanceof Update_statementContext || node instanceof Delete_statementContext);
+ Preconditions.checkArgument(node instanceof Select_statementContext);
- // SELECT^ select_field_spec select_source where? orderby? limit? offset? timeout? fallback?
- // select is the only place to define where/orderby/limit/offset and joins
+ // SELECT^ select_field_spec select_source where? orderby? limit? offset? timeout?
+ // select is the only place to define where/orderby/limit/offset
Scope scope = scopeParent.child();
ProjectionBuilder proj = null;
OperatorNode<SequenceOperator> source = null;
@@ -391,29 +308,18 @@ final class ProgramParser {
OperatorNode<ExpressionOperator> offset = null;
OperatorNode<ExpressionOperator> limit = null;
OperatorNode<ExpressionOperator> timeout = null;
- OperatorNode<SequenceOperator> fallback = null;
- OperatorNode<SequenceOperator> insertValues = null;
- OperatorNode<ExpressionOperator> updateValues = null;
- ParseTree sourceNode;
-
- if (node instanceof Select_statementContext ) {
- sourceNode = node.getChild(2) != null ? node.getChild(2).getChild(0):null;
- } else {
- sourceNode = node.getChild(1);
- }
+ ParseTree sourceNode = node.getChild(2) != null ? node.getChild(2).getChild(0):null;
if (sourceNode != null) {
switch (getParseTreeIndex(sourceNode)) {
// ALL_SOURCE and MULTI_SOURCE are how FROM SOURCES
// *|source_name,... are parsed
- // They can't be used directly with the JOIN syntax at this time
- case yqlplusParser.RULE_select_source_all: {
+ case yqlplusParser.RULE_select_source_all:
Location location = toLocation(scope, sourceNode.getChild(2));
source = OperatorNode.create(location, SequenceOperator.ALL);
source.putAnnotation("alias", "row");
scope.defineDataSource(location, "row");
- }
break;
case yqlplusParser.RULE_select_source_multi:
Source_listContext multiSourceContext = ((Select_source_multiContext) sourceNode).source_list();
@@ -421,22 +327,8 @@ final class ProgramParser {
source.putAnnotation("alias", "row");
scope.defineDataSource(toLocation(scope, multiSourceContext), "row");
break;
- case yqlplusParser.RULE_select_source_join:
+ case yqlplusParser.RULE_select_source_from:
source = convertSource((ParserRuleContext) sourceNode.getChild(1), scope);
- List<Join_exprContext> joinContexts = ((Select_source_joinContext)sourceNode).join_expr();
- for (Join_exprContext joinContext:joinContexts) {
- source = convertJoin(joinContext, source, scope);
- }
- break;
- case yqlplusParser.RULE_insert_source:
- Insert_sourceContext insertSourceContext = (Insert_sourceContext) sourceNode;
- source = convertSource((ParserRuleContext)insertSourceContext.getChild(1), scope);
- break;
- case yqlplusParser.RULE_delete_source:
- source = convertSource((ParserRuleContext)sourceNode.getChild(1), scope);
- break;
- case yqlplusParser.RULE_update_source:
- source = convertSource((ParserRuleContext)sourceNode.getChild(0), scope);
break;
}
} else {
@@ -451,9 +343,6 @@ final class ProgramParser {
proj = readProjection(((Project_specContext) child.getChild(0)).field_def(), scope);
}
break;
- case yqlplusParser.RULE_returning_spec:
- proj = readProjection(((Returning_specContext) child).select_field_spec().project_spec().field_def(), scope);
- break;
case yqlplusParser.RULE_where:
filter = convertExpr(((WhereContext) child).expression(), scope);
break;
@@ -475,23 +364,6 @@ final class ProgramParser {
case yqlplusParser.RULE_timeout:
timeout = convertExpr(((TimeoutContext) child).fixed_or_parameter(), scope);
break;
- case yqlplusParser.RULE_fallback:
- fallback = convertQuery(((FallbackContext) child).select_statement(), scope);
- break;
- case yqlplusParser.RULE_insert_values:
- if (child.getChild(0) instanceof yqlplusParser.Query_statementContext) {
- insertValues = convertQuery(child.getChild(0).getChild(0), scope);
- } else {
- insertValues = readBatchValues(((Insert_valuesContext) child).field_names_spec(), ((Insert_valuesContext)child).field_values_group_spec(), scope);
- }
- break;
- case yqlplusParser.RULE_update_values:
- if (getParseTreeIndex(child.getChild(0)) == yqlplusParser.RULE_field_def) {
- updateValues = readValues(((Update_valuesContext)child).field_def(), scope);
- } else {
- updateValues = readValues((Field_names_specContext)child.getChild(0), (Field_values_specContext)child.getChild(2), scope);
- }
- break;
}
}
// now assemble the logical plan
@@ -500,26 +372,6 @@ final class ProgramParser {
if (filter != null) {
result = OperatorNode.create(SequenceOperator.FILTER, result, filter);
}
- // insert values
- if (insertValues != null) {
- result = OperatorNode.create(SequenceOperator.INSERT, result, insertValues);
- }
- // update
- if (updateValues != null) {
- if (filter != null) {
- result = OperatorNode.create(SequenceOperator.UPDATE, source, updateValues, filter);
- } else {
- result = OperatorNode.create(SequenceOperator.UPDATE_ALL, source, updateValues);
- }
- }
- // delete
- if (getParseTreeIndex(node) == yqlplusParser.RULE_delete_statement) {
- if (filter != null) {
- result = OperatorNode.create(SequenceOperator.DELETE, source, filter);
- } else {
- result = OperatorNode.create(SequenceOperator.DELETE_ALL, source);
- }
- }
// then sort (or project and sort)
boolean projectBeforeSort = false;
if (orderby != null) {
@@ -558,30 +410,9 @@ final class ProgramParser {
if (timeout != null) {
result = OperatorNode.create(SequenceOperator.TIMEOUT, result, timeout);
}
- // if there's a fallback, emit a fallback node
- if (fallback != null) {
- result = OperatorNode.create(SequenceOperator.FALLBACK, result, fallback);
- }
return result;
}
- private OperatorNode<ExpressionOperator> readValues(List<Field_defContext> fieldDefs, Scope scope) {
- List<String> fieldNames;
- List<OperatorNode<ExpressionOperator>> fieldValues;
- int numPairs = fieldDefs.size();
- fieldNames = Lists.newArrayListWithExpectedSize(numPairs);
- fieldValues = Lists.newArrayListWithExpectedSize(numPairs);
- for (int j = 0; j < numPairs; j++) {
- ParseTree startNode = fieldDefs.get(j);
- while(startNode.getChildCount() < 3) {
- startNode = startNode.getChild(0);
- }
- fieldNames.add((String) convertExpr(startNode.getChild(0), scope).getArgument(1));
- fieldValues.add(convertExpr(startNode.getChild(2), scope));
- }
- return OperatorNode.create(ExpressionOperator.MAP, fieldNames, fieldValues);
- }
-
private OperatorNode<SequenceOperator> readMultiSource(Scope scope, Source_listContext multiSource) {
List<List<String>> sourceNameList = Lists.newArrayList();
List<Namespaced_nameContext> nameSpaces = multiSource.namespaced_name();
@@ -591,9 +422,7 @@ final class ProgramParser {
}
return OperatorNode.create(toLocation(scope, multiSource), SequenceOperator.MULTISOURCE, sourceNameList);
}
-// pipeline_step
-// : namespaced_name arguments[false]?
-// ;
+
private OperatorNode<SequenceOperator> convertPipe(Query_statementContext queryStatementContext, List<Pipeline_stepContext> nodes, Scope scope) {
OperatorNode<SequenceOperator> result = convertQuery(queryStatementContext.getChild(0), scope.getRoot());
for (Pipeline_stepContext step:nodes) {
@@ -603,7 +432,7 @@ final class ProgramParser {
} else {
List<String> name = readName(step.namespaced_name());
List<OperatorNode<ExpressionOperator>> args = ImmutableList.of();
- //LPAREN (argument[$in_select] (COMMA argument[$in_select])*) RPAREN
+ // LPAREN (argument[$in_select] (COMMA argument[$in_select])*) RPAREN
if (step.getChildCount() > 1) {
ArgumentsContext arguments = step.arguments();
if (arguments.getChildCount() > 2) {
@@ -621,56 +450,25 @@ final class ProgramParser {
return result;
}
- private OperatorNode<SequenceOperator> convertMerge(List<Merge_componentContext> mergeComponentList, Scope scope) {
- Preconditions.checkArgument(mergeComponentList != null);
- List<OperatorNode<SequenceOperator>> sources = Lists.newArrayListWithExpectedSize(mergeComponentList.size());
- for (Merge_componentContext mergeComponent:mergeComponentList) {
- Select_statementContext selectContext = mergeComponent.select_statement();
- Source_statementContext sourceContext = mergeComponent.source_statement();
- if (selectContext != null) {
- sources.add(convertQuery(selectContext, scope.getRoot()));
- } else {
- sources.add(convertQuery(sourceContext, scope.getRoot()));
- }
- }
- return OperatorNode.create(SequenceOperator.MERGE, sources);
- }
-
private OperatorNode<SequenceOperator> convertQuery(ParseTree node, Scope scope) {
- if (node instanceof Select_statementContext
- || node instanceof Insert_statementContext
- || node instanceof Update_statementContext
- || node instanceof Delete_statementContext) {
- return convertSelectOrInsertOrUpdateOrDelete(node, scope.getRoot());
+ if (node instanceof Select_statementContext) {
+ return convertSelect(node, scope.getRoot());
} else if (node instanceof Source_statementContext) { // for pipe
Source_statementContext sourceStatementContext = (Source_statementContext)node;
return convertPipe(sourceStatementContext.query_statement(), sourceStatementContext.pipeline_step(), scope);
- } else if (node instanceof Merge_statementContext) {
- return convertMerge(((Merge_statementContext)node).merge_component(), scope);
} else {
throw new IllegalArgumentException("Unexpected argument type to convertQueryStatement: " + node.toStringTree());
}
}
- private OperatorNode<SequenceOperator> convertJoin(Join_exprContext node, OperatorNode<SequenceOperator> left, Scope scope) {
- Source_specContext sourceSpec = node.source_spec();
- OperatorNode<SequenceOperator> right = convertSource(sourceSpec, scope);
- JoinExpressionContext joinContext = node.joinExpression();
- OperatorNode<ExpressionOperator> joinExpression = readBinOp(ExpressionOperator.valueOf("EQ"), joinContext.getChild(0), joinContext.getChild(2), scope);
- if (joinExpression.getOperator() != ExpressionOperator.EQ) {
- throw new ProgramCompileException(joinExpression.getLocation(), "Unexpected join expression type: %s (expected EQ)", joinExpression.getOperator());
- }
- return OperatorNode.create(toLocation(scope, sourceSpec), node.join_spec().LEFT() != null ? SequenceOperator.LEFT_JOIN : SequenceOperator.JOIN, left, right, joinExpression);
- }
-
private String assignAlias(String alias, ParserRuleContext node, Scope scope) {
if (alias == null) {
alias = "source";
}
- if (node != null && node instanceof yqlplusParser.Alias_defContext) {
- //alias_def : (AS? ID);
+ if (node instanceof yqlplusParser.Alias_defContext) {
+ // alias_def : (AS? ID);
ParseTree idChild = node;
if (node.getChildCount() > 1) {
idChild = node.getChild(1);
@@ -690,20 +488,14 @@ final class ProgramParser {
scope.defineDataSource(null, candidate);
return alias;
}
- }
-
- private OperatorNode<SequenceOperator> convertSource(ParserRuleContext sourceSpecNode, Scope scope) {
+ }
+ private OperatorNode<SequenceOperator> convertSource(ParserRuleContext sourceSpecNode, Scope scope) {
// DataSources
String alias;
OperatorNode<SequenceOperator> result;
ParserRuleContext dataSourceNode = sourceSpecNode;
ParserRuleContext aliasContext = null;
- //data_source
- //: call_source
- //| LPAREN source_statement RPAREN
- //| sequence_source
- //;
if (sourceSpecNode instanceof Source_specContext) {
dataSourceNode = (ParserRuleContext)sourceSpecNode.getChild(0);
if (sourceSpecNode.getChildCount() == 2) {
@@ -717,7 +509,6 @@ final class ProgramParser {
}
}
switch (getParseTreeIndex(dataSourceNode)) {
- case yqlplusParser.RULE_write_data_source:
case yqlplusParser.RULE_call_source: {
List<String> names = readName(dataSourceNode.getChild(Namespaced_nameContext.class, 0));
alias = assignAlias(names.get(names.size() - 1), aliasContext, scope);
@@ -763,60 +554,9 @@ final class ProgramParser {
return result;
}
- private OperatorNode<TypeOperator> decodeType(Scope scope, TypenameContext type) {
-
- TypeOperator op;
- ParseTree typeNode = type.getChild(0);
- switch (getParseTreeIndex(typeNode)) {
- case yqlplusParser.TYPE_BOOLEAN:
- op = TypeOperator.BOOLEAN;
- break;
- case yqlplusParser.TYPE_BYTE:
- op = TypeOperator.BYTE;
- break;
- case yqlplusParser.TYPE_DOUBLE:
- op = TypeOperator.DOUBLE;
- break;
- case yqlplusParser.TYPE_INT16:
- op = TypeOperator.INT16;
- break;
- case yqlplusParser.TYPE_INT32:
- op = TypeOperator.INT32;
- break;
- case yqlplusParser.TYPE_INT64:
- op = TypeOperator.INT64;
- break;
- case yqlplusParser.TYPE_STRING:
- op = TypeOperator.STRING;
- break;
- case yqlplusParser.TYPE_TIMESTAMP:
- op = TypeOperator.TIMESTAMP;
- break;
- case yqlplusParser.RULE_arrayType:
- return OperatorNode.create(toLocation(scope, typeNode), TypeOperator.ARRAY, decodeType(scope, ((ArrayTypeContext)typeNode).getChild(TypenameContext.class, 0)));
- case yqlplusParser.RULE_mapType:
- return OperatorNode.create(toLocation(scope, typeNode), TypeOperator.MAP, decodeType(scope, ((MapTypeContext)typeNode).getChild(TypenameContext.class, 0)));
- default:
- throw new ProgramCompileException("Unknown type " + typeNode.getText());
- }
- return OperatorNode.create(toLocation(scope, typeNode), op);
- }
-
- private List<String> createBindingName(ParseTree node) {
- if (node instanceof ModuleNameContext) {
- if (((ModuleNameContext)node).namespaced_name() != null) {
- return readName(((ModuleNameContext)node).namespaced_name());
- } else if (((ModuleNameContext)node).literalString() != null) {
- return ImmutableList.of(((ModuleNameContext)node).literalString().STRING().getText());
- }
- } else if (node instanceof ModuleIdContext) {
- return ImmutableList.of(node.getText());
- }
- throw new ProgramCompileException("Wrong context");
- }
-
- private OperatorNode<StatementOperator> convertProgram(
- ParserRuleContext program, yqlplusParser parser, String programName) {
+ private OperatorNode<StatementOperator> convertProgram(ParserRuleContext program,
+ yqlplusParser parser,
+ String programName) {
Scope scope = new Scope(parser, programName);
List<OperatorNode<StatementOperator>> stmts = Lists.newArrayList();
int output = 0;
@@ -825,148 +565,37 @@ final class ProgramParser {
continue;
}
ParserRuleContext ruleContext = (ParserRuleContext) node;
- switch (ruleContext.getRuleIndex()) {
- case yqlplusParser.RULE_params: {
- // ^(ARGUMENT ident typeref expression?)
- ParamsContext paramsContext = (ParamsContext) ruleContext;
- Program_arglistContext program_arglistContext = paramsContext.program_arglist();
- if (program_arglistContext != null) {
- List<Procedure_argumentContext> argList = program_arglistContext.procedure_argument();
- for (Procedure_argumentContext procedureArgumentContext : argList) {
- String name = procedureArgumentContext.ident().getText();
- OperatorNode<TypeOperator> type = decodeType(scope, procedureArgumentContext.getChild(TypenameContext.class, 0));
- OperatorNode<ExpressionOperator> defaultValue = OperatorNode.create(ExpressionOperator.NULL);
- if (procedureArgumentContext.expression() != null) {
- defaultValue = convertExpr(procedureArgumentContext.expression(), scope);
- }
- scope.defineVariable(toLocation(scope, procedureArgumentContext), name);
- stmts.add(OperatorNode.create(StatementOperator.ARGUMENT, name, type, defaultValue));
- }
- }
- break;
- }
- case yqlplusParser.RULE_import_statement: {
- Import_statementContext importContext = (Import_statementContext) ruleContext;
- if (null == importContext.import_list()) {
- List<String> name = createBindingName(node.getChild(1));
- String target;
- Location location = toLocation(scope, node.getChild(1));
- if (node.getChildCount() == 2) {
- target = name.get(0);
- } else if (node.getChildCount() == 4) {
- target = node.getChild(3).getText();
- } else {
- throw new ProgramCompileException("Unknown node count for IMPORT: " + node.toStringTree());
- }
- scope.bindModule(location, name, target);
- } else {
- // | FROM moduleName IMPORT import_list -> ^(IMPORT_FROM
- // moduleName import_list+)
- Import_listContext importListContext = importContext.import_list();
- List<String> name = createBindingName(importContext.moduleName());
- Location location = toLocation(scope, importContext.moduleName());
- List<ModuleIdContext> moduleIds = importListContext.moduleId();
- List<String> symbols = Lists.newArrayListWithExpectedSize(moduleIds.size());
- for (ModuleIdContext cnode : moduleIds) {
- symbols.add(cnode.ID().getText());
- }
- for (String sym : symbols) {
- scope.bindModuleSymbol(location, name, sym, sym);
- }
- }
- break;
- }
+ if (ruleContext.getRuleIndex() != yqlplusParser.RULE_statement)
+ throw new ProgramCompileException("Unknown program element: " + node.getText());
- // DDL
- case yqlplusParser.RULE_ddl:
- ruleContext = (ParserRuleContext)ruleContext.getChild(0);
- break;
- case yqlplusParser.RULE_view: {
- // view and projection expansion now has to be done by the
- // execution engine
- // since views/projections, in order to be useful, have to
- // support being used from outside the same program
- ViewContext viewContext = (ViewContext) ruleContext;
- Location loc = toLocation(scope, viewContext);
- scope.getRoot().defineView(loc, viewContext.ID().getText());
- stmts.add(OperatorNode.create(loc, StatementOperator.DEFINE_VIEW, viewContext.ID().getText(), convertQuery(viewContext.source_statement(), scope.getRoot())));
- break;
+ // ^(STATEMENT_QUERY source_statement paged_clause? output_spec?)
+ StatementContext statementContext = (StatementContext) ruleContext;
+ Source_statementContext source_statement = statementContext.output_statement().source_statement();
+ OperatorNode<SequenceOperator> query;
+ if (source_statement.getChildCount() == 1) {
+ query = convertQuery( source_statement.query_statement().getChild(0), scope);
+ } else {
+ query = convertQuery(source_statement, scope);
}
- case yqlplusParser.RULE_statement: {
- // ^(STATEMENT_QUERY source_statement paged_clause?
- // output_spec?)
- StatementContext statementContext = (StatementContext) ruleContext;
- switch (getParseTreeIndex(ruleContext.getChild(0))) {
- case yqlplusParser.RULE_selectvar_statement: {
- // ^(STATEMENT_SELECTVAR ident source_statement)
- Selectvar_statementContext selectVarContext = (Selectvar_statementContext) ruleContext.getChild(0);
- String variable = selectVarContext.ident().getText();
- OperatorNode<SequenceOperator> query = convertQuery(selectVarContext.source_statement(), scope);
- Location location = toLocation(scope, selectVarContext.ident());
- scope.defineVariable(location, variable);
- stmts.add(OperatorNode.create(location, StatementOperator.EXECUTE, query, variable));
- break;
- }
- case yqlplusParser.RULE_next_statement: {
- // NEXT^ literalString OUTPUT! AS! ident
- Next_statementContext nextStateContext = (Next_statementContext) ruleContext.getChild(0);
- String continuationValue = StringUnescaper.unquote(nextStateContext.literalString().getText());
- String variable = nextStateContext.ident().getText();
- Location location = toLocation(scope, node);
- OperatorNode<SequenceOperator> next = OperatorNode.create(location, SequenceOperator.NEXT, continuationValue);
- stmts.add(OperatorNode.create(location, StatementOperator.EXECUTE, next, variable));
- stmts.add(OperatorNode.create(location, StatementOperator.OUTPUT, variable));
- scope.defineVariable(location, variable);
- break;
+ String variable = "result" + (++output);
+ boolean isCountVariable = false;
+ ParseTree outputStatement = node.getChild(0);
+ Location location = toLocation(scope, outputStatement);
+ for (int i = 1; i < outputStatement.getChildCount(); ++i) {
+ ParseTree child = outputStatement.getChild(i);
+ if ( getParseTreeIndex(child) != yqlplusParser.RULE_output_spec)
+ throw new ProgramCompileException( "Unknown statement attribute: " + child.toStringTree());
+
+ Output_specContext outputSpecContext = (Output_specContext) child;
+ variable = outputSpecContext.ident().getText();
+ if (outputSpecContext.COUNT() != null) {
+ isCountVariable = true;
}
- case yqlplusParser.RULE_output_statement:
- Source_statementContext source_statement = statementContext.output_statement().source_statement();
- OperatorNode<SequenceOperator> query;
- if (source_statement.getChildCount() == 1) {
- query = convertQuery( source_statement.query_statement().getChild(0), scope);
- } else {
- query = convertQuery(source_statement, scope);
- }
- String variable = "result" + (++output);
- boolean isCountVariable = false;
- OperatorNode<ExpressionOperator> pageSize = null;
- ParseTree outputStatement = node.getChild(0);
- Location location = toLocation(scope, outputStatement);
- for (int i = 1; i < outputStatement.getChildCount(); ++i) {
- ParseTree child = outputStatement.getChild(i);
- switch (getParseTreeIndex(child)) {
- case yqlplusParser.RULE_paged_clause:
- Paged_clauseContext pagedContext = (Paged_clauseContext) child;
- pageSize = convertExpr(pagedContext.fixed_or_parameter(), scope);
- break;
- case yqlplusParser.RULE_output_spec:
- Output_specContext outputSpecContext = (Output_specContext) child;
- variable = outputSpecContext.ident().getText();
- if (outputSpecContext.COUNT() != null) {
- isCountVariable = true;
- }
- break;
- default:
- throw new ProgramCompileException( "Unknown statement attribute: " + child.toStringTree());
- }
- }
- scope.defineVariable(location, variable);
- if (pageSize != null) {
- query = OperatorNode.create(SequenceOperator.PAGE, query, pageSize);
- }
- stmts.add(OperatorNode.create(location, StatementOperator.EXECUTE, query, variable));
- stmts.add(OperatorNode.create(location, isCountVariable ? StatementOperator.COUNT:StatementOperator.OUTPUT, variable));
- }
- break;
- }
- default:
- throw new ProgramCompileException("Unknown program element: " + node.getText());
}
+ scope.defineVariable(location, variable);
+ stmts.add(OperatorNode.create(location, StatementOperator.EXECUTE, query, variable));
+ stmts.add(OperatorNode.create(location, isCountVariable ? StatementOperator.COUNT:StatementOperator.OUTPUT, variable));
}
- // traverse the tree, find all of the namespaced calls not covered by
- // imports so we can
- // define "implicit" import statements for them (to make engine
- // implementation easier)
return OperatorNode.create(StatementOperator.PROGRAM, stmts);
}
@@ -982,19 +611,19 @@ final class ProgramParser {
private ProjectionBuilder readProjection(List<Field_defContext> fieldDefs, Scope scope) {
if (null == fieldDefs)
- throw new ProgramCompileException("Null fieldDefs");
+ throw new ProgramCompileException("Null fieldDefs");
ProjectionBuilder proj = new ProjectionBuilder();
for (Field_defContext rulenode : fieldDefs) {
// FIELD
- // expression alias_def?
- OperatorNode<ExpressionOperator> expr = convertExpr(rulenode.getChild(0), scope);
+ // expression alias_def?
+ OperatorNode<ExpressionOperator> expr = convertExpr(rulenode.getChild(0), scope);
- String aliasName = null;
- if (rulenode.getChildCount() > 1) {
- // ^(ALIAS ID)
- aliasName = rulenode.alias_def().ID().getText();
- }
- proj.addField(aliasName, expr);
+ String aliasName = null;
+ if (rulenode.getChildCount() > 1) {
+ // ^(ALIAS ID)
+ aliasName = rulenode.alias_def().ID().getText();
+ }
+ proj.addField(aliasName, expr);
// no grammar for the other rule types at this time
}
return proj;
@@ -1009,358 +638,345 @@ final class ProgramParser {
}
public OperatorNode<ExpressionOperator> convertExpr(ParseTree parseTree, Scope scope) {
- switch (getParseTreeIndex(parseTree)) {
- case yqlplusParser.RULE_vespa_grouping: {
- ParseTree firstChild = parseTree.getChild(0);
- if (getParseTreeIndex(firstChild) == yqlplusParser.RULE_annotation) {
- ParseTree secondChild = parseTree.getChild(1);
- OperatorNode<ExpressionOperator> annotation = convertExpr(((AnnotationContext) firstChild)
- .constantMapExpression(), scope);
- OperatorNode<ExpressionOperator> expr = OperatorNode.create(toLocation(scope, secondChild),
- ExpressionOperator.VESPA_GROUPING, secondChild.getText());
- List<String> names = annotation.getArgument(0);
- List<OperatorNode<ExpressionOperator>> annotates = annotation.getArgument(1);
- for (int i = 0; i < names.size(); ++i) {
- expr.putAnnotation(names.get(i), readConstantExpression(annotates.get(i)));
- }
- return expr;
- } else {
- return OperatorNode.create(toLocation(scope, firstChild), ExpressionOperator.VESPA_GROUPING,
- firstChild.getText());
- }
- }
- case yqlplusParser.RULE_nullOperator:
- return OperatorNode.create(ExpressionOperator.NULL);
- case yqlplusParser.RULE_argument:
- return convertExpr(parseTree.getChild(0), scope);
- case yqlplusParser.RULE_fixed_or_parameter: {
- ParseTree firstChild = parseTree.getChild(0);
- if (getParseTreeIndex(firstChild) == yqlplusParser.INT) {
- return OperatorNode.create(toLocation(scope, firstChild), ExpressionOperator.LITERAL, Integer.valueOf(firstChild.getText()));
- } else {
- return convertExpr(firstChild, scope);
- }
- }
- case yqlplusParser.RULE_constantMapExpression: {
- List<ConstantPropertyNameAndValueContext> propertyList = ((ConstantMapExpressionContext) parseTree).constantPropertyNameAndValue();
- List<String> names = Lists.newArrayListWithExpectedSize(propertyList.size());
- List<OperatorNode<ExpressionOperator>> exprs = Lists.newArrayListWithExpectedSize(propertyList.size());
- for (ConstantPropertyNameAndValueContext child : propertyList) {
- // : propertyName ':' expression[$expression::namespace] ->
- // ^(PROPERTY propertyName expression)
- names.add(StringUnescaper.unquote(child.getChild(0).getText()));
- exprs.add(convertExpr(child.getChild(2), scope));
+ switch (getParseTreeIndex(parseTree)) {
+ case yqlplusParser.RULE_vespa_grouping: {
+ ParseTree firstChild = parseTree.getChild(0);
+ if (getParseTreeIndex(firstChild) == yqlplusParser.RULE_annotation) {
+ ParseTree secondChild = parseTree.getChild(1);
+ OperatorNode<ExpressionOperator> annotation = convertExpr(((AnnotationContext) firstChild)
+ .constantMapExpression(), scope);
+ OperatorNode<ExpressionOperator> expr = OperatorNode.create(toLocation(scope, secondChild),
+ ExpressionOperator.VESPA_GROUPING, secondChild.getText());
+ List<String> names = annotation.getArgument(0);
+ List<OperatorNode<ExpressionOperator>> annotates = annotation.getArgument(1);
+ for (int i = 0; i < names.size(); ++i) {
+ expr.putAnnotation(names.get(i), readConstantExpression(annotates.get(i)));
+ }
+ return expr;
+ } else {
+ return OperatorNode.create(toLocation(scope, firstChild), ExpressionOperator.VESPA_GROUPING,
+ firstChild.getText());
+ }
}
- return OperatorNode.create(toLocation(scope, parseTree),ExpressionOperator.MAP, names, exprs);
- }
- case yqlplusParser.RULE_mapExpression: {
- List<PropertyNameAndValueContext> propertyList = ((MapExpressionContext)parseTree).propertyNameAndValue();
- List<String> names = Lists.newArrayListWithExpectedSize(propertyList.size());
- List<OperatorNode<ExpressionOperator>> exprs = Lists.newArrayListWithCapacity(propertyList.size());
- for (PropertyNameAndValueContext child : propertyList) {
- // : propertyName ':' expression[$expression::namespace] ->
- // ^(PROPERTY propertyName expression)
- names.add(StringUnescaper.unquote(child.getChild(0).getText()));
- exprs.add(convertExpr(child.getChild(2), scope));
- }
- return OperatorNode.create(toLocation(scope, parseTree),ExpressionOperator.MAP, names, exprs);
- }
- case yqlplusParser.RULE_constantArray: {
- List<ConstantExpressionContext> expressionList = ((ConstantArrayContext)parseTree).constantExpression();
- List<OperatorNode<ExpressionOperator>> values = Lists.newArrayListWithExpectedSize(expressionList.size());
- for (ConstantExpressionContext expr : expressionList) {
- values.add(convertExpr(expr, scope));
+ case yqlplusParser.RULE_nullOperator:
+ return OperatorNode.create(ExpressionOperator.NULL);
+ case yqlplusParser.RULE_argument:
+ return convertExpr(parseTree.getChild(0), scope);
+ case yqlplusParser.RULE_fixed_or_parameter: {
+ ParseTree firstChild = parseTree.getChild(0);
+ if (getParseTreeIndex(firstChild) == yqlplusParser.INT) {
+ return OperatorNode.create(toLocation(scope, firstChild), ExpressionOperator.LITERAL, Integer.valueOf(firstChild.getText()));
+ } else {
+ return convertExpr(firstChild, scope);
+ }
+ }
+ case yqlplusParser.RULE_constantMapExpression: {
+ List<ConstantPropertyNameAndValueContext> propertyList = ((ConstantMapExpressionContext) parseTree).constantPropertyNameAndValue();
+ List<String> names = Lists.newArrayListWithExpectedSize(propertyList.size());
+ List<OperatorNode<ExpressionOperator>> exprs = Lists.newArrayListWithExpectedSize(propertyList.size());
+ for (ConstantPropertyNameAndValueContext child : propertyList) {
+ // : propertyName ':' expression[$expression::namespace] ->
+ // ^(PROPERTY propertyName expression)
+ names.add(StringUnescaper.unquote(child.getChild(0).getText()));
+ exprs.add(convertExpr(child.getChild(2), scope));
+ }
+ return OperatorNode.create(toLocation(scope, parseTree),ExpressionOperator.MAP, names, exprs);
+ }
+ case yqlplusParser.RULE_mapExpression: {
+ List<PropertyNameAndValueContext> propertyList = ((MapExpressionContext)parseTree).propertyNameAndValue();
+ List<String> names = Lists.newArrayListWithExpectedSize(propertyList.size());
+ List<OperatorNode<ExpressionOperator>> exprs = Lists.newArrayListWithCapacity(propertyList.size());
+ for (PropertyNameAndValueContext child : propertyList) {
+ // : propertyName ':' expression[$expression::namespace] ->
+ // ^(PROPERTY propertyName expression)
+ names.add(StringUnescaper.unquote(child.getChild(0).getText()));
+ exprs.add(convertExpr(child.getChild(2), scope));
+ }
+ return OperatorNode.create(toLocation(scope, parseTree),ExpressionOperator.MAP, names, exprs);
+ }
+ case yqlplusParser.RULE_constantArray: {
+ List<ConstantExpressionContext> expressionList = ((ConstantArrayContext)parseTree).constantExpression();
+ List<OperatorNode<ExpressionOperator>> values = Lists.newArrayListWithExpectedSize(expressionList.size());
+ for (ConstantExpressionContext expr : expressionList) {
+ values.add(convertExpr(expr, scope));
+ }
+ return OperatorNode.create(toLocation(scope, expressionList.isEmpty()? parseTree:expressionList.get(0)), ExpressionOperator.ARRAY, values);
+ }
+ case yqlplusParser.RULE_arrayLiteral: {
+ List<ExpressionContext> expressionList = ((ArrayLiteralContext) parseTree).expression();
+ List<OperatorNode<ExpressionOperator>> values = Lists.newArrayListWithExpectedSize(expressionList.size());
+ for (ExpressionContext expr : expressionList) {
+ values.add(convertExpr(expr, scope));
+ }
+ return OperatorNode.create(toLocation(scope, expressionList.isEmpty()? parseTree:expressionList.get(0)), ExpressionOperator.ARRAY, values);
+ }
+ // dereferencedExpression: primaryExpression(indexref[in_select]| propertyref)*
+ case yqlplusParser.RULE_dereferencedExpression: {
+ DereferencedExpressionContext dereferencedExpression = (DereferencedExpressionContext) parseTree;
+ Iterator<ParseTree> it = dereferencedExpression.children.iterator();
+ OperatorNode<ExpressionOperator> result = convertExpr(it.next(), scope);
+ while (it.hasNext()) {
+ ParseTree defTree = it.next();
+ if (getParseTreeIndex(defTree) == yqlplusParser.RULE_propertyref) {
+ // DOT nm=ID
+ result = OperatorNode.create(toLocation(scope, parseTree), ExpressionOperator.PROPREF, result, defTree.getChild(1).getText());
+ } else {
+ // indexref
+ result = OperatorNode.create(toLocation(scope, parseTree), ExpressionOperator.INDEX, result, convertExpr(defTree.getChild(1), scope));
+ }
+ }
+ return result;
+ }
+ case yqlplusParser.RULE_primaryExpression: {
+ // ^(CALL namespaced_name arguments)
+ ParseTree firstChild = parseTree.getChild(0);
+ switch (getParseTreeIndex(firstChild)) {
+ case yqlplusParser.RULE_fieldref: {
+ return convertExpr(firstChild, scope);
+ }
+ case yqlplusParser.RULE_callExpresion: {
+ List<ArgumentContext> args = ((ArgumentsContext) firstChild.getChild(1)).argument();
+ List<OperatorNode<ExpressionOperator>> arguments = Lists.newArrayListWithExpectedSize(args.size());
+ for (ArgumentContext argContext : args) {
+ arguments.add(convertExpr(argContext.expression(),scope));
+ }
+ return OperatorNode.create(toLocation(scope, parseTree), ExpressionOperator.CALL, scope.resolvePath(readName((Namespaced_nameContext) firstChild.getChild(0))), arguments);
+ }
+ case yqlplusParser.RULE_parameter:
+ // external variable reference
+ return OperatorNode.create(toLocation(scope, firstChild), ExpressionOperator.VARREF, firstChild.getChild(1).getText());
+ case yqlplusParser.RULE_scalar_literal:
+ case yqlplusParser.RULE_arrayLiteral:
+ case yqlplusParser.RULE_mapExpression:
+ return convertExpr(firstChild, scope);
+ case yqlplusParser.LPAREN:
+ return convertExpr(parseTree.getChild(1), scope);
+ }
+ break;
+ }
+ case yqlplusParser.RULE_parameter: {
+ // external variable reference
+ ParserRuleContext parameterContext = (ParserRuleContext) parseTree;
+ IdentContext identContext = parameterContext.getRuleContext(IdentContext.class, 0);
+ return OperatorNode.create(toLocation(scope, identContext), ExpressionOperator.VARREF, identContext.getText());
+ }
+ case yqlplusParser.RULE_annotateExpression: {
+ //annotation logicalORExpression
+ AnnotationContext annotateExpressionContext = ((AnnotateExpressionContext)parseTree).annotation();
+ OperatorNode<ExpressionOperator> annotation = convertExpr(annotateExpressionContext.constantMapExpression(), scope);
+ OperatorNode<ExpressionOperator> expr = convertExpr(parseTree.getChild(1), scope);
+ List<String> names = annotation.getArgument(0);
+ List<OperatorNode<ExpressionOperator>> annotates = annotation.getArgument(1);
+ for (int i = 0; i < names.size(); ++i) {
+ expr.putAnnotation(names.get(i), readConstantExpression(annotates.get(i)));
+ }
+ return expr;
+ }
+ case yqlplusParser.RULE_expression: {
+ return convertExpr(parseTree.getChild(0), scope);
+ }
+ case yqlplusParser.RULE_logicalANDExpression:
+ LogicalANDExpressionContext andExpressionContext = (LogicalANDExpressionContext) parseTree;
+ return readConjOp(ExpressionOperator.AND, andExpressionContext.equalityExpression(), scope);
+ case yqlplusParser.RULE_logicalORExpression: {
+ int childCount = parseTree.getChildCount();
+ LogicalORExpressionContext logicalORExpressionContext = (LogicalORExpressionContext) parseTree;
+ if (childCount > 1) {
+ return readConjOrOp(ExpressionOperator.OR, logicalORExpressionContext, scope);
+ } else {
+ List<EqualityExpressionContext> equalityExpressionList = ((LogicalANDExpressionContext) parseTree.getChild(0)).equalityExpression();
+ if (equalityExpressionList.size() > 1) {
+ return readConjOp(ExpressionOperator.AND, equalityExpressionList, scope);
+ } else {
+ return convertExpr(equalityExpressionList.get(0), scope);
+ }
+ }
+ }
+ case yqlplusParser.RULE_equalityExpression: {
+ EqualityExpressionContext equalityExpression = (EqualityExpressionContext) parseTree;
+ RelationalExpressionContext relationalExpressionContext = equalityExpression.relationalExpression(0);
+ OperatorNode<ExpressionOperator> expr = convertExpr(relationalExpressionContext, scope);
+ InNotInTargetContext inNotInTarget = equalityExpression.inNotInTarget();
+ int childCount = equalityExpression.getChildCount();
+ if (childCount == 1) {
+ return expr;
+ }
+ if (inNotInTarget != null) {
+ Literal_listContext literalListContext = inNotInTarget.literal_list();
+ boolean isIN = equalityExpression.IN() != null;
+ if (literalListContext == null) {
+ Select_statementContext selectStatementContext = inNotInTarget.select_statement();
+ OperatorNode<SequenceOperator> query = convertQuery(selectStatementContext, scope);
+ return OperatorNode.create(expr.getLocation(),isIN ? ExpressionOperator.IN_QUERY: ExpressionOperator.NOT_IN_QUERY, expr, query);
+ } else {
+ // we need to identify the type of the target; if it's a
+ // scalar we need to wrap it in a CREATE_ARRAY
+ // if it's already a CREATE ARRAY then it's fine, otherwise
+ // we need to know the variable type
+ // return readBinOp(node.getType() == yqlplusParser.IN ?
+ // ExpressionOperator.IN : ExpressionOperator.NOT_IN, node,
+ // scope);
+ return readBinOp(isIN ? ExpressionOperator.IN: ExpressionOperator.NOT_IN, equalityExpression.getChild(0), literalListContext, scope);
+ }
+
+ } else {
+ ParseTree firstChild = equalityExpression.getChild(1);
+ if (equalityExpression.getChildCount() == 2) {
+ switch (getParseTreeIndex(firstChild)) {
+ case yqlplusParser.IS_NULL:
+ return readUnOp(ExpressionOperator.IS_NULL, relationalExpressionContext, scope);
+ case yqlplusParser.IS_NOT_NULL:
+ return readUnOp(ExpressionOperator.IS_NOT_NULL, relationalExpressionContext, scope);
+ }
+ } else {
+ switch (getParseTreeIndex(firstChild.getChild(0))) {
+ case yqlplusParser.EQ:
+ return readBinOp(ExpressionOperator.EQ, equalityExpression.getChild(0), equalityExpression.getChild(2), scope);
+ case yqlplusParser.NEQ:
+ return readBinOp(ExpressionOperator.NEQ, equalityExpression.getChild(0), equalityExpression.getChild(2), scope);
+ case yqlplusParser.LIKE:
+ return readBinOp(ExpressionOperator.LIKE, equalityExpression.getChild(0), equalityExpression.getChild(2), scope);
+ case yqlplusParser.NOTLIKE:
+ return readBinOp(ExpressionOperator.NOT_LIKE, equalityExpression.getChild(0), equalityExpression.getChild(2), scope);
+ case yqlplusParser.MATCHES:
+ return readBinOp(ExpressionOperator.MATCHES, equalityExpression.getChild(0), equalityExpression.getChild(2), scope);
+ case yqlplusParser.NOTMATCHES:
+ return readBinOp(ExpressionOperator.NOT_MATCHES, equalityExpression.getChild(0), equalityExpression.getChild(2), scope);
+ case yqlplusParser.CONTAINS:
+ return readBinOp(ExpressionOperator.CONTAINS, equalityExpression.getChild(0), equalityExpression.getChild(2), scope);
+ }
+ }
+
+ }
+ break;
+ }
+ case yqlplusParser.RULE_relationalExpression: {
+ RelationalExpressionContext relationalExpressionContext = (RelationalExpressionContext) parseTree;
+ RelationalOpContext opContext = relationalExpressionContext.relationalOp();
+ if (opContext != null) {
+ switch (getParseTreeIndex(relationalExpressionContext.relationalOp().getChild(0))) {
+ case yqlplusParser.LT:
+ return readBinOp(ExpressionOperator.LT, parseTree, scope);
+ case yqlplusParser.LTEQ:
+ return readBinOp(ExpressionOperator.LTEQ, parseTree, scope);
+ case yqlplusParser.GT:
+ return readBinOp(ExpressionOperator.GT, parseTree, scope);
+ case yqlplusParser.GTEQ:
+ return readBinOp(ExpressionOperator.GTEQ, parseTree, scope);
+ }
+ } else {
+ return convertExpr(relationalExpressionContext.additiveExpression(0), scope);
+ }
+ }
+ break;
+ case yqlplusParser.RULE_additiveExpression:
+ case yqlplusParser.RULE_multiplicativeExpression: {
+ if (parseTree.getChildCount() > 1) {
+ String opStr = parseTree.getChild(1).getText();
+ switch (opStr) {
+ case "+":
+ return readBinOp(ExpressionOperator.ADD, parseTree, scope);
+ case "-":
+ return readBinOp(ExpressionOperator.SUB, parseTree, scope);
+ case "/":
+ return readBinOp(ExpressionOperator.DIV, parseTree, scope);
+ case "*":
+ return readBinOp(ExpressionOperator.MULT, parseTree, scope);
+ case "%":
+ return readBinOp(ExpressionOperator.MOD, parseTree, scope);
+ default:
+ if (parseTree.getChild(0) instanceof UnaryExpressionContext) {
+ return convertExpr(parseTree.getChild(0), scope);
+ } else {
+ throw new ProgramCompileException(toLocation(scope, parseTree), "Unknown expression type: " + parseTree.toStringTree());
+ }
+ }
+ } else {
+ if (parseTree.getChild(0) instanceof UnaryExpressionContext) {
+ return convertExpr(parseTree.getChild(0), scope);
+ } else if (parseTree.getChild(0) instanceof MultiplicativeExpressionContext) {
+ return convertExpr(parseTree.getChild(0), scope);
+ } else {
+ throw new ProgramCompileException(toLocation(scope, parseTree), "Unknown expression type: " + parseTree.getText());
+ }
+ }
+ }
+ case yqlplusParser.RULE_unaryExpression: {
+ if (1 == parseTree.getChildCount()) {
+ return convertExpr(parseTree.getChild(0), scope);
+ } else if (2 == parseTree.getChildCount()) {
+ if ("-".equals(parseTree.getChild(0).getText())) {
+ return readUnOp(ExpressionOperator.NEGATE, parseTree, scope);
+ } else if ("!".equals(parseTree.getChild(0).getText())) {
+ return readUnOp(ExpressionOperator.NOT, parseTree, scope);
+ }
+ throw new ProgramCompileException(toLocation(scope, parseTree),"Unknown unary operator " + parseTree.getText());
+ } else {
+ throw new ProgramCompileException(toLocation(scope, parseTree),"Unknown child count " + parseTree.getChildCount() + " of " + parseTree.getText());
+ }
+ }
+ case yqlplusParser.RULE_fieldref: {
+ // all in-scope data sources should be defined in scope
+ // the 'first' field in a namespaced reference must be:
+ // - a field name if (and only if) there is exactly one data source
+ // in scope OR
+ // - an alias name, which will be followed by a field name
+ // ^(FIELDREF<FieldReference>[$expression::namespace]
+ // namespaced_name)
+ List<String> path = readName((Namespaced_nameContext) parseTree.getChild(0));
+ Location loc = toLocation(scope, parseTree.getChild(0));
+ String alias = path.get(0);
+ OperatorNode<ExpressionOperator> result = null;
+ int start = 0;
+ if (scope.isCursor(alias)) {
+ if (path.size() > 1) {
+ result = OperatorNode.create(loc, ExpressionOperator.READ_FIELD, alias, path.get(1));
+ start = 2;
+ } else {
+ result = OperatorNode.create(loc, ExpressionOperator.READ_RECORD, alias);
+ start = 1;
+ }
+ } else if (scope.isBound(alias)) {
+ return OperatorNode.create(loc, ExpressionOperator.READ_MODULE, scope.getBinding(alias).toPathWith(path.subList(1, path.size())));
+ } else if (scope.getCursors().size() == 1) {
+ alias = scope.getCursors().iterator().next();
+ result = OperatorNode.create(loc, ExpressionOperator.READ_FIELD, alias, path.get(0));
+ start = 1;
+ } else {
+ // ah ha, we can't end up with a 'loose' UDF call because it
+ // won't be a module or known alias
+ // so we need not support implicit imports for constants used in
+ // UDFs
+ throw new ProgramCompileException(loc, "Unknown field or alias '%s'", alias);
+ }
+ for (int idx = start; idx < path.size(); ++idx) {
+ result = OperatorNode.create(loc, ExpressionOperator.PROPREF, result, path.get(idx));
+ }
+ return result;
}
- return OperatorNode.create(toLocation(scope, expressionList.isEmpty()? parseTree:expressionList.get(0)), ExpressionOperator.ARRAY, values);
+ case yqlplusParser.RULE_scalar_literal:
+ return OperatorNode.create(toLocation(scope, parseTree), ExpressionOperator.LITERAL, convertLiteral((Scalar_literalContext) parseTree));
+ case yqlplusParser.RULE_constantExpression:
+ return convertExpr(parseTree.getChild(0), scope);
+ case yqlplusParser.RULE_literal_list:
+ if (getParseTreeIndex(parseTree.getChild(1)) == yqlplusParser.RULE_array_parameter) {
+ return convertExpr(parseTree.getChild(1), scope);
+ } else {
+ List<Literal_elementContext> elements = ((Literal_listContext) parseTree).literal_element();
+ ParseTree fieldElement = elements.get(0).getChild(0);
+ if (elements.size() == 1 && scope.getParser().isArrayParameter(fieldElement)) {
+ return convertExpr(fieldElement, scope);
+ } else {
+ List<OperatorNode<ExpressionOperator>> values = Lists.newArrayListWithExpectedSize(elements.size());
+ for (Literal_elementContext child : elements) {
+ values.add(convertExpr(child.getChild(0), scope));
+ }
+ return OperatorNode.create(toLocation(scope, elements.get(0)),ExpressionOperator.ARRAY, values);
+ }
+ }
}
- case yqlplusParser.RULE_arrayLiteral: {
- List<ExpressionContext> expressionList = ((ArrayLiteralContext) parseTree).expression();
- List<OperatorNode<ExpressionOperator>> values = Lists.newArrayListWithExpectedSize(expressionList.size());
- for (ExpressionContext expr : expressionList) {
- values.add(convertExpr(expr, scope));
- }
- return OperatorNode.create(toLocation(scope, expressionList.isEmpty()? parseTree:expressionList.get(0)), ExpressionOperator.ARRAY, values);
- }
- //dereferencedExpression: primaryExpression(indexref[in_select]| propertyref)*
- case yqlplusParser.RULE_dereferencedExpression: {
- DereferencedExpressionContext dereferencedExpression = (DereferencedExpressionContext) parseTree;
- Iterator<ParseTree> it = dereferencedExpression.children.iterator();
- OperatorNode<ExpressionOperator> result = convertExpr(it.next(), scope);
- while (it.hasNext()) {
- ParseTree defTree = it.next();
- if (getParseTreeIndex(defTree) == yqlplusParser.RULE_propertyref) {
- //DOT nm=ID
- result = OperatorNode.create(toLocation(scope, parseTree), ExpressionOperator.PROPREF, result, defTree.getChild(1).getText());
- } else {
- //indexref
- result = OperatorNode.create(toLocation(scope, parseTree), ExpressionOperator.INDEX, result, convertExpr(defTree.getChild(1), scope));
- }
- }
- return result;
- }
- case yqlplusParser.RULE_primaryExpression: {
- // ^(CALL namespaced_name arguments)
- ParseTree firstChild = parseTree.getChild(0);
- switch (getParseTreeIndex(firstChild)) {
- case yqlplusParser.RULE_fieldref: {
- return convertExpr(firstChild, scope);
- }
- case yqlplusParser.RULE_callExpresion: {
- List<ArgumentContext> args = ((ArgumentsContext) firstChild.getChild(1)).argument();
- List<OperatorNode<ExpressionOperator>> arguments = Lists.newArrayListWithExpectedSize(args.size());
- for (ArgumentContext argContext : args) {
- arguments.add(convertExpr(argContext.expression(),scope));
- }
- return OperatorNode.create(toLocation(scope, parseTree), ExpressionOperator.CALL, scope.resolvePath(readName((Namespaced_nameContext) firstChild.getChild(0))), arguments);
- }
- // TODO add processing this is not implemented in V3
- // case yqlplusParser.APPLY:
-
- case yqlplusParser.RULE_parameter:
- // external variable reference
- return OperatorNode.create(toLocation(scope, firstChild), ExpressionOperator.VARREF, firstChild.getChild(1).getText());
- case yqlplusParser.RULE_scalar_literal:
- case yqlplusParser.RULE_arrayLiteral:
- case yqlplusParser.RULE_mapExpression:
- return convertExpr(firstChild, scope);
- case yqlplusParser.LPAREN:
- return convertExpr(parseTree.getChild(1), scope);
- }
- break;
- }
-
- // TODO: Temporarily disable CAST - think through how types are named
- // case yqlplusParser.CAST: {
- //
- // return new Cast()
- // }
- // return new CastExpression(payload);
- case yqlplusParser.RULE_parameter: {
- // external variable reference
- ParserRuleContext parameterContext = (ParserRuleContext) parseTree;
- IdentContext identContext = parameterContext.getRuleContext(IdentContext.class, 0);
- return OperatorNode.create(toLocation(scope, identContext), ExpressionOperator.VARREF, identContext.getText());
- }
- case yqlplusParser.RULE_annotateExpression: {
- //annotation logicalORExpression
- AnnotationContext annotateExpressionContext = ((AnnotateExpressionContext)parseTree).annotation();
- OperatorNode<ExpressionOperator> annotation = convertExpr(annotateExpressionContext.constantMapExpression(), scope);
- OperatorNode<ExpressionOperator> expr = convertExpr(parseTree.getChild(1), scope);
- List<String> names = annotation.getArgument(0);
- List<OperatorNode<ExpressionOperator>> annotates = annotation.getArgument(1);
- for (int i = 0; i < names.size(); ++i) {
- expr.putAnnotation(names.get(i), readConstantExpression(annotates.get(i)));
- }
- return expr;
- }
- case yqlplusParser.RULE_expression: {
- return convertExpr(parseTree.getChild(0), scope);
- }
- case yqlplusParser.RULE_logicalANDExpression:
- LogicalANDExpressionContext andExpressionContext = (LogicalANDExpressionContext) parseTree;
- return readConjOp(ExpressionOperator.AND, andExpressionContext.equalityExpression(), scope);
- case yqlplusParser.RULE_logicalORExpression: {
- int childCount = parseTree.getChildCount();
- LogicalORExpressionContext logicalORExpressionContext = (LogicalORExpressionContext) parseTree;
- if (childCount > 1) {
- return readConjOrOp(ExpressionOperator.OR, logicalORExpressionContext, scope);
- } else {
- List<EqualityExpressionContext> equalityExpressionList = ((LogicalANDExpressionContext) parseTree.getChild(0)).equalityExpression();
- if (equalityExpressionList.size() > 1) {
- return readConjOp(ExpressionOperator.AND, equalityExpressionList, scope);
- } else {
- return convertExpr(equalityExpressionList.get(0), scope);
- }
- }
- }
- case yqlplusParser.RULE_equalityExpression: {
- EqualityExpressionContext equalityExpression = (EqualityExpressionContext) parseTree;
- RelationalExpressionContext relationalExpressionContext = equalityExpression.relationalExpression(0);
- OperatorNode<ExpressionOperator> expr = convertExpr(relationalExpressionContext, scope);
- InNotInTargetContext inNotInTarget = equalityExpression.inNotInTarget();
- int childCount = equalityExpression.getChildCount();
- if (childCount == 1) {
- return expr;
- }
- if (inNotInTarget != null) {
- Literal_listContext literalListContext = inNotInTarget.literal_list();
- boolean isIN = equalityExpression.IN() != null;
- if (literalListContext == null) {
- Select_statementContext selectStatementContext = inNotInTarget.select_statement();
- OperatorNode<SequenceOperator> query = convertQuery(selectStatementContext, scope);
- return OperatorNode.create(expr.getLocation(),isIN ? ExpressionOperator.IN_QUERY: ExpressionOperator.NOT_IN_QUERY, expr, query);
- } else {
- // we need to identify the type of the target; if it's a
- // scalar we need to wrap it in a CREATE_ARRAY
- // if it's already a CREATE ARRAY then it's fine, otherwise
- // we need to know the variable type
- // return readBinOp(node.getType() == yqlplusParser.IN ?
- // ExpressionOperator.IN : ExpressionOperator.NOT_IN, node,
- // scope);
- return readBinOp(isIN ? ExpressionOperator.IN: ExpressionOperator.NOT_IN, equalityExpression.getChild(0), literalListContext, scope);
- }
-
- } else {
- ParseTree firstChild = equalityExpression.getChild(1);
- if (equalityExpression.getChildCount() == 2) {
- switch (getParseTreeIndex(firstChild)) {
- case yqlplusParser.IS_NULL:
- return readUnOp(ExpressionOperator.IS_NULL, relationalExpressionContext, scope);
- case yqlplusParser.IS_NOT_NULL:
- return readUnOp(ExpressionOperator.IS_NOT_NULL, relationalExpressionContext, scope);
- }
- } else {
- switch (getParseTreeIndex(firstChild.getChild(0))) {
- case yqlplusParser.EQ:
- return readBinOp(ExpressionOperator.EQ, equalityExpression.getChild(0), equalityExpression.getChild(2), scope);
- case yqlplusParser.NEQ:
- return readBinOp(ExpressionOperator.NEQ, equalityExpression.getChild(0), equalityExpression.getChild(2), scope);
- case yqlplusParser.LIKE:
- return readBinOp(ExpressionOperator.LIKE, equalityExpression.getChild(0), equalityExpression.getChild(2), scope);
- case yqlplusParser.NOTLIKE:
- return readBinOp(ExpressionOperator.NOT_LIKE, equalityExpression.getChild(0), equalityExpression.getChild(2), scope);
- case yqlplusParser.MATCHES:
- return readBinOp(ExpressionOperator.MATCHES, equalityExpression.getChild(0), equalityExpression.getChild(2), scope);
- case yqlplusParser.NOTMATCHES:
- return readBinOp(ExpressionOperator.NOT_MATCHES, equalityExpression.getChild(0), equalityExpression.getChild(2), scope);
- case yqlplusParser.CONTAINS:
- return readBinOp(ExpressionOperator.CONTAINS, equalityExpression.getChild(0), equalityExpression.getChild(2), scope);
- }
- }
-
- }
- break;
- }
- case yqlplusParser.RULE_relationalExpression: {
- RelationalExpressionContext relationalExpressionContext = (RelationalExpressionContext) parseTree;
- RelationalOpContext opContext = relationalExpressionContext.relationalOp();
- if (opContext != null) {
- switch (getParseTreeIndex(relationalExpressionContext.relationalOp().getChild(0))) {
- case yqlplusParser.LT:
- return readBinOp(ExpressionOperator.LT, parseTree, scope);
- case yqlplusParser.LTEQ:
- return readBinOp(ExpressionOperator.LTEQ, parseTree, scope);
- case yqlplusParser.GT:
- return readBinOp(ExpressionOperator.GT, parseTree, scope);
- case yqlplusParser.GTEQ:
- return readBinOp(ExpressionOperator.GTEQ, parseTree, scope);
- }
- } else {
- return convertExpr(relationalExpressionContext.additiveExpression(0), scope);
- }
- }
- break;
- case yqlplusParser.RULE_additiveExpression:
- case yqlplusParser.RULE_multiplicativeExpression: {
- if (parseTree.getChildCount() > 1) {
- String opStr = parseTree.getChild(1).getText();
- switch (opStr) {
- case "+":
- return readBinOp(ExpressionOperator.ADD, parseTree, scope);
- case "-":
- return readBinOp(ExpressionOperator.SUB, parseTree, scope);
- case "/":
- return readBinOp(ExpressionOperator.DIV, parseTree, scope);
- case "*":
- return readBinOp(ExpressionOperator.MULT, parseTree, scope);
- case "%":
- return readBinOp(ExpressionOperator.MOD, parseTree, scope);
- default:
- if (parseTree.getChild(0) instanceof UnaryExpressionContext) {
- return convertExpr(parseTree.getChild(0), scope);
- } else {
- throw new ProgramCompileException(toLocation(scope, parseTree), "Unknown expression type: " + parseTree.toStringTree());
- }
- }
- } else {
- if (parseTree.getChild(0) instanceof UnaryExpressionContext) {
- return convertExpr(parseTree.getChild(0), scope);
- } else if (parseTree.getChild(0) instanceof MultiplicativeExpressionContext) {
- return convertExpr(parseTree.getChild(0), scope);
- } else {
- throw new ProgramCompileException(toLocation(scope, parseTree), "Unknown expression type: " + parseTree.getText());
- }
- }
- }
- case yqlplusParser.RULE_unaryExpression: {
- if (1 == parseTree.getChildCount()) {
- return convertExpr(parseTree.getChild(0), scope);
- } else if (2 == parseTree.getChildCount()) {
- if ("-".equals(parseTree.getChild(0).getText())) {
- return readUnOp(ExpressionOperator.NEGATE, parseTree, scope);
- } else if ("!".equals(parseTree.getChild(0).getText())) {
- return readUnOp(ExpressionOperator.NOT, parseTree, scope);
- }
- throw new ProgramCompileException(toLocation(scope, parseTree),"Unknown unary operator " + parseTree.getText());
- } else {
- throw new ProgramCompileException(toLocation(scope, parseTree),"Unknown child count " + parseTree.getChildCount() + " of " + parseTree.getText());
- }
- }
- case yqlplusParser.RULE_fieldref:
- case yqlplusParser.RULE_joinDereferencedExpression: {
- // all in-scope data sources should be defined in scope
- // the 'first' field in a namespaced reference must be:
- // - a field name if (and only if) there is exactly one data source
- // in scope OR
- // - an alias name, which will be followed by a field name
- // ^(FIELDREF<FieldReference>[$expression::namespace]
- // namespaced_name)
- List<String> path = readName((Namespaced_nameContext) parseTree.getChild(0));
- Location loc = toLocation(scope, parseTree.getChild(0));
- String alias = path.get(0);
- OperatorNode<ExpressionOperator> result = null;
- int start = 0;
- if (scope.isCursor(alias)) {
- if (path.size() > 1) {
- result = OperatorNode.create(loc, ExpressionOperator.READ_FIELD, alias, path.get(1));
- start = 2;
- } else {
- result = OperatorNode.create(loc, ExpressionOperator.READ_RECORD, alias);
- start = 1;
- }
- } else if (scope.isBound(alias)) {
- return OperatorNode.create(loc, ExpressionOperator.READ_MODULE, scope.getBinding(alias).toPathWith(path.subList(1, path.size())));
- } else if (scope.getCursors().size() == 1) {
- alias = scope.getCursors().iterator().next();
- result = OperatorNode.create(loc, ExpressionOperator.READ_FIELD, alias, path.get(0));
- start = 1;
- } else {
- // ah ha, we can't end up with a 'loose' UDF call because it
- // won't be a module or known alias
- // so we need not support implicit imports for constants used in
- // UDFs
- throw new ProgramCompileException(loc, "Unknown field or alias '%s'", alias);
- }
- for (int idx = start; idx < path.size(); ++idx) {
- result = OperatorNode.create(loc, ExpressionOperator.PROPREF, result, path.get(idx));
- }
- return result;
- }
- case yqlplusParser.RULE_scalar_literal:
- return OperatorNode.create(toLocation(scope, parseTree), ExpressionOperator.LITERAL, convertLiteral((Scalar_literalContext) parseTree));
- case yqlplusParser.RULE_insert_values:
- return readValues((Insert_valuesContext) parseTree, scope);
- case yqlplusParser.RULE_constantExpression:
- return convertExpr(parseTree.getChild(0), scope);
- case yqlplusParser.RULE_literal_list:
- if (getParseTreeIndex(parseTree.getChild(1)) == yqlplusParser.RULE_array_parameter) {
- return convertExpr(parseTree.getChild(1), scope);
- } else {
- List<Literal_elementContext> elements = ((Literal_listContext) parseTree).literal_element();
- ParseTree firldElement = elements.get(0).getChild(0);
- if (elements.size() == 1 && scope.getParser().isArrayParameter(firldElement)) {
- return convertExpr(firldElement, scope);
- } else {
- List<OperatorNode<ExpressionOperator>> values = Lists.newArrayListWithExpectedSize(elements.size());
- for (Literal_elementContext child : elements) {
- values.add(convertExpr(child.getChild(0), scope));
- }
- return OperatorNode.create(toLocation(scope, elements.get(0)),ExpressionOperator.ARRAY, values);
- }
- }
- }
- throw new ProgramCompileException(toLocation(scope, parseTree),
- "Unknown expression type: " + parseTree.getText());
+ throw new ProgramCompileException(toLocation(scope, parseTree),
+ "Unknown expression type: " + parseTree.getText());
}
public Object convertLiteral(Scalar_literalContext literal) {
@@ -1462,77 +1078,7 @@ final class ProgramParser {
}
}
- private OperatorNode<ExpressionOperator> readValues(Field_names_specContext nameDefs, Field_values_specContext values, Scope scope) {
- List<Field_defContext> fieldDefs = nameDefs.field_def();
- List<ExpressionContext> valueDefs = values.expression();
- assert fieldDefs.size() == valueDefs.size();
- List<String> fieldNames;
- List<OperatorNode<ExpressionOperator>> fieldValues;
- int numPairs = fieldDefs.size();
- fieldNames = Lists.newArrayListWithExpectedSize(numPairs);
- fieldValues = Lists.newArrayListWithExpectedSize(numPairs);
- for (int i = 0; i < numPairs; i++) {
- fieldNames.add((String) convertExpr(fieldDefs.get(i).expression(), scope).getArgument(1));
- fieldValues.add(convertExpr(valueDefs.get(i), scope));
- }
- return OperatorNode.create(ExpressionOperator.MAP, fieldNames, fieldValues);
- }
-
- private OperatorNode<ExpressionOperator> readValues(ParserRuleContext node, Scope scope) {
- List<String> fieldNames;
- List<OperatorNode<ExpressionOperator>> fieldValues;
- if (node.getRuleIndex() == yqlplusParser.RULE_field_def) {
- Field_defContext fieldDefContext = (Field_defContext)node;
- //TODO double check
- fieldNames = Lists.newArrayListWithExpectedSize(node.getChildCount());
- fieldValues = Lists.newArrayListWithExpectedSize(node.getChildCount());
- for (int i = 0; i < node.getChildCount(); i++) {
- fieldNames.add((String) convertExpr(node.getChild(i).getChild(0).getChild(0), scope).getArgument(1));
- fieldValues.add(convertExpr(node.getChild(i).getChild(0).getChild(1), scope));
- }
- } else {
- assert node.getChildCount() % 2 == 0;
- int numPairs = node.getChildCount() / 2;
- fieldNames = Lists.newArrayListWithExpectedSize(numPairs);
- fieldValues = Lists.newArrayListWithExpectedSize(numPairs);
- for (int i = 0; i < numPairs; i++) {
- fieldNames.add((String) convertExpr(node.getChild(i).getChild(0), scope).getArgument(1));
- fieldValues.add(convertExpr(node.getChild(numPairs + i), scope));
- }
- }
- return OperatorNode.create(ExpressionOperator.MAP, fieldNames, fieldValues);
- }
-
- /*
- * Converts node list
- *
- * a_name, b_name, c_name, a_value_1, b_value_1, c_value_1, a_value_2, b_value_2, c_value2, a_value_3, b_value_3, c_value_3
- *
- * into corresponding constant sequence:
- *
- * [ { a_name : a_value_1, b_name : b_value_1, c_name : c_value_1 }, ... ]
- *
- */
- private OperatorNode<SequenceOperator> readBatchValues(Field_names_specContext nameDefs, List<Field_values_group_specContext> valueGroups, Scope scope) {
- List<Field_defContext> nameContexts = nameDefs.field_def();
- List<String> fieldNames = Lists.newArrayList();
- for (Field_defContext nameContext:nameContexts) {
- fieldNames.add((String) convertExpr(nameContext.getChild(0), scope).getArgument(1));
- }
- List<OperatorNode> records = Lists.newArrayList();
- for (Field_values_group_specContext valueGorup:valueGroups) {
- List<ExpressionContext> expressionList = valueGorup.expression();
- List<OperatorNode<ExpressionOperator>> fieldValues = Lists.newArrayListWithExpectedSize(expressionList.size());
- for (ExpressionContext expressionContext:expressionList) {
- fieldValues.add(convertExpr(expressionContext, scope));
- }
- records.add(OperatorNode.create(ExpressionOperator.MAP, fieldNames, fieldValues));
- }
- // Return constant sequence of records with the given name/values
- return OperatorNode.create(SequenceOperator.EVALUATE, OperatorNode.create(ExpressionOperator.ARRAY, records));
- }
-
- /*
+ /**
* Scans the given node for READ_FIELD expressions.
*
* TODO: Search recursively and consider additional operators
@@ -1557,4 +1103,5 @@ final class ProgramParser {
}
return readFieldList;
}
+
}
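
Note (illustrative, not part of this patch): the RULE_fieldref branch in the rewritten convertExpr above resolves a dotted name left to right: a known cursor alias yields READ_FIELD or READ_RECORD, a bound alias yields READ_MODULE, a single cursor in scope lets the first segment be treated as a field, and all remaining segments are wrapped in PROPREF nodes. The sketch below only mirrors that resolution order with simplified string stand-ins for the real Scope/OperatorNode API; names such as FieldRefResolutionSketch and the "source" cursor are hypothetical.

    import java.util.List;

    // Sketch only - mirrors the resolution order of the RULE_fieldref branch with
    // hypothetical stand-ins for Scope and OperatorNode.
    final class FieldRefResolutionSketch {

        // Assume a single known cursor alias named "source" and no module bindings.
        static boolean isCursor(String alias) { return "source".equals(alias); }

        static String resolve(List<String> path) {
            String alias = path.get(0);
            String expr;
            int start;
            if (isCursor(alias)) {
                // alias.field -> READ_FIELD(alias, field); a bare alias -> READ_RECORD(alias)
                if (path.size() > 1) { expr = "READ_FIELD(" + alias + ", " + path.get(1) + ")"; start = 2; }
                else                 { expr = "READ_RECORD(" + alias + ")"; start = 1; }
            } else {
                // exactly one cursor in scope: the first segment is a field of that cursor
                expr = "READ_FIELD(source, " + path.get(0) + ")";
                start = 1;
            }
            // remaining segments become a chain of PROPREF nodes
            for (int i = start; i < path.size(); i++)
                expr = "PROPREF(" + expr + ", " + path.get(i) + ")";
            return expr;
        }

        public static void main(String[] args) {
            // prints PROPREF(READ_FIELD(source, address), zip)
            System.out.println(resolve(List.of("source", "address", "zip")));
        }
    }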
diff --git a/container-search/src/test/java/com/yahoo/prelude/cluster/ClusterSearcherTestCase.java b/container-search/src/test/java/com/yahoo/prelude/cluster/ClusterSearcherTestCase.java
index 8b7a57c38e7..f4608f1c991 100644
--- a/container-search/src/test/java/com/yahoo/prelude/cluster/ClusterSearcherTestCase.java
+++ b/container-search/src/test/java/com/yahoo/prelude/cluster/ClusterSearcherTestCase.java
@@ -14,7 +14,6 @@ import com.yahoo.prelude.IndexFacts;
import com.yahoo.prelude.IndexModel;
import com.yahoo.prelude.SearchDefinition;
import com.yahoo.prelude.fastsearch.DocumentdbInfoConfig;
-import com.yahoo.prelude.fastsearch.FS4ResourcePool;
import com.yahoo.prelude.fastsearch.FastHit;
import com.yahoo.prelude.fastsearch.VespaBackEndSearcher;
import com.yahoo.prelude.fastsearch.test.MockMetric;
@@ -530,7 +529,7 @@ public class ClusterSearcherTestCase {
clusterConfig.build(),
documentDbConfig.build(),
dispatchers,
- new FS4ResourcePool(new QrConfig.Builder().build()),
+ new QrConfig.Builder().build(),
vipStatus);
}
diff --git a/container-search/src/test/java/com/yahoo/prelude/query/ItemsCommonStuffTestCase.java b/container-search/src/test/java/com/yahoo/prelude/query/ItemsCommonStuffTestCase.java
index 204ccff7fb3..02175425808 100644
--- a/container-search/src/test/java/com/yahoo/prelude/query/ItemsCommonStuffTestCase.java
+++ b/container-search/src/test/java/com/yahoo/prelude/query/ItemsCommonStuffTestCase.java
@@ -192,7 +192,7 @@ public class ItemsCommonStuffTestCase {
assertEquals("SAND", i.getName());
i = new WeakAndItem();
assertEquals(ItemType.WEAK_AND, i.getItemType());
- assertEquals("WAND", i.getName());
+ assertEquals("WEAKAND", i.getName());
}
@Test
diff --git a/container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParseTestCase.java b/container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParseTestCase.java
index f221cbb63b1..6afea895f3a 100644
--- a/container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParseTestCase.java
+++ b/container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParseTestCase.java
@@ -2480,17 +2480,17 @@ public class ParseTestCase {
@Test
public void testSimpleWandAdvanced() {
- tester.assertParsed("WAND(100) foo bar baz", "foo wand bar wand baz", Query.Type.ADVANCED);
+ tester.assertParsed("WEAKAND(100) foo bar baz", "foo wand bar wand baz", Query.Type.ADVANCED);
}
@Test
public void testSimpleWandAdvancedWithNonDefaultN() {
- tester.assertParsed("WAND(32) foo bar baz", "foo wand(32) bar wand(32) baz", Query.Type.ADVANCED);
+ tester.assertParsed("WEAKAND(32) foo bar baz", "foo weakand(32) bar weakand(32) baz", Query.Type.ADVANCED);
}
@Test
public void testSimpleWandAdvancedWithNonDefaultNAndWeights() {
- tester.assertParsed("WAND(32) foo!32 bar!64 baz", "foo!32 wand(32) bar!64 wand(32) baz", Query.Type.ADVANCED);
+ tester.assertParsed("WEAKAND(32) foo!32 bar!64 baz", "foo!32 weakand(32) bar!64 weakand(32) baz", Query.Type.ADVANCED);
}
@Test
diff --git a/container-search/src/test/java/com/yahoo/prelude/query/test/QueryCanonicalizerTestCase.java b/container-search/src/test/java/com/yahoo/prelude/query/test/QueryCanonicalizerTestCase.java
index d3b64100c1e..e7a2a4f3ef8 100644
--- a/container-search/src/test/java/com/yahoo/prelude/query/test/QueryCanonicalizerTestCase.java
+++ b/container-search/src/test/java/com/yahoo/prelude/query/test/QueryCanonicalizerTestCase.java
@@ -29,7 +29,7 @@ public class QueryCanonicalizerTestCase {
CompositeItem root = new WeakAndItem();
root.addItem(new WordItem("word"));
- assertCanonicalized("WAND(100) word", null, root);
+ assertCanonicalized("WEAKAND(100) word", null, root);
}
@Test
@@ -108,6 +108,47 @@ public class QueryCanonicalizerTestCase {
}
@Test
+ public void testMultilevelWeakAndCollapsing() {
+ CompositeItem root = new WeakAndItem();
+ CompositeItem l1 = new WeakAndItem();
+ CompositeItem l2 = new WeakAndItem();
+ CompositeItem l3 = new WeakAndItem();
+ CompositeItem l4 = new WeakAndItem();
+
+ root.addItem(l1);
+
+ l1.addItem(l2);
+ l1.addItem(new WordItem("l1"));
+
+ l2.addItem(l3);
+ l2.addItem(new WordItem("l2"));
+
+ l3.addItem(l4);
+ l3.addItem(new WordItem("l3"));
+
+ l4.addItem(new WordItem("l4"));
+
+ assertCanonicalized("WEAKAND(100) l4 l3 l2 l1", null, root);
+ }
+
+ @Test
+ public void testWeakAndCollapsingRequireSameNAndIndex() {
+ CompositeItem root = new WeakAndItem(10);
+ CompositeItem l1 = new WeakAndItem(100);
+ CompositeItem l2 = new WeakAndItem(100);
+ l2.setIndexName("other");
+
+ root.addItem(l1);
+
+ l1.addItem(l2);
+ l1.addItem(new WordItem("l1"));
+
+ l2.addItem(new WordItem("l2"));
+
+ assertCanonicalized("WEAKAND(10) (WEAKAND(100) (WEAKAND(100) l2) l1)", null, root);
+ }
+
+ @Test
public void testNullRoot() {
assertCanonicalized(null, "No query", new Query());
}
diff --git a/container-search/src/test/java/com/yahoo/prelude/query/test/SameElementItemTestCase.java b/container-search/src/test/java/com/yahoo/prelude/query/test/SameElementItemTestCase.java
index 1382c106ae3..bb3a775ccf2 100644
--- a/container-search/src/test/java/com/yahoo/prelude/query/test/SameElementItemTestCase.java
+++ b/container-search/src/test/java/com/yahoo/prelude/query/test/SameElementItemTestCase.java
@@ -5,10 +5,14 @@ import com.yahoo.prelude.query.AndItem;
import com.yahoo.prelude.query.IntItem;
import com.yahoo.prelude.query.Item;
import com.yahoo.prelude.query.SameElementItem;
+import com.yahoo.prelude.query.Substring;
import com.yahoo.prelude.query.TermItem;
+import com.yahoo.prelude.query.WordAlternativesItem;
import com.yahoo.prelude.query.WordItem;
import org.junit.Test;
+import java.util.Collections;
+import java.util.List;
import java.util.Optional;
import static org.junit.Assert.assertEquals;
@@ -79,7 +83,7 @@ public class SameElementItemTestCase {
}
@Test
- public void requireAllChildrenAreTermItems() {
+ public void requireNoChildrenAreWordAlternatives() {
try {
SameElementItem s = new SameElementItem("structa");
s.addItem(new AndItem());
@@ -91,6 +95,19 @@ public class SameElementItemTestCase {
}
}
+ @Test
+ public void requireAllChildrenAreTermItems() {
+ try {
+ SameElementItem s = new SameElementItem("structa");
+ s.addItem(new WordAlternativesItem("test", true, new Substring("origin"), List.of(new WordAlternativesItem.Alternative("a", 0.3))));
+ fail("Expected exception");
+ }
+ catch (IllegalArgumentException e) { // Success
+ assertEquals("Child item WORD_ALTERNATIVES test:[ a(0.3) ] should NOT be an instance of class com.yahoo.prelude.query.WordAlternativesItem but is class com.yahoo.prelude.query.WordAlternativesItem",
+ e.getMessage());
+ }
+ }
+
private void verifyExtractSingle(TermItem term) {
String subFieldName = term.getIndexName();
SameElementItem s = new SameElementItem("structa");
diff --git a/container-search/src/test/java/com/yahoo/prelude/querytransform/test/QueryRewriteTestCase.java b/container-search/src/test/java/com/yahoo/prelude/querytransform/test/QueryRewriteTestCase.java
index 36137abd9b8..6143682c028 100644
--- a/container-search/src/test/java/com/yahoo/prelude/querytransform/test/QueryRewriteTestCase.java
+++ b/container-search/src/test/java/com/yahoo/prelude/querytransform/test/QueryRewriteTestCase.java
@@ -89,8 +89,10 @@ public class QueryRewriteTestCase {
@Test
public void testRestrictRank() {
- assertRewritten("sddocname:per&filter=abc", "espen", "|abc");
+ assertRewritten("sddocname:per&filter=abc", "espen", "NULL");
assertRewritten("sddocname:per&filter=abc", "per", "RANK sddocname:per |abc");
+ assertRewritten("sddocname:per RANK bar", "per", "RANK sddocname:per bar");
+ assertRewritten("sddocname:per RANK bar", "espen", "NULL");
}
private static Query query(String queryString, String restrict) {
diff --git a/container-search/src/test/java/com/yahoo/search/dispatch/DispatcherTest.java b/container-search/src/test/java/com/yahoo/search/dispatch/DispatcherTest.java
index eaafb2d8b8a..943390cb10c 100644
--- a/container-search/src/test/java/com/yahoo/search/dispatch/DispatcherTest.java
+++ b/container-search/src/test/java/com/yahoo/search/dispatch/DispatcherTest.java
@@ -6,6 +6,7 @@ import com.yahoo.prelude.fastsearch.test.MockMetric;
import com.yahoo.search.Query;
import com.yahoo.search.Result;
import com.yahoo.search.cluster.ClusterMonitor;
+import com.yahoo.search.dispatch.searchcluster.Group;
import com.yahoo.search.dispatch.searchcluster.Node;
import com.yahoo.search.dispatch.searchcluster.PingFactory;
import com.yahoo.search.dispatch.searchcluster.Pinger;
@@ -166,7 +167,7 @@ public class DispatcherTest {
boolean nonEmpty = events[step].returnInvoker(nodes, acceptIncompleteCoverage);
step++;
if (nonEmpty) {
- return Optional.of(new MockInvoker(nodes.get(0).key()));
+ return Optional.of(new MockInvoker(nodes.get(0).key(), groupId));
} else {
return Optional.empty();
}
diff --git a/container-search/src/test/java/com/yahoo/search/dispatch/InterleavedSearchInvokerTest.java b/container-search/src/test/java/com/yahoo/search/dispatch/InterleavedSearchInvokerTest.java
index 8cab7884152..730aa0800e7 100644
--- a/container-search/src/test/java/com/yahoo/search/dispatch/InterleavedSearchInvokerTest.java
+++ b/container-search/src/test/java/com/yahoo/search/dispatch/InterleavedSearchInvokerTest.java
@@ -44,10 +44,11 @@ import static org.junit.Assert.fail;
* @author ollivir
*/
public class InterleavedSearchInvokerTest {
- private ManualClock clock = new ManualClock(Instant.now());
- private Query query = new TestQuery();
- private LinkedList<Event> expectedEvents = new LinkedList<>();
- private List<SearchInvoker> invokers = new ArrayList<>();
+
+ private final ManualClock clock = new ManualClock(Instant.now());
+ private final Query query = new TestQuery();
+ private final LinkedList<Event> expectedEvents = new LinkedList<>();
+ private final List<SearchInvoker> invokers = new ArrayList<>();
@Test
public void requireThatAdaptiveTimeoutsAreNotUsedWithFullCoverageRequirement() throws IOException {
diff --git a/container-search/src/test/java/com/yahoo/search/dispatch/LoadBalancerTest.java b/container-search/src/test/java/com/yahoo/search/dispatch/LoadBalancerTest.java
index 36b476e2936..d0ba396ef94 100644
--- a/container-search/src/test/java/com/yahoo/search/dispatch/LoadBalancerTest.java
+++ b/container-search/src/test/java/com/yahoo/search/dispatch/LoadBalancerTest.java
@@ -26,6 +26,7 @@ import static org.junit.Assert.assertThat;
* @author ollivir
*/
public class LoadBalancerTest {
+
@Test
public void requireThatLoadBalancerServesSingleNodeSetups() {
Node n1 = new Node(0, "test-node1", 0);
diff --git a/container-search/src/test/java/com/yahoo/search/dispatch/MockInvoker.java b/container-search/src/test/java/com/yahoo/search/dispatch/MockInvoker.java
index 459dcc83ab0..53d1a2457d0 100644
--- a/container-search/src/test/java/com/yahoo/search/dispatch/MockInvoker.java
+++ b/container-search/src/test/java/com/yahoo/search/dispatch/MockInvoker.java
@@ -12,20 +12,32 @@ import com.yahoo.search.searchchain.Execution;
import java.io.IOException;
import java.util.List;
import java.util.Optional;
+import java.util.OptionalInt;
class MockInvoker extends SearchInvoker {
+
private final Coverage coverage;
+ private final OptionalInt groupId;
private Query query;
private List<Hit> hits;
int hitsRequested;
- protected MockInvoker(int key, Coverage coverage) {
+ protected MockInvoker(int key, Coverage coverage, OptionalInt groupId) {
super(Optional.of(new Node(key, "?", 0)));
this.coverage = coverage;
+ this.groupId = groupId;
+ }
+
+ protected MockInvoker(int key, OptionalInt groupId) {
+ this(key, null, groupId);
+ }
+
+ protected MockInvoker(int key, Coverage coverage) {
+ this(key, coverage, OptionalInt.empty());
}
protected MockInvoker(int key) {
- this(key, null);
+ this(key, null, OptionalInt.empty());
}
MockInvoker setHits(List<Hit> hits) {
@@ -33,6 +45,9 @@ class MockInvoker extends SearchInvoker {
return this;
}
+ /** Returns the group to be invoked, if known */
+ public OptionalInt groupId() { return groupId; }
+
@Override
protected Object sendSearchRequest(Query query, Object context) throws IOException {
this.query = query;
@@ -62,4 +77,11 @@ class MockInvoker extends SearchInvoker {
@Override
protected void release() {
}
+
+ @Override
+ public String toString() {
+ return "invoker with key " + distributionKey() +
+ (groupId().isPresent() ? " of group " + groupId().getAsInt() : "");
+ }
+
 }
\ No newline at end of file
diff --git a/container-search/src/test/java/com/yahoo/search/dispatch/MockSearchCluster.java b/container-search/src/test/java/com/yahoo/search/dispatch/MockSearchCluster.java
index fe43658ee75..9d96b2302d7 100644
--- a/container-search/src/test/java/com/yahoo/search/dispatch/MockSearchCluster.java
+++ b/container-search/src/test/java/com/yahoo/search/dispatch/MockSearchCluster.java
@@ -114,6 +114,7 @@ public class MockSearchCluster extends SearchCluster {
public static DispatchConfig createDispatchConfig(double minSearchCoverage, Node... nodes) {
return createDispatchConfig(minSearchCoverage, Arrays.asList(nodes));
}
+
public static DispatchConfig createDispatchConfig(double minSearchCoverage, List<Node> nodes) {
DispatchConfig.Builder builder = new DispatchConfig.Builder();
builder.minActivedocsPercentage(88.0);
diff --git a/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterCoverageTest.java b/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterCoverageTest.java
new file mode 100644
index 00000000000..6338107d4b6
--- /dev/null
+++ b/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterCoverageTest.java
@@ -0,0 +1,89 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.search.dispatch.searchcluster;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * @author bratseth
+ */
+public class SearchClusterCoverageTest {
+
+ @Test
+ public void two_groups_equal_docs() {
+ var tester = new SearchClusterTester(2, 3);
+
+ tester.setDocsPerNode(100, 0);
+ tester.setDocsPerNode(100, 1);
+ tester.pingIterationCompleted();
+ assertTrue(tester.group(0).hasSufficientCoverage());
+ assertTrue(tester.group(1).hasSufficientCoverage());
+ }
+
+ @Test
+ public void two_groups_one_missing_docs() {
+ var tester = new SearchClusterTester(2, 3);
+
+ tester.setDocsPerNode(100, 0);
+ tester.setDocsPerNode( 70, 1);
+ tester.pingIterationCompleted();
+ assertTrue(tester.group(0).hasSufficientCoverage());
+ assertFalse(tester.group(1).hasSufficientCoverage());
+ }
+
+ @Test
+ public void three_groups_one_missing_docs() {
+ var tester = new SearchClusterTester(3, 3);
+
+ tester.setDocsPerNode(100, 0);
+ tester.setDocsPerNode( 87, 1); // min is set to 88 in MockSearchCluster
+ tester.setDocsPerNode(100, 2);
+ tester.pingIterationCompleted();
+ assertTrue(tester.group(0).hasSufficientCoverage());
+ assertFalse(tester.group(1).hasSufficientCoverage());
+ assertTrue(tester.group(2).hasSufficientCoverage());
+ }
+
+ @Test
+ public void three_groups_one_missing_docs_but_too_few() {
+ var tester = new SearchClusterTester(3, 3);
+
+ tester.setDocsPerNode(100, 0);
+ tester.setDocsPerNode( 89, 1); // min is set to 88 in MockSearchCluster
+ tester.setDocsPerNode(100, 2);
+ tester.pingIterationCompleted();
+ assertTrue(tester.group(0).hasSufficientCoverage());
+ assertTrue(tester.group(1).hasSufficientCoverage());
+ assertTrue(tester.group(2).hasSufficientCoverage());
+ }
+
+ @Test
+ public void three_groups_one_has_too_many_docs() {
+ var tester = new SearchClusterTester(3, 3);
+
+ tester.setDocsPerNode(100, 0);
+ tester.setDocsPerNode(150, 1);
+ tester.setDocsPerNode(100, 2);
+ tester.pingIterationCompleted();
+ assertTrue(tester.group(0).hasSufficientCoverage());
+ assertTrue(tester.group(1).hasSufficientCoverage());
+ assertTrue(tester.group(2).hasSufficientCoverage());
+ }
+
+ @Test
+ public void three_groups_one_has_a_node_down() {
+ var tester = new SearchClusterTester(3, 3);
+
+ tester.setDocsPerNode(100, 0);
+ tester.setDocsPerNode(150, 1);
+ tester.setDocsPerNode(100, 2);
+ tester.setWorking(1, 1, false);
+ tester.pingIterationCompleted();
+ assertTrue(tester.group(0).hasSufficientCoverage());
+ assertFalse(tester.group(1).hasSufficientCoverage());
+ assertTrue(tester.group(2).hasSufficientCoverage());
+ }
+
+}
diff --git a/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterTest.java b/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterTest.java
index c6fd48836fe..48134094faf 100644
--- a/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterTest.java
+++ b/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterTest.java
@@ -28,7 +28,7 @@ import static org.junit.Assert.assertTrue;
*/
public class SearchClusterTest {
- static class State implements AutoCloseable{
+ static class State implements AutoCloseable {
final String clusterId;
final int nodesPerGroup;
diff --git a/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterTester.java b/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterTester.java
new file mode 100644
index 00000000000..5e7ecb854ff
--- /dev/null
+++ b/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterTester.java
@@ -0,0 +1,33 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.search.dispatch.searchcluster;
+
+import com.yahoo.search.dispatch.MockSearchCluster;
+
+public class SearchClusterTester {
+
+ private final SearchCluster cluster;
+
+ public SearchClusterTester(int groups, int nodesPerGroup) {
+ cluster = new MockSearchCluster("1", groups, nodesPerGroup);
+ }
+
+ public void pingIterationCompleted() {
+ cluster.pingIterationCompleted();
+ }
+
+ public Group group(int id) {
+ return cluster.group(id).get();
+ }
+
+ public void setWorking(int group, int node, boolean working) {
+ cluster.group(group).get().nodes().get(node).setWorking(working);
+ }
+
+ public void setDocsPerNode(int docs, int groupId) {
+ for (Node node : cluster.groups().get(groupId).nodes()) {
+ node.setWorking(true);
+ node.setActiveDocuments(docs);
+ }
+ }
+
+}
diff --git a/container-search/src/test/java/com/yahoo/search/searchchain/test/AsyncExecutionOfOneChainTestCase.java b/container-search/src/test/java/com/yahoo/search/searchchain/test/AsyncExecutionOfOneChainTestCase.java
index 182ec7568f3..28ad202c4f5 100644
--- a/container-search/src/test/java/com/yahoo/search/searchchain/test/AsyncExecutionOfOneChainTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/searchchain/test/AsyncExecutionOfOneChainTestCase.java
@@ -48,20 +48,20 @@ public class AsyncExecutionOfOneChainTestCase {
private class ParallelExecutor extends Searcher {
/** The number of parallel executions */
- private static final int parallelism=2;
+ private static final int parallelism = 2;
@Override
public Result search(Query query, Execution execution) {
- List<FutureResult> futureResults=new ArrayList<>(parallelism);
- for (int i=0; i<parallelism; i++)
+ List<FutureResult> futureResults = new ArrayList<>(parallelism);
+ for (int i = 0; i < parallelism; i++)
futureResults.add(new AsyncExecution(execution).search(query.clone()));
- Result mainResult=execution.search(query);
+ Result mainResult = execution.search(query);
// Add hits from other threads
AsyncExecution.waitForAll(futureResults,query.getTimeLeft());
for (FutureResult futureResult : futureResults) {
- Result result=futureResult.get();
+ Result result = futureResult.get();
mainResult.mergeWith(result);
mainResult.hits().addAll(result.hits().asList());
}
@@ -72,7 +72,7 @@ public class AsyncExecutionOfOneChainTestCase {
private static class RegularProvider extends Searcher {
- private AtomicInteger counter=new AtomicInteger();
+ private final AtomicInteger counter = new AtomicInteger();
@Override
public Result search(Query query,Execution execution) {
diff --git a/container-search/src/test/java/com/yahoo/search/yql/YqlParserTestCase.java b/container-search/src/test/java/com/yahoo/search/yql/YqlParserTestCase.java
index 24c8e040051..dd2d27eb66c 100644
--- a/container-search/src/test/java/com/yahoo/search/yql/YqlParserTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/yql/YqlParserTestCase.java
@@ -631,14 +631,14 @@ public class YqlParserTestCase {
@SuppressWarnings("deprecation")
public void testWeakAnd() {
assertParse("select foo from bar where weakAnd(a contains \"A\", b contains \"B\");",
- "WAND(100) a:A b:B");
+ "WEAKAND(100) a:A b:B");
assertParse("select foo from bar where [{\"targetHits\": 37}]weakAnd(a contains \"A\", " +
"b contains \"B\");",
- "WAND(37) a:A b:B");
+ "WEAKAND(37) a:A b:B");
QueryTree tree = parse("select foo from bar where [{\"scoreThreshold\": 41}]weakAnd(a " +
"contains \"A\", b contains \"B\");");
- assertEquals("WAND(100) a:A b:B", tree.toString());
+ assertEquals("WEAKAND(100) a:A b:B", tree.toString());
assertEquals(WeakAndItem.class, tree.getRoot().getClass());
assertEquals(41, ((WeakAndItem)tree.getRoot()).getScoreThreshold());
}
diff --git a/container-search/src/test/java/com/yahoo/select/SelectTestCase.java b/container-search/src/test/java/com/yahoo/select/SelectTestCase.java
index 7549c67d0ae..d1e46a6a8c2 100644
--- a/container-search/src/test/java/com/yahoo/select/SelectTestCase.java
+++ b/container-search/src/test/java/com/yahoo/select/SelectTestCase.java
@@ -555,12 +555,12 @@ public class SelectTestCase {
@SuppressWarnings("deprecation")
public void testWeakAnd() {
assertParse("{ \"weakAnd\": [{ \"contains\": [\"a\", \"A\"] }, { \"contains\": [\"b\", \"B\"] } ] }",
- "WAND(100) a:A b:B");
+ "WEAKAND(100) a:A b:B");
assertParse("{ \"weakAnd\": { \"children\" : [{ \"contains\": [\"a\", \"A\"] }, { \"contains\": [\"b\", \"B\"] } ], \"attributes\" : {\"targetHits\": 37} }}",
- "WAND(37) a:A b:B");
+ "WEAKAND(37) a:A b:B");
QueryTree tree = parseWhere("{ \"weakAnd\": { \"children\" : [{ \"contains\": [\"a\", \"A\"] }, { \"contains\": [\"b\", \"B\"] } ], \"attributes\" : {\"scoreThreshold\": 41}}}");
- assertEquals("WAND(100) a:A b:B", tree.toString());
+ assertEquals("WEAKAND(100) a:A b:B", tree.toString());
assertEquals(WeakAndItem.class, tree.getRoot().getClass());
assertEquals(41, ((WeakAndItem)tree.getRoot()).getScoreThreshold());
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java
index cc10041992c..cb29d5854a0 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java
@@ -1,7 +1,6 @@
// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.api.application.v4.model;
-import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
@@ -16,15 +15,16 @@ public class ClusterMetrics {
public static final String DOCUMENT_COUNT = "documentCount";
public static final String FEED_LATENCY = "feedLatency";
public static final String QUERY_LATENCY = "queryLatency";
+ public static final String FEEDING_BLOCKED = "feedingBlocked";
private final String clusterId;
private final String clusterType;
private final Map<String, Double> metrics;
- public ClusterMetrics(String clusterId, String clusterType) {
+ public ClusterMetrics(String clusterId, String clusterType, Map<String, Double> metrics) {
this.clusterId = clusterId;
this.clusterType = clusterType;
- this.metrics = new HashMap<>();
+ this.metrics = Map.copyOf(metrics);
}
public String getClusterId() {
@@ -55,9 +55,8 @@ public class ClusterMetrics {
return Optional.ofNullable(metrics.get(QUERY_LATENCY));
}
- public ClusterMetrics addMetric(String name, double value) {
- metrics.put(name, value);
- return this;
+ public Optional<Double> feedingBlocked() {
+ return Optional.ofNullable(metrics.get(FEEDING_BLOCKED));
}
}
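
Note (illustrative, not part of this patch): with this change ClusterMetrics becomes immutable, so all metric values are supplied at construction time instead of via addMetric. A minimal usage sketch, assuming the patched class is on the classpath and using invented cluster names and values:

    import java.util.Map;

    // Sketch only - constructs the now-immutable ClusterMetrics with a metric map
    // and reads a value back through an Optional-returning accessor.
    final class ClusterMetricsUsageSketch {

        public static void main(String[] args) {
            ClusterMetrics metrics = new ClusterMetrics("content", "content",
                    Map.of(ClusterMetrics.DOCUMENT_COUNT, 150_000.0,
                           ClusterMetrics.FEEDING_BLOCKED, 1.0));
            System.out.println(metrics.getClusterId());
            metrics.feedingBlocked().ifPresent(blocked -> System.out.println("feeding blocked: " + blocked));
        }
    }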
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Cluster.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Cluster.java
index d356f5eb89f..07de259be2f 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Cluster.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Cluster.java
@@ -107,16 +107,19 @@ public class Cluster {
private final ClusterResources from, to;
private final Instant at;
+ private final Optional<Instant> completion;
- public ScalingEvent(ClusterResources from, ClusterResources to, Instant at) {
+ public ScalingEvent(ClusterResources from, ClusterResources to, Instant at, Optional<Instant> completion) {
this.from = from;
this.to = to;
this.at = at;
+ this.completion = completion;
}
public ClusterResources from() { return from; }
public ClusterResources to() { return to; }
public Instant at() { return at; }
+ public Optional<Instant> completion() { return completion; }
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServer.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServer.java
index ed545dc35d1..81c4d7be483 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServer.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServer.java
@@ -39,7 +39,7 @@ public interface ConfigServer {
void reindex(DeploymentId deployment, List<String> clusterNames, List<String> documentTypes, boolean indexedOnly);
- Optional<ApplicationReindexing> getReindexing(DeploymentId deployment);
+ ApplicationReindexing getReindexing(DeploymentId deployment);
void disableReindexing(DeploymentId deployment);
@@ -47,7 +47,7 @@ public interface ConfigServer {
void restart(DeploymentId deployment, RestartFilter restartFilter);
- void deactivate(DeploymentId deployment) throws NotFoundException;
+ void deactivate(DeploymentId deployment);
boolean isSuspended(DeploymentId deployment);
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServerException.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServerException.java
index 4240b0d9fa6..d651eda7139 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServerException.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServerException.java
@@ -1,38 +1,32 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.api.integration.configserver;
-import java.net.URI;
-import java.util.Objects;
+import com.yahoo.slime.Inspector;
+import com.yahoo.slime.SlimeUtils;
+import org.apache.hc.core5.http.ClassicHttpRequest;
+
+import java.util.stream.Stream;
/**
- * @author Tony Vaagenes
+ * An exception due to server error, a bad request, or similar.
+ *
+ * @author jonmv
*/
public class ConfigServerException extends RuntimeException {
- private final URI serverUri;
- private final ErrorCode errorCode;
- private final String serverMessage;
+ private final ErrorCode code;
+ private final String message;
- public ConfigServerException(URI serverUri, String context, String serverMessage, ErrorCode errorCode, Throwable cause) {
- super(context + ": " + serverMessage, cause);
- this.serverUri = Objects.requireNonNull(serverUri);
- this.errorCode = Objects.requireNonNull(errorCode);
- this.serverMessage = Objects.requireNonNull(serverMessage);
+ public ConfigServerException(ErrorCode code, String message, String context) {
+ super(context + ": " + message);
+ this.code = code;
+ this.message = message;
}
- public ErrorCode getErrorCode() {
- return errorCode;
- }
+ public ErrorCode code() { return code; }
- public URI getServerUri() {
- return serverUri;
- }
+ public String message() { return message; }
- public String getServerMessage() {
- return serverMessage;
- }
-
- // TODO: Copied from Vespa. Expose these in Vespa and use them here
public enum ErrorCode {
APPLICATION_LOCK_FAILURE,
BAD_REQUEST,
@@ -46,7 +40,18 @@ public class ConfigServerException extends RuntimeException {
UNKNOWN_VESPA_VERSION,
PARENT_HOST_NOT_READY,
CERTIFICATE_NOT_READY,
- LOAD_BALANCER_NOT_READY
+ LOAD_BALANCER_NOT_READY,
+ INCOMPLETE_RESPONSE
+ }
+
+ public static ConfigServerException readException(byte[] body, String context) {
+ Inspector root = SlimeUtils.jsonToSlime(body).get();
+ String codeName = root.field("error-code").asString();
+ ErrorCode code = Stream.of(ErrorCode.values())
+ .filter(value -> value.name().equals(codeName))
+ .findAny().orElse(ErrorCode.INCOMPLETE_RESPONSE);
+ String message = root.field("message").valid() ? root.field("message").asString() : "(no message)";
+ return new ConfigServerException(code, message, context);
}
}
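
Note (illustrative, not part of this patch): readException above reads the "error-code" and "message" fields from a config server error body, falling back to INCOMPLETE_RESPONSE and "(no message)" when they are missing. A minimal sketch, assuming the patched ConfigServerException class is on the classpath and using an invented request context:

    import java.nio.charset.StandardCharsets;

    // Sketch only - shows how an error body is mapped to an ErrorCode and message.
    final class ConfigServerErrorParsingSketch {

        public static void main(String[] args) {
            byte[] body = "{\"error-code\": \"BAD_REQUEST\", \"message\": \"invalid deployment spec\"}"
                    .getBytes(StandardCharsets.UTF_8);
            ConfigServerException e = ConfigServerException.readException(body, "deploying default.default");
            System.out.println(e.code());     // BAD_REQUEST
            System.out.println(e.message());  // invalid deployment spec
        }
    }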
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/LoadBalancer.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/LoadBalancer.java
index 05bdf3c3412..d2f19f4df9f 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/LoadBalancer.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/LoadBalancer.java
@@ -18,11 +18,11 @@ public class LoadBalancer {
private final String id;
private final ApplicationId application;
private final ClusterSpec.Id cluster;
- private final HostName hostname;
+ private final Optional<HostName> hostname;
private final State state;
private final Optional<String> dnsZone;
- public LoadBalancer(String id, ApplicationId application, ClusterSpec.Id cluster, HostName hostname, State state,
+ public LoadBalancer(String id, ApplicationId application, ClusterSpec.Id cluster, Optional<HostName> hostname, State state,
Optional<String> dnsZone) {
this.id = Objects.requireNonNull(id, "id must be non-null");
this.application = Objects.requireNonNull(application, "application must be non-null");
@@ -44,7 +44,7 @@ public class LoadBalancer {
return cluster;
}
- public HostName hostname() {
+ public Optional<HostName> hostname() {
return hostname;
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Log.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Log.java
index ba5d740d0e1..29c3253c9c0 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Log.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Log.java
@@ -11,5 +11,6 @@ public class Log {
public long time;
public String level;
public String message;
+ public boolean applicationPackage;
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Node.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Node.java
index d1078b5e30c..0f9e12d8cf2 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Node.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Node.java
@@ -64,6 +64,7 @@ public class Node {
private final Set<String> additionalIpAddresses;
private final String openStackId;
private final Optional<String> switchHostname;
+ private final Optional<String> modelName;
public Node(HostName hostname, Optional<HostName> parentHostname, State state, NodeType type, NodeResources resources, Optional<ApplicationId> owner,
Version currentVersion, Version wantedVersion, Version currentOsVersion, Version wantedOsVersion,
@@ -72,7 +73,7 @@ public class Node {
int cost, String flavor, String clusterId, ClusterType clusterType, boolean retired, boolean wantToRetire, boolean wantToDeprovision,
boolean wantToRebuild, Optional<TenantName> reservedTo, Optional<ApplicationId> exclusiveTo,
DockerImage wantedDockerImage, DockerImage currentDockerImage, Map<String, JsonNode> reports, List<NodeHistory> history,
- Set<String> additionalIpAddresses, String openStackId, Optional<String> switchHostname) {
+ Set<String> additionalIpAddresses, String openStackId, Optional<String> switchHostname, Optional<String> modelName) {
this.hostname = hostname;
this.parentHostname = parentHostname;
this.state = state;
@@ -108,6 +109,7 @@ public class Node {
this.openStackId = openStackId;
this.additionalIpAddresses = additionalIpAddresses;
this.switchHostname = switchHostname;
+ this.modelName = modelName;
}
public HostName hostname() {
@@ -244,6 +246,8 @@ public class Node {
return switchHostname;
}
+ public Optional<String> modelName() { return modelName; }
+
@Override
public boolean equals(Object o) {
if (this == o) return true;
@@ -326,6 +330,7 @@ public class Node {
private Set<String> additionalIpAddresses = new HashSet<>();
private String openStackId;
private Optional<String> switchHostname = Optional.empty();
+ private Optional<String> modelName = Optional.empty();
public Builder() { }
@@ -365,6 +370,7 @@ public class Node {
this.additionalIpAddresses = node.additionalIpAddresses;
this.openStackId = node.openStackId;
this.switchHostname = node.switchHostname;
+ this.modelName = node.modelName;
}
public Builder hostname(HostName hostname) {
@@ -537,12 +543,17 @@ public class Node {
return this;
}
+ public Builder modelName(String modelName) {
+ this.modelName = Optional.ofNullable(modelName);
+ return this;
+ }
+
public Node build() {
return new Node(hostname, parentHostname, state, type, resources, owner, currentVersion, wantedVersion,
currentOsVersion, wantedOsVersion, currentFirmwareCheck, wantedFirmwareCheck, serviceState,
suspendedSince, restartGeneration, wantedRestartGeneration, rebootGeneration, wantedRebootGeneration,
cost, flavor, clusterId, clusterType, retired, wantToRetire, wantToDeprovision, wantToRebuild, reservedTo, exclusiveTo,
- wantedDockerImage, currentDockerImage, reports, history, additionalIpAddresses, openStackId, switchHostname);
+ wantedDockerImage, currentDockerImage, reports, history, additionalIpAddresses, openStackId, switchHostname, modelName);
}
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/NodeRepository.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/NodeRepository.java
index 72592e16bfd..ac4ff0a80a0 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/NodeRepository.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/NodeRepository.java
@@ -36,36 +36,21 @@ public interface NodeRepository {
void deleteNode(ZoneId zone, String hostname);
- void setState(ZoneId zone, NodeState nodeState, String nodename);
+ void setState(ZoneId zone, NodeState nodeState, String hostname);
NodeRepositoryNode getNode(ZoneId zone, String hostname);
+ // TODO: Migrate any callers to list() and remove this method
NodeList listNodes(ZoneId zone);
- NodeList listNodes(ZoneId zone, ApplicationId application);
-
- NodeList listNodes(ZoneId zone, List<HostName> hostnames);
-
/** List all nodes in given zone */
- default List<Node> list(ZoneId zone) {
- return listNodes(zone).nodes().stream()
- .map(NodeRepository::toNode)
- .collect(Collectors.toUnmodifiableList());
- }
+ List<Node> list(ZoneId zone, boolean includeDeprovisioned);
/** List all nodes in zone having given hostnames */
- default List<Node> list(ZoneId zone, List<HostName> hostnames) {
- return listNodes(zone, hostnames).nodes().stream()
- .map(NodeRepository::toNode)
- .collect(Collectors.toUnmodifiableList());
- }
+ List<Node> list(ZoneId zone, List<HostName> hostnames);
/** List all nodes in zone owned by given application */
- default List<Node> list(ZoneId zone, ApplicationId application) {
- return listNodes(zone, application).nodes().stream()
- .map(NodeRepository::toNode)
- .collect(Collectors.toUnmodifiableList());
- }
+ List<Node> list(ZoneId zone, ApplicationId application);
/** List all nodes in states, in zone owned by given application */
default List<Node> list(ZoneId zone, ApplicationId application, Set<Node.State> states) {
@@ -111,7 +96,7 @@ public interface NodeRepository {
/** Checks whether the zone has the spare capacity to remove the given hosts */
boolean isReplaceable(ZoneId zoneId, List<HostName> hostNames);
- private static Node toNode(NodeRepositoryNode node) {
+ static Node toNode(NodeRepositoryNode node) {
var application = Optional.ofNullable(node.getOwner())
.map(owner -> ApplicationId.from(owner.getTenant(), owner.getApplication(),
owner.getInstance()));
@@ -156,7 +141,8 @@ public interface NodeRepository {
node.getHistory(),
node.getAdditionalIpAddresses(),
node.getOpenStackId(),
- Optional.ofNullable(node.getSwitchHostname()));
+ Optional.ofNullable(node.getSwitchHostname()),
+ Optional.ofNullable(node.getModelName()));
}
private static String clusterIdOf(NodeMembership nodeMembership) {
@@ -200,6 +186,7 @@ public interface NodeRepository {
case failed: return Node.State.failed;
case parked: return Node.State.parked;
case breakfixed: return Node.State.breakfixed;
+ case deprovisioned: return Node.State.deprovisioned;
}
return Node.State.unknown;
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ProvisionResource.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ProvisionResource.java
index 26243a28ee0..5714e3d5f6d 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ProvisionResource.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ProvisionResource.java
@@ -2,7 +2,6 @@
package com.yahoo.vespa.hosted.controller.api.integration.noderepository;
import com.yahoo.config.provision.TenantName;
-import com.yahoo.vespa.hosted.controller.api.integration.configserver.NodeRepoStats;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
@@ -44,7 +43,7 @@ public interface ProvisionResource {
@GET
@Path("/node/")
- NodeList listNodes(@QueryParam("recursive") boolean recursive);
+ NodeList listNodes(@QueryParam("recursive") boolean recursive, @QueryParam("includeDeprovisioned") boolean includeDeprovisioned);
@GET
@Path("/node/")
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ScalingEventData.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ScalingEventData.java
index b33a7436522..1ac24695afe 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ScalingEventData.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ScalingEventData.java
@@ -7,6 +7,7 @@ import com.fasterxml.jackson.annotation.JsonProperty;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Cluster;
import java.time.Instant;
+import java.util.Optional;
/**
* @author bratseth
@@ -24,8 +25,17 @@ public class ScalingEventData {
@JsonProperty("at")
public Long at;
+ @JsonProperty("completion")
+ public Long completion;
+
public Cluster.ScalingEvent toScalingEvent() {
- return new Cluster.ScalingEvent(from.toClusterResources(), to.toClusterResources(), Instant.ofEpochMilli(at));
+ return new Cluster.ScalingEvent(from.toClusterResources(), to.toClusterResources(), Instant.ofEpochMilli(at),
+ toOptionalInstant(completion));
+ }
+
+ private Optional<Instant> toOptionalInstant(Long epochMillis) {
+ if (epochMillis == null) return Optional.empty();
+ return Optional.of(Instant.ofEpochMilli(epochMillis));
}
}
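
Since completion is a new, optional field, the helper above converts a possibly-null epoch timestamp into an Optional. A self-contained sketch of that conversion; the timestamp value is illustrative.

// Hedged, stand-alone sketch of the null-safe conversion used by ScalingEventData above.
import java.time.Instant;
import java.util.Optional;

class ScalingEventTimestamps {

    static Optional<Instant> toOptionalInstant(Long epochMillis) {
        if (epochMillis == null) return Optional.empty();
        return Optional.of(Instant.ofEpochMilli(epochMillis));
    }

    public static void main(String[] args) {
        System.out.println(toOptionalInstant(null));                // Optional.empty -> scaling still in progress
        System.out.println(toOptionalInstant(1_600_000_000_000L));  // Optional[2020-09-13T12:26:40Z]
    }
}
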
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/ChangeRequest.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/ChangeRequest.java
index 31665c8ae0a..11adc1f7bb6 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/ChangeRequest.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/ChangeRequest.java
@@ -16,7 +16,7 @@ public class ChangeRequest {
private final Approval approval;
private final Impact impact;
- private ChangeRequest(String id, ChangeRequestSource changeRequestSource, List<String> impactedSwitches, List<String> impactedHosts, Approval approval, Impact impact) {
+ public ChangeRequest(String id, ChangeRequestSource changeRequestSource, List<String> impactedSwitches, List<String> impactedHosts, Approval approval, Impact impact) {
this.id = Objects.requireNonNull(id);
this.changeRequestSource = Objects.requireNonNull(changeRequestSource);
this.impactedSwitches = Objects.requireNonNull(impactedSwitches);
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/ChangeRequestClient.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/ChangeRequestClient.java
index e8ff768927f..f8f54567bea 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/ChangeRequestClient.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/ChangeRequestClient.java
@@ -8,7 +8,8 @@ import java.util.List;
*/
public interface ChangeRequestClient {
- List<ChangeRequest> getUpcomingChangeRequests();
+ /** Gets upcoming change requests and the updated status of previously stored requests */
+ List<ChangeRequest> getChangeRequests(List<ChangeRequest> changeRequests);
void approveChangeRequests(List<ChangeRequest> changeRequests);
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/ChangeRequestSource.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/ChangeRequestSource.java
index 63f6c256766..6cf0f6e0ebd 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/ChangeRequestSource.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/ChangeRequestSource.java
@@ -2,8 +2,11 @@
package com.yahoo.vespa.hosted.controller.api.integration.vcmr;
import java.time.ZonedDateTime;
+import java.util.List;
import java.util.Objects;
+import static com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequestSource.Status.*;
+
/**
* @author olaa
*/
@@ -17,7 +20,7 @@ public class ChangeRequestSource {
private final ZonedDateTime plannedEndTime;
- private ChangeRequestSource(String system, String id, String url, Status status, ZonedDateTime plannedStartTime, ZonedDateTime plannedEndTime) {
+ public ChangeRequestSource(String system, String id, String url, Status status, ZonedDateTime plannedStartTime, ZonedDateTime plannedEndTime) {
this.system = Objects.requireNonNull(system);
this.id = Objects.requireNonNull(id);
this.url = Objects.requireNonNull(url);
@@ -80,6 +83,10 @@ public class ChangeRequestSource {
return Objects.hash(system, id, status, url, plannedStartTime, plannedEndTime);
}
+ public boolean isClosed() {
+ return List.of(CLOSED, CANCELED, COMPLETE).contains(status);
+ }
+
public enum Status {
DRAFT,
WAITING_FOR_APPROVAL,
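
The isClosed method above treats CLOSED, CANCELED and COMPLETE as terminal states. A self-contained sketch of the same check over a local stand-in enum; the real Status enum has more constants than this hunk shows.

// Hedged, stand-alone illustration of the closed-state check.
import java.util.List;

class ClosedCheck {

    enum Status { DRAFT, WAITING_FOR_APPROVAL, CANCELED, CLOSED, COMPLETE }

    static boolean isClosed(Status status) {
        return List.of(Status.CLOSED, Status.CANCELED, Status.COMPLETE).contains(status);
    }

    public static void main(String[] args) {
        System.out.println(isClosed(Status.DRAFT));    // false
        System.out.println(isClosed(Status.COMPLETE)); // true
    }
}
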
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/HostAction.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/HostAction.java
new file mode 100644
index 00000000000..5a018475e9f
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/HostAction.java
@@ -0,0 +1,72 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.api.integration.vcmr;
+
+import java.time.Instant;
+import java.util.Objects;
+
+/**
+ * @author olaa
+ *
+ * Contains planned/current action for a host impacted by a change request
+ */
+public class HostAction {
+
+ private final String hostname;
+ private final State state;
+ private final Instant lastUpdated;
+
+ public HostAction(String hostname, State state, Instant lastUpdated) {
+ this.hostname = hostname;
+ this.state = state;
+ this.lastUpdated = lastUpdated;
+ }
+
+ public String getHostname() {
+ return hostname;
+ }
+
+ public State getState() {
+ return state;
+ }
+
+ public Instant getLastUpdated() {
+ return lastUpdated;
+ }
+
+ public HostAction withState(State state) {
+ return new HostAction(hostname, state, this.state == state ? lastUpdated : Instant.now());
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ HostAction that = (HostAction) o;
+ return Objects.equals(hostname, that.hostname) &&
+ state == that.state &&
+ Objects.equals(lastUpdated, that.lastUpdated);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(hostname, state, lastUpdated);
+ }
+
+ @Override
+ public String toString() {
+ return "HostAction{" +
+ "hostname='" + hostname + '\'' +
+ ", state=" + state +
+ ", lastUpdated=" + lastUpdated +
+ '}';
+ }
+
+ public enum State {
+ REQUIRES_OPERATOR_ACTION,
+ PENDING_RETIREMENT,
+ NONE,
+ RETIRING,
+ RETIRED,
+ COMPLETE
+ }
+}
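
A brief hedged usage sketch of the class above: withState only refreshes lastUpdated when the state actually changes. The hostname and timestamps are illustrative, and the class above plus java.time.Instant are assumed to be imported.

HostAction action   = new HostAction("host1.example.com", HostAction.State.PENDING_RETIREMENT, Instant.EPOCH);
HostAction same     = action.withState(HostAction.State.PENDING_RETIREMENT); // state unchanged, keeps Instant.EPOCH
HostAction retiring = action.withState(HostAction.State.RETIRING);           // state changed, lastUpdated becomes Instant.now()
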
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/MockChangeRequestClient.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/MockChangeRequestClient.java
index e85c0afcb0e..10175f36991 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/MockChangeRequestClient.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/MockChangeRequestClient.java
@@ -13,7 +13,7 @@ public class MockChangeRequestClient implements ChangeRequestClient {
private List<ChangeRequest> approvedChangeRequests = new ArrayList<>();
@Override
- public List<ChangeRequest> getUpcomingChangeRequests() {
+ public List<ChangeRequest> getChangeRequests(List<ChangeRequest> changeRequests) {
return upcomingChangeRequests;
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/VespaChangeRequest.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/VespaChangeRequest.java
new file mode 100644
index 00000000000..a8be4a77c71
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/VespaChangeRequest.java
@@ -0,0 +1,98 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.api.integration.vcmr;
+
+import com.yahoo.config.provision.zone.ZoneId;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * @author olaa
+ */
+public class VespaChangeRequest extends ChangeRequest {
+
+ private final Status status;
+ private final ZoneId zoneId;
+ // TODO: Create applicationActionPlan
+ private final List<HostAction> hostActionPlan;
+
+ public VespaChangeRequest(String id, ChangeRequestSource changeRequestSource, List<String> impactedSwitches, List<String> impactedHosts, Approval approval, Impact impact, Status status, List<HostAction> hostActionPlan, ZoneId zoneId) {
+ super(id, changeRequestSource, impactedSwitches, impactedHosts, approval, impact);
+ this.status = status;
+ this.hostActionPlan = hostActionPlan;
+ this.zoneId = zoneId;
+ }
+ public VespaChangeRequest(ChangeRequest changeRequest, ZoneId zoneId) {
+ this(changeRequest.getId(), changeRequest.getChangeRequestSource(), changeRequest.getImpactedSwitches(),
+ changeRequest.getImpactedHosts(), changeRequest.getApproval(), changeRequest.getImpact(), Status.PENDING_ASSESSMENT, List.of(), zoneId);
+ }
+
+ public Status getStatus() {
+ return status;
+ }
+
+ public List<HostAction> getHostActionPlan() {
+ return hostActionPlan;
+ }
+
+ public ZoneId getZoneId() {
+ return zoneId;
+ }
+
+ public VespaChangeRequest withStatus(Status status) {
+ return new VespaChangeRequest(getId(), getChangeRequestSource(), getImpactedSwitches(), getImpactedHosts(), getApproval(), getImpact(), status, hostActionPlan, zoneId);
+ }
+
+ public VespaChangeRequest withSource(ChangeRequestSource source) {
+ return new VespaChangeRequest(getId(), source, getImpactedSwitches(), getImpactedHosts(), getApproval(), getImpact(), status, hostActionPlan, zoneId);
+ }
+
+ public VespaChangeRequest withApproval(Approval approval) {
+ return new VespaChangeRequest(getId(), getChangeRequestSource(), getImpactedSwitches(), getImpactedHosts(), approval, getImpact(), status, hostActionPlan, zoneId);
+ }
+
+ public VespaChangeRequest withActionPlan(List<HostAction> hostActionPlan) {
+ return new VespaChangeRequest(getId(), getChangeRequestSource(), getImpactedSwitches(), getImpactedHosts(), getApproval(), getImpact(), status, hostActionPlan, zoneId);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ if (!super.equals(o)) return false;
+ VespaChangeRequest that = (VespaChangeRequest) o;
+ return status == that.status &&
+ Objects.equals(hostActionPlan, that.hostActionPlan) &&
+ Objects.equals(zoneId, that.zoneId);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(super.hashCode(), status, hostActionPlan, zoneId);
+ }
+
+ @Override
+ public String toString() {
+ return "VespaChangeRequest{" +
+ "id='" + getId() + '\'' +
+ ", changeRequestSource=" + getChangeRequestSource() +
+ ", impactedSwitches=" + getImpactedSwitches() +
+ ", impactedHosts=" + getImpactedHosts() +
+ ", approval=" + getApproval() +
+ ", impact=" + getImpact() +
+ ", status=" + status +
+ ", zoneId=" + zoneId +
+ ", hostActionPlan=" + hostActionPlan +
+ '}';
+ }
+
+ public enum Status {
+ COMPLETED,
+ IN_PROGRESS,
+ PENDING_ACTION,
+ PENDING_ASSESSMENT,
+ REQUIRES_OPERATOR_ACTION,
+ NOOP
+ }
+}
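
A hedged sketch of how the maintainers below promote an upstream ChangeRequest into a zone-scoped VespaChangeRequest and update it immutably; upstreamRequest and the zone id are illustrative.

VespaChangeRequest stored  = new VespaChangeRequest(upstreamRequest, ZoneId.from("prod.us-east-3"));
VespaChangeRequest updated = stored.withSource(upstreamRequest.getChangeRequestSource())
                                   .withApproval(upstreamRequest.getApproval())
                                   .withStatus(VespaChangeRequest.Status.IN_PROGRESS);
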
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java
index 1387f4a4eaa..558beb20e66 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java
@@ -54,6 +54,7 @@ enum PathGroup {
tenantInfo(Matcher.tenant,
"/application/v4/tenant/{tenant}/application/",
"/application/v4/tenant/{tenant}/info/",
+ "/application/v4/tenant/{tenant}/notifications",
"/routing/v1/status/tenant/{tenant}/{*}"),
tenantKeys(Matcher.tenant,
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
index 69d723edbe8..32063bf9ba5 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
@@ -11,6 +11,7 @@ import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.log.LogLevel;
import com.yahoo.vespa.athenz.api.AthenzDomain;
import com.yahoo.vespa.athenz.api.AthenzIdentity;
import com.yahoo.vespa.athenz.api.AthenzPrincipal;
@@ -21,7 +22,6 @@ import com.yahoo.vespa.flags.FetchVector;
import com.yahoo.vespa.flags.FlagSource;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.flags.StringFlag;
-import com.yahoo.vespa.hosted.controller.application.ActivateResult;
import com.yahoo.vespa.hosted.controller.api.application.v4.model.DeploymentData;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.identifiers.InstanceId;
@@ -35,7 +35,6 @@ import com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServ
import com.yahoo.vespa.hosted.controller.api.integration.configserver.ContainerEndpoint;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Log;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
-import com.yahoo.vespa.hosted.controller.api.integration.configserver.NotFoundException;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationStore;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ArtifactRepository;
@@ -43,6 +42,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterId;
import com.yahoo.vespa.hosted.controller.api.integration.noderepository.RestartFilter;
import com.yahoo.vespa.hosted.controller.api.integration.secrets.TenantSecretStore;
+import com.yahoo.vespa.hosted.controller.application.ActivateResult;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackageValidator;
import com.yahoo.vespa.hosted.controller.application.Deployment;
@@ -58,6 +58,8 @@ import com.yahoo.vespa.hosted.controller.deployment.DeploymentTrigger;
import com.yahoo.vespa.hosted.controller.deployment.JobStatus;
import com.yahoo.vespa.hosted.controller.deployment.Run;
import com.yahoo.vespa.hosted.controller.deployment.RunStatus;
+import com.yahoo.vespa.hosted.controller.notification.Notification;
+import com.yahoo.vespa.hosted.controller.notification.NotificationSource;
import com.yahoo.vespa.hosted.controller.persistence.CuratorDb;
import com.yahoo.vespa.hosted.controller.security.AccessControl;
import com.yahoo.vespa.hosted.controller.security.Credentials;
@@ -85,6 +87,7 @@ import java.util.TreeMap;
import java.util.function.Consumer;
import java.util.logging.Level;
import java.util.logging.Logger;
+import java.util.stream.Collectors;
import static com.yahoo.vespa.hosted.controller.api.integration.configserver.Node.State.active;
import static com.yahoo.vespa.hosted.controller.api.integration.configserver.Node.State.reserved;
@@ -183,8 +186,7 @@ public class ApplicationController {
/** Returns the reindexing status for the given application in the given zone. */
public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) {
- return configServer.getReindexing(new DeploymentId(id, zoneId))
- .orElseThrow(() -> new NotExistsException("Reindexing status not found for " + id + " in " + zoneId));
+ return configServer.getReindexing(new DeploymentId(id, zoneId));
}
/** Enables reindexing for the given application in the given zone. */
@@ -392,6 +394,22 @@ public class ApplicationController {
// Record the quota usage for this application
var quotaUsage = deploymentQuotaUsage(zone, job.application());
+ // For direct deployments use the full application ID; otherwise use just the tenant and application as
+ // the source, since it is the same application and should therefore carry the same warnings
+ NotificationSource source = zone.environment().isManuallyDeployed() ?
+ NotificationSource.from(job.application()) : NotificationSource.from(applicationId);
+ List<String> warnings = Optional.ofNullable(result.prepareResponse().log)
+ .map(logs -> logs.stream()
+ .filter(log -> log.applicationPackage)
+ .filter(log -> LogLevel.parse(log.level).intValue() >= Level.WARNING.intValue())
+ .map(log -> log.message)
+ .sorted()
+ .distinct()
+ .collect(Collectors.toList()))
+ .orElseGet(List::of);
+ if (warnings.isEmpty()) controller.notificationsDb().removeNotification(source, Notification.Type.APPLICATION_PACKAGE_WARNING);
+ else controller.notificationsDb().setNotification(source, Notification.Type.APPLICATION_PACKAGE_WARNING, warnings);
+
lockApplicationOrThrow(applicationId, application ->
store(application.with(job.application().instance(),
instance -> instance.withNewDeployment(zone, revision, platform,
@@ -562,6 +580,7 @@ public class ApplicationController {
curator.removeApplication(id);
controller.jobController().collectGarbage();
+ controller.notificationsDb().removeNotifications(NotificationSource.from(id));
log.info("Deleted " + id);
});
}
@@ -589,6 +608,7 @@ public class ApplicationController {
controller.routing().removeEndpointsInDns(application.get(), instanceId.instance());
curator.writeApplication(application.without(instanceId.instance()).get());
controller.jobController().collectGarbage();
+ controller.notificationsDb().removeNotifications(NotificationSource.from(instanceId));
log.info("Deleted " + instanceId);
});
}
@@ -678,8 +698,6 @@ public class ApplicationController {
DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone);
try {
configServer.deactivate(id);
- } catch (NotFoundException ignored) {
- // ok; already gone
} finally {
controller.routing().policies().refresh(application.get().id().instance(instanceName), application.get().deploymentSpec(), zone);
if (zone.environment().isManuallyDeployed())
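
The warning extraction added above filters the prepare-response log down to application-package entries at WARNING or above, de-duplicated and sorted. A self-contained sketch of that filtering, using a local stand-in for the log entry type and java.util.logging.Level instead of Vespa's LogLevel.

// Hedged, stand-alone sketch of the application-package warning extraction.
import java.util.List;
import java.util.logging.Level;
import java.util.stream.Collectors;

class PackageWarnings {

    // Local stand-in for the prepare-response log entry type
    static class LogEntry {
        final boolean applicationPackage;
        final String level;
        final String message;

        LogEntry(boolean applicationPackage, String level, String message) {
            this.applicationPackage = applicationPackage;
            this.level = level;
            this.message = message;
        }
    }

    static List<String> warningsOf(List<LogEntry> log) {
        return log.stream()
                  .filter(entry -> entry.applicationPackage)
                  .filter(entry -> Level.parse(entry.level).intValue() >= Level.WARNING.intValue())
                  .map(entry -> entry.message)
                  .sorted()
                  .distinct()
                  .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        List<LogEntry> log = List.of(
                new LogEntry(true, "WARNING", "Deprecated element used in services.xml"),
                new LogEntry(true, "INFO", "Uploading application package"),
                new LogEntry(false, "WARNING", "Not from the application package"));
        System.out.println(warningsOf(log)); // [Deprecated element used in services.xml]
    }
}
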
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java
index 5b2c2d74d20..2de8fa6457a 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java
@@ -23,6 +23,7 @@ import com.yahoo.vespa.hosted.controller.config.ControllerConfig;
import com.yahoo.vespa.hosted.controller.deployment.JobController;
import com.yahoo.vespa.hosted.controller.dns.NameServiceForwarder;
import com.yahoo.vespa.hosted.controller.metric.ConfigServerMetrics;
+import com.yahoo.vespa.hosted.controller.notification.NotificationsDb;
import com.yahoo.vespa.hosted.controller.persistence.CuratorDb;
import com.yahoo.vespa.hosted.controller.persistence.JobControlFlags;
import com.yahoo.vespa.hosted.controller.security.AccessControl;
@@ -82,6 +83,7 @@ public class Controller extends AbstractComponent {
private final ControllerConfig controllerConfig;
private final SecretStore secretStore;
private final CuratorArchiveBucketDb archiveBucketDb;
+ private final NotificationsDb notificationsDb;
/**
* Creates a controller
@@ -118,6 +120,7 @@ public class Controller extends AbstractComponent {
auditLogger = new AuditLogger(curator, clock);
jobControl = new JobControl(new JobControlFlags(curator, flagSource));
archiveBucketDb = new CuratorArchiveBucketDb(this);
+ notificationsDb = new NotificationsDb(this);
this.controllerConfig = controllerConfig;
this.secretStore = secretStore;
@@ -306,4 +309,8 @@ public class Controller extends AbstractComponent {
public CuratorArchiveBucketDb archiveBucketDb() {
return archiveBucketDb;
}
+
+ public NotificationsDb notificationsDb() {
+ return notificationsDb;
+ }
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/TenantController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/TenantController.java
index f3e192aef90..4b102ef3077 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/TenantController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/TenantController.java
@@ -10,6 +10,7 @@ import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.hosted.controller.api.identifiers.TenantId;
import com.yahoo.vespa.hosted.controller.application.SystemApplication;
import com.yahoo.vespa.hosted.controller.concurrent.Once;
+import com.yahoo.vespa.hosted.controller.notification.NotificationSource;
import com.yahoo.vespa.hosted.controller.persistence.CuratorDb;
import com.yahoo.vespa.hosted.controller.security.AccessControl;
import com.yahoo.vespa.hosted.controller.security.Credentials;
@@ -171,6 +172,7 @@ public class TenantController {
curator.removeTenant(tenant);
accessControl.deleteTenant(tenant, credentials);
+ controller.notificationsDb().removeNotifications(NotificationSource.from(tenant));
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
index 548f6a9aaf2..0458a64c5a9 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
@@ -25,9 +25,9 @@ import com.yahoo.security.X509CertificateUtils;
import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.Instance;
-import com.yahoo.vespa.hosted.controller.application.ActivateResult;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.integration.LogEntry;
+import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificateException;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServerException;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.PrepareResponse;
@@ -39,13 +39,15 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterCloud;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.DeploymentFailureMails;
import com.yahoo.vespa.hosted.controller.api.integration.organization.Mail;
+import com.yahoo.vespa.hosted.controller.application.ActivateResult;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.Endpoint;
import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
-import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificateException;
import com.yahoo.vespa.hosted.controller.config.ControllerConfig;
import com.yahoo.vespa.hosted.controller.maintenance.JobRunner;
+import com.yahoo.vespa.hosted.controller.notification.Notification;
+import com.yahoo.vespa.hosted.controller.notification.NotificationSource;
import com.yahoo.vespa.hosted.controller.routing.RoutingPolicyId;
import com.yahoo.yolean.Exceptions;
@@ -67,6 +69,7 @@ import java.util.Locale;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
+import java.util.function.Consumer;
import java.util.function.Supplier;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -225,31 +228,31 @@ public class InternalStepRunner implements StepRunner {
// Retry certain failures for up to one hour.
Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
? Optional.of(deploymentFailed) : Optional.empty();
- switch (e.getErrorCode()) {
+ switch (e.code()) {
case CERTIFICATE_NOT_READY:
logger.log("Waiting for certificate to become ready on config server: New application, or old one has expired");
if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) {
- logger.log("Certificate did not become available on config server within (" + timeouts.endpointCertificate() + ")");
+ logger.log(WARNING, "Certificate did not become available on config server within (" + timeouts.endpointCertificate() + ")");
return Optional.of(RunStatus.endpointCertificateTimeout);
}
return result;
case ACTIVATION_CONFLICT:
case APPLICATION_LOCK_FAILURE:
- logger.log("Deployment failed with possibly transient error " + e.getErrorCode() +
+ logger.log("Deployment failed with possibly transient error " + e.code() +
", will retry: " + e.getMessage());
return result;
case LOAD_BALANCER_NOT_READY:
case PARENT_HOST_NOT_READY:
- logger.log(e.getServerMessage());
+ logger.log(e.message());
return result;
case OUT_OF_CAPACITY:
- logger.log(e.getServerMessage());
+ logger.log(e.message());
return controller.system().isCd() && startTime.plus(timeouts.capacity()).isAfter(controller.clock().instant())
? Optional.empty()
: Optional.of(outOfCapacity);
case INVALID_APPLICATION_PACKAGE:
case BAD_REQUEST:
- logger.log(e.getMessage());
+ logger.log(WARNING, e.getMessage());
return Optional.of(deploymentFailed);
}
@@ -261,7 +264,7 @@ public class InternalStepRunner implements StepRunner {
// Same as CERTIFICATE_NOT_READY above, only from the controller
logger.log("Waiting for certificate to become valid: New application, or old one has expired");
if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) {
- logger.log("Controller could not validate certificate within " +
+ logger.log(WARNING, "Controller could not validate certificate within " +
timeouts.endpointCertificate() + ": " + Exceptions.toMessageString(e));
return Optional.of(RunStatus.endpointCertificateTimeout);
}
@@ -596,7 +599,7 @@ public class InternalStepRunner implements StepRunner {
testerCertificate.get().checkValidity(Date.from(controller.clock().instant()));
}
catch (CertificateExpiredException | CertificateNotYetValidException e) {
- logger.log(INFO, "Tester certificate expired before tests could complete.");
+ logger.log(WARNING, "Tester certificate expired before tests could complete.");
return Optional.of(aborted);
}
}
@@ -671,7 +674,8 @@ public class InternalStepRunner implements StepRunner {
try {
controller.jobController().active(id).ifPresent(run -> {
if (run.hasFailed())
- sendNotification(run, logger);
+ sendEmailNotification(run, logger);
+ updateConsoleNotification(run);
});
}
catch (IllegalStateException e) {
@@ -682,7 +686,7 @@ public class InternalStepRunner implements StepRunner {
}
/** Sends a mail with a notification of a failed run, if one should be sent. */
- private void sendNotification(Run run, DualLogger logger) {
+ private void sendEmailNotification(Run run, DualLogger logger) {
Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
boolean newCommit = application.require(run.id().application().instance()).change().application()
@@ -702,8 +706,39 @@ public class InternalStepRunner implements StepRunner {
mailOf(run, recipients).ifPresent(controller.serviceRegistry().mailer()::send);
}
catch (RuntimeException e) {
- logger.log(INFO, "Exception trying to send mail for " + run.id(), e);
+ logger.log(WARNING, "Exception trying to send mail for " + run.id(), e);
+ }
+ }
+
+ private void updateConsoleNotification(Run run) {
+ NotificationSource source = NotificationSource.from(run.id());
+ Consumer<String> updater = msg -> controller.notificationsDb().setNotification(source, Notification.Type.DEPLOYMENT_FAILURE, msg);
+ switch (run.status()) {
+ case aborted: return; // wait and see how the next run goes.
+ case running:
+ case success:
+ controller.notificationsDb().removeNotification(source, Notification.Type.DEPLOYMENT_FAILURE);
+ return;
+ case outOfCapacity:
+ if ( ! run.id().type().environment().isTest()) updater.accept("lack of capacity. Please contact the Vespa team to request more!");
+ return;
+ case deploymentFailed:
+ updater.accept("invalid application configuration, or timeout of other deployments of the same application");
+ return;
+ case installationFailed:
+ updater.accept("nodes were not able to start the new Java containers");
+ return;
+ case testFailure:
+ updater.accept("one or more verification tests against the deployment failed");
+ return;
+ case error:
+ case endpointCertificateTimeout:
+ break;
+ default:
+ logger.log(WARNING, "Don't know what to set console notification to for run status '" + run.status() + "'");
}
+ updater.accept("something in the framework went wrong. Such errors are " +
+ "usually transient. Please contact the Vespa team if the problem persists!");
}
private Optional<Mail> mailOf(Run run, List<String> recipients) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java
index 65d3f666309..3dc88d5d6d2 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java
@@ -532,8 +532,6 @@ public class JobController {
var zone = type.zone(controller.system());
try {
controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), zone));
- } catch (NotFoundException ignored) {
- // Already gone -- great!
} finally {
// Passing an empty DeploymentSpec here is fine as it's used for registering global endpoint names, and
// tester instances have none.
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeManagementAssessor.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeManagementAssessor.java
index a84d1c3ad7e..95432e3acbc 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeManagementAssessor.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeManagementAssessor.java
@@ -104,7 +104,7 @@ public class ChangeManagementAssessor {
private List<String> toParentHosts(List<String> impactedHostnames, List<NodeRepositoryNode> allNodes) {
return impactedHostnames.stream()
- .map(hostname ->
+ .flatMap(hostname ->
allNodes.stream()
.filter(node -> List.of(NodeType.config, NodeType.proxy, NodeType.host).contains(node.getType()))
.filter(node -> hostname.equals(node.getHostname()) || hostname.equals(node.getParentHostname()))
@@ -112,7 +112,7 @@ public class ChangeManagementAssessor {
if (node.getType() == NodeType.host)
return node.getHostname();
return node.getParentHostname();
- }).findFirst().orElseThrow()
+ }).findFirst().stream()
)
.collect(Collectors.toList());
}
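
The map-to-flatMap change above makes hostnames without a matching node drop silently out of the result instead of failing on orElseThrow. A self-contained illustration of the Optional.stream() flattening; the hostnames and parent mapping are illustrative.

// Hedged, stand-alone illustration of flatMap + Optional.stream() skipping unmatched entries.
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;

class ParentHostLookup {

    public static void main(String[] args) {
        Map<String, String> parentOf = Map.of("node1", "host1", "node2", "host2");
        List<String> impacted = List.of("node1", "unknown-node", "node2");

        List<String> parents = impacted.stream()
                .flatMap(hostname -> Optional.ofNullable(parentOf.get(hostname)).stream())
                .collect(Collectors.toList());

        System.out.println(parents); // [host1, host2] -- "unknown-node" is skipped rather than throwing
    }
}
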
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeRequestMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeRequestMaintainer.java
index ca9ebe132fd..0ebf4cbc2d2 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeRequestMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeRequestMaintainer.java
@@ -1,13 +1,25 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.maintenance;
+import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.Controller;
+import com.yahoo.vespa.hosted.controller.api.integration.configserver.NodeRepository;
import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequest;
import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequestClient;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequestSource;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.VespaChangeRequest;
+import com.yahoo.vespa.hosted.controller.persistence.CuratorDb;
import java.time.Duration;
+import java.time.ZonedDateTime;
+import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.function.Function;
import java.util.function.Predicate;
import java.util.logging.Logger;
import java.util.stream.Collectors;
@@ -20,27 +32,29 @@ public class ChangeRequestMaintainer extends ControllerMaintainer {
private final Logger logger = Logger.getLogger(ChangeRequestMaintainer.class.getName());
private final ChangeRequestClient changeRequestClient;
private final SystemName system;
+ private final CuratorDb curator;
+ private final NodeRepository nodeRepository;
public ChangeRequestMaintainer(Controller controller, Duration interval) {
super(controller, interval, null, SystemName.allOf(Predicate.not(SystemName::isPublic)));
this.changeRequestClient = controller.serviceRegistry().changeRequestClient();
this.system = controller.system();
+ this.curator = controller.curator();
+ this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
}
@Override
protected boolean maintain() {
- var changeRequests = changeRequestClient.getUpcomingChangeRequests();
+ var currentChangeRequests = pruneOldChangeRequests();
+ var changeRequests = changeRequestClient.getChangeRequests(currentChangeRequests);
- if (!changeRequests.isEmpty()) {
- logger.info(() -> "Found the following upcoming change requests:");
- changeRequests.forEach(changeRequest -> logger.info(changeRequest::toString));
- }
-
- if (system.equals(SystemName.main))
+ logger.fine(() -> "Found requests: " + changeRequests);
+ storeChangeRequests(changeRequests);
+ if (system.equals(SystemName.main)) {
approveChanges(changeRequests);
+ }
- // TODO: Store in curator?
return true;
}
@@ -50,6 +64,75 @@ public class ChangeRequestMaintainer extends ControllerMaintainer {
.filter(changeRequest -> changeRequest.getApproval() == ChangeRequest.Approval.REQUESTED)
.collect(Collectors.toList());
+ logger.fine(() -> "Approving " + unapprovedRequests);
changeRequestClient.approveChangeRequests(unapprovedRequests);
}
+
+ private void storeChangeRequests(List<ChangeRequest> changeRequests) {
+ var existingChangeRequests = curator.readChangeRequests()
+ .stream()
+ .collect(Collectors.toMap(ChangeRequest::getId, Function.identity()));
+
+ var hostsByZone = hostsByZone();
+ // Create or update requests in curator
+ try (var lock = curator.lockChangeRequests()) {
+ changeRequests.forEach(changeRequest -> {
+ var optionalZone = inferZone(changeRequest, hostsByZone);
+ optionalZone.ifPresent(zone -> {
+ var vcmr = existingChangeRequests
+ .getOrDefault(changeRequest.getId(), new VespaChangeRequest(changeRequest, zone))
+ .withSource(changeRequest.getChangeRequestSource())
+ .withApproval(changeRequest.getApproval());
+ logger.fine(() -> "Storing " + vcmr);
+ curator.writeChangeRequest(vcmr);
+ });
+ });
+ }
+ }
+
+ // Deletes closed change requests older than 7 days and returns the remaining (current) requests
+ private List<ChangeRequest> pruneOldChangeRequests() {
+ List<ChangeRequest> currentChangeRequests = new ArrayList<>();
+
+ try (var lock = curator.lockChangeRequests()) {
+ for (var changeRequest : curator.readChangeRequests()) {
+ if (shouldDeleteChangeRequest(changeRequest.getChangeRequestSource())) {
+ curator.deleteChangeRequest(changeRequest);
+ } else {
+ currentChangeRequests.add(changeRequest);
+ }
+ }
+ }
+ return currentChangeRequests;
+ }
+
+ private Map<ZoneId, List<String>> hostsByZone() {
+ return controller().zoneRegistry()
+ .zones()
+ .reachable()
+ .in(Environment.prod)
+ .ids()
+ .stream()
+ .collect(Collectors.toMap(
+ zone -> zone,
+ zone -> nodeRepository.list(zone, false)
+ .stream()
+ .map(node -> node.hostname().value())
+ .collect(Collectors.toList())
+ ));
+ }
+
+ private Optional<ZoneId> inferZone(ChangeRequest changeRequest, Map<ZoneId, List<String>> hostsByZone) {
+ return hostsByZone.entrySet().stream()
+ .filter(entry -> !Collections.disjoint(entry.getValue(), changeRequest.getImpactedHosts()))
+ .map(Map.Entry::getKey)
+ .findFirst();
+ }
+
+ private boolean shouldDeleteChangeRequest(ChangeRequestSource source) {
+ return source.isClosed() &&
+ source.getPlannedStartTime()
+ .plus(Duration.ofDays(7))
+ .isBefore(ZonedDateTime.now());
+ }
}
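
The inferZone method above maps a change request to the first zone whose host list overlaps its impacted hosts, using Collections.disjoint as the overlap test. A self-contained sketch of that inference; the zone names and hostnames are illustrative.

// Hedged, stand-alone sketch of overlap-based zone inference.
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;

class ZoneInference {

    static Optional<String> inferZone(List<String> impactedHosts, Map<String, List<String>> hostsByZone) {
        return hostsByZone.entrySet().stream()
                .filter(entry -> !Collections.disjoint(entry.getValue(), impactedHosts))
                .map(Map.Entry::getKey)
                .findFirst();
    }

    public static void main(String[] args) {
        Map<String, List<String>> hostsByZone = Map.of(
                "prod.us-east-3", List.of("host1", "host2"),
                "prod.eu-west-1", List.of("host3"));
        System.out.println(inferZone(List.of("host3", "host9"), hostsByZone)); // Optional[prod.eu-west-1]
    }
}
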
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CloudEventReporter.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CloudEventReporter.java
index 5ab2ca4a5d6..d923db936cb 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CloudEventReporter.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CloudEventReporter.java
@@ -54,7 +54,7 @@ public class CloudEventReporter extends ControllerMaintainer {
/** Deprovision any host affected by given event */
private void deprovisionAffectedHosts(String region, CloudEvent event) {
for (var zone : zonesByCloudNativeRegion.get(region)) {
- for (var node : nodeRepository.list(zone.getId())) {
+ for (var node : nodeRepository.list(zone.getId(), false)) {
if (!affects(node, event)) continue;
log.info("Retiring and deprovisioning " + node.hostname().value() + " in " + zone.getId() +
": Affected by maintenance event " + event.instanceEventId);
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java
index 19199c5a281..015da1faae8 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java
@@ -61,7 +61,7 @@ public class ControllerMaintenance extends AbstractComponent {
maintainers.add(new SystemRoutingPolicyMaintainer(controller, intervals.systemRoutingPolicyMaintainer));
maintainers.add(new ApplicationMetaDataGarbageCollector(controller, intervals.applicationMetaDataGarbageCollector));
maintainers.add(new ContainerImageExpirer(controller, intervals.containerImageExpirer));
- maintainers.add(new HostSwitchUpdater(controller, intervals.hostSwitchUpdater));
+ maintainers.add(new HostInfoUpdater(controller, intervals.hostSwitchUpdater));
maintainers.add(new ReindexingTriggerer(controller, intervals.reindexingTriggerer));
maintainers.add(new EndpointCertificateMaintainer(controller, intervals.endpointCertificateMaintainer));
maintainers.add(new TrafficShareUpdater(controller, intervals.trafficFractionUpdater));
@@ -69,6 +69,7 @@ public class ControllerMaintenance extends AbstractComponent {
maintainers.add(new ArchiveAccessMaintainer(controller, metric, intervals.archiveAccessMaintainer));
maintainers.add(new TenantRoleMaintainer(controller, intervals.tenantRoleMaintainer));
maintainers.add(new ChangeRequestMaintainer(controller, intervals.changeRequestMaintainer));
+ maintainers.add(new VCMRMaintainer(controller, intervals.vcmrMaintainer));
}
public Upgrader upgrader() { return upgrader; }
@@ -123,6 +124,7 @@ public class ControllerMaintenance extends AbstractComponent {
private final Duration archiveAccessMaintainer;
private final Duration tenantRoleMaintainer;
private final Duration changeRequestMaintainer;
+ private final Duration vcmrMaintainer;
public Intervals(SystemName system) {
this.system = Objects.requireNonNull(system);
@@ -153,7 +155,8 @@ public class ControllerMaintenance extends AbstractComponent {
this.archiveUriUpdater = duration(5, MINUTES);
this.archiveAccessMaintainer = duration(10, MINUTES);
this.tenantRoleMaintainer = duration(5, MINUTES);
- this.changeRequestMaintainer = duration(12, HOURS);
+ this.changeRequestMaintainer = duration(1, HOURS);
+ this.vcmrMaintainer = duration(1, HOURS);
}
private Duration duration(long amount, TemporalUnit unit) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/HostSwitchUpdater.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/HostInfoUpdater.java
index 8e7a364b5f3..83ccda422e6 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/HostSwitchUpdater.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/HostInfoUpdater.java
@@ -12,6 +12,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.noderepository.NodeRepo
import java.time.Duration;
import java.util.EnumSet;
import java.util.Map;
+import java.util.Optional;
import java.util.function.Function;
import java.util.logging.Logger;
import java.util.regex.Matcher;
@@ -19,18 +20,19 @@ import java.util.regex.Pattern;
import java.util.stream.Collectors;
/**
- * Ensures that the switch information for all hosts is up to date.
+ * Ensures that the host information for all hosts is up to date.
*
* @author mpolden
+ * @author bjormel
*/
-public class HostSwitchUpdater extends ControllerMaintainer {
+public class HostInfoUpdater extends ControllerMaintainer {
- private static final Logger LOG = Logger.getLogger(HostSwitchUpdater.class.getName());
+ private static final Logger LOG = Logger.getLogger(HostInfoUpdater.class.getName());
private static final Pattern HOST_PATTERN = Pattern.compile("^(proxy|cfg|controller)host(.+)$");
private final NodeRepository nodeRepository;
- public HostSwitchUpdater(Controller controller, Duration interval) {
+ public HostInfoUpdater(Controller controller, Duration interval) {
super(controller, interval, null, EnumSet.of(SystemName.cd, SystemName.main));
this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
}
@@ -40,28 +42,35 @@ public class HostSwitchUpdater extends ControllerMaintainer {
Map<String, NodeEntity> nodeEntities = controller().serviceRegistry().entityService().listNodes().stream()
.collect(Collectors.toMap(NodeEntity::hostname,
Function.identity()));
- int nodesUpdated = 0;
+ int hostsUpdated = 0;
try {
for (var zone : controller().zoneRegistry().zones().controllerUpgraded().all().ids()) {
- for (var node : nodeRepository.list(zone)) {
+ for (var node : nodeRepository.list(zone, false)) {
if (!node.type().isHost()) continue;
NodeEntity nodeEntity = nodeEntities.get(registeredHostnameOf(node));
- if (!shouldUpdate(node, nodeEntity)) continue;
+ if (!shouldUpdateSwitch(node, nodeEntity) && !shouldUpdateModel(node, nodeEntity)) continue;
NodeRepositoryNode updatedNode = new NodeRepositoryNode();
- updatedNode.setSwitchHostname(nodeEntity.switchHostname().get());
+ nodeEntity.switchHostname().ifPresent(updatedNode::setSwitchHostname);
+ buildModelName(nodeEntity).ifPresent(updatedNode::setModelName);
nodeRepository.patchNode(zone, node.hostname().value(), updatedNode);
- nodesUpdated++;
+ hostsUpdated++;
}
}
} finally {
- if (nodesUpdated > 0) {
- LOG.info("Updated switch hostname for " + nodesUpdated + " node(s)");
+ if (hostsUpdated > 0) {
+ LOG.info("Updated information for " + hostsUpdated + " hosts(s)");
}
}
return true;
}
+ private static Optional<String> buildModelName(NodeEntity nodeEntity) {
+ if (nodeEntity.manufacturer().isEmpty() || nodeEntity.model().isEmpty())
+ return Optional.empty();
+ return Optional.of(nodeEntity.manufacturer().get() + " " + nodeEntity.model().get());
+ }
+
/** Returns the hostname that given host is registered under in the {@link EntityService} */
private static String registeredHostnameOf(Node host) {
String hostname = host.hostname().value();
@@ -71,10 +80,17 @@ public class HostSwitchUpdater extends ControllerMaintainer {
return matcher.replaceFirst("$1$2");
}
- private static boolean shouldUpdate(Node node, NodeEntity nodeEntity) {
+ private static boolean shouldUpdateSwitch(Node node, NodeEntity nodeEntity) {
if (nodeEntity == null) return false;
- if (nodeEntity.switchHostname().isEmpty()) return false;
+ if (nodeEntity.switchHostname().isEmpty()) return false;
return !node.switchHostname().equals(nodeEntity.switchHostname());
}
+ private static boolean shouldUpdateModel(Node node, NodeEntity nodeEntity) {
+ if (nodeEntity == null) return false;
+ if (nodeEntity.model().isEmpty()) return false;
+ if (nodeEntity.manufacturer().isEmpty()) return false;
+ return !node.modelName().equals(buildModelName(nodeEntity));
+ }
+
}
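
The buildModelName helper above only produces a model name when both manufacturer and model are known. A self-contained sketch of that behaviour; the values are illustrative.

// Hedged, stand-alone sketch of combining manufacturer and model into a model name.
import java.util.Optional;

class ModelName {

    static Optional<String> buildModelName(Optional<String> manufacturer, Optional<String> model) {
        if (manufacturer.isEmpty() || model.isEmpty()) return Optional.empty();
        return Optional.of(manufacturer.get() + " " + model.get());
    }

    public static void main(String[] args) {
        System.out.println(buildModelName(Optional.of("Acme"), Optional.of("R2000"))); // Optional[Acme R2000]
        System.out.println(buildModelName(Optional.of("Acme"), Optional.empty()));     // Optional.empty
    }
}
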
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java
index fd375c80218..b40f2232504 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java
@@ -94,7 +94,7 @@ public class ResourceMeterMaintainer extends ControllerMaintainer {
.ofCloud(CloudName.from("aws"))
.reachable().zones().stream()
.map(ZoneApi::getId)
- .map(zoneId -> createResourceSnapshotsFromNodes(zoneId, nodeRepository.list(zoneId)))
+ .map(zoneId -> createResourceSnapshotsFromNodes(zoneId, nodeRepository.list(zoneId, false)))
.flatMap(Collection::stream)
.collect(Collectors.toList());
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceTagMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceTagMaintainer.java
index 634e5ba10ce..c7bf7e765ed 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceTagMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceTagMaintainer.java
@@ -43,7 +43,7 @@ public class ResourceTagMaintainer extends ControllerMaintainer {
private Map<HostName, Optional<ApplicationId>> getTenantOfParentHosts(ZoneId zoneId) {
return controller().serviceRegistry().configServer().nodeRepository()
- .list(zoneId)
+ .list(zoneId, false)
.stream()
.filter(node -> node.type().isHost())
.collect(Collectors.toMap(
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/VCMRMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/VCMRMaintainer.java
new file mode 100644
index 00000000000..a8de70a56a2
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/VCMRMaintainer.java
@@ -0,0 +1,256 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.maintenance;
+
+import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.NodeType;
+import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.vespa.hosted.controller.Controller;
+import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
+import com.yahoo.vespa.hosted.controller.api.integration.configserver.NodeRepository;
+import com.yahoo.vespa.hosted.controller.api.integration.noderepository.NodeRepositoryNode;
+import com.yahoo.vespa.hosted.controller.api.integration.noderepository.NodeState;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequest.Impact;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.HostAction;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.HostAction.State;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.VespaChangeRequest;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.VespaChangeRequest.Status;
+import com.yahoo.vespa.hosted.controller.persistence.CuratorDb;
+import com.yahoo.yolean.Exceptions;
+
+import java.time.Duration;
+import java.time.Instant;
+import java.time.ZonedDateTime;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.function.Predicate;
+import java.util.logging.Logger;
+import java.util.stream.Collectors;
+
+/**
+ * @author olaa
+ *
+ * Maintains the status and execution of VCMRs.
+ * For now, it only retires all affected tenant hosts, and only if zone capacity allows it.
+ */
+public class VCMRMaintainer extends ControllerMaintainer {
+
+ private final Logger logger = Logger.getLogger(VCMRMaintainer.class.getName());
+ private static final Duration ALLOWED_RETIREMENT_TIME = Duration.ofHours(60);
+ private static final Duration ALLOWED_POSTPONEMENT_TIME = Duration.ofDays(7);
+ private final CuratorDb curator;
+ private final NodeRepository nodeRepository;
+
+ public VCMRMaintainer(Controller controller, Duration interval) {
+ super(controller, interval, null, SystemName.allOf(Predicate.not(SystemName::isPublic)));
+ this.curator = controller.curator();
+ this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
+ }
+
+ @Override
+ protected boolean maintain() {
+ var changeRequests = curator.readChangeRequests()
+ .stream()
+ .filter(shouldUpdate())
+ .collect(Collectors.toList());
+
+ var nodesByZone = nodesByZone();
+
+ changeRequests.forEach(changeRequest -> {
+ var nodes = impactedNodes(nodesByZone, changeRequest);
+ var nextActions = getNextActions(nodes, changeRequest);
+ var status = getStatus(nextActions, changeRequest);
+
+ try (var lock = curator.lockChangeRequests()) {
+ // Read the vcmr again, in case the source status has been updated
+ curator.readChangeRequest(changeRequest.getId())
+ .ifPresent(vcmr -> curator.writeChangeRequest(vcmr.withActionPlan(nextActions)
+ .withStatus(status)));
+ }
+ });
+
+ return true;
+ }
+
+ /**
+ * Status is based on:
+ * 1. Whether the source has reportedly closed the request
+ * 2. Whether any host requires operator action
+ * 3. Whether any host has started/finished retiring
+ */
+ private Status getStatus(List<HostAction> nextActions, VespaChangeRequest changeRequest) {
+ if (changeRequest.getChangeRequestSource().isClosed()) {
+ return Status.COMPLETED;
+ }
+
+ var byActionState = nextActions.stream().collect(Collectors.groupingBy(HostAction::getState, Collectors.counting()));
+
+ if (byActionState.getOrDefault(State.REQUIRES_OPERATOR_ACTION, 0L) > 0) {
+ return Status.REQUIRES_OPERATOR_ACTION;
+ }
+
+ if (byActionState.getOrDefault(State.RETIRING, 0L) + byActionState.getOrDefault(State.RETIRED, 0L) > 0) {
+ return Status.IN_PROGRESS;
+ }
+
+ if (byActionState.getOrDefault(State.PENDING_RETIREMENT, 0L) > 0) {
+ return Status.PENDING_ACTION;
+ }
+
+ return Status.NOOP;
+ }
+
+ private List<HostAction> getNextActions(List<Node> nodes, VespaChangeRequest changeRequest) {
+ var spareCapacity = hasSpareCapacity(changeRequest.getZoneId(), nodes);
+ return nodes.stream()
+ .map(node -> nextAction(node, changeRequest, spareCapacity))
+ .collect(Collectors.toList());
+ }
+
+ // Get the superset of impacted nodes by also including nodes on impacted switches
+ private List<Node> impactedNodes(Map<ZoneId, List<Node>> nodesByZone, VespaChangeRequest changeRequest) {
+ return nodesByZone.get(changeRequest.getZoneId())
+ .stream()
+ .filter(isImpacted(changeRequest))
+ .collect(Collectors.toList());
+ }
+
+ private Optional<HostAction> getPreviousAction(Node node, VespaChangeRequest changeRequest) {
+ return changeRequest.getHostActionPlan()
+ .stream()
+ .filter(hostAction -> hostAction.getHostname().equals(node.hostname().value()))
+ .findFirst();
+ }
+
+ private HostAction nextAction(Node node, VespaChangeRequest changeRequest, boolean spareCapacity) {
+ var hostAction = getPreviousAction(node, changeRequest)
+ .orElse(new HostAction(node.hostname().value(), State.NONE, Instant.now()));
+
+ if (changeRequest.getChangeRequestSource().isClosed()) {
+ logger.fine(() -> changeRequest.getChangeRequestSource().getId() + " is closed, recycling " + node.hostname());
+ recycleNode(changeRequest.getZoneId(), node, hostAction);
+ return hostAction.withState(State.COMPLETE);
+ }
+
+ if (isPostponed(changeRequest, hostAction)) {
+ logger.fine(() -> changeRequest.getChangeRequestSource().getId() + " is postponed, recycling " + node.hostname());
+ recycleNode(changeRequest.getZoneId(), node, hostAction);
+ return hostAction.withState(State.PENDING_RETIREMENT);
+ }
+
+ if (node.type() != NodeType.host || !spareCapacity) {
+ return hostAction.withState(State.REQUIRES_OPERATOR_ACTION);
+ }
+
+ if (shouldRetire(changeRequest, hostAction)) {
+ if (!node.wantToRetire()) {
+ logger.info(String.format("Retiring %s due to %s", node.hostname().value(), changeRequest.getChangeRequestSource().getId()));
+ // TODO: Remove try/catch once retirement is stabilized
+ try {
+ setWantToRetire(changeRequest.getZoneId(), node, true);
+ } catch (Exception e) {
+ logger.warning("Failed to retire host " + node.hostname() + ": " + Exceptions.toMessageString(e));
+ // Check if retirement actually failed
+ if (!nodeRepository.getNode(changeRequest.getZoneId(), node.hostname().value()).getWantToRetire()) {
+ return hostAction;
+ }
+ }
+ }
+ return hostAction.withState(State.RETIRING);
+ }
+
+ if (hasRetired(node, hostAction)) {
+ logger.fine(() -> node.hostname() + " has retired");
+ return hostAction.withState(State.RETIRED);
+ }
+
+ if (pendingRetirement(node, hostAction)) {
+ logger.fine(() -> node.hostname() + " is pending retirement");
+ return hostAction.withState(State.PENDING_RETIREMENT);
+ }
+
+ return hostAction;
+ }
+
+ // Dirty host iff the parked host was retired by this maintainer
+ private void recycleNode(ZoneId zoneId, Node node, HostAction hostAction) {
+ if (hostAction.getState() == State.RETIRED &&
+ node.state() == Node.State.parked) {
+ logger.info("Setting " + node.hostname() + " to dirty");
+ nodeRepository.setState(zoneId, NodeState.dirty, node.hostname().value());
+ }
+ if (hostAction.getState() == State.RETIRING && node.wantToRetire()) {
+ try {
+ setWantToRetire(zoneId, node, false);
+ } catch (Exception ignored) {}
+ }
+ }
+
+ private boolean isPostponed(VespaChangeRequest changeRequest, HostAction action) {
+ return List.of(State.RETIRED, State.RETIRING).contains(action.getState()) &&
+ changeRequest.getChangeRequestSource().getPlannedStartTime()
+ .minus(ALLOWED_POSTPONEMENT_TIME)
+ .isAfter(ZonedDateTime.now());
+ }
+
+ private boolean shouldRetire(VespaChangeRequest changeRequest, HostAction action) {
+ return action.getState() == State.PENDING_RETIREMENT &&
+ changeRequest.getChangeRequestSource().getPlannedStartTime()
+ .minus(ALLOWED_RETIREMENT_TIME)
+ .isBefore(ZonedDateTime.now());
+ }
+
+ private boolean hasRetired(Node node, HostAction hostAction) {
+ return hostAction.getState() == State.RETIRING &&
+ node.state() == Node.State.parked;
+ }
+
+ /**
+ * TODO: For now, we choose to retire any active host
+ */
+ private boolean pendingRetirement(Node node, HostAction action) {
+ return action.getState() == State.NONE && node.state() == Node.State.active;
+ }
+
+ private Map<ZoneId, List<Node>> nodesByZone() {
+ return controller().zoneRegistry()
+ .zones()
+ .reachable()
+ .in(Environment.prod)
+ .ids()
+ .stream()
+ .collect(Collectors.toMap(
+ zone -> zone,
+ zone -> nodeRepository.list(zone, false)
+ ));
+ }
+
+ private Predicate<Node> isImpacted(VespaChangeRequest changeRequest) {
+ return node -> changeRequest.getImpactedHosts().contains(node.hostname().value()) ||
+ node.switchHostname()
+ .map(switchHostname -> changeRequest.getImpactedSwitches().contains(switchHostname))
+ .orElse(false);
+ }
+
+ private Predicate<VespaChangeRequest> shouldUpdate() {
+ return changeRequest -> changeRequest.getStatus() != Status.COMPLETED &&
+ List.of(Impact.HIGH, Impact.VERY_HIGH)
+ .contains(changeRequest.getImpact());
+ }
+
+ private boolean hasSpareCapacity(ZoneId zoneId, List<Node> nodes) {
+ var tenantHosts = nodes.stream()
+ .filter(node -> node.type() == NodeType.host)
+ .map(Node::hostname)
+ .collect(Collectors.toList());
+
+ return tenantHosts.isEmpty() ||
+ nodeRepository.isReplaceable(zoneId, tenantHosts);
+ }
+
+ private void setWantToRetire(ZoneId zoneId, Node node, boolean wantToRetire) {
+ var newNode = new NodeRepositoryNode();
+ newNode.setWantToRetire(wantToRetire);
+ nodeRepository.patchNode(zoneId, node.hostname().value(), newNode);
+ }
+}
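For orientation, a minimal sketch of the host-action progression the maintainer above drives, using only the HostAction API visible in this patch (the hostname and timestamps are illustrative):

    // Illustrative only, assuming HostAction and java.time.Instant are imported.
    // An active host with no prior action moves NONE -> PENDING_RETIREMENT -> RETIRING -> RETIRED
    // as the change request's planned start time approaches and the host is eventually parked.
    HostAction action = new HostAction("host1.example.com", HostAction.State.NONE, Instant.now());
    action = action.withState(HostAction.State.PENDING_RETIREMENT); // active host, no action yet
    action = action.withState(HostAction.State.RETIRING);           // within ALLOWED_RETIREMENT_TIME of planned start
    action = action.withState(HostAction.State.RETIRED);            // node repository reports the host as parked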
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/metric/CostCalculator.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/metric/CostCalculator.java
index 5d92166d758..b6468464a0b 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/metric/CostCalculator.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/metric/CostCalculator.java
@@ -48,7 +48,7 @@ public class CostCalculator {
Map<Property, ResourceAllocation> allocationByProperty = new HashMap<>();
var nodes = controller.zoneRegistry().zones()
.reachable().in(Environment.prod).ofCloud(cloudName).zones().stream()
- .flatMap(zone -> uncheck(() -> nodeRepository.list(zone.getId()).stream()))
+ .flatMap(zone -> uncheck(() -> nodeRepository.list(zone.getId(), false).stream()))
.filter(node -> node.owner().isPresent() && !node.owner().get().tenant().equals(SystemApplication.TENANT))
.collect(Collectors.toList());
var totalAllocation = ResourceAllocation.ZERO;
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notification.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notification.java
new file mode 100644
index 00000000000..299ef3ef50d
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notification.java
@@ -0,0 +1,74 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.notification;
+
+import java.time.Instant;
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * @author freva
+ */
+public class Notification {
+ private final Instant at;
+ private final Type type;
+ private final NotificationSource source;
+ private final List<String> messages;
+
+ public Notification(Instant at, Type type, NotificationSource source, List<String> messages) {
+ this.at = Objects.requireNonNull(at, "at cannot be null");
+ this.type = Objects.requireNonNull(type, "type cannot be null");
+ this.source = Objects.requireNonNull(source, "source cannot be null");
+ this.messages = List.copyOf(Objects.requireNonNull(messages, "messages cannot be null"));
+ if (messages.isEmpty()) throw new IllegalArgumentException("messages cannot be empty");
+ }
+
+ public Instant at() { return at; }
+ public Type type() { return type; }
+ public NotificationSource source() { return source; }
+ public List<String> messages() { return messages; }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ Notification that = (Notification) o;
+ return at.equals(that.at) && type == that.type && source.equals(that.source) && messages.equals(that.messages);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(at, type, source, messages);
+ }
+
+ @Override
+ public String toString() {
+ return "Notification{" +
+ "at=" + at +
+ ", type=" + type +
+ ", source=" + source +
+ ", messages=" + messages +
+ '}';
+ }
+
+ public enum Level {
+ warning, error;
+ }
+
+ public enum Type {
+ /** Warnings about usage of deprecated features in application package */
+ APPLICATION_PACKAGE_WARNING(Level.warning),
+
+ /** Failure to deploy application package */
+ DEPLOYMENT_FAILURE(Level.error);
+
+ private final Level level;
+ Type(Level level) {
+ this.level = level;
+ }
+
+ public Level level() {
+ return level;
+ }
+ }
+
+}
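A minimal usage sketch of the class above; the tenant name and message are illustrative, and NotificationSource.from(TenantName) is defined later in this patch:

    Notification warning = new Notification(
            Instant.now(),
            Notification.Type.APPLICATION_PACKAGE_WARNING,
            NotificationSource.from(TenantName.from("my-tenant")),
            List.of("Deprecated element in services.xml"));
    // messages must be non-empty; an empty list makes the constructor throw IllegalArgumentException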
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationSource.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationSource.java
new file mode 100644
index 00000000000..827b5a71eb1
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationSource.java
@@ -0,0 +1,152 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.notification;
+
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ApplicationName;
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.InstanceName;
+import com.yahoo.config.provision.TenantName;
+import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
+
+import java.util.Objects;
+import java.util.Optional;
+import java.util.OptionalLong;
+
+/**
+ * Denotes the source of the notification.
+ *
+ * @author freva
+ */
+public class NotificationSource {
+ private final TenantName tenant;
+ private final Optional<ApplicationName> application;
+ private final Optional<InstanceName> instance;
+ private final Optional<ZoneId> zoneId;
+ private final Optional<ClusterSpec.Id> clusterId;
+ private final Optional<JobType> jobType;
+ private final OptionalLong runNumber;
+
+ public NotificationSource(TenantName tenant, Optional<ApplicationName> application, Optional<InstanceName> instance,
+ Optional<ZoneId> zoneId, Optional<ClusterSpec.Id> clusterId, Optional<JobType> jobType, OptionalLong runNumber) {
+ this.tenant = Objects.requireNonNull(tenant, "tenant cannot be null");
+ this.application = Objects.requireNonNull(application, "application cannot be null");
+ this.instance = Objects.requireNonNull(instance, "instance cannot be null");
+ this.zoneId = Objects.requireNonNull(zoneId, "zoneId cannot be null");
+ this.clusterId = Objects.requireNonNull(clusterId, "clusterId cannot be null");
+ this.jobType = Objects.requireNonNull(jobType, "jobType cannot be null");
+ this.runNumber = Objects.requireNonNull(runNumber, "runNumber cannot be null");
+
+ if (instance.isPresent() && application.isEmpty())
+ throw new IllegalArgumentException("Application name must be present with instance name");
+ if (zoneId.isPresent() && instance.isEmpty())
+ throw new IllegalArgumentException("Instance name must be present with zone ID");
+ if (clusterId.isPresent() && zoneId.isEmpty())
+ throw new IllegalArgumentException("Zone ID must be present with cluster ID");
+ if (clusterId.isPresent() && jobType.isPresent())
+ throw new IllegalArgumentException("Cannot set both cluster ID and job type");
+ if (jobType.isPresent() && instance.isEmpty())
+ throw new IllegalArgumentException("Instance name must be present with job type");
+ if (jobType.isPresent() != runNumber.isPresent())
+ throw new IllegalArgumentException(String.format("Run number (%s) must be 1-to-1 with job type (%s)",
+ runNumber.isPresent() ? "present" : "missing", jobType.map(i -> "present").orElse("missing")));
+ }
+
+ public TenantName tenant() { return tenant; }
+ public Optional<ApplicationName> application() { return application; }
+ public Optional<InstanceName> instance() { return instance; }
+ public Optional<ZoneId> zoneId() { return zoneId; }
+ public Optional<ClusterSpec.Id> clusterId() { return clusterId; }
+ public Optional<JobType> jobType() { return jobType; }
+ public OptionalLong runNumber() { return runNumber; }
+
+ /**
+ * Returns true iff this source contains the given source. A source contains the other source if
+ * all fields set in this source are equal to the corresponding fields in the given source; fields
+ * not set in this source are ignored.
+ */
+ public boolean contains(NotificationSource other) {
+ return tenant.equals(other.tenant) &&
+ (application.isEmpty() || application.equals(other.application)) &&
+ (instance.isEmpty() || instance.equals(other.instance)) &&
+ (zoneId.isEmpty() || zoneId.equals(other.zoneId)) &&
+ (clusterId.isEmpty() || clusterId.equals(other.clusterId)) &&
+ (jobType.isEmpty() || jobType.equals(other.jobType)); // Do not consider run number (it's unique!)
+ }
+
+ /**
+ * Returns whether this source is from a production deployment or a deployment related to a production
+ * deployment (e.g. to the staging zone), or whether it is at tenant or application level.
+ */
+ public boolean isProduction() {
+ if (instance.isEmpty()) return true;
+ return ! zoneId.map(ZoneId::environment)
+ .or(() -> jobType.map(JobType::environment))
+ .map(Environment::isManuallyDeployed)
+ .orElse(true); // Assume that notifications with a full application ID concern dev deployments
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ NotificationSource that = (NotificationSource) o;
+ return tenant.equals(that.tenant) && application.equals(that.application) && instance.equals(that.instance) &&
+ zoneId.equals(that.zoneId) && clusterId.equals(that.clusterId) && jobType.equals(that.jobType); // Do not consider run number (it's unique!)
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(tenant, application, instance, zoneId, clusterId, jobType, runNumber);
+ }
+
+ @Override
+ public String toString() {
+ return "NotificationSource{" +
+ "tenant=" + tenant +
+ application.map(application -> ", application=" + application.value()).orElse("") +
+ instance.map(instance -> ", instance=" + instance.value()).orElse("") +
+ zoneId.map(zoneId -> ", zone=" + zoneId.value()).orElse("") +
+ clusterId.map(clusterId -> ", clusterId=" + clusterId.value()).orElse("") +
+ jobType.map(jobType -> ", job=" + jobType.jobName() + "#" + runNumber.getAsLong()).orElse("") +
+ '}';
+ }
+
+ private static NotificationSource from(TenantName tenant, ApplicationName application, InstanceName instance, ZoneId zoneId,
+ ClusterSpec.Id clusterId, JobType jobType, Long runNumber) {
+ return new NotificationSource(tenant, Optional.ofNullable(application), Optional.ofNullable(instance), Optional.ofNullable(zoneId),
+ Optional.ofNullable(clusterId), Optional.ofNullable(jobType), runNumber == null ? OptionalLong.empty() : OptionalLong.of(runNumber));
+ }
+
+ public static NotificationSource from(TenantName tenantName) {
+ return from(tenantName, null, null, null, null, null, null);
+ }
+
+ public static NotificationSource from(TenantAndApplicationId id) {
+ return from(id.tenant(), id.application(), null, null, null, null, null);
+ }
+
+ public static NotificationSource from(ApplicationId app) {
+ return from(app.tenant(), app.application(), app.instance(), null, null, null, null);
+ }
+
+ public static NotificationSource from(DeploymentId deploymentId) {
+ ApplicationId app = deploymentId.applicationId();
+ return from(app.tenant(), app.application(), app.instance(), deploymentId.zoneId(), null, null, null);
+ }
+
+ public static NotificationSource from(DeploymentId deploymentId, ClusterSpec.Id clusterId) {
+ ApplicationId app = deploymentId.applicationId();
+ return from(app.tenant(), app.application(), app.instance(), deploymentId.zoneId(), clusterId, null, null);
+ }
+
+ public static NotificationSource from(RunId runId) {
+ ApplicationId app = runId.application();
+ return from(app.tenant(), app.application(), app.instance(), null, null, runId.job().type(), runId.number());
+ }
+}
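A sketch of the containment semantics documented on contains(): a source set only at tenant level contains any source for the same tenant, while the reverse does not hold (the identifiers are illustrative):

    NotificationSource tenantScope = NotificationSource.from(TenantName.from("my-tenant"));
    NotificationSource instanceScope = NotificationSource.from(ApplicationId.from("my-tenant", "my-app", "default"));

    tenantScope.contains(instanceScope);   // true: only the tenant field is set here, and it matches
    instanceScope.contains(tenantScope);   // false: application and instance are set here but empty in the argument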
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java
new file mode 100644
index 00000000000..950dddfc056
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java
@@ -0,0 +1,84 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.notification;
+
+import com.yahoo.vespa.curator.Lock;
+import com.yahoo.vespa.hosted.controller.Controller;
+import com.yahoo.vespa.hosted.controller.persistence.CuratorDb;
+
+import java.time.Clock;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * Adds, updates and removes tenant notifications in ZK
+ *
+ * @author freva
+ */
+public class NotificationsDb {
+
+ private final Clock clock;
+ private final CuratorDb curatorDb;
+
+ public NotificationsDb(Controller controller) {
+ this(controller.clock(), controller.curator());
+ }
+
+ NotificationsDb(Clock clock, CuratorDb curatorDb) {
+ this.clock = clock;
+ this.curatorDb = curatorDb;
+ }
+
+ public List<Notification> listNotifications(NotificationSource source, boolean productionOnly) {
+ return curatorDb.readNotifications(source.tenant()).stream()
+ .filter(notification -> source.contains(notification.source()) && (!productionOnly || notification.source().isProduction()))
+ .collect(Collectors.toUnmodifiableList());
+ }
+
+ public void setNotification(NotificationSource source, Notification.Type type, String message) {
+ setNotification(source, type, List.of(message));
+ }
+
+ /**
+ * Add a notification with the given source and type. If a notification with the same source and
+ * type already exists, it is replaced by this one.
+ */
+ public void setNotification(NotificationSource source, Notification.Type type, List<String> messages) {
+ try (Lock lock = curatorDb.lockNotifications(source.tenant())) {
+ List<Notification> notifications = curatorDb.readNotifications(source.tenant()).stream()
+ .filter(notification -> !source.equals(notification.source()) || type != notification.type())
+ .collect(Collectors.toCollection(ArrayList::new));
+ notifications.add(new Notification(clock.instant(), type, source, messages));
+ curatorDb.writeNotifications(source.tenant(), notifications);
+ }
+ }
+
+ /** Remove the notification with the given source and type */
+ public void removeNotification(NotificationSource source, Notification.Type type) {
+ try (Lock lock = curatorDb.lockNotifications(source.tenant())) {
+ List<Notification> initial = curatorDb.readNotifications(source.tenant());
+ List<Notification> filtered = initial.stream()
+ .filter(notification -> !source.equals(notification.source()) || type != notification.type())
+ .collect(Collectors.toUnmodifiableList());
+ if (initial.size() > filtered.size())
+ curatorDb.writeNotifications(source.tenant(), filtered);
+ }
+ }
+
+ /** Remove all notifications for this source or sources contained by this source */
+ public void removeNotifications(NotificationSource source) {
+ try (Lock lock = curatorDb.lockNotifications(source.tenant())) {
+ if (source.application().isEmpty()) { // Source is tenant
+ curatorDb.deleteNotifications(source.tenant());
+ return;
+ }
+
+ List<Notification> initial = curatorDb.readNotifications(source.tenant());
+ List<Notification> filtered = initial.stream()
+ .filter(notification -> !source.contains(notification.source()))
+ .collect(Collectors.toUnmodifiableList());
+ if (initial.size() > filtered.size())
+ curatorDb.writeNotifications(source.tenant(), filtered);
+ }
+ }
+}
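A usage sketch of the replace-on-same-source-and-type behaviour described above, assuming a NotificationsDb instance named notificationsDb and the types introduced in this patch:

    NotificationSource source = NotificationSource.from(TenantName.from("my-tenant"));
    notificationsDb.setNotification(source, Notification.Type.DEPLOYMENT_FAILURE, "Out of capacity in prod.us-east-3");
    notificationsDb.setNotification(source, Notification.Type.DEPLOYMENT_FAILURE, "Quota exceeded");

    // Same source and type, so only the latest notification remains
    List<Notification> current = notificationsDb.listNotifications(source, false);

    notificationsDb.removeNotification(source, Notification.Type.DEPLOYMENT_FAILURE);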
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ChangeRequestSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ChangeRequestSerializer.java
new file mode 100644
index 00000000000..407eb5ad5ab
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ChangeRequestSerializer.java
@@ -0,0 +1,150 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.persistence;
+
+import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.slime.ArrayTraverser;
+import com.yahoo.slime.Cursor;
+import com.yahoo.slime.Inspector;
+import com.yahoo.slime.Slime;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequest;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequestSource;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.HostAction;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.VespaChangeRequest;
+
+import java.time.Instant;
+import java.time.ZonedDateTime;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * @author olaa
+ */
+public class ChangeRequestSerializer {
+
+ // WARNING: Since there are multiple servers in a ZooKeeper cluster and they upgrade one by one
+ // (and rewrite all nodes on startup), changes to the serialized format must be made
+ // such that what is serialized on version N+1 can be read by version N:
+ // - ADDING FIELDS: Always ok
+ // - REMOVING FIELDS: Stop reading the field first. Stop writing it on a later version.
+ // - CHANGING THE FORMAT OF A FIELD: Don't do it bro.
+
+ private static final String ID_FIELD = "id";
+ private static final String SOURCE_FIELD = "source";
+ private static final String SOURCE_SYSTEM_FIELD = "system";
+ private static final String STATUS_FIELD = "status";
+ private static final String URL_FIELD = "url";
+ private static final String ZONE_FIELD = "zoneId";
+ private static final String START_TIME_FIELD = "plannedStartTime";
+ private static final String END_TIME_FIELD = "plannedEndTime";
+ private static final String APPROVAL_FIELD = "approval";
+ private static final String IMPACT_FIELD = "impact";
+ private static final String IMPACTED_HOSTS_FIELD = "impactedHosts";
+ private static final String IMPACTED_SWITCHES_FIELD = "impactedSwitches";
+ private static final String ACTION_PLAN_FIELD = "actionPlan";
+ private static final String HOST_FIELD = "hostname";
+ private static final String ACTION_STATE_FIELD = "state";
+ private static final String LAST_UPDATED_FIELD = "lastUpdated";
+ private static final String HOSTS_FIELD = "hosts";
+
+
+ public static VespaChangeRequest fromSlime(Slime slime) {
+ var inspector = slime.get();
+ var id = inspector.field(ID_FIELD).asString();
+ var zoneId = ZoneId.from(inspector.field(ZONE_FIELD).asString());
+ var changeRequestSource = readChangeRequestSource(inspector.field(SOURCE_FIELD));
+ var actionPlan = readHostActionPlan(inspector.field(ACTION_PLAN_FIELD));
+ var status = VespaChangeRequest.Status.valueOf(inspector.field(STATUS_FIELD).asString());
+ var impact = ChangeRequest.Impact.valueOf(inspector.field(IMPACT_FIELD).asString());
+ var approval = ChangeRequest.Approval.valueOf(inspector.field(APPROVAL_FIELD).asString());
+
+ var impactedHosts = new ArrayList<String>();
+ inspector.field(IMPACTED_HOSTS_FIELD)
+ .traverse((ArrayTraverser) (i, hostname) -> impactedHosts.add(hostname.asString()));
+ var impactedSwitches = new ArrayList<String>();
+ inspector.field(IMPACTED_SWITCHES_FIELD)
+ .traverse((ArrayTraverser) (i, switchName) -> impactedSwitches.add(switchName.asString()));
+
+ return new VespaChangeRequest(
+ id,
+ changeRequestSource,
+ impactedSwitches,
+ impactedHosts,
+ approval,
+ impact,
+ status,
+ actionPlan,
+ zoneId);
+ }
+
+ public static Slime toSlime(VespaChangeRequest changeRequest) {
+ var slime = new Slime();
+ writeChangeRequest(slime.setObject(), changeRequest);
+ return slime;
+ }
+
+ public static void writeChangeRequest(Cursor cursor, VespaChangeRequest changeRequest) {
+ cursor.setString(ID_FIELD, changeRequest.getId());
+ cursor.setString(STATUS_FIELD, changeRequest.getStatus().name());
+ cursor.setString(IMPACT_FIELD, changeRequest.getImpact().name());
+ cursor.setString(APPROVAL_FIELD, changeRequest.getApproval().name());
+ cursor.setString(ZONE_FIELD, changeRequest.getZoneId().value());
+ writeChangeRequestSource(cursor.setObject(SOURCE_FIELD), changeRequest.getChangeRequestSource());
+ writeActionPlan(cursor.setObject(ACTION_PLAN_FIELD), changeRequest);
+
+ var impactedHosts = cursor.setArray(IMPACTED_HOSTS_FIELD);
+ changeRequest.getImpactedHosts().forEach(impactedHosts::addString);
+ var impactedSwitches = cursor.setArray(IMPACTED_SWITCHES_FIELD);
+ changeRequest.getImpactedSwitches().forEach(impactedSwitches::addString);
+ }
+
+ private static void writeActionPlan(Cursor cursor, VespaChangeRequest changeRequest) {
+ var hostsCursor = cursor.setArray(HOSTS_FIELD);
+
+ changeRequest.getHostActionPlan().forEach(action -> {
+ var actionCursor = hostsCursor.addObject();
+ actionCursor.setString(HOST_FIELD, action.getHostname());
+ actionCursor.setString(ACTION_STATE_FIELD, action.getState().name());
+ actionCursor.setString(LAST_UPDATED_FIELD, action.getLastUpdated().toString());
+ });
+
+ // TODO: Add action plan per application
+ }
+
+ private static void writeChangeRequestSource(Cursor cursor, ChangeRequestSource source) {
+ cursor.setString(SOURCE_SYSTEM_FIELD, source.getSystem());
+ cursor.setString(ID_FIELD, source.getId());
+ cursor.setString(URL_FIELD, source.getUrl());
+ cursor.setString(START_TIME_FIELD, source.getPlannedStartTime().toString());
+ cursor.setString(END_TIME_FIELD, source.getPlannedEndTime().toString());
+ cursor.setString(STATUS_FIELD, source.getStatus().name());
+ }
+
+ private static ChangeRequestSource readChangeRequestSource(Inspector inspector) {
+ return new ChangeRequestSource(
+ inspector.field(SOURCE_SYSTEM_FIELD).asString(),
+ inspector.field(ID_FIELD).asString(),
+ inspector.field(URL_FIELD).asString(),
+ ChangeRequestSource.Status.valueOf(inspector.field(STATUS_FIELD).asString()),
+ ZonedDateTime.parse(inspector.field(START_TIME_FIELD).asString()),
+ ZonedDateTime.parse(inspector.field(END_TIME_FIELD).asString())
+ );
+ }
+
+ private static List<HostAction> readHostActionPlan(Inspector inspector) {
+ if (!inspector.valid())
+ return List.of();
+
+ var actionPlan = new ArrayList<HostAction>();
+ inspector.field(HOSTS_FIELD).traverse((ArrayTraverser) (index, hostObject) ->
+ actionPlan.add(
+ new HostAction(
+ hostObject.field(HOST_FIELD).asString(),
+ HostAction.State.valueOf(hostObject.field(ACTION_STATE_FIELD).asString()),
+ Instant.parse(hostObject.field(LAST_UPDATED_FIELD).asString())
+ )
+ )
+ );
+ return actionPlan;
+ }
+
+}
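A round-trip sketch of the serializer above; the constructor argument orders follow fromSlime() and readChangeRequestSource() in this patch, the enum values are taken from elsewhere in this change, and all other values are illustrative:

    ChangeRequestSource source = new ChangeRequestSource("aws", "id1", "https://example.com/id1",
            ChangeRequestSource.Status.WAITING_FOR_APPROVAL, ZonedDateTime.now(), ZonedDateTime.now().plusHours(4));
    VespaChangeRequest original = new VespaChangeRequest("id1", source,
            List.of("switch1"), List.of("host1", "host2"),
            ChangeRequest.Approval.APPROVED, ChangeRequest.Impact.HIGH,
            VespaChangeRequest.Status.COMPLETED, List.of(), ZoneId.from("prod.us-east-3"));

    Slime slime = ChangeRequestSerializer.toSlime(original);
    VespaChangeRequest roundTripped = ChangeRequestSerializer.fromSlime(slime);
    // roundTripped is expected to equal original for the fields the serializer covers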
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
index 34741bcaedf..3d6cb45aeb1 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
@@ -24,6 +24,8 @@ import com.yahoo.vespa.hosted.controller.auditlog.AuditLog;
import com.yahoo.vespa.hosted.controller.deployment.Run;
import com.yahoo.vespa.hosted.controller.deployment.Step;
import com.yahoo.vespa.hosted.controller.dns.NameServiceQueue;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.VespaChangeRequest;
+import com.yahoo.vespa.hosted.controller.notification.Notification;
import com.yahoo.vespa.hosted.controller.routing.GlobalRouting;
import com.yahoo.vespa.hosted.controller.routing.RoutingPolicy;
import com.yahoo.vespa.hosted.controller.routing.RoutingPolicyId;
@@ -39,7 +41,6 @@ import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.ByteBuffer;
import java.time.Duration;
-import java.time.Instant;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -54,7 +55,6 @@ import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeoutException;
import java.util.function.Function;
import java.util.function.Predicate;
-import java.util.function.Supplier;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
@@ -89,6 +89,8 @@ public class CuratorDb {
private static final Path zoneRoutingPoliciesRoot = root.append("zoneRoutingPolicies");
private static final Path endpointCertificateRoot = root.append("applicationCertificates");
private static final Path archiveBucketsRoot = root.append("archiveBuckets");
+ private static final Path changeRequestsRoot = root.append("changeRequests");
+ private static final Path notificationsRoot = root.append("notifications");
private final NodeVersionSerializer nodeVersionSerializer = new NodeVersionSerializer();
private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer(nodeVersionSerializer);
@@ -204,6 +206,14 @@ public class CuratorDb {
return curator.lock(lockRoot.append("archiveBuckets").append(zoneId.value()), defaultLockTimeout);
}
+ public Lock lockChangeRequests() {
+ return curator.lock(lockRoot.append("changeRequests"), defaultLockTimeout);
+ }
+
+ public Lock lockNotifications(TenantName tenantName) {
+ return curator.lock(lockRoot.append("notifications").append(tenantName.value()), defaultLockTimeout);
+ }
+
// -------------- Helpers ------------------------------------------
/** Try locking with a low timeout, meaning it is OK to fail lock acquisition.
@@ -563,6 +573,43 @@ public class CuratorDb {
curator.set(archiveBucketsPath(zoneid), asJson(ArchiveBucketsSerializer.toSlime(archiveBuckets)));
}
+ // -------------- VCMRs ---------------------------------------------------
+
+ public Optional<VespaChangeRequest> readChangeRequest(String changeRequestId) {
+ return readSlime(changeRequestPath(changeRequestId)).map(ChangeRequestSerializer::fromSlime);
+ }
+
+ public List<VespaChangeRequest> readChangeRequests() {
+ return curator.getChildren(changeRequestsRoot)
+ .stream()
+ .map(this::readChangeRequest)
+ .flatMap(Optional::stream)
+ .collect(Collectors.toList());
+ }
+
+ public void writeChangeRequest(VespaChangeRequest changeRequest) {
+ curator.set(changeRequestPath(changeRequest.getId()), asJson(ChangeRequestSerializer.toSlime(changeRequest)));
+ }
+
+ public void deleteChangeRequest(VespaChangeRequest changeRequest) {
+ curator.delete(changeRequestPath(changeRequest.getId()));
+ }
+
+ // -------------- Notifications ---------------------------------------------------
+
+ public List<Notification> readNotifications(TenantName tenantName) {
+ return readSlime(notificationsPath(tenantName))
+ .map(slime -> NotificationsSerializer.fromSlime(tenantName, slime)).orElseGet(List::of);
+ }
+
+ public void writeNotifications(TenantName tenantName, List<Notification> notifications) {
+ curator.set(notificationsPath(tenantName), asJson(NotificationsSerializer.toSlime(notifications)));
+ }
+
+ public void deleteNotifications(TenantName tenantName) {
+ curator.delete(notificationsPath(tenantName));
+ }
+
// -------------- Paths ---------------------------------------------------
private Path lockPath(TenantName tenant) {
@@ -688,4 +735,12 @@ public class CuratorDb {
return archiveBucketsRoot.append(zoneId.value());
}
+ private static Path changeRequestPath(String id) {
+ return changeRequestsRoot.append(id);
+ }
+
+ private static Path notificationsPath(TenantName tenantName) {
+ return notificationsRoot.append(tenantName.value());
+ }
+
}
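A sketch of the intended read-modify-write pattern around the new ZooKeeper paths, mirroring how NotificationsDb uses the lock methods above (the changeRequest instance is assumed to exist already):

    try (Lock lock = curator.lockChangeRequests()) {
        curator.writeChangeRequest(changeRequest);
    }
    Optional<VespaChangeRequest> stored = curator.readChangeRequest(changeRequest.getId());
    List<VespaChangeRequest> all = curator.readChangeRequests();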
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializer.java
new file mode 100644
index 00000000000..dcb485b9016
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializer.java
@@ -0,0 +1,104 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.persistence;
+
+import com.yahoo.config.provision.ApplicationName;
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.InstanceName;
+import com.yahoo.config.provision.TenantName;
+import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.slime.Cursor;
+import com.yahoo.slime.Inspector;
+import com.yahoo.slime.Slime;
+import com.yahoo.slime.SlimeUtils;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
+import com.yahoo.vespa.hosted.controller.notification.Notification;
+import com.yahoo.vespa.hosted.controller.notification.NotificationSource;
+
+import java.util.List;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+/**
+ * (de)serializes notifications for a tenant
+ *
+ * @author freva
+ */
+public class NotificationsSerializer {
+
+ // WARNING: Since there are multiple servers in a ZooKeeper cluster and they upgrade one by one
+ // (and rewrite all nodes on startup), changes to the serialized format must be made
+ // such that what is serialized on version N+1 can be read by version N:
+ // - ADDING FIELDS: Always ok
+ // - REMOVING FIELDS: Stop reading the field first. Stop writing it on a later version.
+ // - CHANGING THE FORMAT OF A FIELD: Don't do it bro.
+
+ private static final String notificationsFieldName = "notifications";
+ private static final String atFieldName = "at";
+ private static final String typeField = "type";
+ private static final String messagesField = "messages";
+ private static final String applicationField = "application";
+ private static final String instanceField = "instance";
+ private static final String zoneField = "zone";
+ private static final String clusterIdField = "clusterId";
+ private static final String jobTypeField = "jobId";
+ private static final String runNumberField = "runNumber";
+
+ public static Slime toSlime(List<Notification> notifications) {
+ Slime slime = new Slime();
+ Cursor notificationsArray = slime.setObject().setArray(notificationsFieldName);
+
+ for (Notification notification : notifications) {
+ Cursor notificationObject = notificationsArray.addObject();
+ notificationObject.setLong(atFieldName, notification.at().toEpochMilli());
+ notificationObject.setString(typeField, asString(notification.type()));
+ Cursor messagesArray = notificationObject.setArray(messagesField);
+ notification.messages().forEach(messagesArray::addString);
+
+ notification.source().application().ifPresent(application -> notificationObject.setString(applicationField, application.value()));
+ notification.source().instance().ifPresent(instance -> notificationObject.setString(instanceField, instance.value()));
+ notification.source().zoneId().ifPresent(zoneId -> notificationObject.setString(zoneField, zoneId.value()));
+ notification.source().clusterId().ifPresent(clusterId -> notificationObject.setString(clusterIdField, clusterId.value()));
+ notification.source().jobType().ifPresent(jobType -> notificationObject.setString(jobTypeField, jobType.jobName()));
+ notification.source().runNumber().ifPresent(runNumber -> notificationObject.setLong(runNumberField, runNumber));
+ }
+
+ return slime;
+ }
+
+ public static List<Notification> fromSlime(TenantName tenantName, Slime slime) {
+ return SlimeUtils.entriesStream(slime.get().field(notificationsFieldName))
+ .map(inspector -> fromInspector(tenantName, inspector))
+ .collect(Collectors.toUnmodifiableList());
+ }
+
+ private static Notification fromInspector(TenantName tenantName, Inspector inspector) {
+ return new Notification(
+ Serializers.instant(inspector.field(atFieldName)),
+ typeFrom(inspector.field(typeField)),
+ new NotificationSource(
+ tenantName,
+ Serializers.optionalString(inspector.field(applicationField)).map(ApplicationName::from),
+ Serializers.optionalString(inspector.field(instanceField)).map(InstanceName::from),
+ Serializers.optionalString(inspector.field(zoneField)).map(ZoneId::from),
+ Serializers.optionalString(inspector.field(clusterIdField)).map(ClusterSpec.Id::from),
+ Serializers.optionalString(inspector.field(jobTypeField)).map(JobType::fromJobName),
+ Serializers.optionalLong(inspector.field(runNumberField))),
+ SlimeUtils.entriesStream(inspector.field(messagesField)).map(Inspector::asString).collect(Collectors.toUnmodifiableList()));
+ }
+
+ private static String asString(Notification.Type type) {
+ switch (type) {
+ case APPLICATION_PACKAGE_WARNING: return "APPLICATION_PACKAGE_WARNING";
+ case DEPLOYMENT_FAILURE: return "DEPLOYMENT_FAILURE";
+ default: throw new IllegalArgumentException("No serialization defined for notification type " + type);
+ }
+ }
+
+ private static Notification.Type typeFrom(Inspector field) {
+ switch (field.asString()) {
+ case "APPLICATION_PACKAGE_WARNING": return Notification.Type.APPLICATION_PACKAGE_WARNING;
+ case "DEPLOYMENT_FAILURE": return Notification.Type.DEPLOYMENT_FAILURE;
+ default: throw new IllegalArgumentException("Unknown serialized notification type value '" + field.asString() + "'");
+ }
+ }
+}
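A round-trip sketch of the serializer above, reusing a Notification built as shown earlier in this patch; the tenant name is illustrative:

    TenantName tenant = TenantName.from("my-tenant");
    List<Notification> notifications = List.of(new Notification(
            Instant.now(),
            Notification.Type.DEPLOYMENT_FAILURE,
            NotificationSource.from(tenant),
            List.of("Deployment failed")));

    Slime slime = NotificationsSerializer.toSlime(notifications);
    List<Notification> deserialized = NotificationsSerializer.fromSlime(tenant, slime);
    // The tenant is not stored in the payload; it is supplied again when deserializing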
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
index ffb5e040517..994dc877182 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
@@ -80,6 +80,7 @@ import com.yahoo.vespa.hosted.controller.application.EndpointList;
import com.yahoo.vespa.hosted.controller.application.QuotaUsage;
import com.yahoo.vespa.hosted.controller.application.SystemApplication;
import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
+import com.yahoo.vespa.hosted.controller.auditlog.AuditLoggingRequestHandler;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentStatus;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentSteps;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTrigger;
@@ -87,6 +88,8 @@ import com.yahoo.vespa.hosted.controller.deployment.DeploymentTrigger.ChangesToC
import com.yahoo.vespa.hosted.controller.deployment.JobStatus;
import com.yahoo.vespa.hosted.controller.deployment.Run;
import com.yahoo.vespa.hosted.controller.deployment.TestConfigSerializer;
+import com.yahoo.vespa.hosted.controller.notification.Notification;
+import com.yahoo.vespa.hosted.controller.notification.NotificationSource;
import com.yahoo.vespa.hosted.controller.rotation.RotationId;
import com.yahoo.vespa.hosted.controller.rotation.RotationState;
import com.yahoo.vespa.hosted.controller.rotation.RotationStatus;
@@ -136,8 +139,6 @@ import java.util.stream.Stream;
import static com.yahoo.jdisc.Response.Status.BAD_REQUEST;
import static com.yahoo.jdisc.Response.Status.CONFLICT;
-import static com.yahoo.jdisc.Response.Status.INTERNAL_SERVER_ERROR;
-import static com.yahoo.jdisc.Response.Status.NOT_FOUND;
import static java.util.Map.Entry.comparingByKey;
import static java.util.stream.Collectors.joining;
import static java.util.stream.Collectors.toList;
@@ -151,7 +152,7 @@ import static java.util.stream.Collectors.toUnmodifiableList;
* @author mpolden
*/
@SuppressWarnings("unused") // created by injection
-public class ApplicationApiHandler extends LoggingRequestHandler {
+public class ApplicationApiHandler extends AuditLoggingRequestHandler {
private static final ObjectMapper jsonMapper = new ObjectMapper();
@@ -163,7 +164,7 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
public ApplicationApiHandler(LoggingRequestHandler.Context parentCtx,
Controller controller,
AccessControlRequests accessControlRequests) {
- super(parentCtx);
+ super(parentCtx, controller.auditLogger());
this.controller = controller;
this.accessControlRequests = accessControlRequests;
this.testConfigSerializer = new TestConfigSerializer(controller.system());
@@ -175,7 +176,7 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
}
@Override
- public HttpResponse handle(HttpRequest request) {
+ public HttpResponse auditAndHandle(HttpRequest request) {
try {
Path path = new Path(request.getUri());
switch (request.getMethod()) {
@@ -201,15 +202,15 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
}
catch (ConfigServerException e) {
- switch (e.getErrorCode()) {
+ switch (e.code()) {
case NOT_FOUND:
- return new ErrorResponse(NOT_FOUND, e.getErrorCode().name(), Exceptions.toMessageString(e));
+ return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
case ACTIVATION_CONFLICT:
- return new ErrorResponse(CONFLICT, e.getErrorCode().name(), Exceptions.toMessageString(e));
+ return new ErrorResponse(CONFLICT, e.code().name(), Exceptions.toMessageString(e));
case INTERNAL_SERVER_ERROR:
- return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getErrorCode().name(), Exceptions.toMessageString(e));
+ return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
default:
- return new ErrorResponse(BAD_REQUEST, e.getErrorCode().name(), Exceptions.toMessageString(e));
+ return new ErrorResponse(BAD_REQUEST, e.code().name(), Exceptions.toMessageString(e));
}
}
catch (RuntimeException e) {
@@ -223,6 +224,7 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/info")) return tenantInfo(path.get("tenant"), request);
+ if (path.matches("/application/v4/tenant/{tenant}/notifications")) return notifications(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}/validate")) return validateSecretStore(path.get("tenant"), path.get("name"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), Optional.empty(), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return application(path.get("tenant"), path.get("application"), request);
@@ -480,6 +482,53 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
.withAddress(updateTenantInfoAddress(insp.field("address"), oldContact.address()));
}
+ private HttpResponse notifications(String tenantName, HttpRequest request) {
+ NotificationSource notificationSource = new NotificationSource(TenantName.from(tenantName),
+ Optional.ofNullable(request.getProperty("application")).map(ApplicationName::from),
+ Optional.ofNullable(request.getProperty("instance")).map(InstanceName::from),
+ Optional.empty(), Optional.empty(), Optional.empty(), OptionalLong.empty());
+
+ Slime slime = new Slime();
+ Cursor notificationsArray = slime.setObject().setArray("notifications");
+ controller.notificationsDb().listNotifications(notificationSource, showOnlyProductionInstances(request))
+ .forEach(notification -> toSlime(notificationsArray.addObject(), notification));
+ return new SlimeJsonResponse(slime);
+ }
+
+ private static void toSlime(Cursor cursor, Notification notification) {
+ cursor.setLong("at", notification.at().toEpochMilli());
+ cursor.setString("level", notificatioLevelAsString(notification.type().level()));
+ cursor.setString("type", notificationTypeAsString(notification.type()));
+ Cursor messagesArray = cursor.setArray("messages");
+ notification.messages().forEach(messagesArray::addString);
+
+ notification.source().application().ifPresent(application -> cursor.setString("application", application.value()));
+ notification.source().instance().ifPresent(instance -> cursor.setString("instance", instance.value()));
+ notification.source().zoneId().ifPresent(zoneId -> {
+ cursor.setString("environment", zoneId.environment().value());
+ cursor.setString("region", zoneId.region().value());
+ });
+ notification.source().clusterId().ifPresent(clusterId -> cursor.setString("clusterId", clusterId.value()));
+ notification.source().jobType().ifPresent(jobType -> cursor.setString("jobName", jobType.jobName()));
+ notification.source().runNumber().ifPresent(runNumber -> cursor.setLong("runNumber", runNumber));
+ }
+
+ private static String notificationTypeAsString(Notification.Type type) {
+ switch (type) {
+ case APPLICATION_PACKAGE_WARNING: return "APPLICATION_PACKAGE_WARNING";
+ case DEPLOYMENT_FAILURE: return "DEPLOYMENT_FAILURE";
+ default: throw new IllegalArgumentException("No serialization defined for notification type " + type);
+ }
+ }
+
+ private static String notificationLevelAsString(Notification.Level level) {
+ switch (level) {
+ case warning: return "warning";
+ case error: return "error";
+ default: throw new IllegalArgumentException("No serialization defined for notification level " + level);
+ }
+ }
+
private HttpResponse applications(String tenantName, Optional<String> applicationName, HttpRequest request) {
TenantName tenant = TenantName.from(tenantName);
if (controller.tenants().get(tenantName).isEmpty())
@@ -685,10 +734,10 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
var tenantSecretStore = new TenantSecretStore(name, awsId, role);
if (!tenantSecretStore.isValid()) {
- return ErrorResponse.badRequest(String.format("Secret store " + tenantSecretStore + " is invalid"));
+ return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is invalid");
}
if (tenant.tenantSecretStores().contains(tenantSecretStore)) {
- return ErrorResponse.badRequest(String.format("Secret store " + tenantSecretStore + " is already configured"));
+ return ErrorResponse.badRequest("Secret store " + tenantSecretStore + " is already configured");
}
controller.serviceRegistry().roleService().createTenantPolicy(TenantName.from(tenantName), name, awsId, role);
@@ -1631,7 +1680,6 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
/** Trigger deployment of the given Vespa version if a valid one is given, e.g., "7.8.9". */
private HttpResponse deployPlatform(String tenantName, String applicationName, String instanceName, boolean pin, HttpRequest request) {
- request = controller.auditLogger().log(request);
String versionString = readToString(request.getData());
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
StringBuilder response = new StringBuilder();
@@ -1660,7 +1708,6 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
/** Trigger deployment to the last known application package for the given application. */
private HttpResponse deployApplication(String tenantName, String applicationName, String instanceName, HttpRequest request) {
- controller.auditLogger().log(request);
ApplicationId id = ApplicationId.from(tenantName, applicationName, instanceName);
StringBuilder response = new StringBuilder();
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
@@ -2050,6 +2097,7 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
toSlime(scalingEvent.from(), scalingEventObject.setObject("from"));
toSlime(scalingEvent.to(), scalingEventObject.setObject("to"));
scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli());
+ scalingEvent.completion().ifPresent(completion -> scalingEventObject.setLong("completion", completion.toEpochMilli()));
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandler.java
index 2077278ee0c..5973cc3fcf3 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandler.java
@@ -20,6 +20,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequest;
import com.yahoo.vespa.hosted.controller.auditlog.AuditLoggingRequestHandler;
import com.yahoo.vespa.hosted.controller.maintenance.ChangeManagementAssessor;
+import com.yahoo.vespa.hosted.controller.persistence.ChangeRequestSerializer;
import com.yahoo.yolean.Exceptions;
import javax.ws.rs.BadRequestException;
@@ -63,6 +64,7 @@ public class ChangeManagementApiHandler extends AuditLoggingRequestHandler {
private HttpResponse get(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/changemanagement/v1/assessment/{changeRequestId}")) return changeRequestAssessment(path.get("changeRequestId"));
+ if (path.matches("/changemanagement/v1/vcmr")) return getVCMRs();
return ErrorResponse.notFoundError("Nothing at " + path);
}
@@ -87,8 +89,7 @@ public class ChangeManagementApiHandler extends AuditLoggingRequestHandler {
}
private HttpResponse changeRequestAssessment(String changeRequestId) {
- var optionalChangeRequest = controller.serviceRegistry().changeRequestClient()
- .getUpcomingChangeRequests()
+ var optionalChangeRequest = controller.curator().readChangeRequests()
.stream()
.filter(request -> changeRequestId.equals(request.getChangeRequestSource().getId()))
.findFirst();
@@ -171,6 +172,17 @@ public class ChangeManagementApiHandler extends AuditLoggingRequestHandler {
return new SlimeJsonResponse(slime);
}
+ private HttpResponse getVCMRs() {
+ var changeRequests = controller.curator().readChangeRequests();
+ var slime = new Slime();
+ var cursor = slime.setObject().setArray("vcmrs");
+ changeRequests.forEach(changeRequest -> {
+ var changeCursor = cursor.addObject();
+ ChangeRequestSerializer.writeChangeRequest(changeCursor, changeRequest);
+ });
+ return new SlimeJsonResponse(slime);
+ }
+
private Optional<ZoneId> affectedZone(List<String> hosts) {
var affectedHosts = hosts.stream()
.map(HostName::from)
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java
index 0356e11ae36..898b2531460 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java
@@ -188,9 +188,10 @@ public class RoutingPolicies {
private void storePoliciesOf(LoadBalancerAllocation allocation, @SuppressWarnings("unused") Lock lock) {
var policies = new LinkedHashMap<>(get(allocation.deployment.applicationId()));
for (LoadBalancer loadBalancer : allocation.loadBalancers) {
+ if (loadBalancer.hostname().isEmpty()) continue;
var policyId = new RoutingPolicyId(loadBalancer.application(), loadBalancer.cluster(), allocation.deployment.zoneId());
var existingPolicy = policies.get(policyId);
- var newPolicy = new RoutingPolicy(policyId, loadBalancer.hostname(), loadBalancer.dnsZone(),
+ var newPolicy = new RoutingPolicy(policyId, loadBalancer.hostname().get(), loadBalancer.dnsZone(),
allocation.endpointIdsOf(loadBalancer),
new Status(isActive(loadBalancer), GlobalRouting.DEFAULT_STATUS));
// Preserve global routing status for existing policy
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java
index 4a4159180b5..976cdb5c674 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java
@@ -275,11 +275,9 @@ public class DeploymentContext {
/** Fail current deployment in given job */
public DeploymentContext outOfCapacity(JobType type) {
return failDeployment(type,
- new ConfigServerException(URI.create("https://config.server"),
- "Failed to deploy application",
+ new ConfigServerException(ConfigServerException.ErrorCode.OUT_OF_CAPACITY,
"Out of capacity",
- ConfigServerException.ErrorCode.OUT_OF_CAPACITY,
- new RuntimeException("Out of capacity from test code")));
+ "Failed to deploy application"));
}
/** Fail current deployment in given job */
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java
index 6bd7feb8d96..14244d7bdda 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java
@@ -99,11 +99,9 @@ public class InternalStepRunnerTest {
@Test
public void retriesDeploymentForOneHour() {
- RuntimeException exception = new ConfigServerException(URI.create("https://server"),
- "test failure",
+ RuntimeException exception = new ConfigServerException(ConfigServerException.ErrorCode.APPLICATION_LOCK_FAILURE,
"Exception to retry",
- ConfigServerException.ErrorCode.APPLICATION_LOCK_FAILURE,
- new RuntimeException("Retry me"));
+ "test failure");
tester.configServer().throwOnNextPrepare(exception);
tester.jobs().deploy(app.instanceId(), JobType.devUsEast1, Optional.empty(), applicationPackage());
assertEquals(unfinished, tester.jobs().last(app.instanceId(), JobType.devUsEast1).get().stepStatuses().get(Step.deployReal));
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
index 70651ada473..9e15f2ec788 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
@@ -30,7 +30,6 @@ import com.yahoo.vespa.hosted.controller.api.integration.configserver.ContainerE
import com.yahoo.vespa.hosted.controller.api.integration.configserver.LoadBalancer;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Log;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
-import com.yahoo.vespa.hosted.controller.api.integration.configserver.NotFoundException;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.PrepareResponse;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.ProxyResponse;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.QuotaUsage;
@@ -121,7 +120,8 @@ public class ConfigServerMock extends AbstractComponent implements ConfigServer
new Cluster.Utilization(0.1, 0.2, 0.3, 0.4, 0.5, 0.6),
List.of(new Cluster.ScalingEvent(new ClusterResources(0, 0, NodeResources.unspecified()),
current,
- Instant.ofEpochMilli(1234))),
+ Instant.ofEpochMilli(1234),
+ Optional.of(Instant.ofEpochMilli(2234)))),
"the autoscaling status",
Duration.ofMinutes(6),
0.7,
@@ -390,7 +390,7 @@ public class ConfigServerMock extends AbstractComponent implements ConfigServer
putLoadBalancers(id.zoneId(), List.of(new LoadBalancer(UUID.randomUUID().toString(),
id.applicationId(),
cluster,
- HostName.from("lb-0--" + id.applicationId().serializedForm() + "--" + id.zoneId().toString()),
+ Optional.of(HostName.from("lb-0--" + id.applicationId().serializedForm() + "--" + id.zoneId().toString())),
LoadBalancer.State.active,
Optional.of("dns-zone-1"))));
}
@@ -432,21 +432,18 @@ public class ConfigServerMock extends AbstractComponent implements ConfigServer
public void reindex(DeploymentId deployment, List<String> clusterNames, List<String> documentTypes, boolean indexedOnly) { }
@Override
- public Optional<ApplicationReindexing> getReindexing(DeploymentId deployment) {
- return Optional.of(new ApplicationReindexing(true,
- Map.of("cluster",
- new ApplicationReindexing.Cluster(Map.of("type", 100L),
- Map.of("type", new Status(Instant.ofEpochMilli(345),
- Instant.ofEpochMilli(456),
- Instant.ofEpochMilli(567),
- ApplicationReindexing.State.FAILED,
- "(#`д´)ノ",
- 0.1))))));
-
-
+ public ApplicationReindexing getReindexing(DeploymentId deployment) {
+ return new ApplicationReindexing(true,
+ Map.of("cluster",
+ new ApplicationReindexing.Cluster(Map.of("type", 100L),
+ Map.of("type", new Status(Instant.ofEpochMilli(345),
+ Instant.ofEpochMilli(456),
+ Instant.ofEpochMilli(567),
+ ApplicationReindexing.State.FAILED,
+ "(#`д´)ノ",
+ 0.1)))));
}
-
@Override
public void disableReindexing(DeploymentId deployment) { }
@@ -464,12 +461,13 @@ public class ConfigServerMock extends AbstractComponent implements ConfigServer
}
@Override
- public void deactivate(DeploymentId deployment) throws NotFoundException {
+ public void deactivate(DeploymentId deployment) {
ApplicationId applicationId = deployment.applicationId();
nodeRepository().removeNodes(deployment.zoneId(),
nodeRepository().list(deployment.zoneId(), applicationId));
if ( ! applications.containsKey(deployment))
- throw new NotFoundException("No application with id " + applicationId + " exists, cannot deactivate");
+ return;
+
applications.remove(deployment);
serviceStatus.remove(deployment);
removeLoadBalancers(deployment.applicationId(), deployment.zoneId());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java
index 2eabc5dc21b..fe241976d13 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java
@@ -33,7 +33,6 @@ import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.Optional;
-import java.util.Set;
import java.util.function.Function;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;
@@ -56,6 +55,7 @@ public class NodeRepositoryMock implements NodeRepository {
private final Map<ZoneId, List<NodeRepositoryNode>> nodeRepoNodes = new HashMap<>();
private boolean allowPatching = false;
+ private boolean hasSpareCapacity = false;
/** Add or update given nodes in zone */
public void putNodes(ZoneId zone, List<Node> nodes) {
@@ -162,8 +162,14 @@ public class NodeRepositoryMock implements NodeRepository {
}
@Override
- public void setState(ZoneId zone, NodeState nodeState, String nodename) {
- throw new UnsupportedOperationException();
+ public void setState(ZoneId zone, NodeState nodeState, String hostName) {
+ var existing = list(zone, List.of(HostName.from(hostName)));
+ if (existing.size() != 1) throw new IllegalArgumentException("Node " + hostName + " not found in " + zone);
+
+ var node = new Node.Builder(existing.get(0))
+ .state(Node.State.valueOf(nodeState.name()))
+ .build();
+ putNodes(zone, node);
}
@Override
@@ -177,17 +183,7 @@ public class NodeRepositoryMock implements NodeRepository {
}
@Override
- public NodeList listNodes(ZoneId zone, ApplicationId application) {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public NodeList listNodes(ZoneId zone, List<HostName> hostnames) {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public List<Node> list(ZoneId zone) {
+ public List<Node> list(ZoneId zone, boolean includeDeprovisioned) {
return List.copyOf(nodeRepository.getOrDefault(zone, Map.of()).values());
}
@@ -288,10 +284,16 @@ public class NodeRepositoryMock implements NodeRepository {
List<Node> existing = list(zoneId, List.of(HostName.from(hostName)));
if (existing.size() != 1) throw new IllegalArgumentException("Node " + hostName + " not found in " + zoneId);
- // Note: Only supports switchHostname
- Node newNode = new Node.Builder(existing.get(0)).switchHostname(node.getSwitchHostname())
- .build();
- putNodes(zoneId, newNode);
+ // Note: Only supports switchHostname, modelName and wantToRetire
+ Node.Builder newNode = new Node.Builder(existing.get(0));
+ if (node.getSwitchHostname() != null)
+ newNode.switchHostname(node.getSwitchHostname());
+ if (node.getModelName() != null)
+ newNode.modelName(node.getModelName());
+ if (node.getWantToRetire() != null)
+ newNode.wantToRetire(node.getWantToRetire());
+
+ putNodes(zoneId, newNode.build());
}
@Override
@@ -301,7 +303,7 @@ public class NodeRepositoryMock implements NodeRepository {
@Override
public boolean isReplaceable(ZoneId zoneId, List<HostName> hostNames) {
- return false;
+ return hasSpareCapacity;
}
public Optional<Duration> osUpgradeBudget(ZoneId zone, NodeType type, Version version) {
@@ -351,4 +353,8 @@ public class NodeRepositoryMock implements NodeRepository {
return this;
}
+ public void hasSpareCapacity(boolean hasSpareCapacity) {
+ this.hasSpareCapacity = hasSpareCapacity;
+ }
+
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeManagementAssessorTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeManagementAssessorTest.java
index 575a38cd637..476d2465202 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeManagementAssessorTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeManagementAssessorTest.java
@@ -69,7 +69,7 @@ public class ChangeManagementAssessorTest {
@Test
public void one_of_two_groups_in_one_of_two_clusters() {
ZoneId zone = ZoneId.from("prod", "eu-trd");
- List<String> hostNames = Arrays.asList("host1", "host2");
+ List<String> hostNames = Arrays.asList("host1", "host2", "host5");
List<NodeRepositoryNode> allNodesInZone = new ArrayList<>();
// Two impacted nodes on host1
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeRequestMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeRequestMaintainerTest.java
index 1ce59587d6c..290e08ca47b 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeRequestMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeRequestMaintainerTest.java
@@ -1,10 +1,13 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.maintenance;
+import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequest;
import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequestSource;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequestSource.Status;
import com.yahoo.vespa.hosted.controller.api.integration.vcmr.MockChangeRequestClient;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.VespaChangeRequest;
import org.junit.Test;
import java.time.Duration;
@@ -24,10 +27,11 @@ public class ChangeRequestMaintainerTest {
@Test
public void only_approve_requests_pending_approval() {
-
+ var changeRequest1 = newChangeRequest("id1", ChangeRequest.Approval.APPROVED);
+ var changeRequest2 = newChangeRequest("id2", ChangeRequest.Approval.REQUESTED);
var upcomingChangeRequests = List.of(
- newChangeRequest("id1", ChangeRequest.Approval.APPROVED),
- newChangeRequest("id2", ChangeRequest.Approval.REQUESTED)
+ changeRequest1,
+ changeRequest2
);
changeRequestClient.setUpcomingChangeRequests(upcomingChangeRequests);
@@ -37,24 +41,72 @@ public class ChangeRequestMaintainerTest {
assertEquals(1, approvedChangeRequests.size());
assertEquals("id2", approvedChangeRequests.get(0).getId());
+ var writtenChangeRequests = tester.curator().readChangeRequests();
+ assertEquals(2, writtenChangeRequests.size());
+
+ var expectedChangeRequest = new VespaChangeRequest(changeRequest1, ZoneId.from("prod.us-east-3"));
+ assertEquals(expectedChangeRequest, writtenChangeRequests.get(0));
+ }
+
+ @Test
+ public void updates_status_time_and_approval() {
+ var time = ZonedDateTime.now();
+ var persistedChangeRequest = persistedChangeRequest("some-id", time.minusDays(5), Status.WAITING_FOR_APPROVAL);
+ tester.curator().writeChangeRequest(persistedChangeRequest);
+
+ var updatedChangeRequest = newChangeRequest("some-id", ChangeRequest.Approval.APPROVED, time, Status.CANCELED);
+ changeRequestClient.setUpcomingChangeRequests(List.of(updatedChangeRequest));
+ changeRequestMaintainer.maintain();
+
+ persistedChangeRequest = tester.curator().readChangeRequest("some-id").get();
+ assertEquals(Status.CANCELED, persistedChangeRequest.getChangeRequestSource().getStatus());
+ assertEquals(ChangeRequest.Approval.APPROVED, persistedChangeRequest.getApproval());
+ assertEquals(time, persistedChangeRequest.getChangeRequestSource().getPlannedStartTime());
+ }
+
+ @Test
+ public void deletes_old_change_requests() {
+ var now = ZonedDateTime.now();
+ var before = now.minus(Duration.ofDays(8));
+ var newChangeRequest = persistedChangeRequest("new", now, Status.CLOSED);
+ var oldChangeRequest = persistedChangeRequest("old", before, Status.CLOSED);
+
+ tester.curator().writeChangeRequest(newChangeRequest);
+ tester.curator().writeChangeRequest(oldChangeRequest);
+
+ changeRequestMaintainer.maintain();
+
+ var persistedChangeRequests = tester.curator().readChangeRequests();
+ assertEquals(1, persistedChangeRequests.size());
+ assertEquals(newChangeRequest, persistedChangeRequests.get(0));
}
private ChangeRequest newChangeRequest(String id, ChangeRequest.Approval approval) {
+ return newChangeRequest(id, approval, ZonedDateTime.now(), Status.CLOSED);
+ }
+
+ private ChangeRequest newChangeRequest(String id, ChangeRequest.Approval approval, ZonedDateTime time, Status status) {
return new ChangeRequest.Builder()
.id(id)
.approval(approval)
.impact(ChangeRequest.Impact.VERY_HIGH)
.impactedSwitches(List.of())
- .impactedHosts(List.of())
+ .impactedHosts(List.of("node-1-tenant-host-prod.us-east-3"))
.changeRequestSource(new ChangeRequestSource.Builder()
- .plannedStartTime(ZonedDateTime.now())
- .plannedEndTime(ZonedDateTime.now())
+ .plannedStartTime(time)
+ .plannedEndTime(time)
.id("some-id")
.url("some-url")
.system("some-system")
- .status(ChangeRequestSource.Status.CLOSED)
+ .status(status)
.build())
.build();
+ }
+ private VespaChangeRequest persistedChangeRequest(String id, ZonedDateTime time, Status status) {
+ return new VespaChangeRequest(
+ newChangeRequest(id, ChangeRequest.Approval.REQUESTED, time, status),
+ ZoneId.from("prod.us-east-3")
+ );
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/CloudEventReporterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/CloudEventReporterTest.java
index d14d4014b48..680743055c9 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/CloudEventReporterTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/CloudEventReporterTest.java
@@ -128,7 +128,7 @@ public class CloudEventReporterTest {
}
private Set<String> getHostnames(ZoneId zoneId) {
- return tester.configServer().nodeRepository().list(zoneId)
+ return tester.configServer().nodeRepository().list(zoneId, false)
.stream()
.map(node -> node.hostname().value())
.collect(Collectors.toSet());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java
index a1fb6463074..84f4f3d9b7c 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java
@@ -114,10 +114,7 @@ public class DeploymentMetricsMaintainerTest {
}
private void setMetrics(ApplicationId application, Map<String, Double> metrics) {
- var clusterMetrics = new ClusterMetrics("default", "container");
- for (var kv : metrics.entrySet()) {
- clusterMetrics = clusterMetrics.addMetric(kv.getKey(), kv.getValue());
- }
+ var clusterMetrics = new ClusterMetrics("default", "container", metrics);
tester.controllerTester().serviceRegistry().configServerMock().setMetrics(new DeploymentId(application, ZoneId.from("dev", "us-east-1")), clusterMetrics);
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/HostSwitchUpdaterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/HostInfoUpdaterTest.java
index 4dcacb3934b..0baee28143c 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/HostSwitchUpdaterTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/HostInfoUpdaterTest.java
@@ -19,8 +19,9 @@ import static org.junit.Assert.assertTrue;
/**
* @author mpolden
+ * @author bjormel
*/
-public class HostSwitchUpdaterTest {
+public class HostInfoUpdaterTest {
@Test
public void maintain() {
@@ -29,7 +30,7 @@ public class HostSwitchUpdaterTest {
addNodeEntities(tester);
// First iteration patches all hosts
- HostSwitchUpdater maintainer = new HostSwitchUpdater(tester.controller(), Duration.ofDays(1));
+ HostInfoUpdater maintainer = new HostInfoUpdater(tester.controller(), Duration.ofDays(1));
maintainer.maintain();
List<Node> nodes = allNodes(tester);
assertFalse(nodes.isEmpty());
@@ -48,7 +49,7 @@ public class HostSwitchUpdaterTest {
// One host is moved to a different switch
Node host = allNodes(tester).stream().filter(node -> node.type().isHost()).findFirst().get();
String newSwitch = "tor2-" + host.hostname().value();
- NodeEntity nodeEntity = new NodeEntity(host.hostname().value(), "", "", newSwitch);
+ NodeEntity nodeEntity = new NodeEntity(host.hostname().value(), "RD350G", "Lenovo", newSwitch);
tester.serviceRegistry().entityService().addNodeEntity(nodeEntity);
// Host is updated
@@ -56,12 +57,29 @@ public class HostSwitchUpdaterTest {
maintainer.maintain();
assertEquals(newSwitch, getNode(host.hostname(), tester).switchHostname().get());
+ // Host has updated model
+ String newModel = "Quanta q801";
+ String manufacturer = "quanta computer";
+ nodeEntity = new NodeEntity(host.hostname().value(), newModel, manufacturer, newSwitch);
+ tester.serviceRegistry().entityService().addNodeEntity(nodeEntity);
+
+ // Host is updated
+ tester.serviceRegistry().configServer().nodeRepository().allowPatching(true);
+ maintainer.maintain();
+ assertEquals(manufacturer + " " + newModel, getNode(host.hostname(), tester).modelName().get());
+
// Host keeps old switch hostname if removed from the node entity
- nodeEntity = new NodeEntity(host.hostname().value(), "", "", "");
+ nodeEntity = new NodeEntity(host.hostname().value(), newModel, manufacturer, "");
tester.serviceRegistry().entityService().addNodeEntity(nodeEntity);
maintainer.maintain();
assertEquals(newSwitch, getNode(host.hostname(), tester).switchHostname().get());
+ // Host keeps old model name if removed from the node entity
+ nodeEntity = new NodeEntity(host.hostname().value(), "", "", newSwitch);
+ tester.serviceRegistry().entityService().addNodeEntity(nodeEntity);
+ maintainer.maintain();
+ assertEquals(manufacturer + " " + newModel, getNode(host.hostname(), tester).modelName().get());
+
// Updates node registered under a different hostname
ZoneId zone = tester.zoneRegistry().zones().controllerUpgraded().all().ids().get(0);
String hostnameSuffix = ".prod." + zone.value();
@@ -73,7 +91,7 @@ public class HostSwitchUpdaterTest {
.build();
tester.serviceRegistry().configServer().nodeRepository().putNodes(zone, List.of(configNode, configHost));
String switchHostname = switchHostname(configHost);
- NodeEntity configNodeEntity = new NodeEntity("cfg3" + hostnameSuffix, "", "", switchHostname);
+ NodeEntity configNodeEntity = new NodeEntity("cfg3" + hostnameSuffix, "RD350G", "Lenovo", switchHostname);
tester.serviceRegistry().entityService().addNodeEntity(configNodeEntity);
maintainer.maintain();
assertEquals(switchHostname, getNode(configHost.hostname(), tester).switchHostname().get());
@@ -90,7 +108,7 @@ public class HostSwitchUpdaterTest {
private static List<Node> allNodes(ControllerTester tester) {
List<Node> nodes = new ArrayList<>();
for (var zone : tester.zoneRegistry().zones().controllerUpgraded().all().ids()) {
- nodes.addAll(tester.serviceRegistry().configServer().nodeRepository().list(zone));
+ nodes.addAll(tester.serviceRegistry().configServer().nodeRepository().list(zone, false));
}
return nodes;
}
@@ -102,7 +120,7 @@ public class HostSwitchUpdaterTest {
private static void addNodeEntities(ControllerTester tester) {
for (var node : allNodes(tester)) {
if (!node.type().isHost()) continue;
- NodeEntity nodeEntity = new NodeEntity(node.hostname().value(), "", "", switchHostname(node));
+ NodeEntity nodeEntity = new NodeEntity(node.hostname().value(), "RD350G", "Lenovo", switchHostname(node));
tester.serviceRegistry().entityService().addNodeEntity(nodeEntity);
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporterTest.java
index 1b43fe2aaa5..d42342b57fb 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporterTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporterTest.java
@@ -357,7 +357,7 @@ public class MetricsReporterTest {
tester.configServer().setOsVersion(version0, SystemApplication.tenantHost.id(), zone);
tester.configServer().setOsVersion(version0, SystemApplication.configServerHost.id(), zone);
runAll(statusUpdater, reporter);
- List<Node> hosts = tester.configServer().nodeRepository().list(zone);
+ List<Node> hosts = tester.configServer().nodeRepository().list(zone, false);
assertOsChangeDuration(Duration.ZERO, hosts);
var targets = List.of(Version.fromString("8.1"), Version.fromString("8.2"));
@@ -381,7 +381,7 @@ public class MetricsReporterTest {
tester.configServer().nodeRepository().list(zone, SystemApplication.tenantHost.id()).stream()
.map(Node::wantedOsVersion).min(Comparator.naturalOrder()).get());
assertTrue("No nodes are suspended", tester.controller().serviceRegistry().configServer()
- .nodeRepository().list(zone).stream()
+ .nodeRepository().list(zone, false).stream()
.noneMatch(node -> node.serviceState() == Node.ServiceState.allowedDown));
// Another 30 minutes pass
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/SystemRoutingPolicyMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/SystemRoutingPolicyMaintainerTest.java
index d7440a706ea..1eadae18668 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/SystemRoutingPolicyMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/SystemRoutingPolicyMaintainerTest.java
@@ -35,7 +35,7 @@ public class SystemRoutingPolicyMaintainerTest {
tester.configServer().putLoadBalancers(zone, List.of(new LoadBalancer("lb1",
SystemApplication.configServer.id(),
ClusterSpec.Id.from("config"),
- HostName.from("lb1.example.com"),
+ Optional.of(HostName.from("lb1.example.com")),
LoadBalancer.State.active,
Optional.of("dns-zone-1"))));
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java
index 2f24d3e6eee..2afa3a0faea 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java
@@ -13,6 +13,7 @@ import com.yahoo.vespa.hosted.controller.integration.NodeRepositoryMock;
import org.junit.Test;
import java.time.Duration;
+import java.util.Map;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@@ -81,8 +82,7 @@ public class TrafficShareUpdaterTest {
}
private void setQpsMetric(double qps, ApplicationId application, ZoneId zone, DeploymentTester tester) {
- var clusterMetrics = new ClusterMetrics("default", "container");
- clusterMetrics = clusterMetrics.addMetric(ClusterMetrics.QUERIES_PER_SECOND, qps);
+ var clusterMetrics = new ClusterMetrics("default", "container", Map.of(ClusterMetrics.QUERIES_PER_SECOND, qps));
tester.controllerTester().serviceRegistry().configServerMock().setMetrics(new DeploymentId(application, zone), clusterMetrics);
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/VCMRMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/VCMRMaintainerTest.java
new file mode 100644
index 00000000000..d5c35f806f4
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/VCMRMaintainerTest.java
@@ -0,0 +1,214 @@
+package com.yahoo.vespa.hosted.controller.maintenance;
+
+import com.yahoo.config.provision.HostName;
+import com.yahoo.config.provision.NodeType;
+import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.vespa.hosted.controller.ControllerTester;
+import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequest;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequestSource;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.HostAction;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.HostAction.State;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.VespaChangeRequest;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.VespaChangeRequest.Status;
+import com.yahoo.vespa.hosted.controller.integration.NodeRepositoryMock;
+import org.junit.Test;
+
+import java.time.Duration;
+import java.time.Instant;
+import java.time.ZonedDateTime;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * @author olaa
+ */
+public class VCMRMaintainerTest {
+
+ private final ControllerTester tester = new ControllerTester();
+ private final VCMRMaintainer maintainer = new VCMRMaintainer(tester.controller(), Duration.ofMinutes(1));
+ private final NodeRepositoryMock nodeRepo = tester.serviceRegistry().configServer().nodeRepository();
+ private final ZoneId zoneId = ZoneId.from("prod.us-east-3");
+ private final HostName host1 = HostName.from("host1");
+ private final HostName host2 = HostName.from("host2");
+ private final String changeRequestId = "id123";
+
+ @Test
+ public void recycle_hosts_after_completion() {
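+        // Hosts in a canceled/closed change request are recycled: parked hosts go to dirty, failed hosts are left alone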
+ var parkedNode = createNode(host1, NodeType.host, Node.State.parked, true);
+ var failedNode = createNode(host2, NodeType.host, Node.State.failed, false);
+ nodeRepo.putNodes(zoneId, List.of(parkedNode, failedNode));
+
+ tester.curator().writeChangeRequest(canceledChangeRequest());
+ maintainer.maintain();
+
+ // Only the parked node is recycled
+ var nodeList = nodeRepo.list(zoneId, List.of(host1, host2));
+ assertEquals(Node.State.dirty, nodeList.get(0).state());
+ assertEquals(Node.State.failed, nodeList.get(1).state());
+ var writtenChangeRequest = tester.curator().readChangeRequest(changeRequestId).get();
+ assertEquals(Status.COMPLETED, writtenChangeRequest.getStatus());
+ }
+
+ @Test
+    public void infrastructure_hosts_require_manual_intervention() {
+ var configNode = createNode(host1, NodeType.config, Node.State.active, false);
+ var activeNode = createNode(host2, NodeType.host, Node.State.active, false);
+ nodeRepo.putNodes(zoneId, List.of(configNode, activeNode));
+ nodeRepo.hasSpareCapacity(true);
+
+ tester.curator().writeChangeRequest(futureChangeRequest());
+ maintainer.maintain();
+
+ var writtenChangeRequest = tester.curator().readChangeRequest(changeRequestId).get();
+ var configAction = writtenChangeRequest.getHostActionPlan().get(0);
+ var tenantHostAction = writtenChangeRequest.getHostActionPlan().get(1);
+ assertEquals(State.REQUIRES_OPERATOR_ACTION, configAction.getState());
+ assertEquals(State.PENDING_RETIREMENT, tenantHostAction.getState());
+ assertEquals(Status.REQUIRES_OPERATOR_ACTION, writtenChangeRequest.getStatus());
+ }
+
+ @Test
+ public void retires_hosts_when_near_vcmr() {
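+        // When the change request start time is near, active tenant hosts are set wantToRetire; failed hosts get no action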
+ var activeNode = createNode(host1, NodeType.host, Node.State.active, false);
+ var failedNode = createNode(host2, NodeType.host, Node.State.failed, false);
+ nodeRepo.putNodes(zoneId, List.of(activeNode, failedNode));
+ nodeRepo.allowPatching(true).hasSpareCapacity(true);
+
+ tester.curator().writeChangeRequest(startingChangeRequest());
+ maintainer.maintain();
+
+ var writtenChangeRequest = tester.curator().readChangeRequest(changeRequestId).orElseThrow();
+ var parkedNodeAction = writtenChangeRequest.getHostActionPlan().get(0);
+ var failedNodeAction = writtenChangeRequest.getHostActionPlan().get(1);
+ assertEquals(State.RETIRING, parkedNodeAction.getState());
+ assertEquals(State.NONE, failedNodeAction.getState());
+ assertEquals(Status.IN_PROGRESS, writtenChangeRequest.getStatus());
+
+ activeNode = nodeRepo.list(zoneId, List.of(activeNode.hostname())).get(0);
+ assertTrue(activeNode.wantToRetire());
+
+ }
+
+ @Test
+ public void no_spare_capacity_requires_operator_action() {
+ var activeNode = createNode(host1, NodeType.host, Node.State.active, false);
+ var failedNode = createNode(host2, NodeType.host, Node.State.failed, false);
+ nodeRepo.putNodes(zoneId, List.of(activeNode, failedNode));
+ nodeRepo.hasSpareCapacity(false);
+
+ tester.curator().writeChangeRequest(startingChangeRequest());
+ maintainer.maintain();
+
+ var writtenChangeRequest = tester.curator().readChangeRequest(changeRequestId).orElseThrow();
+ var parkedNodeAction = writtenChangeRequest.getHostActionPlan().get(0);
+ var failedNodeAction = writtenChangeRequest.getHostActionPlan().get(1);
+ assertEquals(State.REQUIRES_OPERATOR_ACTION, parkedNodeAction.getState());
+ assertEquals(State.REQUIRES_OPERATOR_ACTION, failedNodeAction.getState());
+ assertEquals(Status.REQUIRES_OPERATOR_ACTION, writtenChangeRequest.getStatus());
+ }
+
+ @Test
+ public void updates_status_when_retiring_host_is_parked() {
+ var parkedNode = createNode(host1, NodeType.host, Node.State.parked, true);
+ nodeRepo.putNodes(zoneId, parkedNode);
+ nodeRepo.hasSpareCapacity(true);
+
+ tester.curator().writeChangeRequest(inProgressChangeRequest());
+ maintainer.maintain();
+
+ var writtenChangeRequest = tester.curator().readChangeRequest(changeRequestId).orElseThrow();
+ var parkedNodeAction = writtenChangeRequest.getHostActionPlan().get(0);
+ assertEquals(State.RETIRED, parkedNodeAction.getState());
+ assertEquals(Status.IN_PROGRESS, writtenChangeRequest.getStatus());
+ }
+
+ @Test
+ public void pending_retirement_when_vcmr_is_far_ahead() {
+ var activeNode = createNode(host2, NodeType.host, Node.State.active, false);
+ nodeRepo.putNodes(zoneId, List.of(activeNode));
+ nodeRepo.hasSpareCapacity(true);
+
+ tester.curator().writeChangeRequest(futureChangeRequest());
+ maintainer.maintain();
+
+ var writtenChangeRequest = tester.curator().readChangeRequest(changeRequestId).get();
+ var tenantHostAction = writtenChangeRequest.getHostActionPlan().get(0);
+ assertEquals(State.PENDING_RETIREMENT, tenantHostAction.getState());
+ assertEquals(Status.PENDING_ACTION, writtenChangeRequest.getStatus());
+ }
+
+ @Test
+ public void recycles_nodes_if_vcmr_is_postponed() {
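+        // A postponed start time rolls hosts back to PENDING_RETIREMENT: parked nodes are recycled and retiring nodes are un-retired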
+ var parkedNode = createNode(host1, NodeType.host, Node.State.parked, false);
+ var retiringNode = createNode(host2, NodeType.host, Node.State.active, true);
+ nodeRepo.putNodes(zoneId, List.of(parkedNode, retiringNode));
+ nodeRepo.allowPatching(true).hasSpareCapacity(true);
+
+ tester.curator().writeChangeRequest(postponedChangeRequest());
+ maintainer.maintain();
+
+ var writtenChangeRequest = tester.curator().readChangeRequest(changeRequestId).get();
+ var hostAction = writtenChangeRequest.getHostActionPlan().get(0);
+ assertEquals(State.PENDING_RETIREMENT, hostAction.getState());
+
+ parkedNode = nodeRepo.list(zoneId, List.of(parkedNode.hostname())).get(0);
+ assertEquals(Node.State.dirty, parkedNode.state());
+ assertFalse(parkedNode.wantToRetire());
+
+ retiringNode = nodeRepo.list(zoneId, List.of(retiringNode.hostname())).get(0);
+ assertEquals(Node.State.active, retiringNode.state());
+ assertFalse(retiringNode.wantToRetire());
+ }
+
+
+ private VespaChangeRequest canceledChangeRequest() {
+ return newChangeRequest(ChangeRequestSource.Status.CANCELED, State.RETIRED, State.RETIRING, ZonedDateTime.now());
+ }
+
+ private VespaChangeRequest futureChangeRequest() {
+ return newChangeRequest(ChangeRequestSource.Status.WAITING_FOR_APPROVAL, State.NONE, State.NONE, ZonedDateTime.now().plus(Duration.ofDays(5L)));
+ }
+
+ private VespaChangeRequest startingChangeRequest() {
+ return newChangeRequest(ChangeRequestSource.Status.STARTED, State.PENDING_RETIREMENT, State.NONE, ZonedDateTime.now());
+ }
+
+ private VespaChangeRequest inProgressChangeRequest() {
+ return newChangeRequest(ChangeRequestSource.Status.STARTED, State.RETIRING, State.RETIRING, ZonedDateTime.now());
+ }
+
+ private VespaChangeRequest postponedChangeRequest() {
+ return newChangeRequest(ChangeRequestSource.Status.STARTED, State.RETIRED, State.RETIRING, ZonedDateTime.now().plus(Duration.ofDays(8)));
+ }
+
+
+ private VespaChangeRequest newChangeRequest(ChangeRequestSource.Status sourceStatus, State state1, State state2, ZonedDateTime startTime) {
+ var source = new ChangeRequestSource("aws", changeRequestId, "url", sourceStatus , startTime, ZonedDateTime.now());
+ var actionPlan = List.of(
+ new HostAction(host1.value(), state1, Instant.now()),
+ new HostAction(host2.value(), state2, Instant.now())
+ );
+ return new VespaChangeRequest(
+ changeRequestId,
+ source,
+ List.of("switch1"),
+ List.of("host1", "host2"),
+ ChangeRequest.Approval.APPROVED,
+ ChangeRequest.Impact.VERY_HIGH,
+ VespaChangeRequest.Status.IN_PROGRESS,
+ actionPlan,
+ ZoneId.from("prod.us-east-3")
+ );
+ }
+
+ private Node createNode(HostName hostname, NodeType nodeType, Node.State state, boolean wantToRetire) {
+ return new Node.Builder()
+ .hostname(hostname)
+ .type(nodeType)
+ .state(state)
+ .wantToRetire(wantToRetire)
+ .build();
+ }
+} \ No newline at end of file
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/metric/ConfigServerMetricsTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/metric/ConfigServerMetricsTest.java
index 3d1cb3eba86..33b043bc93d 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/metric/ConfigServerMetricsTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/metric/ConfigServerMetricsTest.java
@@ -12,6 +12,7 @@ import org.junit.Before;
import org.junit.Test;
import java.util.List;
+import java.util.Map;
import static org.junit.Assert.assertEquals;
@@ -44,15 +45,9 @@ public class ConfigServerMetricsTest {
//
var deploymentId = new DeploymentId(applicationId, zoneId);
- var clusterMetrics1 = new ClusterMetrics("niceCluster", "container") {{
- addMetric("queriesPerSecond", 23.0);
- addMetric("queryLatency", 1337.0);
- }};
+ var clusterMetrics1 = new ClusterMetrics("niceCluster", "container", Map.of("queriesPerSecond", 23.0, "queryLatency", 1337.0));
- var clusterMetrics2 = new ClusterMetrics("alsoNiceCluster", "container") {{
- addMetric("queriesPerSecond", 11.0);
- addMetric("queryLatency", 12.0);
- }};
+ var clusterMetrics2 = new ClusterMetrics("alsoNiceCluster", "container", Map.of("queriesPerSecond", 11.0, "queryLatency", 12.0));
var response = List.of(clusterMetrics1, clusterMetrics2);
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java
new file mode 100644
index 00000000000..90d1ecb2f20
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java
@@ -0,0 +1,107 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.notification;
+
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.TenantName;
+import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.path.Path;
+import com.yahoo.test.ManualClock;
+import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
+import com.yahoo.vespa.hosted.controller.persistence.MockCuratorDb;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.stream.Collectors;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * @author freva
+ */
+public class NotificationsDbTest {
+
+ private static final TenantName tenant = TenantName.from("tenant1");
+ private static final List<Notification> notifications = List.of(
+ notification(1001, Notification.Type.DEPLOYMENT_FAILURE, NotificationSource.from(tenant), "tenant msg"),
+ notification(1101, Notification.Type.DEPLOYMENT_FAILURE, NotificationSource.from(TenantAndApplicationId.from(tenant.value(), "app1")), "app msg"),
+ notification(1201, Notification.Type.DEPLOYMENT_FAILURE, NotificationSource.from(ApplicationId.from(tenant.value(), "app2", "instance2")), "instance msg"),
+ notification(1301, Notification.Type.DEPLOYMENT_FAILURE, NotificationSource.from(new DeploymentId(ApplicationId.from(tenant.value(), "app2", "instance2"), ZoneId.from("prod", "us-north-2"))), "deployment msg"),
+ notification(1401, Notification.Type.DEPLOYMENT_FAILURE, NotificationSource.from(new DeploymentId(ApplicationId.from(tenant.value(), "app1", "instance1"), ZoneId.from("dev", "us-south-1")), ClusterSpec.Id.from("cluster1")), "cluster msg"),
+ notification(1501, Notification.Type.DEPLOYMENT_FAILURE, NotificationSource.from(new RunId(ApplicationId.from(tenant.value(), "app1", "instance1"), JobType.devUsEast1, 4)), "run id msg"));
+
+ private final ManualClock clock = new ManualClock(Instant.ofEpochSecond(12345));
+ private final MockCuratorDb curatorDb = new MockCuratorDb();
+ private final NotificationsDb notificationsDb = new NotificationsDb(clock, curatorDb);
+
+ @Test
+ public void list_test() {
+ assertEquals(notifications, notificationsDb.listNotifications(NotificationSource.from(tenant), false));
+ assertEquals(notificationIndices(0, 1, 3), notificationsDb.listNotifications(NotificationSource.from(tenant), true));
+ assertEquals(notificationIndices(2, 3), notificationsDb.listNotifications(NotificationSource.from(TenantAndApplicationId.from(tenant.value(), "app2")), false));
+ assertEquals(notificationIndices(4, 5), notificationsDb.listNotifications(NotificationSource.from(ApplicationId.from(tenant.value(), "app1", "instance1")), false));
+ assertEquals(notificationIndices(5), notificationsDb.listNotifications(NotificationSource.from(new RunId(ApplicationId.from(tenant.value(), "app1", "instance1"), JobType.devUsEast1, 5)), false));
+ assertEquals(List.of(), notificationsDb.listNotifications(NotificationSource.from(new RunId(ApplicationId.from(tenant.value(), "app1", "instance1"), JobType.productionUsEast3, 4)), false));
+ }
+
+ @Test
+ public void add_test() {
+ Notification notification1 = notification(12345, Notification.Type.DEPLOYMENT_FAILURE, NotificationSource.from(ApplicationId.from(tenant.value(), "app2", "instance2")), "instance msg #2");
+ Notification notification2 = notification(12345, Notification.Type.DEPLOYMENT_FAILURE, NotificationSource.from(ApplicationId.from(tenant.value(), "app3", "instance2")), "instance msg #3");
+
+ // Replace the 3rd notification
+ notificationsDb.setNotification(notification1.source(), notification1.type(), notification1.messages());
+
+ // Notification for a new app, add without replacement
+ notificationsDb.setNotification(notification2.source(), notification2.type(), notification2.messages());
+
+ List<Notification> expected = notificationIndices(0, 1, 3, 4, 5);
+ expected.addAll(List.of(notification1, notification2));
+ assertEquals(expected, curatorDb.readNotifications(tenant));
+ }
+
+ @Test
+ public void remove_single_test() {
+ // Remove the 3rd notification
+ notificationsDb.removeNotification(NotificationSource.from(ApplicationId.from(tenant.value(), "app2", "instance2")), Notification.Type.DEPLOYMENT_FAILURE);
+
+ // Removing something that doesn't exist is OK
+ notificationsDb.removeNotification(NotificationSource.from(ApplicationId.from(tenant.value(), "app3", "instance2")), Notification.Type.DEPLOYMENT_FAILURE);
+
+ assertEquals(notificationIndices(0, 1, 3, 4, 5), curatorDb.readNotifications(tenant));
+ }
+
+ @Test
+ public void remove_multiple_test() {
+        // Remove all notifications with sources under app1.instance1 (the cluster and run notifications)
+ notificationsDb.removeNotifications(NotificationSource.from(ApplicationId.from(tenant.value(), "app1", "instance1")));
+ assertEquals(notificationIndices(0, 1, 2, 3), curatorDb.readNotifications(tenant));
+ assertTrue(curatorDb.curator().exists(Path.fromString("/controller/v1/notifications/" + tenant.value())));
+
+ notificationsDb.removeNotifications(NotificationSource.from(tenant));
+ assertEquals(List.of(), curatorDb.readNotifications(tenant));
+ assertFalse(curatorDb.curator().exists(Path.fromString("/controller/v1/notifications/" + tenant.value())));
+ }
+
+ @Before
+ public void init() {
+ curatorDb.writeNotifications(tenant, notifications);
+ }
+
+ private static List<Notification> notificationIndices(int... indices) {
+ return Arrays.stream(indices).mapToObj(notifications::get).collect(Collectors.toCollection(ArrayList::new));
+ }
+
+ private static Notification notification(long secondsSinceEpoch, Notification.Type type, NotificationSource source, String... messages) {
+ return new Notification(Instant.ofEpochSecond(secondsSinceEpoch), type, source, List.of(messages));
+ }
+}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ChangeRequestSerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ChangeRequestSerializerTest.java
new file mode 100644
index 00000000000..40a045c44cf
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ChangeRequestSerializerTest.java
@@ -0,0 +1,45 @@
+package com.yahoo.vespa.hosted.controller.persistence;
+
+import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequest;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequestSource;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.HostAction;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.VespaChangeRequest;
+import org.junit.Test;
+
+import java.time.Instant;
+import java.time.ZonedDateTime;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * @author olaa
+ */
+public class ChangeRequestSerializerTest {
+
+ @Test
+ public void reserialization_equality() {
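+        // Serialize to Slime and back, then verify the round trip preserves equality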
+ var source = new ChangeRequestSource("aws", "id321", "url", ChangeRequestSource.Status.STARTED, ZonedDateTime.now(), ZonedDateTime.now());
+ var actionPlan = List.of(
+ new HostAction("host1", HostAction.State.RETIRING, Instant.now()),
+ new HostAction("host2", HostAction.State.RETIRED, Instant.now())
+ );
+
+ var changeRequest = new VespaChangeRequest(
+ "id123",
+ source,
+ List.of("switch1"),
+ List.of("host1", "host2"),
+ ChangeRequest.Approval.APPROVED,
+ ChangeRequest.Impact.VERY_HIGH,
+ VespaChangeRequest.Status.IN_PROGRESS,
+ actionPlan,
+ ZoneId.defaultId()
+ );
+
+ var reserialized = ChangeRequestSerializer.fromSlime(ChangeRequestSerializer.toSlime(changeRequest));
+ assertEquals(changeRequest, reserialized);
+ }
+
+} \ No newline at end of file
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializerTest.java
new file mode 100644
index 00000000000..f3f2d10cfd0
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializerTest.java
@@ -0,0 +1,59 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.persistence;
+
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.TenantName;
+import com.yahoo.slime.Slime;
+import com.yahoo.slime.SlimeUtils;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
+import com.yahoo.vespa.hosted.controller.notification.Notification;
+import com.yahoo.vespa.hosted.controller.notification.NotificationSource;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.time.Instant;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * @author freva
+ */
+public class NotificationsSerializerTest {
+
+ @Test
+ public void serialization_test() throws IOException {
+ TenantName tenantName = TenantName.from("tenant1");
+ List<Notification> notifications = List.of(
+ new Notification(Instant.ofEpochSecond(1234),
+ Notification.Type.APPLICATION_PACKAGE_WARNING,
+ NotificationSource.from(TenantAndApplicationId.from(tenantName.value(), "app1")),
+ List.of("Something something deprecated...")),
+ new Notification(Instant.ofEpochSecond(2345),
+ Notification.Type.DEPLOYMENT_FAILURE,
+ NotificationSource.from(new RunId(ApplicationId.from(tenantName.value(), "app1", "instance1"), JobType.systemTest, 12)),
+ List.of("Failed to deploy: Out of capacity")));
+
+ Slime serialized = NotificationsSerializer.toSlime(notifications);
+ assertEquals("{\"notifications\":[" +
+ "{" +
+ "\"at\":1234000," +
+ "\"type\":\"APPLICATION_PACKAGE_WARNING\"," +
+ "\"messages\":[\"Something something deprecated...\"]," +
+ "\"application\":\"app1\"" +
+ "},{" +
+ "\"at\":2345000," +
+ "\"type\":\"DEPLOYMENT_FAILURE\"," +
+ "\"messages\":[\"Failed to deploy: Out of capacity\"]," +
+ "\"application\":\"app1\"," +
+ "\"instance\":\"instance1\"," +
+ "\"jobId\":\"system-test\"," +
+ "\"runNumber\":12" +
+ "}]}", new String(SlimeUtils.toJsonBytes(serialized)));
+
+ List<Notification> deserialized = NotificationsSerializer.fromSlime(tenantName, serialized);
+ assertEquals(notifications, deserialized);
+ }
+} \ No newline at end of file
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
index b1b1c7ffe7a..0137ea7eeba 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
@@ -39,6 +39,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.athenz.AthenzDbMock;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServerException;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.Contact;
import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.User;
@@ -58,6 +59,8 @@ import com.yahoo.vespa.hosted.controller.deployment.DeploymentTrigger;
import com.yahoo.vespa.hosted.controller.integration.ConfigServerMock;
import com.yahoo.vespa.hosted.controller.integration.ZoneApiMock;
import com.yahoo.vespa.hosted.controller.metric.ApplicationMetrics;
+import com.yahoo.vespa.hosted.controller.notification.Notification;
+import com.yahoo.vespa.hosted.controller.notification.NotificationSource;
import com.yahoo.vespa.hosted.controller.restapi.ContainerTester;
import com.yahoo.vespa.hosted.controller.restapi.ControllerContainerTest;
import com.yahoo.vespa.hosted.controller.routing.GlobalRouting;
@@ -801,6 +804,13 @@ public class ApplicationApiTest extends ControllerContainerTest {
.userIdentity(USER_ID),
"");
+ addNotifications(TenantName.from("tenant1"));
+ tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET).userIdentity(USER_ID),
+ new File("notifications-tenant1.json"));
+ tester.assertResponse(request("/application/v4/tenant/tenant1/notifications", GET)
+ .properties(Map.of("application", "app2")).userIdentity(USER_ID),
+ new File("notifications-tenant1-app2.json"));
+
// DELETE the application which no longer has any deployments
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", DELETE)
.userIdentity(USER_ID)
@@ -1117,7 +1127,7 @@ public class ApplicationApiTest extends ControllerContainerTest {
400);
ConfigServerMock configServer = tester.serviceRegistry().configServerMock();
- configServer.throwOnNextPrepare(new ConfigServerException(new URI("server-url"), "Failed to prepare application", "Invalid application package", ConfigServerException.ErrorCode.INVALID_APPLICATION_PACKAGE, null));
+ configServer.throwOnNextPrepare(new ConfigServerException(ConfigServerException.ErrorCode.INVALID_APPLICATION_PACKAGE, "Failed to prepare application", "Invalid application package"));
// GET non-existent application package
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
@@ -1628,6 +1638,17 @@ public class ApplicationApiTest extends ControllerContainerTest {
));
}
+ private void addNotifications(TenantName tenantName) {
+ tester.controller().notificationsDb().setNotification(
+ NotificationSource.from(TenantAndApplicationId.from(tenantName.value(), "app1")),
+ Notification.Type.APPLICATION_PACKAGE_WARNING,
+ "Something something deprecated...");
+ tester.controller().notificationsDb().setNotification(
+ NotificationSource.from(new RunId(ApplicationId.from(tenantName.value(), "app2", "instance1"), JobType.systemTest, 12)),
+ Notification.Type.DEPLOYMENT_FAILURE,
+ "Failed to deploy: Out of capacity");
+ }
+
private void assertGlobalRouting(DeploymentId deployment, GlobalRouting.Status status, GlobalRouting.Agent agent) {
var changedAt = tester.controller().clock().instant();
var westPolicies = tester.controller().routing().policies().get(deployment);
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java
index f574d6bc3f1..1be7f16e85f 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java
@@ -81,7 +81,7 @@ public class JobControllerApiHandlerHelperTest {
tester.triggerJobs();
// us-east-3 eats the deployment failure and fails before deployment, while us-west-1 fails after.
- tester.configServer().throwOnNextPrepare(new ConfigServerException(URI.create("url"), "Failed to deploy application", "ERROR!", INVALID_APPLICATION_PACKAGE, null));
+ tester.configServer().throwOnNextPrepare(new ConfigServerException(INVALID_APPLICATION_PACKAGE, "ERROR!", "Failed to deploy application"));
tester.runner().run();
assertEquals(deploymentFailed, tester.jobs().last(app.instanceId(), productionUsEast3).get().status());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-clusters.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-clusters.json
index 499a425087d..9df83cb2089 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-clusters.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-clusters.json
@@ -90,7 +90,8 @@
},
"cost": "(ignore)"
},
- "at": 1234
+ "at": 1234,
+ "completion": 2234
}
],
"autoscalingStatus": "the autoscaling status",
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/notifications-tenant1-app2.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/notifications-tenant1-app2.json
new file mode 100644
index 00000000000..ab8262e26bd
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/notifications-tenant1-app2.json
@@ -0,0 +1,16 @@
+{
+ "notifications": [
+ {
+ "at": "(ignore)",
+ "level": "error",
+ "type": "DEPLOYMENT_FAILURE",
+ "messages": [
+ "Failed to deploy: Out of capacity"
+ ],
+ "application": "app2",
+ "instance": "instance1",
+ "jobName": "system-test",
+ "runNumber": 12
+ }
+ ]
+}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/notifications-tenant1.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/notifications-tenant1.json
new file mode 100644
index 00000000000..2b2c03bb75a
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/notifications-tenant1.json
@@ -0,0 +1,25 @@
+{
+ "notifications": [
+ {
+ "at": "(ignore)",
+ "level": "warning",
+ "type": "APPLICATION_PACKAGE_WARNING",
+ "messages": [
+ "Something something deprecated..."
+ ],
+ "application": "app1"
+ },
+ {
+ "at": "(ignore)",
+ "level": "error",
+ "type": "DEPLOYMENT_FAILURE",
+ "messages": [
+ "Failed to deploy: Out of capacity"
+ ],
+ "application": "app2",
+ "instance": "instance1",
+ "jobName": "system-test",
+ "runNumber": 12
+ }
+ ]
+}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/us-east-3-log-without-first.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/us-east-3-log-without-first.json
index 6c9315ca64b..588f8839ab7 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/us-east-3-log-without-first.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/us-east-3-log-without-first.json
@@ -5,7 +5,7 @@
"deployReal": [
{
"at": 1000,
- "type": "info",
+ "type": "warning",
"message": "Failed to deploy application: ERROR!"
}
]
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandlerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandlerTest.java
index cd815a2064b..c4412531f80 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandlerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandlerTest.java
@@ -12,6 +12,10 @@ import com.yahoo.vespa.hosted.controller.api.integration.noderepository.NodeOwne
import com.yahoo.vespa.hosted.controller.api.integration.noderepository.NodeRepositoryNode;
import com.yahoo.vespa.hosted.controller.api.integration.noderepository.NodeState;
import com.yahoo.vespa.hosted.controller.api.integration.noderepository.NodeType;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequest;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequestSource;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.HostAction;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.VespaChangeRequest;
import com.yahoo.vespa.hosted.controller.restapi.ContainerTester;
import com.yahoo.vespa.hosted.controller.restapi.ControllerContainerTest;
import org.intellij.lang.annotations.Language;
@@ -19,6 +23,8 @@ import org.junit.Before;
import org.junit.Test;
import java.io.File;
+import java.time.Instant;
+import java.time.ZonedDateTime;
import java.util.ArrayList;
import java.util.List;
@@ -35,11 +41,14 @@ public class ChangeManagementApiHandlerTest extends ControllerContainerTest {
addUserToHostedOperatorRole(operator);
tester.serviceRegistry().configServer().nodeRepository().addNodes(ZoneId.from("prod.us-east-3"), createNodes());
tester.serviceRegistry().configServer().nodeRepository().putNodes(ZoneId.from("prod.us-east-3"), createNode());
+ tester.controller().curator().writeChangeRequest(createChangeRequest());
+
}
@Test
public void test_api() {
assertFile(new Request("http://localhost:8080/changemanagement/v1/assessment", "{\"zone\":\"prod.us-east-3\", \"hosts\": [\"host1\"]}", Request.Method.POST), "initial.json");
+ assertFile(new Request("http://localhost:8080/changemanagement/v1/vcmr"), "vcmrs.json");
}
private void assertResponse(Request request, @Language("JSON") String body, int statusCode) {
@@ -58,6 +67,28 @@ public class ChangeManagementApiHandlerTest extends ControllerContainerTest {
.build();
}
+ private VespaChangeRequest createChangeRequest() {
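+        // Fixed timestamps keep the serialized change request stable for the vcmrs.json comparison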
+ var instant = Instant.ofEpochMilli(9001);
+ var date = ZonedDateTime.ofInstant(instant, java.time.ZoneId.of("UTC"));
+ var source = new ChangeRequestSource("aws", "id321", "url", ChangeRequestSource.Status.STARTED, date, date);
+ var actionPlan = List.of(
+ new HostAction("host1", HostAction.State.RETIRING, instant),
+ new HostAction("host2", HostAction.State.RETIRED, instant)
+ );
+
+ return new VespaChangeRequest(
+ "id123",
+ source,
+ List.of("switch1"),
+ List.of("host1", "host2"),
+ ChangeRequest.Approval.APPROVED,
+ ChangeRequest.Impact.VERY_HIGH,
+ VespaChangeRequest.Status.IN_PROGRESS,
+ actionPlan,
+ ZoneId.defaultId()
+ );
+ }
+
private List<NodeRepositoryNode> createNodes() {
List<NodeRepositoryNode> nodes = new ArrayList<>();
nodes.add(createNode("node1", "host1", "default", 0 ));
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/responses/vcmrs.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/responses/vcmrs.json
new file mode 100644
index 00000000000..54d4ea8bcbd
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/responses/vcmrs.json
@@ -0,0 +1,40 @@
+{
+ "vcmrs": [
+ {
+ "id": "id123",
+ "status": "IN_PROGRESS",
+ "impact": "VERY_HIGH",
+ "approval": "APPROVED",
+ "zoneId": "prod.default",
+ "source": {
+ "system": "aws",
+ "id": "id321",
+ "url": "url",
+ "plannedStartTime": "1970-01-01T00:00:09.001Z[UTC]",
+ "plannedEndTime": "1970-01-01T00:00:09.001Z[UTC]",
+ "status": "STARTED"
+ },
+ "actionPlan": {
+ "hosts": [
+ {
+ "hostname": "host1",
+ "state": "RETIRING",
+ "lastUpdated": "1970-01-01T00:00:09.001Z"
+ },
+ {
+ "hostname": "host2",
+ "state": "RETIRED",
+ "lastUpdated": "1970-01-01T00:00:09.001Z"
+ }
+ ]
+ },
+ "impactedHosts": [
+ "host1",
+ "host2"
+ ],
+ "impactedSwitches": [
+ "switch1"
+ ]
+ }
+ ]
+} \ No newline at end of file
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json
index 17c93c070fb..3cf79977fb8 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json
@@ -43,7 +43,7 @@
"name": "EndpointCertificateMaintainer"
},
{
- "name": "HostSwitchUpdater"
+ "name": "HostInfoUpdater"
},
{
"name": "JobRunner"
@@ -91,6 +91,9 @@
"name": "Upgrader"
},
{
+ "name": "VCMRMaintainer"
+ },
+ {
"name": "VersionStatusUpdater"
}
],
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
index e96af475216..d03dec06753 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
@@ -416,7 +416,7 @@ public class RoutingPoliciesTest {
var loadBalancer = new LoadBalancer("LB-0-Z-" + zone1.value(),
context.instanceId(),
ClusterSpec.Id.from("c0"),
- newHostname,
+ Optional.of(newHostname),
LoadBalancer.State.active,
Optional.of("dns-zone-1"));
tester.controllerTester().configServer().putLoadBalancers(zone1, List.of(loadBalancer));
@@ -705,7 +705,7 @@ public class RoutingPoliciesTest {
new LoadBalancer("LB-" + i + "-Z-" + zone.value(),
application,
ClusterSpec.Id.from("c" + i),
- lbHostname,
+ Optional.of(lbHostname),
LoadBalancer.State.active,
Optional.of("dns-zone-1")));
}
diff --git a/dist/vespa.spec b/dist/vespa.spec
index 3707d4afb9c..ab69acb7315 100644
--- a/dist/vespa.spec
+++ b/dist/vespa.spec
@@ -67,7 +67,7 @@ BuildRequires: vespa-boost-devel >= 1.59.0-6
BuildRequires: vespa-gtest >= 1.8.1-1
BuildRequires: vespa-icu-devel >= 65.1.0-1
BuildRequires: vespa-lz4-devel >= 1.9.2-2
-BuildRequires: vespa-onnxruntime-devel = 1.4.0
+BuildRequires: vespa-onnxruntime-devel = 1.7.1
BuildRequires: vespa-openssl-devel >= 1.1.1k-1
BuildRequires: vespa-protobuf-devel >= 3.7.0-4
BuildRequires: vespa-libzstd-devel >= 1.4.5-2
@@ -83,7 +83,7 @@ BuildRequires: boost-devel >= 1.66
BuildRequires: openssl-devel
BuildRequires: vespa-gtest >= 1.8.1-1
BuildRequires: vespa-lz4-devel >= 1.9.2-2
-BuildRequires: vespa-onnxruntime-devel = 1.4.0
+BuildRequires: vespa-onnxruntime-devel = 1.7.1
BuildRequires: vespa-protobuf-devel >= 3.7.0-4
BuildRequires: vespa-libzstd-devel >= 1.4.5-2
%endif
@@ -92,7 +92,7 @@ BuildRequires: cmake >= 3.9.1
BuildRequires: maven
BuildRequires: openssl-devel
BuildRequires: vespa-lz4-devel >= 1.9.2-2
-BuildRequires: vespa-onnxruntime-devel = 1.4.0
+BuildRequires: vespa-onnxruntime-devel = 1.7.1
BuildRequires: vespa-libzstd-devel >= 1.4.5-2
%if 0%{?fc32}
BuildRequires: protobuf-devel
@@ -177,6 +177,7 @@ Requires: libicu
Requires: perf
Requires: gdb
Requires: nc
+Requires: nghttp2
Requires: net-tools
Requires: unzip
Requires: zstd
@@ -184,7 +185,7 @@ Requires: zstd
Requires: llvm7.0
Requires: vespa-icu >= 65.1.0-1
Requires: vespa-lz4 >= 1.9.2-2
-Requires: vespa-onnxruntime = 1.4.0
+Requires: vespa-onnxruntime = 1.7.1
Requires: vespa-openssl >= 1.1.1k-1
Requires: vespa-protobuf >= 3.7.0-4
Requires: vespa-telegraf >= 1.1.1-1
@@ -204,7 +205,7 @@ Requires: llvm-libs >= 10.0.1
%endif
Requires: openssl-libs
Requires: vespa-lz4 >= 1.9.2-2
-Requires: vespa-onnxruntime = 1.4.0
+Requires: vespa-onnxruntime = 1.7.1
Requires: vespa-protobuf >= 3.7.0-4
Requires: vespa-zstd >= 1.4.5-2
%define _extra_link_directory %{_vespa_deps_prefix}/lib64
@@ -213,7 +214,7 @@ Requires: vespa-zstd >= 1.4.5-2
%if 0%{?fedora}
Requires: openssl-libs
Requires: vespa-lz4 >= 1.9.2-2
-Requires: vespa-onnxruntime = 1.4.0
+Requires: vespa-onnxruntime = 1.7.1
Requires: vespa-zstd >= 1.4.5-2
%if 0%{?fc32}
Requires: protobuf
@@ -622,10 +623,10 @@ fi
%dir %{_prefix}
%dir %{_prefix}/lib
%dir %{_prefix}/lib/jars
+%{_prefix}/lib/jars/asm-*.jar
%{_prefix}/lib/jars/aopalliance-repackaged-*.jar
%{_prefix}/lib/jars/bcpkix-jdk15on-*.jar
%{_prefix}/lib/jars/bcprov-jdk15on-*.jar
-%{_prefix}/lib/jars/component-jar-with-dependencies.jar
%{_prefix}/lib/jars/config-bundle-jar-with-dependencies.jar
%{_prefix}/lib/jars/configdefinitions-jar-with-dependencies.jar
%{_prefix}/lib/jars/configgen.jar
@@ -650,13 +651,15 @@ fi
%{_prefix}/lib/jars/jdisc_core-jar-with-dependencies.jar
%{_prefix}/lib/jars/jdisc-security-filters-jar-with-dependencies.jar
%{_prefix}/lib/jars/jersey-*.jar
+%{_prefix}/lib/jars/alpn-*.jar
+%{_prefix}/lib/jars/http2-*.jar
%{_prefix}/lib/jars/jetty-*.jar
%{_prefix}/lib/jars/mimepull-*.jar
%{_prefix}/lib/jars/model-evaluation-jar-with-dependencies.jar
%{_prefix}/lib/jars/model-integration-jar-with-dependencies.jar
+%{_prefix}/lib/jars/org.apache.aries.spifly.dynamic.bundle-*.jar
%{_prefix}/lib/jars/osgi-resource-locator-*.jar
%{_prefix}/lib/jars/security-utils-jar-with-dependencies.jar
-%{_prefix}/lib/jars/simplemetrics-jar-with-dependencies.jar
%{_prefix}/lib/jars/standalone-container-jar-with-dependencies.jar
%{_prefix}/lib/jars/validation-api-*.jar
%{_prefix}/lib/jars/vespa-athenz-jar-with-dependencies.jar
diff --git a/docker/build/build-vespa-internal.sh b/docker/build/build-vespa-internal.sh
index 780713ec732..63eb0efacb8 100755
--- a/docker/build/build-vespa-internal.sh
+++ b/docker/build/build-vespa-internal.sh
@@ -22,12 +22,11 @@ yum -y install epel-release
yum -y install centos-release-scl
if ! yum-config-manager --add-repo https://copr.fedorainfracloud.org/coprs/g/vespa/vespa/repo/epel-7/group_vespa-vespa-epel-7.repo; then
- cat << 'EOF' > /etc/yum.repos.d/vespa-engine-stable.repo
-[vespa-engine-stable]
-name=vespa-engine-stable
-baseurl=https://yahoo.bintray.com/vespa-engine/centos/$releasever/stable/$basearch
+ cat << 'EOF' > /etc/yum.repos.d/vespa-release.repo
+[vespa-release]
+name=Vespa releases
+baseurl=https://verizonmedia.jfrog.io/artifactory/vespa/centos/$releasever/release/$basearch
gpgcheck=0
-repo_gpgcheck=0
enabled=1
EOF
fi
diff --git a/document/src/tests/tensor_fieldvalue/partial_add/partial_add_test.cpp b/document/src/tests/tensor_fieldvalue/partial_add/partial_add_test.cpp
index 8d9ce8d5511..c67ac55e9f4 100644
--- a/document/src/tests/tensor_fieldvalue/partial_add/partial_add_test.cpp
+++ b/document/src/tests/tensor_fieldvalue/partial_add/partial_add_test.cpp
@@ -1,7 +1,7 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/eval/eval/simple_value.h>
-#include <vespa/eval/eval/test/tensor_model.h>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/eval/eval/value_codec.h>
#include <vespa/document/update/tensor_partial_update.h>
#include <vespa/vespalib/util/stringfmt.h>
@@ -15,14 +15,10 @@ using namespace vespalib::eval::test;
using vespalib::make_string_short::fmt;
-std::vector<Layout> add_layouts = {
- {x({"a"})}, {x({"b"})},
- {x({"a","b"})}, {x({"a","c"})},
- float_cells({x({"a","b"})}), {x({"a","c"})},
- {x({"a","b"})}, float_cells({x({"a","c"})}),
- float_cells({x({"a","b"})}), float_cells({x({"a","c"})}),
- {x({"a","b","c"}),y({"d","e"})}, {x({"b","f"}),y({"d","g"})},
- {x(3),y({"a","b"})}, {x(3),y({"b","c"})}
+std::vector<std::pair<vespalib::string,vespalib::string>> add_layouts = {
+ { "x4_1", "x4_2" },
+ { "x4_2y4_1", "x4_1y4_2" },
+ { "x3y4_1", "x3y4_2" }
};
TensorSpec reference_add(const TensorSpec &a, const TensorSpec &b) {
@@ -37,7 +33,7 @@ TensorSpec reference_add(const TensorSpec &a, const TensorSpec &b) {
result.add(cell.first, cell.second);
}
}
- return result;
+ return result.normalize();
}
Value::UP try_partial_add(const TensorSpec &a, const TensorSpec &b) {
@@ -54,32 +50,34 @@ TensorSpec perform_partial_add(const TensorSpec &a, const TensorSpec &b) {
}
TEST(PartialAddTest, partial_add_works_for_simple_values) {
- ASSERT_TRUE((add_layouts.size() % 2) == 0);
- for (size_t i = 0; i < add_layouts.size(); i += 2) {
- TensorSpec lhs = spec(add_layouts[i], N());
- TensorSpec rhs = spec(add_layouts[i + 1], Div16(N()));
- SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
- auto expect = reference_add(lhs, rhs);
- auto actual = perform_partial_add(lhs, rhs);
- EXPECT_EQ(actual, expect);
+ for (const auto &layouts: add_layouts) {
+ for (auto lhs_ct: CellTypeUtils::list_types()) {
+ for (auto rhs_ct: CellTypeUtils::list_types()) {
+ TensorSpec lhs = GenSpec::from_desc(layouts.first).cells(lhs_ct).seq(N());
+ TensorSpec rhs = GenSpec::from_desc(layouts.second).cells(rhs_ct).seq(Div16(N()));
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
+ auto expect = reference_add(lhs, rhs);
+ auto actual = perform_partial_add(lhs, rhs);
+ EXPECT_EQ(actual, expect);
+ }
+ }
}
}
-std::vector<Layout> bad_layouts = {
- {x(3)}, {x(3),y(1)},
- {x(3),y(1)}, {x(3)},
- {x(3),y(3)}, {x(3),y({"a"})},
- {x(3),y({"a"})}, {x(3),y(3)},
- {x({"a"})}, {x({"a"}),y({"b"})},
- {x({"a"}),y({"b"})}, {x({"a"})},
- {x({"a"})}, {x({"a"}),y(1)}
+std::vector<std::pair<vespalib::string,vespalib::string>> bad_layouts = {
+ { "x3", "x3y1" },
+ { "x3y1", "x3" },
+ { "x3y3", "x3y3_1" },
+ { "x3y3_1", "x3y3" },
+ { "x3_1", "x3_1y3_1" },
+ { "x3_1y3_1", "x3_1" },
+ { "x3_1", "x3_1y1" }
};
TEST(PartialAddTest, partial_add_returns_nullptr_on_invalid_inputs) {
- ASSERT_TRUE((bad_layouts.size() % 2) == 0);
- for (size_t i = 0; i < bad_layouts.size(); i += 2) {
- TensorSpec lhs = spec(bad_layouts[i], N());
- TensorSpec rhs = spec(bad_layouts[i + 1], Div16(N()));
+ for (const auto &layouts: bad_layouts) {
+ TensorSpec lhs = GenSpec::from_desc(layouts.first).seq(N());
+ TensorSpec rhs = GenSpec::from_desc(layouts.second).seq(Div16(N()));
SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
auto actual = try_partial_add(lhs, rhs);
auto expect = Value::UP();
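The rewritten layout tables above hold pairs of GenSpec descriptor strings rather than hand-built Layout objects, and the nested CellTypeUtils::list_types() loops now cover every cell-type combination that the old float_cells() variants enumerated explicitly. A minimal sketch of extending the table (illustrative only, not part of the patch; the extra descriptor pair is a hypothetical example written in the same style as the existing entries):

// Sketch: one more entry is all that is needed; the loops in the test above
// then exercise it for every (lhs, rhs) cell-type combination automatically.
// "x2_1" and "x2_3" are hypothetical descriptors, not taken from the patch.
std::vector<std::pair<vespalib::string,vespalib::string>> extra_add_layouts = {
    { "x2_1", "x2_3" }
};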
diff --git a/document/src/tests/tensor_fieldvalue/partial_modify/partial_modify_test.cpp b/document/src/tests/tensor_fieldvalue/partial_modify/partial_modify_test.cpp
index 71f35102d17..26f7a10468a 100644
--- a/document/src/tests/tensor_fieldvalue/partial_modify/partial_modify_test.cpp
+++ b/document/src/tests/tensor_fieldvalue/partial_modify/partial_modify_test.cpp
@@ -1,7 +1,7 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/eval/eval/simple_value.h>
-#include <vespa/eval/eval/test/tensor_model.h>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/eval/eval/value_codec.h>
#include <vespa/document/update/tensor_partial_update.h>
#include <vespa/vespalib/util/stringfmt.h>
@@ -15,15 +15,13 @@ using namespace vespalib::eval::test;
using vespalib::make_string_short::fmt;
-std::vector<Layout> modify_layouts = {
- {x({"a"})}, {x({"a"})},
- {x({"a",""})}, {x({"b","c","d","e"})},
- {x(5)}, {x({"1","2","foo","17"})},
- {x({"a","b","c"}),y({"d","e"})}, {x({"b"}),y({"d"})},
- {x({"a","b","c"})}, {x({"b","c","d"})},
- {x(4),y({"a","b","c","d"}),z(5)}, {x({"1","2"}),y({"b","d"}),z({"1","3"})},
- {x(3),y(2)}, {x({"0","1"}),y({"0","1"})},
- {x({"a","","b"})}, {x({""})}
+std::vector<std::pair<vespalib::string,vespalib::string>> modify_layouts = {
+ { "x4_1", "x4_1" },
+ { "x4_1", "x4_2" },
+ { "x4", "x4_2" },
+ { "x4_1y4_2", "x4_2y4_1" },
+ { "x4y4_1z4", "x4_2y4_2z4_2" },
+ { "x3y2", "x2_1y2_1" }
};
TensorSpec::Address sparsify(const TensorSpec::Address &input) {
@@ -52,7 +50,7 @@ TensorSpec reference_modify(const TensorSpec &a, const TensorSpec &b, join_fun_t
result.add(cell.first, fun(v, iter->second));
}
}
- return result;
+ return result.normalize();
}
Value::UP try_partial_modify(const TensorSpec &a, const TensorSpec &b, join_fun_t fun) {
@@ -69,36 +67,38 @@ TensorSpec perform_partial_modify(const TensorSpec &a, const TensorSpec &b, join
}
TEST(PartialModifyTest, partial_modify_works_for_simple_values) {
- ASSERT_TRUE((modify_layouts.size() % 2) == 0);
- for (size_t i = 0; i < modify_layouts.size(); i += 2) {
- TensorSpec lhs = spec(modify_layouts[i], N());
- TensorSpec rhs = spec(modify_layouts[i + 1], Div16(N()));
- SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
- for (auto fun: {operation::Add::f, operation::Mul::f, operation::Sub::f}) {
- auto expect = reference_modify(lhs, rhs, fun);
- auto actual = perform_partial_modify(lhs, rhs, fun);
- EXPECT_EQ(actual, expect);
+ for (const auto &layouts: modify_layouts) {
+ for (auto lhs_ct: CellTypeUtils::list_types()) {
+ for (auto rhs_ct: CellTypeUtils::list_types()) {
+ TensorSpec lhs = GenSpec::from_desc(layouts.first).cells(lhs_ct).seq(N());
+ TensorSpec rhs = GenSpec::from_desc(layouts.second).cells(rhs_ct).seq(Div16(N()));
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
+ for (auto fun: {operation::Add::f, operation::Mul::f, operation::Sub::f}) {
+ auto expect = reference_modify(lhs, rhs, fun);
+ auto actual = perform_partial_modify(lhs, rhs, fun);
+ EXPECT_EQ(actual, expect);
+ }
+ auto fun = [](double, double keep) { return keep; };
+ auto expect = reference_modify(lhs, rhs, fun);
+ auto actual = perform_partial_modify(lhs, rhs, fun);
+ EXPECT_EQ(actual, expect);
+ }
}
- auto fun = [](double, double keep) { return keep; };
- auto expect = reference_modify(lhs, rhs, fun);
- auto actual = perform_partial_modify(lhs, rhs, fun);
- EXPECT_EQ(actual, expect);
}
}
-std::vector<Layout> bad_layouts = {
- {x(3)}, {x(3)},
- {x(3),y({"a"})}, {x(3),y({"a"})},
- {x({"a"})}, {x({"a"}),y({"b"})},
- {x({"a"}),y({"b"})}, {x({"a"})},
- {x({"a"})}, {x({"a"}),y(1)}
+std::vector<std::pair<vespalib::string,vespalib::string>> bad_layouts = {
+ { "x3", "x3" },
+ { "x3y4_1", "x3y4_1" },
+ { "x4_1", "x4_1y4_1" },
+ { "x4_1y4_1", "x4_1" },
+ { "x4_1", "x4_1y1" }
};
TEST(PartialModifyTest, partial_modify_returns_nullptr_on_invalid_inputs) {
- ASSERT_TRUE((bad_layouts.size() % 2) == 0);
- for (size_t i = 0; i < bad_layouts.size(); i += 2) {
- TensorSpec lhs = spec(bad_layouts[i], N());
- TensorSpec rhs = spec(bad_layouts[i + 1], Div16(N()));
+ for (const auto &layouts: bad_layouts) {
+ TensorSpec lhs = GenSpec::from_desc(layouts.first).seq(N());
+ TensorSpec rhs = GenSpec::from_desc(layouts.second).seq(Div16(N()));
SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
for (auto fun: {operation::Add::f}) {
auto actual = try_partial_modify(lhs, rhs, fun);
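Besides the named operations, the modify test above also feeds a plain capture-free lambda through reference_modify and perform_partial_modify, so any capture-free callable taking two doubles can serve as the modify function (assuming the join function type accepts such lambdas, as the keep-lambda above does). A rough sketch under that assumption (the averaging lambda is a hypothetical example, not part of the patch):

// Sketch only: a hypothetical modify function in the same style as the
// keep-lambda used in the test above.
auto average = [](double old_value, double new_value) { return (old_value + new_value) / 2.0; };
auto expect = reference_modify(lhs, rhs, average);
auto actual = perform_partial_modify(lhs, rhs, average);
EXPECT_EQ(actual, expect);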
diff --git a/document/src/tests/tensor_fieldvalue/partial_remove/partial_remove_test.cpp b/document/src/tests/tensor_fieldvalue/partial_remove/partial_remove_test.cpp
index 58a06273139..9107f590125 100644
--- a/document/src/tests/tensor_fieldvalue/partial_remove/partial_remove_test.cpp
+++ b/document/src/tests/tensor_fieldvalue/partial_remove/partial_remove_test.cpp
@@ -1,7 +1,7 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/eval/eval/simple_value.h>
-#include <vespa/eval/eval/test/tensor_model.h>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/eval/eval/value_codec.h>
#include <vespa/document/update/tensor_partial_update.h>
#include <vespa/vespalib/util/stringfmt.h>
@@ -15,15 +15,10 @@ using namespace vespalib::eval::test;
using vespalib::make_string_short::fmt;
-std::vector<Layout> remove_layouts = {
- {x({"a"})}, {x({"b"})},
- {x({"a","b"})}, {x({"a","c"})},
- {x({"a","b"})}, {x({"a","b"})},
- float_cells({x({"a","b"})}), {x({"a","c"})},
- {x({"a","b"})}, float_cells({x({"a","c"})}),
- float_cells({x({"a","b"})}), float_cells({x({"a","c"})}),
- {x({"a","b","c"}),y({"d","e"})}, {x({"b","f"}),y({"d","g"})},
- {x(3),y({"a","b"})}, {y({"b","c"})}
+std::vector<std::pair<vespalib::string,vespalib::string>> remove_layouts = {
+ { "x4_1", "x4_2" },
+ { "x4_2y4_1", "x4_1y4_2" },
+ { "x3y4_1", "y4_2" }
};
TensorSpec::Address only_sparse(const TensorSpec::Address &input) {
@@ -45,7 +40,7 @@ TensorSpec reference_remove(const TensorSpec &a, const TensorSpec &b) {
result.add(cell.first, cell.second);
}
}
- return result;
+ return result.normalize();
}
Value::UP try_partial_remove(const TensorSpec &a, const TensorSpec &b) {
@@ -62,30 +57,32 @@ TensorSpec perform_partial_remove(const TensorSpec &a, const TensorSpec &b) {
}
TEST(PartialRemoveTest, partial_remove_works_for_simple_values) {
- ASSERT_TRUE((remove_layouts.size() % 2) == 0);
- for (size_t i = 0; i < remove_layouts.size(); i += 2) {
- TensorSpec lhs = spec(remove_layouts[i], N());
- TensorSpec rhs = spec(remove_layouts[i + 1], Div16(N()));
- SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
- auto expect = reference_remove(lhs, rhs);
- auto actual = perform_partial_remove(lhs, rhs);
- EXPECT_EQ(actual, expect);
+ for (const auto &layouts: remove_layouts) {
+ for (auto lhs_ct: CellTypeUtils::list_types()) {
+ for (auto rhs_ct: CellTypeUtils::list_types()) {
+ TensorSpec lhs = GenSpec::from_desc(layouts.first).cells(lhs_ct).seq(N());
+ TensorSpec rhs = GenSpec::from_desc(layouts.second).cells(rhs_ct).seq(Div16(N()));
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
+ auto expect = reference_remove(lhs, rhs);
+ auto actual = perform_partial_remove(lhs, rhs);
+ EXPECT_EQ(actual, expect);
+ }
+ }
}
}
-std::vector<Layout> bad_layouts = {
- {x(3)}, {x(3)},
- {x(3),y({"a"})}, {x(3)},
- {x(3),y({"a"})}, {x(3),y({"a"})},
- {x({"a"})}, {y({"a"})},
- {x({"a"})}, {x({"a"}),y({"b"})}
+std::vector<std::pair<vespalib::string,vespalib::string>> bad_layouts = {
+ { "x3", "x3" },
+ { "x3y4_1", "x3" },
+ { "x3y4_1", "x3y4_2" },
+ { "x4_1", "y4_1" },
+ { "x4_1", "x4_2y4_1" }
};
TEST(PartialRemoveTest, partial_remove_returns_nullptr_on_invalid_inputs) {
- ASSERT_TRUE((bad_layouts.size() % 2) == 0);
- for (size_t i = 0; i < bad_layouts.size(); i += 2) {
- TensorSpec lhs = spec(bad_layouts[i], N());
- TensorSpec rhs = spec(bad_layouts[i + 1], Div16(N()));
+ for (const auto &layouts: bad_layouts) {
+ TensorSpec lhs = GenSpec::from_desc(layouts.first).seq(N());
+ TensorSpec rhs = GenSpec::from_desc(layouts.second).seq(Div16(N()));
SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
auto actual = try_partial_remove(lhs, rhs);
auto expect = Value::UP();
diff --git a/eval/src/apps/tensor_conformance/generate.cpp b/eval/src/apps/tensor_conformance/generate.cpp
index 5feb6b3b3e4..4776b9f220b 100644
--- a/eval/src/apps/tensor_conformance/generate.cpp
+++ b/eval/src/apps/tensor_conformance/generate.cpp
@@ -1,167 +1,250 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "generate.h"
-#include <vespa/vespalib/testkit/test_kit.h>
-#include <vespa/eval/eval/test/tensor_model.h>
-#include <vespa/eval/eval/operation.h>
+#include <vespa/eval/eval/test/gen_spec.h>
+#include <vespa/eval/eval/value_type_spec.h>
#include <vespa/eval/eval/aggr.h>
#include <vespa/vespalib/util/stringfmt.h>
using namespace vespalib::eval;
using namespace vespalib::eval::test;
+using vespalib::make_string_short::fmt;
+
+//-----------------------------------------------------------------------------
+
+namespace {
+
+//-----------------------------------------------------------------------------
+
+const std::vector<vespalib::string> basic_layouts = {
+ "",
+ "a3", "a3c5", "a3c5e7",
+ "b2_1", "b2_1d3_1", "b2_1d3_1f4_1",
+ "a3b2_1c5d3_1", "b2_1c5d3_1e7"
+};
+
+const std::vector<std::pair<vespalib::string,vespalib::string>> join_layouts = {
+ {"", ""},
+ {"", "a3"},
+ {"", "b2_1"},
+ {"", "a3b2_1"},
+ {"a3c5e7", "a3c5e7"},
+ {"c5", "a3e7"},
+ {"a3c5", "c5e7"},
+ {"b4_1d6_1f8_1", "b2_2d3_2f4_2"},
+ {"d3_1", "b2_1f4_1"},
+ {"b2_1d6_1", "d3_2f4_2"},
+ {"a3b4_1c5d6_1", "a3b2_1c5d3_1"},
+ {"a3b2_1", "c5d3_1"},
+ {"a3b4_1c5", "b2_1c5d3_1"}
+};
+
+const std::vector<std::pair<vespalib::string,vespalib::string>> merge_layouts = {
+ {"", ""},
+ {"a3c5e7", "a3c5e7"},
+ {"b15_2", "b10_3"},
+ {"b6_2d4_3f6_2", "b4_3d6_2f4_3"},
+ {"a3b6_2c1d4_3e2f6_2", "a3b4_3c1d6_2e2f4_3"},
+};
+
+const std::vector<vespalib::string> concat_c_layouts_a = {
+ "", "c3", "a3", "b6_2", "a3b6_2", "a3b6_2c3"
+};
+
+const std::vector<vespalib::string> concat_c_layouts_b = {
+ "", "c5", "a3", "b4_3", "a3b4_3", "a3b4_3c5"
+};
+
+//-----------------------------------------------------------------------------
+
+const std::vector<CellType> just_double = {CellType::DOUBLE};
+const std::vector<CellType> just_float = {CellType::FLOAT};
+const std::vector<CellType> all_types = CellTypeUtils::list_types();
const double my_nan = std::numeric_limits<double>::quiet_NaN();
+Sequence skew(const Sequence &seq) {
+ return [seq](size_t i) { return seq(i + 7); };
+}
+
+Sequence my_seq(double x0, double delta, size_t n) {
+ std::vector<double> values;
+ double x = x0;
+ for (size_t i = 0; i < n; ++i) {
+ values.push_back(x);
+ x += delta;
+ }
+ return Seq(values);
+}
+
+//-----------------------------------------------------------------------------
+
+void generate(const vespalib::string &expr, const GenSpec &a, TestBuilder &dst) {
+ auto a_cell_types = a.dims().empty() ? just_double : dst.full ? all_types : just_float;
+ for (auto a_ct: a_cell_types) {
+ dst.add(expr, {{"a", a.cpy().cells(a_ct)}});
+ }
+}
+
+void generate(const vespalib::string &expr, const GenSpec &a, const GenSpec &b, TestBuilder &dst) {
+ auto a_cell_types = a.dims().empty() ? just_double : dst.full ? all_types : just_float;
+ auto b_cell_types = b.dims().empty() ? just_double : dst.full ? all_types : just_float;
+ for (auto a_ct: a_cell_types) {
+ for (auto b_ct: b_cell_types) {
+ dst.add(expr, {{"a", a.cpy().cells(a_ct)},{"b", b.cpy().cells(b_ct)}});
+ }
+ }
+}
+
+void generate_with_cell_type(const char *expr_fmt, TestBuilder &dst) {
+ auto cell_types = dst.full ? all_types : just_float;
+ for (auto ct: cell_types) {
+ auto name = value_type::cell_type_to_name(ct);
+ dst.add(fmt(expr_fmt, name.c_str()), {});
+ }
+}
+
+void generate_with_cell_type(const char *expr_fmt, double a, double b, double c, TestBuilder &dst) {
+ auto cell_types = dst.full ? all_types : just_float;
+ for (auto ct: cell_types) {
+ auto name = value_type::cell_type_to_name(ct);
+ dst.add(fmt(expr_fmt, name.c_str()), {{"a", GenSpec(a)},{"b", GenSpec(b)},{"c", GenSpec(c)}});
+ }
+}
+
+//-----------------------------------------------------------------------------
+
+void generate_const(TestBuilder &dst) {
+ dst.add("1.25", {});
+ dst.add("2.75", {});
+ dst.add("\"this is a string that will be hashed\"", {});
+ dst.add("\"foo bar baz\"", {});
+ // constant tensor lambda
+ generate_with_cell_type("tensor<%s>(x[10])(x+1)", dst);
+ generate_with_cell_type("tensor<%s>(x[5],y[4])(x*4+(y+1))", dst);
+ generate_with_cell_type("tensor<%s>(x[5],y[4])(x==y)", dst);
+ // constant verbose tensor create
+ generate_with_cell_type("tensor<%s>(x[3]):{{x:0}:1,{x:1}:2,{x:2}:3}", dst);
+ generate_with_cell_type("tensor<%s>(x{}):{{x:a}:1,{x:b}:2,{x:c}:3}", dst);
+ generate_with_cell_type("tensor<%s>(x{},y[2]):{{x:a,y:0}:1,{x:a,y:1}:2}", dst);
+ // constant convenient tensor create
+ generate_with_cell_type("tensor<%s>(x[3]):[1,2,3]", dst);
+ generate_with_cell_type("tensor<%s>(x{}):{a:1,b:2,c:3}", dst);
+ generate_with_cell_type("tensor<%s>(x{},y[2]):{a:[1,2]}", dst);
+}
+
+//-----------------------------------------------------------------------------
+
+void generate_inject(TestBuilder &dst) {
+ for (const auto &layout: basic_layouts) {
+ GenSpec a = GenSpec::from_desc(layout).seq(N());
+ generate("a", a, dst);
+ }
+}
+
//-----------------------------------------------------------------------------
void generate_reduce(Aggr aggr, const Sequence &seq, TestBuilder &dst) {
- std::vector<Layout> layouts = {
- {x(3)},
- {x(3),y(5)},
- {x(3),y(5),z(7)},
- float_cells({x(3),y(5),z(7)}),
- {x({"a","b","c"})},
- {x({"a","b","c"}),y({"foo","bar"})},
- {x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})}),
- {x(3),y({"foo", "bar"}),z(7)},
- {x({"a","b","c"}),y(5),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y(5),z({"i","j","k","l"})})
- };
- for (const Layout &layout: layouts) {
- TensorSpec input = spec(layout, seq);
- for (const Domain &domain: layout) {
- vespalib::string expr = vespalib::make_string("reduce(a,%s,%s)",
- AggrNames::name_of(aggr)->c_str(), domain.name().c_str());
- dst.add(expr, {{"a", input}});
+ for (const auto &layout: basic_layouts) {
+ GenSpec a = GenSpec::from_desc(layout).seq(seq);
+ for (const auto &dim: a.dims()) {
+ vespalib::string expr = fmt("reduce(a,%s,%s)",
+ AggrNames::name_of(aggr)->c_str(),
+ dim.name().c_str());
+ generate(expr, a, dst);
+ }
+ if (a.dims().size() > 1) {
+ vespalib::string expr = fmt("reduce(a,%s,%s,%s)",
+ AggrNames::name_of(aggr)->c_str(),
+ a.dims().back().name().c_str(),
+ a.dims().front().name().c_str());
+ generate(expr, a, dst);
}
{
- vespalib::string expr = vespalib::make_string("reduce(a,%s)", AggrNames::name_of(aggr)->c_str());
- dst.add(expr, {{"a", input}});
+ vespalib::string expr = fmt("reduce(a,%s)",
+ AggrNames::name_of(aggr)->c_str());
+ generate(expr, a, dst);
}
}
}
-void generate_tensor_reduce(TestBuilder &dst) {
+void generate_reduce(TestBuilder &dst) {
generate_reduce(Aggr::AVG, N(), dst);
generate_reduce(Aggr::COUNT, N(), dst);
generate_reduce(Aggr::PROD, SigmoidF(N()), dst);
generate_reduce(Aggr::SUM, N(), dst);
generate_reduce(Aggr::MAX, N(), dst);
- // add MEDIAN cases when supported in Java
- // generate_reduce(Aggr::MEDIAN, N(), dst);
+ generate_reduce(Aggr::MEDIAN, N(), dst);
generate_reduce(Aggr::MIN, N(), dst);
}
//-----------------------------------------------------------------------------
-void generate_map_expr(const vespalib::string &expr, map_fun_t ref_op, const Sequence &seq, TestBuilder &dst) {
- std::vector<Layout> layouts = {
- {},
- {x(3)},
- {x(3),y(5)},
- {x(3),y(5),z(7)},
- float_cells({x(3),y(5),z(7)}),
- {x({"a","b","c"})},
- {x({"a","b","c"}),y({"foo","bar"})},
- {x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})}),
- {x(3),y({"foo", "bar"}),z(7)},
- {x({"a","b","c"}),y(5),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y(5),z({"i","j","k","l"})})
- };
- for (const Layout &layout: layouts) {
- dst.add(expr, {{"a", spec(layout, seq)}}, spec(layout, OpSeq(seq, ref_op)));
+void generate_map_expr(const vespalib::string &expr, const Sequence &seq, TestBuilder &dst) {
+ for (const auto &layout: basic_layouts) {
+ GenSpec a = GenSpec::from_desc(layout).seq(seq);
+ generate(expr, a, dst);
}
}
-void generate_op1_map(const vespalib::string &op1_expr, map_fun_t ref_op, const Sequence &seq, TestBuilder &dst) {
- generate_map_expr(op1_expr, ref_op, seq, dst);
- generate_map_expr(vespalib::make_string("map(a,f(a)(%s))", op1_expr.c_str()), ref_op, seq, dst);
-}
-
-void generate_tensor_map(TestBuilder &dst) {
- generate_op1_map("-a", operation::Neg::f, Sub2(Div16(N())), dst);
- generate_op1_map("!a", operation::Not::f, Seq({0.0, 1.0, 1.0}), dst);
- generate_op1_map("cos(a)", operation::Cos::f, Div16(N()), dst);
- generate_op1_map("sin(a)", operation::Sin::f, Div16(N()), dst);
- generate_op1_map("tan(a)", operation::Tan::f, Div16(N()), dst);
- generate_op1_map("cosh(a)", operation::Cosh::f, Div16(N()), dst);
- generate_op1_map("sinh(a)", operation::Sinh::f, Div16(N()), dst);
- generate_op1_map("tanh(a)", operation::Tanh::f, Div16(N()), dst);
- generate_op1_map("acos(a)", operation::Acos::f, SigmoidF(Div16(N())), dst);
- generate_op1_map("asin(a)", operation::Asin::f, SigmoidF(Div16(N())), dst);
- generate_op1_map("atan(a)", operation::Atan::f, Div16(N()), dst);
- generate_op1_map("exp(a)", operation::Exp::f, Div16(N()), dst);
- generate_op1_map("log10(a)", operation::Log10::f, Div16(N()), dst);
- generate_op1_map("log(a)", operation::Log::f, Div16(N()), dst);
- generate_op1_map("sqrt(a)", operation::Sqrt::f, Div16(N()), dst);
- generate_op1_map("ceil(a)", operation::Ceil::f, Div16(N()), dst);
- generate_op1_map("fabs(a)", operation::Fabs::f, Div16(N()), dst);
- generate_op1_map("floor(a)", operation::Floor::f, Div16(N()), dst);
- generate_op1_map("isNan(a)", operation::IsNan::f, Seq({my_nan, 1.0, 1.0}), dst);
- generate_op1_map("relu(a)", operation::Relu::f, Sub2(Div16(N())), dst);
- generate_op1_map("sigmoid(a)", operation::Sigmoid::f, Sub2(Div16(N())), dst);
- generate_op1_map("elu(a)", operation::Elu::f, Sub2(Div16(N())), dst);
- // TODO(havardpe): add erf when supported by Java
- // generate_op1_map("erf(a)", operation::Erf::f, Sub2(Div16(N())), dst);
- generate_op1_map("a in [1,5,7,13,42]", MyIn::f, N(), dst);
- generate_map_expr("map(a,f(a)((a+1)*2))", MyOp::f, Div16(N()), dst);
+void generate_op1_map(const vespalib::string &op1_expr, const Sequence &seq, TestBuilder &dst) {
+ generate_map_expr(op1_expr, seq, dst);
+ generate_map_expr(fmt("map(a,f(a)(%s))", op1_expr.c_str()), seq, dst);
+}
+
+void generate_map(TestBuilder &dst) {
+ generate_op1_map("-a", Sub2(Div16(N())), dst);
+ generate_op1_map("!a", Seq({0.0, 1.0, 1.0}), dst);
+ generate_op1_map("cos(a)", Div16(N()), dst);
+ generate_op1_map("sin(a)", Div16(N()), dst);
+ generate_op1_map("tan(a)", Div16(N()), dst);
+ generate_op1_map("cosh(a)", Div16(N()), dst);
+ generate_op1_map("sinh(a)", Div16(N()), dst);
+ generate_op1_map("tanh(a)", Div16(N()), dst);
+ generate_op1_map("acos(a)", SigmoidF(Div16(N())), dst);
+ generate_op1_map("asin(a)", SigmoidF(Div16(N())), dst);
+ generate_op1_map("atan(a)", Div16(N()), dst);
+ generate_op1_map("exp(a)", Div16(N()), dst);
+ generate_op1_map("log10(a)", Div16(N()), dst);
+ generate_op1_map("log(a)", Div16(N()), dst);
+ generate_op1_map("sqrt(a)", Div16(N()), dst);
+ generate_op1_map("ceil(a)", Div16(N()), dst);
+ generate_op1_map("fabs(a)", Div16(N()), dst);
+ generate_op1_map("floor(a)", Div16(N()), dst);
+ generate_op1_map("isNan(a)", Seq({my_nan, 1.0, 1.0}), dst);
+ generate_op1_map("relu(a)", Sub2(Div16(N())), dst);
+ generate_op1_map("sigmoid(a)", Sub2(Div16(N())), dst);
+ generate_op1_map("elu(a)", Sub2(Div16(N())), dst);
+ generate_op1_map("erf(a)", Sub2(Div16(N())), dst);
+ generate_op1_map("a in [1,5,7,13,42]", N(), dst);
+ // custom lambda
+ generate_map_expr("map(a,f(a)((a+1)*2))", Div16(N()), dst);
}
//-----------------------------------------------------------------------------
void generate_join_expr(const vespalib::string &expr, const Sequence &seq, TestBuilder &dst) {
- std::vector<Layout> layouts = {
- {}, {},
- {}, {x(5)},
- {x(5)}, {},
- {}, float_cells({x(5)}),
- float_cells({x(5)}), {},
- {x(5)}, {x(5)},
- {x(5)}, {y(5)},
- {x(5)}, {x(5),y(5)},
- {y(3)}, {x(2),z(3)},
- {x(3),y(5)}, {y(5),z(7)},
- float_cells({x(3),y(5)}), {y(5),z(7)},
- {x(3),y(5)}, float_cells({y(5),z(7)}),
- float_cells({x(3),y(5)}), float_cells({y(5),z(7)}),
- {x({"a","b","c"})}, {x({"a","b","c"})},
- {x({"a","b","c"})}, {x({"a","b"})},
- {x({"a","b","c"})}, {y({"foo","bar","baz"})},
- {x({"a","b","c"})}, {x({"a","b","c"}),y({"foo","bar","baz"})},
- {x({"a","b"}),y({"foo","bar","baz"})}, {x({"a","b","c"}),y({"foo","bar"})},
- {x({"a","b"}),y({"foo","bar","baz"})}, {y({"foo","bar"}),z({"i","j","k","l"})},
- float_cells({x({"a","b"}),y({"foo","bar","baz"})}), {y({"foo","bar"}),z({"i","j","k","l"})},
- {x({"a","b"}),y({"foo","bar","baz"})}, float_cells({y({"foo","bar"}),z({"i","j","k","l"})}),
- float_cells({x({"a","b"}),y({"foo","bar","baz"})}), float_cells({y({"foo","bar"}),z({"i","j","k","l"})}),
- {x(3),y({"foo", "bar"})}, {y({"foo", "bar"}),z(7)},
- {x({"a","b","c"}),y(5)}, {y(5),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y(5)}), {y(5),z({"i","j","k","l"})},
- {x({"a","b","c"}),y(5)}, float_cells({y(5),z({"i","j","k","l"})}),
- float_cells({x({"a","b","c"}),y(5)}), float_cells({y(5),z({"i","j","k","l"})})
- };
- ASSERT_TRUE((layouts.size() % 2) == 0);
- for (size_t i = 0; i < layouts.size(); i += 2) {
- auto a = spec(layouts[i], seq);
- auto b = spec(layouts[i + 1], seq);
- dst.add(expr, {{"a", a}, {"b", b}});
+ for (const auto &layouts: join_layouts) {
+ GenSpec a = GenSpec::from_desc(layouts.first).seq(seq);
+ GenSpec b = GenSpec::from_desc(layouts.second).seq(skew(seq));
+ generate(expr, a, b, dst);
+ generate(expr, b, a, dst);
}
}
void generate_op2_join(const vespalib::string &op2_expr, const Sequence &seq, TestBuilder &dst) {
generate_join_expr(op2_expr, seq, dst);
- generate_join_expr(vespalib::make_string("join(a,b,f(a,b)(%s))", op2_expr.c_str()), seq, dst);
+ generate_join_expr(fmt("join(a,b,f(a,b)(%s))", op2_expr.c_str()), seq, dst);
}
-void generate_tensor_join(TestBuilder &dst) {
+void generate_join(TestBuilder &dst) {
generate_op2_join("a+b", Div16(N()), dst);
generate_op2_join("a-b", Div16(N()), dst);
generate_op2_join("a*b", Div16(N()), dst);
generate_op2_join("a/b", Div16(N()), dst);
generate_op2_join("a%b", Div16(N()), dst);
- generate_op2_join("a^b", Div16(N()), dst);
- generate_op2_join("pow(a,b)", Div16(N()), dst);
+ generate_op2_join("a^b", my_seq(1.0, 1.0, 5), dst);
+ generate_op2_join("pow(a,b)", my_seq(1.0, 1.0, 5), dst);
generate_op2_join("a==b", Div16(N()), dst);
generate_op2_join("a!=b", Div16(N()), dst);
generate_op2_join("a~=b", Div16(N()), dst);
@@ -176,145 +259,250 @@ void generate_tensor_join(TestBuilder &dst) {
generate_op2_join("fmod(a,b)", Div16(N()), dst);
generate_op2_join("min(a,b)", Div16(N()), dst);
generate_op2_join("max(a,b)", Div16(N()), dst);
+ // inverted lambda
+ generate_join_expr("join(a,b,f(a,b)(b-a))", Div16(N()), dst);
+ // custom lambda
generate_join_expr("join(a,b,f(a,b)((a+b)/(a*b)))", Div16(N()), dst);
}
//-----------------------------------------------------------------------------
-void generate_dot_product(TestBuilder &dst,
- double expect,
- const Layout &lhs, const Sequence &lhs_seq,
- const Layout &rhs, const Sequence &rhs_seq)
-{
- dst.add("reduce(a*b,sum)",
- { {"a", spec(lhs, lhs_seq)},{"b", spec(rhs, rhs_seq)} },
- spec(expect));
+void generate_merge_expr(const vespalib::string &expr, const Sequence &seq, TestBuilder &dst) {
+ for (const auto &layouts: merge_layouts) {
+ GenSpec a = GenSpec::from_desc(layouts.first).seq(seq);
+ GenSpec b = GenSpec::from_desc(layouts.second).seq(skew(seq));
+ generate(expr, a, b, dst);
+ generate(expr, b, a, dst);
+ }
}
-void generate_dot_product(TestBuilder &dst,
- double expect,
- const Layout &layout,
- const Sequence &lhs_seq,
- const Sequence &rhs_seq)
-{
- auto fl_lay = float_cells(layout);
- generate_dot_product(dst, expect, layout, lhs_seq, layout, rhs_seq);
- generate_dot_product(dst, expect, fl_lay, lhs_seq, layout, rhs_seq);
- generate_dot_product(dst, expect, layout, lhs_seq, fl_lay, rhs_seq);
- generate_dot_product(dst, expect, fl_lay, lhs_seq, fl_lay, rhs_seq);
+void generate_op2_merge(const vespalib::string &op2_expr, const Sequence &seq, TestBuilder &dst) {
+ generate_merge_expr(op2_expr, seq, dst);
+ generate_merge_expr(fmt("merge(a,b,f(a,b)(%s))", op2_expr.c_str()), seq, dst);
+}
+
+void generate_merge(TestBuilder &dst) {
+ generate_op2_merge("a+b", Div16(N()), dst);
+ generate_op2_merge("a-b", Div16(N()), dst);
+ generate_op2_merge("a*b", Div16(N()), dst);
+ generate_op2_merge("a/b", Div16(N()), dst);
+ generate_op2_merge("a%b", Div16(N()), dst);
+ generate_op2_merge("a^b", my_seq(1.0, 1.0, 5), dst);
+ generate_op2_merge("pow(a,b)", my_seq(1.0, 1.0, 5), dst);
+ generate_op2_merge("a==b", Div16(N()), dst);
+ generate_op2_merge("a!=b", Div16(N()), dst);
+ generate_op2_merge("a~=b", Div16(N()), dst);
+ generate_op2_merge("a<b", Div16(N()), dst);
+ generate_op2_merge("a<=b", Div16(N()), dst);
+ generate_op2_merge("a>b", Div16(N()), dst);
+ generate_op2_merge("a>=b", Div16(N()), dst);
+ generate_op2_merge("a&&b", Seq({0.0, 1.0, 1.0}), dst);
+ generate_op2_merge("a||b", Seq({0.0, 1.0, 1.0}), dst);
+ generate_op2_merge("atan2(a,b)", Div16(N()), dst);
+ generate_op2_merge("ldexp(a,b)", Div16(N()), dst);
+ generate_op2_merge("fmod(a,b)", Div16(N()), dst);
+ generate_op2_merge("min(a,b)", Div16(N()), dst);
+ generate_op2_merge("max(a,b)", Div16(N()), dst);
+ // inverted lambda
+ generate_merge_expr("merge(a,b,f(a,b)(b-a))", Div16(N()), dst);
+ // custom lambda
+ generate_merge_expr("merge(a,b,f(a,b)((a+b)/(a*b)))", Div16(N()), dst);
+}
+
+//-----------------------------------------------------------------------------
+
+void generate_concat(TestBuilder &dst) {
+ for (const auto &layout_a: concat_c_layouts_a) {
+ for (const auto &layout_b: concat_c_layouts_b) {
+ GenSpec a = GenSpec::from_desc(layout_a).seq(N());
+ GenSpec b = GenSpec::from_desc(layout_b).seq(skew(N()));
+ generate("concat(a, b, c)", a, b, dst);
+ generate("concat(a, b, c)", b, a, dst);
+ }
+ }
+}
+
+//-----------------------------------------------------------------------------
+
+void generate_create(TestBuilder &dst) {
+ generate_with_cell_type("tensor<%s>(x[3]):[a,b,c]", 1, 2, 3, dst);
+ generate_with_cell_type("tensor<%s>(x{}):{a:a,b:b,c:c}", 1, 2, 3, dst);
+ generate_with_cell_type("tensor<%s>(x{},y[2]):{a:[a,b+c]}", 1, 2, 3, dst);
+}
+
+//-----------------------------------------------------------------------------
+
+void generate_lambda(TestBuilder &dst) {
+ generate_with_cell_type("tensor<%s>(x[10])(a+b+c+x+1)", 1, 2, 3, dst);
+ generate_with_cell_type("tensor<%s>(x[5],y[4])(a+b+c+x*4+(y+1))", 1, 2, 3, dst);
+ generate_with_cell_type("tensor<%s>(x[5],y[4])(a+b+c+(x==y))", 1, 2, 3, dst);
+}
+
+//-----------------------------------------------------------------------------
+
+void generate_cell_cast(TestBuilder &dst) {
+ for (const auto &layout: basic_layouts) {
+ GenSpec a = GenSpec::from_desc(layout).seq(N(-100));
+ auto from_cell_types = a.dims().empty() ? just_double : dst.full ? all_types : just_float;
+ auto to_cell_types = a.dims().empty() ? just_double : all_types;
+ for (auto a_ct: from_cell_types) {
+ for (auto to_ct: to_cell_types) {
+ auto name = value_type::cell_type_to_name(to_ct);
+ dst.add(fmt("cell_cast(a,%s)", name.c_str()), {{"a", a.cpy().cells(a_ct)}});
+ }
+ }
+ }
+}
+
+//-----------------------------------------------------------------------------
+
+void generate_peek(TestBuilder &dst) {
+ GenSpec num(2);
+ GenSpec dense = GenSpec::from_desc("x3y5z7").seq(N());
+ GenSpec sparse = GenSpec::from_desc("x3_1y5_1z7_1").seq(N());
+ GenSpec mixed = GenSpec::from_desc("x3_1y5z7").seq(N());
+ for (const auto &spec: {dense, sparse, mixed}) {
+ generate("a{x:1,y:2,z:4}", spec, dst);
+ generate("a{y:2,z:5}", spec, dst);
+ generate("a{x:2}", spec, dst);
+ generate("a{x:1,y:(b),z:(b+2)}", spec, num, dst);
+ generate("a{y:(b),z:5}", spec, num, dst);
+ generate("a{x:(b)}", spec, num, dst);
+ }
+}
+
+//-----------------------------------------------------------------------------
+
+void generate_rename(TestBuilder &dst) {
+ GenSpec dense = GenSpec::from_desc("x3y5z7").seq(N());
+ GenSpec sparse = GenSpec::from_desc("x3_1y5_1z7_1").seq(N());
+ GenSpec mixed = GenSpec::from_desc("x3_1y5z7").seq(N());
+ for (const auto &spec: {dense, sparse, mixed}) {
+ generate("rename(a,x,d)", spec, dst);
+ generate("rename(a,y,d)", spec, dst);
+ generate("rename(a,z,d)", spec, dst);
+ generate("rename(a,(x,z),(z,x))", spec, dst);
+ }
+}
+
+//-----------------------------------------------------------------------------
+
+void generate_if(TestBuilder &dst) {
+ vespalib::string expr = "if(a,b,c)";
+ for (const auto &layout: basic_layouts) {
+ GenSpec b = GenSpec::from_desc(layout).seq(N());
+ GenSpec c = GenSpec::from_desc(layout).seq(skew(N()));
+ auto cell_types = b.dims().empty() ? just_double : dst.full ? all_types : just_float;
+ for (auto ct: cell_types) {
+ dst.add(expr, {{"a", GenSpec(0.0)},{"b", b.cpy().cells(ct)},{"c", c.cpy().cells(ct)}});
+ dst.add(expr, {{"a", GenSpec(1.0)},{"b", b.cpy().cells(ct)},{"c", c.cpy().cells(ct)}});
+ }
+ }
+}
+
+//-----------------------------------------------------------------------------
+
+void generate_products(TestBuilder &dst) {
+ auto z1 = GenSpec(1).from_desc("z7");
+ auto z2 = GenSpec(7).from_desc("z7");
+ auto xz = GenSpec(1).from_desc("x3z7");
+ auto yz = GenSpec(3).from_desc("y5z7");
+ // dot product
+ generate("reduce(a*b,sum,z)", z1, z2, dst);
+ // xw product
+ generate("reduce(a*b,sum,z)", z1, xz, dst);
+ generate("reduce(a*b,sum,z)", xz, z2, dst);
+ // matmul
+ generate("reduce(a*b,sum,z)", xz, yz, dst);
+}
+
+//-----------------------------------------------------------------------------
+
+void generate_expanding_reduce(TestBuilder &dst) {
+ auto spec = GenSpec::from_desc("x5y0_0");
+ for (Aggr aggr: Aggregator::list()) {
+ // end up with more cells than you started with
+ auto expr1 = fmt("reduce(a,%s,y)", AggrNames::name_of(aggr)->c_str());
+ auto expr2 = fmt("reduce(a,%s)", AggrNames::name_of(aggr)->c_str());
+ dst.add(expr1, {{"a", spec}});
+ dst.add(expr2, {{"a", spec}});
+ }
}
-void generate_dot_product(TestBuilder &dst) {
- generate_dot_product(dst, ((2 * 7) + (3 * 11) + (5 * 13)), {x(3)},
- Seq({ 2, 3, 5 }),
- Seq({ 7, 11, 13 }));
+//-----------------------------------------------------------------------------
+
+void generate_converting_lambda(TestBuilder &dst) {
+ auto spec = GenSpec::from_desc("x3y5_2");
+ // change cell type and dimension types
+ dst.add("tensor<bfloat16>(x[5],y[10])(a{x:(x),y:(y)})", {{"a", spec}});
}
//-----------------------------------------------------------------------------
-void generate_xw_product(TestBuilder &dst) {
- auto matrix = spec({x(2),y(3)}, Seq({ 3, 5, 7, 11, 13, 17 }));
- auto fmatrix = spec(float_cells({x(2),y(3)}), Seq({ 3, 5, 7, 11, 13, 17 }));
- dst.add("reduce(a*b,sum,x)", {{"a", spec(x(2), Seq({ 1, 2 }))}, {"b", matrix}},
- spec(y(3), Seq({(1*3+2*11),(1*5+2*13),(1*7+2*17)})));
- dst.add("reduce(a*b,sum,x)",
- {{"a", spec(float_cells({x(2)}), Seq({ 1, 2 }))}, {"b", matrix}},
- spec(y(3), Seq({(1*3+2*11),(1*5+2*13),(1*7+2*17)})));
- dst.add("reduce(a*b,sum,x)", {{"a", spec(x(2), Seq({ 1, 2 }))}, {"b", fmatrix}},
- spec(y(3), Seq({(1*3+2*11),(1*5+2*13),(1*7+2*17)})));
- dst.add("reduce(a*b,sum,x)",
- {{"a", spec(float_cells({x(2)}), Seq({ 1, 2 }))}, {"b", fmatrix}},
- spec(float_cells({y(3)}), Seq({(1*3+2*11),(1*5+2*13),(1*7+2*17)})));
- dst.add("reduce(a*b,sum,y)", {{"a", spec(y(3), Seq({ 1, 2, 3 }))}, {"b", matrix}},
- spec(x(2), Seq({(1*3+2*5+3*7),(1*11+2*13+3*17)})));
+void generate_shadowing_lambda(TestBuilder &dst) {
+ auto a = GenSpec::from_desc("a3");
+ auto b = GenSpec::from_desc("b3");
+ // index 'a' shadows external parameter 'a'
+ dst.add("tensor(a[5])(reduce(a,sum)+reduce(b,sum))", {{"a", a}, {"b", b}});
}
//-----------------------------------------------------------------------------
-void generate_tensor_concat(TestBuilder &dst) {
- dst.add("concat(a,b,x)", {{"a", spec(10.0)}, {"b", spec(20.0)}}, spec(x(2), Seq({10.0, 20.0})));
- dst.add("concat(a,b,x)", {{"a", spec(x(1), Seq({10.0}))}, {"b", spec(20.0)}}, spec(x(2), Seq({10.0, 20.0})));
- dst.add("concat(a,b,x)", {{"a", spec(10.0)}, {"b", spec(x(1), Seq({20.0}))}}, spec(x(2), Seq({10.0, 20.0})));
- dst.add("concat(a,b,x)", {{"a", spec(x(3), Seq({1.0, 2.0, 3.0}))}, {"b", spec(x(2), Seq({4.0, 5.0}))}},
- spec(x(5), Seq({1.0, 2.0, 3.0, 4.0, 5.0})));
- dst.add("concat(a,b,y)", {{"a", spec({x(2),y(2)}, Seq({1.0, 2.0, 3.0, 4.0}))}, {"b", spec(y(2), Seq({5.0, 6.0}))}},
- spec({x(2),y(4)}, Seq({1.0, 2.0, 5.0, 6.0, 3.0, 4.0, 5.0, 6.0})));
- dst.add("concat(a,b,x)", {{"a", spec({x(2),y(2)}, Seq({1.0, 2.0, 3.0, 4.0}))}, {"b", spec(x(2), Seq({5.0, 6.0}))}},
- spec({x(4),y(2)}, Seq({1.0, 2.0, 3.0, 4.0, 5.0, 5.0, 6.0, 6.0})));
- dst.add("concat(a,b,x)", {{"a", spec(z(3), Seq({1.0, 2.0, 3.0}))}, {"b", spec(y(2), Seq({4.0, 5.0}))}},
- spec({x(2),y(2),z(3)}, Seq({1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 4.0, 4.0, 4.0, 5.0, 5.0, 5.0})));
- dst.add("concat(a,b,x)", {{"a", spec(y(2), Seq({1.0, 2.0}))}, {"b", spec(y(2), Seq({4.0, 5.0}))}},
- spec({x(2), y(2)}, Seq({1.0, 2.0, 4.0, 5.0})));
- dst.add("concat(concat(a,b,x),concat(c,d,x),y)", {{"a", spec(1.0)}, {"b", spec(2.0)}, {"c", spec(3.0)}, {"d", spec(4.0)}},
- spec({x(2), y(2)}, Seq({1.0, 3.0, 2.0, 4.0})));
-
- dst.add("concat(a,b,x)",
- {{"a", spec(float_cells({x(1)}), Seq({10.0}))}, {"b", spec(20.0) }},
- spec(float_cells({x(2)}), Seq({10.0, 20.0})));
-
- dst.add("concat(a,b,x)",
- {{"a", spec(10.0)}, {"b", spec(float_cells({x(1)}), Seq({20.0}))}},
- spec(float_cells({x(2)}), Seq({10.0, 20.0})));
-
- dst.add("concat(a,b,x)",
- {
- {"a", spec(float_cells({x(3)}), Seq({1.0, 2.0, 3.0})) },
- {"b", spec(x(2), Seq({4.0, 5.0})) }
- },
- spec(x(5), N()));
-
- dst.add("concat(a,b,x)",
- {
- {"a", spec(x(3), Seq({1.0, 2.0, 3.0}))},
- {"b", spec(float_cells({x(2)}), Seq({4.0, 5.0}))}
- },
- spec(x(5), N()));
-
- dst.add("concat(a,b,x)",
- {
- {"a", spec(float_cells({x(3)}), Seq({1.0, 2.0, 3.0})) },
- {"b", spec(float_cells({x(2)}), Seq({4.0, 5.0})) }
- },
- spec(float_cells({x(5)}), N()));
+void generate_strict_verbatim_peek(TestBuilder &dst) {
+ auto a = GenSpec(3);
+ auto b = GenSpec().map("x", {"3", "a"});
+ // 'a' without () is verbatim even if 'a' is a known value
+ dst.add("b{x:a}", {{"a", a}, {"b", b}});
}
//-----------------------------------------------------------------------------
-void generate_tensor_rename(TestBuilder &dst) {
- dst.add("rename(a,x,y)", {{"a", spec(x(5), N())}}, spec(y(5), N()));
- dst.add("rename(a,y,x)", {{"a", spec({y(5),z(5)}, N())}}, spec({x(5),z(5)}, N()));
- dst.add("rename(a,z,x)", {{"a", spec({y(5),z(5)}, N())}}, spec({y(5),x(5)}, N()));
- dst.add("rename(a,x,z)", {{"a", spec({x(5),y(5)}, N())}}, spec({z(5),y(5)}, N()));
- dst.add("rename(a,y,z)", {{"a", spec({x(5),y(5)}, N())}}, spec({x(5),z(5)}, N()));
- dst.add("rename(a,y,z)", {{"a", spec(float_cells({x(5),y(5)}), N())}}, spec(float_cells({x(5),z(5)}), N()));
- dst.add("rename(a,(x,y),(y,x))", {{"a", spec({x(5),y(5)}, N())}}, spec({y(5),x(5)}, N()));
+void generate_nested_tensor_lambda(TestBuilder &dst) {
+ auto a = GenSpec(2);
+ auto b = GenSpec::from_desc("x3").seq(Seq({3,5,7}));
+ // constant nested tensor lambda
+ dst.add("tensor(x[2],y[3],z[5])(tensor(x[5],y[3],z[2])(x*6+y*2+z){x:(z),y:(y),z:(x)})", {});
+ // dynamic nested tensor lambda
+ dst.add("tensor(x[2],y[3],z[5])(tensor(x[5],y[3],z[2])(20*(a+x)+2*(b{x:(a)}+y)+z){x:(z),y:(y),z:(x)})",
+ {{"a", a}, {"b", b}});
}
//-----------------------------------------------------------------------------
-void generate_tensor_lambda(TestBuilder &dst) {
- dst.add("tensor(x[10])(x+1)", {{}}, spec(x(10), N()));
- dst.add("tensor<float>(x[5],y[4])(x*4+(y+1))", {{}}, spec(float_cells({x(5),y(4)}), N()));
- dst.add("tensor(x[5],y[4])(x*4+(y+1))", {{}}, spec({x(5),y(4)}, N()));
- dst.add("tensor(x[5],y[4])(x==y)", {{}}, spec({x(5),y(4)},
- Seq({ 1.0, 0.0, 0.0, 0.0,
- 0.0, 1.0, 0.0, 0.0,
- 0.0, 0.0, 1.0, 0.0,
- 0.0, 0.0, 0.0, 1.0,
- 0.0, 0.0, 0.0, 0.0})));
+void generate_erf_value_test(TestBuilder &dst) {
+ auto a = GenSpec().idx("x", 16 * 17 * 6).seq(Div17(Div16(N(0))));
+ dst.add("erf(a)", {{"a", a}});
+ dst.add("erf(-a)", {{"a", a}});
}
//-----------------------------------------------------------------------------
+} // namespace <unnamed>
+
+//-----------------------------------------------------------------------------
+
void
Generator::generate(TestBuilder &dst)
{
- generate_tensor_reduce(dst);
- generate_tensor_map(dst);
- generate_tensor_join(dst);
- generate_dot_product(dst);
- generate_xw_product(dst);
- generate_tensor_concat(dst);
- generate_tensor_rename(dst);
- generate_tensor_lambda(dst);
+ generate_const(dst);
+ generate_inject(dst);
+ generate_reduce(dst);
+ generate_map(dst);
+ generate_join(dst);
+ generate_merge(dst);
+ generate_concat(dst);
+ generate_create(dst);
+ generate_lambda(dst);
+ generate_cell_cast(dst);
+ generate_peek(dst);
+ generate_rename(dst);
+ generate_if(dst);
+ //--------------------
+ generate_products(dst);
+ generate_expanding_reduce(dst);
+ generate_converting_lambda(dst);
+ generate_shadowing_lambda(dst);
+ generate_strict_verbatim_peek(dst);
+ generate_nested_tensor_lambda(dst);
+ generate_erf_value_test(dst);
}
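Each generate_* helper above only names an expression and its inputs; expected results are no longer passed in, they are filled in later by the reference evaluator (see tensor_conformance.cpp below), and the full flag on TestBuilder decides whether all cell types or just float are exercised. A sketch of what an additional generator group could look like, using only calls that already appear in this file (the squaring expression is a hypothetical example, not part of the patch):

// Sketch only: a hypothetical extra generator in the same style as
// generate_inject()/generate_map() above. No expected result is supplied;
// ref_eval() computes it when the test case is written out.
void generate_square_example(TestBuilder &dst) {
    for (const auto &layout: basic_layouts) {
        GenSpec a = GenSpec::from_desc(layout).seq(Div16(N()));
        generate("map(a,f(a)(a*a))", a, dst);
    }
}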
diff --git a/eval/src/apps/tensor_conformance/generate.h b/eval/src/apps/tensor_conformance/generate.h
index 0f74ce924b3..a71531f7cf3 100644
--- a/eval/src/apps/tensor_conformance/generate.h
+++ b/eval/src/apps/tensor_conformance/generate.h
@@ -6,12 +6,9 @@
#include <map>
struct TestBuilder {
+ bool full;
+ TestBuilder(bool full_in) : full(full_in) {}
using TensorSpec = vespalib::eval::TensorSpec;
- // add test with pre-defined expected result
- virtual void add(const vespalib::string &expression,
- const std::map<vespalib::string,TensorSpec> &inputs,
- const TensorSpec &expect) = 0;
- // add test with undefined expected result
virtual void add(const vespalib::string &expression,
const std::map<vespalib::string,TensorSpec> &inputs) = 0;
virtual ~TestBuilder() {}
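With the pre-computed-result overload removed, implementing TestBuilder only requires the single add() method plus forwarding the full flag through the base constructor. A minimal sketch (illustrative only; MyTestBuilder in tensor_conformance.cpp below is the actual implementation used by the tool):

// Sketch: a hypothetical TestBuilder that merely counts generated cases.
struct CountingBuilder : TestBuilder {
    size_t count = 0;
    explicit CountingBuilder(bool full_in) : TestBuilder(full_in) {}
    void add(const vespalib::string &,
             const std::map<vespalib::string,TensorSpec> &) override {
        ++count;
    }
};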
diff --git a/eval/src/apps/tensor_conformance/tensor_conformance.cpp b/eval/src/apps/tensor_conformance/tensor_conformance.cpp
index a62e775abd5..37ecce51714 100644
--- a/eval/src/apps/tensor_conformance/tensor_conformance.cpp
+++ b/eval/src/apps/tensor_conformance/tensor_conformance.cpp
@@ -5,8 +5,8 @@
#include <vespa/vespalib/data/slime/slime.h>
#include <vespa/vespalib/io/mapped_file_input.h>
#include <vespa/vespalib/objects/nbostream.h>
-#include <vespa/vespalib/testkit/test_kit.h>
#include <vespa/vespalib/util/size_literals.h>
+#include <vespa/vespalib/util/require.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/eval/eval/fast_value.h>
#include <vespa/eval/eval/function.h>
@@ -15,6 +15,7 @@
#include <vespa/eval/eval/tensor_spec.h>
#include <vespa/eval/eval/test/reference_evaluation.h>
#include <vespa/eval/eval/test/test_io.h>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/eval/eval/value.h>
#include <vespa/eval/eval/value_codec.h>
#include <vespa/eval/eval/value_type.h>
@@ -35,6 +36,10 @@ using namespace std::placeholders;
//-----------------------------------------------------------------------------
+size_t fail_cnt = 0;
+
+//-----------------------------------------------------------------------------
+
const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
const ValueBuilderFactory &simple_factory = SimpleValueBuilderFactory::get();
const ValueBuilderFactory &streamed_factory = StreamedValueBuilderFactory::get();
@@ -48,7 +53,7 @@ uint8_t unhex(char c) {
if (c >= 'A' && c <= 'F') {
return ((c - 'A') + 10);
}
- TEST_ERROR("bad hex char");
+ REQUIRE_FAILED("bad hex char");
return 0;
}
@@ -87,13 +92,41 @@ TensorSpec extract_value(const Inspector &inspector) {
//-----------------------------------------------------------------------------
+std::vector<vespalib::string> extract_fields(const Inspector &object) {
+ struct FieldExtractor : slime::ObjectTraverser {
+ std::vector<vespalib::string> result;
+ void field(const Memory &symbol, const Inspector &) override {
+ result.push_back(symbol.make_string());
+ }
+ } extractor;
+ object.traverse(extractor);
+ return std::move(extractor.result);
+};
+
+//-----------------------------------------------------------------------------
+
+void dump_test(const Inspector &test) {
+ fprintf(stderr, "expression: '%s'\n", test["expression"].asString().make_string().c_str());
+ for (const auto &input: extract_fields(test["inputs"])) {
+ auto value = extract_value(test["inputs"][input]);
+ fprintf(stderr, "input '%s': %s\n", input.c_str(), value.to_string().c_str());
+ }
+}
+
+//-----------------------------------------------------------------------------
+
TensorSpec ref_eval(const Inspector &test) {
auto fun = Function::parse(test["expression"].asString().make_string());
std::vector<TensorSpec> params;
for (size_t i = 0; i < fun->num_params(); ++i) {
params.push_back(extract_value(test["inputs"][fun->param_name(i)]));
}
- return ReferenceEvaluation::eval(*fun, params);
+ auto result = ReferenceEvaluation::eval(*fun, params);
+ if (result.type() == "error") {
+ dump_test(test);
+ REQUIRE_FAILED("reference evaluation failed!");
+ }
+ return result;
}
//-----------------------------------------------------------------------------
@@ -119,25 +152,12 @@ TensorSpec eval_expr(const Inspector &test, const ValueBuilderFactory &factory)
InterpretedFunction::Context ctx(ifun);
SimpleObjectParams params(param_refs);
const Value &result = ifun.eval(ctx, params);
- ASSERT_EQUAL(result.type(), types.get_type(fun->root()));
+ REQUIRE_EQ(result.type(), types.get_type(fun->root()));
return spec_from_value(result);
}
//-----------------------------------------------------------------------------
-std::vector<vespalib::string> extract_fields(const Inspector &object) {
- struct FieldExtractor : slime::ObjectTraverser {
- std::vector<vespalib::string> result;
- void field(const Memory &symbol, const Inspector &) override {
- result.push_back(symbol.make_string());
- }
- } extractor;
- object.traverse(extractor);
- return std::move(extractor.result);
-};
-
-//-----------------------------------------------------------------------------
-
void print_test(const Inspector &test, OutputWriter &dst) {
dst.printf("expression: '%s'\n", test["expression"].asString().make_string().c_str());
for (const auto &input: extract_fields(test["inputs"])) {
@@ -153,186 +173,92 @@ void print_test(const Inspector &test, OutputWriter &dst) {
class MyTestBuilder : public TestBuilder {
private:
TestWriter _writer;
- void make_test(const vespalib::string &expression,
- const std::map<vespalib::string,TensorSpec> &input_map,
- const TensorSpec *expect = nullptr)
+public:
+ MyTestBuilder(bool full_in, Output &out) : TestBuilder(full_in), _writer(out) {}
+ void add(const vespalib::string &expression,
+ const std::map<vespalib::string,TensorSpec> &inputs_in) override
{
Cursor &test = _writer.create();
test.setString("expression", expression);
Cursor &inputs = test.setObject("inputs");
- for (const auto &input: input_map) {
- insert_value(inputs, input.first, input.second);
+ for (const auto& [name, spec]: inputs_in) {
+ insert_value(inputs, name, spec);
}
- if (expect != nullptr) {
- insert_value(test.setObject("result"), "expect", *expect);
- } else {
- insert_value(test.setObject("result"), "expect", ref_eval(test));
- }
- }
-public:
- MyTestBuilder(Output &out) : _writer(out) {}
- void add(const vespalib::string &expression,
- const std::map<vespalib::string,TensorSpec> &inputs,
- const TensorSpec &expect) override
- {
- make_test(expression, inputs, &expect);
+ insert_value(test.setObject("result"), "expect", ref_eval(test));
}
- void add(const vespalib::string &expression,
- const std::map<vespalib::string,TensorSpec> &inputs) override
- {
- make_test(expression, inputs);
+ void add_failing_test() {
+ Cursor &test = _writer.create();
+ test.setString("expression", "a");
+ insert_value(test.setObject("inputs"), "a", GenSpec(1).idx("x", 3));
+ insert_value(test.setObject("result"), "dummy", GenSpec(2).idx("x", 3));
}
};
-void generate(Output &out) {
- MyTestBuilder my_test_builder(out);
+void generate(Output &out, bool full) {
+ MyTestBuilder my_test_builder(full, out);
Generator::generate(my_test_builder);
+ // my_test_builder.add_failing_test();
}
//-----------------------------------------------------------------------------
void evaluate(Input &in, Output &out) {
- auto handle_test = [&out](Slime &slime)
- {
- insert_value(slime["result"], "cpp_prod",
- eval_expr(slime.get(), prod_factory));
- insert_value(slime["result"], "cpp_simple_value",
- eval_expr(slime.get(), simple_factory));
- insert_value(slime["result"], "cpp_streamed_value",
- eval_expr(slime.get(), streamed_factory));
- write_compact(slime, out);
- };
- auto handle_summary = [&out](Slime &slime)
- {
- write_compact(slime, out);
- };
+ auto handle_test = [&out](Slime &slime) {
+ insert_value(slime["result"], "cpp_prod",
+ eval_expr(slime.get(), prod_factory));
+ insert_value(slime["result"], "cpp_simple_value",
+ eval_expr(slime.get(), simple_factory));
+ insert_value(slime["result"], "cpp_streamed_value",
+ eval_expr(slime.get(), streamed_factory));
+ write_compact(slime, out);
+ };
+ auto handle_summary = [&out](Slime &slime) {
+ write_compact(slime, out);
+ };
for_each_test(in, handle_test, handle_summary);
}
//-----------------------------------------------------------------------------
-void dump_test(const Inspector &test) {
- fprintf(stderr, "expression: '%s'\n", test["expression"].asString().make_string().c_str());
- for (const auto &input: extract_fields(test["inputs"])) {
- auto value = extract_value(test["inputs"][input]);
- fprintf(stderr, "input '%s': %s\n", input.c_str(), value.to_string().c_str());
- }
-}
-
void verify(Input &in, Output &out) {
std::map<vespalib::string,size_t> result_map;
- auto handle_test = [&result_map](Slime &slime)
- {
- TensorSpec reference_result = ref_eval(slime.get());
- for (const auto &result: extract_fields(slime["result"])) {
- ++result_map[result];
- TEST_STATE(make_string("verifying result: '%s'", result.c_str()).c_str());
- if (!EXPECT_EQUAL(reference_result, extract_value(slime["result"][result]))) {
- dump_test(slime.get());
- }
- }
- };
- auto handle_summary = [&out,&result_map](Slime &slime)
- {
- Cursor &stats = slime.get().setObject("stats");
- for (const auto &entry: result_map) {
- stats.setLong(entry.first, entry.second);
- }
- JsonFormat::encode(slime, out, false);
- };
- for_each_test(in, handle_test, handle_summary);
-}
-
-//-----------------------------------------------------------------------------
-
-struct TestList {
- std::vector<Slime> list;
- void add_test(Slime &slime) {
- list.emplace_back();
- inject(slime.get(), SlimeInserter(list.back()));
- }
-};
-
-struct TestSpec {
- vespalib::string expression;
- std::vector<TensorSpec> inputs;
- TensorSpec result;
- TestSpec() : expression(), inputs(), result("error") {}
- ~TestSpec();
- void decode(const Inspector &test) {
- const auto &my_expression = test["expression"];
- ASSERT_TRUE(my_expression.valid());
- expression = my_expression.asString().make_string();
- auto fun = Function::parse(expression);
- ASSERT_TRUE(!fun->has_error());
- ASSERT_EQUAL(fun->num_params(), test["inputs"].fields());
- for (size_t i = 0; i < fun->num_params(); ++i) {
- TEST_STATE(make_string("input #%zu", i).c_str());
- const auto &my_input = test["inputs"][fun->param_name(i)];
- ASSERT_TRUE(my_input.valid());
- inputs.push_back(extract_value(my_input));
+ auto handle_test = [&result_map](Slime &slime) {
+ TensorSpec reference_result = ref_eval(slime.get());
+ for (const auto &result: extract_fields(slime["result"])) {
+ ++result_map[result];
+ auto actual_result = extract_value(slime["result"][result]);
+ if (!require_impl::eq(actual_result, reference_result)) {
+ ++fail_cnt;
+ fprintf(stderr, "expression failed('%s'): '%s'\n", result.c_str(),
+ slime["expression"].asString().make_string().c_str());
+ fprintf(stderr, "%s", TensorSpec::diff(actual_result, "actual", reference_result, "expected").c_str());
+ dump_test(slime.get());
+ }
}
- const auto &my_result = test["result"]["expect"];
- ASSERT_TRUE(my_result.valid());
- result = extract_value(my_result);
- }
-};
-TestSpec::~TestSpec() = default;
-
-void compare_test(const Inspector &expect_in, const Inspector &actual_in) {
- TestSpec expect;
- TestSpec actual;
- {
- TEST_STATE("decoding expected test case");
- expect.decode(expect_in);
- }
- {
- TEST_STATE("decoding actual test case");
- actual.decode(actual_in);
- }
- {
- TEST_STATE("comparing test cases");
- ASSERT_EQUAL(expect.expression, actual.expression);
- ASSERT_EQUAL(expect.inputs.size(), actual.inputs.size());
- for (size_t i = 0; i < expect.inputs.size(); ++i) {
- TEST_STATE(make_string("input #%zu", i).c_str());
- ASSERT_EQUAL(expect.inputs[i], actual.inputs[i]);
+ };
+ auto handle_summary = [&out,&result_map](Slime &slime) {
+ Cursor &stats = slime.get().setObject("stats");
+ for (const auto &entry: result_map) {
+ stats.setLong(entry.first, entry.second);
}
- ASSERT_EQUAL(expect.result, actual.result);
- }
-}
-
-void compare(Input &expect, Input &actual) {
- TestList expect_tests;
- TestList actual_tests;
- for_each_test(expect, std::bind(&TestList::add_test, &expect_tests, _1), [](Slime &) noexcept {});
- for_each_test(actual, std::bind(&TestList::add_test, &actual_tests, _1), [](Slime &) noexcept {});
- ASSERT_TRUE(!expect_tests.list.empty());
- ASSERT_TRUE(!actual_tests.list.empty());
- ASSERT_EQUAL(expect_tests.list.size(), actual_tests.list.size());
- size_t num_tests = expect_tests.list.size();
- fprintf(stderr, "...found %zu test cases to compare...\n", num_tests);
- for (size_t i = 0; i < num_tests; ++i) {
- TEST_STATE(make_string("test case #%zu", i).c_str());
- compare_test(expect_tests.list[i].get(), actual_tests.list[i].get());
- }
+ JsonFormat::encode(slime, out, false);
+ };
+ for_each_test(in, handle_test, handle_summary);
}
//-----------------------------------------------------------------------------
void display(Input &in, Output &out) {
size_t test_cnt = 0;
- auto handle_test = [&out,&test_cnt](Slime &slime)
- {
- OutputWriter dst(out, 4_Ki);
- dst.printf("\n------- TEST #%zu -------\n\n", test_cnt++);
- print_test(slime.get(), dst);
- };
- auto handle_summary = [&out,&test_cnt](Slime &)
- {
- OutputWriter dst(out, 1024);
- dst.printf("%zu tests displayed\n", test_cnt);
- };
+ auto handle_test = [&out,&test_cnt](Slime &slime) {
+ OutputWriter dst(out, 4_Ki);
+ dst.printf("\n------- TEST #%zu -------\n\n", test_cnt++);
+ print_test(slime.get(), dst);
+ };
+ auto handle_summary = [&out,&test_cnt](Slime &) {
+ OutputWriter dst(out, 1024);
+ dst.printf("%zu tests displayed\n", test_cnt);
+ };
for_each_test(in, handle_test, handle_summary);
}
@@ -340,7 +266,6 @@ void display(Input &in, Output &out) {
int usage(const char *self) {
fprintf(stderr, "usage: %s <mode>\n", self);
- fprintf(stderr, "usage: %s compare <expect> <actual>\n", self);
fprintf(stderr, " <mode>: which mode to activate\n");
fprintf(stderr, " 'generate': write test cases to stdout\n");
fprintf(stderr, " 'evaluate': read test cases from stdin, annotate them with\n");
@@ -350,8 +275,7 @@ int usage(const char *self) {
fprintf(stderr, " that all results are as expected\n");
fprintf(stderr, " 'display': read tests from stdin and print them to stdout\n");
fprintf(stderr, " in human-readable form\n");
- fprintf(stderr, " 'compare': read test cases from two separate files and\n");
- fprintf(stderr, " compare them to verify equivalence\n");
+ fprintf(stderr, " 'generate-some': write some test cases to stdout\n");
return 1;
}
@@ -362,34 +286,24 @@ int main(int argc, char **argv) {
return usage(argv[0]);
}
vespalib::string mode = argv[1];
- TEST_MASTER.init(make_string("vespa-tensor-conformance-%s", mode.c_str()).c_str());
if (mode == "generate") {
- generate(std_out);
+ generate(std_out, true);
+ } else if (mode == "generate-some") {
+ generate(std_out, false);
} else if (mode == "evaluate") {
evaluate(std_in, std_out);
} else if (mode == "verify") {
verify(std_in, std_out);
} else if (mode == "display") {
display(std_in, std_out);
- } else if (mode == "compare") {
- if (argc == 4) {
- MappedFileInput expect(argv[2]);
- MappedFileInput actual(argv[3]);
- if (expect.valid() && actual.valid()) {
- compare(expect, actual);
- } else {
- if (!expect.valid()) {
- TEST_ERROR(make_string("could not read file: %s", argv[2]).c_str());
- }
- if (!actual.valid()) {
- TEST_ERROR(make_string("could not read file: %s", argv[3]).c_str());
- }
- }
- } else {
- TEST_ERROR("wrong number of parameters for 'compare'\n");
- }
} else {
- TEST_ERROR(make_string("unknown mode: %s", mode.c_str()).c_str());
+ REQUIRE_FAILED(make_string("unknown mode: %s", mode.c_str()).c_str());
+ }
+ if (fail_cnt == 0) {
+ fprintf(stderr, "(mode=%s) DONE (no failures detected)\n", mode.c_str());
+ return 0;
+ } else {
+ fprintf(stderr, "(mode=%s) ERROR: detected %zu failure(s)\n", mode.c_str(), fail_cnt);
+ return 1;
}
- return (TEST_MASTER.fini() ? 0 : 1);
}
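
Note: with the test framework removed from this tool, failures are now counted by hand and reported through the process exit code. A minimal sketch of that pattern (the check/finish helpers below are illustrative only, not part of the change):

    // Count failures instead of relying on TEST_MASTER; exit nonzero if any were seen.
    #include <cstdio>
    #include <cstddef>

    static size_t fail_cnt = 0;

    void check(bool ok, const char *what) {
        if (!ok) {
            ++fail_cnt;
            fprintf(stderr, "failed: %s\n", what);
        }
    }

    int finish(const char *mode) {
        if (fail_cnt == 0) {
            fprintf(stderr, "(mode=%s) DONE (no failures detected)\n", mode);
            return 0;
        }
        fprintf(stderr, "(mode=%s) ERROR: detected %zu failure(s)\n", mode, fail_cnt);
        return 1;
    }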
diff --git a/eval/src/tests/eval/node_types/node_types_test.cpp b/eval/src/tests/eval/node_types/node_types_test.cpp
index a5a17ea15a0..504f66ac717 100644
--- a/eval/src/tests/eval/node_types/node_types_test.cpp
+++ b/eval/src/tests/eval/node_types/node_types_test.cpp
@@ -63,6 +63,8 @@ TEST("require that input parameters preserve their type") {
TEST_DO(verify("tensor()", "double"));
TEST_DO(verify("tensor(x{},y[10],z[5])", "tensor(x{},y[10],z[5])"));
TEST_DO(verify("tensor<float>(x{},y[10],z[5])", "tensor<float>(x{},y[10],z[5])"));
+ TEST_DO(verify("tensor<bfloat16>(x{},y[10],z[5])", "tensor<bfloat16>(x{},y[10],z[5])"));
+ TEST_DO(verify("tensor<int8>(x{},y[10],z[5])", "tensor<int8>(x{},y[10],z[5])"));
}
TEST("require that if resolves to the appropriate type") {
@@ -74,7 +76,12 @@ TEST("require that if resolves to the appropriate type") {
TEST_DO(verify("if(double,tensor(a{}),tensor(a{}))", "tensor(a{})"));
TEST_DO(verify("if(double,tensor(a[2]),tensor(a[2]))", "tensor(a[2])"));
TEST_DO(verify("if(double,tensor<float>(a[2]),tensor<float>(a[2]))", "tensor<float>(a[2])"));
+ TEST_DO(verify("if(double,tensor<bfloat16>(a[2]),tensor<bfloat16>(a[2]))", "tensor<bfloat16>(a[2])"));
+ TEST_DO(verify("if(double,tensor<int8>(a[2]),tensor<int8>(a[2]))", "tensor<int8>(a[2])"));
TEST_DO(verify("if(double,tensor(a[2]),tensor<float>(a[2]))", "error"));
+ TEST_DO(verify("if(double,tensor<float>(a[2]),tensor<bfloat16>(a[2]))", "error"));
+ TEST_DO(verify("if(double,tensor<float>(a[2]),tensor<int8>(a[2]))", "error"));
+ TEST_DO(verify("if(double,tensor<bfloat16>(a[2]),tensor<int8>(a[2]))", "error"));
TEST_DO(verify("if(double,tensor(a[2]),tensor(a[3]))", "error"));
TEST_DO(verify("if(double,tensor(a[2]),tensor(a{}))", "error"));
TEST_DO(verify("if(double,tensor(a{}),tensor(b{}))", "error"));
@@ -94,8 +101,14 @@ TEST("require that reduce resolves correct type") {
TEST_DO(verify("reduce(tensor(x{},y{},z{}),sum,a,b,c)", "error"));
TEST_DO(verify("reduce(tensor(x{}),sum,x)", "double"));
TEST_DO(verify("reduce(tensor<float>(x{},y{},z{}),sum,x,z)", "tensor<float>(y{})"));
+ TEST_DO(verify("reduce(tensor<bfloat16>(x{},y{},z{}),sum,x,z)", "tensor<float>(y{})"));
+ TEST_DO(verify("reduce(tensor<int8>(x{},y{},z{}),sum,x,z)", "tensor<float>(y{})"));
TEST_DO(verify("reduce(tensor<float>(x{}),sum,x)", "double"));
TEST_DO(verify("reduce(tensor<float>(x{}),sum)", "double"));
+ TEST_DO(verify("reduce(tensor<bfloat16>(x{}),sum,x)", "double"));
+ TEST_DO(verify("reduce(tensor<bfloat16>(x{}),sum)", "double"));
+ TEST_DO(verify("reduce(tensor<int8>(x{}),sum,x)", "double"));
+ TEST_DO(verify("reduce(tensor<int8>(x{}),sum)", "double"));
}
TEST("require that rename resolves correct type") {
@@ -111,6 +124,8 @@ TEST("require that rename resolves correct type") {
TEST_DO(verify("rename(tensor(x{},y[1],z[5]),(x,z),(z,x))", "tensor(z{},y[1],x[5])"));
TEST_DO(verify("rename(tensor(x{},y[1],z[5]),(x,y,z),(a,b,c))", "tensor(a{},b[1],c[5])"));
TEST_DO(verify("rename(tensor<float>(x{},y[1],z[5]),(x,y,z),(a,b,c))", "tensor<float>(a{},b[1],c[5])"));
+ TEST_DO(verify("rename(tensor<bfloat16>(x{},y[1],z[5]),(x,y,z),(a,b,c))", "tensor<bfloat16>(a{},b[1],c[5])"));
+ TEST_DO(verify("rename(tensor<int8>(x{},y[1],z[5]),(x,y,z),(a,b,c))", "tensor<int8>(a{},b[1],c[5])"));
}
vespalib::string strfmt(const char *pattern, const char *a) {
@@ -126,6 +141,8 @@ void verify_op1(const char *pattern) {
TEST_DO(verify(strfmt(pattern, "double"), "double"));
TEST_DO(verify(strfmt(pattern, "tensor(x{},y[10],z[1])"), "tensor(x{},y[10],z[1])"));
TEST_DO(verify(strfmt(pattern, "tensor<float>(x{},y[10],z[1])"), "tensor<float>(x{},y[10],z[1])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<bfloat16>(x{},y[10],z[1])"), "tensor<float>(x{},y[10],z[1])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<int8>(x{},y[10],z[1])"), "tensor<float>(x{},y[10],z[1])"));
}
void verify_op2(const char *pattern) {
@@ -146,6 +163,15 @@ void verify_op2(const char *pattern) {
TEST_DO(verify(strfmt(pattern, "tensor<float>(x[5])", "tensor<float>(x[5])"), "tensor<float>(x[5])"));
TEST_DO(verify(strfmt(pattern, "tensor<float>(x[5])", "tensor(x[5])"), "tensor(x[5])"));
TEST_DO(verify(strfmt(pattern, "tensor<float>(x[5])", "double"), "tensor<float>(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<bfloat16>(x[5])", "tensor<bfloat16>(x[5])"), "tensor<float>(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<bfloat16>(x[5])", "tensor<float>(x[5])"), "tensor<float>(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<bfloat16>(x[5])", "tensor(x[5])"), "tensor(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<bfloat16>(x[5])", "double"), "tensor<float>(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<int8>(x[5])", "tensor<int8>(x[5])"), "tensor<float>(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<bfloat16>(x[5])", "tensor<int8>(x[5])"), "tensor<float>(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<float>(x[5])", "tensor<int8>(x[5])"), "tensor<float>(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "tensor(x[5])", "tensor<int8>(x[5])"), "tensor(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "double", "tensor<int8>(x[5])"), "tensor<float>(x[5])"));
}
TEST("require that various operations resolve appropriate type") {
@@ -223,7 +249,11 @@ TEST("require that merge resolves to the appropriate type") {
TEST_DO(verify(strfmt(pattern, "tensor(x[5])", "tensor(x[3])"), "error"));
TEST_DO(verify(strfmt(pattern, "tensor(x{})", "tensor(x[5])"), "error"));
TEST_DO(verify(strfmt(pattern, "tensor<float>(x[5])", "tensor<float>(x[5])"), "tensor<float>(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<bfloat16>(x[5])", "tensor<bfloat16>(x[5])"), "tensor<float>(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<int8>(x[5])", "tensor<int8>(x[5])"), "tensor<float>(x[5])"));
TEST_DO(verify(strfmt(pattern, "tensor<float>(x[5])", "tensor(x[5])"), "tensor(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<bfloat16>(x[5])", "tensor(x[5])"), "tensor(x[5])"));
+ TEST_DO(verify(strfmt(pattern, "tensor<int8>(x[5])", "tensor(x[5])"), "tensor(x[5])"));
TEST_DO(verify(strfmt(pattern, "tensor(x[5])", "tensor<float>(x[5])"), "tensor(x[5])"));
TEST_DO(verify(strfmt(pattern, "tensor<float>(x[5])", "double"), "error"));
}
@@ -234,6 +264,8 @@ TEST("require that static tensor lambda resolves correct type") {
TEST_DO(verify("tensor(x[5],y[10],z[15])(1.0)", "tensor(x[5],y[10],z[15])"));
TEST_DO(verify("tensor<double>(x[5],y[10],z[15])(1.0)", "tensor(x[5],y[10],z[15])"));
TEST_DO(verify("tensor<float>(x[5],y[10],z[15])(1.0)", "tensor<float>(x[5],y[10],z[15])"));
+ TEST_DO(verify("tensor<bfloat16>(x[5],y[10],z[15])(1.0)", "tensor<bfloat16>(x[5],y[10],z[15])"));
+ TEST_DO(verify("tensor<int8>(x[5],y[10],z[15])(1.0)", "tensor<int8>(x[5],y[10],z[15])"));
}
TEST("require that tensor create resolves correct type") {
@@ -241,6 +273,8 @@ TEST("require that tensor create resolves correct type") {
TEST_DO(verify("tensor(x{}):{{x:a}:double,{x:b}:double,{x:c}:double}", "tensor(x{})"));
TEST_DO(verify("tensor(x{},y[2]):{{x:a,y:0}:double,{x:a,y:1}:double}", "tensor(x{},y[2])"));
TEST_DO(verify("tensor<float>(x[3]):{{x:0}:double,{x:1}:double,{x:2}:double}", "tensor<float>(x[3])"));
+ TEST_DO(verify("tensor<bfloat16>(x[3]):{{x:0}:double,{x:1}:double,{x:2}:double}", "tensor<bfloat16>(x[3])"));
+ TEST_DO(verify("tensor<int8>(x[3]):{{x:0}:double,{x:1}:double,{x:2}:double}", "tensor<int8>(x[3])"));
TEST_DO(verify("tensor(x[3]):{{x:0}:double+double,{x:1}:double-double,{x:2}:double/double}", "tensor(x[3])"));
TEST_DO(verify("tensor(x[3]):{{x:0}:double,{x:1}:reduce(tensor(x[2]),sum),{x:2}:double}", "tensor(x[3])"));
TEST_DO(verify("tensor(x[3]):{{x:0}:double,{x:1}:tensor(x[2]),{x:2}:double}", "error"));
@@ -251,6 +285,8 @@ TEST("require that dynamic tensor lambda resolves correct type") {
TEST_DO(verify("tensor(x[3])(error)", "error"));
TEST_DO(verify("tensor(x[3])(double)", "tensor(x[3])"));
TEST_DO(verify("tensor<float>(x[3])(double)", "tensor<float>(x[3])"));
+ TEST_DO(verify("tensor<bfloat16>(x[3])(double)", "tensor<bfloat16>(x[3])"));
+ TEST_DO(verify("tensor<int8>(x[3])(double)", "tensor<int8>(x[3])"));
TEST_DO(verify("tensor(x[3])(tensor(x[2]))", "error"));
TEST_DO(verify("tensor(x[3])(reduce(tensor(x[2])+tensor(x[4]),sum))", "error"));
}
@@ -273,11 +309,15 @@ TEST("require that tensor peek resolves correct type") {
TEST_DO(verify("tensor<float>(x[3]){x:3}", "error"));
TEST_DO(verify("tensor<float>(x{}){x:1}", "double"));
TEST_DO(verify("tensor<float>(x{}){x:foo}", "double"));
+ TEST_DO(verify("tensor<bfloat16>(x{}){x:foo}", "double"));
+ TEST_DO(verify("tensor<int8>(x{}){x:foo}", "double"));
TEST_DO(verify("tensor<float>(x{}){x:(double)}", "double"));
TEST_DO(verify("tensor<float>(x{}){x:(tensor(x[3]))}", "error"));
TEST_DO(verify("tensor<float>(x{},y[3]){x:foo,y:2}", "double"));
TEST_DO(verify("tensor<float>(x{},y[3]){x:foo}", "tensor<float>(y[3])"));
TEST_DO(verify("tensor<float>(x{},y[3]){y:2}", "tensor<float>(x{})"));
+ TEST_DO(verify("tensor<bfloat16>(x{},y[3]){y:2}", "tensor<bfloat16>(x{})"));
+ TEST_DO(verify("tensor<int8>(x{},y[3]){y:2}", "tensor<int8>(x{})"));
}
TEST("require that tensor concat resolves correct type") {
@@ -290,6 +330,15 @@ TEST("require that tensor concat resolves correct type") {
TEST_DO(verify("concat(tensor<float>(x[2]),tensor<float>(x[3]),x)", "tensor<float>(x[5])"));
TEST_DO(verify("concat(tensor<float>(x[2]),tensor(x[3]),x)", "tensor(x[5])"));
TEST_DO(verify("concat(tensor<float>(x[2]),double,x)", "tensor<float>(x[3])"));
+ TEST_DO(verify("concat(tensor<bfloat16>(x[2]),tensor<bfloat16>(x[3]),x)", "tensor<bfloat16>(x[5])"));
+ TEST_DO(verify("concat(tensor<bfloat16>(x[2]),tensor<float>(x[3]),x)", "tensor<float>(x[5])"));
+ TEST_DO(verify("concat(tensor<bfloat16>(x[2]),tensor(x[3]),x)", "tensor(x[5])"));
+ TEST_DO(verify("concat(tensor<bfloat16>(x[2]),double,x)", "tensor<bfloat16>(x[3])"));
+ TEST_DO(verify("concat(tensor<int8>(x[3]),tensor<int8>(x[2]),x)", "tensor<int8>(x[5])"));
+ TEST_DO(verify("concat(tensor<bfloat16>(x[3]),tensor<int8>(x[2]),x)", "tensor<float>(x[5])"));
+ TEST_DO(verify("concat(tensor<float>(x[3]),tensor<int8>(x[2]),x)", "tensor<float>(x[5])"));
+ TEST_DO(verify("concat(tensor(x[3]),tensor<int8>(x[2]),x)", "tensor(x[5])"));
+ TEST_DO(verify("concat(double,tensor<int8>(x[2]),x)", "tensor<int8>(x[3])"));
}
TEST("require that tensor cell_cast resolves correct type") {
@@ -298,6 +347,8 @@ TEST("require that tensor cell_cast resolves correct type") {
TEST_DO(verify("cell_cast(tensor<double>(x{},y[5]),float)", "tensor<float>(x{},y[5])"));
TEST_DO(verify("cell_cast(tensor<float>(x{},y[5]),double)", "tensor<double>(x{},y[5])"));
TEST_DO(verify("cell_cast(tensor<float>(x{},y[5]),float)", "tensor<float>(x{},y[5])"));
+ TEST_DO(verify("cell_cast(tensor<float>(x{},y[5]),bfloat16)", "tensor<bfloat16>(x{},y[5])"));
+ TEST_DO(verify("cell_cast(tensor<float>(x{},y[5]),int8)", "tensor<int8>(x{},y[5])"));
}
TEST("require that double only expressions can be detected") {
diff --git a/eval/src/tests/eval/tensor_lambda/tensor_lambda_test.cpp b/eval/src/tests/eval/tensor_lambda/tensor_lambda_test.cpp
index 4b17e49fa80..bb971c5df74 100644
--- a/eval/src/tests/eval/tensor_lambda/tensor_lambda_test.cpp
+++ b/eval/src/tests/eval/tensor_lambda/tensor_lambda_test.cpp
@@ -28,12 +28,18 @@ EvalFixture::ParamRepo make_params() {
.add("a", GenSpec(1))
.add("b", GenSpec(2))
.add("x3", GenSpec().idx("x", 3))
- .add("x3f", GenSpec().idx("x", 3).cells_float())
+ .add("x3_float", GenSpec().idx("x", 3).cells(CellType::FLOAT))
+ .add("x3_bfloat16", GenSpec().idx("x", 3).cells(CellType::BFLOAT16))
+ .add("x3_int8", GenSpec().idx("x", 3).cells(CellType::INT8))
.add("x3m", GenSpec().map("x", 3))
.add("x3y5", GenSpec().idx("x", 3).idx("y", 5))
- .add("x3y5f", GenSpec().idx("x", 3).idx("y", 5).cells_float())
+ .add("x3y5_float", GenSpec().idx("x", 3).idx("y", 5).cells(CellType::FLOAT))
+ .add("x3y5_bfloat16", GenSpec().idx("x", 3).idx("y", 5).cells(CellType::BFLOAT16))
+ .add("x3y5_int8", GenSpec().idx("x", 3).idx("y", 5).cells(CellType::INT8))
.add("x15", GenSpec().idx("x", 15))
- .add("x15f", GenSpec().idx("x", 15).cells_float());
+ .add("x15_float", GenSpec().idx("x", 15).cells(CellType::FLOAT))
+ .add("x15_bfloat16", GenSpec().idx("x", 15).cells(CellType::BFLOAT16))
+ .add("x15_int8", GenSpec().idx("x", 15).cells(CellType::INT8));
}
EvalFixture::ParamRepo param_repo = make_params();
@@ -90,8 +96,12 @@ TEST("require that simple constant tensor lambda works") {
}
TEST("require that tensor lambda can be used for cell type casting") {
- TEST_DO(verify_idx_fun("tensor(x[3])(x3f{x:(x)})", "tensor(x[3]):[1,2,3]", "f(x)(x)"));
+ TEST_DO(verify_idx_fun("tensor(x[3])(x3_float{x:(x)})", "tensor(x[3]):[1,2,3]", "f(x)(x)"));
+ TEST_DO(verify_idx_fun("tensor(x[3])(x3_bfloat16{x:(x)})", "tensor(x[3]):[1,2,3]", "f(x)(x)"));
+ TEST_DO(verify_idx_fun("tensor(x[3])(x3_int8{x:(x)})", "tensor(x[3]):[1,2,3]", "f(x)(x)"));
TEST_DO(verify_idx_fun("tensor<float>(x[3])(x3{x:(x)})", "tensor<float>(x[3]):[1,2,3]", "f(x)(x)"));
+ TEST_DO(verify_idx_fun("tensor<bfloat16>(x[3])(x3{x:(x)})", "tensor<bfloat16>(x[3]):[1,2,3]", "f(x)(x)"));
+ TEST_DO(verify_idx_fun("tensor<int8>(x[3])(x3{x:(x)})", "tensor<int8>(x[3]):[1,2,3]", "f(x)(x)"));
}
TEST("require that constant nested tensor lambda using tensor peek works") {
@@ -101,37 +111,51 @@ TEST("require that constant nested tensor lambda using tensor peek works") {
TEST("require that tensor reshape is optimized") {
TEST_DO(verify_reshape("tensor(x[15])(x3y5{x:(x/5),y:(x%5)})", "x15"));
TEST_DO(verify_reshape("tensor(x[3],y[5])(x15{x:(x*5+y)})", "x3y5"));
- TEST_DO(verify_reshape("tensor<float>(x[15])(x3y5f{x:(x/5),y:(x%5)})", "x15f"));
+ TEST_DO(verify_reshape("tensor<float>(x[15])(x3y5_float{x:(x/5),y:(x%5)})", "x15_float"));
+ TEST_DO(verify_reshape("tensor<bfloat16>(x[15])(x3y5_bfloat16{x:(x/5),y:(x%5)})", "x15_bfloat16"));
+ TEST_DO(verify_reshape("tensor<int8>(x[15])(x3y5_int8{x:(x/5),y:(x%5)})", "x15_int8"));
}
TEST("require that tensor reshape with non-matching cell type requires cell copy") {
- TEST_DO(verify_idx_fun("tensor(x[15])(x3y5f{x:(x/5),y:(x%5)})", "x15", "f(x)((floor((x/5))*5)+(x%5))"));
- TEST_DO(verify_idx_fun("tensor<float>(x[15])(x3y5{x:(x/5),y:(x%5)})", "x15f", "f(x)((floor((x/5))*5)+(x%5))"));
- TEST_DO(verify_idx_fun("tensor(x[3],y[5])(x15f{x:(x*5+y)})", "x3y5", "f(x,y)((x*5)+y)"));
- TEST_DO(verify_idx_fun("tensor<float>(x[3],y[5])(x15{x:(x*5+y)})", "x3y5f", "f(x,y)((x*5)+y)"));
+ TEST_DO(verify_idx_fun("tensor(x[15])(x3y5_float{x:(x/5),y:(x%5)})", "x15", "f(x)((floor((x/5))*5)+(x%5))"));
+ TEST_DO(verify_idx_fun("tensor<float>(x[15])(x3y5{x:(x/5),y:(x%5)})", "x15_float", "f(x)((floor((x/5))*5)+(x%5))"));
+ TEST_DO(verify_idx_fun("tensor(x[3],y[5])(x15_float{x:(x*5+y)})", "x3y5", "f(x,y)((x*5)+y)"));
+ TEST_DO(verify_idx_fun("tensor<float>(x[3],y[5])(x15{x:(x*5+y)})", "x3y5_float", "f(x,y)((x*5)+y)"));
+ TEST_DO(verify_idx_fun("tensor<bfloat16>(x[3],y[5])(x15{x:(x*5+y)})", "x3y5_bfloat16", "f(x,y)((x*5)+y)"));
+ TEST_DO(verify_idx_fun("tensor<int8>(x[3],y[5])(x15{x:(x*5+y)})", "x3y5_int8", "f(x,y)((x*5)+y)"));
}
TEST("require that tensor cell subrange view is optimized") {
TEST_DO(verify_range("tensor(y[5])(x3y5{x:1,y:(y)})", "x3y5{x:1}"));
TEST_DO(verify_range("tensor(x[3])(x15{x:(x+5)})", "tensor(x[3]):[6,7,8]"));
- TEST_DO(verify_range("tensor<float>(y[5])(x3y5f{x:1,y:(y)})", "x3y5f{x:1}"));
- TEST_DO(verify_range("tensor<float>(x[3])(x15f{x:(x+5)})", "tensor<float>(x[3]):[6,7,8]"));
+ TEST_DO(verify_range("tensor<float>(y[5])(x3y5_float{x:1,y:(y)})", "x3y5_float{x:1}"));
+ TEST_DO(verify_range("tensor<float>(x[3])(x15_float{x:(x+5)})", "tensor<float>(x[3]):[6,7,8]"));
+ TEST_DO(verify_range("tensor<float>(x[3])(x15_float{x:(x+5)})", "tensor<float>(x[3]):[6,7,8]"));
+ TEST_DO(verify_range("tensor<bfloat16>(x[3])(x15_bfloat16{x:(x+5)})", "tensor<bfloat16>(x[3]):[6,7,8]"));
+ TEST_DO(verify_range("tensor<int8>(x[3])(x15_int8{x:(x+5)})", "tensor<int8>(x[3]):[6,7,8]"));
}
TEST("require that tensor cell subrange with non-matching cell type requires cell copy") {
- TEST_DO(verify_idx_fun("tensor(x[3])(x15f{x:(x+5)})", "tensor(x[3]):[6,7,8]", "f(x)(x+5)"));
+ TEST_DO(verify_idx_fun("tensor(x[3])(x15_float{x:(x+5)})", "tensor(x[3]):[6,7,8]", "f(x)(x+5)"));
TEST_DO(verify_idx_fun("tensor<float>(x[3])(x15{x:(x+5)})", "tensor<float>(x[3]):[6,7,8]", "f(x)(x+5)"));
+ TEST_DO(verify_idx_fun("tensor<bfloat16>(x[3])(x15{x:(x+5)})", "tensor<bfloat16>(x[3]):[6,7,8]", "f(x)(x+5)"));
+ TEST_DO(verify_idx_fun("tensor<int8>(x[3])(x15{x:(x+5)})", "tensor<int8>(x[3]):[6,7,8]", "f(x)(x+5)"));
}
TEST("require that non-continuous cell extraction is optimized") {
TEST_DO(verify_idx_fun("tensor(x[3])(x3y5{x:(x),y:2})", "x3y5{y:2}", "f(x)((floor(x)*5)+2)"));
- TEST_DO(verify_idx_fun("tensor(x[3])(x3y5f{x:(x),y:2})", "x3y5{y:2}", "f(x)((floor(x)*5)+2)"));
- TEST_DO(verify_idx_fun("tensor<float>(x[3])(x3y5{x:(x),y:2})", "x3y5f{y:2}", "f(x)((floor(x)*5)+2)"));
- TEST_DO(verify_idx_fun("tensor<float>(x[3])(x3y5f{x:(x),y:2})", "x3y5f{y:2}", "f(x)((floor(x)*5)+2)"));
+ TEST_DO(verify_idx_fun("tensor(x[3])(x3y5_float{x:(x),y:2})", "x3y5{y:2}", "f(x)((floor(x)*5)+2)"));
+ TEST_DO(verify_idx_fun("tensor<float>(x[3])(x3y5{x:(x),y:2})", "x3y5_float{y:2}", "f(x)((floor(x)*5)+2)"));
+ TEST_DO(verify_idx_fun("tensor<float>(x[3])(x3y5_float{x:(x),y:2})", "x3y5_float{y:2}", "f(x)((floor(x)*5)+2)"));
+ TEST_DO(verify_idx_fun("tensor<bfloat16>(x[3])(x3y5_bfloat16{x:(x),y:2})", "x3y5_bfloat16{y:2}", "f(x)((floor(x)*5)+2)"));
+ TEST_DO(verify_idx_fun("tensor<int8>(x[3])(x3y5_int8{x:(x),y:2})", "x3y5_int8{y:2}", "f(x)((floor(x)*5)+2)"));
}
TEST("require that simple dynamic tensor lambda works") {
TEST_DO(verify_generic("tensor(x[3])(x+a)", "tensor(x[3]):[1,2,3]"));
+ TEST_DO(verify_generic("tensor<float>(x[3])(x+a)", "tensor<float>(x[3]):[1,2,3]"));
+ TEST_DO(verify_generic("tensor<bfloat16>(x[3])(x+a)", "tensor<bfloat16>(x[3]):[1,2,3]"));
+ TEST_DO(verify_generic("tensor<int8>(x[3])(x+a)", "tensor<int8>(x[3]):[1,2,3]"));
}
TEST("require that compiled multi-dimensional multi-param dynamic tensor lambda works") {
diff --git a/eval/src/tests/eval/tensor_spec/tensor_spec_test.cpp b/eval/src/tests/eval/tensor_spec/tensor_spec_test.cpp
index 7fad8f65e17..aa62e52f6f4 100644
--- a/eval/src/tests/eval/tensor_spec/tensor_spec_test.cpp
+++ b/eval/src/tests/eval/tensor_spec/tensor_spec_test.cpp
@@ -19,4 +19,17 @@ TEST("require that a tensor spec can be converted to and from slime") {
EXPECT_EQUAL(TensorSpec::from_slime(slime.get()), spec);
}
+TEST("require that tensor specs can be diffed") {
+ TensorSpec expect("tensor(x[2],y{})");
+ expect.add({{"x", 0}, {"y", "xxx"}}, 1.5)
+ .add({{"x", 0}, {"y", "yyy"}}, 2.0)
+ .add({{"x", 1}, {"y", "yyy"}}, 4.0);
+ TensorSpec actual("tensor<float>(x[2],y{})");
+ actual.add({{"x", 0}, {"y", "xxx"}}, 1.0)
+ .add({{"x", 0}, {"y", "yyy"}}, 2.0)
+ .add({{"x", 1}, {"y", "xxx"}}, 3.0);
+ EXPECT_TRUE(!(expect == actual));
+ fprintf(stderr, "tensor spec diff:\n%s", TensorSpec::diff(expect, "expect", actual, "actual").c_str());
+}
+
TEST_MAIN() { TEST_RUN_ALL(); }
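
TensorSpec::diff is what the conformance tool now prints when an actual result does not match its reference (see the verify changes earlier in this diff). A minimal usage sketch, assuming only the TensorSpec API exercised in the test above:

    // Build two specs that differ in one cell and print a labeled diff.
    TensorSpec expected("tensor(x[2])");
    expected.add({{"x", 0}}, 1.0).add({{"x", 1}}, 2.0);
    TensorSpec actual("tensor(x[2])");
    actual.add({{"x", 0}}, 1.0).add({{"x", 1}}, 2.5);
    if (!(expected == actual)) {
        fprintf(stderr, "%s", TensorSpec::diff(expected, "expected", actual, "actual").c_str());
    }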
diff --git a/eval/src/tests/eval/value_type/value_type_test.cpp b/eval/src/tests/eval/value_type/value_type_test.cpp
index a2b25a12b4b..9aec613f507 100644
--- a/eval/src/tests/eval/value_type/value_type_test.cpp
+++ b/eval/src/tests/eval/value_type/value_type_test.cpp
@@ -2,10 +2,13 @@
#include <vespa/vespalib/testkit/test_kit.h>
#include <vespa/eval/eval/value_type.h>
#include <vespa/eval/eval/value_type_spec.h>
+#include <vespa/eval/eval/int8float.h>
+#include <vespa/vespalib/util/bfloat16.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/test/insertion_operators.h>
#include <ostream>
+using vespalib::BFloat16;
using namespace vespalib::eval;
const size_t npos = ValueType::Dimension::npos;
@@ -58,6 +61,28 @@ TEST("require that float TENSOR value type can be created") {
EXPECT_EQUAL(t.dimensions()[1].size, npos);
}
+TEST("require that bfloat16 TENSOR value type can be created") {
+ ValueType t = ValueType::make_type(CellType::BFLOAT16, {{"x", 10},{"y"}});
+ EXPECT_FALSE(t.is_error());
+ EXPECT_TRUE(t.cell_type() == CellType::BFLOAT16);
+ ASSERT_EQUAL(t.dimensions().size(), 2u);
+ EXPECT_EQUAL(t.dimensions()[0].name, "x");
+ EXPECT_EQUAL(t.dimensions()[0].size, 10u);
+ EXPECT_EQUAL(t.dimensions()[1].name, "y");
+ EXPECT_EQUAL(t.dimensions()[1].size, npos);
+}
+
+TEST("require that int8 TENSOR value type can be created") {
+ ValueType t = ValueType::make_type(CellType::INT8, {{"x", 10},{"y"}});
+ EXPECT_FALSE(t.is_error());
+ EXPECT_TRUE(t.cell_type() == CellType::INT8);
+ ASSERT_EQUAL(t.dimensions().size(), 2u);
+ EXPECT_EQUAL(t.dimensions()[0].name, "x");
+ EXPECT_EQUAL(t.dimensions()[0].size, 10u);
+ EXPECT_EQUAL(t.dimensions()[1].name, "y");
+ EXPECT_EQUAL(t.dimensions()[1].size, npos);
+}
+
TEST("require that TENSOR value type sorts dimensions") {
ValueType t = ValueType::make_type(CellType::DOUBLE, {{"x", 10}, {"z", 30}, {"y"}});
EXPECT_FALSE(t.is_error());
@@ -73,6 +98,8 @@ TEST("require that TENSOR value type sorts dimensions") {
TEST("require that non-double scalar values are not allowed") {
EXPECT_TRUE(ValueType::make_type(CellType::FLOAT, {}).is_error());
+ EXPECT_TRUE(ValueType::make_type(CellType::BFLOAT16, {}).is_error());
+ EXPECT_TRUE(ValueType::make_type(CellType::INT8, {}).is_error());
}
TEST("require that use of zero-size dimensions result in error types") {
@@ -116,7 +143,12 @@ TEST("require that value types can be compared") {
TEST_DO(verify_not_equal(ValueType::make_type(CellType::DOUBLE, {{"x", 10}, {"y", 20}}), ValueType::make_type(CellType::DOUBLE, {{"x", 10}, {"y", 10}})));
TEST_DO(verify_not_equal(ValueType::make_type(CellType::DOUBLE, {{"x", 10}}), ValueType::make_type(CellType::DOUBLE, {{"x"}})));
TEST_DO(verify_equal(ValueType::make_type(CellType::FLOAT, {{"x", 10}}), ValueType::make_type(CellType::FLOAT, {{"x", 10}})));
+ TEST_DO(verify_equal(ValueType::make_type(CellType::BFLOAT16, {{"x", 10}}), ValueType::make_type(CellType::BFLOAT16, {{"x", 10}})));
+ TEST_DO(verify_equal(ValueType::make_type(CellType::INT8, {{"x", 10}}), ValueType::make_type(CellType::INT8, {{"x", 10}})));
TEST_DO(verify_not_equal(ValueType::make_type(CellType::DOUBLE, {{"x", 10}}), ValueType::make_type(CellType::FLOAT, {{"x", 10}})));
+ TEST_DO(verify_not_equal(ValueType::make_type(CellType::FLOAT, {{"x", 10}}), ValueType::make_type(CellType::BFLOAT16, {{"x", 10}})));
+ TEST_DO(verify_not_equal(ValueType::make_type(CellType::FLOAT, {{"x", 10}}), ValueType::make_type(CellType::INT8, {{"x", 10}})));
+ TEST_DO(verify_not_equal(ValueType::make_type(CellType::BFLOAT16, {{"x", 10}}), ValueType::make_type(CellType::INT8, {{"x", 10}})));
}
//-----------------------------------------------------------------------------
@@ -125,6 +157,8 @@ TEST("require that value type can make spec") {
EXPECT_EQUAL("error", ValueType::error_type().to_spec());
EXPECT_EQUAL("double", ValueType::double_type().to_spec());
EXPECT_EQUAL("error", ValueType::make_type(CellType::FLOAT, {}).to_spec());
+ EXPECT_EQUAL("error", ValueType::make_type(CellType::BFLOAT16, {}).to_spec());
+ EXPECT_EQUAL("error", ValueType::make_type(CellType::INT8, {}).to_spec());
EXPECT_EQUAL("double", ValueType::make_type(CellType::DOUBLE, {}).to_spec());
EXPECT_EQUAL("tensor(x{})", ValueType::make_type(CellType::DOUBLE, {{"x"}}).to_spec());
EXPECT_EQUAL("tensor(y[10])", ValueType::make_type(CellType::DOUBLE, {{"y", 10}}).to_spec());
@@ -132,6 +166,12 @@ TEST("require that value type can make spec") {
EXPECT_EQUAL("tensor<float>(x{})", ValueType::make_type(CellType::FLOAT, {{"x"}}).to_spec());
EXPECT_EQUAL("tensor<float>(y[10])", ValueType::make_type(CellType::FLOAT, {{"y", 10}}).to_spec());
EXPECT_EQUAL("tensor<float>(x{},y[10],z[5])", ValueType::make_type(CellType::FLOAT, {{"x"}, {"y", 10}, {"z", 5}}).to_spec());
+ EXPECT_EQUAL("tensor<bfloat16>(x{})", ValueType::make_type(CellType::BFLOAT16, {{"x"}}).to_spec());
+ EXPECT_EQUAL("tensor<bfloat16>(y[10])", ValueType::make_type(CellType::BFLOAT16, {{"y", 10}}).to_spec());
+ EXPECT_EQUAL("tensor<bfloat16>(x{},y[10],z[5])", ValueType::make_type(CellType::BFLOAT16, {{"x"}, {"y", 10}, {"z", 5}}).to_spec());
+ EXPECT_EQUAL("tensor<int8>(x{})", ValueType::make_type(CellType::INT8, {{"x"}}).to_spec());
+ EXPECT_EQUAL("tensor<int8>(y[10])", ValueType::make_type(CellType::INT8, {{"y", 10}}).to_spec());
+ EXPECT_EQUAL("tensor<int8>(x{},y[10],z[5])", ValueType::make_type(CellType::INT8, {{"x"}, {"y", 10}, {"z", 5}}).to_spec());
}
//-----------------------------------------------------------------------------
@@ -145,6 +185,8 @@ TEST("require that value type spec can be parsed") {
EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {{"x"}, {"y", 10}, {"z", 5}}), type("tensor(x{},y[10],z[5])"));
EXPECT_EQUAL(ValueType::make_type(CellType::DOUBLE, {{"y", 10}}), type("tensor<double>(y[10])"));
EXPECT_EQUAL(ValueType::make_type(CellType::FLOAT, {{"y", 10}}), type("tensor<float>(y[10])"));
+ EXPECT_EQUAL(ValueType::make_type(CellType::BFLOAT16, {{"y", 10}}), type("tensor<bfloat16>(y[10])"));
+ EXPECT_EQUAL(ValueType::make_type(CellType::INT8, {{"y", 10}}), type("tensor<int8>(y[10])"));
}
TEST("require that value type spec can be parsed with extra whitespace") {
@@ -196,6 +238,8 @@ TEST("require that malformed value type spec is parsed as error") {
EXPECT_TRUE(ValueType::from_spec("error").is_error());
EXPECT_TRUE(ValueType::from_spec("any").is_error());
EXPECT_TRUE(ValueType::from_spec("float").is_error());
+ EXPECT_TRUE(ValueType::from_spec("bfloat16").is_error());
+ EXPECT_TRUE(ValueType::from_spec("int8").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor<double>").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor() tensor()").is_error());
@@ -214,6 +258,8 @@ TEST("require that malformed value type spec is parsed as error") {
EXPECT_TRUE(ValueType::from_spec("tensor(x{},x[])").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor(z[])").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor<float>()").is_error());
+ EXPECT_TRUE(ValueType::from_spec("tensor<bfloat16>()").is_error());
+ EXPECT_TRUE(ValueType::from_spec("tensor<int8>()").is_error());
EXPECT_TRUE(ValueType::from_spec("tensor<int7>(x[10])").is_error());
}
@@ -257,12 +303,16 @@ TEST("require that value types preserve cell type") {
EXPECT_TRUE(type("tensor(x[10])").cell_type() == CellType::DOUBLE);
EXPECT_TRUE(type("tensor<double>(x[10])").cell_type() == CellType::DOUBLE);
EXPECT_TRUE(type("tensor<float>(x[10])").cell_type() == CellType::FLOAT);
+ EXPECT_TRUE(type("tensor<bfloat16>(x[10])").cell_type() == CellType::BFLOAT16);
+ EXPECT_TRUE(type("tensor<int8>(x[10])").cell_type() == CellType::INT8);
}
TEST("require that dimension names can be obtained") {
EXPECT_EQUAL(type("double").dimension_names(), str_list({}));
EXPECT_EQUAL(type("tensor(y[30],x[10])").dimension_names(), str_list({"x", "y"}));
EXPECT_EQUAL(type("tensor<float>(y[10],x[30],z{})").dimension_names(), str_list({"x", "y", "z"}));
+ EXPECT_EQUAL(type("tensor<bfloat16>(y[10],x[30],z{})").dimension_names(), str_list({"x", "y", "z"}));
+ EXPECT_EQUAL(type("tensor<int8>(y[10],x[30],z{})").dimension_names(), str_list({"x", "y", "z"}));
}
TEST("require that nontrivial indexed dimensions can be obtained") {
@@ -295,6 +345,8 @@ TEST("require that dimension index can be obtained") {
EXPECT_EQUAL(type("tensor()").dimension_index("x"), ValueType::Dimension::npos);
EXPECT_EQUAL(type("tensor(y[10],x{},z[5])").dimension_index("x"), 0u);
EXPECT_EQUAL(type("tensor<float>(y[10],x{},z[5])").dimension_index("y"), 1u);
+ EXPECT_EQUAL(type("tensor<bfloat16>(y[10],x{},z[5])").dimension_index("y"), 1u);
+ EXPECT_EQUAL(type("tensor<int8>(y[10],x{},z[5])").dimension_index("y"), 1u);
EXPECT_EQUAL(type("tensor(y[10],x{},z[5])").dimension_index("z"), 2u);
EXPECT_EQUAL(type("tensor(y[10],x{},z[5])").dimension_index("w"), ValueType::Dimension::npos);
}
@@ -322,6 +374,12 @@ TEST("require that type-related predicate functions work as expected") {
TEST_DO(verify_predicates(type("tensor<float>(x{})"), false, false, true, true, false));
TEST_DO(verify_predicates(type("tensor<float>(x[5])"), false, false, true, false, true));
TEST_DO(verify_predicates(type("tensor<float>(x[5],y{})"), false, false, true, false, false));
+ TEST_DO(verify_predicates(type("tensor<bfloat16>(x{})"), false, false, true, true, false));
+ TEST_DO(verify_predicates(type("tensor<bfloat16>(x[5])"), false, false, true, false, true));
+ TEST_DO(verify_predicates(type("tensor<bfloat16>(x[5],y{})"), false, false, true, false, false));
+ TEST_DO(verify_predicates(type("tensor<int8>(x{})"), false, false, true, true, false));
+ TEST_DO(verify_predicates(type("tensor<int8>(x[5])"), false, false, true, false, true));
+ TEST_DO(verify_predicates(type("tensor<int8>(x[5],y{})"), false, false, true, false, false));
}
TEST("require that mapped and indexed dimensions can be counted") {
@@ -349,6 +407,12 @@ TEST("require that dense subspace size calculation works as expected") {
EXPECT_EQUAL(type("tensor<float>(x{})").dense_subspace_size(), 1u);
EXPECT_EQUAL(type("tensor<float>(x[5])").dense_subspace_size(), 5u);
EXPECT_EQUAL(type("tensor<float>(x[5],y{})").dense_subspace_size(), 5u);
+ EXPECT_EQUAL(type("tensor<bfloat16>(x{})").dense_subspace_size(), 1u);
+ EXPECT_EQUAL(type("tensor<bfloat16>(x[5])").dense_subspace_size(), 5u);
+ EXPECT_EQUAL(type("tensor<bfloat16>(x[5],y{})").dense_subspace_size(), 5u);
+ EXPECT_EQUAL(type("tensor<int8>(x{})").dense_subspace_size(), 1u);
+ EXPECT_EQUAL(type("tensor<int8>(x[5])").dense_subspace_size(), 5u);
+ EXPECT_EQUAL(type("tensor<int8>(x[5],y{})").dense_subspace_size(), 5u);
}
TEST("require that dimension predicates work as expected") {
@@ -363,32 +427,51 @@ TEST("require that dimension predicates work as expected") {
EXPECT_TRUE(z.is_indexed());
}
-TEST("require that removing dimensions from non-tensor types gives error type") {
+TEST("require that value type map decays cell type") {
+ EXPECT_EQUAL(type("tensor(x[10])").map(), type("tensor(x[10])"));
+ EXPECT_EQUAL(type("tensor<float>(x[10])").map(), type("tensor<float>(x[10])"));
+ EXPECT_EQUAL(type("tensor<bfloat16>(x[10])").map(), type("tensor<float>(x[10])"));
+ EXPECT_EQUAL(type("tensor<int8>(x[10])").map(), type("tensor<float>(x[10])"));
+}
+
+TEST("require that reducing dimensions from non-tensor types gives error type") {
EXPECT_TRUE(type("error").reduce({"x"}).is_error());
EXPECT_TRUE(type("double").reduce({"x"}).is_error());
}
-TEST("require that dimensions can be removed from tensor value types") {
+TEST("require that a scalar value can be fully reduced to a scalar value") {
+ EXPECT_EQUAL(type("double").reduce({}), type("double"));
+}
+
+TEST("require that tensor value types can be reduced") {
EXPECT_EQUAL(type("tensor(x[10],y[20],z[30])").reduce({"x"}), type("tensor(y[20],z[30])"));
EXPECT_EQUAL(type("tensor(x[10],y[20],z[30])").reduce({"y"}), type("tensor(x[10],z[30])"));
EXPECT_EQUAL(type("tensor<float>(x[10],y[20],z[30])").reduce({"z"}), type("tensor<float>(x[10],y[20])"));
+ EXPECT_EQUAL(type("tensor<bfloat16>(x[10],y[20],z[30])").reduce({"z"}), type("tensor<float>(x[10],y[20])"));
+ EXPECT_EQUAL(type("tensor<int8>(x[10],y[20],z[30])").reduce({"z"}), type("tensor<float>(x[10],y[20])"));
EXPECT_EQUAL(type("tensor(x[10],y[20],z[30])").reduce({"x", "z"}), type("tensor(y[20])"));
EXPECT_EQUAL(type("tensor<float>(x[10],y[20],z[30])").reduce({"z", "x"}), type("tensor<float>(y[20])"));
+ EXPECT_EQUAL(type("tensor<bfloat16>(x[10],y[20],z[30])").reduce({"z", "x"}), type("tensor<float>(y[20])"));
+ EXPECT_EQUAL(type("tensor<int8>(x[10],y[20],z[30])").reduce({"z", "x"}), type("tensor<float>(y[20])"));
}
-TEST("require that removing an empty set of dimensions means removing them all") {
+TEST("require that reducing an empty set of dimensions means reducing them all") {
EXPECT_EQUAL(type("tensor(x[10],y[20],z[30])").reduce({}), type("double"));
EXPECT_EQUAL(type("tensor<float>(x[10],y[20],z[30])").reduce({}), type("double"));
+ EXPECT_EQUAL(type("tensor<bfloat16>(x[10],y[20],z[30])").reduce({}), type("double"));
+ EXPECT_EQUAL(type("tensor<int8>(x[10],y[20],z[30])").reduce({}), type("double"));
}
-TEST("require that removing non-existing dimensions gives error type") {
+TEST("require that reducing non-existing dimensions gives error type") {
EXPECT_TRUE(type("tensor(y{})").reduce({"x"}).is_error());
EXPECT_TRUE(type("tensor<float>(y[10])").reduce({"x"}).is_error());
}
-TEST("require that removing all dimensions gives double type") {
+TEST("require that reducing all dimensions gives double type") {
EXPECT_EQUAL(type("tensor(x[10],y[20],z[30])").reduce({"x", "y", "z"}), type("double"));
EXPECT_EQUAL(type("tensor<float>(x[10],y[20],z[30])").reduce({"x", "y", "z"}), type("double"));
+ EXPECT_EQUAL(type("tensor<bfloat16>(x[10],y[20],z[30])").reduce({"x", "y", "z"}), type("double"));
+ EXPECT_EQUAL(type("tensor<int8>(x[10],y[20],z[30])").reduce({"x", "y", "z"}), type("double"));
}
void verify_join(const ValueType &a, const ValueType b, const ValueType &res) {
@@ -407,9 +490,20 @@ TEST("require that dimensions can be combined for value types") {
}
TEST("require that cell type is handled correctly for join") {
- TEST_DO(verify_join(type("tensor(x{})"), type("tensor<float>(y{})"), type("tensor(x{},y{})")));
- TEST_DO(verify_join(type("tensor<float>(x{})"), type("tensor<float>(y{})"), type("tensor<float>(x{},y{})")));
+ TEST_DO(verify_join(type("tensor(x{})"), type("tensor(y{})"), type("tensor(x{},y{})")));
+ TEST_DO(verify_join(type("tensor(x{})"), type("tensor<float>(y{})"), type("tensor(x{},y{})")));
+ TEST_DO(verify_join(type("tensor(x{})"), type("tensor<bfloat16>(y{})"), type("tensor(x{},y{})")));
+ TEST_DO(verify_join(type("tensor(x{})"), type("tensor<int8>(y{})"), type("tensor(x{},y{})")));
+ TEST_DO(verify_join(type("tensor<float>(x{})"), type("tensor<float>(y{})"), type("tensor<float>(x{},y{})")));
+ TEST_DO(verify_join(type("tensor<float>(x{})"), type("tensor<bfloat16>(y{})"), type("tensor<float>(x{},y{})")));
+ TEST_DO(verify_join(type("tensor<float>(x{})"), type("tensor<int8>(y{})"), type("tensor<float>(x{},y{})")));
+ TEST_DO(verify_join(type("tensor<bfloat16>(x{})"), type("tensor<bfloat16>(y{})"), type("tensor<float>(x{},y{})")));
+ TEST_DO(verify_join(type("tensor<bfloat16>(x{})"), type("tensor<int8>(y{})"), type("tensor<float>(x{},y{})")));
+ TEST_DO(verify_join(type("tensor<int8>(x{})"), type("tensor<int8>(y{})"), type("tensor<float>(x{},y{})")));
+ TEST_DO(verify_join(type("tensor(x{})"), type("double"), type("tensor(x{})")));
TEST_DO(verify_join(type("tensor<float>(x{})"), type("double"), type("tensor<float>(x{})")));
+ TEST_DO(verify_join(type("tensor<bfloat16>(x{})"), type("double"), type("tensor<float>(x{})")));
+ TEST_DO(verify_join(type("tensor<int8>(x{})"), type("double"), type("tensor<float>(x{})")));
}
void verify_not_joinable(const ValueType &a, const ValueType &b) {
@@ -444,14 +538,32 @@ TEST("require that tensor dimensions can be renamed") {
EXPECT_EQUAL(type("error").rename({"a"}, {"b"}), type("error"));
}
+TEST("require that dimension rename preserves cell type") {
+ EXPECT_EQUAL(type("tensor(x{})").rename({"x"}, {"y"}), type("tensor(y{})"));
+ EXPECT_EQUAL(type("tensor<float>(x{})").rename({"x"}, {"y"}), type("tensor<float>(y{})"));
+ EXPECT_EQUAL(type("tensor<bfloat16>(x{})").rename({"x"}, {"y"}), type("tensor<bfloat16>(y{})"));
+ EXPECT_EQUAL(type("tensor<int8>(x{})").rename({"x"}, {"y"}), type("tensor<int8>(y{})"));
+}
+
+void verify_merge(const ValueType &a, const ValueType &b, const ValueType &res) {
+ EXPECT_EQUAL(ValueType::merge(a, b), res);
+ EXPECT_EQUAL(ValueType::merge(b, a), res);
+}
+
TEST("require that similar types can be merged") {
- EXPECT_EQUAL(ValueType::merge(type("error"), type("error")), type("error"));
- EXPECT_EQUAL(ValueType::merge(type("double"), type("double")), type("double"));
- EXPECT_EQUAL(ValueType::merge(type("tensor(x[5])"), type("tensor(x[5])")), type("tensor(x[5])"));
- EXPECT_EQUAL(ValueType::merge(type("tensor<float>(x[5])"), type("tensor(x[5])")), type("tensor(x[5])"));
- EXPECT_EQUAL(ValueType::merge(type("tensor(x[5])"), type("tensor<float>(x[5])")), type("tensor(x[5])"));
- EXPECT_EQUAL(ValueType::merge(type("tensor<float>(x[5])"), type("tensor<float>(x[5])")), type("tensor<float>(x[5])"));
- EXPECT_EQUAL(ValueType::merge(type("tensor(x{})"), type("tensor(x{})")), type("tensor(x{})"));
+ TEST_DO(verify_merge(type("error"), type("error"), type("error")));
+ TEST_DO(verify_merge(type("double"), type("double"), type("double")));
+ TEST_DO(verify_merge(type("tensor(x[5])"), type("tensor(x[5])"), type("tensor(x[5])")));
+ TEST_DO(verify_merge(type("tensor(x[5])"), type("tensor<float>(x[5])"), type("tensor(x[5])")));
+ TEST_DO(verify_merge(type("tensor(x[5])"), type("tensor<bfloat16>(x[5])"), type("tensor(x[5])")));
+ TEST_DO(verify_merge(type("tensor(x[5])"), type("tensor<int8>(x[5])"), type("tensor(x[5])")));
+ TEST_DO(verify_merge(type("tensor<float>(x[5])"), type("tensor<float>(x[5])"), type("tensor<float>(x[5])")));
+ TEST_DO(verify_merge(type("tensor<float>(x[5])"), type("tensor<bfloat16>(x[5])"), type("tensor<float>(x[5])")));
+ TEST_DO(verify_merge(type("tensor<float>(x[5])"), type("tensor<int8>(x[5])"), type("tensor<float>(x[5])")));
+ TEST_DO(verify_merge(type("tensor<bfloat16>(x[5])"), type("tensor<bfloat16>(x[5])"), type("tensor<float>(x[5])")));
+ TEST_DO(verify_merge(type("tensor<bfloat16>(x[5])"), type("tensor<int8>(x[5])"), type("tensor<float>(x[5])")));
+ TEST_DO(verify_merge(type("tensor<int8>(x[5])"), type("tensor<int8>(x[5])"), type("tensor<float>(x[5])")));
+ TEST_DO(verify_merge(type("tensor(x{})"), type("tensor(x{})"), type("tensor(x{})")));
}
TEST("require that diverging types can not be merged") {
@@ -463,7 +575,7 @@ TEST("require that diverging types can not be merged") {
EXPECT_EQUAL(ValueType::merge(type("tensor(x{})"), type("tensor(y{})")), type("error"));
}
-void verify_concat(const ValueType &a, const ValueType b, const vespalib::string &dim, const ValueType &res) {
+void verify_concat(const ValueType &a, const ValueType &b, const vespalib::string &dim, const ValueType &res) {
EXPECT_EQUAL(ValueType::concat(a, b, dim), res);
EXPECT_EQUAL(ValueType::concat(b, a, dim), res);
}
@@ -486,9 +598,23 @@ TEST("require that types can be concatenated") {
}
TEST("require that cell type is handled correctly for concat") {
- TEST_DO(verify_concat(type("tensor<float>(x[3])"), type("tensor(x[2])"), "x", type("tensor(x[5])")));
- TEST_DO(verify_concat(type("tensor<float>(x[3])"), type("tensor<float>(x[2])"), "x", type("tensor<float>(x[5])")));
+ TEST_DO(verify_concat(type("tensor(x[3])"), type("tensor(x[2])"), "x", type("tensor(x[5])")));
+ TEST_DO(verify_concat(type("tensor(x[3])"), type("tensor<float>(x[2])"), "x", type("tensor(x[5])")));
+ TEST_DO(verify_concat(type("tensor(x[3])"), type("tensor<bfloat16>(x[2])"), "x", type("tensor(x[5])")));
+ TEST_DO(verify_concat(type("tensor(x[3])"), type("tensor<int8>(x[2])"), "x", type("tensor(x[5])")));
+ TEST_DO(verify_concat(type("tensor<float>(x[3])"), type("tensor<float>(x[2])"), "x", type("tensor<float>(x[5])")));
+ TEST_DO(verify_concat(type("tensor<float>(x[3])"), type("tensor<bfloat16>(x[2])"), "x", type("tensor<float>(x[5])")));
+ TEST_DO(verify_concat(type("tensor<float>(x[3])"), type("tensor<int8>(x[2])"), "x", type("tensor<float>(x[5])")));
+ TEST_DO(verify_concat(type("tensor<bfloat16>(x[3])"), type("tensor<bfloat16>(x[2])"), "x", type("tensor<bfloat16>(x[5])")));
+ TEST_DO(verify_concat(type("tensor<bfloat16>(x[3])"), type("tensor<int8>(x[2])"), "x", type("tensor<float>(x[5])")));
+ TEST_DO(verify_concat(type("tensor<int8>(x[3])"), type("tensor<int8>(x[2])"), "x", type("tensor<int8>(x[5])")));
+}
+
+TEST("require that concat with number preserves cell type") {
+ TEST_DO(verify_concat(type("tensor(x[3])"), type("double"), "x", type("tensor(x[4])")));
TEST_DO(verify_concat(type("tensor<float>(x[3])"), type("double"), "x", type("tensor<float>(x[4])")));
+ TEST_DO(verify_concat(type("tensor<bfloat16>(x[3])"), type("double"), "x", type("tensor<bfloat16>(x[4])")));
+ TEST_DO(verify_concat(type("tensor<int8>(x[3])"), type("double"), "x", type("tensor<int8>(x[4])")));
}
void verify_cell_cast(const ValueType &type) {
@@ -514,12 +640,18 @@ void verify_cell_cast(const ValueType &type) {
TEST("require that value type cell cast works correctly") {
TEST_DO(verify_cell_cast(type("error")));
TEST_DO(verify_cell_cast(type("double")));
- TEST_DO(verify_cell_cast(type("tensor<float>(x[10])")));
TEST_DO(verify_cell_cast(type("tensor<double>(x[10])")));
- TEST_DO(verify_cell_cast(type("tensor<float>(x{})")));
+ TEST_DO(verify_cell_cast(type("tensor<float>(x[10])")));
+ TEST_DO(verify_cell_cast(type("tensor<bfloat16>(x[10])")));
+ TEST_DO(verify_cell_cast(type("tensor<int8>(x[10])")));
TEST_DO(verify_cell_cast(type("tensor<double>(x{})")));
- TEST_DO(verify_cell_cast(type("tensor<float>(x{},y[5])")));
+ TEST_DO(verify_cell_cast(type("tensor<float>(x{})")));
+ TEST_DO(verify_cell_cast(type("tensor<bfloat16>(x{})")));
+ TEST_DO(verify_cell_cast(type("tensor<int8>(x{})")));
TEST_DO(verify_cell_cast(type("tensor<double>(x{},y[5])")));
+ TEST_DO(verify_cell_cast(type("tensor<float>(x{},y[5])")));
+ TEST_DO(verify_cell_cast(type("tensor<bfloat16>(x{},y[5])")));
+ TEST_DO(verify_cell_cast(type("tensor<int8>(x{},y[5])")));
}
TEST("require that actual cell type can be converted to cell type name") {
@@ -561,14 +693,32 @@ TEST("require that peek type inference works as expected") {
EXPECT_EQUAL(input2.peek({"a", "b", "c", "d", "e"}), type("double"));
}
+TEST("require that non-scalar peek preserves cell type") {
+ EXPECT_EQUAL(type("tensor(x[3],y[5])").peek({"x"}), type("tensor(y[5])"));
+ EXPECT_EQUAL(type("tensor<float>(x[3],y[5])").peek({"x"}), type("tensor<float>(y[5])"));
+ EXPECT_EQUAL(type("tensor<bfloat16>(x[3],y[5])").peek({"x"}), type("tensor<bfloat16>(y[5])"));
+ EXPECT_EQUAL(type("tensor<int8>(x[3],y[5])").peek({"x"}), type("tensor<int8>(y[5])"));
+}
+
+TEST("require that scalar peek is always double") {
+ EXPECT_EQUAL(type("tensor(x[3],y[5])").peek({"x", "y"}), type("double"));
+ EXPECT_EQUAL(type("tensor<float>(x[3],y[5])").peek({"x", "y"}), type("double"));
+ EXPECT_EQUAL(type("tensor<bfloat16>(x[3],y[5])").peek({"x", "y"}), type("double"));
+ EXPECT_EQUAL(type("tensor<int8>(x[3],y[5])").peek({"x", "y"}), type("double"));
+}
+
TEST("require that cell alignment can be obtained") {
EXPECT_EQUAL(CellTypeUtils::alignment(CellType::DOUBLE), alignof(double));
EXPECT_EQUAL(CellTypeUtils::alignment(CellType::FLOAT), alignof(float));
+ EXPECT_EQUAL(CellTypeUtils::alignment(CellType::BFLOAT16), alignof(BFloat16));
+ EXPECT_EQUAL(CellTypeUtils::alignment(CellType::INT8), alignof(Int8Float));
}
TEST("require that cell array size can be calculated") {
EXPECT_EQUAL(CellTypeUtils::mem_size(CellType::DOUBLE, 37), 37 * sizeof(double));
EXPECT_EQUAL(CellTypeUtils::mem_size(CellType::FLOAT, 37), 37 * sizeof(float));
+ EXPECT_EQUAL(CellTypeUtils::mem_size(CellType::BFLOAT16, 37), 37 * sizeof(BFloat16));
+ EXPECT_EQUAL(CellTypeUtils::mem_size(CellType::INT8, 37), 37 * sizeof(Int8Float));
}
TEST("require that all cell types can be listed") {
diff --git a/eval/src/tests/tensor/binary_format/binary_format_test.cpp b/eval/src/tests/tensor/binary_format/binary_format_test.cpp
index 671765d4050..d0e05d303a2 100644
--- a/eval/src/tests/tensor/binary_format/binary_format_test.cpp
+++ b/eval/src/tests/tensor/binary_format/binary_format_test.cpp
@@ -128,7 +128,7 @@ TEST(TensorBinaryFormatTest, tensor_binary_format_test_spec) {
path.append("src/apps/make_tensor_binary_format_test_spec/test_spec.json");
MappedFileInput file(path);
EXPECT_TRUE(file.valid());
- auto handle_test = [this](Slime &slime)
+ auto handle_test = [](Slime &slime)
{
test_binary_format_spec(slime.get());
};
diff --git a/eval/src/tests/tensor/onnx_wrapper/dynamic.py b/eval/src/tests/tensor/onnx_wrapper/dynamic.py
index d098324fae8..cdf59c4f700 100755
--- a/eval/src/tests/tensor/onnx_wrapper/dynamic.py
+++ b/eval/src/tests/tensor/onnx_wrapper/dynamic.py
@@ -35,5 +35,5 @@ graph_def = helper.make_graph(
],
[OUTPUT],
)
-model_def = helper.make_model(graph_def, producer_name='dynamic.py')
+model_def = helper.make_model(graph_def, producer_name='dynamic.py', opset_imports=[onnx.OperatorSetIdProto(version=12)])
onnx.save(model_def, 'dynamic.onnx')
diff --git a/eval/src/tests/tensor/onnx_wrapper/float_to_int8.onnx b/eval/src/tests/tensor/onnx_wrapper/float_to_int8.onnx
new file mode 100644
index 00000000000..cde81d428bd
--- /dev/null
+++ b/eval/src/tests/tensor/onnx_wrapper/float_to_int8.onnx
@@ -0,0 +1,12 @@
+(binary ONNX protobuf for the float_to_int8 model, generated by float_to_int8.py; content not representable as text)
diff --git a/eval/src/tests/tensor/onnx_wrapper/float_to_int8.py b/eval/src/tests/tensor/onnx_wrapper/float_to_int8.py
new file mode 100755
index 00000000000..2a8e47b3ffa
--- /dev/null
+++ b/eval/src/tests/tensor/onnx_wrapper/float_to_int8.py
@@ -0,0 +1,23 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+import onnx
+from onnx import helper, TensorProto
+
+IN = helper.make_tensor_value_info('in', TensorProto.FLOAT, [7])
+OUT = helper.make_tensor_value_info('out', TensorProto.INT8, [7])
+
+nodes = [
+ helper.make_node(
+ 'Cast',
+ ['in'],
+ ['out'],
+ to=getattr(TensorProto, 'INT8'),
+ ),
+]
+graph_def = helper.make_graph(
+ nodes,
+ 'float_to_int8',
+ [IN],
+ [OUT],
+)
+model_def = helper.make_model(graph_def, producer_name='float_to_int8.py', opset_imports=[onnx.OperatorSetIdProto(version=13)])
+onnx.save(model_def, 'float_to_int8.onnx')
diff --git a/eval/src/tests/tensor/onnx_wrapper/guess_batch.py b/eval/src/tests/tensor/onnx_wrapper/guess_batch.py
index c43448c58a7..63b2c84e934 100755
--- a/eval/src/tests/tensor/onnx_wrapper/guess_batch.py
+++ b/eval/src/tests/tensor/onnx_wrapper/guess_batch.py
@@ -22,5 +22,5 @@ graph_def = helper.make_graph(
],
[OUT],
)
-model_def = helper.make_model(graph_def, producer_name='guess_batch.py')
+model_def = helper.make_model(graph_def, producer_name='guess_batch.py', opset_imports=[onnx.OperatorSetIdProto(version=12)])
onnx.save(model_def, 'guess_batch.onnx')
diff --git a/eval/src/tests/tensor/onnx_wrapper/int_types.py b/eval/src/tests/tensor/onnx_wrapper/int_types.py
index cd82bfd44b5..e5adf035e4b 100755
--- a/eval/src/tests/tensor/onnx_wrapper/int_types.py
+++ b/eval/src/tests/tensor/onnx_wrapper/int_types.py
@@ -29,5 +29,5 @@ graph_def = helper.make_graph(
],
[OUTPUT],
)
-model_def = helper.make_model(graph_def, producer_name='int_types.py')
+model_def = helper.make_model(graph_def, producer_name='int_types.py', opset_imports=[onnx.OperatorSetIdProto(version=12)])
onnx.save(model_def, 'int_types.onnx')
diff --git a/eval/src/tests/tensor/onnx_wrapper/onnx_wrapper_test.cpp b/eval/src/tests/tensor/onnx_wrapper/onnx_wrapper_test.cpp
index b474d2458b9..9b44dd7519e 100644
--- a/eval/src/tests/tensor/onnx_wrapper/onnx_wrapper_test.cpp
+++ b/eval/src/tests/tensor/onnx_wrapper/onnx_wrapper_test.cpp
@@ -1,12 +1,16 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/eval/eval/tensor_spec.h>
+#include <vespa/eval/eval/int8float.h>
#include <vespa/eval/onnx/onnx_wrapper.h>
+#include <vespa/vespalib/util/bfloat16.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/gtest/gtest.h>
using namespace vespalib::eval;
+using vespalib::BFloat16;
+
using vespalib::make_string_short::fmt;
using TensorInfo = Onnx::TensorInfo;
using ElementType = Onnx::ElementType;
@@ -21,6 +25,8 @@ std::string simple_model = source_dir + "/simple.onnx";
std::string dynamic_model = source_dir + "/dynamic.onnx";
std::string int_types_model = source_dir + "/int_types.onnx";
std::string guess_batch_model = source_dir + "/guess_batch.onnx";
+std::string unstable_types_model = source_dir + "/unstable_types.onnx";
+std::string float_to_int8_model = source_dir + "/float_to_int8.onnx";
void dump_info(const char *ctx, const std::vector<TensorInfo> &info) {
fprintf(stderr, "%s:\n", ctx);
@@ -306,4 +312,128 @@ TEST(OnnxTest, we_guess_batch_dimension_size_when_inference_fails) {
//-------------------------------------------------------------------------
}
+TEST(OnnxTest, zero_copy_unstable_types) {
+ Onnx model(unstable_types_model, Onnx::Optimize::ENABLE);
+ ASSERT_EQ(model.inputs().size(), 2);
+ ASSERT_EQ(model.outputs().size(), 2);
+
+ ValueType in8_type = ValueType::from_spec("tensor<int8>(a[3])");
+ std::vector<Int8Float> in8_values({1.0, 2.0, 3.0});
+ DenseValueView in8(in8_type, TypedCells(in8_values));
+
+ ValueType in16_type = ValueType::from_spec("tensor<bfloat16>(a[3])");
+ std::vector<BFloat16> in16_values({4.0, 5.0, 6.0});
+ DenseValueView in16(in16_type, TypedCells(in16_values));
+
+ Onnx::WirePlanner planner;
+ EXPECT_TRUE(planner.bind_input_type(in8_type, model.inputs()[0]));
+ EXPECT_TRUE(planner.bind_input_type(in16_type, model.inputs()[1]));
+ EXPECT_EQ(planner.make_output_type(model.outputs()[0]).to_spec(), "tensor<int8>(d0[3])");
+ EXPECT_EQ(planner.make_output_type(model.outputs()[1]).to_spec(), "tensor<bfloat16>(d0[3])");
+
+ auto wire_info = planner.get_wire_info(model);
+ Onnx::EvalContext ctx(model, wire_info);
+
+ const Value &out8 = ctx.get_result(0);
+ const Value &out16 = ctx.get_result(1);
+ EXPECT_EQ(out8.type().to_spec(), "tensor<int8>(d0[3])");
+ EXPECT_EQ(out16.type().to_spec(), "tensor<bfloat16>(d0[3])");
+ //-------------------------------------------------------------------------
+ ctx.bind_param(0, in8);
+ ctx.bind_param(1, in16);
+ ctx.eval();
+ auto cells8 = out8.cells();
+ auto cells16 = out16.cells();
+ ASSERT_EQ(cells8.type, CellType::INT8);
+ ASSERT_EQ(cells16.type, CellType::BFLOAT16);
+ ASSERT_EQ(cells8.size, 3);
+ ASSERT_EQ(cells16.size, 3);
+ EXPECT_EQ(cells8.typify<Int8Float>()[0], 4.0);
+ EXPECT_EQ(cells8.typify<Int8Float>()[1], 5.0);
+ EXPECT_EQ(cells8.typify<Int8Float>()[2], 6.0);
+ EXPECT_EQ(cells16.typify<BFloat16>()[0], 1.0);
+ EXPECT_EQ(cells16.typify<BFloat16>()[1], 2.0);
+ EXPECT_EQ(cells16.typify<BFloat16>()[2], 3.0);
+ //-------------------------------------------------------------------------
+}
+
+TEST(OnnxTest, converted_unstable_types) {
+ Onnx model(unstable_types_model, Onnx::Optimize::ENABLE);
+ ASSERT_EQ(model.inputs().size(), 2);
+ ASSERT_EQ(model.outputs().size(), 2);
+
+ ValueType in8_type = ValueType::from_spec("tensor<float>(a[3])");
+ std::vector<float> in8_values({1.0, 2.0, 3.0});
+ DenseValueView in8(in8_type, TypedCells(in8_values));
+
+ ValueType in16_type = ValueType::from_spec("tensor<float>(a[3])");
+ std::vector<float> in16_values({4.0, 5.0, 6.0});
+ DenseValueView in16(in16_type, TypedCells(in16_values));
+
+ Onnx::WirePlanner planner;
+ EXPECT_TRUE(planner.bind_input_type(in8_type, model.inputs()[0]));
+ EXPECT_TRUE(planner.bind_input_type(in16_type, model.inputs()[1]));
+ EXPECT_EQ(planner.make_output_type(model.outputs()[0]).to_spec(), "tensor<int8>(d0[3])");
+ EXPECT_EQ(planner.make_output_type(model.outputs()[1]).to_spec(), "tensor<bfloat16>(d0[3])");
+
+ auto wire_info = planner.get_wire_info(model);
+ Onnx::EvalContext ctx(model, wire_info);
+
+ const Value &out8 = ctx.get_result(0);
+ const Value &out16 = ctx.get_result(1);
+ EXPECT_EQ(out8.type().to_spec(), "tensor<int8>(d0[3])");
+ EXPECT_EQ(out16.type().to_spec(), "tensor<bfloat16>(d0[3])");
+ //-------------------------------------------------------------------------
+ ctx.bind_param(0, in8);
+ ctx.bind_param(1, in16);
+ ctx.eval();
+ auto cells8 = out8.cells();
+ auto cells16 = out16.cells();
+ ASSERT_EQ(cells8.type, CellType::INT8);
+ ASSERT_EQ(cells16.type, CellType::BFLOAT16);
+ ASSERT_EQ(cells8.size, 3);
+ ASSERT_EQ(cells16.size, 3);
+ EXPECT_EQ(cells8.typify<Int8Float>()[0], 4.0);
+ EXPECT_EQ(cells8.typify<Int8Float>()[1], 5.0);
+ EXPECT_EQ(cells8.typify<Int8Float>()[2], 6.0);
+ EXPECT_EQ(cells16.typify<BFloat16>()[0], 1.0);
+ EXPECT_EQ(cells16.typify<BFloat16>()[1], 2.0);
+ EXPECT_EQ(cells16.typify<BFloat16>()[2], 3.0);
+ //-------------------------------------------------------------------------
+}
+
+TEST(OnnxTest, inspect_float_to_int8_conversion) {
+ Onnx model(float_to_int8_model, Onnx::Optimize::ENABLE);
+ ASSERT_EQ(model.inputs().size(), 1);
+ ASSERT_EQ(model.outputs().size(), 1);
+
+ ValueType in_type = ValueType::from_spec("tensor<float>(a[7])");
+ const float my_nan = std::numeric_limits<float>::quiet_NaN();
+ const float my_inf = std::numeric_limits<float>::infinity();
+ std::vector<float> in_values({-my_inf, -142, -42, my_nan, 42, 142, my_inf});
+ DenseValueView in(in_type, TypedCells(in_values));
+
+ Onnx::WirePlanner planner;
+ EXPECT_TRUE(planner.bind_input_type(in_type, model.inputs()[0]));
+ EXPECT_EQ(planner.make_output_type(model.outputs()[0]).to_spec(), "tensor<int8>(d0[7])");
+
+ auto wire_info = planner.get_wire_info(model);
+ Onnx::EvalContext ctx(model, wire_info);
+
+ const Value &out = ctx.get_result(0);
+ EXPECT_EQ(out.type().to_spec(), "tensor<int8>(d0[7])");
+ //-------------------------------------------------------------------------
+ ctx.bind_param(0, in);
+ ctx.eval();
+ auto cells = out.cells();
+ ASSERT_EQ(cells.type, CellType::INT8);
+ ASSERT_EQ(cells.size, 7);
+ auto out_values = cells.typify<Int8Float>();
+ for (size_t i = 0; i < 7; ++i) {
+ fprintf(stderr, "convert(float->int8): '%g' -> '%d'\n",
+ in_values[i], out_values[i].get_bits());
+ }
+ //-------------------------------------------------------------------------
+}
+
GTEST_MAIN_RUN_ALL_TESTS()
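
The two new unstable-types tests above exercise the same wire-planning flow: declare the Vespa-side input types, let the planner derive output types from the model, then bind parameters and evaluate. A condensed sketch of that flow (the model path is hypothetical; the calls mirror the tests above):

    // Hypothetical model path; API usage follows the tests in this file.
    Onnx model("some_model.onnx", Onnx::Optimize::ENABLE);
    ValueType in_type = ValueType::from_spec("tensor<int8>(a[3])");
    std::vector<Int8Float> in_cells({1.0, 2.0, 3.0});
    DenseValueView input(in_type, TypedCells(in_cells));

    Onnx::WirePlanner planner;
    planner.bind_input_type(in_type, model.inputs()[0]);
    ValueType out_type = planner.make_output_type(model.outputs()[0]);

    auto wire_info = planner.get_wire_info(model);
    Onnx::EvalContext ctx(model, wire_info);
    ctx.bind_param(0, input);
    ctx.eval();
    const Value &out = ctx.get_result(0); // cell type matches out_type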
diff --git a/eval/src/tests/tensor/onnx_wrapper/simple.py b/eval/src/tests/tensor/onnx_wrapper/simple.py
index a3cd2425d58..c8db58b5ebb 100755
--- a/eval/src/tests/tensor/onnx_wrapper/simple.py
+++ b/eval/src/tests/tensor/onnx_wrapper/simple.py
@@ -29,5 +29,6 @@ graph_def = helper.make_graph(
],
[OUTPUT],
)
-model_def = helper.make_model(graph_def, producer_name='simple.py')
+
+model_def = helper.make_model(graph_def, producer_name='simple.py', opset_imports=[onnx.OperatorSetIdProto(version=12)])
onnx.save(model_def, 'simple.onnx')
diff --git a/eval/src/tests/tensor/onnx_wrapper/unstable_types.onnx b/eval/src/tests/tensor/onnx_wrapper/unstable_types.onnx
new file mode 100644
index 00000000000..b833086ddd0
--- /dev/null
+++ b/eval/src/tests/tensor/onnx_wrapper/unstable_types.onnx
@@ -0,0 +1,23 @@
+(binary ONNX protobuf for the unstable_types model, generated by unstable_types.py; content not representable as text)
diff --git a/eval/src/tests/tensor/onnx_wrapper/unstable_types.py b/eval/src/tests/tensor/onnx_wrapper/unstable_types.py
new file mode 100755
index 00000000000..94a1975a560
--- /dev/null
+++ b/eval/src/tests/tensor/onnx_wrapper/unstable_types.py
@@ -0,0 +1,31 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+import onnx
+from onnx import helper, TensorProto
+
+IN8 = helper.make_tensor_value_info('in8', TensorProto.INT8, [3])
+IN16 = helper.make_tensor_value_info('in16', TensorProto.BFLOAT16, [3])
+OUT8 = helper.make_tensor_value_info('out8', TensorProto.INT8, [3])
+OUT16 = helper.make_tensor_value_info('out16', TensorProto.BFLOAT16, [3])
+
+nodes = [
+ helper.make_node(
+ 'Cast',
+ ['in8'],
+ ['out16'],
+ to=getattr(TensorProto, 'BFLOAT16')
+ ),
+ helper.make_node(
+ 'Cast',
+ ['in16'],
+ ['out8'],
+ to=getattr(TensorProto, 'INT8')
+ ),
+]
+graph_def = helper.make_graph(
+ nodes,
+ 'unstable_types',
+ [IN8, IN16],
+ [OUT8, OUT16],
+)
+model_def = helper.make_model(graph_def, producer_name='unstable_types.py', opset_imports=[onnx.OperatorSetIdProto(version=13)])
+onnx.save(model_def, 'unstable_types.onnx')
diff --git a/eval/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp b/eval/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp
index 3d21f9b4113..e33cc116fba 100644
--- a/eval/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp
+++ b/eval/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp
@@ -1,41 +1,21 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
#include <vespa/vespalib/testkit/test_kit.h>
-#include <vespa/eval/eval/test/tensor_conformance.h>
-#include <vespa/eval/eval/simple_value.h>
-#include <vespa/eval/streamed/streamed_value_builder_factory.h>
-#include <vespa/eval/eval/fast_value.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/io/mapped_file_input.h>
#include <vespa/vespalib/data/slime/slime.h>
-using vespalib::eval::SimpleValueBuilderFactory;
-using vespalib::eval::StreamedValueBuilderFactory;
-using vespalib::eval::FastValueBuilderFactory;
-using vespalib::eval::test::TensorConformance;
using vespalib::make_string_short::fmt;
using vespalib::Slime;
using vespalib::slime::JsonFormat;
using vespalib::MappedFileInput;
-vespalib::string module_src_path(TEST_PATH("../../../../"));
vespalib::string module_build_path("../../../../");
-TEST("require that SimpleValue implementation passes all conformance tests") {
- TEST_DO(TensorConformance::run_tests(module_src_path, SimpleValueBuilderFactory::get()));
-}
-
-TEST("require that StreamedValue implementation passes all conformance tests") {
- TEST_DO(TensorConformance::run_tests(module_src_path, StreamedValueBuilderFactory::get()));
-}
-
-TEST("require that FastValue implementation passes all conformance tests") {
- TEST_DO(TensorConformance::run_tests(module_src_path, FastValueBuilderFactory::get()));
-}
-
-TEST("require that cross-language tensor conformance tests pass with C++ expression evaluation") {
+TEST("require that (some) cross-language tensor conformance tests pass with C++ expression evaluation") {
vespalib::string result_file = "conformance_result.json";
vespalib::string binary = module_build_path + "src/apps/tensor_conformance/vespa-tensor-conformance";
- EXPECT_EQUAL(system(fmt("%s generate | %s evaluate | %s verify > %s", binary.c_str(), binary.c_str(), binary.c_str(), result_file.c_str()).c_str()), 0);
+ EXPECT_EQUAL(system(fmt("%s generate-some | %s evaluate | %s verify > %s", binary.c_str(), binary.c_str(), binary.c_str(), result_file.c_str()).c_str()), 0);
Slime result;
MappedFileInput input(result_file);
JsonFormat::decode(input, result);
diff --git a/eval/src/vespa/eval/eval/tensor_spec.cpp b/eval/src/vespa/eval/eval/tensor_spec.cpp
index beafba97ca1..684242fc485 100644
--- a/eval/src/vespa/eval/eval/tensor_spec.cpp
+++ b/eval/src/vespa/eval/eval/tensor_spec.cpp
@@ -7,6 +7,8 @@
#include "value.h"
#include "value_codec.h"
#include "value_type.h"
+#include <vespa/vespalib/util/overload.h>
+#include <vespa/vespalib/util/visit_ranges.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/eval/eval/test/reference_evaluation.h>
#include <vespa/vespalib/data/slime/slime.h>
@@ -33,6 +35,123 @@ TensorSpec::Address extract_address(const slime::Inspector &address) {
return extractor.address;
}
+vespalib::string addr_to_compact_string(const TensorSpec::Address &addr) {
+ size_t n = 0;
+ vespalib::string str("[");
+ for (const auto &[dim, label]: addr) {
+ if (n++) {
+ str.append(",");
+ }
+ if (label.is_mapped()) {
+ str.append(label.name);
+ } else {
+ str.append(make_string("%zu", label.index));
+ }
+ }
+ str.append("]");
+ return str;
+}
+
+vespalib::string value_to_verbose_string(const TensorSpec::Value &value) {
+ return make_string("%g (%a)", value.value, value.value);
+}
+
+struct DiffTable {
+ struct Entry {
+ vespalib::string tag;
+ vespalib::string lhs;
+ vespalib::string rhs;
+ bool is_separator() const {
+ return (tag.empty() && lhs.empty() && rhs.empty());
+ }
+ static Entry separator() { return {"","",""}; }
+ static Entry header(const vespalib::string &lhs_desc,
+ const vespalib::string &rhs_desc)
+ {
+ return {"", lhs_desc, rhs_desc};
+ }
+ static Entry only_lhs(const TensorSpec::Address &addr,
+ const TensorSpec::Value &lhs)
+ {
+ return {addr_to_compact_string(addr),
+ value_to_verbose_string(lhs),
+ "<missing>"};
+ }
+ static Entry only_rhs(const TensorSpec::Address &addr,
+ const TensorSpec::Value &rhs)
+ {
+ return {addr_to_compact_string(addr),
+ "<missing>",
+ value_to_verbose_string(rhs)};
+ }
+ static Entry value_mismatch(const TensorSpec::Address &addr,
+ const TensorSpec::Value &lhs,
+ const TensorSpec::Value &rhs)
+ {
+ return {addr_to_compact_string(addr),
+ value_to_verbose_string(lhs),
+ value_to_verbose_string(rhs)};
+ }
+ ~Entry();
+ };
+ struct Result {
+ size_t tag_len;
+ size_t lhs_len;
+ size_t rhs_len;
+ vespalib::string str;
+ Result(size_t tag_len_in, size_t lhs_len_in, size_t rhs_len_in)
+ : tag_len(tag_len_in + 1), lhs_len(lhs_len_in + 1), rhs_len(rhs_len_in + 1),
+ str() {}
+ void add(const vespalib::string &stuff) {
+ str.append(stuff);
+ }
+ void add(const vespalib::string &stuff, size_t width, char pad = ' ') {
+ int n = (width - stuff.size());
+ for (int i = 0; i < n; ++i) {
+ str.push_back(pad);
+ }
+ str.append(stuff);
+ }
+ void add(const Entry &entry) {
+ if (entry.is_separator()) {
+ add("+");
+ add("", tag_len, '-');
+ add("-+");
+ add("", lhs_len, '-');
+ add("-+");
+ add("", rhs_len, '-');
+ add("-+\n");
+ } else {
+ add("|");
+ add(entry.tag, tag_len);
+ add(" |");
+ add(entry.lhs, lhs_len);
+ add(" |");
+ add(entry.rhs, rhs_len);
+ add(" |\n");
+ }
+ }
+ };
+ size_t tag_len = 0;
+ size_t lhs_len = 0;
+ size_t rhs_len = 0;
+ std::vector<Entry> entries;
+ void add(const Entry &entry) {
+ tag_len = std::max(tag_len, entry.tag.size());
+ lhs_len = std::max(lhs_len, entry.lhs.size());
+ rhs_len = std::max(rhs_len, entry.rhs.size());
+ entries.push_back(entry);
+ }
+ vespalib::string to_string() {
+ Result res(tag_len, lhs_len, rhs_len);
+ for (const auto &entry: entries) {
+ res.add(entry);
+ }
+ return std::move(res.str);
+ }
+};
+DiffTable::Entry::~Entry() = default;
+
struct NormalizeTensorSpec {
/*
* This is basically value_from_spec() + spec_from_value()
@@ -140,19 +259,7 @@ TensorSpec::to_string() const
{
vespalib::string out = make_string("spec(%s) {\n", _type.c_str());
for (const auto &cell: _cells) {
- size_t n = 0;
- out.append(" [");
- for (const auto &label: cell.first) {
- if (n++) {
- out.append(",");
- }
- if (label.second.is_mapped()) {
- out.append(label.second.name);
- } else {
- out.append(make_string("%zu", label.second.index));
- }
- }
- out.append(make_string("]: %g\n", cell.second.value));
+ out.append(make_string(" %s: %g\n", addr_to_compact_string(cell.first).c_str(), cell.second.value));
}
out.append("}");
return out;
@@ -230,6 +337,30 @@ TensorSpec::normalize() const
return typify_invoke<1,TypifyCellType,NormalizeTensorSpec>(my_type.cell_type(), my_type, *this);
}
+vespalib::string
+TensorSpec::diff(const TensorSpec &lhs, const vespalib::string &lhs_desc,
+ const TensorSpec &rhs, const vespalib::string &rhs_desc)
+{
+ using Entry = DiffTable::Entry;
+ DiffTable table;
+ table.add(Entry::separator());
+ table.add(Entry::header(lhs_desc, rhs_desc));
+ table.add(Entry::header(lhs.type(), rhs.type()));
+ auto visitor = overload {
+ [&](visit_ranges_first, const auto &a) { table.add(Entry::only_lhs(a.first, a.second)); },
+ [&](visit_ranges_second, const auto &b) { table.add(Entry::only_rhs(b.first, b.second)); },
+ [&](visit_ranges_both, const auto &a, const auto &b) {
+ if (!(a.second == b.second)) {
+ table.add(Entry::value_mismatch(a.first, a.second, b.second));
+ }
+ }
+ };
+ table.add(Entry::separator());
+ visit_ranges(visitor, lhs._cells.begin(), lhs._cells.end(), rhs._cells.begin(), rhs._cells.end(),
+ [](const auto &a, const auto &b){ return (a.first < b.first); });
+ table.add(Entry::separator());
+ return table.to_string();
+}
} // namespace vespalib::eval
} // namespace vespalib
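A minimal usage sketch of the new TensorSpec::diff helper declared below (the tensor contents are made up for illustration, and TensorSpec::add is used the same way as in existing tests):

    TensorSpec expected("tensor(x[2])");
    expected.add({{"x", 0}}, 1.0);
    expected.add({{"x", 1}}, 2.0);
    TensorSpec actual("tensor(x[2])");
    actual.add({{"x", 0}}, 1.0);
    actual.add({{"x", 1}}, 2.5);
    // Produces an aligned table with one row per missing or mismatching cell,
    // here only cell [1] (2 vs 2.5); cells that match are left out.
    fprintf(stderr, "%s", TensorSpec::diff(expected, "expected", actual, "actual").c_str());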
diff --git a/eval/src/vespa/eval/eval/tensor_spec.h b/eval/src/vespa/eval/eval/tensor_spec.h
index f8a06adf331..eab4c4ff49d 100644
--- a/eval/src/vespa/eval/eval/tensor_spec.h
+++ b/eval/src/vespa/eval/eval/tensor_spec.h
@@ -78,6 +78,8 @@ public:
static TensorSpec from_slime(const slime::Inspector &tensor);
static TensorSpec from_value(const eval::Value &value);
static TensorSpec from_expr(const vespalib::string &expr);
+ static vespalib::string diff(const TensorSpec &lhs, const vespalib::string &lhs_desc,
+ const TensorSpec &rhs, const vespalib::string &rhs_desc);
};
bool operator==(const TensorSpec &lhs, const TensorSpec &rhs);
diff --git a/eval/src/vespa/eval/eval/test/CMakeLists.txt b/eval/src/vespa/eval/eval/test/CMakeLists.txt
index dc0576f07a2..aeccc00a01c 100644
--- a/eval/src/vespa/eval/eval/test/CMakeLists.txt
+++ b/eval/src/vespa/eval/eval/test/CMakeLists.txt
@@ -7,8 +7,6 @@ vespa_add_library(eval_eval_test OBJECT
gen_spec.cpp
reference_evaluation.cpp
reference_operations.cpp
- tensor_conformance.cpp
- tensor_model.cpp
test_io.cpp
value_compare.cpp
)
diff --git a/eval/src/vespa/eval/eval/test/gen_spec.cpp b/eval/src/vespa/eval/eval/test/gen_spec.cpp
index fd2c1f39382..8921cf28bab 100644
--- a/eval/src/vespa/eval/eval/test/gen_spec.cpp
+++ b/eval/src/vespa/eval/eval/test/gen_spec.cpp
@@ -24,6 +24,10 @@ Sequence Div16(const Sequence &seq) {
return [seq](size_t i) { return (seq(i) / 16.0); };
}
+Sequence Div17(const Sequence &seq) {
+ return [seq](size_t i) { return (seq(i) / 17.0); };
+}
+
Sequence Sub2(const Sequence &seq) {
return [seq](size_t i) { return (seq(i) - 2.0); };
}
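Like Div16 and Sub2, the new Div17 helper just wraps the underlying sequence in a lambda, so the helpers compose freely. A self-contained sketch of the pattern (Sequence is modeled here as std::function and the base sequence is hypothetical; the real types in gen_spec.h may differ):

    #include <functional>

    using Sequence = std::function<double(size_t)>;   // stand-in for the real Sequence type

    Sequence Numbers() { return [](size_t i) { return double(i + 1); }; }   // hypothetical base sequence
    Sequence Div17(const Sequence &seq) {
        return [seq](size_t i) { return seq(i) / 17.0; };
    }
    Sequence Sub2(const Sequence &seq) {
        return [seq](size_t i) { return seq(i) - 2.0; };
    }
    // Sub2(Div17(Numbers()))(16) == 17.0/17.0 - 2.0 == -1.0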
diff --git a/eval/src/vespa/eval/eval/test/gen_spec.h b/eval/src/vespa/eval/eval/test/gen_spec.h
index 3f7550ba644..0fa07218ee9 100644
--- a/eval/src/vespa/eval/eval/test/gen_spec.h
+++ b/eval/src/vespa/eval/eval/test/gen_spec.h
@@ -25,6 +25,9 @@ Sequence AX_B(double a, double b);
// Sequence of another sequence divided by 16
Sequence Div16(const Sequence &seq);
+// Sequence of another sequence divided by 17
+Sequence Div17(const Sequence &seq);
+
// Sequence of another sequence minus 2
Sequence Sub2(const Sequence &seq);
@@ -116,7 +119,7 @@ public:
GenSpec &operator=(GenSpec &&other);
GenSpec &operator=(const GenSpec &other);
~GenSpec();
- std::vector<DimSpec> dims() const { return _dims; }
+ const std::vector<DimSpec> &dims() const { return _dims; }
CellType cells() const { return _cells; }
const seq_t &seq() const { return _seq; }
GenSpec cpy() const { return *this; }
diff --git a/eval/src/vespa/eval/eval/test/tensor_conformance.cpp b/eval/src/vespa/eval/eval/test/tensor_conformance.cpp
deleted file mode 100644
index c58f8312cbf..00000000000
--- a/eval/src/vespa/eval/eval/test/tensor_conformance.cpp
+++ /dev/null
@@ -1,681 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "tensor_conformance.h"
-#include <vespa/eval/eval/tensor_spec.h>
-#include <vespa/eval/eval/function.h>
-#include <vespa/eval/eval/interpreted_function.h>
-#include <vespa/eval/eval/aggr.h>
-#include <vespa/eval/eval/value_codec.h>
-#include <vespa/eval/eval/simple_value.h>
-#include <vespa/eval/eval/value_type_spec.h>
-#include <vespa/vespalib/testkit/test_kit.h>
-#include <vespa/vespalib/util/require.h>
-#include <vespa/vespalib/util/stringfmt.h>
-#include <vespa/vespalib/objects/nbostream.h>
-#include <vespa/vespalib/data/slime/slime.h>
-#include <vespa/vespalib/io/mapped_file_input.h>
-#include "tensor_model.h"
-#include "reference_evaluation.h"
-
-using vespalib::make_string_short::fmt;
-
-namespace vespalib::eval::test {
-
-namespace {
-
-using slime::Cursor;
-using slime::Inspector;
-using slime::JsonFormat;
-
-//-----------------------------------------------------------------------------
-
-TensorSpec ref_eval(const vespalib::string &expr, const std::vector<TensorSpec> &params) {
- TensorSpec result = ReferenceEvaluation::eval(*Function::parse(expr), params);
- EXPECT_FALSE(ValueType::from_spec(result.type()).is_error());
- return result;
-}
-
-TensorSpec eval(const ValueBuilderFactory &factory, const vespalib::string &expr, const std::vector<TensorSpec> &params) {
- auto fun = Function::parse(expr);
- std::vector<ValueType> param_types;
- std::vector<Value::UP> param_values;
- std::vector<Value::CREF> param_refs;
- for (const auto &param: params) {
- param_types.push_back(ValueType::from_spec(param.type()));
- param_values.push_back(value_from_spec(param, factory));
- param_refs.emplace_back(*param_values.back());
- }
- NodeTypes types(*fun, param_types);
- const auto &expect_type = types.get_type(fun->root());
- REQUIRE(!expect_type.is_error());
- InterpretedFunction ifun(factory, *fun, types);
- InterpretedFunction::Context ctx(ifun);
- const Value &result = ifun.eval(ctx, SimpleObjectParams{param_refs});
- EXPECT_EQUAL(result.type(), expect_type);
- return spec_from_value(result);
-}
-
-void verify_result(const ValueBuilderFactory &factory, const vespalib::string &expr, const std::vector<TensorSpec> &params, const TensorSpec &expect) {
- auto actual = eval(factory, expr, params);
- EXPECT_EQUAL(actual, expect);
-}
-
-void verify_result(const ValueBuilderFactory &factory, const vespalib::string &expr, const std::vector<TensorSpec> &params) {
- TEST_DO(verify_result(factory, expr, params, ref_eval(expr, params)));
-}
-
-//-----------------------------------------------------------------------------
-
-// NaN value
-const double my_nan = std::numeric_limits<double>::quiet_NaN();
-
-// Test wrapper to avoid passing global test parameters around
-struct TestContext {
-
- vespalib::string module_path;
- const ValueBuilderFactory &factory;
-
- TestContext(const vespalib::string &module_path_in, const ValueBuilderFactory &factory_in)
- : module_path(module_path_in), factory(factory_in) {}
-
- //-------------------------------------------------------------------------
-
- void verify_create_type(const vespalib::string &type_spec) {
- Value::UP value = value_from_spec(TensorSpec(type_spec), factory);
- EXPECT_EQUAL(type_spec, value->type().to_spec());
- }
-
- void test_tensor_create_type() {
- TEST_DO(verify_create_type("double"));
- TEST_DO(verify_create_type("tensor(x{})"));
- TEST_DO(verify_create_type("tensor(x{},y{})"));
- TEST_DO(verify_create_type("tensor<float>(x{},y{})"));
- TEST_DO(verify_create_type("tensor(x[5])"));
- TEST_DO(verify_create_type("tensor(x[5],y[10])"));
- TEST_DO(verify_create_type("tensor<float>(x[5],y[10])"));
- TEST_DO(verify_create_type("tensor(x{},y[10])"));
- TEST_DO(verify_create_type("tensor(x[5],y{})"));
- TEST_DO(verify_create_type("tensor<float>(x[5],y{})"));
- }
-
- //-------------------------------------------------------------------------
-
- void test_reduce_op(Aggr aggr, const Sequence &seq) {
- std::vector<Layout> layouts = {
- {x(3)},
- {x(3),y(5)},
- {x(3),y(5),z(7)},
- float_cells({x(3),y(5),z(7)}),
- {x({"a","b","c"})},
- {x({"a","b","c"}),y({"foo","bar"})},
- {x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})}),
- {x(3),y({"foo", "bar"}),z(7)},
- {x({"a","b","c"}),y(5),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y(5),z({"i","j","k","l"})})
- };
- for (const Layout &layout: layouts) {
- TensorSpec input = spec(layout, seq);
- for (const Domain &domain: layout) {
- TEST_STATE(fmt("shape: %s, reduce dimension: %s",
- infer_type(layout).c_str(), domain.name().c_str()).c_str());
- vespalib::string expr = fmt("reduce(a,%s,%s)",
- AggrNames::name_of(aggr)->c_str(), domain.name().c_str());
- TEST_DO(verify_result(factory, expr, {input}));
- }
- {
- TEST_STATE(fmt("shape: %s, reduce all dimensions",
- infer_type(layout).c_str()).c_str());
- vespalib::string expr = fmt("reduce(a,%s)", AggrNames::name_of(aggr)->c_str());
- TEST_DO(verify_result(factory, expr, {input}));
- }
- }
- }
-
- void test_tensor_reduce() {
- TEST_DO(test_reduce_op(Aggr::AVG, N()));
- TEST_DO(test_reduce_op(Aggr::COUNT, N()));
- TEST_DO(test_reduce_op(Aggr::PROD, SigmoidF(N())));
- TEST_DO(test_reduce_op(Aggr::SUM, N()));
- TEST_DO(test_reduce_op(Aggr::MAX, N()));
- TEST_DO(test_reduce_op(Aggr::MEDIAN, N()));
- TEST_DO(test_reduce_op(Aggr::MIN, N()));
- }
-
- //-------------------------------------------------------------------------
-
- void test_map_op_inner(const vespalib::string &expr, map_fun_t ref_op, const Sequence &seq) {
- std::vector<Layout> layouts = {
- {},
- {x(3)},
- {x(3),y(5)},
- {x(3),y(5),z(7)},
- float_cells({x(3),y(5),z(7)}),
- {x({"a","b","c"})},
- {x({"a","b","c"}),y({"foo","bar"})},
- {x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})}),
- {x(3),y({"foo", "bar"}),z(7)},
- {x({"a","b","c"}),y(5),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y(5),z({"i","j","k","l"})})
- };
- for (const Layout &layout: layouts) {
- TEST_DO(verify_result(factory, expr, {spec(layout, seq)}, spec(layout, OpSeq(seq, ref_op))));
- }
- }
-
- void test_map_op(const vespalib::string &expr, map_fun_t op, const Sequence &seq) {
- TEST_DO(test_map_op_inner(expr, op, seq));
- TEST_DO(test_map_op_inner(fmt("map(x,f(a)(%s))", expr.c_str()), op, seq));
- }
-
- void test_tensor_map() {
- TEST_DO(test_map_op("-a", operation::Neg::f, Sub2(Div16(N()))));
- TEST_DO(test_map_op("!a", operation::Not::f, Seq({0.0, 1.0, 1.0})));
- TEST_DO(test_map_op("cos(a)", operation::Cos::f, Div16(N())));
- TEST_DO(test_map_op("sin(a)", operation::Sin::f, Div16(N())));
- TEST_DO(test_map_op("tan(a)", operation::Tan::f, Div16(N())));
- TEST_DO(test_map_op("cosh(a)", operation::Cosh::f, Div16(N())));
- TEST_DO(test_map_op("sinh(a)", operation::Sinh::f, Div16(N())));
- TEST_DO(test_map_op("tanh(a)", operation::Tanh::f, Div16(N())));
- TEST_DO(test_map_op("acos(a)", operation::Acos::f, SigmoidF(Div16(N()))));
- TEST_DO(test_map_op("asin(a)", operation::Asin::f, SigmoidF(Div16(N()))));
- TEST_DO(test_map_op("atan(a)", operation::Atan::f, Div16(N())));
- TEST_DO(test_map_op("exp(a)", operation::Exp::f, Div16(N())));
- TEST_DO(test_map_op("log10(a)", operation::Log10::f, Div16(N())));
- TEST_DO(test_map_op("log(a)", operation::Log::f, Div16(N())));
- TEST_DO(test_map_op("sqrt(a)", operation::Sqrt::f, Div16(N())));
- TEST_DO(test_map_op("ceil(a)", operation::Ceil::f, Div16(N())));
- TEST_DO(test_map_op("fabs(a)", operation::Fabs::f, Div16(N())));
- TEST_DO(test_map_op("floor(a)", operation::Floor::f, Div16(N())));
- TEST_DO(test_map_op("isNan(a)", operation::IsNan::f, Seq({my_nan, 1.0, 1.0})));
- TEST_DO(test_map_op("relu(a)", operation::Relu::f, Sub2(Div16(N()))));
- TEST_DO(test_map_op("sigmoid(a)", operation::Sigmoid::f, Sub2(Div16(N()))));
- TEST_DO(test_map_op("elu(a)", operation::Elu::f, Sub2(Div16(N()))));
- TEST_DO(test_map_op("erf(a)", operation::Erf::f, Sub2(Div16(N()))));
- TEST_DO(test_map_op("a in [1,5,7,13,42]", MyIn::f, N()));
- TEST_DO(test_map_op("(a+1)*2", MyOp::f, Div16(N())));
- }
-
- //-------------------------------------------------------------------------
-
- void test_apply_op(const vespalib::string &expr,
- const TensorSpec &expect,
- const TensorSpec &lhs,
- const TensorSpec &rhs) {
- TEST_DO(verify_result(factory, expr, {lhs, rhs}, expect));
- }
-
- void test_fixed_sparse_cases_apply_op(const vespalib::string &expr,
- join_fun_t op)
- {
- TEST_DO(test_apply_op(expr,
- spec("x{}", {}),
- spec("x{}", { { {{"x","1"}}, 3 } }),
- spec("x{}", { { {{"x","2"}}, 5 } })));
- TEST_DO(test_apply_op(expr,
- spec("x{}", { { {{"x","1"}}, op(3,5) } }),
- spec("x{}", { { {{"x","1"}}, 3 } }),
- spec("x{}", { { {{"x","1"}}, 5 } })));
- TEST_DO(test_apply_op(expr,
- spec("x{}", { { {{"x","1"}}, op(3,-5) } }),
- spec("x{}", { { {{"x","1"}}, 3 } }),
- spec("x{}", { { {{"x","1"}}, -5 } })));
- TEST_DO(test_apply_op(expr,
- spec("x{},y{},z{}",
- { { {{"x","-"},{"y","2"},{"z","-"}},
- op(5,7) },
- { {{"x","1"},{"y","-"},{"z","3"}},
- op(3,11) } }),
- spec("x{},y{}",
- { { {{"x","-"},{"y","2"}}, 5 },
- { {{"x","1"},{"y","-"}}, 3 } }),
- spec("y{},z{}",
- { { {{"y","-"},{"z","3"}}, 11 },
- { {{"y","2"},{"z","-"}}, 7 } })));
- TEST_DO(test_apply_op(expr,
- spec("x{},y{},z{}",
- { { {{"x","-"},{"y","2"},{"z","-"}},
- op(7,5) },
- { {{"x","1"},{"y","-"},{"z","3"}},
- op(11,3) } }),
- spec("y{},z{}",
- { { {{"y","-"},{"z","3"}}, 11 },
- { {{"y","2"},{"z","-"}}, 7 } }),
- spec("x{},y{}",
- { { {{"x","-"},{"y","2"}}, 5 },
- { {{"x","1"},{"y","-"}}, 3 } })));
- TEST_DO(test_apply_op(expr,
- spec("y{},z{}",
- { { {{"y","2"},{"z","-"}},
- op(5,7) } }),
- spec("y{}", { { {{"y","2"}}, 5 } }),
- spec("y{},z{}",
- { { {{"y","-"},{"z","3"}}, 11 },
- { {{"y","2"},{"z","-"}}, 7 } })));
- TEST_DO(test_apply_op(expr,
- spec("y{},z{}",
- { { {{"y","2"},{"z","-"}},
- op(7,5) } }),
- spec("y{},z{}",
- { { {{"y","-"},{"z","3"}}, 11 },
- { {{"y","2"},{"z","-"}}, 7 } }),
- spec("y{}", { { {{"y","2"}}, 5 } })));
- TEST_DO(test_apply_op(expr,
- spec("x{},y{}",
- { { {{"x","-"},{"y","2"}},
- op(5,7) } }),
- spec("x{},y{}",
- { { {{"x","-"},{"y","2"}}, 5 },
- { {{"x","1"},{"y","-"}}, 3 } }),
- spec("y{}", { { {{"y","2"}}, 7 } })));
- TEST_DO(test_apply_op(expr,
- spec("x{},y{}",
- { { {{"x","-"},{"y","2"}},
- op(7,5) } }),
- spec("y{}", { { {{"y","2"}}, 7 } }),
- spec("x{},y{}",
- { { {{"x","-"},{"y","2"}}, 5 },
- { {{"x","1"},{"y","-"}}, 3 } })));
- TEST_DO(test_apply_op(expr,
- spec("x{},z{}",
- { { {{"x","1"},{"z","3"}},
- op(3,11) } }),
- spec("x{}", { { {{"x","1"}}, 3 } }),
- spec("z{}", { { {{"z","3"}}, 11 } })));
- TEST_DO(test_apply_op(expr,
- spec("x{},z{}",
- { { {{"x","1"},{"z","3"}},
- op(11,3) } }),
- spec("z{}",{ { {{"z","3"}}, 11 } }),
- spec("x{}",{ { {{"x","1"}}, 3 } })));
- TEST_DO(test_apply_op(expr,
- spec("x{},y{}",
- { { {{"x","1"},{"y","1"}},
- op(3,5) },
- { {{"x","2"},{"y","1"}},
- op(7,5) } }),
- spec("x{}",
- { { {{"x","1"}}, 3 },
- { {{"x","2"}}, 7 } }),
- spec("y{}",
- { { {{"y","1"}}, 5 } })));
- TEST_DO(test_apply_op(expr,
- spec("x{},y{},z{}",
- { { {{"x","1"},{"y","1"},{"z","1"}},
- op(1,7) },
- { {{"x","1"},{"y","1"},{"z","2"}},
- op(1,13) },
- { {{"x","1"},{"y","2"},{"z","1"}},
- op(5,11) },
- { {{"x","2"},{"y","1"},{"z","1"}},
- op(3,7) },
- { {{"x","2"},{"y","1"},{"z","2"}},
- op(3,13) } }),
- spec("x{},y{}",
- { { {{"x","1"},{"y","1"}}, 1 },
- { {{"x","1"},{"y","2"}}, 5 },
- { {{"x","2"},{"y","1"}}, 3 } }),
- spec("y{},z{}",
- { { {{"y","1"},{"z","1"}}, 7 },
- { {{"y","1"},{"z","2"}}, 13 },
- { {{"y","2"},{"z","1"}}, 11 } })));
- TEST_DO(test_apply_op(expr,
- spec("x{},y{},z{}",
- { { {{"x","1"},{"y","1"},{"z","1"}},
- op(1,7) } }),
- spec("x{},y{}",
- { { {{"x","1"},{"y","-"}}, 5 },
- { {{"x","1"},{"y","1"}}, 1 } }),
- spec("y{},z{}",
- { { {{"y","1"},{"z","1"}}, 7 } })));
- TEST_DO(test_apply_op(expr,
- spec("x{},y{},z{}",
- { { {{"x","1"},{"y","-"},{"z","1"}},
- op(5,11) },
- { {{"x","1"},{"y","1"},{"z","1"}},
- op(1,7) } }),
- spec("x{},y{}",
- { { {{"x","1"},{"y","-"}}, 5 },
- { {{"x","1"},{"y","1"}}, 1 } }),
- spec("y{},z{}",
- { { {{"y","-"},{"z","1"}}, 11 },
- { {{"y","1"},{"z","1"}}, 7 } })));
- TEST_DO(test_apply_op(expr,
- spec("x{},y{},z{}",
- { { {{"x","1"},{"y","1"},{"z","1"}},
- op(1,7) } }),
- spec("x{},y{}",
- { { {{"x","-"},{"y","-"}}, 5 },
- { {{"x","1"},{"y","1"}}, 1 } }),
- spec("y{},z{}",
- { { {{"y","1"},{"z","1"}}, 7 } })));
- TEST_DO(test_apply_op(expr,
- spec("x{},y{},z{}",
- { { {{"x","-"},{"y","-"},{"z", "-"}},
- op(5,11) },
- { {{"x","1"},{"y","1"},{"z","1"}},
- op(1,7) } }),
- spec("x{},y{}",
- { { {{"x","-"},{"y","-"}}, 5 },
- { {{"x","1"},{"y","1"}}, 1 } }),
- spec("y{},z{}",
- { { {{"y","-"},{"z","-"}}, 11 },
- { {{"y","1"},{"z","1"}}, 7 } })));
- }
-
- void test_fixed_dense_cases_apply_op(const vespalib::string &expr,
- join_fun_t op)
- {
- TEST_DO(test_apply_op(expr,
- spec(op(0.1,0.2)), spec(0.1), spec(0.2)));
- TEST_DO(test_apply_op(expr,
- spec(x(1), Seq({ op(3,5) })),
- spec(x(1), Seq({ 3 })),
- spec(x(1), Seq({ 5 }))));
- TEST_DO(test_apply_op(expr,
- spec(x(1), Seq({ op(3,-5) })),
- spec(x(1), Seq({ 3 })),
- spec(x(1), Seq({ -5 }))));
- TEST_DO(test_apply_op(expr,
- spec(x(2), Seq({ op(3,7), op(5,11) })),
- spec(x(2), Seq({ 3, 5 })),
- spec(x(2), Seq({ 7, 11 }))));
- TEST_DO(test_apply_op(expr,
- spec({x(1),y(1)}, Seq({ op(3,5) })),
- spec({x(1),y(1)}, Seq({ 3 })),
- spec({x(1),y(1)}, Seq({ 5 }))));
- TEST_DO(test_apply_op(expr,
- spec({x(2),y(2),z(2)},
- Seq({ op(1, 7), op(1, 11),
- op(2, 13), op(2, 17),
- op(3, 7), op(3, 11),
- op(5, 13), op(5, 17)
- })),
- spec({x(2),y(2)},
- Seq({ 1, 2,
- 3, 5 })),
- spec({y(2),z(2)},
- Seq({ 7, 11,
- 13, 17 }))));
- }
-
- void test_apply_op_inner(const vespalib::string &expr, join_fun_t op, const Sequence &seq) {
- std::vector<Layout> layouts = {
- {}, {},
- {x(5)}, {x(5)},
- {x(5)}, {y(5)},
- {x(5)}, {x(5),y(5)},
- {y(3)}, {x(2),z(3)},
- {x(3),y(5)}, {y(5),z(7)},
- float_cells({x(3),y(5)}), {y(5),z(7)},
- {x(3),y(5)}, float_cells({y(5),z(7)}),
- float_cells({x(3),y(5)}), float_cells({y(5),z(7)}),
- {x({"a","b","c"})}, {x({"a","b","c"})},
- {x({"a","b","c"})}, {x({"a","b"})},
- {x({"a","b","c"})}, {y({"foo","bar","baz"})},
- {x({"a","b","c"})}, {x({"a","b","c"}),y({"foo","bar","baz"})},
- {x({"a","b"}),y({"foo","bar","baz"})}, {x({"a","b","c"}),y({"foo","bar"})},
- {x({"a","b"}),y({"foo","bar","baz"})}, {y({"foo","bar"}),z({"i","j","k","l"})},
- float_cells({x({"a","b"}),y({"foo","bar","baz"})}), {y({"foo","bar"}),z({"i","j","k","l"})},
- {x({"a","b"}),y({"foo","bar","baz"})}, float_cells({y({"foo","bar"}),z({"i","j","k","l"})}),
- float_cells({x({"a","b"}),y({"foo","bar","baz"})}), float_cells({y({"foo","bar"}),z({"i","j","k","l"})}),
- {x(3),y({"foo", "bar"})}, {y({"foo", "bar"}),z(7)},
- {x({"a","b","c"}),y(5)}, {y(5),z({"i","j","k","l"})},
- float_cells({x({"a","b","c"}),y(5)}), {y(5),z({"i","j","k","l"})},
- {x({"a","b","c"}),y(5)}, float_cells({y(5),z({"i","j","k","l"})}),
- float_cells({x({"a","b","c"}),y(5)}), float_cells({y(5),z({"i","j","k","l"})})
- };
- REQUIRE((layouts.size() % 2) == 0);
- for (size_t i = 0; i < layouts.size(); i += 2) {
- TensorSpec lhs_input = spec(layouts[i], seq);
- TensorSpec rhs_input = spec(layouts[i + 1], seq);
- TEST_STATE(fmt("lhs shape: %s, rhs shape: %s",
- lhs_input.type().c_str(),
- rhs_input.type().c_str()).c_str());
- TEST_DO(verify_result(factory, expr, {lhs_input, rhs_input}));
- }
- TEST_DO(test_fixed_sparse_cases_apply_op(expr, op));
- TEST_DO(test_fixed_dense_cases_apply_op(expr, op));
- }
-
- void test_apply_op(const vespalib::string &expr, join_fun_t op, const Sequence &seq) {
- TEST_DO(test_apply_op_inner(expr, op, seq));
- TEST_DO(test_apply_op_inner(fmt("join(x,y,f(a,b)(%s))", expr.c_str()), op, seq));
- }
-
- void test_tensor_apply() {
- TEST_DO(test_apply_op("a+b", operation::Add::f, Div16(N())));
- TEST_DO(test_apply_op("a-b", operation::Sub::f, Div16(N())));
- TEST_DO(test_apply_op("a*b", operation::Mul::f, Div16(N())));
- TEST_DO(test_apply_op("a/b", operation::Div::f, Div16(N())));
- TEST_DO(test_apply_op("a%b", operation::Mod::f, Div16(N())));
- TEST_DO(test_apply_op("a^b", operation::Pow::f, Div16(N())));
- TEST_DO(test_apply_op("pow(a,b)", operation::Pow::f, Div16(N())));
- TEST_DO(test_apply_op("a==b", operation::Equal::f, Div16(N())));
- TEST_DO(test_apply_op("a!=b", operation::NotEqual::f, Div16(N())));
- TEST_DO(test_apply_op("a~=b", operation::Approx::f, Div16(N())));
- TEST_DO(test_apply_op("a<b", operation::Less::f, Div16(N())));
- TEST_DO(test_apply_op("a<=b", operation::LessEqual::f, Div16(N())));
- TEST_DO(test_apply_op("a>b", operation::Greater::f, Div16(N())));
- TEST_DO(test_apply_op("a>=b", operation::GreaterEqual::f, Div16(N())));
- TEST_DO(test_apply_op("a&&b", operation::And::f, Seq({0.0, 1.0, 1.0})));
- TEST_DO(test_apply_op("a||b", operation::Or::f, Seq({0.0, 1.0, 1.0})));
- TEST_DO(test_apply_op("atan2(a,b)", operation::Atan2::f, Div16(N())));
- TEST_DO(test_apply_op("ldexp(a,b)", operation::Ldexp::f, Div16(N())));
- TEST_DO(test_apply_op("fmod(a,b)", operation::Mod::f, Div16(N())));
- TEST_DO(test_apply_op("min(a,b)", operation::Min::f, Div16(N())));
- TEST_DO(test_apply_op("max(a,b)", operation::Max::f, Div16(N())));
- }
-
- //-------------------------------------------------------------------------
-
- void test_dot_product(double expect,
- const TensorSpec &lhs,
- const TensorSpec &rhs)
- {
- vespalib::string expr("reduce(a*b,sum)");
- TEST_DO(verify_result(factory, expr, {lhs, rhs}, spec(expect)));
- }
-
- void test_dot_product(double expect,
- const Layout &lhs, const Sequence &lhs_seq,
- const Layout &rhs, const Sequence &rhs_seq)
- {
- TEST_DO(test_dot_product(expect, spec(lhs, lhs_seq), spec(rhs, rhs_seq)));
- TEST_DO(test_dot_product(expect, spec(float_cells(lhs), lhs_seq), spec(rhs, rhs_seq)));
- TEST_DO(test_dot_product(expect, spec(lhs, lhs_seq), spec(float_cells(rhs), rhs_seq)));
- TEST_DO(test_dot_product(expect, spec(float_cells(lhs), lhs_seq), spec(float_cells(rhs), rhs_seq)));
- }
-
- void test_dot_product() {
- TEST_DO(test_dot_product(((2 * 7) + (3 * 11) + (5 * 13)),
- {x(3)}, Seq({ 2, 3, 5 }),
- {x(3)}, Seq({ 7, 11, 13 })));
- }
-
- //-------------------------------------------------------------------------
-
- void test_concat(const TensorSpec &a,
- const TensorSpec &b,
- const vespalib::string &dimension,
- const TensorSpec &expect)
- {
- vespalib::string expr = fmt("concat(a,b,%s)", dimension.c_str());
- TEST_DO(verify_result(factory, expr, {a, b}, expect));
- }
-
- void test_concat() {
- TEST_DO(test_concat(spec(10.0), spec(20.0), "x", spec(x(2), Seq({10.0, 20.0}))));
- TEST_DO(test_concat(spec(x(1), Seq({10.0})), spec(20.0), "x", spec(x(2), Seq({10.0, 20.0}))));
- TEST_DO(test_concat(spec(10.0), spec(x(1), Seq({20.0})), "x", spec(x(2), Seq({10.0, 20.0}))));
- TEST_DO(test_concat(spec(x(3), Seq({1.0, 2.0, 3.0})), spec(x(2), Seq({4.0, 5.0})), "x",
- spec(x(5), Seq({1.0, 2.0, 3.0, 4.0, 5.0}))));
- TEST_DO(test_concat(spec({x(2),y(2)}, Seq({1.0, 2.0, 3.0, 4.0})), spec(y(2), Seq({5.0, 6.0})), "y",
- spec({x(2),y(4)}, Seq({1.0, 2.0, 5.0, 6.0, 3.0, 4.0, 5.0, 6.0}))));
- TEST_DO(test_concat(spec({x(2),y(2)}, Seq({1.0, 2.0, 3.0, 4.0})), spec(x(2), Seq({5.0, 6.0})), "x",
- spec({x(4),y(2)}, Seq({1.0, 2.0, 3.0, 4.0, 5.0, 5.0, 6.0, 6.0}))));
- TEST_DO(test_concat(spec(z(3), Seq({1.0, 2.0, 3.0})), spec(y(2), Seq({4.0, 5.0})), "x",
- spec({x(2),y(2),z(3)}, Seq({1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 4.0, 4.0, 4.0, 5.0, 5.0, 5.0}))));
- TEST_DO(test_concat(spec(y(2), Seq({1.0, 2.0})), spec(y(2), Seq({4.0, 5.0})), "x",
- spec({x(2), y(2)}, Seq({1.0, 2.0, 4.0, 5.0}))));
-
- TEST_DO(test_concat(spec(float_cells({x(1)}), Seq({10.0})), spec(20.0), "x", spec(float_cells({x(2)}), Seq({10.0, 20.0}))));
- TEST_DO(test_concat(spec(10.0), spec(float_cells({x(1)}), Seq({20.0})), "x", spec(float_cells({x(2)}), Seq({10.0, 20.0}))));
-
- TEST_DO(test_concat(spec(float_cells({x(3)}), Seq({1.0, 2.0, 3.0})), spec(x(2), Seq({4.0, 5.0})), "x",
- spec(x(5), Seq({1.0, 2.0, 3.0, 4.0, 5.0}))));
- TEST_DO(test_concat(spec(x(3), Seq({1.0, 2.0, 3.0})), spec(float_cells({x(2)}), Seq({4.0, 5.0})), "x",
- spec(x(5), Seq({1.0, 2.0, 3.0, 4.0, 5.0}))));
- TEST_DO(test_concat(spec(float_cells({x(3)}), Seq({1.0, 2.0, 3.0})), spec(float_cells({x(2)}), Seq({4.0, 5.0})), "x",
- spec(float_cells({x(5)}), Seq({1.0, 2.0, 3.0, 4.0, 5.0}))));
- }
-
- //-------------------------------------------------------------------------
-
- void test_cell_cast(const GenSpec &a) {
- for (CellType cell_type: CellTypeUtils::list_types()) {
- auto expect = a.cpy().cells(cell_type);
- if (expect.bad_scalar()) continue;
- vespalib::string expr = fmt("cell_cast(a,%s)", value_type::cell_type_to_name(cell_type).c_str());
- TEST_DO(verify_result(factory, expr, {a}, expect));
- }
- }
-
- void test_cell_cast() {
- std::vector<GenSpec> gen_list;
- for (CellType cell_type: CellTypeUtils::list_types()) {
- gen_list.push_back(GenSpec(-3).cells(cell_type));
- }
- TEST_DO(test_cell_cast(GenSpec(42)));
- for (const auto &gen: gen_list) {
- TEST_DO(test_cell_cast(gen.cpy().idx("x", 10)));
- TEST_DO(test_cell_cast(gen.cpy().map("x", 10, 1)));
- TEST_DO(test_cell_cast(gen.cpy().map("x", 4, 1).idx("y", 4)));
- }
- }
-
- //-------------------------------------------------------------------------
-
- void test_rename(const vespalib::string &expr,
- const TensorSpec &input,
- const TensorSpec &expect)
- {
- TEST_DO(verify_result(factory, expr, {input}, expect));
- }
-
- void test_rename() {
- TEST_DO(test_rename("rename(a,x,y)", spec(x(5), N()), spec(y(5), N())));
- TEST_DO(test_rename("rename(a,y,x)", spec({y(5),z(5)}, N()), spec({x(5),z(5)}, N())));
- TEST_DO(test_rename("rename(a,y,x)", spec(float_cells({y(5),z(5)}), N()), spec(float_cells({x(5),z(5)}), N())));
- TEST_DO(test_rename("rename(a,z,x)", spec({y(5),z(5)}, N()), spec({y(5),x(5)}, N())));
- TEST_DO(test_rename("rename(a,x,z)", spec({x(5),y(5)}, N()), spec({z(5),y(5)}, N())));
- TEST_DO(test_rename("rename(a,y,z)", spec({x(5),y(5)}, N()), spec({x(5),z(5)}, N())));
- TEST_DO(test_rename("rename(a,(x,y),(y,x))", spec({x(5),y(5)}, N()), spec({y(5),x(5)}, N())));
- }
-
- //-------------------------------------------------------------------------
-
- void test_tensor_lambda(const vespalib::string &expr, const TensorSpec &expect) {
- TEST_DO(verify_result(factory, expr, {}, expect));
- }
-
- void test_tensor_lambda() {
- TEST_DO(test_tensor_lambda("tensor(x[10])(x+1)", spec(x(10), N())));
- TEST_DO(test_tensor_lambda("tensor<float>(x[10])(x+1)", spec(float_cells({x(10)}), N())));
- TEST_DO(test_tensor_lambda("tensor(x[5],y[4])(x*4+(y+1))", spec({x(5),y(4)}, N())));
- TEST_DO(test_tensor_lambda("tensor(x[5],y[4])(x==y)", spec({x(5),y(4)},
- Seq({ 1.0, 0.0, 0.0, 0.0,
- 0.0, 1.0, 0.0, 0.0,
- 0.0, 0.0, 1.0, 0.0,
- 0.0, 0.0, 0.0, 1.0,
- 0.0, 0.0, 0.0, 0.0}))));
- }
-
- //-------------------------------------------------------------------------
-
- void test_tensor_create(const vespalib::string &expr, double a, double b, const TensorSpec &expect) {
- TEST_DO(verify_result(factory, expr, {spec(a), spec(b)}, expect));
- }
-
- void test_tensor_create() {
- TEST_DO(test_tensor_create("tensor(x[3]):{{x:0}:a,{x:1}:b,{x:2}:3}", 1, 2, spec(x(3), N())));
- TEST_DO(test_tensor_create("tensor<float>(x[3]):{{x:0}:a,{x:1}:b,{x:2}:3}", 1, 2, spec(float_cells({x(3)}), N())));
- TEST_DO(test_tensor_create("tensor(x{}):{{x:a}:a,{x:b}:b,{x:c}:3}", 1, 2, spec(x({"a", "b", "c"}), N())));
- TEST_DO(test_tensor_create("tensor(x{},y[2]):{{x:a,y:0}:a,{x:a,y:1}:b}", 1, 2, spec({x({"a"}),y(2)}, N())));
- }
-
- //-------------------------------------------------------------------------
-
- void test_tensor_peek(const vespalib::string &expr, const TensorSpec &param, const TensorSpec &expect) {
- TEST_DO(verify_result(factory, expr, {param, spec(1.0)}, expect));
- }
-
- void test_tensor_peek() {
- auto param_double = spec({x({"0", "1"}),y(2)}, Seq({1.0, 2.0, 3.0, 4.0}));
- auto param_float = spec(float_cells({x({"0", "1"}),y(2)}), Seq({1.0, 2.0, 3.0, 4.0}));
- TEST_DO(test_tensor_peek("tensor(x[2]):[a{x:1,y:1},a{x:(b-1),y:(b-1)}]", param_double, spec(x(2), Seq({4.0, 1.0}))));
- TEST_DO(test_tensor_peek("tensor(x[2]):[a{x:1,y:1},a{x:(b-1),y:(b-1)}]", param_float, spec(x(2), Seq({4.0, 1.0}))));
- TEST_DO(test_tensor_peek("tensor<float>(x[2]):[a{x:1,y:1},a{x:(b-1),y:(b-1)}]", param_double, spec(float_cells({x(2)}), Seq({4.0, 1.0}))));
- TEST_DO(test_tensor_peek("tensor<float>(x[2]):[a{x:1,y:1},a{x:(b-1),y:(b-1)}]", param_float, spec(float_cells({x(2)}), Seq({4.0, 1.0}))));
- TEST_DO(test_tensor_peek("a{x:(b)}", param_double, spec(y(2), Seq({3.0, 4.0}))));
- TEST_DO(test_tensor_peek("a{x:(b)}", param_float, spec(float_cells({y(2)}), Seq({3.0, 4.0}))));
- TEST_DO(test_tensor_peek("a{y:(b)}", param_double, spec(x({"0", "1"}), Seq({2.0, 4.0}))));
- TEST_DO(test_tensor_peek("a{y:(b)}", param_float, spec(float_cells({x({"0", "1"})}), Seq({2.0, 4.0}))));
- }
-
- //-------------------------------------------------------------------------
-
- void test_tensor_merge(const vespalib::string &type_base, const vespalib::string &a_str,
- const vespalib::string &b_str, const vespalib::string &expect_str)
- {
- vespalib::string expr = "merge(a,b,f(x,y)(2*x+y))";
- for (bool a_float: {false, true}) {
- for (bool b_float: {false, true}) {
- bool both_float = a_float && b_float;
- vespalib::string a_expr = fmt("tensor%s(%s):%s", a_float ? "<float>" : "", type_base.c_str(), a_str.c_str());
- vespalib::string b_expr = fmt("tensor%s(%s):%s", b_float ? "<float>" : "", type_base.c_str(), b_str.c_str());
- vespalib::string expect_expr = fmt("tensor%s(%s):%s", both_float ? "<float>" : "", type_base.c_str(), expect_str.c_str());
- TensorSpec a = spec(a_expr);
- TensorSpec b = spec(b_expr);
- TensorSpec expect = spec(expect_expr);
- TEST_DO(verify_result(factory, expr, {a, b}, expect));
- }
- }
- }
-
- void test_tensor_merge() {
- TEST_DO(test_tensor_merge("x[3]", "[1,2,3]", "[4,5,6]", "[6,9,12]"));
- TEST_DO(test_tensor_merge("x{}", "{a:1,b:2,c:3}", "{b:4,c:5,d:6}", "{a:1,b:8,c:11,d:6}"));
- TEST_DO(test_tensor_merge("x{},y[2]", "{a:[1,2],b:[3,4]}", "{b:[5,6],c:[6,7]}", "{a:[1,2],b:[11,14],c:[6,7]}"));
- }
-
- //-------------------------------------------------------------------------
-
- void run_tests() {
- TEST_DO(test_tensor_create_type());
- TEST_DO(test_tensor_reduce());
- TEST_DO(test_tensor_map());
- TEST_DO(test_tensor_apply());
- TEST_DO(test_dot_product());
- TEST_DO(test_concat());
- TEST_DO(test_cell_cast());
- TEST_DO(test_rename());
- TEST_DO(test_tensor_lambda());
- TEST_DO(test_tensor_create());
- TEST_DO(test_tensor_peek());
- TEST_DO(test_tensor_merge());
- }
-};
-
-} // <unnamed>
-
-void
-TensorConformance::run_tests(const vespalib::string &module_path, const ValueBuilderFactory &factory)
-{
- TestContext ctx(module_path, factory);
- fprintf(stderr, "module path: '%s'\n", ctx.module_path.c_str());
- ctx.run_tests();
-}
-
-} // namespace
diff --git a/eval/src/vespa/eval/eval/test/tensor_conformance.h b/eval/src/vespa/eval/eval/test/tensor_conformance.h
deleted file mode 100644
index ad30b60e24c..00000000000
--- a/eval/src/vespa/eval/eval/test/tensor_conformance.h
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/eval/eval/value.h>
-#include <vespa/vespalib/stllike/string.h>
-
-namespace vespalib::eval::test {
-
-/**
- * A collection of tensor-related tests that can be run for various
- * implementations.
- **/
-struct TensorConformance {
- static void run_tests(const vespalib::string &module_path, const ValueBuilderFactory &factory);
-};
-
-} // namespace
diff --git a/eval/src/vespa/eval/eval/test/tensor_model.cpp b/eval/src/vespa/eval/eval/test/tensor_model.cpp
deleted file mode 100644
index b7ad4d0071f..00000000000
--- a/eval/src/vespa/eval/eval/test/tensor_model.cpp
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "tensor_model.h"
-#include <vespa/eval/eval/value_type.h>
-
-namespace vespalib::eval::test {
-
-Layout float_cells(const Layout &layout) {
- return Layout(CellType::FLOAT, layout.domains);
-}
-
-Domain x() { return Domain("x", {}); }
-Domain x(size_t size) { return Domain("x", size); }
-Domain x(const std::vector<vespalib::string> &keys) { return Domain("x", keys); }
-
-Domain y() { return Domain("y", {}); }
-Domain y(size_t size) { return Domain("y", size); }
-Domain y(const std::vector<vespalib::string> &keys) { return Domain("y", keys); }
-
-Domain z() { return Domain("z", {}); }
-Domain z(size_t size) { return Domain("z", size); }
-Domain z(const std::vector<vespalib::string> &keys) { return Domain("z", keys); }
-
-vespalib::string infer_type(const Layout &layout) {
- return GenSpec(layout.domains).cells(layout.cell_type).type().to_spec();
-}
-
-TensorSpec spec(const Layout &layout, const Sequence &seq) {
- return GenSpec(layout.domains).cells(layout.cell_type).seq(seq);
-}
-TensorSpec spec(const Domain &domain, const Sequence &seq) {
- return spec(Layout({domain}), seq);
-}
-TensorSpec spec(double value) {
- return GenSpec(value);
-}
-
-TensorSpec spec(const vespalib::string &type,
- const std::vector<std::pair<TensorSpec::Address, TensorSpec::Value>> &cells) {
- TensorSpec spec("tensor(" + type + ")");
-
- for (const auto &cell : cells) {
- spec.add(cell.first, cell.second);
- }
- return spec;
-}
-
-TensorSpec spec(const vespalib::string &value_expr) {
- return TensorSpec::from_expr(value_expr);
-}
-
-}
diff --git a/eval/src/vespa/eval/eval/test/tensor_model.h b/eval/src/vespa/eval/eval/test/tensor_model.h
deleted file mode 100644
index 85f6e5ccfa4..00000000000
--- a/eval/src/vespa/eval/eval/test/tensor_model.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include "gen_spec.h"
-
-namespace vespalib::eval::test {
-
-// custom op1
-struct MyOp {
- static double f(double a) {
- return ((a + 1) * 2);
- }
-};
-
-// 'a in [1,5,7,13,42]'
-struct MyIn {
- static double f(double a) {
- if ((a == 1) ||
- (a == 5) ||
- (a == 7) ||
- (a == 13) ||
- (a == 42))
- {
- return 1.0;
- } else {
- return 0.0;
- }
- }
-};
-
-using Domain = DimSpec;
-
-struct Layout {
- CellType cell_type;
- std::vector<Domain> domains;
- Layout(std::initializer_list<Domain> domains_in)
- : cell_type(CellType::DOUBLE), domains(domains_in) {}
- Layout(CellType cell_type_in, std::vector<Domain> domains_in)
- : cell_type(cell_type_in), domains(std::move(domains_in)) {}
- auto begin() const { return domains.begin(); }
- auto end() const { return domains.end(); }
- auto size() const { return domains.size(); }
- auto operator[](size_t idx) const { return domains[idx]; }
-};
-
-Layout float_cells(const Layout &layout);
-
-Domain x();
-Domain x(size_t size);
-Domain x(const std::vector<vespalib::string> &keys);
-
-Domain y();
-Domain y(size_t size);
-Domain y(const std::vector<vespalib::string> &keys);
-
-Domain z();
-Domain z(size_t size);
-Domain z(const std::vector<vespalib::string> &keys);
-
-// Infer the tensor type implied by the given layout
-vespalib::string infer_type(const Layout &layout);
-
-TensorSpec spec(const Layout &layout, const Sequence &seq);
-TensorSpec spec(const Domain &domain, const Sequence &seq);
-TensorSpec spec(double value);
-TensorSpec spec(const vespalib::string &type,
- const std::vector<std::pair<TensorSpec::Address, TensorSpec::Value>> &cells);
-TensorSpec spec(const vespalib::string &value_expr);
-
-}
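With tensor_model.{h,cpp} gone, tests build specs directly through GenSpec, roughly replacing the old spec(layout, seq) helper deleted above. A rough sketch based on the GenSpec calls visible in the deleted code (exact signatures may differ slightly):

    // Hypothetical replacement for the old spec({x(3), y(<2 mapped labels>)}, some_seq):
    GenSpec gen;
    gen.idx("x", 3)               // dense dimension x[3]
       .map("y", 2, 1)            // mapped dimension y with 2 generated labels
       .cells(CellType::DOUBLE)   // cell type previously carried by Layout
       .seq(some_seq);            // cell value sequence
    TensorSpec spec = gen;        // GenSpec converts to TensorSpec, as in the deleted helper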
diff --git a/eval/src/vespa/eval/onnx/onnx_wrapper.cpp b/eval/src/vespa/eval/onnx/onnx_wrapper.cpp
index e9758f2ddc8..3a593f491d8 100644
--- a/eval/src/vespa/eval/onnx/onnx_wrapper.cpp
+++ b/eval/src/vespa/eval/onnx/onnx_wrapper.cpp
@@ -38,16 +38,17 @@ struct TypifyOnnxElementType {
template <typename T> using Result = TypifyResultType<T>;
template <typename F> static decltype(auto) resolve(Onnx::ElementType value, F &&f) {
switch(value) {
- case Onnx::ElementType::INT8: return f(Result<int8_t>());
- case Onnx::ElementType::INT16: return f(Result<int16_t>());
- case Onnx::ElementType::INT32: return f(Result<int32_t>());
- case Onnx::ElementType::INT64: return f(Result<int64_t>());
- case Onnx::ElementType::UINT8: return f(Result<uint8_t>());
- case Onnx::ElementType::UINT16: return f(Result<uint16_t>());
- case Onnx::ElementType::UINT32: return f(Result<uint32_t>());
- case Onnx::ElementType::UINT64: return f(Result<uint64_t>());
- case Onnx::ElementType::FLOAT: return f(Result<float>());
- case Onnx::ElementType::DOUBLE: return f(Result<double>());
+ case Onnx::ElementType::INT8: return f(Result<Int8Float>());
+ case Onnx::ElementType::INT16: return f(Result<int16_t>());
+ case Onnx::ElementType::INT32: return f(Result<int32_t>());
+ case Onnx::ElementType::INT64: return f(Result<int64_t>());
+ case Onnx::ElementType::UINT8: return f(Result<uint8_t>());
+ case Onnx::ElementType::UINT16: return f(Result<uint16_t>());
+ case Onnx::ElementType::UINT32: return f(Result<uint32_t>());
+ case Onnx::ElementType::UINT64: return f(Result<uint64_t>());
+ case Onnx::ElementType::BFLOAT16: return f(Result<BFloat16>());
+ case Onnx::ElementType::FLOAT: return f(Result<float>());
+ case Onnx::ElementType::DOUBLE: return f(Result<double>());
}
abort();
}
@@ -118,32 +119,34 @@ auto convert_optimize(Onnx::Optimize optimize) {
CellType to_cell_type(Onnx::ElementType type) {
switch (type) {
- case Onnx::ElementType::INT8: [[fallthrough]];
- case Onnx::ElementType::INT16: [[fallthrough]];
- case Onnx::ElementType::UINT8: [[fallthrough]];
- case Onnx::ElementType::UINT16: [[fallthrough]];
- case Onnx::ElementType::FLOAT: return CellType::FLOAT;
- case Onnx::ElementType::INT32: [[fallthrough]];
- case Onnx::ElementType::INT64: [[fallthrough]];
- case Onnx::ElementType::UINT32: [[fallthrough]];
- case Onnx::ElementType::UINT64: [[fallthrough]];
- case Onnx::ElementType::DOUBLE: return CellType::DOUBLE;
+ case Onnx::ElementType::INT8: return CellType::INT8;
+ case Onnx::ElementType::BFLOAT16: return CellType::BFLOAT16;
+ case Onnx::ElementType::UINT8: [[fallthrough]];
+ case Onnx::ElementType::INT16: [[fallthrough]];
+ case Onnx::ElementType::UINT16: [[fallthrough]];
+ case Onnx::ElementType::FLOAT: return CellType::FLOAT;
+ case Onnx::ElementType::INT32: [[fallthrough]];
+ case Onnx::ElementType::INT64: [[fallthrough]];
+ case Onnx::ElementType::UINT32: [[fallthrough]];
+ case Onnx::ElementType::UINT64: [[fallthrough]];
+ case Onnx::ElementType::DOUBLE: return CellType::DOUBLE;
}
abort();
}
Onnx::ElementType make_element_type(ONNXTensorElementDataType element_type) {
switch (element_type) {
- case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8: return Onnx::ElementType::INT8;
- case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16: return Onnx::ElementType::INT16;
- case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32: return Onnx::ElementType::INT32;
- case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64: return Onnx::ElementType::INT64;
- case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8: return Onnx::ElementType::UINT8;
- case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16: return Onnx::ElementType::UINT16;
- case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32: return Onnx::ElementType::UINT32;
- case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64: return Onnx::ElementType::UINT64;
- case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT: return Onnx::ElementType::FLOAT;
- case ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE: return Onnx::ElementType::DOUBLE;
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8: return Onnx::ElementType::INT8;
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16: return Onnx::ElementType::INT16;
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32: return Onnx::ElementType::INT32;
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64: return Onnx::ElementType::INT64;
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8: return Onnx::ElementType::UINT8;
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16: return Onnx::ElementType::UINT16;
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32: return Onnx::ElementType::UINT32;
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64: return Onnx::ElementType::UINT64;
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16: return Onnx::ElementType::BFLOAT16;
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT: return Onnx::ElementType::FLOAT;
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE: return Onnx::ElementType::DOUBLE;
default:
throw Ort::Exception(fmt("[onnx wrapper] unsupported element type: %d", element_type), ORT_FAIL);
}
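The practical effect of the new mapping is that int8 and bfloat16 ONNX tensors keep their narrow cell type instead of being widened to float as before; restated as assertions for readability (to_cell_type is internal to this file, so this is illustrative only):

    assert(to_cell_type(Onnx::ElementType::INT8)     == CellType::INT8);      // was CellType::FLOAT
    assert(to_cell_type(Onnx::ElementType::BFLOAT16) == CellType::BFLOAT16);  // new element type
    assert(to_cell_type(Onnx::ElementType::UINT8)    == CellType::FLOAT);     // unchanged
    assert(to_cell_type(Onnx::ElementType::DOUBLE)   == CellType::DOUBLE);    // unchanged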
diff --git a/eval/src/vespa/eval/onnx/onnx_wrapper.h b/eval/src/vespa/eval/onnx/onnx_wrapper.h
index 68c31f04cdc..1f36d576c33 100644
--- a/eval/src/vespa/eval/onnx/onnx_wrapper.h
+++ b/eval/src/vespa/eval/onnx/onnx_wrapper.h
@@ -51,7 +51,7 @@ public:
};
// supported onnx element types
- enum class ElementType { INT8, INT16, INT32, INT64, UINT8, UINT16, UINT32, UINT64, FLOAT, DOUBLE };
+ enum class ElementType { INT8, INT16, INT32, INT64, UINT8, UINT16, UINT32, UINT64, BFLOAT16, FLOAT, DOUBLE };
// information about a single input or output tensor
struct TensorInfo {
diff --git a/fat-model-dependencies/pom.xml b/fat-model-dependencies/pom.xml
index a934024b4b9..50efa870e60 100644
--- a/fat-model-dependencies/pom.xml
+++ b/fat-model-dependencies/pom.xml
@@ -66,11 +66,6 @@
</dependency>
<dependency>
<groupId>com.yahoo.vespa</groupId>
- <artifactId>simplemetrics</artifactId>
- <version>${project.version}</version>
- </dependency>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
<artifactId>model-evaluation</artifactId>
<version>${project.version}</version>
</dependency>
diff --git a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java
index 9f872ac7042..05fbd457a0d 100644
--- a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java
+++ b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java
@@ -127,7 +127,7 @@ public class FileReferenceDownloader {
return true;
} else {
log.log(logLevel, "File reference '" + fileReference + "' not found at " + connection.getAddress());
- connectionPool.setNewCurrentConnection();
+ connectionPool.switchConnection();
return false;
}
} else {
diff --git a/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileDownloaderTest.java b/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileDownloaderTest.java
index a68b6527aad..1344b7afbb3 100644
--- a/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileDownloaderTest.java
+++ b/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileDownloaderTest.java
@@ -341,7 +341,7 @@ public class FileDownloaderTest {
}
@Override
- public Connection setNewCurrentConnection() {
+ public Connection switchConnection() {
return this;
}
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
index 90332536447..38d2bbe6fc0 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
@@ -16,7 +16,6 @@ import static com.yahoo.vespa.flags.FetchVector.Dimension.APPLICATION_ID;
import static com.yahoo.vespa.flags.FetchVector.Dimension.CLUSTER_ID;
import static com.yahoo.vespa.flags.FetchVector.Dimension.CLUSTER_TYPE;
import static com.yahoo.vespa.flags.FetchVector.Dimension.HOSTNAME;
-import static com.yahoo.vespa.flags.FetchVector.Dimension.NODE_TYPE;
import static com.yahoo.vespa.flags.FetchVector.Dimension.TENANT_ID;
import static com.yahoo.vespa.flags.FetchVector.Dimension.VESPA_VERSION;
import static com.yahoo.vespa.flags.FetchVector.Dimension.ZONE_ID;
@@ -46,7 +45,7 @@ public class Flags {
private static volatile TreeMap<FlagId, FlagDefinition> flags = new TreeMap<>();
public static final UnboundStringFlag ALLOCATE_OS_REQUIREMENT = defineStringFlag(
- "allocate-os-requirement", "rhel7",
+ "allocate-os-requirement", "any",
List.of("hakonhall"), "2021-01-26", "2021-07-26",
"Allocations of new nodes are limited to the given host OS. Must be one of 'rhel7', " +
"'rhel8', or 'any'",
@@ -109,20 +108,6 @@ public class Flags {
"Takes effect at redeployment",
ZONE_ID, APPLICATION_ID);
- public static final UnboundStringFlag TLS_FOR_ZOOKEEPER_CLIENT_SERVER_COMMUNICATION = defineStringFlag(
- "tls-for-zookeeper-client-server-communication", "TLS_WITH_PORT_UNIFICATION",
- List.of("hmusum"), "2020-12-02", "2021-06-01",
- "How to setup TLS for ZooKeeper client/server communication. Valid values are OFF, PORT_UNIFICATION, TLS_WITH_PORT_UNIFICATION, TLS_ONLY",
- "Takes effect on restart of config server",
- NODE_TYPE, HOSTNAME);
-
- public static final UnboundBooleanFlag USE_TLS_FOR_ZOOKEEPER_CLIENT = defineFeatureFlag(
- "use-tls-for-zookeeper-client", true,
- List.of("hmusum"), "2020-12-02", "2021-05-01",
- "Whether to use TLS for ZooKeeper clients",
- "Takes effect on restart of process",
- NODE_TYPE, HOSTNAME);
-
public static final UnboundBooleanFlag PROVISION_TENANT_ROLES = defineFeatureFlag(
"provision-tenant-roles", false,
List.of("tokle"), "2020-12-02", "2021-06-01",
@@ -182,7 +167,7 @@ public class Flags {
public static final UnboundBooleanFlag GROUP_SUSPENSION = defineFeatureFlag(
"group-suspension", true,
- List.of("hakon"), "2021-01-22", "2021-04-22",
+ List.of("hakon"), "2021-01-22", "2021-05-22",
"Allow all content nodes in a hierarchical group to suspend at the same time",
"Takes effect on the next suspension request to the Orchestrator.",
APPLICATION_ID);
@@ -207,15 +192,22 @@ public class Flags {
"Takes effect on next run of S3 log sync task in host-admin",
TENANT_ID, ZONE_ID);
+ public static final UnboundBooleanFlag CACHE_ACL = defineFeatureFlag(
+ "cache-acl", true,
+ List.of("hakon"), "2021-04-26", "2021-05-26",
+ "Whether host-admin should cache the ACL responses w/TTL 115s, or always ask config server.",
+ "Takes effect on next host-admin tick.",
+ ZONE_ID);
+
public static final UnboundIntFlag CLUSTER_CONTROLLER_MAX_HEAP_SIZE_IN_MB = defineIntFlag(
"cluster-controller-max-heap-size-in-mb", 128,
- List.of("hmusum"), "2021-02-10", "2021-04-10",
+ List.of("hmusum"), "2021-02-10", "2021-05-15",
"JVM max heap size for cluster controller in Mb",
"Takes effect when restarting cluster controller");
public static final UnboundIntFlag METRICS_PROXY_MAX_HEAP_SIZE_IN_MB = defineIntFlag(
"metrics-proxy-max-heap-size-in-mb", 256,
- List.of("hmusum"), "2021-03-01", "2021-05-01",
+ List.of("hmusum"), "2021-03-01", "2021-05-15",
"JVM max heap size for metrics proxy in Mb",
"Takes effect when restarting metrics proxy",
CLUSTER_TYPE);
@@ -228,7 +220,7 @@ public class Flags {
CLUSTER_TYPE, CLUSTER_ID);
public static final UnboundStringFlag DEDICATED_CLUSTER_CONTROLLER_FLAVOR = defineStringFlag(
- "dedicated-cluster-controller-flavor", "", List.of("jonmv"), "2021-02-25", "2021-04-25",
+ "dedicated-cluster-controller-flavor", "", List.of("jonmv"), "2021-02-25", "2021-05-25",
"Flavor as <vpu>-<memgb>-<diskgb> to use for dedicated cluster controller nodes",
"Takes effect immediately, for subsequent provisioning",
APPLICATION_ID);
@@ -254,17 +246,40 @@ public class Flags {
"Takes effect at redeployment",
ZONE_ID, APPLICATION_ID);
- public static final UnboundBooleanFlag WAIT_FOR_ALL_CONFIG_SERVERS_WHEN_DELETING_APPLICATION = defineFeatureFlag(
- "wait-for-all-config-servers-when-deleting-application", false,
- List.of("hmusum"), "2021-03-24", "2021-06-24",
- "Whether to wait for all participating servers to delete application on config servers (with timeout) on",
- "Takes effect on next delete of an application");
-
- public static final UnboundBooleanFlag REBUILD_HOST = defineFeatureFlag(
- "rebuild-host", false,
- List.of("mpolden"), "2021-04-09", "2021-06-01",
- "Whether HostRebuilder should rebuild hosts marked wantToRebuild",
- "Takes effect on next HostRebuilder maintenance run");
+ public static final UnboundBooleanFlag ENABLE_JDISC_HTTP2 = defineFeatureFlag(
+ "enable-jdisc-http2", false,
+ List.of("bjorncs", "jonmv"), "2021-04-12", "2021-08-01",
+ "Whether jdisc HTTPS connectors should allow HTTP/2",
+ "Takes effect at redeployment",
+ APPLICATION_ID);
+
+ public static final UnboundBooleanFlag ENABLE_CUSTOM_ACL_MAPPING = defineFeatureFlag(
+ "enable-custom-acl-mapping", false,
+ List.of("mortent","bjorncs"), "2021-04-13", "2021-08-01",
+ "Whether access control filters should read acl request mapping from handler or use default",
+ "Takes effect at redeployment",
+ APPLICATION_ID);
+
+ public static final UnboundBooleanFlag UPGRADE_DELL_SSD_FIRMWARE = defineFeatureFlag(
+ "upgrade_dell_ssd_firmware", false,
+ List.of("andreer"), "2021-04-13", "2021-05-13",
+ "Whether to consider upgrading Dell SSD firmware",
+ "Takes effect on next host-admin tick",
+ HOSTNAME);
+
+ public static final UnboundIntFlag NUM_DISTRIBUTOR_STRIPES = defineIntFlag(
+ "num-distributor-stripes", 0,
+ List.of("geirst", "vekterli"), "2021-04-20", "2021-07-01",
+ "Specifies the number of stripes used by the distributor. When 0, legacy single stripe behavior is used.",
+ "Takes effect after distributor restart",
+ ZONE_ID, APPLICATION_ID);
+
+ public static final UnboundBooleanFlag ENABLE_ROUTING_CORE_DUMP = defineFeatureFlag(
+ "enable-routing-core-dumps", false,
+ List.of("tokle"), "2021-04-16", "2021-08-01",
+ "Whether to enable core dumps for routing layer",
+ "Takes effect on next host-admin tick",
+ HOSTNAME);
/** WARNING: public for testing: All flags should be defined in {@link Flags}. */
public static UnboundBooleanFlag defineFeatureFlag(String flagId, boolean defaultValue, List<String> owners,
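For orientation, the flag constants added above are consumed by binding them to a FlagSource and reading the bound value, the same pattern the node-admin change later in this patch uses for CACHE_ACL. A minimal sketch, not part of the patch; the Http2Gate class is hypothetical and the flag's APPLICATION_ID dimension is ignored for brevity:

    import com.yahoo.vespa.flags.BooleanFlag;
    import com.yahoo.vespa.flags.FlagSource;
    import com.yahoo.vespa.flags.Flags;

    class Http2Gate {
        private final BooleanFlag enableHttp2;

        Http2Gate(FlagSource flagSource) {
            // Bind once, typically in a constructor, as NodeAdminStateUpdater does with Flags.CACHE_ACL.
            this.enableHttp2 = Flags.ENABLE_JDISC_HTTP2.bindTo(flagSource);
        }

        boolean http2Allowed() {
            return enableHttp2.value(); // false until the flag is set, per the default value above
        }
    }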
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
index ad096a20786..d6d8b1a26e1 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
@@ -143,7 +143,7 @@ public class PermanentFlags {
"jvm-omit-stack-trace-in-fast-throw", true,
"Controls JVM option OmitStackTraceInFastThrow (default feature flag value is true, which is the default JVM option value as well)",
"takes effect on JVM restart",
- NODE_TYPE, APPLICATION_ID);
+ CLUSTER_TYPE, APPLICATION_ID);
public static final UnboundIntFlag MAX_TRIAL_TENANTS = defineIntFlag(
"max-trial-tenants", -1,
@@ -151,6 +151,12 @@ public class PermanentFlags {
"Takes effect immediately"
);
+ public static final UnboundBooleanFlag ALLOW_DISABLE_MTLS = defineFeatureFlag(
+ "allow-disable-mtls", true,
+ "Allow application to disable client authentication",
+ "Takes effect on redeployment",
+ APPLICATION_ID);
+
private PermanentFlags() {}
private static UnboundBooleanFlag defineFeatureFlag(
diff --git a/hosted-tenant-base/pom.xml b/hosted-tenant-base/pom.xml
index 094238454c9..08648f47ce2 100644
--- a/hosted-tenant-base/pom.xml
+++ b/hosted-tenant-base/pom.xml
@@ -66,6 +66,11 @@
<type>pom</type>
<scope>import</scope>
</dependency>
+ <dependency>
+ <groupId>org.glassfish.jaxb</groupId>
+ <artifactId>jaxb-runtime</artifactId>
+ <version>2.3.2</version> <!-- 2.3.3 has BROKEN manifest -->
+ </dependency>
</dependencies>
</dependencyManagement>
diff --git a/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/AthenzAuthorizationFilter.java b/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/AthenzAuthorizationFilter.java
index 56dd3bcbf5b..dd4b62ee494 100644
--- a/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/AthenzAuthorizationFilter.java
+++ b/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/AthenzAuthorizationFilter.java
@@ -83,7 +83,7 @@ public class AthenzAuthorizationFilter extends JsonSecurityRequestFilterBase {
public Optional<ErrorResponse> filter(DiscFilterRequest request) {
try {
Optional<ResourceNameAndAction> resourceMapping =
- requestResourceMapper.getResourceNameAndAction(request.getMethod(), request.getRequestURI(), request.getQueryString());
+ requestResourceMapper.getResourceNameAndAction(request);
log.log(Level.FINE, () -> String.format("Resource mapping for '%s': %s", request, resourceMapping));
if (resourceMapping.isEmpty()) {
incrementAcceptedMetrics(request, false);
diff --git a/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/RequestResourceMapper.java b/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/RequestResourceMapper.java
index 0bf000efc00..56c52bd71c4 100644
--- a/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/RequestResourceMapper.java
+++ b/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/RequestResourceMapper.java
@@ -1,6 +1,7 @@
// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.jdisc.http.filter.security.athenz;
+import com.yahoo.jdisc.http.filter.DiscFilterRequest;
import com.yahoo.vespa.athenz.api.AthenzResourceName;
import java.util.Optional;
@@ -17,6 +18,13 @@ public interface RequestResourceMapper {
*/
Optional<ResourceNameAndAction> getResourceNameAndAction(String method, String uriPath, String uriQuery);
+ /**
+ * @return A resource name + action to use for access control, empty if no access control should be performed.
+ */
+ default Optional<ResourceNameAndAction> getResourceNameAndAction(DiscFilterRequest request) {
+ return getResourceNameAndAction(request.getMethod(), request.getRequestURI(), request.getQueryString());
+ }
+
class ResourceNameAndAction {
private final AthenzResourceName resourceName;
private final String action;
diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/AbstractResource.java b/jdisc_core/src/main/java/com/yahoo/jdisc/AbstractResource.java
index 28f0bb75b33..b561f08af92 100644
--- a/jdisc_core/src/main/java/com/yahoo/jdisc/AbstractResource.java
+++ b/jdisc_core/src/main/java/com/yahoo/jdisc/AbstractResource.java
@@ -70,7 +70,7 @@ public abstract class AbstractResource implements SharedResource {
}
@Override
- public void release() {
+ public final void release() {
initialCreationReference.close();
}
diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/ProxyRequestHandler.java b/jdisc_core/src/main/java/com/yahoo/jdisc/ProxyRequestHandler.java
index f3fa6740fbd..6e385535e40 100644
--- a/jdisc_core/src/main/java/com/yahoo/jdisc/ProxyRequestHandler.java
+++ b/jdisc_core/src/main/java/com/yahoo/jdisc/ProxyRequestHandler.java
@@ -3,6 +3,7 @@ package com.yahoo.jdisc;
import com.yahoo.jdisc.handler.CompletionHandler;
import com.yahoo.jdisc.handler.ContentChannel;
+import com.yahoo.jdisc.handler.DelegatedRequestHandler;
import com.yahoo.jdisc.handler.NullContent;
import com.yahoo.jdisc.handler.RequestHandler;
import com.yahoo.jdisc.handler.ResponseHandler;
@@ -16,7 +17,7 @@ import java.util.logging.Logger;
/**
* @author bakksjo
*/
-class ProxyRequestHandler implements RequestHandler {
+class ProxyRequestHandler implements DelegatedRequestHandler {
private static final CompletionHandler IGNORED_COMPLETION = new IgnoredCompletion();
private static final Logger log = Logger.getLogger(ProxyRequestHandler.class.getName());
@@ -71,6 +72,11 @@ class ProxyRequestHandler implements RequestHandler {
return delegate.toString();
}
+ @Override
+ public RequestHandler getDelegate() {
+ return delegate;
+ }
+
private static class ProxyResponseHandler implements ResponseHandler {
final SharedResource request;
diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/core/ContainerSnapshot.java b/jdisc_core/src/main/java/com/yahoo/jdisc/core/ContainerSnapshot.java
index 1a3f7068024..7b72e95ac09 100644
--- a/jdisc_core/src/main/java/com/yahoo/jdisc/core/ContainerSnapshot.java
+++ b/jdisc_core/src/main/java/com/yahoo/jdisc/core/ContainerSnapshot.java
@@ -9,6 +9,7 @@ import com.yahoo.jdisc.ResourceReference;
import com.yahoo.jdisc.application.BindingMatch;
import com.yahoo.jdisc.application.BindingSet;
import com.yahoo.jdisc.handler.ContentChannel;
+import com.yahoo.jdisc.handler.DelegatedRequestHandler;
import com.yahoo.jdisc.handler.NullContent;
import com.yahoo.jdisc.handler.RequestHandler;
import com.yahoo.jdisc.handler.ResponseHandler;
@@ -71,7 +72,7 @@ class ContainerSnapshot extends AbstractResource implements Container {
return timeoutMgr.timer().currentTimeMillis();
}
- private static class NullContentRequestHandler implements RequestHandler {
+ private static class NullContentRequestHandler implements DelegatedRequestHandler {
final RequestHandler delegate;
@@ -108,5 +109,10 @@ class ContainerSnapshot extends AbstractResource implements Container {
public String toString() {
return delegate.toString();
}
+
+ @Override
+ public RequestHandler getDelegate() {
+ return delegate;
+ }
}
}
diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/core/TimeoutManagerImpl.java b/jdisc_core/src/main/java/com/yahoo/jdisc/core/TimeoutManagerImpl.java
index 43cddaea803..a6d6e32df06 100644
--- a/jdisc_core/src/main/java/com/yahoo/jdisc/core/TimeoutManagerImpl.java
+++ b/jdisc_core/src/main/java/com/yahoo/jdisc/core/TimeoutManagerImpl.java
@@ -9,6 +9,7 @@ import com.yahoo.jdisc.TimeoutManager;
import com.yahoo.jdisc.Timer;
import com.yahoo.jdisc.handler.CompletionHandler;
import com.yahoo.jdisc.handler.ContentChannel;
+import com.yahoo.jdisc.handler.DelegatedRequestHandler;
import com.yahoo.jdisc.handler.RequestHandler;
import com.yahoo.jdisc.handler.ResponseHandler;
@@ -106,7 +107,7 @@ public class TimeoutManagerImpl {
}
}
- private class ManagedRequestHandler implements RequestHandler {
+ private class ManagedRequestHandler implements DelegatedRequestHandler {
final RequestHandler delegate;
@@ -152,6 +153,11 @@ public class TimeoutManagerImpl {
public String toString() {
return delegate.toString();
}
+
+ @Override
+ public RequestHandler getDelegate() {
+ return delegate;
+ }
}
private class TimeoutHandler implements ResponseHandler, TimeoutManager {
diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/handler/DelegatedRequestHandler.java b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/DelegatedRequestHandler.java
new file mode 100644
index 00000000000..540fe03accf
--- /dev/null
+++ b/jdisc_core/src/main/java/com/yahoo/jdisc/handler/DelegatedRequestHandler.java
@@ -0,0 +1,15 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.jdisc.handler;
+
+public interface DelegatedRequestHandler extends RequestHandler {
+ RequestHandler getDelegate();
+
+ default RequestHandler getDelegateRecursive() {
+ RequestHandler delegate = getDelegate();
+ while(delegate instanceof DelegatedRequestHandler) {
+ delegate = ((DelegatedRequestHandler) delegate).getDelegate();
+ }
+ return delegate;
+ }
+}
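The new interface lets infrastructure recover the innermost handler from an arbitrarily deep stack of wrappers such as ProxyRequestHandler, NullContentRequestHandler and ManagedRequestHandler above. A short sketch of that unwrapping, equivalent to the getDelegateRecursive() default method; the unwrap helper itself is hypothetical:

    // Given a possibly wrapped RequestHandler, return the innermost, non-delegating handler.
    static RequestHandler unwrap(RequestHandler handler) {
        while (handler instanceof DelegatedRequestHandler) {
            handler = ((DelegatedRequestHandler) handler).getDelegate();
        }
        return handler;
    }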
diff --git a/jdisc_jetty/pom.xml b/jdisc_jetty/pom.xml
index 89e573651e9..379255c4fd8 100644
--- a/jdisc_jetty/pom.xml
+++ b/jdisc_jetty/pom.xml
@@ -16,6 +16,18 @@
<packaging>jar</packaging>
<dependencies>
<dependency>
+ <groupId>org.eclipse.jetty.alpn</groupId>
+ <artifactId>alpn-api</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty.http2</groupId>
+ <artifactId>http2-server</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-alpn-java-server</artifactId>
+ </dependency>
+ <dependency>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-continuation</artifactId>
</dependency>
@@ -35,6 +47,11 @@
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-jmx</artifactId>
</dependency>
+ <dependency>
+ <!-- Required for ServiceLoader to function in OSGi environment. ServiceLoader is used by Jetty -->
+ <groupId>org.apache.aries.spifly</groupId>
+ <artifactId>org.apache.aries.spifly.dynamic.bundle</artifactId>
+ </dependency>
</dependencies>
<build>
<plugins>
diff --git a/jrt/src/com/yahoo/jrt/Connection.java b/jrt/src/com/yahoo/jrt/Connection.java
index 5a4478cf91e..7393d30fc81 100644
--- a/jrt/src/com/yahoo/jrt/Connection.java
+++ b/jrt/src/com/yahoo/jrt/Connection.java
@@ -32,8 +32,8 @@ class Connection extends Target {
private int state = INITIAL;
private final Queue queue = new Queue();
private final Queue myQueue = new Queue();
- private final Buffer input = new Buffer(READ_SIZE * 2);
- private final Buffer output = new Buffer(WRITE_SIZE * 2);
+ private final Buffer input = new Buffer(0x1000); // Start off with small buffer.
+ private final Buffer output = new Buffer(0x1000); // Start off with small buffer.
private int maxInputSize = 64*1024;
private int maxOutputSize = 64*1024;
private final boolean tcpNoDelay;
diff --git a/jrt/src/com/yahoo/jrt/slobrok/api/BackOff.java b/jrt/src/com/yahoo/jrt/slobrok/api/BackOff.java
index d8a6718b407..f883ac32b72 100644
--- a/jrt/src/com/yahoo/jrt/slobrok/api/BackOff.java
+++ b/jrt/src/com/yahoo/jrt/slobrok/api/BackOff.java
@@ -1,15 +1,18 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.jrt.slobrok.api;
-
class BackOff implements BackOffPolicy
{
- private double time = 0.50;
+ private static final double initialTime = 0.50;
+
+ private double time = initialTime;
+ @Override
public void reset() {
- time = 0.50;
+ time = initialTime;
}
+ @Override
public double get() {
double ret = time;
if (time < 5.0) {
@@ -24,7 +27,13 @@ class BackOff implements BackOffPolicy
return ret;
}
+ @Override
public boolean shouldWarn(double t) {
return ((t > 8.1 && t < 9.9) || (t > 29.9));
}
+
+ @Override
+ public boolean shouldInform(double t) {
+ return (t == initialTime);
+ }
}
diff --git a/jrt/src/com/yahoo/jrt/slobrok/api/BackOffPolicy.java b/jrt/src/com/yahoo/jrt/slobrok/api/BackOffPolicy.java
index f98e3456880..4cc402b1706 100644
--- a/jrt/src/com/yahoo/jrt/slobrok/api/BackOffPolicy.java
+++ b/jrt/src/com/yahoo/jrt/slobrok/api/BackOffPolicy.java
@@ -35,4 +35,12 @@ public interface BackOffPolicy
* @param t current delay value
**/
public boolean shouldWarn(double t);
+
+ /**
+ * Check if a certain delay should result in an information message logged.
+ *
+ * @return true if we should log
+ * @param t current delay value
+ **/
+ public boolean shouldInform(double t);
}
diff --git a/jrt/src/com/yahoo/jrt/slobrok/api/Mirror.java b/jrt/src/com/yahoo/jrt/slobrok/api/Mirror.java
index 1a00cd25a2c..6e39e2a3dbd 100644
--- a/jrt/src/com/yahoo/jrt/slobrok/api/Mirror.java
+++ b/jrt/src/com/yahoo/jrt/slobrok/api/Mirror.java
@@ -39,6 +39,7 @@ public class Mirror implements IMirror {
private final BackOffPolicy backOff;
private volatile int updates = 0;
private boolean requestDone = false;
+ private boolean logOnSuccess = true;
private AtomicReference<Entry[]> specs = new AtomicReference<>(new Entry[0]);
private int specsGeneration = 0;
private final TransportThread transportThread;
@@ -176,18 +177,22 @@ public class Mirror implements IMirror {
}
if (target != null && ! slobroks.contains(currSlobrok)) {
+ log.log(Level.INFO, "location broker "+currSlobrok+" removed, will disconnect and use one of: "+slobroks);
target.close();
target = null;
}
if (target == null) {
+ logOnSuccess = true;
currSlobrok = slobroks.nextSlobrokSpec();
if (currSlobrok == null) {
double delay = backOff.get();
+ Level level = Level.FINE;
+ if (backOff.shouldInform(delay)) level = Level.INFO;
+ // note: since this happens quite often during normal operation,
+ // the level is lower than the more-natural Level.WARNING:
+ if (backOff.shouldWarn(delay)) level = Level.INFO;
+ log.log(level, "no location brokers available, retrying: "+slobroks+" (in " + delay + " seconds)");
updateTask.schedule(delay);
- if (backOff.shouldWarn(delay)) {
- log.log(Level.INFO, "no location brokers available "
- + "(retry in " + delay + " seconds) for: " + slobroks);
- }
return;
}
target = orb.connect(new Spec(currSlobrok));
@@ -216,6 +221,11 @@ public class Mirror implements IMirror {
for (int idx = 0; idx < numNames; idx++) {
newSpecs[idx] = new Entry(n[idx], s[idx]);
}
+ if (logOnSuccess) {
+ log.log(Level.INFO, "successfully connected to location broker "+currSlobrok+
+ "(mirror initialized with "+numNames+" service names)");
+ logOnSuccess = false;
+ }
specs.set(newSpecs);
specsGeneration = answer.get(2).asInt32();
@@ -233,15 +243,16 @@ public class Mirror implements IMirror {
|| (req.returnValues().get(2).count() !=
req.returnValues().get(3).count()))
{
- log.log(Level.INFO, "Error when handling update from slobrok. Error: " + req.errorMessage() +
- " (error code " + req.errorCode() + ")" + ", target: " + target);
+ if (! logOnSuccess) {
+ log.log(Level.INFO, "Error with location broker "+currSlobrok+" update: " + req.errorMessage() +
+ " (error code " + req.errorCode() + ")");
+ }
target.close();
target = null;
updateTask.scheduleNow(); // try next slobrok
return;
}
-
Values answer = req.returnValues();
int diffFromGeneration = answer.get(0).asInt32();
@@ -279,7 +290,10 @@ public class Mirror implements IMirror {
newSpecs[idx++] = e;
}
}
-
+ if (logOnSuccess) {
+ log.log(Level.INFO, "successfully connected to location broker "+currSlobrok+" (mirror initialized with "+newSpecs.length+" service names)");
+ logOnSuccess = false;
+ }
specs.set(newSpecs);
specsGeneration = diffToGeneration;
diff --git a/jrt/src/com/yahoo/jrt/slobrok/api/Register.java b/jrt/src/com/yahoo/jrt/slobrok/api/Register.java
index 713cecc62d1..0da390f8bf6 100644
--- a/jrt/src/com/yahoo/jrt/slobrok/api/Register.java
+++ b/jrt/src/com/yahoo/jrt/slobrok/api/Register.java
@@ -181,6 +181,7 @@ public class Register {
reqDone = false;
boolean logOnSuccess = false;
+ boolean logOnFailure = true;
synchronized (this) {
if (req.methodName().equals(UNREGISTER_METHOD_NAME)) {
logOnSuccess = true;
@@ -191,16 +192,21 @@ public class Register {
lastRegisterSucceeded.remove(name);
} else {
final Boolean lastSucceeded = lastRegisterSucceeded.get(name);
- if (lastSucceeded == null || lastSucceeded != !req.isError()) {
+ if (lastSucceeded == null) {
+ logOnSuccess = true;
+ logOnFailure = false;
+ } else if (lastSucceeded != !req.isError()) {
logOnSuccess = true;
- lastRegisterSucceeded.put(name, !req.isError());
}
+ lastRegisterSucceeded.put(name, !req.isError());
}
}
if (req.isError()) {
if (req.errorCode() != ErrorCode.METHOD_FAILED) {
- log.log(Level.INFO, logMessagePrefix() + " failed, will disconnect: " + req.errorMessage() + " (code " + req.errorCode() + ")");
+ if (logOnFailure) {
+ log.log(Level.INFO, logMessagePrefix() + " failed, will disconnect: " + req.errorMessage() + " (code " + req.errorCode() + ")");
+ }
target.close();
target = null;
} else {
@@ -210,7 +216,6 @@ public class Register {
log.log(logOnSuccess ? Level.INFO : Level.FINE, logMessagePrefix() + " completed successfully");
backOff.reset();
}
-
req = null;
name = null;
}
@@ -219,7 +224,7 @@ public class Register {
return; // current request still in progress
}
if (target != null && ! slobroks.contains(currSlobrok)) {
- log.log(Level.INFO, "RPC server " + mySpec + ": Slobrok server " + currSlobrok + " removed, will disconnect");
+ log.log(Level.INFO, "[RPC @ " + mySpec + "] location broker " + currSlobrok + " removed, will disconnect and use one of: "+slobroks);
target.close();
target = null;
}
@@ -227,9 +232,10 @@ public class Register {
currSlobrok = slobroks.nextSlobrokSpec();
if (currSlobrok == null) {
double delay = backOff.get();
- Level level = backOff.shouldWarn(delay) ? Level.WARNING : Level.FINE;
- log.log(level, "RPC server " + mySpec + ": All Slobrok servers tried, will retry in " + delay
- + " seconds: " + slobroks);
+ Level level = Level.FINE;
+ if (backOff.shouldInform(delay)) level = Level.INFO;
+ if (backOff.shouldWarn(delay)) level = Level.WARNING;
+ log.log(level, "[RPC @ " + mySpec + "] no location brokers available, retrying: "+slobroks+" (in " + delay + " seconds)");
updateTask.schedule(delay);
return;
}
@@ -247,8 +253,8 @@ public class Register {
}
if (logFine) {
- log.log(Level.FINE, "RPC server " + mySpec + ": Connect to Slobrok server " + currSlobrok +
- " and reregister all Slobrok names: " + namesString);
+ log.log(Level.FINE, "[RPC @ " + mySpec + "] Connect to location broker " + currSlobrok +
+ " and reregister all service names: " + namesString);
}
}
@@ -261,7 +267,7 @@ public class Register {
req = new Request(REGISTER_METHOD_NAME);
} else {
pending.addAll(names);
- log.log(Level.FINE, "RPC server " + mySpec + ": Reregister all Slobrok names in 30 seconds: " + names);
+ log.log(Level.FINE, "[RPC @ " + mySpec + "] Reregister all service names in 30 seconds: " + names);
updateTask.schedule(30.0);
return;
}
@@ -274,9 +280,9 @@ public class Register {
}
private String logMessagePrefix() {
- return "RPC server " + mySpec
- + (req.methodName().equals(UNREGISTER_METHOD_NAME) ? " unregistering " : " registering ")
- + name + " with Slobrok server " + currSlobrok;
+ return "[RPC @ " + mySpec + "] "
+ + (req.methodName().equals(UNREGISTER_METHOD_NAME) ? "unregistering " : "registering ")
+ + name + " with location broker " + currSlobrok;
}
private synchronized void handleRpcList(Request req) {
diff --git a/jrt/src/com/yahoo/jrt/slobrok/api/SlobrokList.java b/jrt/src/com/yahoo/jrt/slobrok/api/SlobrokList.java
index b524c25d8ff..10d8923d9f5 100644
--- a/jrt/src/com/yahoo/jrt/slobrok/api/SlobrokList.java
+++ b/jrt/src/com/yahoo/jrt/slobrok/api/SlobrokList.java
@@ -48,7 +48,7 @@ public class SlobrokList {
public boolean contains(String slobrok) {
checkUpdate();
for (String s : slobroks) {
- if (s == slobrok) return true;
+ if (s.equals(slobrok)) return true;
}
return false;
}
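The contains() fix above replaces reference equality with value equality, which matters because the spec string passed in is generally not the same String object as the one stored in the list. A minimal illustration; the spec literal is only an example:

    String stored = new String("tcp/slobrok1:19099"); // e.g. built from config, a distinct object
    String lookup = "tcp/slobrok1:19099";
    boolean sameObject = (stored == lookup);     // false: different String instances
    boolean sameValue  = stored.equals(lookup);  // true: what contains() should test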
diff --git a/linguistics/src/main/java/com/yahoo/language/LinguisticsCase.java b/linguistics/src/main/java/com/yahoo/language/LinguisticsCase.java
index 7a3f5fa4055..174d16fbd67 100644
--- a/linguistics/src/main/java/com/yahoo/language/LinguisticsCase.java
+++ b/linguistics/src/main/java/com/yahoo/language/LinguisticsCase.java
@@ -14,13 +14,14 @@ import java.util.Locale;
public class LinguisticsCase {
/**
- * <p>The lower casing method to use in Vespa when doing language independent processing of natural language data.
- * It is placed in a single place to ensure symmetry between e.g. query processing and indexing.</p>
- * <p>Return a lowercased version of the given string. Since this is language independent, this is more of a case
- * normalization operation than lowercasing.</p>
+ * The lower casing method to use in Vespa when doing language independent processing of natural language data.
+ * It is placed in a single place to ensure symmetry between e.g. query processing and indexing.
*
- * @param in The string to lowercase.
- * @return A string containing only lowercase character.
+ * Return a lowercased version of the given string. Since this is language independent, this is more of a case
+ * normalization operation than lowercasing.
+ *
+ * @param in the string to lowercase
+ * @return a string containing only lowercase characters
*/
public static String toLowerCase(String in) {
// def is picked from http://docs.oracle.com/javase/6/docs/api/java/lang/String.html#toLowerCase%28%29
diff --git a/linguistics/src/main/java/com/yahoo/language/LocaleFactory.java b/linguistics/src/main/java/com/yahoo/language/LocaleFactory.java
index 2760f9e673e..05b57937625 100644
--- a/linguistics/src/main/java/com/yahoo/language/LocaleFactory.java
+++ b/linguistics/src/main/java/com/yahoo/language/LocaleFactory.java
@@ -2,6 +2,7 @@
package com.yahoo.language;
import java.util.Locale;
+import java.util.Objects;
/**
* @author Simon Thoresen Hult
@@ -10,25 +11,20 @@ public final class LocaleFactory {
private static final Locale UNKNOWN = new Locale("", "", "");
- private LocaleFactory() {
- // hide
- }
+ private LocaleFactory() {}
/**
* Implements a simple parser for RFC5646 language tags. The language tag is parsed into a Locale.
*
- * @param tag The language tag to parse.
- * @return The corresponding Locale.
+ * @param tag the language tag to parse
+ * @return the corresponding Locale
*/
- @SuppressWarnings("ConstantConditions")
public static Locale fromLanguageTag(String tag) {
- // TODO: Should be replaced by return Locale.forLanguageTag(tag); ?
+ Objects.requireNonNull(tag, "tag cannot be null");
- tag.getClass(); // throws NullPointerException
tag = tag.trim();
- if (tag.isEmpty()) {
- return UNKNOWN;
- }
+ if (tag.isEmpty()) return UNKNOWN;
+
String language = "";
String region = "";
String script = "";
@@ -48,9 +44,7 @@ public final class LocaleFactory {
}
}
}
- if (language.isEmpty()) {
- return UNKNOWN;
- }
+ if (language.isEmpty()) return UNKNOWN;
return new Locale(language, region, script);
}
diff --git a/linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpLinguistics.java b/linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpLinguistics.java
index 0837b25c151..a5f77fca0af 100644
--- a/linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpLinguistics.java
+++ b/linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpLinguistics.java
@@ -12,6 +12,8 @@ import java.util.logging.Level;
/**
* Returns a linguistics implementation based on OpenNlp,
* and (optionally, default on) Optimaize for language detection.
+ *
+ * @author bratseth
*/
public class OpenNlpLinguistics extends SimpleLinguistics {
diff --git a/linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpTokenizer.java b/linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpTokenizer.java
index 93599fa7dbe..e1185cb2457 100644
--- a/linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpTokenizer.java
+++ b/linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpTokenizer.java
@@ -3,21 +3,32 @@ package com.yahoo.language.opennlp;
import com.yahoo.language.Language;
import com.yahoo.language.LinguisticsCase;
-import com.yahoo.language.process.*;
-import com.yahoo.language.simple.*;
+import com.yahoo.language.process.Normalizer;
+import com.yahoo.language.process.StemMode;
+import com.yahoo.language.process.Token;
+import com.yahoo.language.process.TokenType;
+import com.yahoo.language.process.Tokenizer;
+import com.yahoo.language.process.Transformer;
+import com.yahoo.language.simple.SimpleNormalizer;
+import com.yahoo.language.simple.SimpleToken;
+import com.yahoo.language.simple.SimpleTokenType;
+import com.yahoo.language.simple.SimpleTokenizer;
+import com.yahoo.language.simple.SimpleTransformer;
import opennlp.tools.stemmer.Stemmer;
import opennlp.tools.stemmer.snowball.SnowballStemmer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
-import java.util.logging.Logger;
-import java.util.logging.Level;
+/**
+ * Tokenizer using OpenNlp
+ *
+ * @author matskin
+ */
public class OpenNlpTokenizer implements Tokenizer {
private final static int SPACE_CODE = 32;
- private static final Logger log = Logger.getLogger(OpenNlpTokenizer.class.getName());
private final Normalizer normalizer;
private final Transformer transformer;
private final SimpleTokenizer simpleTokenizer;
@@ -35,10 +46,8 @@ public class OpenNlpTokenizer implements Tokenizer {
@Override
public Iterable<Token> tokenize(String input, Language language, StemMode stemMode, boolean removeAccents) {
if (input.isEmpty()) return Collections.emptyList();
- Stemmer stemmer = getStemmerForLanguage(language, stemMode);
- if (stemmer == null) {
- return simpleTokenizer.tokenize(input, language, stemMode, removeAccents);
- }
+ Stemmer stemmer = stemmerFor(language, stemMode);
+ if (stemmer == null) return simpleTokenizer.tokenize(input, language, stemMode, removeAccents);
List<Token> tokens = new ArrayList<>();
int nextCode = input.codePointAt(0);
@@ -49,9 +58,7 @@ public class OpenNlpTokenizer implements Tokenizer {
if (!prevType.isIndexable() || !nextType.isIndexable()) {
String original = input.substring(prev, next);
String token = processToken(original, language, stemMode, removeAccents, stemmer);
- tokens.add(new SimpleToken(original).setOffset(prev)
- .setType(prevType)
- .setTokenString(token));
+ tokens.add(new SimpleToken(original).setOffset(prev).setType(prevType).setTokenString(token));
prev = next;
prevType = nextType;
}
@@ -60,89 +67,45 @@ public class OpenNlpTokenizer implements Tokenizer {
return tokens;
}
- private Stemmer getStemmerForLanguage(Language language, StemMode stemMode) {
- log.log(Level.FINEST, () -> "getStemmerForLanguage '"+language+"' mode: "+stemMode);
- if (language == null || Language.ENGLISH.equals(language) || StemMode.NONE.equals(stemMode)) {
- return null;
- }
- SnowballStemmer.ALGORITHM alg;
- switch (language) {
- case DANISH:
- alg = SnowballStemmer.ALGORITHM.DANISH;
- break;
- case DUTCH:
- alg = SnowballStemmer.ALGORITHM.DUTCH;
- break;
- case FINNISH:
- alg = SnowballStemmer.ALGORITHM.FINNISH;
- break;
- case FRENCH:
- alg = SnowballStemmer.ALGORITHM.FRENCH;
- break;
- case GERMAN:
- alg = SnowballStemmer.ALGORITHM.GERMAN;
- break;
- case HUNGARIAN:
- alg = SnowballStemmer.ALGORITHM.HUNGARIAN;
- break;
- case IRISH:
- alg = SnowballStemmer.ALGORITHM.IRISH;
- break;
- case ITALIAN:
- alg = SnowballStemmer.ALGORITHM.ITALIAN;
- break;
- case NORWEGIAN_BOKMAL:
- case NORWEGIAN_NYNORSK:
- alg = SnowballStemmer.ALGORITHM.NORWEGIAN;
- break;
- case PORTUGUESE:
- alg = SnowballStemmer.ALGORITHM.PORTUGUESE;
- break;
- case ROMANIAN:
- alg = SnowballStemmer.ALGORITHM.ROMANIAN;
- break;
- case RUSSIAN:
- alg = SnowballStemmer.ALGORITHM.RUSSIAN;
- break;
- case SPANISH:
- alg = SnowballStemmer.ALGORITHM.SPANISH;
- break;
- case SWEDISH:
- alg = SnowballStemmer.ALGORITHM.SWEDISH;
- break;
- case TURKISH:
- alg = SnowballStemmer.ALGORITHM.TURKISH;
- break;
- case ENGLISH:
- alg = SnowballStemmer.ALGORITHM.ENGLISH;
- break;
- default:
- return null;
-
- }
- return new SnowballStemmer(alg);
- }
-
private String processToken(String token, Language language, StemMode stemMode, boolean removeAccents,
Stemmer stemmer) {
- final String original = token;
- log.log(Level.FINEST, () -> "processToken '"+original+"'");
token = normalizer.normalize(token);
token = LinguisticsCase.toLowerCase(token);
if (removeAccents)
token = transformer.accentDrop(token, language);
- if (stemMode != StemMode.NONE) {
- final String oldToken = token;
- token = doStemming(token, stemmer);
- final String newToken = token;
- log.log(Level.FINEST, () -> "stem '"+oldToken+"' to '"+newToken+"'");
- }
- final String result = token;
- log.log(Level.FINEST, () -> "processed token is: "+result);
- return result;
+ if (stemMode != StemMode.NONE)
+ token = stemmer.stem(token).toString();
+ return token;
+ }
+
+ private Stemmer stemmerFor(Language language, StemMode stemMode) {
+ if (language == null || language == Language.ENGLISH || stemMode == StemMode.NONE) return null;
+ SnowballStemmer.ALGORITHM algorithm = algorithmFor(language);
+ if (algorithm == null) return null;
+ return new SnowballStemmer(algorithm);
}
- private String doStemming(String token, Stemmer stemmer) {
- return stemmer.stem(token).toString();
+ private SnowballStemmer.ALGORITHM algorithmFor(Language language) {
+ switch (language) {
+ case DANISH: return SnowballStemmer.ALGORITHM.DANISH;
+ case DUTCH: return SnowballStemmer.ALGORITHM.DUTCH;
+ case FINNISH: return SnowballStemmer.ALGORITHM.FINNISH;
+ case FRENCH: return SnowballStemmer.ALGORITHM.FRENCH;
+ case GERMAN: return SnowballStemmer.ALGORITHM.GERMAN;
+ case HUNGARIAN: return SnowballStemmer.ALGORITHM.HUNGARIAN;
+ case IRISH: return SnowballStemmer.ALGORITHM.IRISH;
+ case ITALIAN: return SnowballStemmer.ALGORITHM.ITALIAN;
+ case NORWEGIAN_BOKMAL: return SnowballStemmer.ALGORITHM.NORWEGIAN;
+ case NORWEGIAN_NYNORSK: return SnowballStemmer.ALGORITHM.NORWEGIAN;
+ case PORTUGUESE: return SnowballStemmer.ALGORITHM.PORTUGUESE;
+ case ROMANIAN: return SnowballStemmer.ALGORITHM.ROMANIAN;
+ case RUSSIAN: return SnowballStemmer.ALGORITHM.RUSSIAN;
+ case SPANISH: return SnowballStemmer.ALGORITHM.SPANISH;
+ case SWEDISH: return SnowballStemmer.ALGORITHM.SWEDISH;
+ case TURKISH: return SnowballStemmer.ALGORITHM.TURKISH;
+ case ENGLISH: return SnowballStemmer.ALGORITHM.ENGLISH;
+ default: return null;
+ }
}
+
}
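The refactored tokenizer still creates one SnowballStemmer per language (English and StemMode.NONE fall back to the simple tokenizer) and stems with stem(token).toString(). A self-contained sketch of that path using the same opennlp-tools API the class imports; the StemSketch class and the input word are illustrative only:

    import opennlp.tools.stemmer.Stemmer;
    import opennlp.tools.stemmer.snowball.SnowballStemmer;

    class StemSketch {
        public static void main(String[] args) {
            // stemmerFor() would pick NORWEGIAN for both Bokmål and Nynorsk.
            Stemmer stemmer = new SnowballStemmer(SnowballStemmer.ALGORITHM.NORWEGIAN);
            // processToken() normalizes and lowercases before stemming, so input here is already lowercase.
            System.out.println(stemmer.stem("naturlige"));
        }
    }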
diff --git a/linguistics/src/main/java/com/yahoo/language/simple/SimpleLinguistics.java b/linguistics/src/main/java/com/yahoo/language/simple/SimpleLinguistics.java
index 389926f1c1b..e1a04b2985d 100644
--- a/linguistics/src/main/java/com/yahoo/language/simple/SimpleLinguistics.java
+++ b/linguistics/src/main/java/com/yahoo/language/simple/SimpleLinguistics.java
@@ -33,7 +33,6 @@ public class SimpleLinguistics implements Linguistics {
private final GramSplitter gramSplitter;
@Inject
- @SuppressWarnings("deprecation")
public SimpleLinguistics() {
this.normalizer = new SimpleNormalizer();
this.transformer = new SimpleTransformer();
diff --git a/linguistics/src/test/java/com/yahoo/language/detect/AbstractDetectorTestCase.java b/linguistics/src/test/java/com/yahoo/language/detect/AbstractDetectorTestCase.java
index c0f1b92a6bf..f2891a0c5d5 100644
--- a/linguistics/src/test/java/com/yahoo/language/detect/AbstractDetectorTestCase.java
+++ b/linguistics/src/test/java/com/yahoo/language/detect/AbstractDetectorTestCase.java
@@ -6,6 +6,7 @@ import org.junit.Test;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
import static org.junit.Assert.*;
@@ -15,7 +16,7 @@ import static org.junit.Assert.*;
public class AbstractDetectorTestCase {
private static final Detection DETECTION = new Detection(Language.ARABIC, "encoding", true);
- private static final Charset UTF8 = Charset.forName("UTF-8");
+ private static final Charset UTF8 = StandardCharsets.UTF_8;
@Test
public void requireThatDetectStringForwardsUtf8Bytes() {
diff --git a/linguistics/src/test/java/com/yahoo/language/opennlp/OpenNlpTokenizationTestCase.java b/linguistics/src/test/java/com/yahoo/language/opennlp/OpenNlpTokenizationTestCase.java
index fb313e2d281..2239a62f840 100644
--- a/linguistics/src/test/java/com/yahoo/language/opennlp/OpenNlpTokenizationTestCase.java
+++ b/linguistics/src/test/java/com/yahoo/language/opennlp/OpenNlpTokenizationTestCase.java
@@ -9,7 +9,6 @@ import org.junit.Test;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
@@ -24,7 +23,7 @@ import static org.junit.Assert.fail;
/**
* Test of tokenization, with stemming and accent removal
*
- * @author <a href="mailto:mathiasm@yahoo-inc.com">Mathias Mølster Lidal</a>
+ * @author matskin
*/
public class OpenNlpTokenizationTestCase {
@@ -33,61 +32,54 @@ public class OpenNlpTokenizationTestCase {
@Test
public void testTokenizer() {
assertTokenize("This is a test, 123",
- Arrays.asList("this", "is", "a", "test", "123"),
- Arrays.asList("This", " ", "is", " ", "a", " ", "test", ",", " ", "123"));
+ List.of("this", "is", "a", "test", "123"),
+ List.of("This", " ", "is", " ", "a", " ", "test", ",", " ", "123"));
}
@Test
public void testUnderScoreTokenization() {
- assertTokenize("ugcapi_1", Language.ENGLISH, StemMode.SHORTEST, true, Arrays.asList("ugcapi", "1"), null);
+ assertTokenize("ugcapi_1", Language.ENGLISH, StemMode.SHORTEST, true, List.of("ugcapi", "1"), null);
}
@Test
public void testPhrasesWithPunctuation() {
assertTokenize("PHY_101.html a space/time or space-time course", Language.ENGLISH, StemMode.NONE,
false,
- Arrays.asList("phy", "101", "html", "a", "space", "time", "or", "space", "time", "course"),
+ List.of("phy", "101", "html", "a", "space", "time", "or", "space", "time", "course"),
null);
- assertTokenize("PHY_101.", Language.ENGLISH, StemMode.NONE, false, Arrays.asList("phy", "101"), null);
- assertTokenize("101.3", Language.ENGLISH, StemMode.NONE, false, Arrays.asList("101", "3"), null);
+ assertTokenize("PHY_101.", Language.ENGLISH, StemMode.NONE, false, List.of("phy", "101"), null);
+ assertTokenize("101.3", Language.ENGLISH, StemMode.NONE, false, List.of("101", "3"), null);
}
@Test
public void testDoubleWidthTokenization() {
// "sony"
assertTokenize("\uFF53\uFF4F\uFF4E\uFF59", Language.ENGLISH, StemMode.NONE, false,
- Arrays.asList("sony"), null);
+ List.of("sony"), null);
assertTokenize("\uFF53\uFF4F\uFF4E\uFF59", Language.ENGLISH, StemMode.SHORTEST, false,
- Arrays.asList("sony"), null);
+ List.of("sony"), null);
// "SONY"
assertTokenize("\uFF33\uFF2F\uFF2E\uFF39", Language.ENGLISH, StemMode.NONE, false,
- Arrays.asList("sony"), null);
+ List.of("sony"), null);
assertTokenize("\uFF33\uFF2F\uFF2E\uFF39", Language.ENGLISH, StemMode.SHORTEST, false,
- Arrays.asList("sony"), null);
+ List.of("sony"), null);
// "on"
assertTokenize("\uFF4F\uFF4E", Language.ENGLISH, StemMode.NONE, false,
- Arrays.asList("on"), null);
+ List.of("on"), null);
assertTokenize("\uFF4F\uFF4E", Language.ENGLISH, StemMode.SHORTEST, false,
- Arrays.asList("on"), null);
+ List.of("on"), null);
// "ON"
assertTokenize("\uFF2F\uFF2E", Language.ENGLISH, StemMode.NONE, false,
- Arrays.asList("on"), null);
+ List.of("on"), null);
assertTokenize("\uFF2F\uFF2E", Language.ENGLISH, StemMode.SHORTEST, false,
- Arrays.asList("on"), null);
+ List.of("on"), null);
assertTokenize("наименование", Language.RUSSIAN, StemMode.SHORTEST, false,
- Arrays.asList("наименован"), null);
+ List.of("наименован"), null);
}
@Test
public void testLargeTextTokenization() {
- StringBuilder sb = new StringBuilder();
- String s = "teststring ";
- for (int i = 0; i < 100000; i++) {
- sb.append(s);
- }
-
- String input = sb.toString();
-
+ String input = "teststring ".repeat(100000);
int numTokens = 0;
List<Long> pos = new ArrayList<>();
for (Token t : tokenizer.tokenize(input, Language.ENGLISH, StemMode.NONE, false)) {
@@ -103,11 +95,8 @@ public class OpenNlpTokenizationTestCase {
@Test
public void testLargeTokenGuard() {
- StringBuilder str = new StringBuilder();
- for (int i = 0; i < 128 * 256; i++) {
- str.append("ab");
- }
- Iterator<Token> it = tokenizer.tokenize(str.toString(), Language.ENGLISH, StemMode.NONE, false).iterator();
+ String input = "ab".repeat(128 * 256);
+ Iterator<Token> it = tokenizer.tokenize(input, Language.ENGLISH, StemMode.NONE, false).iterator();
assertTrue(it.hasNext());
assertNotNull(it.next().getTokenString());
assertFalse(it.hasNext());
@@ -203,15 +192,15 @@ public class OpenNlpTokenizationTestCase {
}
/**
- * <p>Compare the results of running an input string through the tokenizer with an "index" truth, and an optional
- * "orig" truth.</p>
+ * Compare the results of running an input string through the tokenizer with an "index" truth, and an optional
+ * "orig" truth.
*
- * @param input The text to process, passed to tokenizer.
- * @param language The language tag, passed to tokenizer.
- * @param stemMode If stemMode != NONE, test will silently succeed if tokenizer does not do stemming.
- * @param accentDrop Passed to the tokenizer.
- * @param indexed Compared to the "TokenString" result from the tokenizer.
- * @param orig Compared to the "Orig" result from the tokenizer.
+ * @param input the text to process, passed to tokenizer
+ * @param language the language tag, passed to tokenizer
+ * @param stemMode if stemMode != NONE, test will silently succeed if tokenizer does not do stemming
+ * @param accentDrop passed to the tokenizer
+ * @param indexed compared to the "TokenString" result from the tokenizer
+ * @param orig compared to the "Orig" result from the tokenizer
*/
private void assertTokenize(String input, Language language, StemMode stemMode, boolean accentDrop,
List<String> indexed, List<String> orig) {
diff --git a/linguistics/src/test/java/com/yahoo/language/process/NormalizationTestCase.java b/linguistics/src/test/java/com/yahoo/language/process/NormalizationTestCase.java
index daa3e2a4541..524f1b5b6fe 100644
--- a/linguistics/src/test/java/com/yahoo/language/process/NormalizationTestCase.java
+++ b/linguistics/src/test/java/com/yahoo/language/process/NormalizationTestCase.java
@@ -7,7 +7,7 @@ import org.junit.Test;
import static org.junit.Assert.assertEquals;
/**
- * @author <a href="mailto:mathiasm@yahoo-inc.com">Mathias Mølster Lidal</a>
+ * @author Mathias Mølster Lidal
*/
public class NormalizationTestCase {
diff --git a/linguistics/src/test/java/com/yahoo/language/process/StemListTestCase.java b/linguistics/src/test/java/com/yahoo/language/process/StemListTestCase.java
index 43b4b711b2b..2d3ac291716 100644
--- a/linguistics/src/test/java/com/yahoo/language/process/StemListTestCase.java
+++ b/linguistics/src/test/java/com/yahoo/language/process/StemListTestCase.java
@@ -10,7 +10,7 @@ import org.junit.Test;
/**
* Functional testing of StemList.
*
- * @author steinar
+ * @author Steinar Knutsen
*/
public class StemListTestCase {
diff --git a/linguistics/src/test/java/com/yahoo/language/process/TokenTypeTestCase.java b/linguistics/src/test/java/com/yahoo/language/process/TokenTypeTestCase.java
index 11263ccafe8..a2f51ee7367 100644
--- a/linguistics/src/test/java/com/yahoo/language/process/TokenTypeTestCase.java
+++ b/linguistics/src/test/java/com/yahoo/language/process/TokenTypeTestCase.java
@@ -11,7 +11,6 @@ import static org.junit.Assert.*;
public class TokenTypeTestCase {
@Test
- @SuppressWarnings("deprecation")
public void requireThatValueOfWorks() {
for (TokenType type : TokenType.values()) {
assertEquals(type, TokenType.valueOf(type.getValue()));
@@ -19,7 +18,6 @@ public class TokenTypeTestCase {
}
@Test
- @SuppressWarnings("deprecation")
public void requireThatValueOfUnknownIsUnknown() {
assertEquals(TokenType.UNKNOWN, TokenType.valueOf(-1));
}
diff --git a/linguistics/src/test/java/com/yahoo/language/process/TokenizationTestCase.java b/linguistics/src/test/java/com/yahoo/language/process/TokenizationTestCase.java
index 041a27fb1fc..f99dc5633f5 100644
--- a/linguistics/src/test/java/com/yahoo/language/process/TokenizationTestCase.java
+++ b/linguistics/src/test/java/com/yahoo/language/process/TokenizationTestCase.java
@@ -22,7 +22,7 @@ import static org.junit.Assert.fail;
/**
* Test of tokenization, with stemming and accent removal
*
- * @author <a href="mailto:mathiasm@yahoo-inc.com">Mathias Mølster Lidal</a>
+ * @author Mathias Mølster Lidal
*/
public class TokenizationTestCase {
@@ -54,26 +54,24 @@ public class TokenizationTestCase {
public void testDoubleWidthTokenization() {
// "sony"
assertTokenize("\uFF53\uFF4F\uFF4E\uFF59", Language.ENGLISH, StemMode.NONE, false,
- Arrays.asList("sony"), null);
+ List.of("sony"), null);
assertTokenize("\uFF53\uFF4F\uFF4E\uFF59", Language.ENGLISH, StemMode.SHORTEST, false,
- Arrays.asList("sony"), null);
+ List.of("sony"), null);
// "SONY"
assertTokenize("\uFF33\uFF2F\uFF2E\uFF39", Language.ENGLISH, StemMode.NONE, false,
- Arrays.asList("sony"), null);
+ List.of("sony"), null);
assertTokenize("\uFF33\uFF2F\uFF2E\uFF39", Language.ENGLISH, StemMode.SHORTEST, false,
- Arrays.asList("sony"), null);
+ List.of("sony"), null);
// "on"
assertTokenize("\uFF4F\uFF4E", Language.ENGLISH, StemMode.NONE, false,
- Arrays.asList("on"), null);
+ List.of("on"), null);
assertTokenize("\uFF4F\uFF4E", Language.ENGLISH, StemMode.SHORTEST, false,
- Arrays.asList("on"), null);
+ List.of("on"), null);
// "ON"
assertTokenize("\uFF2F\uFF2E", Language.ENGLISH, StemMode.NONE, false,
- Arrays.asList("on"), null);
+ List.of("on"), null);
assertTokenize("\uFF2F\uFF2E", Language.ENGLISH, StemMode.SHORTEST, false,
- Arrays.asList("on"), null);
-
-
+ List.of("on"), null);
}
@Test
diff --git a/linguistics/src/test/java/com/yahoo/language/simple/SimpleTokenTypeTestCase.java b/linguistics/src/test/java/com/yahoo/language/simple/SimpleTokenTypeTestCase.java
index fc69fc998a7..fe25e5fe17f 100644
--- a/linguistics/src/test/java/com/yahoo/language/simple/SimpleTokenTypeTestCase.java
+++ b/linguistics/src/test/java/com/yahoo/language/simple/SimpleTokenTypeTestCase.java
@@ -9,7 +9,7 @@ import static org.junit.Assert.assertEquals;
/**
* Check simple token types.
*
- * @author <a href="mailto:steinar@yahoo-inc.com">Steinar Knutsen</a>
+ * @author Steinar Knutsen
*/
public class SimpleTokenTypeTestCase {
diff --git a/linguistics/src/test/java/com/yahoo/language/simple/SimpleTokenizerTestCase.java b/linguistics/src/test/java/com/yahoo/language/simple/SimpleTokenizerTestCase.java
index 2cebfe26dc7..4c2a8f9f591 100644
--- a/linguistics/src/test/java/com/yahoo/language/simple/SimpleTokenizerTestCase.java
+++ b/linguistics/src/test/java/com/yahoo/language/simple/SimpleTokenizerTestCase.java
@@ -6,7 +6,7 @@ import com.yahoo.language.process.StemMode;
import org.junit.Test;
/**
- * @author <a href="mailto:steinar@yahoo-inc.com">Steinar Knutsen</a>
+ * @author Steinar Knutsen
* @author bratseth
*/
public class SimpleTokenizerTestCase extends AbstractTokenizerTestCase {
diff --git a/metrics-proxy/pom.xml b/metrics-proxy/pom.xml
index e776aca271e..a7579aeb2ea 100644
--- a/metrics-proxy/pom.xml
+++ b/metrics-proxy/pom.xml
@@ -65,12 +65,6 @@
<scope>provided</scope>
</dependency>
<dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>container-di</artifactId>
- <version>${project.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
<!-- Not directly used in this module, but needed to get Import-Packages for JDK packages it exports. -->
<groupId>com.yahoo.vespa</groupId>
<artifactId>jdisc_core</artifactId>
diff --git a/metrics/pom.xml b/metrics/pom.xml
index 4121467985c..a021454a049 100644
--- a/metrics/pom.xml
+++ b/metrics/pom.xml
@@ -8,45 +8,14 @@
<version>7-SNAPSHOT</version>
<relativePath>../parent/pom.xml</relativePath>
</parent>
- <groupId>com.yahoo.vespa</groupId>
<artifactId>metrics</artifactId>
<packaging>jar</packaging>
<version>7-SNAPSHOT</version>
<name>metrics</name>
- <description></description>
<dependencies>
<dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>vespajlib</artifactId>
- <version>${project.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>config-bundle</artifactId>
- <version>${project.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>jdisc_core</artifactId>
- <version>${project.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>container-di</artifactId>
- <version>${project.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
<groupId>com.yahoo.vespa</groupId>
- <artifactId>container-core</artifactId>
+ <artifactId>config-lib</artifactId>
<version>${project.version}</version>
<scope>provided</scope>
</dependency>
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorImpl.java
index 20c0604b5dc..a3cc7042c47 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorImpl.java
@@ -5,8 +5,6 @@ import com.yahoo.vespa.hosted.node.admin.configserver.ConfigServerApi;
import com.yahoo.vespa.hosted.node.admin.configserver.ConnectionException;
import com.yahoo.vespa.hosted.node.admin.configserver.HttpException;
import com.yahoo.vespa.hosted.node.admin.nodeadmin.ConvergenceException;
-import com.yahoo.vespa.orchestrator.restapi.HostApi;
-import com.yahoo.vespa.orchestrator.restapi.HostSuspensionApi;
import com.yahoo.vespa.orchestrator.restapi.wire.BatchOperationResult;
import com.yahoo.vespa.orchestrator.restapi.wire.UpdateHostResponse;
@@ -32,9 +30,9 @@ public class OrchestratorImpl implements Orchestrator {
// TODO: Find a way to avoid duplicating this (present in orchestrator's services.xml also).
private static final String ORCHESTRATOR_PATH_PREFIX = "/orchestrator";
static final String ORCHESTRATOR_PATH_PREFIX_HOST_API
- = ORCHESTRATOR_PATH_PREFIX + HostApi.PATH_PREFIX;
+ = ORCHESTRATOR_PATH_PREFIX + "/v1/hosts";
static final String ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API
- = ORCHESTRATOR_PATH_PREFIX + HostSuspensionApi.PATH_PREFIX;
+ = ORCHESTRATOR_PATH_PREFIX + "/v1/suspensions/hosts";
private final ConfigServerApi configServerApi;
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/ContainerOperations.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/ContainerOperations.java
index ed1c035f543..89e13294a71 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/ContainerOperations.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/ContainerOperations.java
@@ -45,20 +45,21 @@ public interface ContainerOperations {
CommandResult executeCommandInNetworkNamespace(NodeAgentContext context, String... command);
- /** Resume node. Resuming a node means that it is ready to take on traffic. */
- void resumeNode(NodeAgentContext context);
+ /** Resume node and return the output of the resume command. Resuming a node
+ * means that it is ready to take on traffic. */
+ String resumeNode(NodeAgentContext context);
/**
- * Suspend node. Suspending a node means the node should be taken temporarly offline,
+ * Suspend node and return output. Suspending a node means the node should be taken temporarily offline,
* such that maintenance of the node can be done (upgrading, rebooting, etc).
*/
- void suspendNode(NodeAgentContext context);
+ String suspendNode(NodeAgentContext context);
- void restartVespa(NodeAgentContext context);
+ String restartVespa(NodeAgentContext context);
- void startServices(NodeAgentContext context);
+ String startServices(NodeAgentContext context);
- void stopServices(NodeAgentContext context);
+ String stopServices(NodeAgentContext context);
Optional<ContainerStats> getContainerStats(NodeAgentContext context);
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/ContainerOperationsImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/ContainerOperationsImpl.java
index 480e78c2399..4f57a03786b 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/ContainerOperationsImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/ContainerOperationsImpl.java
@@ -243,28 +243,28 @@ public class ContainerOperationsImpl implements ContainerOperations {
}
@Override
- public void resumeNode(NodeAgentContext context) {
- executeNodeCtlInContainer(context, "resume");
+ public String resumeNode(NodeAgentContext context) {
+ return executeNodeCtlInContainer(context, "resume").getOutput();
}
@Override
- public void suspendNode(NodeAgentContext context) {
- executeNodeCtlInContainer(context, "suspend");
+ public String suspendNode(NodeAgentContext context) {
+ return executeNodeCtlInContainer(context, "suspend").getOutput();
}
@Override
- public void restartVespa(NodeAgentContext context) {
- executeNodeCtlInContainer(context, "restart-vespa");
+ public String restartVespa(NodeAgentContext context) {
+ return executeNodeCtlInContainer(context, "restart-vespa").getOutput();
}
@Override
- public void startServices(NodeAgentContext context) {
- executeNodeCtlInContainer(context, "start");
+ public String startServices(NodeAgentContext context) {
+ return executeNodeCtlInContainer(context, "start").getOutput();
}
@Override
- public void stopServices(NodeAgentContext context) {
- executeNodeCtlInContainer(context, "stop");
+ public String stopServices(NodeAgentContext context) {
+ return executeNodeCtlInContainer(context, "stop").getOutput();
}
ProcessResult executeNodeCtlInContainer(NodeAgentContext context, String program) {
@@ -306,7 +306,6 @@ public class ContainerOperationsImpl implements ContainerOperations {
"var/db/vespa",
"var/jdisc_container",
"var/vespa",
- "var/yca",
"var/zookeeper");
if (context.nodeType() == NodeType.proxy) {
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java
index 5be6c660dd5..93717543a1c 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java
@@ -173,6 +173,7 @@ public class StorageMaintainer {
private Map<String, Object> getCoredumpNodeAttributes(NodeAgentContext context, Optional<Container> container) {
Map<String, String> attributes = new HashMap<>();
attributes.put("hostname", context.node().hostname());
+ attributes.put("system", context.zone().getSystemName().value());
attributes.put("region", context.zone().getRegionName().value());
attributes.put("environment", context.zone().getEnvironment().value());
attributes.put("flavor", context.node().flavor());
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandler.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandler.java
index 9a34aa82e9c..a912de18b94 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandler.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandler.java
@@ -13,6 +13,7 @@ import com.yahoo.vespa.hosted.node.admin.task.util.process.Terminal;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
+import java.time.Clock;
import java.time.Duration;
import java.util.Comparator;
import java.util.HashMap;
@@ -53,8 +54,9 @@ public class CoredumpHandler {
private final Path crashPatchInContainer;
private final Path doneCoredumpsPath;
private final String operatorGroupName;
- private final Supplier<String> coredumpIdSupplier;
private final Metrics metrics;
+ private final Clock clock;
+ private final Supplier<String> coredumpIdSupplier;
/**
* @param crashPathInContainer path inside the container where core dump are dumped
@@ -64,11 +66,12 @@ public class CoredumpHandler {
public CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
Path crashPathInContainer, Path doneCoredumpsPath, String operatorGroupName, Metrics metrics) {
this(terminal, coreCollector, coredumpReporter, crashPathInContainer, doneCoredumpsPath,
- operatorGroupName, metrics, () -> UUID.randomUUID().toString());
+ operatorGroupName, metrics, Clock.systemUTC(), () -> UUID.randomUUID().toString());
}
CoredumpHandler(Terminal terminal, CoreCollector coreCollector, CoredumpReporter coredumpReporter,
- Path crashPathInContainer, Path doneCoredumpsPath, String operatorGroupName, Metrics metrics, Supplier<String> coredumpIdSupplier) {
+ Path crashPathInContainer, Path doneCoredumpsPath, String operatorGroupName, Metrics metrics,
+ Clock clock, Supplier<String> coredumpIdSupplier) {
this.terminal = terminal;
this.coreCollector = coreCollector;
this.coredumpReporter = coredumpReporter;
@@ -76,6 +79,7 @@ public class CoredumpHandler {
this.doneCoredumpsPath = doneCoredumpsPath;
this.operatorGroupName = operatorGroupName;
this.metrics = metrics;
+ this.clock = clock;
this.coredumpIdSupplier = coredumpIdSupplier;
}
@@ -110,7 +114,7 @@ public class CoredumpHandler {
*/
Optional<Path> enqueueCoredump(Path containerCrashPathOnHost, Path containerProcessingPathOnHost) {
List<Path> toProcess = FileFinder.files(containerCrashPathOnHost)
- .match(nameStartsWith(".").negate()) // Skip core dump files currently being written
+ .match(this::isReadyForProcessing)
.maxDepth(1)
.stream()
.sorted(Comparator.comparing(FileFinder.FileAttributes::lastModifiedTime))
@@ -159,7 +163,6 @@ public class CoredumpHandler {
Map<String, Object> metadata = new HashMap<>(coreCollector.collect(context, coredumpFilePathInContainer));
metadata.putAll(nodeAttributesSupplier.get());
metadata.put("coredump_path", doneCoredumpsPath.resolve(context.containerName().asString()).resolve(coredumpDirectory.getFileName()).toString());
- metadata.put("system", context.zone().getSystemName().value());
String metadataFields = objectMapper.writeValueAsString(Map.of("fields", metadata));
metadataPath.writeUtf8File(metadataFields);
@@ -251,4 +254,10 @@ public class CoredumpHandler {
return dimensionsBuilder.build();
}
+ private boolean isReadyForProcessing(FileFinder.FileAttributes fileAttributes) {
+ // Wait at least a minute until we start processing a core/heap dump to ensure that
+ // kernel/JVM has finished writing it
+ return clock.instant().minusSeconds(60).isAfter(fileAttributes.lastModifiedTime());
+ }
+
}
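The new isReadyForProcessing() guard skips core/heap dumps whose last-modified time is less than a minute before the injected clock, so dumps still being written are left alone until a later tick. A self-contained sketch of the same cutoff with a fixed clock; the DumpAgeSketch class and timestamps are illustrative only:

    import java.time.Clock;
    import java.time.Instant;
    import java.time.ZoneOffset;

    class DumpAgeSketch {
        public static void main(String[] args) {
            Clock clock = Clock.fixed(Instant.parse("2021-04-26T12:00:00Z"), ZoneOffset.UTC);
            Instant justWritten = Instant.parse("2021-04-26T11:59:30Z"); // 30 s old, possibly still being written
            Instant oldEnough   = Instant.parse("2021-04-26T11:58:00Z"); // 2 min old
            System.out.println(clock.instant().minusSeconds(60).isAfter(justWritten)); // false: skipped this tick
            System.out.println(clock.instant().minusSeconds(60).isAfter(oldEnough));   // true: ready for processing
        }
    }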
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java
index 9d44f125efc..772e17291ef 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java
@@ -3,7 +3,9 @@ package com.yahoo.vespa.hosted.node.admin.nodeadmin;
import com.yahoo.concurrent.ThreadFactoryFactory;
import com.yahoo.config.provision.HostName;
-import java.util.logging.Level;
+import com.yahoo.vespa.flags.BooleanFlag;
+import com.yahoo.vespa.flags.FlagSource;
+import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.Acl;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeRepository;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeSpec;
@@ -27,6 +29,7 @@ import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.function.Supplier;
+import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
@@ -53,6 +56,7 @@ public class NodeAdminStateUpdater {
private final Orchestrator orchestrator;
private final NodeAdmin nodeAdmin;
private final String hostHostname;
+ private final BooleanFlag cacheAclFlag;
public enum State { TRANSITIONING, RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED }
@@ -64,13 +68,15 @@ public class NodeAdminStateUpdater {
Orchestrator orchestrator,
NodeAdmin nodeAdmin,
HostName hostHostname,
- Clock clock) {
+ Clock clock,
+ FlagSource flagSource) {
this.nodeAgentContextFactory = nodeAgentContextFactory;
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.nodeAdmin = nodeAdmin;
this.hostHostname = hostHostname.value();
this.cachedAclSupplier = new CachedSupplier<>(clock, Duration.ofSeconds(115), () -> nodeRepository.getAcls(this.hostHostname));
+ this.cacheAclFlag = Flags.CACHE_ACL.bindTo(flagSource);
}
public void start() {
@@ -168,9 +174,11 @@ public class NodeAdminStateUpdater {
try {
Map<String, NodeSpec> nodeSpecByHostname = nodeRepository.getNodes(hostHostname).stream()
.collect(Collectors.toMap(NodeSpec::hostname, Function.identity()));
- Map<String, Acl> aclByHostname = Optional.of(cachedAclSupplier.get())
- .filter(acls -> acls.keySet().containsAll(nodeSpecByHostname.keySet()))
- .orElseGet(cachedAclSupplier::invalidateAndGet);
+ Map<String, Acl> aclByHostname = cacheAclFlag.value() ?
+ Optional.of(cachedAclSupplier.get())
+ .filter(acls -> acls.keySet().containsAll(nodeSpecByHostname.keySet()))
+ .orElseGet(cachedAclSupplier::invalidateAndGet) :
+ cachedAclSupplier.invalidateAndGet();
Set<NodeAgentContext> nodeAgentContexts = nodeSpecByHostname.keySet().stream()
.map(hostname -> nodeAgentContextFactory.create(
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContext.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContext.java
index 646e5bd7773..09e7b468e01 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContext.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContext.java
@@ -44,6 +44,8 @@ public interface NodeAgentContext extends TaskContext {
String vespaUser();
+ String vespaGroup();
+
String vespaUserOnHost();
default boolean isDisabled(NodeAgentTask task) {
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContextImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContextImpl.java
index dc2b9fbb670..b63d3ee1d7c 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContextImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContextImpl.java
@@ -1,4 +1,4 @@
-// Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.node.admin.nodeagent;
import com.yahoo.config.provision.ApplicationId;
@@ -44,6 +44,7 @@ public class NodeAgentContextImpl implements NodeAgentContext {
private final Path pathToNodeRootOnHost;
private final Path pathToVespaHome;
private final String vespaUser;
+ private final String vespaGroup;
private final String vespaUserOnHost;
private final double cpuSpeedup;
private final Set<NodeAgentTask> disabledNodeAgentTasks;
@@ -53,7 +54,7 @@ public class NodeAgentContextImpl implements NodeAgentContext {
ContainerNetworkMode containerNetworkMode, ZoneApi zone,
FileSystem fileSystem, FlagSource flagSource,
Path pathToContainerStorage, Path pathToVespaHome,
- String vespaUser, String vespaUserOnHost, double cpuSpeedup,
+ String vespaUser, String vespaGroup, String vespaUserOnHost, double cpuSpeedup,
Optional<ApplicationId> hostExclusiveTo) {
if (cpuSpeedup <= 0)
throw new IllegalArgumentException("cpuSpeedUp must be positive, was: " + cpuSpeedup);
@@ -69,6 +70,7 @@ public class NodeAgentContextImpl implements NodeAgentContext {
this.pathToVespaHome = requireValidPath(pathToVespaHome);
this.logPrefix = containerName.asString() + ": ";
this.vespaUser = vespaUser;
+ this.vespaGroup = vespaGroup;
this.vespaUserOnHost = vespaUserOnHost;
this.cpuSpeedup = cpuSpeedup;
this.disabledNodeAgentTasks = NodeAgentTask.fromString(
@@ -112,6 +114,11 @@ public class NodeAgentContextImpl implements NodeAgentContext {
}
@Override
+ public String vespaGroup() {
+ return vespaGroup;
+ }
+
+ @Override
public String vespaUserOnHost() {
return vespaUserOnHost;
}
@@ -196,6 +203,7 @@ public class NodeAgentContextImpl implements NodeAgentContext {
", pathToNodeRootOnHost=" + pathToNodeRootOnHost +
", pathToVespaHome=" + pathToVespaHome +
", vespaUser='" + vespaUser + '\'' +
+ ", vespaGroup='" + vespaGroup + '\'' +
", vespaUserOnHost='" + vespaUserOnHost + '\'' +
", hostExclusiveTo='" + hostExclusiveTo + '\'' +
'}';
@@ -221,6 +229,7 @@ public class NodeAgentContextImpl implements NodeAgentContext {
private ContainerNetworkMode containerNetworkMode;
private ZoneApi zone;
private String vespaUser;
+ private String vespaGroup;
private String vespaUserOnHost;
private FileSystem fileSystem = FileSystems.getDefault();
private FlagSource flagSource;
@@ -271,6 +280,11 @@ public class NodeAgentContextImpl implements NodeAgentContext {
return this;
}
+ public Builder vespaGroup(String vespaGroup) {
+ this.vespaGroup = vespaGroup;
+ return this;
+ }
+
public Builder vespaUserOnHost(String vespaUserOnHost) {
this.vespaUserOnHost = vespaUserOnHost;
return this;
@@ -334,6 +348,7 @@ public class NodeAgentContextImpl implements NodeAgentContext {
Optional.ofNullable(containerStorage).orElseGet(() -> fileSystem.getPath("/home/docker/container-storage")),
fileSystem.getPath("/opt/vespa"),
Optional.ofNullable(vespaUser).orElse("vespa"),
+ Optional.ofNullable(vespaGroup).orElse("vespa"),
Optional.ofNullable(vespaUserOnHost).orElse("container_vespa"),
cpuSpeedUp, hostExclusiveTo);
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
index e335e66c1c1..74ba19a72c5 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
@@ -166,16 +166,22 @@ public class NodeAgentImpl implements NodeAgent {
void startServicesIfNeeded(NodeAgentContext context) {
if (!hasStartedServices) {
- context.log(logger, "Starting services");
- containerOperations.startServices(context);
+ context.log(logger, "Invoking vespa-nodectl to start services");
+ String output = containerOperations.startServices(context);
+ if (!output.isBlank()) {
+ context.log(logger, "Start services output: " + output);
+ }
hasStartedServices = true;
}
}
void resumeNodeIfNeeded(NodeAgentContext context) {
if (!hasResumedNode) {
- context.log(logger, Level.FINE, "Starting optional node program resume command");
- containerOperations.resumeNode(context);
+ context.log(logger, "Invoking vespa-nodectl to resume services");
+ String output = containerOperations.resumeNode(context);
+ if (!output.isBlank()) {
+ context.log(logger, "Resume services output: " + output);
+ }
hasResumedNode = true;
}
}
@@ -243,10 +249,13 @@ public class NodeAgentImpl implements NodeAgent {
}
shouldRestartServices(context, existingContainer.get()).ifPresent(restartReason -> {
- context.log(logger, "Will restart services: " + restartReason);
+ context.log(logger, "Invoking vespa-nodectl to restart services: " + restartReason);
orchestratorSuspendNode(context);
- containerOperations.restartVespa(context);
+ String output = containerOperations.restartVespa(context);
+ if (!output.isBlank()) {
+ context.log(logger, "Restart services output: " + output);
+ }
currentRestartGeneration = context.node().wantedRestartGeneration();
});
}
@@ -290,11 +299,14 @@ public class NodeAgentImpl implements NodeAgent {
}
public void suspend(NodeAgentContext context) {
- context.log(logger, "Suspending services on node");
if (containerState == ABSENT) return;
try {
hasResumedNode = false;
- containerOperations.suspendNode(context);
+ context.log(logger, "Invoking vespa-nodectl to suspend services");
+ String output = containerOperations.suspendNode(context);
+ if (!output.isBlank()) {
+ context.log(logger, "Suspend services output: " + output);
+ }
} catch (ContainerNotFoundException e) {
containerState = ABSENT;
} catch (RuntimeException e) {
@@ -505,7 +517,7 @@ public class NodeAgentImpl implements NodeAgent {
}
break;
case provisioned:
- nodeRepository.setNodeState(context.hostname().value(), NodeState.dirty);
+ nodeRepository.setNodeState(context.hostname().value(), NodeState.ready);
break;
case dirty:
removeContainerIfNeededUpdateContainerState(context, container);
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ContainerEngineMock.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ContainerEngineMock.java
index 7aeaa37b4af..a856c9d9b8d 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ContainerEngineMock.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ContainerEngineMock.java
@@ -92,7 +92,7 @@ public class ContainerEngineMock implements ContainerEngine {
@Override
public ProcessResult executeInContainerAsUser(ContainerName containerName, String user, OptionalLong timeout, String... args) {
- return new ProcessResult(0, null, "");
+ return new ProcessResult(0, "", "");
}
@Override
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerTester.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerTester.java
index de908f9a62e..9830bfce3a4 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerTester.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerTester.java
@@ -93,7 +93,7 @@ public class DockerTester implements AutoCloseable {
NodeAgentContextFactory nodeAgentContextFactory = (nodeSpec, acl) ->
new NodeAgentContextImpl.Builder(nodeSpec).acl(acl).fileSystem(fileSystem).build();
nodeAdminStateUpdater = new NodeAdminStateUpdater(nodeAgentContextFactory, nodeRepository, orchestrator,
- nodeAdmin, HOST_HOSTNAME, clock);
+ nodeAdmin, HOST_HOSTNAME, clock, flagSource);
loopThread = new Thread(() -> {
nodeAdminStateUpdater.start();
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandlerTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandlerTest.java
index c899198f419..4f2f2f985b6 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandlerTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandlerTest.java
@@ -1,6 +1,7 @@
// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.node.admin.maintenance.coredump;
+import com.yahoo.test.ManualClock;
import com.yahoo.vespa.hosted.dockerapi.metrics.DimensionMetrics;
import com.yahoo.vespa.hosted.dockerapi.metrics.Metrics;
import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContext;
@@ -19,7 +20,6 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.FileTime;
import java.time.Duration;
-import java.time.Instant;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -54,10 +54,11 @@ public class CoredumpHandlerTest {
private final CoreCollector coreCollector = mock(CoreCollector.class);
private final CoredumpReporter coredumpReporter = mock(CoredumpReporter.class);
private final Metrics metrics = new Metrics();
+ private final ManualClock clock = new ManualClock();
@SuppressWarnings("unchecked")
private final Supplier<String> coredumpIdSupplier = mock(Supplier.class);
private final CoredumpHandler coredumpHandler = new CoredumpHandler(terminal, coreCollector, coredumpReporter,
- crashPathInContainer, doneCoredumpsPath, "users", metrics, coredumpIdSupplier);
+ crashPathInContainer, doneCoredumpsPath, "users", metrics, clock, coredumpIdSupplier);
@Test
@@ -66,14 +67,14 @@ public class CoredumpHandlerTest {
final Path processingDir = fileSystem.getPath("/home/docker/container-1/some/other/processing");
Files.createDirectories(crashPathOnHost);
- createFileAged(crashPathOnHost.resolve(".bash.core.431"), Duration.ZERO);
+ createFileAged(crashPathOnHost.resolve("bash.core.431"), Duration.ZERO);
- assertFolderContents(crashPathOnHost, ".bash.core.431");
+ assertFolderContents(crashPathOnHost, "bash.core.431");
Optional<Path> enqueuedPath = coredumpHandler.enqueueCoredump(crashPathOnHost, processingDir);
assertEquals(Optional.empty(), enqueuedPath);
// bash.core.431 finished writing... and 2 more have since been written
- Files.move(crashPathOnHost.resolve(".bash.core.431"), crashPathOnHost.resolve("bash.core.431"));
+ clock.advance(Duration.ofMinutes(3));
createFileAged(crashPathOnHost.resolve("vespa-proton.core.119"), Duration.ofMinutes(10));
createFileAged(crashPathOnHost.resolve("vespa-slobrok.core.673"), Duration.ofMinutes(5));
@@ -100,12 +101,12 @@ public class CoredumpHandlerTest {
final Path processingDir = fileSystem.getPath("/home/docker/container-1/some/other/processing");
Files.createDirectories(crashPathOnHost);
- createFileAged(crashPathOnHost.resolve("java.core.69"), Duration.ofSeconds(15));
- createFileAged(crashPathOnHost.resolve("hs_err_pid69.log"), Duration.ofSeconds(20));
+ createFileAged(crashPathOnHost.resolve("java.core.69"), Duration.ofSeconds(515));
+ createFileAged(crashPathOnHost.resolve("hs_err_pid69.log"), Duration.ofSeconds(520));
- createFileAged(crashPathOnHost.resolve("java.core.2420"), Duration.ofSeconds(40));
- createFileAged(crashPathOnHost.resolve("hs_err_pid2420.log"), Duration.ofSeconds(49));
- createFileAged(crashPathOnHost.resolve("hs_err_pid2421.log"), Duration.ofSeconds(50));
+ createFileAged(crashPathOnHost.resolve("java.core.2420"), Duration.ofSeconds(540));
+ createFileAged(crashPathOnHost.resolve("hs_err_pid2420.log"), Duration.ofSeconds(549));
+ createFileAged(crashPathOnHost.resolve("hs_err_pid2421.log"), Duration.ofSeconds(550));
when(coredumpIdSupplier.get()).thenReturn("id-123").thenReturn("id-321");
Optional<Path> enqueuedPath = coredumpHandler.enqueueCoredump(crashPathOnHost, processingDir);
@@ -154,7 +155,6 @@ public class CoredumpHandlerTest {
String expectedMetadataStr = "{\"fields\":{" +
"\"hostname\":\"host123.yahoo.com\"," +
- "\"system\":\"main\"," +
"\"kernel_version\":\"3.10.0-862.9.1.el7.x86_64\"," +
"\"backtrace\":[\"call 1\",\"function 2\",\"something something\"]," +
"\"vespa_version\":\"6.48.4\"," +
@@ -256,9 +256,9 @@ public class CoredumpHandlerTest {
assertEquals(expectedContentsOfFolder, actualContentsOfFolder);
}
- private static Path createFileAged(Path path, Duration age) {
+ private Path createFileAged(Path path, Duration age) {
return uncheck(() -> Files.setLastModifiedTime(
Files.createFile(path),
- FileTime.from(Instant.now().minus(age))));
+ FileTime.from(clock.instant().minus(age))));
}
}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java
index 36530ff014c..d348d2b74e9 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java
@@ -3,6 +3,8 @@ package com.yahoo.vespa.hosted.node.admin.nodeadmin;
import com.yahoo.config.provision.HostName;
import com.yahoo.test.ManualClock;
+import com.yahoo.vespa.flags.Flags;
+import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.Acl;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeRepository;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeSpec;
@@ -49,9 +51,10 @@ public class NodeAdminStateUpdaterTest {
private final NodeAdmin nodeAdmin = mock(NodeAdmin.class);
private final HostName hostHostname = HostName.from("basehost1.test.yahoo.com");
private final ManualClock clock = new ManualClock();
+ private final InMemoryFlagSource flagSource = new InMemoryFlagSource();
private final NodeAdminStateUpdater updater = spy(new NodeAdminStateUpdater(
- nodeAgentContextFactory, nodeRepository, orchestrator, nodeAdmin, hostHostname, clock));
+ nodeAgentContextFactory, nodeRepository, orchestrator, nodeAdmin, hostHostname, clock, flagSource));
@Test
@@ -213,6 +216,25 @@ public class NodeAdminStateUpdaterTest {
}
@Test
+ public void node_spec_and_acl_aligned_with_acl_cache_disabled() {
+ flagSource.withBooleanFlag(Flags.CACHE_ACL.id(), false);
+
+ Acl acl = new Acl.Builder().withTrustedPorts(22).build();
+ mockNodeRepo(NodeState.active, 3);
+ mockAcl(acl, 1, 2, 3);
+
+ updater.adjustNodeAgentsToRunFromNodeRepository();
+ updater.adjustNodeAgentsToRunFromNodeRepository();
+ updater.adjustNodeAgentsToRunFromNodeRepository();
+
+ verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.hostname().equals("host1.yahoo.com")), eq(acl));
+ verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.hostname().equals("host2.yahoo.com")), eq(acl));
+ verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.hostname().equals("host3.yahoo.com")), eq(acl));
+ verify(nodeRepository, times(3)).getNodes(eq(hostHostname.value()));
+ verify(nodeRepository, times(3)).getAcls(eq(hostHostname.value()));
+ }
+
+ @Test
public void node_spec_and_acl_mismatch_missing_one_acl() {
Acl acl = new Acl.Builder().withTrustedPorts(22).build();
mockNodeRepo(NodeState.active, 3);
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java
index 2e421945f4f..9475e3720c2 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java
@@ -27,6 +27,7 @@ import com.yahoo.vespa.hosted.node.admin.maintenance.StorageMaintainer;
import com.yahoo.vespa.hosted.node.admin.maintenance.acl.AclMaintainer;
import com.yahoo.vespa.hosted.node.admin.maintenance.identity.CredentialsMaintainer;
import com.yahoo.vespa.hosted.node.admin.nodeadmin.ConvergenceException;
+import org.junit.Before;
import org.junit.Test;
import org.mockito.InOrder;
@@ -71,6 +72,14 @@ public class NodeAgentImplTest {
private final InMemoryFlagSource flagSource = new InMemoryFlagSource();
private final ManualClock clock = new ManualClock(Instant.now());
+ @Before
+ public void setUp() {
+ when(containerOperations.suspendNode(any())).thenReturn("");
+ when(containerOperations.resumeNode(any())).thenReturn("");
+ when(containerOperations.restartVespa(any())).thenReturn("");
+ when(containerOperations.startServices(any())).thenReturn("");
+ when(containerOperations.stopServices(any())).thenReturn("");
+ }
@Test
public void upToDateContainerIsUntouched() {
@@ -467,7 +476,7 @@ public class NodeAgentImplTest {
}
@Test
- public void provisionedNodeIsMarkedAsDirty() {
+ public void provisionedNodeIsMarkedAsReady() {
final NodeSpec node = nodeBuilder(NodeState.provisioned)
.wantedDockerImage(dockerImage)
.build();
@@ -477,7 +486,7 @@ public class NodeAgentImplTest {
when(nodeRepository.getOptionalNode(hostName)).thenReturn(Optional.of(node));
nodeAgent.doConverge(context);
- verify(nodeRepository, times(1)).setNodeState(eq(hostName), eq(NodeState.dirty));
+ verify(nodeRepository, times(1)).setNodeState(eq(hostName), eq(NodeState.ready));
}
@Test
@@ -515,7 +524,7 @@ public class NodeAgentImplTest {
final InOrder inOrder = inOrder(orchestrator, containerOperations, nodeRepository);
doThrow(new RuntimeException("Failed 1st time"))
- .doNothing()
+ .doReturn("")
.when(containerOperations).resumeNode(eq(context));
// 1st try
diff --git a/node-repository/pom.xml b/node-repository/pom.xml
index a8bc33e1db6..05220b6977f 100644
--- a/node-repository/pom.xml
+++ b/node-repository/pom.xml
@@ -26,19 +26,6 @@
</dependency>
<dependency>
<groupId>com.yahoo.vespa</groupId>
- <artifactId>vespa_jersey2</artifactId>
- <version>${project.version}</version>
- <scope>provided</scope>
- <type>pom</type>
- </dependency>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>container-jersey2</artifactId>
- <version>${project.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
<artifactId>config-provisioning</artifactId>
<version>${project.version}</version>
<scope>provided</scope>
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java
index de72024cb77..3dbafdc2aba 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java
@@ -196,7 +196,7 @@ public final class Node implements Nodelike {
/**
* Returns a copy of this where wantToFail is set to true and history is updated to reflect this.
*/
- public Node withWantToFail(boolean wantToFail, Agent agent, String reason, Instant at) {
+ public Node withWantToFail(boolean wantToFail, Agent agent, Instant at) {
Node node = this.with(status.withWantToFail(wantToFail));
if (wantToFail)
node = node.with(history.with(new History.Event(History.Event.Type.wantToFail, agent, at)));
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java
index 0c19cf99539..8a35febc9a7 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java
@@ -280,13 +280,17 @@ public class NodeList extends AbstractFilteringList<Node, NodeList> {
ClusterSpec firstNodeSpec = first().get().allocation().get().membership().cluster().with(Optional.empty());
if (stream().map(node -> node.allocation().get().membership().cluster().with(Optional.empty()))
- .anyMatch(clusterSpec -> ! clusterSpec.equals(firstNodeSpec)))
+ .anyMatch(clusterSpec -> ! clusterSpec.id().equals(firstNodeSpec.id())))
throw new IllegalStateException("Nodes belong to multiple clusters");
}
/** Returns the nodes of this as a stream */
public Stream<Node> stream() { return asList().stream(); }
+ public static NodeList of(Node ... nodes) {
+ return copyOf(List.of(nodes));
+ }
+
public static NodeList copyOf(List<Node> nodes) {
return new NodeList(nodes, false);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
index e3622c8f076..f5eb67f0979 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
@@ -150,7 +150,8 @@ public class ClusterModel {
for (ScalingEvent event : cluster.scalingEvents()) {
if (event.duration().isEmpty()) continue;
completedEventCount++;
- totalDuration = totalDuration.plus(event.duration().get());
+ // Assume we failed to record completion in time if an event lasted longer than 4 days, and cap it at 4 days
+ totalDuration = totalDuration.plus(maximum(Duration.ofDays(4), event.duration().get()));
}
if (completedEventCount == 0) { // Use defaults
@@ -160,13 +161,25 @@ public class ClusterModel {
else {
Duration predictedDuration = totalDuration.dividedBy(completedEventCount);
- // TODO: Remove when we have reliable completion for content clusters
- if (clusterSpec.isStateful() && predictedDuration.minus(Duration.ofHours(12)).isNegative())
- return Duration.ofHours(12);
+ if ( clusterSpec.isStateful() ) // TODO: Remove when we have reliable completion for content clusters
+ predictedDuration = minimum(Duration.ofHours(12), predictedDuration);
+
+ predictedDuration = minimum(Duration.ofMinutes(5), predictedDuration);
- if (predictedDuration.minus(Duration.ofMinutes(5)).isNegative()) return Duration.ofMinutes(5); // minimum
return predictedDuration;
}
}
+ private static Duration minimum(Duration smallestAllowed, Duration duration) {
+ if (duration.minus(smallestAllowed).isNegative())
+ return smallestAllowed;
+ return duration;
+ }
+
+ private static Duration maximum(Duration largestAllowed, Duration duration) {
+ if ( ! duration.minus(largestAllowed).isNegative())
+ return largestAllowed;
+ return duration;
+ }
+
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/QuestMetricsDb.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/QuestMetricsDb.java
index c933e16041a..f71e7d608e0 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/QuestMetricsDb.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/QuestMetricsDb.java
@@ -17,6 +17,7 @@ import io.questdb.cairo.TableWriter;
import io.questdb.cairo.sql.Record;
import io.questdb.cairo.sql.RecordCursor;
import io.questdb.cairo.sql.RecordCursorFactory;
+import io.questdb.griffin.CompiledQuery;
import io.questdb.griffin.SqlCompiler;
import io.questdb.griffin.SqlException;
import io.questdb.griffin.SqlExecutionContext;
@@ -54,6 +55,7 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb {
private final Clock clock;
private final String dataDir;
private CairoEngine engine;
+ private ThreadLocal<SqlCompiler> sqlCompiler;
private long highestTimestampAdded = 0;
@@ -83,6 +85,7 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb {
CairoConfiguration configuration = new DefaultCairoConfiguration(dataDir);
engine = new CairoEngine(configuration);
+ sqlCompiler = ThreadLocal.withInitial(() -> new SqlCompiler(engine));
ensureTablesExist();
}
@@ -159,9 +162,8 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb {
@Override
public List<NodeTimeseries> getNodeTimeseries(Duration period, Set<String> hostnames) {
- try (SqlCompiler compiler = new SqlCompiler(engine)) {
- SqlExecutionContext context = newContext();
- var snapshots = getNodeSnapshots(clock.instant().minus(period), hostnames, compiler, context);
+ try {
+ var snapshots = getNodeSnapshots(clock.instant().minus(period), hostnames, newContext());
return snapshots.entrySet().stream()
.map(entry -> new NodeTimeseries(entry.getKey(), entry.getValue()))
.collect(Collectors.toList());
@@ -173,9 +175,8 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb {
@Override
public ClusterTimeseries getClusterTimeseries(ApplicationId applicationId, ClusterSpec.Id clusterId) {
- try (SqlCompiler compiler = new SqlCompiler(engine)) {
- SqlExecutionContext context = newContext();
- return getClusterSnapshots(applicationId, clusterId, compiler, context);
+ try {
+ return getClusterSnapshots(applicationId, clusterId);
}
catch (SqlException e) {
throw new IllegalStateException("Could not read cluster timeseries data in Quest stored in " + dataDir, e);
@@ -193,7 +194,7 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb {
Instant oldestToKeep = clock.instant().minus(Duration.ofDays(4));
SqlExecutionContext context = newContext();
int partitions = 0;
- try (SqlCompiler compiler = new SqlCompiler(engine)) {
+ try {
File tableRoot = new File(dataDir, table);
List<String> removeList = new ArrayList<>();
for (String dirEntry : tableRoot.list()) {
@@ -209,9 +210,9 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb {
}
// Remove unless all partitions are old: Removing all partitions "will be supported in the future"
if ( removeList.size() < partitions && ! removeList.isEmpty()) {
- compiler.compile("alter table " + table + " drop partition list " +
- removeList.stream().map(dir -> "'" + dir + "'").collect(Collectors.joining(",")),
- context);
+ issue("alter table " + table + " drop partition list " +
+ removeList.stream().map(dir -> "'" + dir + "'").collect(Collectors.joining(",")),
+ context);
}
}
catch (SqlException e) {
@@ -257,13 +258,13 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb {
}
private void createNodeTable(SqlExecutionContext context) {
- try (SqlCompiler compiler = new SqlCompiler(engine)) {
- compiler.compile("create table " + nodeTable +
- " (hostname string, at timestamp, cpu_util float, mem_total_util float, disk_util float," +
- " application_generation long, inService boolean, stable boolean, queries_rate float)" +
- " timestamp(at)" +
- "PARTITION BY DAY;",
- context);
+ try {
+ issue("create table " + nodeTable +
+ " (hostname string, at timestamp, cpu_util float, mem_total_util float, disk_util float," +
+ " application_generation long, inService boolean, stable boolean, queries_rate float)" +
+ " timestamp(at)" +
+ "PARTITION BY DAY;",
+ context);
// We should do this if we get a version where selecting on strings works when embedded, see below
// compiler.compile("alter table " + tableName + " alter column hostname add index", context);
}
@@ -273,12 +274,12 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb {
}
private void createClusterTable(SqlExecutionContext context) {
- try (SqlCompiler compiler = new SqlCompiler(engine)) {
- compiler.compile("create table " + clusterTable +
- " (application string, cluster string, at timestamp, queries_rate float, write_rate float)" +
- " timestamp(at)" +
- "PARTITION BY DAY;",
- context);
+ try {
+ issue("create table " + clusterTable +
+ " (application string, cluster string, at timestamp, queries_rate float, write_rate float)" +
+ " timestamp(at)" +
+ "PARTITION BY DAY;",
+ context);
// We should do this if we get a version where selecting on strings works when embedded, see below
// compiler.compile("alter table " + tableName + " alter column cluster add index", context);
}
@@ -288,9 +289,9 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb {
}
private void ensureNodeTableIsUpdated(SqlExecutionContext context) {
- try (SqlCompiler compiler = new SqlCompiler(engine)) {
+ try {
if (0 == engine.getStatus(context.getCairoSecurityContext(), new Path(), nodeTable)) {
- ensureColumnExists("queries_rate", "float", nodeTable, compiler, context); // TODO: Remove after March 2021
+ ensureColumnExists("queries_rate", "float", nodeTable,context); // TODO: Remove after March 2021
}
} catch (SqlException e) {
repair(e);
@@ -298,9 +299,9 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb {
}
private void ensureClusterTableIsUpdated(SqlExecutionContext context) {
- try (SqlCompiler compiler = new SqlCompiler(engine)) {
+ try {
if (0 == engine.getStatus(context.getCairoSecurityContext(), new Path(), nodeTable)) {
- ensureColumnExists("write_rate", "float", nodeTable, compiler, context); // TODO: Remove after March 2021
+ ensureColumnExists("write_rate", "float", nodeTable, context); // TODO: Remove after March 2021
}
} catch (SqlException e) {
repair(e);
@@ -308,14 +309,14 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb {
}
private void ensureColumnExists(String column, String columnType,
- String table, SqlCompiler compiler, SqlExecutionContext context) throws SqlException {
- if (columnNamesOf(table, compiler, context).contains(column)) return;
- compiler.compile("alter table " + table + " add column " + column + " " + columnType, context);
+ String table, SqlExecutionContext context) throws SqlException {
+ if (columnNamesOf(table, context).contains(column)) return;
+ issue("alter table " + table + " add column " + column + " " + columnType, context);
}
- private List<String> columnNamesOf(String tableName, SqlCompiler compiler, SqlExecutionContext context) throws SqlException {
+ private List<String> columnNamesOf(String tableName, SqlExecutionContext context) throws SqlException {
List<String> columns = new ArrayList<>();
- try (RecordCursorFactory factory = compiler.compile("show columns from " + tableName, context).getRecordCursorFactory()) {
+ try (RecordCursorFactory factory = issue("show columns from " + tableName, context).getRecordCursorFactory()) {
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
@@ -339,7 +340,6 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb {
private ListMap<String, NodeMetricSnapshot> getNodeSnapshots(Instant startTime,
Set<String> hostnames,
- SqlCompiler compiler,
SqlExecutionContext context) throws SqlException {
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
String from = formatter.format(startTime).substring(0, 19) + ".000000Z";
@@ -349,7 +349,7 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb {
// WHERE clauses do not work:
// String sql = "select * from " + tableName + " where hostname in('host1', 'host2', 'host3');";
- try (RecordCursorFactory factory = compiler.compile(sql, context).getRecordCursorFactory()) {
+ try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
ListMap<String, NodeMetricSnapshot> snapshots = new ListMap<>();
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
@@ -372,12 +372,10 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb {
}
}
- private ClusterTimeseries getClusterSnapshots(ApplicationId application,
- ClusterSpec.Id cluster,
- SqlCompiler compiler,
- SqlExecutionContext context) throws SqlException {
+ private ClusterTimeseries getClusterSnapshots(ApplicationId application, ClusterSpec.Id cluster) throws SqlException {
String sql = "select * from " + clusterTable;
- try (RecordCursorFactory factory = compiler.compile(sql, context).getRecordCursorFactory()) {
+ var context = newContext();
+ try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
List<ClusterMetricSnapshot> snapshots = new ArrayList<>();
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
@@ -396,6 +394,11 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb {
}
}
+ /** Issues an SQL statement against the QuestDb engine */
+ private CompiledQuery issue(String sql, SqlExecutionContext context) throws SqlException {
+ return sqlCompiler.get().compile(sql, context);
+ }
+
private SqlExecutionContext newContext() {
return new SqlExecutionContextImpl(engine, 1);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancer.java
index 6f7b7c4d57d..a6bb57bef29 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancer.java
@@ -5,6 +5,7 @@ import com.yahoo.vespa.hosted.provision.maintenance.LoadBalancerExpirer;
import java.time.Instant;
import java.util.Objects;
+import java.util.Optional;
/**
* Represents a load balancer for an application's cluster. This is immutable.
@@ -14,15 +15,18 @@ import java.util.Objects;
public class LoadBalancer {
private final LoadBalancerId id;
- private final LoadBalancerInstance instance;
+ private final Optional<LoadBalancerInstance> instance;
private final State state;
private final Instant changedAt;
- public LoadBalancer(LoadBalancerId id, LoadBalancerInstance instance, State state, Instant changedAt) {
+ public LoadBalancer(LoadBalancerId id, Optional<LoadBalancerInstance> instance, State state, Instant changedAt) {
this.id = Objects.requireNonNull(id, "id must be non-null");
this.instance = Objects.requireNonNull(instance, "instance must be non-null");
this.state = Objects.requireNonNull(state, "state must be non-null");
this.changedAt = Objects.requireNonNull(changedAt, "changedAt must be non-null");
+ if (state == State.active && instance.isEmpty()) {
+ throw new IllegalArgumentException("Load balancer instance is required in state " + state);
+ }
}
/** An identifier for this load balancer. The ID is unique inside the zone */
@@ -31,7 +35,7 @@ public class LoadBalancer {
}
/** The instance associated with this */
- public LoadBalancerInstance instance() {
+ public Optional<LoadBalancerInstance> instance() {
return instance;
}
@@ -58,7 +62,7 @@ public class LoadBalancer {
}
/** Returns a copy of this with instance set to given instance */
- public LoadBalancer with(LoadBalancerInstance instance) {
+ public LoadBalancer with(Optional<LoadBalancerInstance> instance) {
return new LoadBalancer(id, instance, state, changedAt);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java
index 17d33ef501c..7da6e0d3ebe 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java
@@ -111,16 +111,15 @@ public class AutoscalingMaintainer extends NodeRepositoryMaintainer {
if (clusterNodes.retired().stream()
.anyMatch(node -> node.history().hasEventAt(History.Event.Type.retired, event.at())))
return cluster;
- // - 2. all nodes have switched to the right config generation
+ // - 2. all nodes have switched to the right config generation (currently only measured on containers)
for (var nodeTimeseries : nodeRepository().metricsDb().getNodeTimeseries(Duration.between(event.at(), clock().instant()),
clusterNodes)) {
- Optional<NodeMetricSnapshot> firstOnNewGeneration =
+ Optional<NodeMetricSnapshot> onNewGeneration =
nodeTimeseries.asList().stream()
- .filter(snapshot -> snapshot.generation() >= event.generation()).findFirst();
- if (firstOnNewGeneration.isEmpty()) return cluster; // Not completed
+ .filter(snapshot -> snapshot.generation() >= event.generation()).findAny();
+ if (onNewGeneration.isEmpty()) return cluster; // Not completed
}
-
// Set the completion time to the instant we notice completion.
Instant completionTime = nodeRepository().clock().instant();
return cluster.with(event.withCompletion(completionTime));
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityChecker.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityChecker.java
index 2b48ae98549..3fd07e1db2a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityChecker.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityChecker.java
@@ -64,18 +64,10 @@ public class CapacityChecker {
}
public List<Node> nodesFromHostnames(List<String> hostnames) {
- List<Node> nodes = hostnames.stream().filter(nodeMap::containsKey)
+ return hostnames.stream().filter(nodeMap::containsKey)
.map(nodeMap::get)
.collect(Collectors.toList());
- if (nodes.size() != hostnames.size()) {
- Set<String> notFoundNodes = new HashSet<>(hostnames);
- notFoundNodes.removeAll(nodes.stream().map(Node::hostname).collect(Collectors.toList()));
- throw new IllegalArgumentException(String.format("Host(s) not found: [ %s ]",
- String.join(", ", notFoundNodes)));
- }
-
- return nodes;
}
public Optional<HostFailurePath> findHostRemovalFailure(List<Node> hostsToRemove) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainer.java
index 55548e70ddd..4224667a726 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainer.java
@@ -110,7 +110,7 @@ public class DynamicProvisioningMaintainer extends NodeRepositoryMaintainer {
log.log(Level.SEVERE, "Failed to provision " + host.hostname() + " with " + children.size() +
" children, failing out the host recursively", e);
// Fail out as operator to force a quick redeployment
- nodeRepository().nodes().failRecursively(
+ nodeRepository().nodes().failOrMarkRecursively(
host.hostname(), Agent.operator, "Failed by HostProvisioner due to provisioning failure");
} catch (RuntimeException e) {
if (e.getCause() instanceof NameNotFoundException)
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java
index 2ef12177eaf..9665d8872de 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java
@@ -12,6 +12,7 @@ import com.yahoo.vespa.hosted.provision.lb.LoadBalancerSpec;
import com.yahoo.vespa.hosted.provision.persistence.CuratorDatabaseClient;
import java.time.Duration;
+import java.time.Instant;
import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
@@ -56,10 +57,10 @@ public class LoadBalancerExpirer extends NodeRepositoryMaintainer {
/** Move reserved load balancers that have expired to inactive */
private void expireReserved() {
- var expiry = nodeRepository().clock().instant().minus(reservedExpiry);
- patchLoadBalancers(lb -> lb.state() == State.reserved &&
- lb.changedAt().isBefore(expiry),
- lb -> db.writeLoadBalancer(lb.with(State.inactive, nodeRepository().clock().instant())));
+ Instant now = nodeRepository().clock().instant();
+ Instant expiry = now.minus(reservedExpiry);
+ patchLoadBalancers(lb -> lb.state() == State.reserved && lb.changedAt().isBefore(expiry),
+ lb -> db.writeLoadBalancer(lb.with(State.inactive, now)));
}
/** Deprovision inactive load balancers that have expired */
@@ -95,13 +96,14 @@ public class LoadBalancerExpirer extends NodeRepositoryMaintainer {
var failed = new ArrayList<LoadBalancerId>();
var lastException = new AtomicReference<Exception>();
patchLoadBalancers(lb -> lb.state() == State.inactive, lb -> {
+ if (lb.instance().isEmpty()) return;
var allocatedNodes = allocatedNodes(lb.id()).stream().map(Node::hostname).collect(Collectors.toSet());
- var reals = new LinkedHashSet<>(lb.instance().reals());
+ var reals = new LinkedHashSet<>(lb.instance().get().reals());
// Remove any real no longer allocated to this application
reals.removeIf(real -> !allocatedNodes.contains(real.hostname().value()));
try {
service.create(new LoadBalancerSpec(lb.id().application(), lb.id().cluster(), reals), true);
- db.writeLoadBalancer(lb.with(lb.instance().withReals(reals)));
+ db.writeLoadBalancer(lb.with(lb.instance().map(instance -> instance.withReals(reals))));
} catch (Exception e) {
failed.add(lb.id());
lastException.set(e);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
index ac6ecd98fac..effa41dc69f 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
@@ -10,6 +10,7 @@ import com.yahoo.transaction.Mutex;
import com.yahoo.vespa.applicationmodel.HostName;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
+import com.yahoo.vespa.hosted.provision.NodeMutex;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.History;
@@ -29,8 +30,6 @@ import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
-import static java.util.stream.Collectors.collectingAndThen;
-
/**
* Maintains information in the node repo about when this node last responded to ping
* and fails nodes which have not responded within the given time limit.
@@ -84,8 +83,10 @@ public class NodeFailer extends NodeRepositoryMaintainer {
for (Map.Entry<Node, String> entry : getReadyNodesByFailureReason().entrySet()) {
Node node = entry.getKey();
if (throttle(node)) {
- if (node.type().isHost()) throttledHostFailures++;
- else throttledNodeFailures++;
+ if (node.type().isHost())
+ throttledHostFailures++;
+ else
+ throttledNodeFailures++;
continue;
}
String reason = entry.getValue();
@@ -103,13 +104,28 @@ public class NodeFailer extends NodeRepositoryMaintainer {
throttledHostFailures++;
else
throttledNodeFailures++;
-
continue;
}
String reason = entry.getValue();
failActive(node, reason);
}
+ // Active hosts
+ NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
+ for (Node host : activeNodes.hosts().failing()) {
+ if ( ! activeNodes.childrenOf(host).isEmpty()) continue;
+ Optional<NodeMutex> locked = Optional.empty();
+ try {
+ locked = nodeRepository().nodes().lockAndGet(host);
+ if (locked.isEmpty()) continue;
+ nodeRepository().nodes().fail(List.of(locked.get().node()), Agent.NodeFailer,
+ "Host should be failed and have no tenant nodes");
+ }
+ finally {
+ locked.ifPresent(NodeMutex::close);
+ }
+ }
+
int throttlingActive = Math.min(1, throttledHostFailures + throttledNodeFailures);
metric.set(throttlingActiveMetric, throttlingActive, null);
metric.set(throttledHostFailuresMetric, throttledHostFailures, null);
@@ -277,7 +293,7 @@ public class NodeFailer extends NodeRepositoryMaintainer {
}
if (! allTenantNodesFailedOutSuccessfully) return false;
- wantToFail(node, true, reason, lock);
+ wantToFail(node, true, lock);
try {
deployment.get().activate();
return true;
@@ -289,7 +305,7 @@ public class NodeFailer extends NodeRepositoryMaintainer {
} catch (RuntimeException e) {
// Reset want to fail: We'll retry failing unless it heals in the meantime
nodeRepository().nodes().node(node.hostname())
- .ifPresent(n -> wantToFail(n, false, "Could not fail", lock));
+ .ifPresent(n -> wantToFail(n, false, lock));
log.log(Level.WARNING, "Could not fail " + node + " for " + node.allocation().get().owner() +
" for " + reason + ": " + Exceptions.toMessageString(e));
return false;
@@ -297,29 +313,31 @@ public class NodeFailer extends NodeRepositoryMaintainer {
}
}
- private void wantToFail(Node node, boolean wantToFail, String reason, Mutex lock) {
- nodeRepository().nodes().write(node.withWantToFail(wantToFail, Agent.NodeFailer, reason, clock().instant()), lock);
+ private void wantToFail(Node node, boolean wantToFail, Mutex lock) {
+ nodeRepository().nodes().write(node.withWantToFail(wantToFail, Agent.NodeFailer, clock().instant()), lock);
}
/** Returns true if node failing should be throttled */
private boolean throttle(Node node) {
if (throttlePolicy == ThrottlePolicy.disabled) return false;
Instant startOfThrottleWindow = clock().instant().minus(throttlePolicy.throttleWindow);
- NodeList nodes = nodeRepository().nodes().list();
- NodeList recentlyFailedNodes = nodes.stream()
- .filter(n -> n.state() == Node.State.failed)
- .filter(n -> n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow))
- .collect(collectingAndThen(Collectors.toList(), NodeList::copyOf));
+ NodeList allNodes = nodeRepository().nodes().list();
+ NodeList recentlyFailedNodes = allNodes.state(Node.State.failed)
+ .matching(n -> n.history().hasEventAfter(History.Event.Type.failed,
+ startOfThrottleWindow));
- // Allow failing nodes within policy
- if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(nodes.size())) return false;
+ // Allow failing any node within policy
+ if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(allNodes.size())) return false;
- // Always allow failing physical nodes up to minimum limit
+ // Always allow failing a minimum number of hosts
if (node.parentHostname().isEmpty() &&
recentlyFailedNodes.parents().size() < throttlePolicy.minimumAllowedToFail) return false;
+ // Always allow failing children of a failed host
+ if (recentlyFailedNodes.parentOf(node).isPresent()) return false;
+
log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(),
- throttlePolicy.toHumanReadableString(nodes.size())));
+ throttlePolicy.toHumanReadableString(allNodes.size())));
return true;
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirer.java
index 7a29414642e..856d534bbd2 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirer.java
@@ -12,7 +12,7 @@ import java.time.Duration;
import java.util.List;
/**
- * This moves nodes of type {@link NodeType#host} from preovisioned to parked if they have been in provisioned too long.
+ * This moves nodes of type {@link NodeType#host} from provisioned to parked if they have been in provisioned too long.
*
* Only {@link NodeType#host} is moved because any number of nodes of that type can exist. Other node types such as
* {@link NodeType#confighost} have a fixed number and thus cannot be replaced while the fixed number of nodes exist in
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/NodeAcl.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/NodeAcl.java
index dddc535b36a..4cd01167293 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/NodeAcl.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/NodeAcl.java
@@ -12,6 +12,7 @@ import com.yahoo.vespa.hosted.provision.lb.LoadBalancers;
import java.util.Comparator;
import java.util.LinkedHashSet;
import java.util.Objects;
+import java.util.Optional;
import java.util.Set;
import java.util.TreeSet;
@@ -70,6 +71,7 @@ public class NodeAcl {
loadBalancers.list(allocation.owner()).asList()
.stream()
.map(LoadBalancer::instance)
+ .flatMap(Optional::stream)
.map(LoadBalancerInstance::networks)
.forEach(trustedNetworks::addAll);
});
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
index b4d72a35b80..b9602ef0ad4 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
@@ -16,8 +16,6 @@ import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeMutex;
import com.yahoo.vespa.hosted.provision.maintenance.NodeFailer;
-import com.yahoo.vespa.hosted.provision.node.filter.NodeFilter;
-import com.yahoo.vespa.hosted.provision.node.filter.NodeListFilter;
import com.yahoo.vespa.hosted.provision.node.filter.StateFilter;
import com.yahoo.vespa.hosted.provision.persistence.CuratorDatabaseClient;
import com.yahoo.vespa.hosted.provision.restapi.NotFoundException;
@@ -33,6 +31,7 @@ import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.function.BiFunction;
+import java.util.function.Predicate;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
@@ -123,7 +122,7 @@ public class Nodes {
/** Adds a list of newly created reserved nodes to the node repository */
public List<Node> addReservedNodes(LockedNodeList nodes) {
for (Node node : nodes) {
- if ( ! node.flavor().getType().equals(Flavor.Type.DOCKER_CONTAINER))
+ if ( node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
illegal("Cannot add " + node + ": This is not a child node");
if (node.allocation().isEmpty())
illegal("Cannot add " + node + ": Child nodes need to be allocated");
@@ -181,10 +180,13 @@ public class Nodes {
.map(node -> {
if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
- return node.withWantToRetire(false, false, Agent.system, clock.instant());
+ return node.withWantToRetire(false,
+ false,
+ false,
+ Agent.system,
+ clock.instant());
})
.collect(Collectors.toList());
-
return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason));
}
}
@@ -197,22 +199,6 @@ public class Nodes {
return setReady(List.of(nodeToReady), agent, reason).get(0);
}
- /** Restore a node that has been rebuilt */
- public Node restore(String hostname, Agent agent, String reason) {
- // A deprovisioned host has no children so this doesn't need to be recursive
- try (NodeMutex lock = lockAndGetRequired(hostname)) {
- Node existing = lock.node();
- if (existing.state() != Node.State.deprovisioned) illegal("Can not move node " + hostname + " to " +
- Node.State.provisioned + ". It is not in " +
- Node.State.deprovisioned);
- if (!existing.status().wantToRebuild()) illegal("Can not move node " + hostname + " to " +
- Node.State.provisioned +
- ". Rebuild has not been requested");
- Node nodeWithResetFields = existing.withWantToRetire(false, false, false, agent, clock.instant());
- return db.writeTo(Node.State.provisioned, nodeWithResetFields, agent, Optional.of(reason));
- }
- }
-
/** Reserve nodes. This method does <b>not</b> lock the node repository */
public List<Node> reserve(List<Node> nodes) {
return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
@@ -257,12 +243,26 @@ public class Nodes {
* transaction commits.
*/
public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) {
- return db.writeTo(Node.State.failed, nodes, Agent.application, Optional.of("Failed by application"), transaction.nested());
+ return fail(nodes, Agent.application, "Failed by application", transaction.nested());
+ }
+
+ public List<Node> fail(List<Node> nodes, Agent agent, String reason) {
+ NestedTransaction transaction = new NestedTransaction();
+ nodes = fail(nodes, agent, reason, transaction);
+ transaction.commit();
+ return nodes;
+ }
+
+ private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
+ nodes = nodes.stream()
+ .map(n -> n.withWantToFail(false, agent, clock.instant()))
+ .collect(Collectors.toList());
+ return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction);
}
/** Move nodes to the dirty state */
public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) {
- return performOn(NodeListFilter.from(nodes), (node, lock) -> deallocate(node, agent, reason));
+ return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason));
}
public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
@@ -328,11 +328,32 @@ public class Nodes {
/**
* Fails all the nodes that are children of hostname before finally failing the hostname itself.
+ * Non-active nodes are failed immediately, while active nodes are marked as wantToFail.
+ * The host itself is failed if it has no active nodes, and marked wantToFail otherwise.
*
- * @return List of all the failed nodes in their new state
+ * @return all the nodes that were changed by this request
*/
- public List<Node> failRecursively(String hostname, Agent agent, String reason) {
- return moveRecursively(hostname, Node.State.failed, agent, Optional.of(reason));
+ public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) {
+ NodeList children = list().childrenOf(hostname);
+ List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock));
+
+ if (children.state(Node.State.active).isEmpty())
+ changed.add(move(hostname, true, Node.State.failed, agent, Optional.of(reason)));
+ else
+ changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()), (node, lock) -> failOrMark(node, agent, reason, lock)));
+
+ return changed;
+ }
+
+ private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) {
+ if (node.state() == Node.State.active) {
+ node = node.withWantToFail(true, agent, clock.instant());
+ write(node, lock);
+ return node;
+ }
+ else {
+ return move(node, Node.State.failed, agent, Optional.of(reason));
+ }
}
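
[Editor's note: a minimal caller-side sketch, not part of this change; the hostname, agent and reason are assumed. It splits the result of the new failOrMarkRecursively into nodes that were actually failed and nodes only marked wantToFail, the same split the nodes/v2 handler performs further down.]

import java.util.List;

import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;

class FailOrMarkSketch {

    static String summarize(NodeRepository nodeRepository) {
        // Non-active children are failed directly; active children (and an active host) are only marked wantToFail
        List<Node> changed = nodeRepository.nodes().failOrMarkRecursively(
                "host1.example.com", Agent.operator, "Failed through a sketch"); // hypothetical hostname and reason
        NodeList changedNodes = NodeList.copyOf(changed);
        return "failed: " + changedNodes.state(Node.State.failed).size() +
               ", marked: " + changedNodes.failing().size();
    }

}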
/**
@@ -472,12 +493,10 @@ public class Nodes {
if (node.type().isHost()) {
List<Node> removed = removeChildren(node, force);
- if (zone.getCloud().dynamicProvisioning() || node.type() != NodeType.host)
+ if (zone.getCloud().dynamicProvisioning())
db.removeNodes(List.of(node));
else {
- if (!node.status().wantToRebuild()) { // Keep IP addresses if we're rebuilding
- node = node.with(IP.Config.EMPTY);
- }
+ node = node.with(IP.Config.EMPTY);
move(node, Node.State.deprovisioned, Agent.system, Optional.empty());
}
removed.add(node);
@@ -562,8 +581,8 @@ public class Nodes {
*
* @return the nodes in their new state
*/
- public List<Node> restart(NodeFilter filter) {
- return performOn(StateFilter.from(Node.State.active, filter),
+ public List<Node> restart(Predicate<Node> filter) {
+ return performOn(StateFilter.from(Node.State.active).and(filter),
(node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
lock));
}
@@ -573,7 +592,7 @@ public class Nodes {
*
* @return the nodes in their new state
*/
- public List<Node> reboot(NodeFilter filter) {
+ public List<Node> reboot(Predicate<Node> filter) {
return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
}
@@ -582,7 +601,7 @@ public class Nodes {
*
* @return the nodes in their new state
*/
- public List<Node> upgradeOs(NodeFilter filter, Optional<Version> version) {
+ public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) {
return performOn(filter, (node, lock) -> {
var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
return write(node.with(newStatus), lock);
@@ -590,7 +609,7 @@ public class Nodes {
}
/** Retire nodes matching given filter */
- public List<Node> retire(NodeFilter filter, Agent agent, Instant instant) {
+ public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) {
return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
}
@@ -615,8 +634,7 @@ public class Nodes {
try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) {
// This takes allocationLock to prevent any further allocation of nodes on this host
host = lock.node();
- NodeList children = list(allocationLock).childrenOf(host);
- result = performOn(NodeListFilter.from(children.asList()),
+ result = performOn(list(allocationLock).childrenOf(host),
(node, nodeLock) -> write(node.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant),
nodeLock));
result.add(write(host.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant), lock));
@@ -644,20 +662,22 @@ public class Nodes {
return db.writeTo(nodes, Agent.system, Optional.empty());
}
+ private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) {
+ return performOn(list().matching(filter), action);
+ }
+
/**
* Performs an operation requiring locking on all nodes matching some filter.
*
- * @param filter the filter determining the set of nodes where the operation will be performed
* @param action the action to perform
* @return the set of nodes on which the action was performed, as they became as a result of the operation
*/
- private List<Node> performOn(NodeFilter filter, BiFunction<Node, Mutex, Node> action) {
+ private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) {
List<Node> unallocatedNodes = new ArrayList<>();
ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
// Group matching nodes by the lock needed
- for (Node node : db.readNodes()) {
- if ( ! filter.matches(node)) continue;
+ for (Node node : nodes) {
if (node.allocation().isPresent())
allocatedNodes.put(node.allocation().get().owner(), node);
else
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/ApplicationFilter.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/ApplicationFilter.java
index aa285cd707d..d2226fa71b0 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/ApplicationFilter.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/ApplicationFilter.java
@@ -1,16 +1,13 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.node.filter;
-import com.google.common.collect.ImmutableSet;
import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.ApplicationName;
-import com.yahoo.config.provision.InstanceName;
-import com.yahoo.config.provision.TenantName;
import com.yahoo.text.StringUtilities;
import com.yahoo.vespa.hosted.provision.Node;
import java.util.Objects;
import java.util.Set;
+import java.util.function.Predicate;
import java.util.stream.Collectors;
/**
@@ -18,33 +15,29 @@ import java.util.stream.Collectors;
*
* @author bratseth
*/
-public class ApplicationFilter extends NodeFilter {
+public class ApplicationFilter {
- private final Set<ApplicationId> applications;
+ private ApplicationFilter() {}
/** Creates a node filter which filters using the given host filter */
- private ApplicationFilter(Set<ApplicationId> applications, NodeFilter next) {
- super(next);
+ private static Predicate<Node> makePredicate(Set<ApplicationId> applications) {
Objects.requireNonNull(applications, "Applications set cannot be null, use an empty set");
- this.applications = applications;
+ if (applications.isEmpty()) return node -> true;
+ return node -> node.allocation().isPresent() && applications.contains(node.allocation().get().owner());
}
- @Override
- public boolean matches(Node node) {
- if ( ! applications.isEmpty() && ! (node.allocation().isPresent() && applications.contains(node.allocation().get().owner()))) return false;
- return nextMatches(node);
+ public static Predicate<Node> from(ApplicationId applicationId) {
+ return makePredicate(Set.of(applicationId));
}
- public static ApplicationFilter from(ApplicationId applicationId, NodeFilter next) {
- return new ApplicationFilter(ImmutableSet.of(applicationId), next);
+ public static Predicate<Node> from(Set<ApplicationId> applicationIds) {
+ return makePredicate(Set.copyOf(applicationIds));
}
- public static ApplicationFilter from(Set<ApplicationId> applicationIds, NodeFilter next) {
- return new ApplicationFilter(ImmutableSet.copyOf(applicationIds), next);
- }
-
- public static ApplicationFilter from(String applicationIds, NodeFilter next) {
- return new ApplicationFilter(StringUtilities.split(applicationIds).stream().map(ApplicationFilter::toApplicationId).collect(Collectors.toSet()), next);
+ public static Predicate<Node> from(String applicationIds) {
+ return makePredicate(StringUtilities.split(applicationIds).stream()
+ .map(ApplicationFilter::toApplicationId)
+ .collect(Collectors.toUnmodifiableSet()));
}
public static ApplicationId toApplicationId(String applicationIdString) {
@@ -52,7 +45,7 @@ public class ApplicationFilter extends NodeFilter {
if (parts.length != 3)
throw new IllegalArgumentException("Application id must be on the form tenant.application.instance, got '" +
applicationIdString + "'");
- return ApplicationId.from(TenantName.from(parts[0]), ApplicationName.from(parts[1]), InstanceName.from(parts[2]));
+ return ApplicationId.from(parts[0], parts[1], parts[2]);
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/NodeFilter.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/NodeFilter.java
deleted file mode 100644
index bc433c83b2e..00000000000
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/NodeFilter.java
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.provision.node.filter;
-
-import com.yahoo.vespa.hosted.provision.Node;
-
-/**
- * A chainable node filter
- *
- * @author bratseth
- */
-public abstract class NodeFilter {
-
- private final NodeFilter next;
-
- /** Creates a node filter with a chained filter, or null if this is the last filter */
- protected NodeFilter(NodeFilter next) {
- this.next = next;
- }
-
- /** Returns whether this node matches this filter */
- public abstract boolean matches(Node node);
-
- /** Returns whether this is a match according to the chained filter */
- protected final boolean nextMatches(Node node) {
- if (next == null) return true;
- return next.matches(node);
- }
-
-}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/NodeHostFilter.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/NodeHostFilter.java
index ed66d085972..8891720ec33 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/NodeHostFilter.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/NodeHostFilter.java
@@ -1,47 +1,29 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.node.filter;
-import com.yahoo.config.provision.ClusterMembership;
import com.yahoo.config.provision.HostFilter;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.node.Allocation;
import java.util.Objects;
-import java.util.Optional;
+import java.util.function.Predicate;
/**
* A node filter adaption of a host filter
*
* @author bratseth
*/
-public class NodeHostFilter extends NodeFilter {
+public class NodeHostFilter {
- private final HostFilter filter;
+ private NodeHostFilter() {}
/** Creates a node filter which filters using the given host filter */
- private NodeHostFilter(HostFilter filter, NodeFilter next) {
- super(next);
- this.filter = Objects.requireNonNull(filter, "filter cannot be null, use HostFilter.all()");
- }
-
- @Override
- public boolean matches(Node node) {
- if ( ! filter.matches(node.hostname(), node.flavor().name(), membership(node))) return false;
- return nextMatches(node);
- }
-
- private Optional<ClusterMembership> membership(Node node) {
- if (node.allocation().isPresent())
- return Optional.of(node.allocation().get().membership());
- else
- return Optional.empty();
- }
-
- public static NodeHostFilter from(HostFilter hostFilter) {
- return new NodeHostFilter(hostFilter, null);
- }
-
- public static NodeHostFilter from(HostFilter hostFilter, NodeFilter next) {
- return new NodeHostFilter(hostFilter, next);
+ public static Predicate<Node> from(HostFilter filter) {
+ Objects.requireNonNull(filter, "filter cannot be null, use HostFilter.all()");
+ return node -> filter.matches(
+ node.hostname(),
+ node.flavor().name(),
+ node.allocation().map(Allocation::membership));
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/NodeListFilter.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/NodeListFilter.java
index 9bd9f6400d5..0e33eab6aba 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/NodeListFilter.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/NodeListFilter.java
@@ -1,43 +1,33 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.node.filter;
-import com.google.common.collect.ImmutableSet;
import com.yahoo.vespa.hosted.provision.Node;
-import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Set;
+import java.util.function.Predicate;
/**
* A node filter which matches a particular list of nodes
*
* @author bratseth
*/
-public class NodeListFilter extends NodeFilter {
+public class NodeListFilter {
- private final Set<Node> nodes;
+ private NodeListFilter() {}
- private NodeListFilter(List<Node> nodes, NodeFilter next) {
- super(next);
- this.nodes = ImmutableSet.copyOf(Objects.requireNonNull(nodes, "nodes cannot be null"));
+ private static Predicate<Node> makePredicate(List<Node> nodes) {
+ Objects.requireNonNull(nodes, "nodes cannot be null");
+ Set<Node> nodesSet = Set.copyOf(nodes);
+ return nodesSet::contains;
}
- @Override
- public boolean matches(Node node) {
- return nodes.contains(node);
+ public static Predicate<Node> from(Node nodes) {
+ return makePredicate(List.of(nodes));
}
- public static NodeListFilter from(Node nodes) {
- return new NodeListFilter(Collections.singletonList(nodes), null);
+ public static Predicate<Node> from(List<Node> nodes) {
+ return makePredicate(nodes);
}
-
- public static NodeListFilter from(List<Node> nodes) {
- return new NodeListFilter(nodes, null);
- }
-
- public static NodeListFilter from(List<Node> nodes, NodeFilter next) {
- return new NodeListFilter(nodes, next);
- }
-
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/NodeOsVersionFilter.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/NodeOsVersionFilter.java
index e2718cf8b68..5c3040b12bd 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/NodeOsVersionFilter.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/NodeOsVersionFilter.java
@@ -5,31 +5,25 @@ import com.yahoo.component.Version;
import com.yahoo.vespa.hosted.provision.Node;
import java.util.Objects;
+import java.util.function.Predicate;
/**
* Filter nodes by their OS version.
*
* @author mpolden
*/
-public class NodeOsVersionFilter extends NodeFilter {
+public class NodeOsVersionFilter {
- private final Version version;
+ private NodeOsVersionFilter() {}
- private NodeOsVersionFilter(Version version, NodeFilter next) {
- super(next);
- this.version = Objects.requireNonNull(version, "version cannot be null");
+ private static Predicate<Node> makePredicate(Version version) {
+ Objects.requireNonNull(version, "version cannot be null");
+ if (version.isEmpty()) return node -> true;
+ return node -> node.status().osVersion().matches(version);
}
- @Override
- public boolean matches(Node node) {
- if (!version.isEmpty() && !node.status().osVersion().matches(version)) {
- return false;
- }
- return nextMatches(node);
- }
-
- public static NodeOsVersionFilter from(String version, NodeFilter filter) {
- return new NodeOsVersionFilter(Version.fromString(version), filter);
+ public static Predicate<Node> from(String version) {
+ return makePredicate(Version.fromString(version));
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/NodeTypeFilter.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/NodeTypeFilter.java
index 2a50961e0a4..cac7b870366 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/NodeTypeFilter.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/NodeTypeFilter.java
@@ -1,42 +1,38 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.node.filter;
-import com.google.common.collect.ImmutableSet;
import com.yahoo.config.provision.NodeType;
import com.yahoo.text.StringUtilities;
import com.yahoo.vespa.hosted.provision.Node;
-import java.util.Collections;
+import java.util.EnumSet;
import java.util.Objects;
-import java.util.Set;
+import java.util.function.Predicate;
import java.util.stream.Collectors;
/**
* @author bratseth
*/
-public class NodeTypeFilter extends NodeFilter {
+public class NodeTypeFilter {
- private final Set<NodeType> types;
-
- protected NodeTypeFilter(Set<NodeType> types, NodeFilter next) {
- super(next);
- this.types = ImmutableSet.copyOf(Objects.requireNonNull(types, "Node types cannot be null"));
- }
+ private NodeTypeFilter() {}
- @Override
- public boolean matches(Node node) {
- if (! types.isEmpty() && ! types.contains(node.type())) return false;
- return nextMatches(node);
+ private static Predicate<Node> makePredicate(EnumSet<NodeType> types) {
+ Objects.requireNonNull(types, "Node types cannot be null");
+ if (types.isEmpty()) return node -> true;
+ return node -> types.contains(node.type());
}
/** Returns a copy of the given filter which only matches for the given type */
- public static NodeTypeFilter from(NodeType type, NodeFilter filter) {
- return new NodeTypeFilter(Collections.singleton(type), filter);
+ public static Predicate<Node> from(NodeType type) {
+ return makePredicate(EnumSet.of(type));
}
/** Returns a node filter which matches a comma or space-separated list of types */
- public static NodeTypeFilter from(String types, NodeFilter next) {
- return new NodeTypeFilter(StringUtilities.split(types).stream().map(NodeType::valueOf).collect(Collectors.toSet()), next);
+ public static Predicate<Node> from(String types) {
+ return makePredicate(StringUtilities.split(types).stream()
+ .map(NodeType::valueOf)
+ .collect(Collectors.toCollection(() -> EnumSet.noneOf(NodeType.class))));
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/ParentHostFilter.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/ParentHostFilter.java
index 3c51161d4ca..25ce0373f15 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/ParentHostFilter.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/ParentHostFilter.java
@@ -1,12 +1,12 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.node.filter;
-import com.google.common.collect.ImmutableSet;
import com.yahoo.text.StringUtilities;
import com.yahoo.vespa.hosted.provision.Node;
import java.util.Objects;
import java.util.Set;
+import java.util.function.Predicate;
import java.util.stream.Collectors;
/**
@@ -14,27 +14,19 @@ import java.util.stream.Collectors;
*
* @author dybis
*/
-public class ParentHostFilter extends NodeFilter {
+public class ParentHostFilter {
- private final Set<String> parentHostNames;
+ private ParentHostFilter() {}
/** Creates a node filter which filters using the given parent host name */
- private ParentHostFilter(Set<String> parentHostNames, NodeFilter next) {
- super(next);
- this.parentHostNames = ImmutableSet.copyOf(Objects.requireNonNull(parentHostNames, "parentHostNames cannot be null"));
- }
-
- @Override
- public boolean matches(Node node) {
- if (! parentHostNames.isEmpty() && (
- ! node.parentHostname().isPresent() || ! parentHostNames.contains(node.parentHostname().get())))
- return false;
- return nextMatches(node);
+ private static Predicate<Node> makePredicate(Set<String> parentHostNames) {
+ Objects.requireNonNull(parentHostNames, "parentHostNames cannot be null");
+ if (parentHostNames.isEmpty()) return node -> true;
+ return node -> node.parentHostname().isPresent() && parentHostNames.contains(node.parentHostname().get());
}
/** Returns a copy of the given filter which only matches for the given parent */
- public static ParentHostFilter from(String parentNames, NodeFilter filter) {
- return new ParentHostFilter(StringUtilities.split(parentNames).stream().collect(Collectors.toSet()), filter);
+ public static Predicate<Node> from(String parentNames) {
+ return makePredicate(StringUtilities.split(parentNames).stream().collect(Collectors.toUnmodifiableSet()));
}
-
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/StateFilter.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/StateFilter.java
index a2590e8c727..48f9513a434 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/StateFilter.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/filter/StateFilter.java
@@ -6,7 +6,7 @@ import com.yahoo.vespa.hosted.provision.Node;
import java.util.EnumSet;
import java.util.Objects;
-import java.util.Set;
+import java.util.function.Predicate;
import java.util.stream.Collectors;
/**
@@ -14,36 +14,30 @@ import java.util.stream.Collectors;
*
* @author bratseth
*/
-public class StateFilter extends NodeFilter {
+public class StateFilter {
- private final Set<Node.State> states;
+ private StateFilter() {}
- /** Creates a node filter which filters using the given host filter */
- private StateFilter(Set<Node.State> states, NodeFilter next) {
- super(next);
+ private static Predicate<Node> makePredicate(EnumSet<Node.State> states) {
Objects.requireNonNull(states, "state cannot be null, use an empty set");
- this.states = EnumSet.copyOf(states);
- }
-
- @Override
- public boolean matches(Node node) {
- if ( ! states.contains(node.state())) return false;
- return nextMatches(node);
+ return node -> states.contains(node.state());
}
/** Returns a copy of the given filter which only matches for the given state */
- public static StateFilter from(Node.State state, NodeFilter filter) {
- return new StateFilter(EnumSet.of(state), filter);
+ public static Predicate<Node> from(Node.State state) {
+ return makePredicate(EnumSet.of(state));
}
/** Returns a node filter which matches a comma or space-separated list of states */
- public static StateFilter from(String states, boolean includeDeprovisioned, NodeFilter next) {
+ public static Predicate<Node> from(String states, boolean includeDeprovisioned) {
if (states == null) {
- return new StateFilter(includeDeprovisioned ?
- EnumSet.allOf(Node.State.class) : EnumSet.complementOf(EnumSet.of(Node.State.deprovisioned)), next);
+ return makePredicate(includeDeprovisioned ?
+ EnumSet.allOf(Node.State.class) : EnumSet.complementOf(EnumSet.of(Node.State.deprovisioned)));
}
- return new StateFilter(StringUtilities.split(states).stream().map(Node.State::valueOf).collect(Collectors.toSet()), next);
+ return makePredicate(StringUtilities.split(states).stream()
+ .map(Node.State::valueOf)
+ .collect(Collectors.toCollection(() -> EnumSet.noneOf(Node.State.class))));
}
}
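
[Editor's note: the filter classes above now share one shape: a private constructor, a private makePredicate(...) helper, and static from(...) factories returning a plain Predicate<Node> that callers compose with and()/or(). A hedged sketch of a hypothetical extra filter written in the same style; FlavorFilter does not exist in this commit and only illustrates the pattern.]

package com.yahoo.vespa.hosted.provision.node.filter;

import com.yahoo.text.StringUtilities;
import com.yahoo.vespa.hosted.provision.Node;

import java.util.Objects;
import java.util.Set;
import java.util.function.Predicate;
import java.util.stream.Collectors;

/** Hypothetical filter matching nodes by flavor name, written in the new Predicate style */
public class FlavorFilter {

    private FlavorFilter() {}

    private static Predicate<Node> makePredicate(Set<String> flavors) {
        Objects.requireNonNull(flavors, "flavors cannot be null");
        if (flavors.isEmpty()) return node -> true; // an empty set matches everything, like the other filters
        return node -> flavors.contains(node.flavor().name());
    }

    /** Returns a filter matching a comma or space-separated list of flavor names */
    public static Predicate<Node> from(String flavors) {
        return makePredicate(StringUtilities.split(flavors).stream().collect(Collectors.toUnmodifiableSet()));
    }

}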
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/DelegatingOsUpgrader.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/DelegatingOsUpgrader.java
index af17934a878..84454e0d06a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/DelegatingOsUpgrader.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/DelegatingOsUpgrader.java
@@ -31,7 +31,8 @@ public class DelegatingOsUpgrader implements OsUpgrader {
public DelegatingOsUpgrader(NodeRepository nodeRepository, int maxActiveUpgrades) {
this.nodeRepository = Objects.requireNonNull(nodeRepository);
this.maxActiveUpgrades = maxActiveUpgrades;
- if (maxActiveUpgrades < 1) throw new IllegalArgumentException("maxActiveUpgrades must be positive");
+ if (maxActiveUpgrades < 1) throw new IllegalArgumentException("maxActiveUpgrades must be positive, was " +
+ maxActiveUpgrades);
}
@Override
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsVersions.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsVersions.java
index 613738458c2..748edfd936b 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsVersions.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsVersions.java
@@ -30,20 +30,28 @@ public class OsVersions {
private static final Logger log = Logger.getLogger(OsVersions.class.getName());
+ /** The maximum number of concurrent upgrades per node type triggered by {@link DelegatingOsUpgrader} */
+ private static final int MAX_DELEGATED_UPGRADES = 30;
+
+ /** The maximum number of concurrent upgrades (rebuilds) per node type triggered by {@link RebuildingOsUpgrader} */
+ private static final int MAX_REBUILDS = 10;
+
private final NodeRepository nodeRepository;
private final CuratorDatabaseClient db;
private final boolean reprovisionToUpgradeOs;
private final int maxDelegatedUpgrades;
+ private final int maxRebuilds;
public OsVersions(NodeRepository nodeRepository) {
- this(nodeRepository, nodeRepository.zone().getCloud().reprovisionToUpgradeOs(), 30);
+ this(nodeRepository, nodeRepository.zone().getCloud().reprovisionToUpgradeOs(), MAX_DELEGATED_UPGRADES, MAX_REBUILDS);
}
- OsVersions(NodeRepository nodeRepository, boolean reprovisionToUpgradeOs, int maxDelegatedUpgrades) {
+ OsVersions(NodeRepository nodeRepository, boolean reprovisionToUpgradeOs, int maxDelegatedUpgrades, int maxRebuilds) {
this.nodeRepository = Objects.requireNonNull(nodeRepository);
this.db = nodeRepository.database();
this.reprovisionToUpgradeOs = reprovisionToUpgradeOs;
this.maxDelegatedUpgrades = maxDelegatedUpgrades;
+ this.maxRebuilds = maxRebuilds;
// Read and write all versions to make sure they are stored in the latest version of the serialized format
try (var lock = db.lockOsVersionChange()) {
@@ -87,23 +95,27 @@ public class OsVersions {
}
/** Set the target OS version and upgrade budget for nodes of given type */
- public void setTarget(NodeType nodeType, Version newTarget, Duration upgradeBudget, boolean force) {
+ public void setTarget(NodeType nodeType, Version version, Duration upgradeBudget, boolean force) {
require(nodeType);
- requireNonZero(newTarget);
+ requireNonEmpty(version);
writeChange((change) -> {
- var oldTarget = targetFor(nodeType);
- if (oldTarget.filter(v -> v.equals(newTarget)).isPresent()) {
- return change; // Old target matches new target, nothing to do
+ Optional<OsVersionTarget> currentTarget = Optional.ofNullable(change.targets().get(nodeType));
+ Optional<Version> currentVersion = currentTarget.map(OsVersionTarget::version);
+ Optional<Duration> currentBudget = currentTarget.map(OsVersionTarget::upgradeBudget);
+
+ if (currentVersion.equals(Optional.of(version)) && currentBudget.equals(Optional.of(upgradeBudget))) {
+ return change; // Version and upgrade budget are unchanged: Nothing to do
}
- if (!force && oldTarget.filter(v -> v.isAfter(newTarget)).isPresent()) {
- throw new IllegalArgumentException("Cannot set target OS version to " + newTarget.toFullString() +
+ if (!force && currentVersion.filter(v -> v.isAfter(version)).isPresent()) {
+ throw new IllegalArgumentException("Cannot set target OS version to " + version.toFullString() +
" without setting 'force', as it's lower than the current version: "
- + oldTarget.get());
+ + currentTarget.get().version().toFullString());
}
- log.info("Set OS target version for " + nodeType + " nodes to " + newTarget.toFullString());
- return change.withTarget(newTarget, nodeType, upgradeBudget);
+ log.info("Set OS target version for " + nodeType + " nodes to " + version.toFullString() +
+ ", with time budget " + upgradeBudget);
+ return change.withTarget(version, nodeType, upgradeBudget);
});
}
@@ -134,12 +146,12 @@ public class OsVersions {
.anyMatch(osVersion -> osVersion.current().isPresent() &&
osVersion.current().get().getMajor() < target.getMajor());
if (rebuildRequired) {
- return new RebuildingOsUpgrader(nodeRepository);
+ return new RebuildingOsUpgrader(nodeRepository, maxRebuilds);
}
return new DelegatingOsUpgrader(nodeRepository, maxDelegatedUpgrades);
}
- private static void requireNonZero(Version version) {
+ private static void requireNonEmpty(Version version) {
if (version.isEmpty()) {
throw new IllegalArgumentException("Invalid target version: " + version.toFullString());
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RebuildingOsUpgrader.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RebuildingOsUpgrader.java
index 0e10e9f44de..25e901ebce3 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RebuildingOsUpgrader.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RebuildingOsUpgrader.java
@@ -24,16 +24,28 @@ public class RebuildingOsUpgrader extends RetiringOsUpgrader {
private static final Logger LOG = Logger.getLogger(RebuildingOsUpgrader.class.getName());
- public RebuildingOsUpgrader(NodeRepository nodeRepository) {
+ private final int maxRebuilds;
+
+ public RebuildingOsUpgrader(NodeRepository nodeRepository, int maxRebuilds) {
super(nodeRepository);
+ this.maxRebuilds = maxRebuilds;
+ if (maxRebuilds < 1) throw new IllegalArgumentException("maxRebuilds must be positive, was " + maxRebuilds);
+ }
+
+ @Override
+ protected NodeList candidates(Instant instant, OsVersionTarget target, NodeList allNodes) {
+ if (allNodes.nodeType(target.nodeType()).rebuilding().size() < maxRebuilds) {
+ return super.candidates(instant, target, allNodes);
+ }
+ return NodeList.of();
}
- protected void upgradeNodes(NodeList activeNodes, Version version, Instant instant) {
- activeNodes.osVersionIsBefore(version)
- .not().rebuilding()
- .byIncreasingOsVersion()
- .first(1)
- .forEach(node -> rebuild(node, version, instant));
+ @Override
+ protected void upgradeNodes(NodeList candidates, Version version, Instant instant) {
+ candidates.not().rebuilding()
+ .byIncreasingOsVersion()
+ .first(1)
+ .forEach(node -> rebuild(node, version, instant));
}
private void rebuild(Node host, Version target, Instant now) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RetiringOsUpgrader.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RetiringOsUpgrader.java
index 61d9c6b6b5d..cee52cb2177 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RetiringOsUpgrader.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RetiringOsUpgrader.java
@@ -33,31 +33,36 @@ public class RetiringOsUpgrader implements OsUpgrader {
}
@Override
- public void upgradeTo(OsVersionTarget target) {
+ public final void upgradeTo(OsVersionTarget target) {
NodeList allNodes = nodeRepository.nodes().list();
- NodeList activeNodes = allNodes.state(Node.State.active).nodeType(target.nodeType());
- if (activeNodes.isEmpty()) return; // No nodes eligible for upgrade
-
Instant now = nodeRepository.clock().instant();
- Duration nodeBudget = target.upgradeBudget()
- .dividedBy(activeNodes.size());
- Instant retiredAt = target.lastRetiredAt().orElse(Instant.EPOCH);
- if (now.isBefore(retiredAt.plus(nodeBudget))) return; // Budget has not been spent yet
-
- upgradeNodes(activeNodes, target.version(), now);
+ NodeList candidates = candidates(now, target, allNodes);
+ upgradeNodes(candidates, target.version(), now);
}
@Override
- public void disableUpgrade(NodeType type) {
+ public final void disableUpgrade(NodeType type) {
// No action needed in this implementation.
}
- protected void upgradeNodes(NodeList activeNodes, Version version, Instant instant) {
- activeNodes.osVersionIsBefore(version)
- .not().deprovisioning()
- .byIncreasingOsVersion()
- .first(1)
- .forEach(node -> deprovision(node, version, instant));
+ /** Returns nodes that are candidates for upgrade */
+ protected NodeList candidates(Instant instant, OsVersionTarget target, NodeList allNodes) {
+ NodeList activeNodes = allNodes.state(Node.State.active).nodeType(target.nodeType());
+ if (activeNodes.isEmpty()) return NodeList.of();
+
+ Duration nodeBudget = target.upgradeBudget().dividedBy(activeNodes.size());
+ Instant retiredAt = target.lastRetiredAt().orElse(Instant.EPOCH);
+ if (instant.isBefore(retiredAt.plus(nodeBudget))) return NodeList.of(); // Budget has not been spent yet
+
+ return activeNodes.osVersionIsBefore(target.version());
+ }
+
+ /** Trigger upgrade of candidates to given version */
+ protected void upgradeNodes(NodeList candidates, Version version, Instant instant) {
+ candidates.not().deprovisioning()
+ .byIncreasingOsVersion()
+ .first(1)
+ .forEach(node -> deprovision(node, version, instant));
}
/** Upgrade given host by retiring and deprovisioning it */
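
[Editor's note: a small standalone illustration of the pacing logic in candidates(); all values are assumed and not taken from the commit. The total upgrade budget is divided evenly across the active nodes, and no new candidate is returned until that much time has passed since the last retirement.]

import java.time.Duration;
import java.time.Instant;

class UpgradeBudgetSketch {

    public static void main(String[] args) {
        Duration upgradeBudget = Duration.ofDays(14);                // assumed target upgrade budget
        int activeNodes = 7;                                         // assumed number of active hosts
        Duration nodeBudget = upgradeBudget.dividedBy(activeNodes);  // 2 days per host
        Instant lastRetiredAt = Instant.parse("2021-04-01T00:00:00Z");
        Instant now = Instant.parse("2021-04-02T00:00:00Z");
        boolean waitLonger = now.isBefore(lastRetiredAt.plus(nodeBudget)); // true: per-node budget not yet spent
        System.out.println("nodeBudget=" + nodeBudget + ", waitLonger=" + waitLonger);
    }

}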
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java
index 3ed29e14527..2d8ab74ce71 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java
@@ -246,21 +246,9 @@ public class CuratorDatabaseClient {
private Status newNodeStatus(Node node, Node.State toState) {
if (node.state() != Node.State.failed && toState == Node.State.failed) return node.status().withIncreasedFailCount();
if (node.state() == Node.State.failed && toState == Node.State.active) return node.status().withDecreasedFailCount(); // fail undo
- if (rebootOnTransitionTo(toState, node)) {
- return node.status().withReboot(node.status().reboot().withIncreasedWanted());
- }
return node.status();
}
- /** Returns whether to reboot node as part of transition to given state. This is done to get rid of any lingering
- * unwanted state (e.g. processes) on non-host nodes. */
- private boolean rebootOnTransitionTo(Node.State state, Node node) {
- if (node.type().isHost()) return false; // Reboot of host nodes is handled by NodeRebooter
- if (zone.environment().isTest()) return false; // We want to reuse nodes quickly in test environments
-
- return node.state() != Node.State.dirty && state == Node.State.dirty;
- }
-
/**
* Returns all nodes which are in one of the given states.
* If no states are given this returns all nodes.
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java
index 66172521d4c..04c8f012b8b 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java
@@ -18,6 +18,7 @@ import java.io.UncheckedIOException;
import java.time.Instant;
import java.util.LinkedHashSet;
import java.util.Optional;
+import java.util.Set;
import java.util.function.Function;
/**
@@ -50,21 +51,22 @@ public class LoadBalancerSerializer {
Cursor root = slime.setObject();
root.setString(idField, loadBalancer.id().serializedForm());
- root.setString(hostnameField, loadBalancer.instance().hostname().toString());
+ // TODO(mpolden): Stop writing this field for empty instance after 2021-06-01
+ root.setString(hostnameField, loadBalancer.instance().map(instance -> instance.hostname().value()).orElse(""));
root.setString(stateField, asString(loadBalancer.state()));
root.setLong(changedAtField, loadBalancer.changedAt().toEpochMilli());
- loadBalancer.instance().dnsZone().ifPresent(dnsZone -> root.setString(dnsZoneField, dnsZone.id()));
+ loadBalancer.instance().flatMap(LoadBalancerInstance::dnsZone).ifPresent(dnsZone -> root.setString(dnsZoneField, dnsZone.id()));
Cursor portArray = root.setArray(portsField);
- loadBalancer.instance().ports().forEach(portArray::addLong);
+ loadBalancer.instance().ifPresent(instance -> instance.ports().forEach(portArray::addLong));
Cursor networkArray = root.setArray(networksField);
- loadBalancer.instance().networks().forEach(networkArray::addString);
+ loadBalancer.instance().ifPresent(instance -> instance.networks().forEach(networkArray::addString));
Cursor realArray = root.setArray(realsField);
- loadBalancer.instance().reals().forEach(real -> {
+ loadBalancer.instance().ifPresent(instance -> instance.reals().forEach(real -> {
Cursor realObject = realArray.addObject();
realObject.setString(hostnameField, real.hostname().value());
realObject.setString(ipAddressField, real.ipAddress());
realObject.setLong(portField, real.port());
- });
+ }));
try {
return SlimeUtils.toJsonBytes(slime);
} catch (IOException e) {
@@ -75,7 +77,7 @@ public class LoadBalancerSerializer {
public static LoadBalancer fromJson(byte[] data) {
Cursor object = SlimeUtils.jsonToSlime(data).get();
- var reals = new LinkedHashSet<Real>();
+ Set<Real> reals = new LinkedHashSet<>();
object.field(realsField).traverse((ArrayTraverser) (i, realObject) -> {
reals.add(new Real(HostName.from(realObject.field(hostnameField).asString()),
realObject.field(ipAddressField).asString(),
@@ -83,20 +85,19 @@ public class LoadBalancerSerializer {
});
- var ports = new LinkedHashSet<Integer>();
+ Set<Integer> ports = new LinkedHashSet<>();
object.field(portsField).traverse((ArrayTraverser) (i, port) -> ports.add((int) port.asLong()));
- var networks = new LinkedHashSet<String>();
+ Set<String> networks = new LinkedHashSet<>();
object.field(networksField).traverse((ArrayTraverser) (i, network) -> networks.add(network.asString()));
+ Optional<HostName> hostname = optionalString(object.field(hostnameField), Function.identity()).filter(s -> !s.isEmpty()).map(HostName::from);
+ Optional<DnsZone> dnsZone = optionalString(object.field(dnsZoneField), DnsZone::new);
+ Optional<LoadBalancerInstance> instance = hostname.map(h -> new LoadBalancerInstance(h, dnsZone, ports,
+ networks, reals));
+
return new LoadBalancer(LoadBalancerId.fromSerializedForm(object.field(idField).asString()),
- new LoadBalancerInstance(
- HostName.from(object.field(hostnameField).asString()),
- optionalString(object.field(dnsZoneField), DnsZone::new),
- ports,
- networks,
- reals
- ),
+ instance,
stateFromString(object.field(stateField).asString()),
Instant.ofEpochMilli(object.field(changedAtField).asLong()));
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
index 29f3bcebfbf..81a56e4d47e 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
@@ -92,6 +92,8 @@ public class GroupPreparer {
final Version osVersion;
if (allocateOsRequirement.equals("rhel8")) {
osVersion = new Version(8, Integer.MAX_VALUE /* always use latest 8 version */, 0);
+ } else if (allocateOsRequirement.equals("rhel7")) {
+ osVersion = new Version(7, Integer.MAX_VALUE /* always use latest 7 version */, 0);
} else {
osVersion = nodeRepository.osVersions().targetFor(hostType).orElse(Version.emptyVersion);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostCapacity.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostCapacity.java
index 0e0211beea7..58a7aa2b189 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostCapacity.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostCapacity.java
@@ -45,6 +45,7 @@ public class HostCapacity {
return candidates.stream()
.filter(node -> node.type().canRun(NodeType.tenant))
.filter(host -> host.state() == Node.State.active)
+ .filter(host -> host.reservedTo().isEmpty())
.filter(host -> freeIps(host) > 0)
.sorted(this::compareWithoutInactive)
.limit(count)
@@ -55,6 +56,7 @@ public class HostCapacity {
return candidates.stream()
.filter(node -> node.type() == NodeType.host)
.filter(host -> host.state() == Node.State.active)
+ .filter(host -> host.reservedTo().isEmpty())
.filter(host -> allNodes.childrenOf(host).isEmpty())
.collect(Collectors.toSet());
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
index 09d19300c59..f53eb189ec1 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
@@ -170,18 +170,35 @@ public class LoadBalancerProvisioner {
Instant now = nodeRepository.clock().instant();
Optional<LoadBalancer> loadBalancer = db.readLoadBalancer(id);
if (loadBalancer.isEmpty() && activate) return; // Nothing to activate as this load balancer was never prepared
- LoadBalancerInstance instance = provisionInstance(id, realsOf(nodes), loadBalancer);
+
+ Set<Real> reals = realsOf(nodes);
+ Optional<LoadBalancerInstance> instance = provisionInstance(id, reals, loadBalancer);
LoadBalancer newLoadBalancer;
if (loadBalancer.isEmpty()) {
newLoadBalancer = new LoadBalancer(id, instance, LoadBalancer.State.reserved, now);
} else {
- var newState = activate ? LoadBalancer.State.active : loadBalancer.get().state();
- newLoadBalancer = loadBalancer.get().with(instance).with(newState, now);
+ LoadBalancer.State state = activate && instance.isPresent()
+ ? LoadBalancer.State.active
+ : loadBalancer.get().state();
+ newLoadBalancer = loadBalancer.get().with(instance).with(state, now);
if (loadBalancer.get().state() != newLoadBalancer.state()) {
log.log(Level.FINE, "Moving " + newLoadBalancer.id() + " to state " + newLoadBalancer.state());
}
}
- db.writeLoadBalancers(List.of(newLoadBalancer), transaction.nested());
+
+ if (activate) {
+ db.writeLoadBalancers(List.of(newLoadBalancer), transaction.nested());
+ } else {
+ // Always store load balancer so that LoadBalancerExpirer can expire partially provisioned load balancers
+ db.writeLoadBalancer(newLoadBalancer);
+ }
+
+ // Signal that load balancer is not ready yet
+ if (instance.isEmpty()) {
+ throw new LoadBalancerServiceException("Could not (re)configure " + id + ", targeting: " +
+ reals + ". The operation will be retried on next deployment",
+ null);
+ }
}
private void provision(ApplicationTransaction transaction, ClusterSpec.Id clusterId, NodeList nodes) {
@@ -189,17 +206,18 @@ public class LoadBalancerProvisioner {
}
/** Provision or reconfigure a load balancer instance, if necessary */
- private LoadBalancerInstance provisionInstance(LoadBalancerId id, Set<Real> reals,
- Optional<LoadBalancer> currentLoadBalancer) {
+ private Optional<LoadBalancerInstance> provisionInstance(LoadBalancerId id, Set<Real> reals,
+ Optional<LoadBalancer> currentLoadBalancer) {
if (hasReals(currentLoadBalancer, reals)) return currentLoadBalancer.get().instance();
log.log(Level.FINE, "Creating " + id + ", targeting: " + reals);
try {
- return service.create(new LoadBalancerSpec(id.application(), id.cluster(), reals),
- allowEmptyReals(currentLoadBalancer));
+ return Optional.of(service.create(new LoadBalancerSpec(id.application(), id.cluster(), reals),
+ allowEmptyReals(currentLoadBalancer)));
} catch (Exception e) {
- throw new LoadBalancerServiceException("Failed to (re)configure " + id + ", targeting: " +
- reals + ". The operation will be retried on next deployment", e);
+ log.log(Level.WARNING, "Could not (re)configure " + id + ", targeting: " +
+ reals + ". The operation will be retried on next deployment", e);
}
+ return Optional.empty();
}
/** Returns the nodes allocated to the given load balanced cluster */
@@ -246,7 +264,8 @@ public class LoadBalancerProvisioner {
/** Returns whether load balancer has given reals */
private static boolean hasReals(Optional<LoadBalancer> loadBalancer, Set<Real> reals) {
if (loadBalancer.isEmpty()) return false;
- return loadBalancer.get().instance().reals().equals(reals);
+ if (loadBalancer.get().instance().isEmpty()) return false;
+ return loadBalancer.get().instance().get().reals().equals(reals);
}
/** Returns whether to allow given load balancer to have no reals */
@@ -273,5 +292,4 @@ public class LoadBalancerProvisioner {
return cluster.combinedId().orElse(cluster.id());
}
-
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
index 720548c2d99..b401bcfc03f 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
@@ -360,8 +360,9 @@ class NodeAllocation {
* @return the final list of nodes
*/
List<Node> finalNodes() {
+ int wantToRetireCount = (int) nodes.values().stream().filter(NodeCandidate::wantToRetire).count();
int currentRetiredCount = (int) nodes.values().stream().filter(node -> node.allocation().get().membership().retired()).count();
- int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), currentRetiredCount) - currentRetiredCount;
+ int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), wantToRetireCount, currentRetiredCount);
if (deltaRetiredCount > 0) { // retire until deltaRetiredCount is 0
for (NodeCandidate candidate : byRetiringPriority(nodes.values())) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
index 6ab8fc8ad49..8eca4ff2d95 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
@@ -206,7 +206,7 @@ public class NodePrioritizer {
/** Returns whether we are allocating to replace a failed node */
private boolean isReplacement(NodeList nodesInCluster) {
- int failedNodesInCluster = nodesInCluster.failing().size();
+ int failedNodesInCluster = nodesInCluster.failing().size() + nodesInCluster.state(Node.State.failed).size();
if (failedNodesInCluster == 0) return false;
return ! requestedNodes.fulfilledBy(nodesInCluster.size() - failedNodesInCluster);
}
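
[Editor's note: a worked example of the adjusted replacement check, which now counts nodes already in state failed in addition to nodes marked failing. Numbers are assumed, and fulfilledBy is approximated as a simple count comparison for a fixed-size cluster.]

class ReplacementCheckSketch {

    public static void main(String[] args) {
        int requested = 4;          // assumed cluster size in the node spec
        int nodesInCluster = 5;     // assumed nodes currently in the cluster
        int failing = 1;            // nodes marked wantToFail
        int failed = 1;             // nodes already in state failed (newly counted by this change)
        int failedNodesInCluster = failing + failed;
        // Approximation of requestedNodes.fulfilledBy(count) for a fixed-count spec: count >= requested
        boolean fulfilledWithoutFailed = (nodesInCluster - failedNodesInCluster) >= requested; // 3 >= 4 -> false
        boolean isReplacement = failedNodesInCluster > 0 && !fulfilledWithoutFailed;           // true
        System.out.println("isReplacement=" + isReplacement);
    }

}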
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
index a54acbe52ae..ab881a68ebe 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
@@ -122,7 +122,7 @@ public class NodeRepositoryProvisioner implements Provisioner {
@Override
public void restart(ApplicationId application, HostFilter filter) {
- nodeRepository.nodes().restart(ApplicationFilter.from(application, NodeHostFilter.from(filter)));
+ nodeRepository.nodes().restart(ApplicationFilter.from(application).and(NodeHostFilter.from(filter)));
}
@Override
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java
index b41c5d75832..a84f35a314b 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java
@@ -47,7 +47,7 @@ public interface NodeSpec {
boolean considerRetiring();
/** Returns the ideal number of nodes that should be retired to fulfill this spec */
- int idealRetiredCount(int acceptedCount, int currentRetiredCount);
+ int idealRetiredCount(int acceptedCount, int wantToRetireCount, int currentRetiredCount);
/** Returns number of additional nodes needed for this spec to be fulfilled given the current node count */
int fulfilledDeficitCount(int count);
@@ -132,7 +132,9 @@ public interface NodeSpec {
}
@Override
- public int idealRetiredCount(int acceptedCount, int currentRetiredCount) { return acceptedCount - this.count; }
+ public int idealRetiredCount(int acceptedCount, int wantToRetireCount, int currentRetiredCount) {
+ return acceptedCount - this.count - currentRetiredCount;
+ }
@Override
public int fulfilledDeficitCount(int count) {
@@ -207,9 +209,8 @@ public interface NodeSpec {
public boolean considerRetiring() { return true; }
@Override
- public int idealRetiredCount(int acceptedCount, int currentRetiredCount) {
- // All nodes marked with wantToRetire get marked as retired just before this function is called
- return currentRetiredCount;
+ public int idealRetiredCount(int acceptedCount, int wantToRetireCount, int currentRetiredCount) {
+ return wantToRetireCount - currentRetiredCount;
}
@Override
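
[Editor's note: a worked example, with assumed numbers, of the new retirement delta for the fixed-count variant above. idealRetiredCount now subtracts the already-retired nodes itself, and NodeAllocation uses the returned value directly as deltaRetiredCount.]

class RetiredDeltaSketch {

    public static void main(String[] args) {
        int specCount = 5;            // assumed node count requested by the spec
        int acceptedCount = 7;        // assumed nodes accepted into the allocation
        int currentRetiredCount = 1;  // assumed nodes already retired
        // New behaviour: acceptedCount - count - currentRetiredCount, used directly as the delta
        int deltaRetiredCount = acceptedCount - specCount - currentRetiredCount;
        System.out.println("deltaRetiredCount=" + deltaRetiredCount); // 1: one more node to retire
    }

}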
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java
index 692d757f41d..95909a64b52 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java
@@ -99,6 +99,7 @@ public class ApplicationSerializer {
toSlime(scalingEvent.from(), scalingEventObject.setObject("from"));
toSlime(scalingEvent.to(), scalingEventObject.setObject("to"));
scalingEventObject.setLong("at", scalingEvent.at().toEpochMilli());
+ scalingEvent.completion().ifPresent(completion -> scalingEventObject.setLong("completion", completion.toEpochMilli()));
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/LoadBalancersResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/LoadBalancersResponse.java
index 1ef449555d9..bcd0af4e121 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/LoadBalancersResponse.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/LoadBalancersResponse.java
@@ -9,6 +9,7 @@ import com.yahoo.slime.JsonFormat;
import com.yahoo.slime.Slime;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancer;
+import com.yahoo.vespa.hosted.provision.lb.LoadBalancerInstance;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancerList;
import com.yahoo.vespa.hosted.provision.node.filter.ApplicationFilter;
@@ -65,21 +66,23 @@ public class LoadBalancersResponse extends HttpResponse {
lbObject.setString("tenant", lb.id().application().tenant().value());
lbObject.setString("instance", lb.id().application().instance().value());
lbObject.setString("cluster", lb.id().cluster().value());
- lbObject.setString("hostname", lb.instance().hostname().value());
- lb.instance().dnsZone().ifPresent(dnsZone -> lbObject.setString("dnsZone", dnsZone.id()));
+ lb.instance().ifPresent(instance -> lbObject.setString("hostname", instance.hostname().value()));
+ lb.instance().flatMap(LoadBalancerInstance::dnsZone).ifPresent(dnsZone -> lbObject.setString("dnsZone", dnsZone.id()));
Cursor networkArray = lbObject.setArray("networks");
- lb.instance().networks().forEach(networkArray::addString);
+ lb.instance().ifPresent(instance -> instance.networks().forEach(networkArray::addString));
Cursor portArray = lbObject.setArray("ports");
- lb.instance().ports().forEach(portArray::addLong);
+ lb.instance().ifPresent(instance -> instance.ports().forEach(portArray::addLong));
Cursor realArray = lbObject.setArray("reals");
- lb.instance().reals().forEach(real -> {
- Cursor realObject = realArray.addObject();
- realObject.setString("hostname", real.hostname().value());
- realObject.setString("ipAddress", real.ipAddress());
- realObject.setLong("port", real.port());
+ lb.instance().ifPresent(instance -> {
+ instance.reals().forEach(real -> {
+ Cursor realObject = realArray.addObject();
+ realObject.setString("hostname", real.hostname().value());
+ realObject.setString("ipAddress", real.ipAddress());
+ realObject.setLong("port", real.port());
+ });
});
});
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java
index 1528a455046..1b3b2f81f11 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java
@@ -15,7 +15,6 @@ import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Address;
import com.yahoo.vespa.hosted.provision.node.History;
-import com.yahoo.vespa.hosted.provision.node.filter.NodeFilter;
import com.yahoo.vespa.orchestrator.Orchestrator;
import com.yahoo.vespa.orchestrator.status.HostInfo;
import com.yahoo.vespa.orchestrator.status.HostStatus;
@@ -25,6 +24,7 @@ import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.function.Function;
+import java.util.function.Predicate;
/**
* @author bratseth
@@ -40,7 +40,7 @@ class NodesResponse extends SlimeJsonResponse {
/** The parent url of nodes */
private final String nodeParentUrl;
- private final NodeFilter filter;
+ private final Predicate<Node> filter;
private final boolean recursive;
private final Function<HostName, Optional<HostInfo>> orchestrator;
private final NodeRepository nodeRepository;
@@ -104,7 +104,7 @@ class NodesResponse extends SlimeJsonResponse {
private void toSlime(List<Node> nodes, Cursor array) {
for (Node node : nodes) {
- if ( ! filter.matches(node)) continue;
+ if ( ! filter.test(node)) continue;
toSlime(node, recursive, array.addObject());
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
index c850962bf53..c7bd12204a6 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
@@ -26,6 +26,7 @@ import com.yahoo.slime.Slime;
import com.yahoo.slime.SlimeUtils;
import com.yahoo.vespa.hosted.provision.NoSuchNodeException;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeMutex;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
@@ -35,7 +36,6 @@ import com.yahoo.vespa.hosted.provision.node.Address;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.IP;
import com.yahoo.vespa.hosted.provision.node.filter.ApplicationFilter;
-import com.yahoo.vespa.hosted.provision.node.filter.NodeFilter;
import com.yahoo.vespa.hosted.provision.node.filter.NodeHostFilter;
import com.yahoo.vespa.hosted.provision.node.filter.NodeOsVersionFilter;
import com.yahoo.vespa.hosted.provision.node.filter.NodeTypeFilter;
@@ -57,6 +57,7 @@ import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.function.Function;
+import java.util.function.Predicate;
import java.util.logging.Level;
import java.util.stream.Collectors;
@@ -138,8 +139,9 @@ public class NodesV2ApiHandler extends LoggingRequestHandler {
return new MessageResponse("Moved " + path.get("hostname") + " to " + Node.State.ready);
}
else if (path.matches("/nodes/v2/state/failed/{hostname}")) {
- List<Node> failedNodes = nodeRepository.nodes().failRecursively(path.get("hostname"), Agent.operator, "Failed through the nodes/v2 API");
- return new MessageResponse("Moved " + hostnamesAsString(failedNodes) + " to " + Node.State.failed);
+ var failedOrMarkedNodes = NodeList.copyOf(nodeRepository.nodes().failOrMarkRecursively(path.get("hostname"), Agent.operator, "Failed through the nodes/v2 API"));
+ return new MessageResponse("Moved " + hostnamesAsString(failedOrMarkedNodes.state(Node.State.failed).asList()) + " to " + Node.State.failed +
+ " and marked " + hostnamesAsString(failedOrMarkedNodes.failing().asList()) + " as wantToFail");
}
else if (path.matches("/nodes/v2/state/parked/{hostname}")) {
List<Node> parkedNodes = nodeRepository.nodes().parkRecursively(path.get("hostname"), Agent.operator, "Parked through the nodes/v2 API");
@@ -157,10 +159,6 @@ public class NodesV2ApiHandler extends LoggingRequestHandler {
List<Node> breakfixedNodes = nodeRepository.nodes().breakfixRecursively(path.get("hostname"), Agent.operator, "Breakfixed through the nodes/v2 API");
return new MessageResponse("Moved " + hostnamesAsString(breakfixedNodes) + " to " + Node.State.breakfixed);
}
- else if (path.matches("/nodes/v2/state/provisioned/{hostname}")) {
- Node restoredNode = nodeRepository.nodes().restore(path.get("hostname"), Agent.operator, "Restored through the nodes/v2 API");
- return new MessageResponse("Moved " + hostnamesAsString(List.of(restoredNode)) + " to " + Node.State.provisioned);
- }
throw new NotFoundException("Cannot put to path '" + path + "'");
}
@@ -332,17 +330,16 @@ public class NodesV2ApiHandler extends LoggingRequestHandler {
return NodeSerializer.typeFrom(object.asString());
}
- public static NodeFilter toNodeFilter(HttpRequest request) {
- NodeFilter filter = NodeHostFilter.from(HostFilter.from(request.getProperty("hostname"),
- request.getProperty("flavor"),
- request.getProperty("clusterType"),
- request.getProperty("clusterId")));
- filter = ApplicationFilter.from(request.getProperty("application"), filter);
- filter = StateFilter.from(request.getProperty("state"), request.getBooleanProperty("includeDeprovisioned"), filter);
- filter = NodeTypeFilter.from(request.getProperty("type"), filter);
- filter = ParentHostFilter.from(request.getProperty("parentHost"), filter);
- filter = NodeOsVersionFilter.from(request.getProperty("osVersion"), filter);
- return filter;
+ public static Predicate<Node> toNodeFilter(HttpRequest request) {
+ return NodeHostFilter.from(HostFilter.from(request.getProperty("hostname"),
+ request.getProperty("flavor"),
+ request.getProperty("clusterType"),
+ request.getProperty("clusterId")))
+ .and(ApplicationFilter.from(request.getProperty("application")))
+ .and(StateFilter.from(request.getProperty("state"), request.getBooleanProperty("includeDeprovisioned")))
+ .and(NodeTypeFilter.from(request.getProperty("type")))
+ .and(ParentHostFilter.from(request.getProperty("parentHost")))
+ .and(NodeOsVersionFilter.from(request.getProperty("osVersion")));
}
private static boolean isPatchOverride(HttpRequest request) {
@@ -431,6 +428,7 @@ public class NodesV2ApiHandler extends LoggingRequestHandler {
}
private static String hostnamesAsString(List<Node> nodes) {
+ if (nodes.isEmpty()) return "none";
return nodes.stream().map(Node::hostname).sorted().collect(Collectors.joining(", "));
}
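
(The toNodeFilter change above drops the chain of decorating NodeFilter constructors in favour of composing the filters as java.util.function.Predicate<Node> with and(), so callers invoke test(node) as in the toSlime loop earlier in this diff. A minimal sketch of that composition pattern, using a hypothetical stand-in record rather than the real Node class:)

import java.util.function.Predicate;

public class PredicateCompositionSketch {

    // Stand-in for the real Node; only the fields this example needs.
    record Node(String hostname, String state) {}

    public static void main(String[] args) {
        Predicate<Node> byHostname = node -> node.hostname().startsWith("host");
        Predicate<Node> byState = node -> node.state().equals("active");

        // Equivalent in spirit to the .and(...) chain built in toNodeFilter above.
        Predicate<Node> filter = byHostname.and(byState);

        System.out.println(filter.test(new Node("host1.yahoo.com", "active")));    // true
        System.out.println(filter.test(new Node("cfghost1.yahoo.com", "active"))); // false
    }
}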
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java
index c0699ebf835..567e71c7f9e 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java
@@ -11,13 +11,13 @@ import org.junit.Test;
import java.time.Duration;
import java.time.Instant;
+import java.util.List;
import java.util.Set;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -36,14 +36,17 @@ public class NodeRepositoryTest {
tester.addHost("id1", "host1", "default", NodeType.host);
tester.addHost("id2", "host2", "default", NodeType.host);
tester.addHost("id3", "host3", "default", NodeType.host);
+ tester.addHost("id4", "cfghost1", "default", NodeType.confighost);
- assertEquals(3, tester.nodeRepository().nodes().list().size());
-
- tester.nodeRepository().nodes().park("host2", true, Agent.system, "Parking to unit test");
- tester.nodeRepository().nodes().removeRecursively("host2");
+ assertEquals(4, tester.nodeRepository().nodes().list().size());
+
+ for (var hostname : List.of("host2", "cfghost1")) {
+ tester.nodeRepository().nodes().park(hostname, true, Agent.system, "Parking to unit test");
+ tester.nodeRepository().nodes().removeRecursively(hostname);
+ }
- assertEquals(3, tester.nodeRepository().nodes().list().size());
- assertEquals(1, tester.nodeRepository().nodes().list(Node.State.deprovisioned).size());
+ assertEquals(4, tester.nodeRepository().nodes().list().size());
+ assertEquals(2, tester.nodeRepository().nodes().list(Node.State.deprovisioned).size());
}
@Test
@@ -158,7 +161,17 @@ public class NodeRepositoryTest {
assertEquals(2, tester.nodeRepository().nodes().list().size());
// Fail host and container
- tester.nodeRepository().nodes().failRecursively(cfghost1, Agent.system, getClass().getSimpleName());
+ tester.nodeRepository().nodes().failOrMarkRecursively(cfghost1, Agent.system, getClass().getSimpleName());
+
+ assertEquals("cfg1 is not failed yet as it active",
+ Node.State.active, tester.nodeRepository().nodes().node(cfg1).get().state());
+ assertEquals("cfghost1 is not failed yet as it active",
+ Node.State.active, tester.nodeRepository().nodes().node(cfghost1).get().state());
+ assertTrue(tester.nodeRepository().nodes().node(cfg1).get().status().wantToFail());
+ assertTrue(tester.nodeRepository().nodes().node(cfghost1).get().status().wantToFail());
+
+ tester.nodeRepository().nodes().fail(cfg1, Agent.system, "test");
+ tester.nodeRepository().nodes().fail(cfghost1, Agent.system, "test");
// Remove recursively
tester.nodeRepository().nodes().removeRecursively(cfghost1);
@@ -205,42 +218,6 @@ public class NodeRepositoryTest {
}
@Test
- public void restore_rebuilt_host() {
- NodeRepositoryTester tester = new NodeRepositoryTester();
- assertEquals(0, tester.nodeRepository().nodes().list().size());
-
- String host1 = "host1";
- String host2 = "host2";
- tester.addHost("id1", host1, "default", NodeType.host);
- tester.addHost("id2", host2, "default", NodeType.host);
- assertEquals(2, tester.nodeRepository().nodes().list().size());
-
- // One host is requested to rebuild, two hosts are parked
- tester.nodeRepository().nodes().rebuild(host2, Agent.system, tester.clock().instant());
- tester.nodeRepository().nodes().park(host1, false, Agent.system, getClass().getSimpleName());
- tester.nodeRepository().nodes().park(host2, false, Agent.system, getClass().getSimpleName());
- IP.Config ipConfigOfHost2 = tester.nodeRepository().nodes().node(host2).get().ipConfig();
-
- // Two hosts are removed
- tester.nodeRepository().nodes().removeRecursively(host1);
- tester.nodeRepository().nodes().removeRecursively(host2);
- assertEquals(2, tester.nodeRepository().nodes().list(Node.State.deprovisioned).size());
-
- // Host not rebuilding cannot be restored
- try {
- tester.nodeRepository().nodes().restore(host1, Agent.system, getClass().getSimpleName());
- fail("Expected exception");
- } catch (IllegalArgumentException ignored) {}
-
- // Other host is restored
- Node node = tester.nodeRepository().nodes().restore(host2, Agent.system, getClass().getSimpleName());
- assertSame(Node.State.provisioned, node.state());
- assertEquals("IP addresses are preserved", ipConfigOfHost2, node.ipConfig());
- assertFalse(node.status().wantToRetire());
- assertFalse(node.status().wantToRebuild());
- }
-
- @Test
public void dirty_host_only_if_we_can_dirty_children() {
NodeRepositoryTester tester = new NodeRepositoryTester();
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java
index d5699f0cffe..e4225d780f0 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java
@@ -90,35 +90,6 @@ public class InactiveAndFailedExpirerTest {
}
@Test
- public void reboot_generation_is_increased_when_node_moves_to_dirty() {
- ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
- tester.makeReadyNodes(2, nodeResources);
-
- // Allocate and deallocate a single node
- ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
- List<HostSpec> preparedNodes = tester.prepare(applicationId, cluster, Capacity.from(new ClusterResources(2, 1, nodeResources)));
- tester.activate(applicationId, new HashSet<>(preparedNodes));
- assertEquals(2, tester.getNodes(applicationId, Node.State.active).size());
- tester.deactivate(applicationId);
- List<Node> inactiveNodes = tester.getNodes(applicationId, Node.State.inactive).asList();
- assertEquals(2, inactiveNodes.size());
-
- // Check reboot generation before node is moved. New nodes transition from provisioned to dirty, so their
- // wanted reboot generation will always be 1.
- long wantedRebootGeneration = inactiveNodes.get(0).status().reboot().wanted();
- assertEquals(1, wantedRebootGeneration);
-
- // Inactive times out and node is moved to dirty
- tester.advanceTime(Duration.ofMinutes(14));
- new InactiveExpirer(tester.nodeRepository(), Duration.ofMinutes(10), Map.of(), new TestMetric()).run();
- NodeList dirty = tester.nodeRepository().nodes().list(Node.State.dirty);
- assertEquals(2, dirty.size());
-
- // Reboot generation is increased
- assertEquals(wantedRebootGeneration + 1, dirty.first().get().status().reboot().wanted());
- }
-
- @Test
public void node_that_wants_to_retire_is_moved_to_parked() throws OrchestrationException {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java
index fec6e40fd35..4d19c2c1d41 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java
@@ -63,7 +63,7 @@ public class LoadBalancerExpirerTest {
tester.nodeRepository().nodes().setReady(tester.nodeRepository().nodes().list(Node.State.dirty).asList(), Agent.system, getClass().getSimpleName());
expirer.maintain();
assertEquals(Set.of(), tester.loadBalancerService().instances().get(lb1).reals());
- assertEquals(Set.of(), loadBalancers.get().get(lb1).instance().reals());
+ assertEquals(Set.of(), loadBalancers.get().get(lb1).instance().get().reals());
// Expirer defers removal of load balancer until expiration time passes
expirer.maintain();
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java
index 3f66f9cadc4..9ded28094d2 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java
@@ -114,7 +114,7 @@ public class NodeFailTester {
return tester;
}
- public static NodeFailTester withTwoApplicationsOnDocker(int numberOfHosts) {
+ public static NodeFailTester withTwoApplications(int numberOfHosts) {
NodeFailTester tester = new NodeFailTester();
int nodesPerHost = 3;
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java
index bb954058916..7eff8af8b8d 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java
@@ -47,7 +47,7 @@ public class NodeFailerTest {
@Test
public void fail_nodes_with_severe_reports_if_allowed_to_be_down() {
- NodeFailTester tester = NodeFailTester.withTwoApplicationsOnDocker(6);
+ NodeFailTester tester = NodeFailTester.withTwoApplications(6);
String hostWithFailureReports = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2);
// Set failure report to the parent and all its children.
@@ -106,7 +106,7 @@ public class NodeFailerTest {
@Test
public void hw_fail_only_if_whole_host_is_suspended() {
- NodeFailTester tester = NodeFailTester.withTwoApplicationsOnDocker(6);
+ NodeFailTester tester = NodeFailTester.withTwoApplications(6);
String hostWithFailureReports = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2);
assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
@@ -380,7 +380,7 @@ public class NodeFailerTest {
public void failing_ready_nodes() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
- // Add ready docker node
+ // Add ready node
NodeResources newNodeResources = new NodeResources(3, 4, 5, 1);
tester.createReadyNodes(1, 16, newNodeResources);
@@ -394,20 +394,20 @@ public class NodeFailerTest {
NodeList ready = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant);
- // Two ready nodes and a ready docker node die, but only 2 of those are failed out
+ // Two ready hosts and a ready node die, but only 2 of those are failed out
tester.clock.advance(Duration.ofMinutes(180));
- Node dockerNode = ready.stream().filter(node -> node.resources().equals(newNodeResources)).findFirst().get();
+ Node failingNode = ready.stream().filter(node -> node.resources().equals(newNodeResources)).findFirst().get();
List<Node> otherNodes = ready.stream()
- .filter(node -> ! node.resources().equals(newNodeResources))
- .collect(Collectors.toList());
- tester.allNodesMakeAConfigRequestExcept(otherNodes.get(0), otherNodes.get(2), dockerNode);
+ .filter(node -> !node.resources().equals(newNodeResources))
+ .collect(Collectors.toList());
+ tester.allNodesMakeAConfigRequestExcept(otherNodes.get(0), otherNodes.get(2), failingNode);
tester.runMaintainers();
assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals( 2, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
// Another ready node dies, and it and the node that died earlier are now allowed to fail
tester.clock.advance(Duration.ofDays(1));
- tester.allNodesMakeAConfigRequestExcept(otherNodes.get(0), otherNodes.get(2), dockerNode, otherNodes.get(3));
+ tester.allNodesMakeAConfigRequestExcept(otherNodes.get(0), otherNodes.get(2), failingNode, otherNodes.get(3));
tester.runMaintainers();
assertEquals( 1, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(otherNodes.get(1), tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).first().get());
@@ -415,7 +415,7 @@ public class NodeFailerTest {
}
@Test
- public void docker_host_not_failed_without_config_requests() {
+ public void host_not_failed_without_config_requests() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
// For a day all nodes work so nothing happens
@@ -427,18 +427,17 @@ public class NodeFailerTest {
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.host).size());
}
- // Two ready nodes and a ready docker node die, but only 2 of those are failed out
tester.clock.advance(Duration.ofMinutes(180));
- Node dockerHost = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).iterator().next();
- tester.allNodesMakeAConfigRequestExcept(dockerHost);
+ Node host = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).first().get();
+ tester.allNodesMakeAConfigRequestExcept(host);
tester.runMaintainers();
assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.host).size());
}
@Test
- public void failing_docker_hosts() {
- NodeFailTester tester = NodeFailTester.withTwoApplicationsOnDocker(7);
+ public void failing_hosts() {
+ NodeFailTester tester = NodeFailTester.withTwoApplications(7);
// For a day all nodes work so nothing happens
for (int minutes = 0, interval = 30; minutes < 24 * 60; minutes += interval) {
@@ -475,6 +474,7 @@ public class NodeFailerTest {
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(10, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(6, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
+ assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.host).size());
// Now let's fail an active tenant node
@@ -514,6 +514,7 @@ public class NodeFailerTest {
assertEquals(6, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(5, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
+
// We have only 5 hosts remaining, so if we fail another host, we should only be able to redeploy app1's
// node, while app2's should remain
String downHost3 = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2, downTenant1.parentHostname().get());
@@ -601,7 +602,7 @@ public class NodeFailerTest {
// Throttles based on an absolute number in a small zone
{
// 50 regular tenant nodes, 10 hosts with 3 tenant nodes each, total 90 nodes
- NodeFailTester tester = NodeFailTester.withTwoApplicationsOnDocker(10);
+ NodeFailTester tester = NodeFailTester.withTwoApplications(10);
List<Node> readyNodes = tester.createReadyNodes(50, 30);
NodeList hosts = tester.nodeRepository.nodes().list().nodeType(NodeType.host);
List<Node> deadNodes = readyNodes.subList(0, 4);
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java
index 924d38cc6c2..5043529507f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java
@@ -185,7 +185,7 @@ public class RetiredExpirerTest {
// Set wantToRetire on all 3 config servers
List<Node> wantToRetireNodes = tester.nodeRepository().nodes()
- .retire(NodeTypeFilter.from(NodeType.config, null), Agent.operator, Instant.now());
+ .retire(NodeTypeFilter.from(NodeType.config), Agent.operator, Instant.now());
assertEquals(3, wantToRetireNodes.size());
// Redeploy to retire all 3 config servers
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
index 8c9e52e80f5..1a81511e698 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
@@ -91,7 +91,7 @@ public class OsVersionsTest {
public void max_active_upgrades() {
int totalNodes = 20;
int maxActiveUpgrades = 5;
- var versions = new OsVersions(tester.nodeRepository(), false, maxActiveUpgrades);
+ var versions = new OsVersions(tester.nodeRepository(), false, maxActiveUpgrades, Integer.MAX_VALUE);
provisionInfraApplication(totalNodes);
Supplier<NodeList> hostNodes = () -> tester.nodeRepository().nodes().list().state(Node.State.active).hosts();
@@ -156,7 +156,7 @@ public class OsVersionsTest {
@Test
public void upgrade_by_retiring() {
- var versions = new OsVersions(tester.nodeRepository(), true, Integer.MAX_VALUE);
+ var versions = new OsVersions(tester.nodeRepository(), true, Integer.MAX_VALUE, Integer.MAX_VALUE);
var clock = (ManualClock) tester.nodeRepository().clock();
int hostCount = 10;
// Provision hosts and children
@@ -171,6 +171,8 @@ public class OsVersionsTest {
// Target is set and upgrade started
var version1 = Version.fromString("7.1");
+ Duration initialBudget = Duration.ofHours(24);
+ versions.setTarget(NodeType.host, version1, initialBudget, false);
Duration totalBudget = Duration.ofHours(12);
Duration nodeBudget = totalBudget.dividedBy(hostCount);
versions.setTarget(NodeType.host, version1, totalBudget,false);
@@ -221,9 +223,9 @@ public class OsVersionsTest {
@Test
public void upgrade_by_retiring_everything_at_once() {
- var versions = new OsVersions(tester.nodeRepository(), true, Integer.MAX_VALUE);
+ var versions = new OsVersions(tester.nodeRepository(), true, Integer.MAX_VALUE, Integer.MAX_VALUE);
int hostCount = 3;
- provisionInfraApplication(hostCount, NodeType.confighost);
+ provisionInfraApplication(hostCount, infraApplication, NodeType.confighost);
Supplier<NodeList> hostNodes = () -> tester.nodeRepository().nodes().list()
.nodeType(NodeType.confighost)
.not().state(Node.State.deprovisioned);
@@ -244,7 +246,7 @@ public class OsVersionsTest {
@Test
public void upgrade_by_rebuilding() {
- var versions = new OsVersions(tester.nodeRepository(), false, Integer.MAX_VALUE);
+ var versions = new OsVersions(tester.nodeRepository(), false, Integer.MAX_VALUE, 1);
var clock = tester.clock();
int hostCount = 10;
provisionInfraApplication(hostCount + 1);
@@ -273,14 +275,17 @@ public class OsVersionsTest {
versions.resumeUpgradeOf(NodeType.host, true);
assertEquals(1, hostNodes.get().rebuilding().size());
- // Budget has been spent and another host is rebuilt
+ // Time budget has been spent, but we cannot rebuild another host until the current one is done
clock.advance(nodeBudget);
versions.resumeUpgradeOf(NodeType.host, true);
NodeList hostsRebuilding = hostNodes.get().rebuilding();
- assertEquals(2, hostsRebuilding.size());
-
- // Hosts are rebuilt
+ assertEquals(1, hostsRebuilding.size());
completeRebuildOf(hostsRebuilding.asList(), NodeType.host);
+ assertEquals(1, hostNodes.get().onOsVersion(version1).size());
+
+ // Second host is rebuilt
+ versions.resumeUpgradeOf(NodeType.host, true);
+ completeRebuildOf(hostNodes.get().rebuilding().asList(), NodeType.host);
assertEquals(2, hostNodes.get().onOsVersion(version1).size());
// The remaining hosts complete their upgrade
@@ -322,6 +327,42 @@ public class OsVersionsTest {
completeRebuildOf(hostsRebuilding.asList(), NodeType.host);
}
+ @Test
+ public void upgrade_by_rebuilding_multiple_host_types() {
+ var versions = new OsVersions(tester.nodeRepository(), false, Integer.MAX_VALUE, 1);
+ var clock = tester.clock();
+ int hostCount = 3;
+ provisionInfraApplication(hostCount, infraApplication, NodeType.host);
+ provisionInfraApplication(hostCount, ApplicationId.from("hosted-vespa", "confighost", "default"), NodeType.confighost);
+ Supplier<NodeList> hosts = () -> tester.nodeRepository().nodes().list().nodeType(NodeType.host,
+ NodeType.confighost);
+
+ // All hosts upgrade to first version. Upgrades are delegated
+ var version0 = Version.fromString("7.0");
+ versions.setTarget(NodeType.host, version0, Duration.ZERO, false);
+ versions.setTarget(NodeType.confighost, version0, Duration.ZERO, false);
+ setCurrentVersion(hosts.get().asList(), version0);
+
+ // Target is set for new major version
+ var version1 = Version.fromString("8.0");
+ Duration totalBudget = Duration.ofHours(12);
+ Duration nodeBudget = totalBudget.dividedBy(hostCount);
+ versions.setTarget(NodeType.host, version1, totalBudget, false);
+ versions.setTarget(NodeType.confighost, version1, totalBudget, false);
+
+ // One host of each type is upgraded
+ for (int i = 0; i < hostCount; i++) {
+ clock.advance(nodeBudget);
+ versions.resumeUpgradeOf(NodeType.host, true);
+ versions.resumeUpgradeOf(NodeType.confighost, true);
+ NodeList hostsRebuilding = hosts.get().rebuilding();
+ assertEquals(2, hostsRebuilding.size());
+ completeRebuildOf(hostsRebuilding.nodeType(NodeType.host).asList(), NodeType.host);
+ completeRebuildOf(hostsRebuilding.nodeType(NodeType.confighost).asList(), NodeType.confighost);
+ }
+ assertEquals("All hosts upgraded", hostCount * 2, hosts.get().onOsVersion(version1).size());
+ }
+
private NodeList deprovisioningChildrenOf(Node parent) {
return tester.nodeRepository().nodes().list()
.childrenOf(parent)
@@ -329,12 +370,12 @@ public class OsVersionsTest {
}
private List<Node> provisionInfraApplication(int nodeCount) {
- return provisionInfraApplication(nodeCount, NodeType.host);
+ return provisionInfraApplication(nodeCount, infraApplication, NodeType.host);
}
- private List<Node> provisionInfraApplication(int nodeCount, NodeType nodeType) {
+ private List<Node> provisionInfraApplication(int nodeCount, ApplicationId application, NodeType nodeType) {
var nodes = tester.makeReadyNodes(nodeCount, "default", nodeType, 1);
- tester.prepareAndActivateInfraApplication(infraApplication, nodeType);
+ tester.prepareAndActivateInfraApplication(application, nodeType);
return nodes.stream()
.map(Node::hostname)
.flatMap(hostname -> tester.nodeRepository().nodes().node(hostname).stream())
@@ -369,10 +410,11 @@ public class OsVersionsTest {
tester.patchNodes(nodes, (node) -> {
Optional<Version> wantedOsVersion = node.status().osVersion().wanted();
if (node.status().wantToDeprovision()) {
+ ApplicationId application = node.allocation().get().owner();
tester.nodeRepository().nodes().park(node.hostname(), false, Agent.system,
getClass().getSimpleName());
tester.nodeRepository().nodes().removeRecursively(node.hostname());
- node = provisionInfraApplication(1, nodeType).get(0);
+ node = provisionInfraApplication(1, application, nodeType).get(0);
}
return node.with(node.status().withOsVersion(node.status().osVersion().withCurrent(wantedOsVersion)));
});
@@ -383,12 +425,15 @@ public class OsVersionsTest {
tester.patchNodes(nodes, (node) -> {
Optional<Version> wantedOsVersion = node.status().osVersion().wanted();
if (node.status().wantToRebuild()) {
+ ApplicationId application = node.allocation().get().owner();
tester.nodeRepository().nodes().park(node.hostname(), false, Agent.system,
getClass().getSimpleName());
tester.nodeRepository().nodes().removeRecursively(node.hostname());
- node = tester.nodeRepository().nodes().restore(node.hostname(), Agent.system, getClass().getSimpleName());
+ Node newNode = Node.create(node.id(), node.ipConfig(), node.hostname(), node.flavor(), node.type())
+ .build();
+ node = tester.nodeRepository().nodes().addNodes(List.of(newNode), Agent.system).get(0);
node = tester.nodeRepository().nodes().setReady(node.hostname(), Agent.system, getClass().getSimpleName());
- tester.prepareAndActivateInfraApplication(infraApplication, nodeType);
+ tester.prepareAndActivateInfraApplication(application, nodeType);
node = tester.nodeRepository().nodes().node(node.hostname()).get();
}
return node.with(node.status().withOsVersion(node.status().osVersion().withCurrent(wantedOsVersion)));
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializerTest.java
index bcb78844666..51d6f7fb2fb 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializerTest.java
@@ -30,7 +30,7 @@ public class LoadBalancerSerializerTest {
"application1",
"default"),
ClusterSpec.Id.from("qrs")),
- new LoadBalancerInstance(
+ Optional.of(new LoadBalancerInstance(
HostName.from("lb-host"),
Optional.of(new DnsZone("zone-id-1")),
ImmutableSet.of(4080, 4443),
@@ -40,19 +40,19 @@ public class LoadBalancerSerializerTest {
4080),
new Real(HostName.from("real-2"),
"127.0.0.2",
- 4080))),
+ 4080)))),
LoadBalancer.State.active,
now);
var serialized = LoadBalancerSerializer.fromJson(LoadBalancerSerializer.toJson(loadBalancer));
assertEquals(loadBalancer.id(), serialized.id());
- assertEquals(loadBalancer.instance().hostname(), serialized.instance().hostname());
- assertEquals(loadBalancer.instance().dnsZone(), serialized.instance().dnsZone());
- assertEquals(loadBalancer.instance().ports(), serialized.instance().ports());
- assertEquals(loadBalancer.instance().networks(), serialized.instance().networks());
+ assertEquals(loadBalancer.instance().get().hostname(), serialized.instance().get().hostname());
+ assertEquals(loadBalancer.instance().get().dnsZone(), serialized.instance().get().dnsZone());
+ assertEquals(loadBalancer.instance().get().ports(), serialized.instance().get().ports());
+ assertEquals(loadBalancer.instance().get().networks(), serialized.instance().get().networks());
assertEquals(loadBalancer.state(), serialized.state());
assertEquals(loadBalancer.changedAt().truncatedTo(MILLIS), serialized.changedAt());
- assertEquals(loadBalancer.instance().reals(), serialized.instance().reals());
+ assertEquals(loadBalancer.instance().get().reals(), serialized.instance().get().reals());
}
}
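
(These serializer test changes follow from the load balancer's instance becoming Optional in this commit, since a load balancer can now be recorded before an instance was successfully provisioned. A small sketch of the unwrapping pattern, with simplified stand-in types rather than the real LoadBalancer and LoadBalancerInstance classes:)

import java.util.Optional;

public class OptionalInstanceSketch {

    record Instance(String hostname) {}
    record LoadBalancer(String id, Optional<Instance> instance) {}

    public static void main(String[] args) {
        LoadBalancer reservedOnly = new LoadBalancer("lb1", Optional.empty());
        LoadBalancer active = new LoadBalancer("lb2", Optional.of(new Instance("lb-host")));

        // The tests in this diff call instance().get() where an instance is known to exist;
        // code that cannot assume this would branch on isPresent() or use map/orElse instead.
        System.out.println(active.instance().get().hostname());                             // lb-host
        System.out.println(reservedOnly.instance().map(Instance::hostname).orElse("none")); // none
    }
}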
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
index b7e671a7b76..744322c9edc 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
@@ -180,7 +180,7 @@ public class AclProvisioningTest {
// Load balancer is allocated to application
var loadBalancers = tester.nodeRepository().loadBalancers().list(application);
assertEquals(1, loadBalancers.asList().size());
- var lbNetworks = loadBalancers.asList().get(0).instance().networks();
+ var lbNetworks = loadBalancers.asList().get(0).instance().get().networks();
assertEquals(2, lbNetworks.size());
// ACL for nodes with allocation trust their respective load balancer networks, if any
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java
index 72410c204a3..0c1466e7bf0 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java
@@ -130,7 +130,7 @@ public class DynamicDockerAllocationTest {
// App 2 and 3 should have been allocated to the same nodes - fail one of the parent hosts from there
String parent = "host-1.yahoo.com";
- tester.nodeRepository().nodes().failRecursively(parent, Agent.system, "Testing");
+ tester.nodeRepository().nodes().failOrMarkRecursively(parent, Agent.system, "Testing");
// Redeploy all applications
deployApp(application1, clusterSpec1, resources, tester, 3);
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
index a3780db4789..eb3bdff484d 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
@@ -10,6 +10,7 @@ import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeType;
+import com.yahoo.config.provision.exception.LoadBalancerServiceException;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
@@ -59,7 +60,7 @@ public class LoadBalancerProvisionerTest {
clusterRequest(ClusterSpec.Type.container, containerCluster1),
clusterRequest(ClusterSpec.Type.content, contentCluster));
assertEquals(1, lbApp1.get().size());
- assertEquals("Prepare provisions load balancer with reserved nodes", 2, lbApp1.get().get(0).instance().reals().size());
+ assertEquals("Prepare provisions load balancer with reserved nodes", 2, lbApp1.get().get(0).instance().get().reals().size());
tester.activate(app1, nodes);
tester.activate(app2, prepare(app2, clusterRequest(ClusterSpec.Type.container, ClusterSpec.Id.from("qrs"))));
assertEquals(1, lbApp2.get().size());
@@ -67,11 +68,11 @@ public class LoadBalancerProvisionerTest {
// Reals are configured after activation
assertEquals(app1, lbApp1.get().get(0).id().application());
assertEquals(containerCluster1, lbApp1.get().get(0).id().cluster());
- assertEquals(Collections.singleton(4443), lbApp1.get().get(0).instance().ports());
- assertEquals("127.0.0.1", get(lbApp1.get().get(0).instance().reals(), 0).ipAddress());
- assertEquals(4443, get(lbApp1.get().get(0).instance().reals(), 0).port());
- assertEquals("127.0.0.2", get(lbApp1.get().get(0).instance().reals(), 1).ipAddress());
- assertEquals(4443, get(lbApp1.get().get(0).instance().reals(), 1).port());
+ assertEquals(Collections.singleton(4443), lbApp1.get().get(0).instance().get().ports());
+ assertEquals("127.0.0.1", get(lbApp1.get().get(0).instance().get().reals(), 0).ipAddress());
+ assertEquals(4443, get(lbApp1.get().get(0).instance().get().reals(), 0).port());
+ assertEquals("127.0.0.2", get(lbApp1.get().get(0).instance().get().reals(), 1).ipAddress());
+ assertEquals(4443, get(lbApp1.get().get(0).instance().get().reals(), 1).port());
// A container is failed
Supplier<List<Node>> containers = () -> tester.getNodes(app1).container().asList();
@@ -83,13 +84,13 @@ public class LoadBalancerProvisionerTest {
clusterRequest(ClusterSpec.Type.container, containerCluster1),
clusterRequest(ClusterSpec.Type.content, contentCluster)));
LoadBalancer loadBalancer = tester.nodeRepository().loadBalancers().list(app1).asList().get(0);
- assertEquals(2, loadBalancer.instance().reals().size());
- assertTrue("Failed node is removed", loadBalancer.instance().reals().stream()
+ assertEquals(2, loadBalancer.instance().get().reals().size());
+ assertTrue("Failed node is removed", loadBalancer.instance().get().reals().stream()
.map(Real::hostname)
.map(HostName::value)
.noneMatch(hostname -> hostname.equals(toFail.hostname())));
- assertEquals(containers.get().get(0).hostname(), get(loadBalancer.instance().reals(), 0).hostname().value());
- assertEquals(containers.get().get(1).hostname(), get(loadBalancer.instance().reals(), 1).hostname().value());
+ assertEquals(containers.get().get(0).hostname(), get(loadBalancer.instance().get().reals(), 0).hostname().value());
+ assertEquals(containers.get().get(1).hostname(), get(loadBalancer.instance().get().reals(), 1).hostname().value());
assertSame("State is unchanged", LoadBalancer.State.active, loadBalancer.state());
// Add another container cluster to first app
@@ -110,6 +111,7 @@ public class LoadBalancerProvisionerTest {
.collect(Collectors.toList());
List<HostName> reals = lbApp1.get().stream()
.map(LoadBalancer::instance)
+ .flatMap(Optional::stream)
.map(LoadBalancerInstance::reals)
.flatMap(Collection::stream)
.map(Real::hostname)
@@ -152,7 +154,7 @@ public class LoadBalancerProvisionerTest {
.findFirst()
.orElseThrow());
- // Next redeploy does not create a new load balancer instance
+ // Next redeploy does not create a new load balancer instance because reals are unchanged
tester.loadBalancerService().throwOnCreate(true);
tester.activate(app1, prepare(app1,
clusterRequest(ClusterSpec.Type.container, containerCluster1),
@@ -208,7 +210,7 @@ public class LoadBalancerProvisionerTest {
var combinedId = ClusterSpec.Id.from("container1");
var nodes = prepare(app1, clusterRequest(ClusterSpec.Type.combined, ClusterSpec.Id.from("content1"), Optional.of(combinedId)));
assertEquals(1, lbs.get().size());
- assertEquals("Prepare provisions load balancer with reserved nodes", 2, lbs.get().get(0).instance().reals().size());
+ assertEquals("Prepare provisions load balancer with reserved nodes", 2, lbs.get().get(0).instance().get().reals().size());
tester.activate(app1, nodes);
assertSame(LoadBalancer.State.active, lbs.get().get(0).state());
assertEquals(combinedId, lbs.get().get(0).id().cluster());
@@ -222,7 +224,7 @@ public class LoadBalancerProvisionerTest {
var nodes = prepare(configServerApp, Capacity.fromRequiredNodeType(NodeType.config),
clusterRequest(ClusterSpec.Type.admin, cluster));
assertEquals(1, lbs.get().size());
- assertEquals("Prepare provisions load balancer with reserved nodes", 2, lbs.get().get(0).instance().reals().size());
+ assertEquals("Prepare provisions load balancer with reserved nodes", 2, lbs.get().get(0).instance().get().reals().size());
tester.activate(configServerApp, nodes);
assertSame(LoadBalancer.State.active, lbs.get().get(0).state());
assertEquals(cluster, lbs.get().get(0).id().cluster());
@@ -236,7 +238,7 @@ public class LoadBalancerProvisionerTest {
var nodes = prepare(controllerApp, Capacity.fromRequiredNodeType(NodeType.controller),
clusterRequest(ClusterSpec.Type.container, cluster));
assertEquals(1, lbs.get().size());
- assertEquals("Prepare provisions load balancer with reserved nodes", 2, lbs.get().get(0).instance().reals().size());
+ assertEquals("Prepare provisions load balancer with reserved nodes", 2, lbs.get().get(0).instance().get().reals().size());
tester.activate(controllerApp, nodes);
assertSame(LoadBalancer.State.active, lbs.get().get(0).state());
assertEquals(cluster, lbs.get().get(0).id().cluster());
@@ -253,7 +255,7 @@ public class LoadBalancerProvisionerTest {
// instance1 is deployed
tester.activate(instance1, prepare(instance1, clusterRequest(ClusterSpec.Type.container, devCluster)));
- // instance2 clashes because cluster name matches instance1
+ // instance2 clashes because instance name matches instance1 cluster name
try {
prepare(instance2, clusterRequest(ClusterSpec.Type.container, defaultCluster));
fail("Expected exception");
@@ -263,10 +265,38 @@ public class LoadBalancerProvisionerTest {
// instance2 changes cluster name and does not clash
tester.activate(instance2, prepare(instance2, clusterRequest(ClusterSpec.Type.container, ClusterSpec.Id.from("qrs"))));
- // instance3 clashes because instance name matches instance2 cluster
+ // instance3 does not clash
tester.activate(instance3, prepare(instance3, clusterRequest(ClusterSpec.Type.container, defaultCluster)));
}
+ @Test
+ public void provisioning_load_balancer_fails_initially() {
+ Supplier<List<LoadBalancer>> lbs = () -> tester.nodeRepository().loadBalancers().list(app1).asList();
+ ClusterSpec.Id cluster = ClusterSpec.Id.from("qrs1");
+
+ // Provisioning load balancer fails on deployment
+ tester.loadBalancerService().throwOnCreate(true);
+ try {
+ prepare(app1, clusterRequest(ClusterSpec.Type.container, cluster));
+ fail("Expected exception");
+ } catch (LoadBalancerServiceException ignored) {}
+ List<LoadBalancer> loadBalancers = lbs.get();
+ assertEquals(1, loadBalancers.size());
+ assertSame(LoadBalancer.State.reserved, loadBalancers.get(0).state());
+ assertTrue("Load balancer has no instance", loadBalancers.get(0).instance().isEmpty());
+
+ // Next deployment succeeds
+ tester.loadBalancerService().throwOnCreate(false);
+ Set<HostSpec> nodes = prepare(app1, clusterRequest(ClusterSpec.Type.container, cluster));
+ loadBalancers = lbs.get();
+ assertSame(LoadBalancer.State.reserved, loadBalancers.get(0).state());
+ assertTrue("Load balancer has instance", loadBalancers.get(0).instance().isPresent());
+ tester.activate(app1, nodes);
+ loadBalancers = lbs.get();
+ assertSame(LoadBalancer.State.active, loadBalancers.get(0).state());
+ assertTrue("Load balancer has instance", loadBalancers.get(0).instance().isPresent());
+ }
+
private void dirtyNodesOf(ApplicationId application) {
tester.nodeRepository().nodes().deallocate(tester.nodeRepository().nodes().list().owner(application).asList(), Agent.system, this.getClass().getSimpleName());
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
index 889fd537740..d6ceef81b04 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
@@ -798,6 +798,29 @@ public class ProvisioningTest {
}
@Test
+ public void allow_unretire_nodes_allocated_through_type_spec() {
+ ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
+ ApplicationId tenantHostAppId = ProvisioningTester.applicationId();
+ tester.makeReadyHosts(10, defaultResources).prepareAndActivateInfraApplication(tenantHostAppId, NodeType.host);
+
+ NodeList list = tester.nodeRepository().nodes().list();
+ assertEquals(10, list.state(Node.State.active).nodeType(NodeType.host).size());
+
+ // Pick out 5 random nodes and retire those
+ Set<String> retiredHostnames = list.shuffle(new Random()).stream().map(Node::hostname).limit(5).collect(Collectors.toSet());
+ tester.patchNodes(node -> retiredHostnames.contains(node.hostname()), node -> node.withWantToRetire(true, Agent.system, tester.clock().instant()));
+ tester.prepareAndActivateInfraApplication(tenantHostAppId, NodeType.host);
+
+ assertEquals(retiredHostnames, tester.nodeRepository().nodes().list().retired().stream().map(Node::hostname).collect(Collectors.toSet()));
+
+ Set<String> unretiredHostnames = retiredHostnames.stream().limit(2).collect(Collectors.toSet());
+ tester.patchNodes(node -> unretiredHostnames.contains(node.hostname()), node -> node.withWantToRetire(false, Agent.system, tester.clock().instant()));
+ tester.prepareAndActivateInfraApplication(tenantHostAppId, NodeType.host);
+
+ assertEquals(3, tester.nodeRepository().nodes().list().retired().size());
+ }
+
+ @Test
public void application_deployment_extends_existing_reservations_on_deploy() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
index df101c416c1..f987d796a3a 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
@@ -58,6 +58,7 @@ import java.util.Optional;
import java.util.Set;
import java.util.UUID;
import java.util.function.Function;
+import java.util.function.Predicate;
import java.util.function.UnaryOperator;
import java.util.logging.Level;
import java.util.stream.Collectors;
@@ -154,6 +155,10 @@ public class ProvisioningTester {
return patchNodes(List.of(node), patcher).get(0);
}
+ public List<Node> patchNodes(Predicate<Node> filter, UnaryOperator<Node> patcher) {
+ return patchNodes(nodeRepository.nodes().list().stream().filter(filter).collect(Collectors.toList()), patcher);
+ }
+
public List<Node> patchNodes(List<Node> nodes, UnaryOperator<Node> patcher) {
List<Node> updated = new ArrayList<>();
for (var node : nodes) {
@@ -271,7 +276,7 @@ public class ProvisioningTester {
for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
int expectedRestarts = 0;
for (HostFilter filter : filters)
- if (NodeHostFilter.from(filter).matches(node))
+ if (NodeHostFilter.from(filter).test(node))
expectedRestarts++;
assertEquals(expectedRestarts, node.allocation().get().restartGeneration().wanted());
}
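
(The new patchNodes(Predicate, UnaryOperator) overload above selects nodes with a predicate before handing them to the existing list-based patchNodes. The same filter-then-apply idiom in isolation, shown on plain strings instead of Node objects and not meant as the tester's actual API:)

import java.util.List;
import java.util.function.Predicate;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;

public class PatchBySelectionSketch {

    // Select matching items, then apply the patch to each, mirroring patchNodes(filter, patcher).
    static List<String> patch(List<String> items, Predicate<String> filter, UnaryOperator<String> patcher) {
        return items.stream().filter(filter).map(patcher).collect(Collectors.toList());
    }

    public static void main(String[] args) {
        List<String> hostnames = List.of("host1", "host2", "cfghost1");
        System.out.println(patch(hostnames, h -> h.startsWith("host"), h -> h + ":retired"));
        // [host1:retired, host2:retired]
    }
}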
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java
index a17fcdd7ff1..81ae6651d25 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java
@@ -88,7 +88,7 @@ public class NodesV2ApiTest {
assertReboot(19, new Request("http://localhost:8080/nodes/v2/command/reboot",
new byte[0], Request.Method.POST));
tester.assertResponseContains(new Request("http://localhost:8080/nodes/v2/node/host2.yahoo.com"),
- "\"rebootGeneration\":4");
+ "\"rebootGeneration\":3");
// POST new nodes
assertResponse(new Request("http://localhost:8080/nodes/v2/node",
@@ -133,7 +133,7 @@ public class NodesV2ApiTest {
// PUT a node in failed ...
assertResponse(new Request("http://localhost:8080/nodes/v2/state/failed/host2.yahoo.com",
new byte[0], Request.Method.PUT),
- "{\"message\":\"Moved host2.yahoo.com to failed\"}");
+ "{\"message\":\"Moved host2.yahoo.com to failed and marked none as wantToFail\"}");
tester.assertResponseContains(new Request("http://localhost:8080/nodes/v2/node/host2.yahoo.com"),
"\"state\":\"failed\"");
// ... and put it back in active (after fixing). This is useful to restore data when multiple nodes fail.
@@ -149,7 +149,7 @@ public class NodesV2ApiTest {
// or, PUT a node in failed ...
assertResponse(new Request("http://localhost:8080/nodes/v2/state/failed/test-node-pool-102-2",
new byte[0], Request.Method.PUT),
- "{\"message\":\"Moved test-node-pool-102-2 to failed\"}");
+ "{\"message\":\"Moved test-node-pool-102-2 to failed and marked none as wantToFail\"}");
tester.assertResponseContains(new Request("http://localhost:8080/nodes/v2/node/test-node-pool-102-2"),
"\"state\":\"failed\"");
// ... and deallocate it such that it moves to dirty and is recycled
@@ -165,14 +165,12 @@ public class NodesV2ApiTest {
tester.assertResponse(new Request("http://localhost:8080/nodes/v2/node/test-node-pool-102-2", new byte[0], Request.Method.GET),
404, "{\"error-code\":\"NOT_FOUND\",\"message\":\"No node with hostname 'test-node-pool-102-2'\"}");
- // Put a host in failed and make sure its children are also failed
+ // Mark a node and its children as want to fail
assertResponse(new Request("http://localhost:8080/nodes/v2/state/failed/dockerhost1.yahoo.com", new byte[0], Request.Method.PUT),
- "{\"message\":\"Moved dockerhost1.yahoo.com, host4.yahoo.com to failed\"}");
-
+ "{\"message\":\"Moved none to failed and marked dockerhost1.yahoo.com, host4.yahoo.com as wantToFail\"}");
+ // Nodes are not failed yet
assertResponse(new Request("http://localhost:8080/nodes/v2/state/failed"), "{\"nodes\":[" +
- "{\"url\":\"http://localhost:8080/nodes/v2/node/host5.yahoo.com\"}," +
- "{\"url\":\"http://localhost:8080/nodes/v2/node/host4.yahoo.com\"}," +
- "{\"url\":\"http://localhost:8080/nodes/v2/node/dockerhost1.yahoo.com\"}]}");
+ "{\"url\":\"http://localhost:8080/nodes/v2/node/host5.yahoo.com\"}]}");
// Update (PATCH) a node (multiple fields can also be sent in one request body)
assertResponse(new Request("http://localhost:8080/nodes/v2/node/host4.yahoo.com",
@@ -237,16 +235,10 @@ public class NodesV2ApiTest {
assertFile(new Request("http://localhost:8080/nodes/v2/node/host4.yahoo.com"), "node4-after-changes.json");
- // move a host marked as wantToRebuild to deprovisioned
- assertResponse(new Request("http://localhost:8080/nodes/v2/node/dockerhost1.yahoo.com",
- new byte[0], Request.Method.DELETE),
- "{\"message\":\"Removed dockerhost1.yahoo.com\"}");
- // ... and then restore it
- assertResponse(new Request("http://localhost:8080/nodes/v2/state/provisioned/dockerhost1.yahoo.com",
+ // park and remove host
+ assertResponse(new Request("http://localhost:8080/nodes/v2/state/parked/dockerhost1.yahoo.com",
new byte[0], Request.Method.PUT),
- "{\"message\":\"Moved dockerhost1.yahoo.com to provisioned\"}");
-
- // move a host to deprovisioned
+ "{\"message\":\"Moved dockerhost1.yahoo.com to parked\"}");
assertResponse(new Request("http://localhost:8080/nodes/v2/node/dockerhost1.yahoo.com",
new byte[0], Request.Method.DELETE),
"{\"message\":\"Removed dockerhost1.yahoo.com\"}");
@@ -423,17 +415,17 @@ public class NodesV2ApiTest {
"{\"message\":\"Added 1 nodes to the provisioned state\"}");
assertResponse(new Request("http://localhost:8080/nodes/v2/state/failed/foo.yahoo.com",
new byte[0], Request.Method.PUT),
- "{\"message\":\"Moved foo.yahoo.com to failed\"}");
+ "{\"message\":\"Moved foo.yahoo.com to failed and marked none as wantToFail\"}");
assertResponse(new Request("http://localhost:8080/nodes/v2/state/dirty/foo.yahoo.com",
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved foo.yahoo.com to dirty\"}");
tester.assertResponseContains(new Request("http://localhost:8080/nodes/v2/node/foo.yahoo.com"),
- "\"rebootGeneration\":1");
+ "\"rebootGeneration\":0");
assertResponse(new Request("http://localhost:8080/nodes/v2/node/foo.yahoo.com",
Utf8.toBytes("{\"currentRebootGeneration\": 42}"), Request.Method.PATCH),
"{\"message\":\"Updated foo.yahoo.com\"}");
tester.assertResponseContains(new Request("http://localhost:8080/nodes/v2/node/foo.yahoo.com"),
- "\"rebootGeneration\":1");
+ "\"rebootGeneration\":0");
}
@Test
@@ -471,7 +463,7 @@ public class NodesV2ApiTest {
// Attempt to fail and ready an allocated node without going through dirty
assertResponse(new Request("http://localhost:8080/nodes/v2/state/failed/host1.yahoo.com",
new byte[0], Request.Method.PUT),
- "{\"message\":\"Moved host1.yahoo.com to failed\"}");
+ "{\"message\":\"Moved host1.yahoo.com to failed and marked none as wantToFail\"}");
tester.assertResponse(new Request("http://localhost:8080/nodes/v2/state/ready/host1.yahoo.com",
new byte[0], Request.Method.PUT),
400,
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/cfg1.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/cfg1.json
index 61c381ae67e..87fa1b15d84 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/cfg1.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/cfg1.json
@@ -9,7 +9,7 @@
"cpuCores": 2.0,
"resources":{"vcpu":2.0,"memoryGb":16.0,"diskGb":400.0,"bandwidthGbps":10.0,"diskSpeed":"fast","storageType":"remote"},
"environment": "BARE_METAL",
- "rebootGeneration": 1,
+ "rebootGeneration": 0,
"currentRebootGeneration": 0,
"failCount": 0,
"wantToRetire": false,
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/cfg2.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/cfg2.json
index 1b9504baa64..ed95343586b 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/cfg2.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/cfg2.json
@@ -9,7 +9,7 @@
"cpuCores": 2.0,
"resources":{"vcpu":2.0,"memoryGb":16.0,"diskGb":400.0,"bandwidthGbps":10.0,"diskSpeed":"fast","storageType":"remote"},
"environment": "BARE_METAL",
- "rebootGeneration": 1,
+ "rebootGeneration": 0,
"currentRebootGeneration": 0,
"failCount": 0,
"wantToRetire": false,
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node1.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node1.json
index 402b2a7873b..0eec239f7dc 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node1.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node1.json
@@ -26,7 +26,7 @@
"wantedVespaVersion": "6.42.0",
"requestedResources": { "vcpu":2.0, "memoryGb":8.0, "diskGb":50.0, "bandwidthGbps":1.0, "diskSpeed":"fast", "storageType":"any" },
"allowedToBeDown": false,
- "rebootGeneration": 1,
+ "rebootGeneration": 0,
"currentRebootGeneration": 0,
"failCount": 0,
"wantToRetire": false,
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node10.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node10.json
index 8c3801dc589..bc101402725 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node10.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node10.json
@@ -27,7 +27,7 @@
"wantedVespaVersion": "6.42.0",
"requestedResources": { "vcpu":2.0, "memoryGb":8.0, "diskGb":50.0, "bandwidthGbps":1.0, "diskSpeed":"fast", "storageType":"any" },
"allowedToBeDown": false,
- "rebootGeneration": 1,
+ "rebootGeneration": 0,
"currentRebootGeneration": 0,
"vespaVersion": "5.104.142",
"currentDockerImage": "docker-registry.domain.tld:8080/dist/vespa:5.104.142",
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node2.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node2.json
index 134c7688b62..73f9cabba17 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node2.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node2.json
@@ -26,7 +26,7 @@
"wantedVespaVersion": "6.42.0",
"requestedResources": { "vcpu":2.0, "memoryGb":8.0, "diskGb":50.0, "bandwidthGbps":1.0, "diskSpeed":"fast", "storageType":"any" },
"allowedToBeDown": false,
- "rebootGeneration": 1,
+ "rebootGeneration": 0,
"currentRebootGeneration": 0,
"failCount": 0,
"wantToRetire": false,
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node3.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node3.json
index 81d2a30e73d..9997478d8bb 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node3.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node3.json
@@ -8,7 +8,7 @@
"flavor": "[vcpu: 0.5, memory: 48.0 Gb, disk 500.0 Gb, bandwidth: 1.0 Gbps, storage type: local]",
"resources":{"vcpu":0.5,"memoryGb":48.0,"diskGb":500.0,"bandwidthGbps":1.0,"diskSpeed":"fast","storageType":"local"},
"environment": "DOCKER_CONTAINER",
- "rebootGeneration": 1,
+ "rebootGeneration": 0,
"currentRebootGeneration": 0,
"failCount": 0,
"wantToRetire": false,
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-after-changes.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-after-changes.json
index 15e76cae558..04d4cdfa505 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-after-changes.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-after-changes.json
@@ -1,7 +1,7 @@
{
"url": "http://localhost:8080/nodes/v2/node/host4.yahoo.com",
"id": "host4.yahoo.com",
- "state": "failed",
+ "state": "active",
"type": "tenant",
"hostname": "host4.yahoo.com",
"parentHostname": "parent.yahoo.com",
@@ -30,11 +30,11 @@
"allowedToBeDown": true,
"orchestratorStatus": "ALLOWED_TO_BE_DOWN",
"suspendedSinceMillis": 0,
- "rebootGeneration": 3,
+ "rebootGeneration": 2,
"currentRebootGeneration": 1,
"vespaVersion": "6.43.0",
"currentDockerImage": "docker-registry.domain.tld:8080/dist/vespa:6.45.0",
- "failCount": 1,
+ "failCount": 0,
"wantToRetire": true,
"preferToRetire": false,
"wantToDeprovision": false,
@@ -61,7 +61,7 @@
"agent": "application"
},
{
- "event": "failed",
+ "event": "wantToFail",
"at": 123,
"agent": "operator"
},
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-with-hostnames.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-with-hostnames.json
index 869d9417a06..e6c2ba350d2 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-with-hostnames.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4-with-hostnames.json
@@ -27,7 +27,7 @@
"wantedVespaVersion": "6.42.0",
"requestedResources": { "vcpu":1.0, "memoryGb":4.0, "diskGb":100.0, "bandwidthGbps":1.0, "diskSpeed":"fast", "storageType":"any" },
"allowedToBeDown": false,
- "rebootGeneration": 1,
+ "rebootGeneration": 0,
"currentRebootGeneration": 0,
"vespaVersion": "6.41.0",
"currentDockerImage": "docker-registry.domain.tld:8080/dist/vespa:6.41.0",
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4.json
index 7068796ab3f..1ae403bcba6 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node4.json
@@ -27,7 +27,7 @@
"wantedVespaVersion": "6.42.0",
"requestedResources": { "vcpu":1.0, "memoryGb":4.0, "diskGb":100.0, "bandwidthGbps":1.0, "diskSpeed":"fast", "storageType":"any" },
"allowedToBeDown": false,
- "rebootGeneration": 1,
+ "rebootGeneration": 0,
"currentRebootGeneration": 0,
"vespaVersion": "6.41.0",
"currentDockerImage": "docker-registry.domain.tld:8080/dist/vespa:6.41.0",
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5-after-changes.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5-after-changes.json
index 4ffd3c87d14..35c2ee570c8 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5-after-changes.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5-after-changes.json
@@ -9,7 +9,7 @@
"flavor": "[vcpu: 1.0, memory: 8.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, disk speed: slow, storage type: remote]",
"resources":{"vcpu":1.0,"memoryGb":8.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"slow","storageType":"remote"},
"environment": "DOCKER_CONTAINER",
- "rebootGeneration": 1,
+ "rebootGeneration": 0,
"currentRebootGeneration": 0,
"failCount": 1,
"wantToRetire": false,
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5.json
index 6197ff7b083..a45bf4a0c53 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node5.json
@@ -9,7 +9,7 @@
"flavor": "[vcpu: 1.0, memory: 8.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, disk speed: slow, storage type: remote]",
"resources":{"vcpu":1.0,"memoryGb":8.0,"diskGb":100.0,"bandwidthGbps":1.0,"diskSpeed":"slow","storageType":"remote"},
"environment": "DOCKER_CONTAINER",
- "rebootGeneration": 1,
+ "rebootGeneration": 0,
"currentRebootGeneration": 0,
"vespaVersion": "1.2.3",
"currentDockerImage": "docker-registry.domain.tld:8080/dist/vespa:1.2.3",
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node6.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node6.json
index b1f439c3745..a6eede0675c 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node6.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/node6.json
@@ -26,7 +26,7 @@
"wantedVespaVersion": "6.42.0",
"requestedResources": { "vcpu":2.0, "memoryGb":8.0, "diskGb":50.0, "bandwidthGbps":1.0, "diskSpeed":"fast", "storageType":"any" },
"allowedToBeDown": false,
- "rebootGeneration": 1,
+ "rebootGeneration": 0,
"currentRebootGeneration": 0,
"failCount": 0,
"wantToRetire": false,
diff --git a/orchestrator-restapi/pom.xml b/orchestrator-restapi/pom.xml
index 2d4ceb069df..37552b7aa0b 100644
--- a/orchestrator-restapi/pom.xml
+++ b/orchestrator-restapi/pom.xml
@@ -23,18 +23,6 @@
<scope>provided</scope>
</dependency>
<dependency>
- <groupId>javax.ws.rs</groupId>
- <artifactId>javax.ws.rs-api</artifactId>
- <version>2.0</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>jaxrs_utils</artifactId>
- <version>${project.version}</version>
- <scope>compile</scope>
- </dependency>
- <dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
<version>${jackson2.version}</version>
diff --git a/orchestrator-restapi/src/main/java/com/yahoo/vespa/orchestrator/restapi/ApplicationSuspensionApi.java b/orchestrator-restapi/src/main/java/com/yahoo/vespa/orchestrator/restapi/ApplicationSuspensionApi.java
deleted file mode 100644
index e44f6fa0df7..00000000000
--- a/orchestrator-restapi/src/main/java/com/yahoo/vespa/orchestrator/restapi/ApplicationSuspensionApi.java
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.orchestrator.restapi;
-
-import javax.ws.rs.DELETE;
-import javax.ws.rs.GET;
-import javax.ws.rs.POST;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.MediaType;
-import java.util.Set;
-
-/**
- * Definition of Orchestrator's REST API for suspensions of applications aka application instances.
- *
- * Implementing classes must not put any JAX-RS annotation on the overridden methods. Doing so will cause all
- * method annotations in this interface to be ignored by the JAX-RS container (see section 3.6 of JSR-339).
- *
- * @author smorgrav
- */
-@Path("/orchestrator" + ApplicationSuspensionApi.PATH_PREFIX)
-public interface ApplicationSuspensionApi {
- /**
- * Path prefix for this api. Resources implementing this API should use this with a @Path annotation.
- */
- String PATH_PREFIX = "/v1/suspensions/applications";
-
- /**
- * Lists all applications that is currently suspended.
- *
- * HTTP Behavior:
- * Always 200
- *
- * @return A list of application ids of suspended applications
- */
- @GET
- @Produces(MediaType.APPLICATION_JSON)
- Set<String> getApplications();
-
- /**
- * Shows the Orchestrator status for an application instance
- *
- * HTTP Behavior:
- * 204 if the application is suspended
- * 400 if the applicationId is invalid
- * 404 if the application is not suspended
- *
- * @param applicationIdString the fully qualified application id.
- */
- @GET
- @Path("/{application}")
- @Produces(MediaType.APPLICATION_JSON)
- void getApplication(@PathParam("application") String applicationIdString);
-
- /**
- * Ask for permission to temporarily suspend all services for an application instance.
- *
- * On success all content nodes for this application instance have been set in maintenance mode.
- *
- * Once the application is ready to resume normal operations, it must finish with resume() (see below).
- *
- * If the application has already been granted permission to suspend all services, requesting
- * suspension again is idempotent and will succeed.
- *
- * HTTP Behavior:
- * 204 is the suspend operation was successful
- * 400 if the applicationId is invalid
- * 409 if the suspend was denied
- *
- * @param applicationIdString the fully qualified application id.
- */
- @POST
- void suspend(String applicationIdString);
-
- /**
- * Resume normal operations for all services for an application
- * instance that has previously been allowed suspension.
- *
- * If the host is already registered as running normal operations, then resume() is idempotent
- * and will succeed.
- *
- * HTTP Behavior:
- * Returns 204 is the resume operation was successful (or the application was not suspended)
- * Returns 400 if the applicationId is invalid
- *
- * @param applicationIdString the fully qualified application id.
- */
- @DELETE
- @Path("/{application}")
- void resume(@PathParam("application") String applicationIdString);
-}
diff --git a/orchestrator-restapi/src/main/java/com/yahoo/vespa/orchestrator/restapi/HostApi.java b/orchestrator-restapi/src/main/java/com/yahoo/vespa/orchestrator/restapi/HostApi.java
deleted file mode 100644
index 1c4d138acef..00000000000
--- a/orchestrator-restapi/src/main/java/com/yahoo/vespa/orchestrator/restapi/HostApi.java
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.orchestrator.restapi;
-
-import com.yahoo.vespa.jaxrs.annotation.PATCH;
-import com.yahoo.vespa.orchestrator.restapi.wire.GetHostResponse;
-import com.yahoo.vespa.orchestrator.restapi.wire.PatchHostRequest;
-import com.yahoo.vespa.orchestrator.restapi.wire.PatchHostResponse;
-import com.yahoo.vespa.orchestrator.restapi.wire.UpdateHostResponse;
-
-import javax.ws.rs.Consumes;
-import javax.ws.rs.DELETE;
-import javax.ws.rs.GET;
-import javax.ws.rs.PUT;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.MediaType;
-
-/**
- * Definition of Orchestrator's REST API for hosts.
- *
- * Implementing classes must not put any JAX-RS annotation on the overridden methods. Doing so will cause all
- * method annotations in this interface to be ignored by the JAX-RS container (see section 3.6 of JSR-339).
- *
- * @author bakksjo
- */
-public interface HostApi {
- /**
- * Path prefix for this api. Resources implementing this API should use this with a @Path annotation.
- */
- String PATH_PREFIX = "/v1/hosts";
-
- /**
- * Shows the Orchestrator state of a host.
- *
- * @param hostNameString the fully qualified host name
- */
- @GET
- @Path("/{hostname}")
- @Produces(MediaType.APPLICATION_JSON)
- GetHostResponse getHost(@PathParam("hostname") String hostNameString);
-
- /**
- * Tweak internal Orchestrator state for host.
- */
- @PATCH
- @Path("/{hostname}")
- @Consumes(MediaType.APPLICATION_JSON)
- @Produces(MediaType.APPLICATION_JSON)
- PatchHostResponse patch(@PathParam("hostname") String hostNameString, PatchHostRequest request);
-
- /**
- * Ask for permission to temporarily suspend all services on a host.
- *
- * On success, none, some, or all services on the host may already have been effectively suspended,
- * e.g. as of Feb 2015, a content node would already be set in the maintenance state.
- *
- * Once the host is ready to resume normal operations, it must finish with resume() (see below).
- *
- * If the host has already been granted permission to suspend all services, requesting
- * suspension again is idempotent and will succeed.
- *
- * @param hostNameString the fully qualified host name.
- */
- @PUT
- @Path("/{hostname}/suspended")
- @Produces(MediaType.APPLICATION_JSON)
- UpdateHostResponse suspend(@PathParam("hostname") String hostNameString);
-
- /**
- * Resume normal operations for all services on a host that has previously been allowed suspension.
- *
- * If the host is already registered as running normal operations, then resume() is idempotent
- * and will succeed.
- *
- * @param hostNameString the fully qualified host name.
- */
- @DELETE
- @Path("/{hostname}/suspended")
- @Produces(MediaType.APPLICATION_JSON)
- UpdateHostResponse resume(@PathParam("hostname") String hostNameString);
-}
diff --git a/orchestrator-restapi/src/main/java/com/yahoo/vespa/orchestrator/restapi/HostSuspensionApi.java b/orchestrator-restapi/src/main/java/com/yahoo/vespa/orchestrator/restapi/HostSuspensionApi.java
deleted file mode 100644
index 9535096af4f..00000000000
--- a/orchestrator-restapi/src/main/java/com/yahoo/vespa/orchestrator/restapi/HostSuspensionApi.java
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.orchestrator.restapi;
-
-import com.yahoo.vespa.orchestrator.restapi.wire.BatchOperationResult;
-
-import javax.ws.rs.Consumes;
-import javax.ws.rs.PUT;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.QueryParam;
-import javax.ws.rs.core.MediaType;
-import java.util.List;
-
-/**
- * @author hakonhall
- */
-public interface HostSuspensionApi {
- /**
- * Path prefix for this api. Resources implementing this API should use this with a @Path annotation.
- */
- String PATH_PREFIX = "/v1/suspensions/hosts";
-
- /**
- * Ask for permission to temporarily suspend all services on a set of hosts (nodes).
- *
- * See HostApi::suspend for semantics of suspending a node.
- */
- @PUT
- @Path("/{hostname}")
- @Produces(MediaType.APPLICATION_JSON)
- @Consumes(MediaType.APPLICATION_JSON)
- BatchOperationResult suspendAll(@PathParam("hostname") String parentHostname,
- @QueryParam("hostname") List<String> hostnames);
-}
diff --git a/orchestrator-restapi/src/main/java/com/yahoo/vespa/orchestrator/restapi/package-info.java b/orchestrator-restapi/src/main/java/com/yahoo/vespa/orchestrator/restapi/package-info.java
deleted file mode 100644
index 72da3faa44f..00000000000
--- a/orchestrator-restapi/src/main/java/com/yahoo/vespa/orchestrator/restapi/package-info.java
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-@ExportPackage
-@PublicApi
-package com.yahoo.vespa.orchestrator.restapi;
-
-import com.yahoo.api.annotations.PublicApi;
-import com.yahoo.osgi.annotation.ExportPackage;
\ No newline at end of file
diff --git a/orchestrator-restapi/src/main/java/com/yahoo/vespa/orchestrator/restapi/wire/SlobrokEntryResponse.java b/orchestrator-restapi/src/main/java/com/yahoo/vespa/orchestrator/restapi/wire/SlobrokEntryResponse.java
index 062fa186cb6..672f391a4cd 100644
--- a/orchestrator-restapi/src/main/java/com/yahoo/vespa/orchestrator/restapi/wire/SlobrokEntryResponse.java
+++ b/orchestrator-restapi/src/main/java/com/yahoo/vespa/orchestrator/restapi/wire/SlobrokEntryResponse.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.orchestrator.restapi.wire;
+import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
@@ -17,7 +18,8 @@ public class SlobrokEntryResponse {
@JsonProperty("spec")
public final String spec;
- public SlobrokEntryResponse(String name, String spec) {
+ @JsonCreator
+ public SlobrokEntryResponse(@JsonProperty("name") String name, @JsonProperty("spec") String spec) {
this.name = name;
this.spec = spec;
}
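
Adding @JsonCreator (with @JsonProperty on each constructor argument) lets Jackson construct this immutable wire class when reading JSON, not only when writing it. A minimal standalone sketch of that round trip, using illustrative slobrok values (the class name of the sketch and the entry values are hypothetical):

    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.yahoo.vespa.orchestrator.restapi.wire.SlobrokEntryResponse;

    public class SlobrokEntryResponseRoundTrip {
        public static void main(String[] args) throws Exception {
            ObjectMapper mapper = new ObjectMapper();
            // Writing worked before this change; reading back now goes through the @JsonCreator constructor.
            String json = mapper.writeValueAsString(
                    new SlobrokEntryResponse("storage/cluster.music/0", "tcp/example-host:19112"));
            SlobrokEntryResponse copy = mapper.readValue(json, SlobrokEntryResponse.class);
            System.out.println(copy.name + " -> " + copy.spec);
        }
    }
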
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/ApplicationSuspensionRequestHandler.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/ApplicationSuspensionRequestHandler.java
new file mode 100644
index 00000000000..c706aed1143
--- /dev/null
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/ApplicationSuspensionRequestHandler.java
@@ -0,0 +1,158 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.orchestrator.resources;
+
+import com.google.inject.Inject;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.container.jdisc.EmptyResponse;
+import com.yahoo.container.jdisc.HttpResponse;
+import com.yahoo.container.jdisc.LoggingRequestHandler;
+import com.yahoo.jdisc.http.HttpResponse.Status;
+import com.yahoo.restapi.RestApi;
+import com.yahoo.restapi.RestApiException;
+import com.yahoo.restapi.RestApiRequestHandler;
+import com.yahoo.vespa.orchestrator.ApplicationIdNotFoundException;
+import com.yahoo.vespa.orchestrator.ApplicationStateChangeDeniedException;
+import com.yahoo.vespa.orchestrator.Orchestrator;
+import com.yahoo.vespa.orchestrator.status.ApplicationInstanceStatus;
+
+import java.util.Set;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import java.util.stream.Collectors;
+
+/**
+ * @author smorgrav
+ * @author bjorncs
+ */
+public class ApplicationSuspensionRequestHandler extends RestApiRequestHandler<ApplicationSuspensionRequestHandler> {
+
+ private static final Logger log = Logger.getLogger(ApplicationSuspensionRequestHandler.class.getName());
+
+ private final Orchestrator orchestrator;
+
+ @Inject
+ public ApplicationSuspensionRequestHandler(LoggingRequestHandler.Context context, Orchestrator orchestrator) {
+ super(context, ApplicationSuspensionRequestHandler::createRestApiDefinition);
+ this.orchestrator = orchestrator;
+ }
+
+ private static RestApi createRestApiDefinition(ApplicationSuspensionRequestHandler self) {
+ return RestApi.builder()
+ .addRoute(RestApi.route("/orchestrator/v1/suspensions/applications")
+ .get(self::getApplications)
+ .post(String.class, self::suspend))
+ .addRoute(RestApi.route("/orchestrator/v1/suspensions/applications/{application}")
+ .get(self::getApplication)
+ .delete(self::resume))
+ .registerJacksonResponseEntity(Set.class)
+ .build();
+ }
+
+ /**
+ * Lists all applications that are currently suspended.
+ *
+ * HTTP Behavior:
+ * Always 200
+ *
+ * @return A list of application ids of suspended applications
+ */
+ private Set<String> getApplications(RestApi.RequestContext context) {
+ Set<ApplicationId> refs = orchestrator.getAllSuspendedApplications();
+ return refs.stream().map(ApplicationId::serializedForm).collect(Collectors.toSet());
+ }
+
+ /**
+ * Shows the Orchestrator status for an application instance
+ *
+ * HTTP Behavior:
+ * 204 if the application is suspended
+ * 400 if the applicationId is invalid
+ * 404 if the application is not suspended
+ */
+ private HttpResponse getApplication(RestApi.RequestContext context) {
+ String applicationIdString = context.pathParameters().getStringOrThrow("application");
+ ApplicationId appId = toApplicationId(applicationIdString);
+ ApplicationInstanceStatus status;
+
+ try {
+ status = orchestrator.getApplicationInstanceStatus(appId);
+ } catch (ApplicationIdNotFoundException e) {
+ throw new RestApiException.NotFound("Application " + applicationIdString + " could not be found", e);
+ }
+
+ if (status.equals(ApplicationInstanceStatus.NO_REMARKS)) {
+ throw new RestApiException.NotFound("Application " + applicationIdString + " is not suspended");
+ }
+ return new EmptyResponse(Status.NO_CONTENT);
+ }
+
+ /**
+ * Ask for permission to temporarily suspend all services for an application instance.
+ *
+ * On success all content nodes for this application instance have been set in maintenance mode.
+ *
+ * Once the application is ready to resume normal operations, it must finish with resume() (see below).
+ *
+ * If the application has already been granted permission to suspend all services, requesting
+ * suspension again is idempotent and will succeed.
+ *
+ * HTTP Behavior:
+ * 204 if the suspend operation was successful
+ * 400 if the applicationId is invalid
+ * 409 if the suspend was denied
+ */
+ private HttpResponse suspend(RestApi.RequestContext context, String applicationIdString) {
+ ApplicationId applicationId = toApplicationId(applicationIdString);
+ try {
+ orchestrator.suspend(applicationId);
+ } catch (ApplicationIdNotFoundException e) {
+ log.log(Level.INFO, "ApplicationId " + applicationIdString + " not found.", e);
+ throw new RestApiException.NotFound(e);
+ } catch (ApplicationStateChangeDeniedException e) {
+ log.log(Level.INFO, "Suspend for " + applicationIdString + " failed.", e);
+ throw new RestApiException.Conflict();
+ } catch (RuntimeException e) {
+ log.log(Level.INFO, "Suspend for " + applicationIdString + " failed from unknown reasons", e);
+ throw new RestApiException.InternalServerError(e);
+ }
+ return new EmptyResponse(Status.NO_CONTENT);
+ }
+
+ /**
+ * Resume normal operations for all services for an application
+ * instance that has previously been allowed suspension.
+ *
+ * If the application is already registered as running normal operations, then resume() is idempotent
+ * and will succeed.
+ *
+ * HTTP Behavior:
+ * Returns 204 if the resume operation was successful (or the application was not suspended)
+ * Returns 400 if the applicationId is invalid
+ */
+ private HttpResponse resume(RestApi.RequestContext context) {
+ String applicationIdString = context.pathParameters().getStringOrThrow("application");
+ ApplicationId applicationId = toApplicationId(applicationIdString);
+ try {
+ orchestrator.resume(applicationId);
+ } catch (ApplicationIdNotFoundException e) {
+ log.log(Level.INFO, "ApplicationId " + applicationIdString + " not found.", e);
+ throw new RestApiException.NotFound(e);
+ } catch (ApplicationStateChangeDeniedException e) {
+ log.log(Level.INFO, "Suspend for " + applicationIdString + " failed.", e);
+ throw new RestApiException.Conflict();
+ } catch (RuntimeException e) {
+ log.log(Level.INFO, "Suspend for " + applicationIdString + " failed from unknown reasons", e);
+ throw new RestApiException.InternalServerError(e);
+ }
+ return new EmptyResponse(Status.NO_CONTENT);
+ }
+
+ private ApplicationId toApplicationId(String applicationIdString) {
+ try {
+ return ApplicationId.fromSerializedForm(applicationIdString);
+ } catch (IllegalArgumentException e) {
+ throw new RestApiException.BadRequest(e);
+ }
+ }
+
+}
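
The handler's toApplicationId mirrors the behaviour of the removed JAX-RS resource: a serialized application id has the form tenant:application:instance, and anything ApplicationId.fromSerializedForm rejects is surfaced as 400 Bad Request. A small standalone sketch of that parsing, reusing the illustrative ids the tests later in this change also use (the sketch class name is hypothetical):

    import com.yahoo.config.provision.ApplicationId;

    public class ApplicationIdParsingSketch {
        public static void main(String[] args) {
            // Valid serialized form: tenant:application:instance
            ApplicationId id = ApplicationId.fromSerializedForm("mediasearch:imagesearch:default");
            System.out.println(id.serializedForm());

            try {
                // No colons: fromSerializedForm throws IllegalArgumentException,
                // which the handler translates into RestApiException.BadRequest (HTTP 400).
                ApplicationId.fromSerializedForm("something_without_colons");
            } catch (IllegalArgumentException e) {
                System.out.println("rejected: " + e.getMessage());
            }
        }
    }
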
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/health/HealthResource.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/HealthRequestHandler.java
index 3265ac75642..b5e63786df9 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/health/HealthResource.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/HealthRequestHandler.java
@@ -1,24 +1,17 @@
-// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.orchestrator.resources.health;
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.orchestrator.resources;
import com.google.inject.Inject;
import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.container.jaxrs.annotation.Component;
+import com.yahoo.container.jdisc.LoggingRequestHandler;
+import com.yahoo.restapi.RestApi;
+import com.yahoo.restapi.RestApiRequestHandler;
import com.yahoo.vespa.applicationmodel.ServiceStatusInfo;
-import com.yahoo.vespa.orchestrator.resources.ApplicationServices;
-import com.yahoo.vespa.orchestrator.resources.ServiceResource;
import com.yahoo.vespa.orchestrator.restapi.wire.ApplicationReferenceList;
import com.yahoo.vespa.orchestrator.restapi.wire.UrlReference;
import com.yahoo.vespa.service.manager.HealthMonitorApi;
import com.yahoo.vespa.service.monitor.ServiceId;
-import javax.ws.rs.GET;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.UriInfo;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
@@ -27,31 +20,39 @@ import java.util.stream.Collectors;
/**
* @author hakonhall
+ * @author bjorncs
*/
-@Path("")
-public class HealthResource {
- private final UriInfo uriInfo;
+public class HealthRequestHandler extends RestApiRequestHandler<HealthRequestHandler> {
+
private final HealthMonitorApi healthMonitorApi;
@Inject
- public HealthResource(@Context UriInfo uriInfo, @Component HealthMonitorApi healthMonitorApi) {
- this.uriInfo = uriInfo;
+ public HealthRequestHandler(LoggingRequestHandler.Context context,
+ HealthMonitorApi healthMonitorApi) {
+ super(context, HealthRequestHandler::createRestApiDefinition);
this.healthMonitorApi = healthMonitorApi;
}
- @GET
- @Produces(MediaType.APPLICATION_JSON)
- public ApplicationReferenceList getAllInstances() {
+ private static RestApi createRestApiDefinition(HealthRequestHandler self) {
+ return RestApi.builder()
+ .addRoute(RestApi.route("/orchestrator/v1/health")
+ .get(self::getAllInstances))
+ .addRoute(RestApi.route("/orchestrator/v1/health/{applicationId}")
+ .get(self::getInstance))
+ .registerJacksonResponseEntity(ApplicationReferenceList.class)
+ .registerJacksonResponseEntity(ApplicationServices.class)
+ .build();
+ }
+
+ private ApplicationReferenceList getAllInstances(RestApi.RequestContext context) {
List<ApplicationId> applications = new ArrayList<>(healthMonitorApi.getMonitoredApplicationIds());
applications.sort(Comparator.comparing(ApplicationId::serializedForm));
ApplicationReferenceList list = new ApplicationReferenceList();
list.applicationList = applications.stream().map(applicationId -> {
UrlReference reference = new UrlReference();
- reference.url = uriInfo.getBaseUriBuilder()
- .path(HealthResource.class)
- .path(applicationId.serializedForm())
- .build()
+ reference.url = context.uriBuilder()
+ .withPath("/orchestrator/v1/health/" + applicationId.serializedForm())
.toString();
return reference;
}).collect(Collectors.toList());
@@ -59,11 +60,8 @@ public class HealthResource {
return list;
}
- @GET
- @Path("/{applicationId}")
- @Produces(MediaType.APPLICATION_JSON)
- public ApplicationServices getInstance(@PathParam("applicationId") String applicationIdString) {
- ApplicationId applicationId = ApplicationId.fromSerializedForm(applicationIdString);
+ private ApplicationServices getInstance(RestApi.RequestContext context) {
+ ApplicationId applicationId = ApplicationId.fromSerializedForm(context.pathParameters().getStringOrThrow("applicationId"));
Map<ServiceId, ServiceStatusInfo> services = healthMonitorApi.getServices(applicationId);
@@ -82,5 +80,4 @@ public class HealthResource {
applicationServices.services = serviceResources;
return applicationServices;
}
-
}
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/host/HostResource.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/HostRequestHandler.java
index c55eeeef069..7b28925205f 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/host/HostResource.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/HostRequestHandler.java
@@ -1,8 +1,14 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.orchestrator.resources.host;
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.orchestrator.resources;
import com.google.common.util.concurrent.UncheckedTimeoutException;
-import com.yahoo.container.jaxrs.annotation.Component;
+import com.google.inject.Inject;
+import com.yahoo.container.jdisc.LoggingRequestHandler;
+import com.yahoo.jdisc.Response;
+import com.yahoo.restapi.JacksonJsonResponse;
+import com.yahoo.restapi.RestApi;
+import com.yahoo.restapi.RestApiException;
+import com.yahoo.restapi.RestApiRequestHandler;
import com.yahoo.vespa.applicationmodel.HostName;
import com.yahoo.vespa.orchestrator.Host;
import com.yahoo.vespa.orchestrator.HostNameNotFoundException;
@@ -10,8 +16,6 @@ import com.yahoo.vespa.orchestrator.OrchestrationException;
import com.yahoo.vespa.orchestrator.Orchestrator;
import com.yahoo.vespa.orchestrator.policy.HostStateChangeDeniedException;
import com.yahoo.vespa.orchestrator.policy.HostedVespaPolicy;
-import com.yahoo.vespa.orchestrator.resources.instance.InstanceResource;
-import com.yahoo.vespa.orchestrator.restapi.HostApi;
import com.yahoo.vespa.orchestrator.restapi.wire.GetHostResponse;
import com.yahoo.vespa.orchestrator.restapi.wire.HostService;
import com.yahoo.vespa.orchestrator.restapi.wire.HostStateChangeDenialReason;
@@ -20,16 +24,6 @@ import com.yahoo.vespa.orchestrator.restapi.wire.PatchHostResponse;
import com.yahoo.vespa.orchestrator.restapi.wire.UpdateHostResponse;
import com.yahoo.vespa.orchestrator.status.HostStatus;
-import javax.inject.Inject;
-import javax.ws.rs.BadRequestException;
-import javax.ws.rs.InternalServerErrorException;
-import javax.ws.rs.NotFoundException;
-import javax.ws.rs.Path;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
import java.net.URI;
import java.time.Instant;
import java.util.List;
@@ -39,30 +33,47 @@ import java.util.stream.Collectors;
/**
* @author oyving
+ * @author bjorncs
*/
-@Path("")
-public class HostResource implements HostApi {
- private static final Logger log = Logger.getLogger(HostResource.class.getName());
+public class HostRequestHandler extends RestApiRequestHandler<HostRequestHandler> {
+
+ private static final Logger log = Logger.getLogger(HostRequestHandler.class.getName());
private final Orchestrator orchestrator;
- private final UriInfo uriInfo;
@Inject
- public HostResource(@Component Orchestrator orchestrator, @Context UriInfo uriInfo) {
+ public HostRequestHandler(LoggingRequestHandler.Context context, Orchestrator orchestrator) {
+ super(context, HostRequestHandler::createRestApiDefinition);
this.orchestrator = orchestrator;
- this.uriInfo = uriInfo;
}
- @Override
- public GetHostResponse getHost(String hostNameString) {
+ private static RestApi createRestApiDefinition(HostRequestHandler self) {
+ return RestApi.builder()
+ .addRoute(RestApi.route("/orchestrator/v1/hosts/{hostname}")
+ .get(self::getHost)
+ .patch(PatchHostRequest.class, self::patch))
+ .addRoute(RestApi.route("/orchestrator/v1/hosts/{hostname}/suspended")
+ .put(self::suspend)
+ .delete(self::resume))
+ .registerJacksonRequestEntity(PatchHostRequest.class)
+ .registerJacksonResponseEntity(GetHostResponse.class)
+ .registerJacksonResponseEntity(PatchHostResponse.class)
+ .registerJacksonResponseEntity(UpdateHostResponse.class)
+ .build();
+ }
+
+ /**
+ * Shows the Orchestrator state of a host.
+ */
+ private GetHostResponse getHost(RestApi.RequestContext context) {
+ String hostNameString = context.pathParameters().getStringOrThrow("hostname");
HostName hostName = new HostName(hostNameString);
try {
Host host = orchestrator.getHost(hostName);
- URI applicationUri = uriInfo.getBaseUriBuilder()
- .path(InstanceResource.class)
- .path(host.getApplicationInstanceReference().asString())
- .build();
+ URI applicationUri = context.uriBuilder()
+ .withPath("/orchestrator/v1/instances/" + host.getApplicationInstanceReference().asString())
+ .toURI();
List<HostService> hostServices = host.getServiceInstances().stream()
.map(serviceInstance -> new HostService(
@@ -80,15 +91,18 @@ public class HostResource implements HostApi {
hostServices);
} catch (UncheckedTimeoutException e) {
log.log(Level.FINE, "Failed to get host " + hostName + ": " + e.getMessage());
- throw webExceptionFromTimeout("getHost", hostName, e);
+ throw restApiExceptionFromTimeout("getHost", hostName, e);
} catch (HostNameNotFoundException e) {
log.log(Level.FINE, "Host not found: " + hostName);
- throw new NotFoundException(e);
+ throw new RestApiException.NotFound(e);
}
}
- @Override
- public PatchHostResponse patch(String hostNameString, PatchHostRequest request) {
+ /**
+ * Tweak internal Orchestrator state for host.
+ */
+ private PatchHostResponse patch(RestApi.RequestContext context, PatchHostRequest request) {
+ String hostNameString = context.pathParameters().getStringOrThrow("hostname");
HostName hostName = new HostName(hostNameString);
if (request.state != null) {
@@ -96,21 +110,21 @@ public class HostResource implements HostApi {
try {
state = HostStatus.valueOf(request.state);
} catch (IllegalArgumentException dummy) {
- throw new BadRequestException("Bad state in request: '" + request.state + "'");
+ throw new RestApiException.BadRequest("Bad state in request: '" + request.state + "'");
}
try {
orchestrator.setNodeStatus(hostName, state);
} catch (HostNameNotFoundException e) {
log.log(Level.FINE, "Host not found: " + hostName);
- throw new NotFoundException(e);
+ throw new RestApiException.NotFound(e);
} catch (UncheckedTimeoutException e) {
log.log(Level.FINE, "Failed to patch " + hostName + ": " + e.getMessage());
- throw webExceptionFromTimeout("patch", hostName, e);
+ throw restApiExceptionFromTimeout("patch", hostName, e);
} catch (OrchestrationException e) {
String message = "Failed to set " + hostName + " to " + state + ": " + e.getMessage();
log.log(Level.FINE, message, e);
- throw new InternalServerErrorException(message);
+ throw new RestApiException.InternalServerError(message);
}
}
@@ -119,74 +133,82 @@ public class HostResource implements HostApi {
return response;
}
- @Override
- public UpdateHostResponse suspend(String hostNameString) {
+ /**
+ * Ask for permission to temporarily suspend all services on a host.
+ *
+ * On success, none, some, or all services on the host may already have been effectively suspended,
+ * e.g. as of Feb 2015, a content node would already be set in the maintenance state.
+ *
+ * Once the host is ready to resume normal operations, it must finish with resume() (see below).
+ *
+ * If the host has already been granted permission to suspend all services, requesting
+ * suspension again is idempotent and will succeed.
+ */
+ private UpdateHostResponse suspend(RestApi.RequestContext context) {
+ String hostNameString = context.pathParameters().getStringOrThrow("hostname");
HostName hostName = new HostName(hostNameString);
try {
orchestrator.suspend(hostName);
} catch (HostNameNotFoundException e) {
log.log(Level.FINE, "Host not found: " + hostName);
- throw new NotFoundException(e);
+ throw new RestApiException.NotFound(e);
} catch (UncheckedTimeoutException e) {
log.log(Level.FINE, "Failed to suspend " + hostName + ": " + e.getMessage());
- throw webExceptionFromTimeout("suspend", hostName, e);
+ throw restApiExceptionFromTimeout("suspend", hostName, e);
} catch (HostStateChangeDeniedException e) {
log.log(Level.FINE, "Failed to suspend " + hostName + ": " + e.getMessage());
- throw webExceptionWithDenialReason("suspend", hostName, e);
+ throw restApiExceptionWithDenialReason("suspend", hostName, e);
}
return new UpdateHostResponse(hostName.s(), null);
}
-
- @Override
- public UpdateHostResponse resume(final String hostNameString) {
+ /**
+ * Resume normal operations for all services on a host that has previously been allowed suspension.
+ *
+ * If the host is already registered as running normal operations, then resume() is idempotent
+ * and will succeed.
+ */
+ private UpdateHostResponse resume(RestApi.RequestContext context) {
+ String hostNameString = context.pathParameters().getStringOrThrow("hostname");
HostName hostName = new HostName(hostNameString);
try {
orchestrator.resume(hostName);
} catch (HostNameNotFoundException e) {
log.log(Level.FINE, "Host not found: " + hostName);
- throw new NotFoundException(e);
+ throw new RestApiException.NotFound(e);
} catch (UncheckedTimeoutException e) {
log.log(Level.FINE, "Failed to resume " + hostName + ": " + e.getMessage());
- throw webExceptionFromTimeout("resume", hostName, e);
+ throw restApiExceptionFromTimeout("resume", hostName, e);
} catch (HostStateChangeDeniedException e) {
log.log(Level.FINE, "Failed to resume " + hostName + ": " + e.getMessage());
- throw webExceptionWithDenialReason("resume", hostName, e);
+ throw restApiExceptionWithDenialReason("resume", hostName, e);
}
return new UpdateHostResponse(hostName.s(), null);
}
- private static WebApplicationException webExceptionFromTimeout(String operationDescription,
- HostName hostName,
- UncheckedTimeoutException e) {
+ private RestApiException restApiExceptionFromTimeout(String operationDescription,
+ HostName hostName,
+ UncheckedTimeoutException e) {
// Return timeouts as 409 Conflict instead of 504 Gateway Timeout to reduce noise in 5xx graphs.
- return createWebException(operationDescription, hostName, e,
+ return createRestApiException(operationDescription, hostName, e,
HostedVespaPolicy.DEADLINE_CONSTRAINT, e.getMessage(), Response.Status.CONFLICT);
}
- private static WebApplicationException webExceptionWithDenialReason(
+ private RestApiException restApiExceptionWithDenialReason(
String operationDescription,
HostName hostName,
HostStateChangeDeniedException e) {
- return createWebException(operationDescription, hostName, e, e.getConstraintName(), e.getMessage(),
+ return createRestApiException(operationDescription, hostName, e, e.getConstraintName(), e.getMessage(),
Response.Status.CONFLICT);
}
- private static WebApplicationException createWebException(String operationDescription,
- HostName hostname,
- Exception e,
- String constraint,
- String message,
- Response.Status status) {
+ private RestApiException createRestApiException(
+ String operationDescription, HostName hostname, Exception e, String constraint, String message, int status) {
HostStateChangeDenialReason hostStateChangeDenialReason = new HostStateChangeDenialReason(
constraint, operationDescription + " failed: " + message);
UpdateHostResponse response = new UpdateHostResponse(hostname.s(), hostStateChangeDenialReason);
- return new WebApplicationException(
+ return new RestApiException(
+ new JacksonJsonResponse<>(status, response, restApi().jacksonJsonMapper(), true),
hostStateChangeDenialReason.toString(),
- e,
- Response.status(status)
- .entity(response)
- .type(MediaType.APPLICATION_JSON_TYPE)
- .build());
+ e);
}
}
-
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/HostSuspensionHandler.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/HostSuspensionRequestHandler.java
index ca720ec8b68..73859f385e1 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/HostSuspensionHandler.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/HostSuspensionRequestHandler.java
@@ -25,19 +25,19 @@ import java.util.stream.Collectors;
* @author hakonhall
* @author bjorncs
*/
-public class HostSuspensionHandler extends RestApiRequestHandler<HostSuspensionHandler> {
+public class HostSuspensionRequestHandler extends RestApiRequestHandler<HostSuspensionRequestHandler> {
- private static final Logger log = Logger.getLogger(HostSuspensionHandler.class.getName());
+ private static final Logger log = Logger.getLogger(HostSuspensionRequestHandler.class.getName());
private final Orchestrator orchestrator;
@Inject
- public HostSuspensionHandler(LoggingRequestHandler.Context context, Orchestrator orchestrator) {
- super(context, HostSuspensionHandler::createRestApiDefinition);
+ public HostSuspensionRequestHandler(LoggingRequestHandler.Context context, Orchestrator orchestrator) {
+ super(context, HostSuspensionRequestHandler::createRestApiDefinition);
this.orchestrator = orchestrator;
}
- private static RestApi createRestApiDefinition(HostSuspensionHandler self) {
+ private static RestApi createRestApiDefinition(HostSuspensionRequestHandler self) {
return RestApi.builder()
.addRoute(RestApi.route("/orchestrator/v1/suspensions/hosts/{hostname}")
.put(self::suspendAll))
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/instance/InstanceResource.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/InstanceRequestHandler.java
index 742f7d6bbd7..567013e7453 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/instance/InstanceResource.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/InstanceRequestHandler.java
@@ -1,9 +1,17 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.orchestrator.resources.instance;
-
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.orchestrator.resources;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.SerializationFeature;
+import com.fasterxml.jackson.datatype.jdk8.Jdk8Module;
+import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
+import com.google.inject.Inject;
import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.container.jaxrs.annotation.Component;
+import com.yahoo.container.jdisc.LoggingRequestHandler;
import com.yahoo.jrt.slobrok.api.Mirror;
+import com.yahoo.restapi.RestApi;
+import com.yahoo.restapi.RestApiException;
+import com.yahoo.restapi.RestApiRequestHandler;
import com.yahoo.vespa.applicationmodel.ApplicationInstance;
import com.yahoo.vespa.applicationmodel.ApplicationInstanceReference;
import com.yahoo.vespa.applicationmodel.ClusterId;
@@ -12,7 +20,6 @@ import com.yahoo.vespa.applicationmodel.HostName;
import com.yahoo.vespa.applicationmodel.ServiceStatusInfo;
import com.yahoo.vespa.applicationmodel.ServiceType;
import com.yahoo.vespa.orchestrator.OrchestratorUtil;
-import com.yahoo.vespa.orchestrator.resources.InstanceStatusResponse;
import com.yahoo.vespa.orchestrator.restapi.wire.SlobrokEntryResponse;
import com.yahoo.vespa.orchestrator.restapi.wire.WireHostInfo;
import com.yahoo.vespa.orchestrator.status.HostInfo;
@@ -23,15 +30,6 @@ import com.yahoo.vespa.service.manager.UnionMonitorManager;
import com.yahoo.vespa.service.monitor.ServiceMonitor;
import com.yahoo.vespa.service.monitor.SlobrokApi;
-import javax.inject.Inject;
-import javax.ws.rs.GET;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.QueryParam;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
import java.time.Instant;
import java.util.List;
import java.util.TreeMap;
@@ -45,10 +43,10 @@ import static com.yahoo.vespa.orchestrator.OrchestratorUtil.parseApplicationInst
* This API can be unstable and is not meant to be used programmatically.
*
* @author andreer
- * @author bakksjo
+ * @author Oyvind Bakksjo
+ * @author bjorncs
*/
-@Path("")
-public class InstanceResource {
+public class InstanceRequestHandler extends RestApiRequestHandler<InstanceRequestHandler> {
public static final String DEFAULT_SLOBROK_PATTERN = "**";
@@ -58,31 +56,50 @@ public class InstanceResource {
private final ServiceMonitor serviceMonitor;
@Inject
- public InstanceResource(@Component ServiceMonitor serviceMonitor,
- @Component StatusService statusService,
- @Component SlobrokApi slobrokApi,
- @Component UnionMonitorManager rootManager) {
- this.serviceMonitor = serviceMonitor;
+ public InstanceRequestHandler(LoggingRequestHandler.Context context,
+ ServiceMonitor serviceMonitor,
+ StatusService statusService,
+ SlobrokApi slobrokApi,
+ UnionMonitorManager rootManager) {
+ super(context, InstanceRequestHandler::createRestApiDefinition);
this.statusService = statusService;
this.slobrokApi = slobrokApi;
this.rootManager = rootManager;
+ this.serviceMonitor = serviceMonitor;
}
- @GET
- @Produces(MediaType.APPLICATION_JSON)
- public List<ApplicationInstanceReference> getAllInstances() {
+ private static RestApi createRestApiDefinition(InstanceRequestHandler self) {
+ return RestApi.builder()
+ .addRoute(RestApi.route("/orchestrator/v1/instances")
+ .get(self::getAllInstances))
+ .addRoute(RestApi.route("/orchestrator/v1/instances/{instanceId}")
+ .get(self::getInstance))
+ .addRoute(RestApi.route("/orchestrator/v1/instances/{instanceId}/slobrok")
+ .get(self::getSlobrokEntries))
+ .addRoute(RestApi.route("/orchestrator/v1/instances/{instanceId}/serviceStatusInfo")
+ .get(self::getServiceStatus))
+ .registerJacksonResponseEntity(List.class)
+ .registerJacksonResponseEntity(InstanceStatusResponse.class)
+ .registerJacksonResponseEntity(ServiceStatusInfo.class)
+ // Overriding object mapper to change serialization of timestamps
+ .setObjectMapper(new ObjectMapper()
+ .registerModule(new JavaTimeModule())
+ .registerModule(new Jdk8Module())
+ .configure(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS, true))
+ .build();
+ }
+
+ private List<ApplicationInstanceReference> getAllInstances(RestApi.RequestContext context) {
return serviceMonitor.getAllApplicationInstanceReferences().stream().sorted().collect(Collectors.toList());
}
- @GET
- @Path("/{instanceId}")
- @Produces(MediaType.APPLICATION_JSON)
- public InstanceStatusResponse getInstance(@PathParam("instanceId") String instanceIdString) {
+ private InstanceStatusResponse getInstance(RestApi.RequestContext context) {
+ String instanceIdString = context.pathParameters().getStringOrThrow("instanceId");
ApplicationInstanceReference instanceId = parseInstanceId(instanceIdString);
ApplicationInstance applicationInstance
= serviceMonitor.getApplication(instanceId)
- .orElseThrow(() -> new WebApplicationException(Response.status(Response.Status.NOT_FOUND).build()));
+ .orElseThrow(RestApiException.NotFound::new);
HostInfos hostInfos = statusService.getHostInfosByApplicationResolver().apply(applicationInstance.reference());
TreeMap<HostName, WireHostInfo> hostStatusMap =
@@ -102,12 +119,9 @@ public class InstanceResource {
return new WireHostInfo(hostStatusString, suspendedSinceUtcOrNull);
}
- @GET
- @Path("/{instanceId}/slobrok")
- @Produces(MediaType.APPLICATION_JSON)
- public List<SlobrokEntryResponse> getSlobrokEntries(
- @PathParam("instanceId") String instanceId,
- @QueryParam("pattern") String pattern) {
+ private List<SlobrokEntryResponse> getSlobrokEntries(RestApi.RequestContext context) {
+ String instanceId = context.pathParameters().getStringOrThrow("instanceId");
+ String pattern = context.queryParameters().getString("pattern").orElse(null);
ApplicationInstanceReference reference = parseInstanceId(instanceId);
ApplicationId applicationId = OrchestratorUtil.toApplicationId(reference);
@@ -121,29 +135,14 @@ public class InstanceResource {
.collect(Collectors.toList());
}
- @GET
- @Path("/{instanceId}/serviceStatusInfo")
- @Produces(MediaType.APPLICATION_JSON)
- public ServiceStatusInfo getServiceStatus(
- @PathParam("instanceId") String instanceId,
- @QueryParam("clusterId") String clusterIdString,
- @QueryParam("serviceType") String serviceTypeString,
- @QueryParam("configId") String configIdString) {
+ private ServiceStatusInfo getServiceStatus(RestApi.RequestContext context) {
+ String instanceId = context.pathParameters().getStringOrThrow("instanceId");
+ String clusterIdString = context.queryParameters().getStringOrThrow("clusterId");
+ String serviceTypeString = context.queryParameters().getStringOrThrow("serviceType");
+ String configIdString = context.queryParameters().getStringOrThrow("configId");
ApplicationInstanceReference reference = parseInstanceId(instanceId);
ApplicationId applicationId = OrchestratorUtil.toApplicationId(reference);
- if (clusterIdString == null) {
- throwBadRequest("Missing clusterId query parameter");
- }
-
- if (serviceTypeString == null) {
- throwBadRequest("Missing serviceType query parameter");
- }
-
- if (configIdString == null) {
- throwBadRequest("Missing configId query parameter");
- }
-
ClusterId clusterId = new ClusterId(clusterIdString);
ServiceType serviceType = new ServiceType(serviceTypeString);
ConfigId configId = new ConfigId(configIdString);
@@ -151,18 +150,12 @@ public class InstanceResource {
return rootManager.getStatus(applicationId, clusterId, serviceType, configId);
}
- static ApplicationInstanceReference parseInstanceId(String instanceIdString) {
+ private static ApplicationInstanceReference parseInstanceId(String instanceIdString) {
try {
return parseApplicationInstanceReference(instanceIdString);
} catch (IllegalArgumentException e) {
- throwBadRequest(e.getMessage());
- return null; // Necessary for compiler
+ throw new RestApiException.BadRequest(e.getMessage(), e);
}
}
- static void throwBadRequest(String message) {
- throw new WebApplicationException(
- Response.status(Response.Status.BAD_REQUEST).entity(message).build());
- }
-
}
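
The object mapper override registered on this RestApi (see the builder above) changes how timestamps are serialized. A standalone sketch of what that configuration does, assuming the default behaviour of Jackson's JavaTimeModule (the Instant value and sketch class name are illustrative):

    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.SerializationFeature;
    import com.fasterxml.jackson.datatype.jdk8.Jdk8Module;
    import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
    import java.time.Instant;

    public class TimestampSerializationSketch {
        public static void main(String[] args) throws Exception {
            ObjectMapper mapper = new ObjectMapper()
                    .registerModule(new JavaTimeModule())
                    .registerModule(new Jdk8Module())
                    .configure(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS, true);
            // With WRITE_DATES_AS_TIMESTAMPS enabled, java.time values are written as numeric
            // timestamps rather than ISO-8601 strings, e.g. 123.000000000 for the value below.
            System.out.println(mapper.writeValueAsString(Instant.ofEpochSecond(123)));
        }
    }
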
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/appsuspension/ApplicationSuspensionResource.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/appsuspension/ApplicationSuspensionResource.java
deleted file mode 100644
index 361b1f5e361..00000000000
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/resources/appsuspension/ApplicationSuspensionResource.java
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.orchestrator.resources.appsuspension;
-
-import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.container.jaxrs.annotation.Component;
-import com.yahoo.vespa.orchestrator.ApplicationIdNotFoundException;
-import com.yahoo.vespa.orchestrator.ApplicationStateChangeDeniedException;
-import com.yahoo.vespa.orchestrator.OrchestratorImpl;
-import com.yahoo.vespa.orchestrator.restapi.ApplicationSuspensionApi;
-import com.yahoo.vespa.orchestrator.status.ApplicationInstanceStatus;
-
-import javax.inject.Inject;
-import javax.ws.rs.BadRequestException;
-import javax.ws.rs.InternalServerErrorException;
-import javax.ws.rs.NotFoundException;
-import javax.ws.rs.Path;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.Response;
-import java.util.Set;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-import java.util.stream.Collectors;
-
-/**
- * @author smorgrav
- */
-@Path("")
-public class ApplicationSuspensionResource implements ApplicationSuspensionApi {
-
- private static final Logger log = Logger.getLogger(ApplicationSuspensionResource.class.getName());
-
- private final OrchestratorImpl orchestrator;
-
- @Inject
- public ApplicationSuspensionResource(@Component OrchestratorImpl orchestrator) {
- this.orchestrator = orchestrator;
- }
-
- @Override
- public Set<String> getApplications() {
- Set<ApplicationId> refs = orchestrator.getAllSuspendedApplications();
- return refs.stream().map(ApplicationId::serializedForm).collect(Collectors.toSet());
- }
-
- @Override
- public void getApplication(String applicationIdString) {
- ApplicationId appId = toApplicationId(applicationIdString);
- ApplicationInstanceStatus status;
-
- try {
- status = orchestrator.getApplicationInstanceStatus(appId);
- } catch (ApplicationIdNotFoundException e) {
- throw new NotFoundException("Application " + applicationIdString + " could not be found");
- }
-
- if (status.equals(ApplicationInstanceStatus.NO_REMARKS)) {
- throw new NotFoundException("Application " + applicationIdString + " is not suspended");
- }
-
- // Return void as we have nothing to return except 204 No
- // Content. Unfortunately, Jersey outputs a warning for this case:
- //
- // The following warnings have been detected: HINT: A HTTP GET
- // method, public void com.yahoo.vespa.orchestrator.resources.
- // ApplicationSuspensionResource.getApplication(java.lang.String),
- // returns a void type. It can be intentional and perfectly fine,
- // but it is a little uncommon that GET method returns always "204
- // No Content"
- //
- // We have whitelisted the warning for our systemtests.
- //
- // bakksjo has a pending jersey PR fix that avoids making the hint
- // become a warning:
- // https://github.com/jersey/jersey/pull/212
- //
- // TODO: Remove whitelisting and this comment once jersey has been
- // fixed.
- }
-
- @Override
- public void suspend(String applicationIdString) {
- ApplicationId applicationId = toApplicationId(applicationIdString);
- try {
- orchestrator.suspend(applicationId);
- } catch (ApplicationIdNotFoundException e) {
- log.log(Level.INFO, "ApplicationId " + applicationIdString + " not found.", e);
- throw new NotFoundException(e);
- } catch (ApplicationStateChangeDeniedException e) {
- log.log(Level.INFO, "Suspend for " + applicationIdString + " failed.", e);
- throw new WebApplicationException(Response.Status.CONFLICT);
- } catch (RuntimeException e) {
- log.log(Level.INFO, "Suspend for " + applicationIdString + " failed from unknown reasons", e);
- throw new InternalServerErrorException(e);
- }
- }
-
- @Override
- public void resume(String applicationIdString) {
- ApplicationId applicationId = toApplicationId(applicationIdString);
- try {
- orchestrator.resume(applicationId);
- } catch (ApplicationIdNotFoundException e) {
- log.log(Level.INFO, "ApplicationId " + applicationIdString + " not found.", e);
- throw new NotFoundException(e);
- } catch (ApplicationStateChangeDeniedException e) {
- log.log(Level.INFO, "Suspend for " + applicationIdString + " failed.", e);
- throw new WebApplicationException(Response.Status.CONFLICT);
- } catch (RuntimeException e) {
- log.log(Level.INFO, "Suspend for " + applicationIdString + " failed from unknown reasons", e);
- throw new InternalServerErrorException(e);
- }
- }
-
- private ApplicationId toApplicationId(String applicationIdString) {
- try {
- return ApplicationId.fromSerializedForm(applicationIdString);
- } catch (IllegalArgumentException e) {
- throw new BadRequestException(e);
- }
- }
-
-}
diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/ApplicationSuspensionRequestHandlerTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/ApplicationSuspensionRequestHandlerTest.java
new file mode 100644
index 00000000000..864516acbc5
--- /dev/null
+++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/ApplicationSuspensionRequestHandlerTest.java
@@ -0,0 +1,151 @@
+package com.yahoo.vespa.orchestrator.resources;// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.yahoo.cloud.config.ConfigserverConfig;
+import com.yahoo.container.jdisc.HttpRequestBuilder;
+import com.yahoo.container.jdisc.HttpResponse;
+import com.yahoo.jdisc.core.SystemTimer;
+import com.yahoo.jdisc.test.MockMetric;
+import com.yahoo.restapi.RestApiTestDriver;
+import com.yahoo.vespa.curator.mock.MockCurator;
+import com.yahoo.vespa.flags.InMemoryFlagSource;
+import com.yahoo.vespa.orchestrator.DummyServiceMonitor;
+import com.yahoo.vespa.orchestrator.Orchestrator;
+import com.yahoo.vespa.orchestrator.OrchestratorImpl;
+import com.yahoo.vespa.orchestrator.config.OrchestratorConfig;
+import com.yahoo.vespa.orchestrator.controller.ClusterControllerClientFactoryMock;
+import com.yahoo.vespa.orchestrator.status.ZkStatusService;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.Set;
+
+import static com.yahoo.jdisc.http.HttpRequest.Method;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+/**
+ * Tests the implementation of the orchestrator Application API.
+ *
+ * @author smorgrav
+ * @author bjorncs
+ */
+class ApplicationSuspensionRequestHandlerTest {
+ private static final String RESOURCE_1 = "mediasearch:imagesearch:default";
+ private static final String RESOURCE_2 = "test-tenant-id:application:instance";
+ private static final String INVALID_RESOURCE_NAME = "something_without_colons";
+
+ private RestApiTestDriver testDriver;
+
+ @BeforeEach
+ void createHandler() {
+ DummyServiceMonitor serviceMonitor = new DummyServiceMonitor();
+ Orchestrator orchestrator = new OrchestratorImpl(
+ new ClusterControllerClientFactoryMock(),
+ new ZkStatusService(new MockCurator(), new MockMetric(), new SystemTimer(), serviceMonitor),
+ new OrchestratorConfig(new OrchestratorConfig.Builder()),
+ serviceMonitor,
+ new ConfigserverConfig(new ConfigserverConfig.Builder()),
+ new InMemoryFlagSource());
+ var handler = new ApplicationSuspensionRequestHandler(RestApiTestDriver.createHandlerTestContext(), orchestrator);
+ testDriver = RestApiTestDriver.newBuilder(handler).build();
+ }
+
+
+ @Test
+ void get_all_suspended_applications_return_empty_list_initially() throws IOException {
+ HttpResponse httpResponse = executeRequest(Method.GET, "", null);
+ assertEquals(200, httpResponse.getStatus());
+ Set<String> set = parseResponseContent(httpResponse, new TypeReference<>() {});
+ assertEquals(0, set.size());
+ }
+
+ @Test
+ void invalid_application_id_throws_http_400() throws IOException {
+ HttpResponse httpResponse = executeRequest(Method.POST, "", INVALID_RESOURCE_NAME);
+ assertEquals(400, httpResponse.getStatus());
+ }
+
+ @Test
+ void get_application_status_returns_404_for_not_suspended_and_204_for_suspended() throws IOException {
+ // Get on application that is not suspended
+ HttpResponse httpResponse = executeRequest(Method.GET, "/"+RESOURCE_1, null);
+ assertEquals(404, httpResponse.getStatus());
+
+ // Post application
+ httpResponse = executeRequest(Method.POST, "", RESOURCE_1);
+ assertEquals(204, httpResponse.getStatus());
+
+ // Get on the application that should now be suspended
+ httpResponse = executeRequest(Method.GET, "/"+RESOURCE_1, null);
+ assertEquals(204, httpResponse.getStatus());
+ }
+
+ @Test
+ void delete_works_on_suspended_and_not_suspended_applications() throws IOException {
+ // Delete an application that is not suspended
+ HttpResponse httpResponse = executeRequest(Method.DELETE, "/"+RESOURCE_1, null);
+ assertEquals(204, httpResponse.getStatus());
+
+ // Put application in suspend
+ httpResponse = executeRequest(Method.POST, "", RESOURCE_1);
+ assertEquals(204, httpResponse.getStatus());
+
+ // Check that it is in suspend
+ httpResponse = executeRequest(Method.GET, "/"+RESOURCE_1, null);
+ assertEquals(204, httpResponse.getStatus());
+
+ // Delete it
+ httpResponse = executeRequest(Method.DELETE, "/"+RESOURCE_1, null);
+ assertEquals(204, httpResponse.getStatus());
+
+ // Check that it is not in suspend anymore
+ httpResponse = executeRequest(Method.GET, "/"+RESOURCE_1, null);
+ assertEquals(404, httpResponse.getStatus());
+ }
+
+ @Test
+ void list_applications_returns_the_correct_list_of_suspended_applications() throws IOException {
+ // Test that initially we have the empty set
+ HttpResponse httpResponse = executeRequest(Method.GET, "", null);
+ assertEquals(200, httpResponse.getStatus());
+ Set<String> set = parseResponseContent(httpResponse, new TypeReference<>() {});
+ assertEquals(0, set.size());
+
+ // Add a couple of applications to maintenance
+ executeRequest(Method.POST, "", RESOURCE_1);
+ executeRequest(Method.POST, "", RESOURCE_2);
+
+ // Test that we get them back
+ httpResponse = executeRequest(Method.GET, "", null);
+ assertEquals(200, httpResponse.getStatus());
+ set = parseResponseContent(httpResponse, new TypeReference<>() {});
+ assertEquals(2, set.size());
+
+ // Remove the suspension for the first resource
+ executeRequest(Method.DELETE, "/"+RESOURCE_1, null);
+
+ // Test that only the second resource remains suspended
+ httpResponse = executeRequest(Method.GET, "", null);
+ assertEquals(200, httpResponse.getStatus());
+ set = parseResponseContent(httpResponse, new TypeReference<>() {});
+ assertEquals(1, set.size());
+ assertEquals(RESOURCE_2, set.iterator().next());
+ }
+
+ private HttpResponse executeRequest(Method method, String relativePath, String applicationId) {
+ String fullPath = "/orchestrator/v1/suspensions/applications" + relativePath;
+ var builder = HttpRequestBuilder.create(method, fullPath);
+ if (applicationId != null) {
+ builder.withRequestContent(new ByteArrayInputStream(applicationId.getBytes(StandardCharsets.UTF_8)));
+ }
+ return testDriver.executeRequest(builder.build());
+ }
+
+ private <T> T parseResponseContent(HttpResponse response, TypeReference<T> type) {
+ assertEquals(200, response.getStatus());
+ return testDriver.parseJacksonResponseContent(response, type);
+ }
+} \ No newline at end of file
diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/host/HostResourceTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/HostRequestHandlerTest.java
index d056c3730fd..f6dc6d7676c 100644
--- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/host/HostResourceTest.java
+++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/HostRequestHandlerTest.java
@@ -1,9 +1,14 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.orchestrator.resources.host;
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.orchestrator.resources;
import com.google.common.util.concurrent.UncheckedTimeoutException;
+import com.yahoo.container.jdisc.HttpRequestBuilder;
+import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.jdisc.Metric;
+import com.yahoo.jdisc.http.HttpRequest.Method;
import com.yahoo.jdisc.test.TestTimer;
+import com.yahoo.restapi.RestApiTestDriver;
+import com.yahoo.test.json.JsonTestHelper;
import com.yahoo.vespa.applicationmodel.ApplicationInstance;
import com.yahoo.vespa.applicationmodel.ApplicationInstanceId;
import com.yahoo.vespa.applicationmodel.ApplicationInstanceReference;
@@ -41,15 +46,11 @@ import com.yahoo.vespa.orchestrator.status.StatusService;
import com.yahoo.vespa.orchestrator.status.ZkStatusService;
import com.yahoo.vespa.service.monitor.ServiceModel;
import com.yahoo.vespa.service.monitor.ServiceMonitor;
-import org.junit.Before;
-import org.junit.Test;
-
-import javax.ws.rs.BadRequestException;
-import javax.ws.rs.InternalServerErrorException;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.UriBuilder;
-import javax.ws.rs.core.UriInfo;
-import java.net.URI;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
import java.time.Clock;
import java.time.Instant;
import java.util.Collections;
@@ -57,8 +58,7 @@ import java.util.Map;
import java.util.Optional;
import java.util.Set;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
@@ -68,8 +68,10 @@ import static org.mockito.Mockito.when;
/**
* @author hakonhall
+ * @author bjorncs
*/
-public class HostResourceTest {
+class HostRequestHandlerTest {
+
private static final Clock clock = mock(Clock.class);
private static final int SERVICE_MONITOR_CONVERGENCE_LATENCY_SECONDS = 0;
private static final TenantId TENANT_ID = new TenantId("tenantId");
@@ -121,14 +123,14 @@ public class HostResourceTest {
private final OrchestratorImpl alwaysAllowOrchestrator = createAlwaysAllowOrchestrator(clock);
private final OrchestratorImpl hostNotFoundOrchestrator = createHostNotFoundOrchestrator(clock);
- private final UriInfo uriInfo = mock(UriInfo.class);
+ private final OrchestratorImpl alwaysRejectOrchestrator = createAlwaysRejectResolver(clock);
- @Before
- public void setUp() {
+ @BeforeEach
+ void setUp() {
when(clock.instant()).thenReturn(Instant.now());
}
- public static OrchestratorImpl createAlwaysAllowOrchestrator(Clock clock) {
+ static OrchestratorImpl createAlwaysAllowOrchestrator(Clock clock) {
return new OrchestratorImpl(
new AlwaysAllowPolicy(),
new ClusterControllerClientFactoryMock(),
@@ -140,7 +142,7 @@ public class HostResourceTest {
new InMemoryFlagSource());
}
- public static OrchestratorImpl createHostNotFoundOrchestrator(Clock clock) {
+ static OrchestratorImpl createHostNotFoundOrchestrator(Clock clock) {
return new OrchestratorImpl(
new AlwaysAllowPolicy(),
new ClusterControllerClientFactoryMock(),
@@ -152,9 +154,9 @@ public class HostResourceTest {
new InMemoryFlagSource());
}
- public static OrchestratorImpl createAlwaysRejectResolver(Clock clock) {
+ static OrchestratorImpl createAlwaysRejectResolver(Clock clock) {
return new OrchestratorImpl(
- new HostResourceTest.AlwaysFailPolicy(),
+ new AlwaysFailPolicy(),
new ClusterControllerClientFactoryMock(),
EVERY_HOST_IS_UP_HOST_STATUS_SERVICE,
serviceMonitor,
@@ -165,27 +167,20 @@ public class HostResourceTest {
}
@Test
- public void returns_200_on_success() {
- HostResource hostResource =
- new HostResource(alwaysAllowOrchestrator, uriInfo);
-
- final String hostName = "hostname";
+ void returns_200_on_success() throws IOException {
+ RestApiTestDriver testDriver = createTestDriver(alwaysAllowOrchestrator);
- UpdateHostResponse response = hostResource.suspend(hostName);
-
- assertEquals(hostName, response.hostname());
+ HttpResponse response = executeRequest(testDriver, Method.PUT, "/orchestrator/v1/hosts/hostname/suspended", null);
+ UpdateHostResponse updateHostResponse = parseResponseContent(testDriver, response, UpdateHostResponse.class);
+ assertEquals("hostname", updateHostResponse.hostname());
}
@Test
- public void throws_404_when_host_unknown() {
- try {
- HostResource hostResource =
- new HostResource(hostNotFoundOrchestrator, uriInfo);
- hostResource.suspend("hostname");
- fail();
- } catch (WebApplicationException w) {
- assertEquals(404, w.getResponse().getStatus());
- }
+ void throws_404_when_host_unknown() throws IOException {
+ RestApiTestDriver testDriver = createTestDriver(hostNotFoundOrchestrator);
+
+ HttpResponse response = executeRequest(testDriver, Method.PUT, "/orchestrator/v1/hosts/hostname/suspended", null);
+ assertEquals(404, response.getStatus());
}
private static class AlwaysFailPolicy implements Policy {
@@ -221,79 +216,61 @@ public class HostResourceTest {
}
@Test
- public void throws_409_when_request_rejected_by_policies() {
- final OrchestratorImpl alwaysRejectResolver = new OrchestratorImpl(
- new AlwaysFailPolicy(),
- new ClusterControllerClientFactoryMock(),
- EVERY_HOST_IS_UP_HOST_STATUS_SERVICE,
- serviceMonitor,
- SERVICE_MONITOR_CONVERGENCE_LATENCY_SECONDS,
- clock,
- applicationApiFactory,
- new InMemoryFlagSource());
+ void throws_409_when_request_rejected_by_policies() throws IOException {
+ RestApiTestDriver testDriver = createTestDriver(alwaysRejectOrchestrator);
- try {
- HostResource hostResource = new HostResource(alwaysRejectResolver, uriInfo);
- hostResource.suspend("hostname");
- fail();
- } catch (WebApplicationException w) {
- assertEquals(409, w.getResponse().getStatus());
- }
+ HttpResponse response = executeRequest(testDriver, Method.PUT, "/orchestrator/v1/hosts/hostname/suspended", null);
+ assertEquals(409, response.getStatus());
}
- @Test(expected = BadRequestException.class)
- public void patch_state_may_throw_bad_request() {
+ @Test
+ void patch_state_may_throw_bad_request() throws IOException {
Orchestrator orchestrator = mock(Orchestrator.class);
- HostResource hostResource = new HostResource(orchestrator, uriInfo);
+ RestApiTestDriver testDriver = createTestDriver(orchestrator);
- String hostNameString = "hostname";
PatchHostRequest request = new PatchHostRequest();
request.state = "bad state";
- hostResource.patch(hostNameString, request);
+ HttpResponse response = executeRequest(testDriver, Method.PATCH, "/orchestrator/v1/hosts/hostname", request);
+ assertEquals(400, response.getStatus());
}
@Test
- public void patch_works() throws OrchestrationException {
+ void patch_works() throws OrchestrationException, IOException {
Orchestrator orchestrator = mock(Orchestrator.class);
- HostResource hostResource = new HostResource(orchestrator, uriInfo);
+ RestApiTestDriver testDriver = createTestDriver(orchestrator);
String hostNameString = "hostname";
PatchHostRequest request = new PatchHostRequest();
request.state = "NO_REMARKS";
- PatchHostResponse response = hostResource.patch(hostNameString, request);
+ HttpResponse httpResponse = executeRequest(testDriver, Method.PATCH, "/orchestrator/v1/hosts/hostname", request);
+ PatchHostResponse response = parseResponseContent(testDriver, httpResponse, PatchHostResponse.class);
assertEquals(response.description, "ok");
verify(orchestrator, times(1)).setNodeStatus(new HostName(hostNameString), HostStatus.NO_REMARKS);
}
- @Test(expected = InternalServerErrorException.class)
- public void patch_handles_exception_in_orchestrator() throws OrchestrationException {
+ @Test
+ void patch_handles_exception_in_orchestrator() throws OrchestrationException, IOException {
Orchestrator orchestrator = mock(Orchestrator.class);
- HostResource hostResource = new HostResource(orchestrator, uriInfo);
+ RestApiTestDriver testDriver = createTestDriver(orchestrator);
String hostNameString = "hostname";
PatchHostRequest request = new PatchHostRequest();
request.state = "NO_REMARKS";
doThrow(new OrchestrationException("error")).when(orchestrator).setNodeStatus(new HostName(hostNameString), HostStatus.NO_REMARKS);
- hostResource.patch(hostNameString, request);
+ HttpResponse httpResponse = executeRequest(testDriver, Method.PATCH, "/orchestrator/v1/hosts/hostname", request);
+ assertEquals(500, httpResponse.getStatus());
}
@Test
- public void getHost_works() throws Exception {
+ void getHost_works() throws Exception {
Orchestrator orchestrator = mock(Orchestrator.class);
- HostResource hostResource = new HostResource(orchestrator, uriInfo);
+ RestApiTestDriver testDriver = createTestDriver(orchestrator);
HostName hostName = new HostName("hostname");
- UriBuilder baseUriBuilder = mock(UriBuilder.class);
- when(uriInfo.getBaseUriBuilder()).thenReturn(baseUriBuilder);
- when(baseUriBuilder.path(any(String.class))).thenReturn(baseUriBuilder);
- when(baseUriBuilder.path(any(Class.class))).thenReturn(baseUriBuilder);
- URI uri = new URI("https://foo.com/bar");
- when(baseUriBuilder.build()).thenReturn(uri);
-
ServiceInstance serviceInstance = new ServiceInstance(
new ConfigId("configId"),
hostName,
@@ -312,8 +289,11 @@ public class HostResourceTest {
new ApplicationInstanceId("applicationId")),
Collections.singletonList(serviceInstance));
when(orchestrator.getHost(hostName)).thenReturn(host);
- GetHostResponse response = hostResource.getHost(hostName.s());
- assertEquals("https://foo.com/bar", response.applicationUrl());
+
+ HttpResponse httpResponse = executeRequest(testDriver, Method.GET, "/orchestrator/v1/hosts/hostname", null);
+ GetHostResponse response = parseResponseContent(testDriver, httpResponse, GetHostResponse.class);
+
+ assertEquals("http://localhost/orchestrator/v1/instances/tenantId:applicationId", response.applicationUrl());
assertEquals("hostname", response.hostname());
assertEquals("ALLOWED_TO_BE_DOWN", response.state());
assertEquals("1970-01-01T00:00:00Z", response.suspendedSince());
@@ -325,18 +305,41 @@ public class HostResourceTest {
}
@Test
- public void throws_409_on_timeout() throws HostNameNotFoundException, HostStateChangeDeniedException {
+ void throws_409_on_timeout() throws HostNameNotFoundException, HostStateChangeDeniedException, IOException {
Orchestrator orchestrator = mock(Orchestrator.class);
doThrow(new UncheckedTimeoutException("Timeout Message")).when(orchestrator).resume(any(HostName.class));
- try {
- HostResource hostResource = new HostResource(orchestrator, uriInfo);
- hostResource.resume("hostname");
- fail();
- } catch (WebApplicationException w) {
- assertEquals(409, w.getResponse().getStatus());
- assertEquals("resume failed: Timeout Message [deadline]", w.getMessage());
+ RestApiTestDriver testDriver = createTestDriver(orchestrator);
+ HttpResponse httpResponse = executeRequest(testDriver, Method.DELETE, "/orchestrator/v1/hosts/hostname/suspended", null);
+ assertEquals(409, httpResponse.getStatus());
+ ByteArrayOutputStream out = new ByteArrayOutputStream();
+ httpResponse.render(out);
+ JsonTestHelper.assertJsonEquals("{\n" +
+ " \"hostname\" : \"hostname\",\n" +
+ " \"reason\" : {\n" +
+ " \"constraint\" : \"deadline\",\n" +
+ " \"message\" : \"resume failed: Timeout Message\"\n" +
+ " }\n" +
+ "}",
+ out.toString());
+ }
+
+ private RestApiTestDriver createTestDriver(Orchestrator orchestrator) {
+ return RestApiTestDriver.newBuilder(handlerContext -> new HostRequestHandler(handlerContext, orchestrator))
+ .build();
+ }
+
+ private HttpResponse executeRequest(RestApiTestDriver testDriver, Method method, String path, Object requestEntity) {
+ var builder = HttpRequestBuilder.create(method, path);
+ if (requestEntity != null) {
+ builder.withRequestContent(testDriver.requestContentOf(requestEntity));
}
+ return testDriver.executeRequest(builder.build());
+ }
+
+ private <T> T parseResponseContent(RestApiTestDriver testDriver, HttpResponse response, Class<T> responseEntityType) {
+ assertEquals(200, response.getStatus());
+ return testDriver.parseJacksonResponseContent(response, responseEntityType);
}
}
diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/HostSuspensionHandlerTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/HostSuspensionRequestHandlerTest.java
index 9d413526037..3b12160e708 100644
--- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/HostSuspensionHandlerTest.java
+++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/HostSuspensionRequestHandlerTest.java
@@ -2,17 +2,15 @@
package com.yahoo.vespa.orchestrator.resources;
import com.google.common.util.concurrent.UncheckedTimeoutException;
-import com.yahoo.container.jdisc.HttpRequest;
+import com.yahoo.container.jdisc.HttpRequestBuilder;
import com.yahoo.container.jdisc.HttpResponse;
-import com.yahoo.container.jdisc.LoggingRequestHandler;
-import com.yahoo.jdisc.test.MockMetric;
+import com.yahoo.restapi.RestApiTestDriver;
import com.yahoo.test.json.JsonTestHelper;
import com.yahoo.vespa.orchestrator.BatchHostNameNotFoundException;
import com.yahoo.vespa.orchestrator.BatchInternalErrorException;
import com.yahoo.vespa.orchestrator.Orchestrator;
import com.yahoo.vespa.orchestrator.OrchestratorImpl;
import com.yahoo.vespa.orchestrator.policy.BatchHostStateChangeDeniedException;
-import com.yahoo.vespa.orchestrator.resources.host.HostResourceTest;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -21,8 +19,6 @@ import java.io.IOException;
import java.time.Clock;
import java.time.Instant;
import java.util.List;
-import java.util.concurrent.Executors;
-import java.util.stream.Collectors;
import static com.yahoo.jdisc.http.HttpRequest.Method;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -35,7 +31,7 @@ import static org.mockito.Mockito.when;
* @author hakonhall
* @author bjorncs
*/
-class HostSuspensionHandlerTest {
+class HostSuspensionRequestHandlerTest {
private final Clock clock = mock(Clock.class);
@@ -46,15 +42,15 @@ class HostSuspensionHandlerTest {
@Test
void returns_200_on_success_batch() throws IOException {
- HostSuspensionHandler handler = createHandler(HostResourceTest.createAlwaysAllowOrchestrator(clock));
- HttpResponse response = executeSuspendAllRequest(handler, "parentHostname", List.of("hostname1", "hostname2"));
+ RestApiTestDriver testDriver = createTestDriver(HostRequestHandlerTest.createAlwaysAllowOrchestrator(clock));
+ HttpResponse response = executeSuspendAllRequest(testDriver, "parentHostname", List.of("hostname1", "hostname2"));
assertSuccess(response);
}
@Test
void returns_200_empty_batch() throws IOException {
- HostSuspensionHandler handler = createHandler(HostResourceTest.createAlwaysAllowOrchestrator(clock));
- HttpResponse response = executeSuspendAllRequest(handler, "parentHostname", List.of());
+ RestApiTestDriver testDriver = createTestDriver(HostRequestHandlerTest.createAlwaysAllowOrchestrator(clock));
+ HttpResponse response = executeSuspendAllRequest(testDriver, "parentHostname", List.of());
assertSuccess(response);
}
@@ -63,16 +59,16 @@ class HostSuspensionHandlerTest {
// hostnames are part of the request body for multi-host.
@Test
void returns_400_when_host_unknown_for_batch() {
- HostSuspensionHandler handler = createHandler(HostResourceTest.createHostNotFoundOrchestrator(clock));
- HttpResponse response = executeSuspendAllRequest(handler, "parentHostname", List.of("hostname1", "hostname2"));
+ RestApiTestDriver testDriver = createTestDriver(HostRequestHandlerTest.createHostNotFoundOrchestrator(clock));
+ HttpResponse response = executeSuspendAllRequest(testDriver, "parentHostname", List.of("hostname1", "hostname2"));
assertEquals(400, response.getStatus());
}
@Test
void returns_409_when_request_rejected_by_policies_for_batch() {
- OrchestratorImpl alwaysRejectResolver = HostResourceTest.createAlwaysRejectResolver(clock);
- HostSuspensionHandler handler = createHandler(alwaysRejectResolver);
- HttpResponse response = executeSuspendAllRequest(handler, "parentHostname", List.of("hostname1", "hostname2"));
+ OrchestratorImpl alwaysRejectResolver = HostRequestHandlerTest.createAlwaysRejectResolver(clock);
+ RestApiTestDriver testDriver = createTestDriver(alwaysRejectResolver);
+ HttpResponse response = executeSuspendAllRequest(testDriver, "parentHostname", List.of("hostname1", "hostname2"));
assertEquals(409, response.getStatus());
}
@@ -81,26 +77,20 @@ class HostSuspensionHandlerTest {
void throws_409_on_suspendAll_timeout() throws BatchHostStateChangeDeniedException, BatchHostNameNotFoundException, BatchInternalErrorException {
Orchestrator orchestrator = mock(Orchestrator.class);
doThrow(new UncheckedTimeoutException("Timeout Message")).when(orchestrator).suspendAll(any(), any());
- HostSuspensionHandler handler = createHandler(orchestrator);
- HttpResponse response = executeSuspendAllRequest(handler, "parenthost", List.of("h1", "h2", "h3"));
+ RestApiTestDriver testDriver = createTestDriver(orchestrator);
+ HttpResponse response = executeSuspendAllRequest(testDriver, "parenthost", List.of("h1", "h2", "h3"));
assertEquals(409, response.getStatus());
}
- private static HostSuspensionHandler createHandler(Orchestrator orchestrator) {
- return new HostSuspensionHandler(
- new LoggingRequestHandler.Context(Executors.newSingleThreadExecutor(), new MockMetric()),
- orchestrator);
+ private static RestApiTestDriver createTestDriver(Orchestrator orchestrator) {
+ return RestApiTestDriver.newBuilder(ctx -> new HostSuspensionRequestHandler(ctx, orchestrator))
+ .build();
}
- private static HttpResponse executeSuspendAllRequest(HostSuspensionHandler handler, String parentHostname, List<String> hostnames) {
- StringBuilder uriBuilder = new StringBuilder("/orchestrator/v1/suspensions/hosts/").append(parentHostname);
- if (!hostnames.isEmpty()) {
- uriBuilder.append(hostnames.stream()
- .map(hostname -> "hostname=" + hostname)
- .collect(Collectors.joining("&", "?", "")));
- }
- HttpRequest request = HttpRequest.createTestRequest(uriBuilder.toString(), Method.PUT);
- return handler.handle(request);
+ private static HttpResponse executeSuspendAllRequest(RestApiTestDriver testDriver, String parentHostname, List<String> hostnames) {
+ var builder = HttpRequestBuilder.create(Method.PUT, "/orchestrator/v1/suspensions/hosts/" + parentHostname);
+ hostnames.forEach(hostname -> builder.withQueryParameter("hostname", hostname));
+ return testDriver.executeRequest(builder.build());
}
private static void assertSuccess(HttpResponse response) throws IOException {
diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/InstanceRequestHandlerTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/InstanceRequestHandlerTest.java
new file mode 100644
index 00000000000..0a2c73e831f
--- /dev/null
+++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/InstanceRequestHandlerTest.java
@@ -0,0 +1,117 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.orchestrator.resources;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.container.jdisc.HttpRequest;
+import com.yahoo.container.jdisc.HttpRequestBuilder;
+import com.yahoo.container.jdisc.HttpResponse;
+import com.yahoo.jrt.slobrok.api.Mirror;
+import com.yahoo.restapi.RestApiTestDriver;
+import com.yahoo.vespa.applicationmodel.ClusterId;
+import com.yahoo.vespa.applicationmodel.ConfigId;
+import com.yahoo.vespa.applicationmodel.ServiceStatus;
+import com.yahoo.vespa.applicationmodel.ServiceStatusInfo;
+import com.yahoo.vespa.applicationmodel.ServiceType;
+import com.yahoo.vespa.orchestrator.restapi.wire.SlobrokEntryResponse;
+import com.yahoo.vespa.service.manager.UnionMonitorManager;
+import com.yahoo.vespa.service.monitor.SlobrokApi;
+import org.junit.jupiter.api.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
+import static com.yahoo.jdisc.http.HttpRequest.Method.GET;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+/**
+ * @author bjorncs
+ */
+class InstanceRequestHandlerTest {
+
+ private static final String APPLICATION_INSTANCE_REFERENCE = "tenant:app:prod:us-west-1:instance";
+ private static final ApplicationId APPLICATION_ID = ApplicationId.from(
+ "tenant", "app", "instance");
+ private static final List<Mirror.Entry> ENTRIES = Arrays.asList(
+ new Mirror.Entry("name1", "tcp/spec:1"),
+ new Mirror.Entry("name2", "tcp/spec:2"));
+ private static final ClusterId CLUSTER_ID = new ClusterId("cluster-id");
+
+ private final SlobrokApi slobrokApi = mock(SlobrokApi.class);
+ private final UnionMonitorManager rootManager = mock(UnionMonitorManager.class);
+ private final RestApiTestDriver testDriver =
+ RestApiTestDriver.newBuilder(ctx -> new InstanceRequestHandler(ctx, null, null, slobrokApi, rootManager))
+ .build();
+
+ @Test
+ void testGetSlobrokEntries() throws Exception {
+ testGetSlobrokEntriesWith("foo", "foo");
+ }
+
+ @Test
+ void testGetSlobrokEntriesWithoutPattern() throws Exception {
+ testGetSlobrokEntriesWith(null, InstanceRequestHandler.DEFAULT_SLOBROK_PATTERN);
+ }
+
+ @Test
+ void testGetServiceStatusInfo() throws IOException {
+ ServiceType serviceType = new ServiceType("serviceType");
+ ConfigId configId = new ConfigId("configId");
+ ServiceStatus serviceStatus = ServiceStatus.UP;
+ when(rootManager.getStatus(APPLICATION_ID, CLUSTER_ID, serviceType, configId))
+ .thenReturn(new ServiceStatusInfo(serviceStatus));
+
+ String uriPath = String.format("/orchestrator/v1/instances/%s/serviceStatusInfo", APPLICATION_INSTANCE_REFERENCE);
+ HttpRequest request = HttpRequestBuilder.create(GET, uriPath)
+ .withQueryParameter("clusterId", CLUSTER_ID.s())
+ .withQueryParameter("serviceType", serviceType.s())
+ .withQueryParameter("configId", configId.s())
+ .build();
+ HttpResponse response = testDriver.executeRequest(request);
+ assertEquals(200, response.getStatus());
+ ServiceStatusInfo serviceStatusInfo = testDriver.parseJacksonResponseContent(response, ServiceStatusInfo.class);
+
+ ServiceStatus actualServiceStatus = serviceStatusInfo.serviceStatus();
+ verify(rootManager).getStatus(APPLICATION_ID, CLUSTER_ID, serviceType, configId);
+ assertEquals(serviceStatus, actualServiceStatus);
+ }
+
+ @Test
+ void testBadRequest() {
+ String uriPath = String.format("/orchestrator/v1/instances/%s/serviceStatusInfo", APPLICATION_INSTANCE_REFERENCE);
+ HttpRequest request = HttpRequestBuilder.create(GET, uriPath)
+ .withQueryParameter("clusterId", CLUSTER_ID.s())
+ .build();
+ HttpResponse response = testDriver.executeRequest(request);
+ assertEquals(400, response.getStatus());
+ }
+
+ private void testGetSlobrokEntriesWith(String pattern, String expectedLookupPattern)
+ throws Exception {
+ when(slobrokApi.lookup(APPLICATION_ID, expectedLookupPattern))
+ .thenReturn(ENTRIES);
+
+ String uriPath = String.format("/orchestrator/v1/instances/%s/slobrok", APPLICATION_INSTANCE_REFERENCE);
+ var builder = HttpRequestBuilder.create(GET, uriPath);
+ if (pattern != null) {
+ builder.withQueryParameter("pattern", pattern);
+ }
+ HttpRequest request = builder.build();
+ HttpResponse response = testDriver.executeRequest(request);
+ assertEquals(200, response.getStatus());
+ List<SlobrokEntryResponse> result = testDriver.parseJacksonResponseContent(response, new TypeReference<>() {});
+
+ verify(slobrokApi).lookup(APPLICATION_ID, expectedLookupPattern);
+
+ String actualJson = testDriver.jacksonJsonMapper().writeValueAsString(result);
+ assertEquals(
+ "[{\"name\":\"name1\",\"spec\":\"tcp/spec:1\"},{\"name\":\"name2\",\"spec\":\"tcp/spec:2\"}]",
+ actualJson);
+ }
+
+}
diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/appsuspension/ApplicationSuspensionResourceTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/appsuspension/ApplicationSuspensionResourceTest.java
deleted file mode 100644
index a7514de5acd..00000000000
--- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/appsuspension/ApplicationSuspensionResourceTest.java
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.orchestrator.resources.appsuspension;
-
-import com.yahoo.application.Application;
-import com.yahoo.application.Networking;
-import com.yahoo.container.Container;
-import com.yahoo.jdisc.http.server.jetty.JettyHttpServer;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-
-import javax.ws.rs.client.Client;
-import javax.ws.rs.client.ClientBuilder;
-import javax.ws.rs.client.Entity;
-import javax.ws.rs.client.WebTarget;
-import javax.ws.rs.core.GenericType;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-import java.net.URI;
-import java.util.Set;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * Tests the implementation of the orchestrator Application API.
- *
- * @author smorgrav
- */
-public class ApplicationSuspensionResourceTest {
-
- private static final String BASE_PATH = "/orchestrator/v1/suspensions/applications";
- private static final String RESOURCE_1 = "mediasearch:imagesearch:default";
- private static final String RESOURCE_2 = "test-tenant-id:application:instance";
- private static final String INVALID_RESOURCE_NAME = "something_without_colons";
-
- private Application jdiscApplication;
- private WebTarget webTarget;
-
- @Before
- public void setup() throws Exception {
- jdiscApplication = Application.fromServicesXml(servicesXml(), Networking.enable);
- Client client = ClientBuilder.newClient();
-
- JettyHttpServer serverProvider = (JettyHttpServer) Container.get().getServerProviderRegistry().allComponents().get(0);
- String url = "http://localhost:" + serverProvider.getListenPort() + BASE_PATH;
- webTarget = client.target(new URI(url));
- }
-
- @After
- public void teardown() {
- jdiscApplication.close();
- webTarget = null;
- }
-
- @Ignore
- @Test
- public void run_application_locally_for_manual_browser_testing() throws Exception {
- System.out.println(webTarget.getUri());
- Thread.sleep(3600 * 1000);
- }
-
- @Test
- public void get_all_suspended_applications_return_empty_list_initially() {
- Response reply = webTarget.request().get();
- assertEquals(200, reply.getStatus());
- assertEquals("[]", reply.readEntity(String.class));
- }
-
- @Test
- public void invalid_application_id_throws_http_400() {
- Response reply = webTarget.request().post(Entity.entity(INVALID_RESOURCE_NAME, MediaType.APPLICATION_JSON_TYPE));
- assertEquals(400, reply.getStatus());
- }
-
- @Test
- public void get_application_status_returns_404_for_not_suspended_and_204_for_suspended() {
- // Get on application that is not suspended
- Response reply = webTarget.path(RESOURCE_1).request().get();
- assertEquals(404, reply.getStatus());
-
- // Post application
- reply = webTarget.request().post(Entity.entity(RESOURCE_1, MediaType.APPLICATION_JSON_TYPE));
- assertEquals(204, reply.getStatus());
-
- // Get on the application that now should be in suspended
- reply = webTarget.path(RESOURCE_1).request().get();
- assertEquals(204, reply.getStatus());
- }
-
- @Test
- public void delete_works_on_suspended_and_not_suspended_applications() {
- // Delete an application that is not suspended
- Response reply = webTarget.path(RESOURCE_1).request().delete();
- assertEquals(204, reply.getStatus());
-
- // Put application in suspend
- reply = webTarget.request().post(Entity.entity(RESOURCE_1, MediaType.APPLICATION_JSON_TYPE));
- assertEquals(204, reply.getStatus());
-
- // Check that it is in suspend
- reply = webTarget.path(RESOURCE_1).request(MediaType.APPLICATION_JSON).get();
- assertEquals(204, reply.getStatus());
-
- // Delete it
- reply = webTarget.path(RESOURCE_1).request().delete();
- assertEquals(204, reply.getStatus());
-
- // Check that it is not in suspend anymore
- reply = webTarget.path(RESOURCE_1).request(MediaType.APPLICATION_JSON).get();
- assertEquals(404, reply.getStatus());
- }
-
- @Test
- public void list_applications_returns_the_correct_list_of_suspended_applications() {
- // Test that initially we have the empty set
- Response reply = webTarget.request(MediaType.APPLICATION_JSON).get();
- assertEquals(200, reply.getStatus());
- assertEquals("[]", reply.readEntity(String.class));
-
- // Add a couple of applications to maintenance
- webTarget.request().post(Entity.entity(RESOURCE_1, MediaType.APPLICATION_JSON_TYPE));
- webTarget.request().post(Entity.entity(RESOURCE_2, MediaType.APPLICATION_JSON_TYPE));
- assertEquals(200, reply.getStatus());
-
- // Test that we get them back
- Set<String> responses = webTarget.request(MediaType.APPLICATION_JSON_TYPE)
- .get(new GenericType<Set<String>>() {});
- assertEquals(2, responses.size());
-
- // Remove suspend for the first resource
- webTarget.path(RESOURCE_1).request().delete();
-
- // Test that we are back to the start with the empty set
- responses = webTarget.request(MediaType.APPLICATION_JSON_TYPE)
- .get(new GenericType<Set<String>>() {});
- assertEquals(1, responses.size());
- assertEquals(RESOURCE_2, responses.iterator().next());
- }
-
- private String servicesXml() {
- return "<services>\n" +
- " <container version=\"1.0\" jetty=\"true\">\n" +
- " <accesslog type=\"disabled\"/>\n" +
- " <config name=\"container.handler.threadpool\">\n" +
- " <maxthreads>10</maxthreads>\n" +
- " </config>\n" +
- " <component id=\"com.yahoo.vespa.flags.InMemoryFlagSource\" bundle=\"flags\" />\n" +
- " <component id=\"com.yahoo.vespa.curator.mock.MockCurator\" bundle=\"zkfacade\" />\n" +
- " <component id=\"com.yahoo.vespa.orchestrator.status.ZkStatusService\" bundle=\"orchestrator\" />\n" +
- " <component id=\"com.yahoo.vespa.orchestrator.DummyServiceMonitor\" bundle=\"orchestrator\" />\n" +
- " <component id=\"com.yahoo.vespa.orchestrator.OrchestratorImpl\" bundle=\"orchestrator\" />\n" +
- " <component id=\"com.yahoo.vespa.orchestrator.controller.ClusterControllerClientFactoryMock\" bundle=\"orchestrator\" />\n" +
- "\n" +
- " <rest-api path=\"orchestrator/v1/suspensions/applications\" jersey2=\"true\">\n" +
- " <components bundle=\"orchestrator\">\n" +
- " <package>com.yahoo.vespa.orchestrator.resources.appsuspension</package>\n" +
- " </components>\n" +
- " </rest-api>\n" +
- "\n" +
- " <http>\n" +
- " <server id=\"foo\" port=\"0\"/>\n" +
- " </http>\n" +
- " </container>\n" +
- "</services>\n";
- }
-
-}
diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/instance/InstanceResourceTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/instance/InstanceResourceTest.java
deleted file mode 100644
index 8e2eeb7410d..00000000000
--- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/instance/InstanceResourceTest.java
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.orchestrator.resources.instance;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.jrt.slobrok.api.Mirror;
-import com.yahoo.vespa.applicationmodel.ClusterId;
-import com.yahoo.vespa.applicationmodel.ConfigId;
-import com.yahoo.vespa.applicationmodel.ServiceStatus;
-import com.yahoo.vespa.applicationmodel.ServiceStatusInfo;
-import com.yahoo.vespa.applicationmodel.ServiceType;
-import com.yahoo.vespa.orchestrator.resources.instance.InstanceResource;
-import com.yahoo.vespa.orchestrator.restapi.wire.SlobrokEntryResponse;
-import com.yahoo.vespa.service.manager.UnionMonitorManager;
-import com.yahoo.vespa.service.monitor.SlobrokApi;
-import org.junit.Test;
-
-import javax.ws.rs.WebApplicationException;
-import java.util.Arrays;
-import java.util.List;
-
-import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-public class InstanceResourceTest {
- private static final String APPLICATION_INSTANCE_REFERENCE = "tenant:app:prod:us-west-1:instance";
- private static final ApplicationId APPLICATION_ID = ApplicationId.from(
- "tenant", "app", "instance");
- private static final List<Mirror.Entry> ENTRIES = Arrays.asList(
- new Mirror.Entry("name1", "tcp/spec:1"),
- new Mirror.Entry("name2", "tcp/spec:2"));
- private static final ClusterId CLUSTER_ID = new ClusterId("cluster-id");
-
- private final SlobrokApi slobrokApi = mock(SlobrokApi.class);
- private final UnionMonitorManager rootManager = mock(UnionMonitorManager.class);
- private final InstanceResource resource = new InstanceResource(
- null,
- null,
- slobrokApi,
- rootManager);
-
- @Test
- public void testGetSlobrokEntries() throws Exception {
- testGetSlobrokEntriesWith("foo", "foo");
- }
-
- @Test
- public void testGetSlobrokEntriesWithoutPattern() throws Exception {
- testGetSlobrokEntriesWith(null, InstanceResource.DEFAULT_SLOBROK_PATTERN);
- }
-
- @Test
- public void testGetServiceStatusInfo() {
- ServiceType serviceType = new ServiceType("serviceType");
- ConfigId configId = new ConfigId("configId");
- ServiceStatus serviceStatus = ServiceStatus.UP;
- when(rootManager.getStatus(APPLICATION_ID, CLUSTER_ID, serviceType, configId))
- .thenReturn(new ServiceStatusInfo(serviceStatus));
- ServiceStatus actualServiceStatus = resource.getServiceStatus(
- APPLICATION_INSTANCE_REFERENCE,
- CLUSTER_ID.s(),
- serviceType.s(),
- configId.s()).serviceStatus();
- verify(rootManager).getStatus(APPLICATION_ID, CLUSTER_ID, serviceType, configId);
- assertEquals(serviceStatus, actualServiceStatus);
- }
-
- @Test(expected = WebApplicationException.class)
- public void testBadRequest() {
- resource.getServiceStatus(APPLICATION_INSTANCE_REFERENCE, CLUSTER_ID.s(), null, null);
- }
-
- private void testGetSlobrokEntriesWith(String pattern, String expectedLookupPattern)
- throws Exception{
- when(slobrokApi.lookup(APPLICATION_ID, expectedLookupPattern))
- .thenReturn(ENTRIES);
-
- List<SlobrokEntryResponse> response = resource.getSlobrokEntries(
- APPLICATION_INSTANCE_REFERENCE,
- pattern);
-
- verify(slobrokApi).lookup(APPLICATION_ID, expectedLookupPattern);
-
- ObjectMapper mapper = new ObjectMapper();
- String actualJson = mapper.writeValueAsString(response);
- assertEquals(
- "[{\"name\":\"name1\",\"spec\":\"tcp/spec:1\"},{\"name\":\"name2\",\"spec\":\"tcp/spec:2\"}]",
- actualJson);
- }
-} \ No newline at end of file
diff --git a/parent/pom.xml b/parent/pom.xml
index 0f2ea616193..184ab475722 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -28,30 +28,12 @@
<url>https://github.com/vespa-engine</url>
</developer>
</developers>
- <distributionManagement>
- <repository>
- <id>bintray-vespa-repo</id>
- <url>https://api.bintray.com/maven/yahoo/maven/vespa;publish=1</url>
- </repository>
- </distributionManagement>
<scm>
<connection>scm:git:git@github.com:vespa-engine/vespa.git</connection>
<developerConnection>scm:git:git@github.com:vespa-engine/vespa.git</developerConnection>
<url>git@github.com:vespa-engine/vespa.git</url>
</scm>
- <repositories>
- <!-- Required for Athenz libraries -->
- <repository>
- <snapshots>
- <enabled>false</enabled>
- </snapshots>
- <id>bintray-yahoo-maven</id>
- <name>bintray</name>
- <url>https://yahoo.bintray.com/maven</url>
- </repository>
- </repositories>
-
<build>
<finalName>${project.artifactId}</finalName>
<extensions>
@@ -431,6 +413,16 @@
<version>3.10.0</version>
</dependency>
<dependency>
+ <groupId>com.fasterxml.jackson.jaxrs</groupId>
+ <artifactId>jackson-jaxrs-xml-provider</artifactId>
+ <version>${jackson2.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>com.fasterxml.jackson.dataformat</groupId>
+ <artifactId>jackson-dataformat-xml</artifactId>
+ <version>${jackson2.version}</version>
+ </dependency>
+ <dependency>
<groupId>com.github.cverges.expect4j</groupId>
<artifactId>expect4j</artifactId>
<version>1.6</version>
@@ -449,6 +441,11 @@
<version>2.6.0</version>
</dependency>
<dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava-testlib</artifactId>
+ <version>${guava.version}</version>
+ </dependency>
+ <dependency>
<groupId>com.google.jimfs</groupId>
<artifactId>jimfs</artifactId>
<version>1.1</version>
@@ -490,6 +487,21 @@
<version>${athenz.version}</version>
</dependency>
<dependency>
+ <groupId>io.jsonwebtoken</groupId>
+ <artifactId>jjwt-api</artifactId>
+ <version>${jjwt.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>io.jsonwebtoken</groupId>
+ <artifactId>jjwt-impl</artifactId>
+ <version>${jjwt.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>io.jsonwebtoken</groupId>
+ <artifactId>jjwt-jackson</artifactId>
+ <version>${jjwt.version}</version>
+ </dependency>
+ <dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-math3</artifactId>
<version>${commons.math3.version}</version>
@@ -550,6 +562,11 @@
<version>${antlr4.version}</version>
</dependency>
<dependency>
+ <groupId>org.apache.aries.spifly</groupId>
+ <artifactId>org.apache.aries.spifly.dynamic.bundle</artifactId>
+ <version>${spifly.version}</version>
+ </dependency>
+ <dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-compress</artifactId>
<version>1.19</version>
@@ -687,6 +704,51 @@
<version>9.2.0</version>
</dependency>
<dependency>
+ <groupId>org.eclipse.jetty.alpn</groupId>
+ <artifactId>alpn-api</artifactId>
+ <version>${jetty-alpn.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty.http2</groupId>
+ <artifactId>http2-server</artifactId>
+ <version>${jetty.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-alpn-java-server</artifactId>
+ <version>${jetty.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-continuation</artifactId>
+ <version>${jetty.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-server</artifactId>
+ <version>${jetty.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-servlet</artifactId>
+ <version>${jetty.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-servlets</artifactId>
+ <version>${jetty.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.eclipse.jetty</groupId>
+ <artifactId>jetty-jmx</artifactId>
+ <version>${jetty.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.glassfish.jaxb</groupId>
+ <artifactId>jaxb-runtime</artifactId>
+ <version>2.3.2</version> <!-- 2.3.3 has a BROKEN manifest -->
+ </dependency>
+ <dependency>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-all</artifactId>
<version>1.3</version>
@@ -736,11 +798,6 @@
<version>${asm.version}</version>
</dependency>
<dependency>
- <groupId>org.springframework</groupId>
- <artifactId>spring-test</artifactId>
- <version>4.0.6.RELEASE</version>
- </dependency>
- <dependency>
<groupId>org.tensorflow</groupId>
<artifactId>proto</artifactId>
<version>${tensorflow.version}</version>
@@ -780,9 +837,10 @@
<apache.httpclient.version>4.5.12</apache.httpclient.version>
<apache.httpcore.version>4.4.13</apache.httpcore.version>
<apache.httpclient5.version>5.0.3</apache.httpclient5.version>
- <asm.version>7.0</asm.version>
+ <asm.version>9.1</asm.version>
<!-- Athenz dependencies. Make sure these dependencies match those in Vespa's internal repositories -->
- <athenz.version>1.8.49</athenz.version>
+ <athenz.version>1.10.11</athenz.version>
+ <jjwt.version>0.11.2</jjwt.version>
<aws.sdk.version>1.11.974</aws.sdk.version>
<!-- WARNING: If you change curator version, you also need to update
zkfacade/src/main/java/org/apache/curator/**/package-info.java
@@ -811,6 +869,7 @@
<maven-source-plugin.version>3.0.1</maven-source-plugin.version>
<prometheus.client.version>0.6.0</prometheus.client.version>
<protobuf.version>3.7.0</protobuf.version>
+ <spifly.version>1.3.3</spifly.version>
<surefire.version>2.22.0</surefire.version>
<tensorflow.version>1.12.0</tensorflow.version>
<zookeeper.client.version>3.6.2</zookeeper.client.version>
diff --git a/pom.xml b/pom.xml
index 367a3901040..d20aac32ba0 100644
--- a/pom.xml
+++ b/pom.xml
@@ -57,7 +57,6 @@
<module>container-dependencies-enforcer</module>
<module>container-dependency-versions</module>
<module>container-dev</module>
- <module>container-di</module>
<module>container-disc</module>
<module>container-documentapi</module>
<module>container-integration-test</module>
@@ -119,7 +118,6 @@
<module>security-utils</module>
<module>serviceview</module>
<module>service-monitor</module>
- <module>simplemetrics</module>
<module>socket_test</module>
<module>standalone-container</module>
<module>statistics</module>
diff --git a/screwdriver.yaml b/screwdriver.yaml
index 9bf0ad95934..30a3ffaeaab 100644
--- a/screwdriver.yaml
+++ b/screwdriver.yaml
@@ -7,7 +7,7 @@ cache:
jobs:
main:
requires: [~pr, ~commit]
- image: vespaengine/vespa-build-centos7
+ image: vespaengine/vespa-build-centos7:current
annotations:
screwdriver.cd/cpu: 7
screwdriver.cd/ram: 16
@@ -30,6 +30,7 @@ jobs:
cat /proc/meminfo
df -h
uname -a
+ rpm -qa | grep "vespa"
- restore-cache: |
(cd /tmp && if [[ -f $MAIN_CACHE_FILE ]]; then tar xf $MAIN_CACHE_FILE; fi)
diff --git a/searchcommon/src/vespa/searchcommon/attribute/config.cpp b/searchcommon/src/vespa/searchcommon/attribute/config.cpp
index 065e9c14de9..c62d7ef0ea1 100644
--- a/searchcommon/src/vespa/searchcommon/attribute/config.cpp
+++ b/searchcommon/src/vespa/searchcommon/attribute/config.cpp
@@ -14,6 +14,7 @@ Config::Config() noexcept :
_isFilter(false),
_fastAccess(false),
_mutable(false),
+ _match(Match::UNCASED),
_dictionary(),
_growStrategy(),
_compactionStrategy(),
@@ -34,6 +35,7 @@ Config::Config(BasicType bt, CollectionType ct, bool fastSearch_, bool huge_) no
_isFilter(false),
_fastAccess(false),
_mutable(false),
+ _match(Match::UNCASED),
_dictionary(),
_growStrategy(),
_compactionStrategy(),
@@ -62,6 +64,7 @@ Config::operator==(const Config &b) const
_isFilter == b._isFilter &&
_fastAccess == b._fastAccess &&
_mutable == b._mutable &&
+ _match == b._match &&
_dictionary == b._dictionary &&
_growStrategy == b._growStrategy &&
_compactionStrategy == b._compactionStrategy &&
diff --git a/searchcommon/src/vespa/searchcommon/attribute/config.h b/searchcommon/src/vespa/searchcommon/attribute/config.h
index c1b30303606..fdf3a00ac99 100644
--- a/searchcommon/src/vespa/searchcommon/attribute/config.h
+++ b/searchcommon/src/vespa/searchcommon/attribute/config.h
@@ -22,6 +22,7 @@ namespace search::attribute {
*/
class Config {
public:
+ enum class Match { CASED, UNCASED };
Config() noexcept;
Config(BasicType bt) noexcept : Config(bt, CollectionType::SINGLE) { }
Config(BasicType bt, CollectionType ct) noexcept : Config(bt, ct, false) { }
@@ -68,6 +69,7 @@ public:
const GrowStrategy & getGrowStrategy() const { return _growStrategy; }
const CompactionStrategy &getCompactionStrategy() const { return _compactionStrategy; }
const DictionaryConfig & get_dictionary_config() const { return _dictionary; }
+ Match get_match() const { return _match; }
Config & setHuge(bool v) { _huge = v; return *this;}
Config & setFastSearch(bool v) { _fastSearch = v; return *this; }
Config & setPredicateParams(const PredicateParams &v) { _predicateParams = v; return *this; }
@@ -121,6 +123,7 @@ public:
return *this;
}
Config & set_dictionary_config(const DictionaryConfig & cfg) { _dictionary = cfg; return *this; }
+ Config & set_match(Match match) { _match = match; return *this; }
bool operator!=(const Config &b) const { return !(operator==(b)); }
bool operator==(const Config &b) const;
@@ -134,6 +137,7 @@ private:
bool _isFilter;
bool _fastAccess;
bool _mutable;
+ Match _match;
DictionaryConfig _dictionary;
GrowStrategy _growStrategy;
CompactionStrategy _compactionStrategy;
diff --git a/searchcore/src/apps/tests/persistenceconformance_test.cpp b/searchcore/src/apps/tests/persistenceconformance_test.cpp
index d28e7a1f8d3..69d509c25fd 100644
--- a/searchcore/src/apps/tests/persistenceconformance_test.cpp
+++ b/searchcore/src/apps/tests/persistenceconformance_test.cpp
@@ -192,34 +192,18 @@ public:
config::DirSpec spec(inputCfg + "/config-1");
TuneFileDocumentDB::SP tuneFileDocDB(new TuneFileDocumentDB());
DocumentDBConfigHelper mgr(spec, docType.getName());
- BootstrapConfig::SP b(new BootstrapConfig(1,
- factory.getTypeCfg(),
- factory.getTypeRepo(),
+ auto b = std::make_shared<BootstrapConfig>(1, factory.getTypeCfg(), factory.getTypeRepo(),
std::make_shared<ProtonConfig>(),
std::make_shared<FiledistributorrpcConfig>(),
std::make_shared<BucketspacesConfig>(),
- tuneFileDocDB, HwInfo()));
+ tuneFileDocDB, HwInfo());
mgr.forwardConfig(b);
mgr.nextGeneration(0ms);
- return std::make_shared<DocumentDB>(_baseDir,
- mgr.getConfig(),
- _tlsSpec,
- _queryLimiter,
- _clock,
- docType,
- bucketSpace,
- *b->getProtonConfigSP(),
- const_cast<DocumentDBFactory &>(*this),
- _summaryExecutor,
- _summaryExecutor,
- _bucketExecutor,
- _tls,
- _metricsWireService,
- _fileHeaderContext,
- _config_stores.getConfigStore(docType.toString()),
- std::make_shared<vespalib::ThreadStackExecutor>
- (16, 128_Ki),
- HwInfo());
+ return DocumentDB::create(_baseDir, mgr.getConfig(), _tlsSpec, _queryLimiter, _clock, docType, bucketSpace,
+ *b->getProtonConfigSP(), const_cast<DocumentDBFactory &>(*this),
+ _summaryExecutor, _summaryExecutor, _bucketExecutor, _tls, _metricsWireService,
+ _fileHeaderContext, _config_stores.getConfigStore(docType.toString()),
+ std::make_shared<vespalib::ThreadStackExecutor>(16, 128_Ki), HwInfo());
}
};
diff --git a/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp b/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp
index 846ff6ece08..4191d9bc442 100644
--- a/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp
+++ b/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp
@@ -798,24 +798,12 @@ PersistenceProviderFixture::create_document_db(const BMParams & params)
tuneFileDocDB, HwInfo());
mgr.forwardConfig(bootstrap_config);
mgr.nextGeneration(0ms);
- _document_db = std::make_shared<DocumentDB>(_base_dir,
- mgr.getConfig(),
- _tls_spec,
- _query_limiter,
- _clock,
- _doc_type_name,
- _bucket_space,
- *bootstrap_config->getProtonConfigSP(),
- _document_db_owner,
- _summary_executor,
- _summary_executor,
- *_persistence_engine,
- _tls,
- _metrics_wire_service,
- _file_header_context,
- _config_stores.getConfigStore(_doc_type_name.toString()),
- std::make_shared<vespalib::ThreadStackExecutor>(16, 128_Ki),
- HwInfo());
+ _document_db = DocumentDB::create(_base_dir, mgr.getConfig(), _tls_spec, _query_limiter, _clock, _doc_type_name,
+ _bucket_space, *bootstrap_config->getProtonConfigSP(), _document_db_owner,
+ _summary_executor, _summary_executor, *_persistence_engine, _tls,
+ _metrics_wire_service, _file_header_context,
+ _config_stores.getConfigStore(_doc_type_name.toString()),
+ std::make_shared<vespalib::ThreadStackExecutor>(16, 128_Ki), HwInfo());
_document_db->start();
_document_db->waitForOnlineState();
}
diff --git a/searchcore/src/tests/proton/docsummary/docsummary.cpp b/searchcore/src/tests/proton/docsummary/docsummary.cpp
index 9529555a33c..8dae53abf50 100644
--- a/searchcore/src/tests/proton/docsummary/docsummary.cpp
+++ b/searchcore/src/tests/proton/docsummary/docsummary.cpp
@@ -186,7 +186,7 @@ public:
const std::shared_ptr<const DocumentTypeRepo> _repo;
TuneFileDocumentDB::SP _tuneFileDocumentDB;
HwInfo _hwInfo;
- std::unique_ptr<DocumentDB> _ddb;
+ std::shared_ptr<DocumentDB> _ddb;
AttributeWriter::UP _aw;
ISummaryAdapter::SP _sa;
@@ -221,11 +221,11 @@ public:
if (! FastOS_File::MakeDirectory((std::string("tmpdb/") + docTypeName).c_str())) {
LOG_ABORT("should not be reached");
}
- _ddb = std::make_unique<DocumentDB>("tmpdb", _configMgr.getConfig(), "tcp/localhost:9013", _queryLimiter, _clock,
- DocTypeName(docTypeName), makeBucketSpace(), *b->getProtonConfigSP(), *this,
- _summaryExecutor, _summaryExecutor, _bucketExecutor, _tls, _dummy, _fileHeaderContext,
- std::make_unique<MemoryConfigStore>(),
- std::make_shared<vespalib::ThreadStackExecutor>(16, 128_Ki), _hwInfo),
+ _ddb = DocumentDB::create("tmpdb", _configMgr.getConfig(), "tcp/localhost:9013", _queryLimiter, _clock,
+ DocTypeName(docTypeName), makeBucketSpace(), *b->getProtonConfigSP(), *this,
+ _summaryExecutor, _summaryExecutor, _bucketExecutor, _tls, _dummy, _fileHeaderContext,
+ std::make_unique<MemoryConfigStore>(),
+ std::make_shared<vespalib::ThreadStackExecutor>(16, 128_Ki), _hwInfo),
_ddb->start();
_ddb->waitForOnlineState();
_aw = std::make_unique<AttributeWriter>(_ddb->getReadySubDB()->getAttributeManager());
diff --git a/searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_v2_test.cpp b/searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_v2_test.cpp
index 450fa7e8318..8dcad91f69a 100644
--- a/searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_v2_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_v2_test.cpp
@@ -37,6 +37,7 @@ struct ControllerFixtureBase : public ::testing::Test
MySubDb _notReady;
BucketCreateNotifier _bucketCreateNotifier;
test::DiskMemUsageNotifier _diskMemUsageNotifier;
+ MonitoredRefCount _refCount;
ThreadStackExecutor _singleExecutor;
ExecutorThreadService _master;
DummyBucketExecutor _bucketExecutor;
@@ -118,12 +119,13 @@ ControllerFixtureBase::ControllerFixtureBase(const BlockableMaintenanceJobConfig
_notReady(_builder.getRepo(), _bucketDB, 2, SubDbType::NOTREADY),
_bucketCreateNotifier(),
_diskMemUsageNotifier(),
+ _refCount(),
_singleExecutor(1, 0x10000),
_master(_singleExecutor),
_bucketExecutor(4),
_moveHandler(*_bucketDB, storeMoveDoneContexts),
_metrics("test", 1),
- _bmj(BucketMoveJobV2::create(_calc, _moveHandler, _modifiedHandler, _master, _bucketExecutor, _ready._subDb,
+ _bmj(BucketMoveJobV2::create(_calc, RetainGuard(_refCount), _moveHandler, _modifiedHandler, _master, _bucketExecutor, _ready._subDb,
_notReady._subDb, _bucketCreateNotifier,_clusterStateHandler, _bucketHandler,
_diskMemUsageNotifier, blockableConfig, "test", makeBucketSpace())),
_runner(*_bmj)
diff --git a/searchcore/src/tests/proton/documentdb/documentdb_test.cpp b/searchcore/src/tests/proton/documentdb/documentdb_test.cpp
index 3c65326b11a..463a7b164e1 100644
--- a/searchcore/src/tests/proton/documentdb/documentdb_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/documentdb_test.cpp
@@ -108,7 +108,7 @@ Fixture::Fixture()
tuneFileDocumentDB, HwInfo());
mgr.forwardConfig(b);
mgr.nextGeneration(0ms);
- _db = std::make_shared<DocumentDB>(".", mgr.getConfig(), "tcp/localhost:9014", _queryLimiter, _clock, DocTypeName("typea"),
+ _db = DocumentDB::create(".", mgr.getConfig(), "tcp/localhost:9014", _queryLimiter, _clock, DocTypeName("typea"),
makeBucketSpace(),
*b->getProtonConfigSP(), _myDBOwner, _summaryExecutor, _summaryExecutor, _bucketExecutor, _tls, _dummy,
_fileHeaderContext, std::make_unique<MemoryConfigStore>(),
diff --git a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.cpp b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.cpp
index f592aeab9d2..d394769c0ee 100644
--- a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.cpp
+++ b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.cpp
@@ -28,11 +28,12 @@ struct MyCountJobRunner : public IMaintenanceJobRunner {
};
JobTestBase::JobTestBase()
- : _handler(),
+ : _refCount(),
+ _clusterStateHandler(),
+ _diskMemUsageNotifier(),
+ _handler(),
_storer(),
_frozenHandler(),
- _diskMemUsageNotifier(),
- _clusterStateHandler(),
_job()
{
init(ALLOWED_LID_BLOAT, ALLOWED_LID_BLOAT_FACTOR, RESOURCE_LIMIT_FACTOR, JOB_DELAY, false, MAX_OUTSTANDING_MOVE_OPS);
@@ -56,7 +57,7 @@ JobTestBase::init(uint32_t allowedLidBloat,
_singleExecutor = std::make_unique<vespalib::ThreadStackExecutor>(1, 0x10000);
_master = std::make_unique<proton::ExecutorThreadService> (*_singleExecutor);
_bucketExecutor = std::make_unique<storage::spi::dummy::DummyBucketExecutor>(4);
- _job = lidspace::CompactionJob::create(compactCfg, _handler, _storer, *_master, *_bucketExecutor,
+ _job = lidspace::CompactionJob::create(compactCfg, RetainGuard(_refCount), _handler, _storer, *_master, *_bucketExecutor,
_diskMemUsageNotifier, blockableCfg, _clusterStateHandler, nodeRetired,
document::BucketSpace::placeHolder());
} else {
diff --git a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.h b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.h
index 0a4e5c56acb..dde48a0a620 100644
--- a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.h
+++ b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.h
@@ -2,20 +2,22 @@
#include "lid_space_common.h"
#include <vespa/searchcore/proton/server/blockable_maintenance_job.h>
+#include <vespa/searchcore/proton/common/monitored_refcount.h>
#include <vespa/persistence/spi/bucketexecutor.h>
#include <vespa/searchcorespi/index/i_thread_service.h>
#include <vespa/vespalib/gtest/gtest.h>
namespace storage::spi::dummy { class DummyBucketExecutor; }
struct JobTestBase : public ::testing::TestWithParam<bool> {
+ MonitoredRefCount _refCount;
+ test::ClusterStateHandler _clusterStateHandler;
+ test::DiskMemUsageNotifier _diskMemUsageNotifier;
std::unique_ptr<storage::spi::dummy::DummyBucketExecutor> _bucketExecutor;
std::unique_ptr<vespalib::SyncableThreadExecutor> _singleExecutor;
std::unique_ptr<searchcorespi::index::IThreadService> _master;
std::shared_ptr<MyHandler> _handler;
MyStorer _storer;
MyFrozenBucketHandler _frozenHandler;
- test::DiskMemUsageNotifier _diskMemUsageNotifier;
- test::ClusterStateHandler _clusterStateHandler;
std::shared_ptr<BlockableMaintenanceJob> _job;
JobTestBase();
~JobTestBase() override;
diff --git a/searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp b/searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp
index b5e2e9d0b01..b0c46dbd789 100644
--- a/searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp
@@ -6,6 +6,7 @@
#include <vespa/searchcore/proton/attribute/i_attribute_manager.h>
#include <vespa/searchcore/proton/bucketdb/bucket_create_notifier.h>
#include <vespa/searchcore/proton/common/doctypename.h>
+#include <vespa/searchcore/proton/common/monitored_refcount.h>
#include <vespa/searchcore/proton/common/transient_resource_usage_provider.h>
#include <vespa/searchcore/proton/documentmetastore/operation_listener.h>
#include <vespa/searchcore/proton/documentmetastore/documentmetastore.h>
@@ -307,7 +308,10 @@ struct MySimpleJob : public BlockableMaintenanceJob
++_runCnt;
return true;
}
- void onStop() override { _stopped = true; }
+ void onStop() override {
+ BlockableMaintenanceJob::onStop();
+ _stopped = true;
+ }
};
struct MySplitJob : public MySimpleJob
@@ -344,7 +348,10 @@ struct MyLongRunningJob : public BlockableMaintenanceJob
usleep(10000);
return false;
}
- void onStop() override { _stopped = true; }
+ void onStop() override {
+ BlockableMaintenanceJob::onStop();
+ _stopped = true;
+ }
};
using MyAttributeManager = test::MockAttributeManager;
@@ -376,6 +383,7 @@ public:
AttributeUsageFilter _attributeUsageFilter;
test::DiskMemUsageNotifier _diskMemUsageNotifier;
BucketCreateNotifier _bucketCreateNotifier;
+ MonitoredRefCount _refCount;
MaintenanceController _mc;
MaintenanceControllerFixture();
@@ -794,7 +802,8 @@ MaintenanceControllerFixture::MaintenanceControllerFixture()
_notReadyAttributeManager(std::make_shared<MyAttributeManager>()),
_attributeUsageFilter(),
_bucketCreateNotifier(),
- _mc(_threadService, _genericExecutor, _docTypeName)
+ _refCount(),
+ _mc(_threadService, _genericExecutor, _refCount, _docTypeName)
{
std::vector<MyDocumentSubDB *> subDBs;
subDBs.push_back(&_ready);
@@ -847,7 +856,7 @@ MaintenanceControllerFixture::injectMaintenanceJobs()
{
if (_injectDefaultJobs) {
MaintenanceJobsInjector::injectJobs(_mc, *_mcCfg, _bucketExecutor, _fh, _gsp, _fh, _mc,
- _bucketCreateNotifier, _docTypeName.getName(), makeBucketSpace(), _fh, _fh,
+ _bucketCreateNotifier, makeBucketSpace(), _fh, _fh,
_bmc, _clusterStateHandler, _bucketHandler, _calc, _diskMemUsageNotifier,
_jobTrackers, _readyAttributeManager, _notReadyAttributeManager,
std::make_unique<const AttributeConfigInspector>(AttributesConfigBuilder()),
diff --git a/searchcore/src/vespa/searchcore/config/proton.def b/searchcore/src/vespa/searchcore/config/proton.def
index cc11384ac18..ec7752368e9 100644
--- a/searchcore/src/vespa/searchcore/config/proton.def
+++ b/searchcore/src/vespa/searchcore/config/proton.def
@@ -71,8 +71,10 @@ flush.memory.each.maxmemory long default=1073741824
flush.memory.each.diskbloatfactor double default=0.2
## Age of unflushed content before forcing age prioritization.
-## Unit is seconds with 1 day being the default.
-flush.memory.maxage.time double default=86400.0
+## Unit is seconds with 31 hours being the default.
+## 31 is selected because both 31 and (31-24=7) are primes, so the flush time will not
+## land near a multiple of a day for about a month, and it will always be at least one hour away.
+flush.memory.maxage.time double default=111600.0
## When resource limit for memory is reached we choose a conservative mode for the flush strategy.
## In this case this factor is multiplied with 'maxmemory' and 'each.maxmemory' to calculate conservative values to use instead.
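For reference, the new default is simply 31 hours expressed in seconds: 31 × 3600 = 111600, replacing the previous one-day default of 24 × 3600 = 86400.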
diff --git a/searchcore/src/vespa/searchcore/proton/common/monitored_refcount.cpp b/searchcore/src/vespa/searchcore/proton/common/monitored_refcount.cpp
index 3cecc8308ad..6b35528318a 100644
--- a/searchcore/src/vespa/searchcore/proton/common/monitored_refcount.cpp
+++ b/searchcore/src/vespa/searchcore/proton/common/monitored_refcount.cpp
@@ -18,14 +18,14 @@ MonitoredRefCount::~MonitoredRefCount()
}
void
-MonitoredRefCount::retain()
+MonitoredRefCount::retain() noexcept
{
std::lock_guard<std::mutex> guard(_lock);
++_refCount;
}
void
-MonitoredRefCount::release()
+MonitoredRefCount::release() noexcept
{
std::lock_guard<std::mutex> guard(_lock);
--_refCount;
diff --git a/searchcore/src/vespa/searchcore/proton/common/monitored_refcount.h b/searchcore/src/vespa/searchcore/proton/common/monitored_refcount.h
index 44413814f51..3559278c70f 100644
--- a/searchcore/src/vespa/searchcore/proton/common/monitored_refcount.h
+++ b/searchcore/src/vespa/searchcore/proton/common/monitored_refcount.h
@@ -6,6 +6,7 @@
namespace proton {
+class RetainGuard;
/*
* Class containing a reference count that can be waited on to become zero.
* Typically ancestor or member of a class that has to be careful of when
@@ -16,13 +17,44 @@ class MonitoredRefCount
std::mutex _lock;
std::condition_variable _cv;
uint32_t _refCount;
-
+ void retain() noexcept;
+ void release() noexcept;
+ friend RetainGuard;
public:
MonitoredRefCount();
virtual ~MonitoredRefCount();
- void retain();
- void release();
void waitForZeroRefCount();
};
+class RetainGuard {
+public:
+ RetainGuard(MonitoredRefCount & refCount) noexcept
+ : _refCount(&refCount)
+ {
+ _refCount->retain();
+ }
+ RetainGuard(const RetainGuard & rhs) = delete;
+ RetainGuard & operator=(const RetainGuard & rhs) = delete;
+ RetainGuard(RetainGuard && rhs) noexcept
+ : _refCount(rhs._refCount)
+ {
+ rhs._refCount = nullptr;
+ }
+ RetainGuard & operator=(RetainGuard && rhs) noexcept {
+ release();
+ _refCount = rhs._refCount;
+ rhs._refCount = nullptr;
+ return *this;
+ }
+ ~RetainGuard() { release(); }
+private:
+ void release() noexcept {
+ if (_refCount != nullptr) {
+ _refCount->release();
+ _refCount = nullptr;
+ }
+ }
+ MonitoredRefCount * _refCount;
+};
+
} // namespace proton
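A minimal usage sketch of the RetainGuard RAII helper introduced above (the caller code is hypothetical; only the MonitoredRefCount/RetainGuard API shown in this diff is assumed). The guard retains on construction and releases when destroyed or moved-from, so waitForZeroRefCount() can serve as a shutdown barrier:

#include <vespa/searchcore/proton/common/monitored_refcount.h>
#include <thread>

// Hypothetical caller, not part of the patch.
void start_background_work(proton::MonitoredRefCount &refs) {
    proton::RetainGuard guard(refs);             // ++refCount
    std::thread([g = std::move(guard)]() {
        // ... background work; the refcount stays non-zero while 'g' is alive ...
    }).detach();                                 // --refCount when 'g' is destroyed
}

void shutdown(proton::MonitoredRefCount &refs) {
    refs.waitForZeroRefCount();  // blocks until every outstanding guard is gone
}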
diff --git a/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastoreattribute.cpp b/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastoreattribute.cpp
index fa8097419a5..6c5dbb8d258 100644
--- a/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastoreattribute.cpp
+++ b/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastoreattribute.cpp
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "documentmetastoreattribute.h"
-#include <vespa/vespalib/util/exceptions.h>
namespace proton {
diff --git a/searchcore/src/vespa/searchcore/proton/matching/sessionmanager.cpp b/searchcore/src/vespa/searchcore/proton/matching/sessionmanager.cpp
index cf3a788ef7d..a1da4422f77 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/sessionmanager.cpp
+++ b/searchcore/src/vespa/searchcore/proton/matching/sessionmanager.cpp
@@ -173,7 +173,7 @@ SessionManager::SessionManager(uint32_t maxSize)
_search_map(std::make_unique<SearchSessionCache>()) {
}
-SessionManager::~SessionManager() { }
+SessionManager::~SessionManager() = default;
void SessionManager::insert(search::grouping::GroupingSession::UP session) {
_grouping_cache->insert(std::move(session));
diff --git a/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.cpp b/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.cpp
index e032cc1cef6..713a03810b9 100644
--- a/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.cpp
+++ b/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.cpp
@@ -272,6 +272,7 @@ DocumentDBTaggedMetrics::DocumentDBTaggedMetrics(const vespalib::string &docType
bucketMove(this),
totalMemoryUsage(this),
totalDiskUsage("disk_usage", {}, "The total disk usage (in bytes) for this document db", this),
+ heart_beat_age("heart_beat_age", {}, "How long ago (in seconds) the heart beat maintenance job was last run", this),
maxNumThreads(maxNumThreads_)
{
}
diff --git a/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h b/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h
index c1bc5633d19..7f2e095a39b 100644
--- a/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h
+++ b/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h
@@ -205,6 +205,7 @@ struct DocumentDBTaggedMetrics : metrics::MetricSet
BucketMoveMetrics bucketMove;
MemoryUsageMetrics totalMemoryUsage;
metrics::LongValueMetric totalDiskUsage;
+ metrics::DoubleValueMetric heart_beat_age;
size_t maxNumThreads;
DocumentDBTaggedMetrics(const vespalib::string &docTypeName, size_t maxNumThreads_);
diff --git a/searchcore/src/vespa/searchcore/proton/reference/document_db_reference_resolver.cpp b/searchcore/src/vespa/searchcore/proton/reference/document_db_reference_resolver.cpp
index fa531385d52..5ffd474834d 100644
--- a/searchcore/src/vespa/searchcore/proton/reference/document_db_reference_resolver.cpp
+++ b/searchcore/src/vespa/searchcore/proton/reference/document_db_reference_resolver.cpp
@@ -125,11 +125,9 @@ DocumentDBReferenceResolver::listenToGidToLidChanges(const IAttributeManager &at
auto &attr = *attrSP;
vespalib::string docTypeName = getTargetDocTypeName(attr.getName(), _thisDocType);
GidToLidChangeRegistrator &registrator = getRegistrator(docTypeName);
- auto listener = std::make_unique<GidToLidChangeListener>(_attributeFieldWriter,
- attrSP,
- _refCount,
- attr.getName(),
- _thisDocType.getName());
+ auto listener = std::make_unique<GidToLidChangeListener>(_attributeFieldWriter, attrSP,
+ RetainGuard(_refCount),
+ attr.getName(), _thisDocType.getName());
registrator.addListener(std::move(listener));
}
}
@@ -159,7 +157,6 @@ DocumentDBReferenceResolver::DocumentDBReferenceResolver(const IDocumentDBRefere
const DocumentType &thisDocType,
const ImportedFieldsConfig &importedFieldsCfg,
const document::DocumentType &prevThisDocType,
-
MonitoredRefCount &refCount,
ISequencedTaskExecutor &attributeFieldWriter,
bool useReferences)
diff --git a/searchcore/src/vespa/searchcore/proton/reference/gid_to_lid_change_listener.cpp b/searchcore/src/vespa/searchcore/proton/reference/gid_to_lid_change_listener.cpp
index c54346defd5..cc26aad5f5b 100644
--- a/searchcore/src/vespa/searchcore/proton/reference/gid_to_lid_change_listener.cpp
+++ b/searchcore/src/vespa/searchcore/proton/reference/gid_to_lid_change_listener.cpp
@@ -7,22 +7,20 @@ namespace proton {
GidToLidChangeListener::GidToLidChangeListener(vespalib::ISequencedTaskExecutor &attributeFieldWriter,
std::shared_ptr<search::attribute::ReferenceAttribute> attr,
- MonitoredRefCount &refCount,
+ RetainGuard retainGuard,
const vespalib::string &name,
const vespalib::string &docTypeName)
: _attributeFieldWriter(attributeFieldWriter),
_executorId(_attributeFieldWriter.getExecutorIdFromName(attr->getNamePrefix())),
_attr(std::move(attr)),
- _refCount(refCount),
+ _retainGuard(std::move(retainGuard)),
_name(name),
_docTypeName(docTypeName)
-{
- _refCount.retain();
-}
+{ }
+
GidToLidChangeListener::~GidToLidChangeListener()
{
_attributeFieldWriter.sync();
- _refCount.release();
}
void
diff --git a/searchcore/src/vespa/searchcore/proton/reference/gid_to_lid_change_listener.h b/searchcore/src/vespa/searchcore/proton/reference/gid_to_lid_change_listener.h
index ae53e674c24..0a1026319a3 100644
--- a/searchcore/src/vespa/searchcore/proton/reference/gid_to_lid_change_listener.h
+++ b/searchcore/src/vespa/searchcore/proton/reference/gid_to_lid_change_listener.h
@@ -18,14 +18,14 @@ class GidToLidChangeListener : public IGidToLidChangeListener
vespalib::ISequencedTaskExecutor &_attributeFieldWriter;
vespalib::ISequencedTaskExecutor::ExecutorId _executorId;
std::shared_ptr<search::attribute::ReferenceAttribute> _attr;
- MonitoredRefCount &_refCount;
+ RetainGuard _retainGuard;
vespalib::string _name;
vespalib::string _docTypeName;
public:
GidToLidChangeListener(vespalib::ISequencedTaskExecutor &attributeFieldWriter,
std::shared_ptr<search::attribute::ReferenceAttribute> attr,
- MonitoredRefCount &refCount,
+ RetainGuard refCount,
const vespalib::string &name,
const vespalib::string &docTypeName);
~GidToLidChangeListener() override;
diff --git a/searchcore/src/vespa/searchcore/proton/server/blockable_maintenance_job.cpp b/searchcore/src/vespa/searchcore/proton/server/blockable_maintenance_job.cpp
index ce3bd3b8e9b..942e6c14a5f 100644
--- a/searchcore/src/vespa/searchcore/proton/server/blockable_maintenance_job.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/blockable_maintenance_job.cpp
@@ -91,6 +91,13 @@ BlockableMaintenanceJob::unBlock(BlockedReason reason)
}
}
+void
+BlockableMaintenanceJob::onStop()
+{
+ LockGuard guard(_mutex);
+ _runner = nullptr;
+}
+
bool
BlockableMaintenanceJob::isBlocked() const
{
diff --git a/searchcore/src/vespa/searchcore/proton/server/blockable_maintenance_job.h b/searchcore/src/vespa/searchcore/proton/server/blockable_maintenance_job.h
index f20511bc417..adf6f2f3b6c 100644
--- a/searchcore/src/vespa/searchcore/proton/server/blockable_maintenance_job.h
+++ b/searchcore/src/vespa/searchcore/proton/server/blockable_maintenance_job.h
@@ -49,12 +49,13 @@ public:
bool isBlocked(BlockedReason reason);
void considerRun();
+ void onStop() override;
void setBlocked(BlockedReason reason) override;
void unBlock(BlockedReason reason) override;
bool isBlocked() const override;
void registerRunner(IMaintenanceJobRunner *runner) override { _runner = runner; }
IMoveOperationLimiter & getLimiter() { return *_moveOpsLimiter; }
-
+ const IMoveOperationLimiter & getLimiter() const { return *_moveOpsLimiter; }
};
}
diff --git a/searchcore/src/vespa/searchcore/proton/server/bucketmovejob.h b/searchcore/src/vespa/searchcore/proton/server/bucketmovejob.h
index 9c1e92e2fec..7ef1a491667 100644
--- a/searchcore/src/vespa/searchcore/proton/server/bucketmovejob.h
+++ b/searchcore/src/vespa/searchcore/proton/server/bucketmovejob.h
@@ -125,7 +125,6 @@ public:
// IMaintenanceJob API
bool run() override;
- void onStop() override { }
// IClusterStateChangedHandler API
void notifyClusterStateChanged(const std::shared_ptr<IBucketStateCalculator> &newCalc) override;
diff --git a/searchcore/src/vespa/searchcore/proton/server/bucketmovejobv2.cpp b/searchcore/src/vespa/searchcore/proton/server/bucketmovejobv2.cpp
index f0847ff051e..9e34462ae21 100644
--- a/searchcore/src/vespa/searchcore/proton/server/bucketmovejobv2.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/bucketmovejobv2.cpp
@@ -54,6 +54,7 @@ blockedDueToClusterState(const std::shared_ptr<IBucketStateCalculator> &calc)
}
BucketMoveJobV2::BucketMoveJobV2(const std::shared_ptr<IBucketStateCalculator> &calc,
+ RetainGuard dbRetainer,
IDocumentMoveHandler &moveHandler,
IBucketModifiedHandler &modifiedHandler,
IThreadService & master,
@@ -74,6 +75,7 @@ BucketMoveJobV2::BucketMoveJobV2(const std::shared_ptr<IBucketStateCalculator> &
IDiskMemUsageListener(),
std::enable_shared_from_this<BucketMoveJobV2>(),
_calc(calc),
+ _dbRetainer(std::move(dbRetainer)),
_moveHandler(moveHandler),
_modifiedHandler(modifiedHandler),
_master(master),
@@ -112,6 +114,33 @@ BucketMoveJobV2::~BucketMoveJobV2()
_diskMemUsageNotifier.removeDiskMemUsageListener(this);
}
+std::shared_ptr<BucketMoveJobV2>
+BucketMoveJobV2::create(const std::shared_ptr<IBucketStateCalculator> &calc,
+ RetainGuard dbRetainer,
+ IDocumentMoveHandler &moveHandler,
+ IBucketModifiedHandler &modifiedHandler,
+ IThreadService & master,
+ BucketExecutor & bucketExecutor,
+ const MaintenanceDocumentSubDB &ready,
+ const MaintenanceDocumentSubDB &notReady,
+ bucketdb::IBucketCreateNotifier &bucketCreateNotifier,
+ IClusterStateChangedNotifier &clusterStateChangedNotifier,
+ IBucketStateChangedNotifier &bucketStateChangedNotifier,
+ IDiskMemUsageNotifier &diskMemUsageNotifier,
+ const BlockableMaintenanceJobConfig &blockableConfig,
+ const vespalib::string &docTypeName,
+ document::BucketSpace bucketSpace)
+{
+ return std::shared_ptr<BucketMoveJobV2>(
+ new BucketMoveJobV2(calc, std::move(dbRetainer), moveHandler, modifiedHandler, master, bucketExecutor, ready, notReady,
+ bucketCreateNotifier, clusterStateChangedNotifier, bucketStateChangedNotifier,
+ diskMemUsageNotifier, blockableConfig, docTypeName, bucketSpace),
+ [&master](auto job) {
+ auto failed = master.execute(makeLambdaTask([job]() { delete job; }));
+ assert(!failed);
+ });
+}
+
BucketMoveJobV2::NeedResult
BucketMoveJobV2::needMove(const ScanIterator &itr) const {
NeedResult noMove(false, false);
@@ -151,19 +180,16 @@ BucketMoveJobV2::needMove(const ScanIterator &itr) const {
class BucketMoveJobV2::StartMove : public storage::spi::BucketTask {
public:
using IDestructorCallbackSP = std::shared_ptr<vespalib::IDestructorCallback>;
- StartMove(std::shared_ptr<BucketMoveJobV2> job, std::shared_ptr<BucketMover> mover,
- std::vector<BucketMover::MoveKey> keys,
- IDestructorCallbackSP opsTracker)
+ StartMove(std::shared_ptr<BucketMoveJobV2> job, BucketMover::MoveKeys keys, IDestructorCallbackSP opsTracker)
: _job(job),
- _mover(std::move(mover)),
_keys(std::move(keys)),
_opsTracker(std::move(opsTracker))
{}
void run(const Bucket &bucket, IDestructorCallbackSP onDone) override {
- assert(_mover->getBucket() == bucket.getBucketId());
+ assert(_keys.mover().getBucket() == bucket.getBucketId());
using DoneContext = vespalib::KeepAlive<std::pair<IDestructorCallbackSP, IDestructorCallbackSP>>;
- BucketMoveJobV2::prepareMove(std::move(_job), std::move(_mover), std::move(_keys),
+ BucketMoveJobV2::prepareMove(std::move(_job), std::move(_keys),
std::make_shared<DoneContext>(std::make_pair(std::move(_opsTracker), std::move(onDone))));
}
@@ -172,15 +198,15 @@ public:
}
private:
- std::shared_ptr<BucketMoveJobV2> _job;
- std::shared_ptr<BucketMover> _mover;
- std::vector<BucketMover::MoveKey> _keys;
- IDestructorCallbackSP _opsTracker;
+ std::shared_ptr<BucketMoveJobV2> _job;
+ BucketMover::MoveKeys _keys;
+ IDestructorCallbackSP _opsTracker;
};
void
BucketMoveJobV2::failOperation(std::shared_ptr<BucketMoveJobV2> job, BucketId bucketId) {
auto & master = job->_master;
+ if (job->_stopped) return;
master.execute(makeLambdaTask([job=std::move(job), bucketId]() {
if (job->_stopped.load(std::memory_order_relaxed)) return;
job->considerBucket(job->_ready.meta_store()->getBucketDB().takeGuard(), bucketId);
@@ -188,35 +214,38 @@ BucketMoveJobV2::failOperation(std::shared_ptr<BucketMoveJobV2> job, BucketId bu
}
void
-BucketMoveJobV2::startMove(BucketMoverSP mover, size_t maxDocsToMove) {
- auto [keys, done] = mover->getKeysToMove(maxDocsToMove);
+BucketMoveJobV2::startMove(BucketMover & mover, size_t maxDocsToMove) {
+ auto [keys, done] = mover.getKeysToMove(maxDocsToMove);
if (done) {
- mover->setAllScheduled();
+ mover.setAllScheduled();
}
if (keys.empty()) return;
- mover->updateLastValidGid(keys.back()._gid);
- Bucket spiBucket(document::Bucket(_bucketSpace, mover->getBucket()));
- auto bucketTask = std::make_unique<StartMove>(shared_from_this(), std::move(mover), std::move(keys), getLimiter().beginOperation());
+ mover.updateLastValidGid(keys.back()._gid);
+ Bucket spiBucket(document::Bucket(_bucketSpace, mover.getBucket()));
+ auto bucketTask = std::make_unique<StartMove>(shared_from_this(), std::move(keys), getLimiter().beginOperation());
_bucketExecutor.execute(spiBucket, std::move(bucketTask));
}
void
-BucketMoveJobV2::prepareMove(std::shared_ptr<BucketMoveJobV2> job, BucketMoverSP mover, std::vector<MoveKey> keys, IDestructorCallbackSP onDone)
+BucketMoveJobV2::prepareMove(std::shared_ptr<BucketMoveJobV2> job, BucketMover::MoveKeys keys, IDestructorCallbackSP onDone)
{
- auto moveOps = mover->createMoveOperations(std::move(keys));
+ if (job->_stopped) return; //TODO Remove once lidtracker is no longer in use.
+ auto moveOps = keys.createMoveOperations();
auto & master = job->_master;
- master.execute(makeLambdaTask([job=std::move(job), mover=std::move(mover), moveOps=std::move(moveOps), onDone=std::move(onDone)]() mutable {
+ if (job->_stopped) return;
+ master.execute(makeLambdaTask([job=std::move(job), moveOps=std::move(moveOps), onDone=std::move(onDone)]() mutable {
if (job->_stopped.load(std::memory_order_relaxed)) return;
- job->completeMove(std::move(mover), std::move(moveOps), std::move(onDone));
+ job->completeMove(std::move(moveOps), std::move(onDone));
}));
}
void
-BucketMoveJobV2::completeMove(BucketMoverSP mover, GuardedMoveOps ops, IDestructorCallbackSP onDone) {
- mover->moveDocuments(std::move(ops.success), std::move(onDone));
- ops.failed.clear();
- if (checkIfMoverComplete(*mover)) {
- reconsiderBucket(_ready.meta_store()->getBucketDB().takeGuard(), mover->getBucket());
+BucketMoveJobV2::completeMove(GuardedMoveOps ops, IDestructorCallbackSP onDone) {
+ BucketMover & mover = ops.mover();
+ mover.moveDocuments(std::move(ops.success()), std::move(onDone));
+ ops.failed().clear();
+ if (checkIfMoverComplete(mover)) {
+ reconsiderBucket(_ready.meta_store()->getBucketDB().takeGuard(), mover.getBucket());
}
}
@@ -303,7 +332,7 @@ BucketMoveJobV2::createMover(BucketId bucket, bool wantReady) {
const MaintenanceDocumentSubDB &target(wantReady ? _ready : _notReady);
LOG(debug, "checkBucket(): mover.setupForBucket(%s, source:%u, target:%u)",
bucket.toString().c_str(), source.sub_db_id(), target.sub_db_id());
- return std::make_shared<BucketMover>(bucket, &source, target.sub_db_id(), _moveHandler);
+ return BucketMover::create(bucket, &source, target.sub_db_id(), _moveHandler);
}
std::shared_ptr<BucketMover>
@@ -324,12 +353,12 @@ BucketMoveJobV2::moveDocs(size_t maxDocsToMove) {
// Select mover
size_t index = _iterateCount++ % _movers.size();
- const auto & mover = _movers[index];
+ auto & mover = *_movers[index];
//Move, or reduce movers as we are tailing off
- if (!mover->allScheduled()) {
+ if (!mover.allScheduled()) {
startMove(mover, maxDocsToMove);
- if (mover->allScheduled()) {
+ if (mover.allScheduled()) {
_movers.erase(_movers.begin() + index);
}
}
@@ -422,6 +451,7 @@ BucketMoveJobV2::notifyDiskMemUsage(DiskMemUsageState state)
void
BucketMoveJobV2::onStop() {
// Called by master write thread
+ BlockableMaintenanceJob::onStop();
_stopped = true;
}
@@ -431,7 +461,7 @@ BucketMoveJobV2::updatePending() {
}
void
-BucketMoveJobV2::updateMetrics(DocumentDBTaggedMetrics & metrics) {
+BucketMoveJobV2::updateMetrics(DocumentDBTaggedMetrics & metrics) const {
// This is an overestimate to ensure we do not count down to zero until everything has been completed and acked.
metrics.bucketMove.bucketsPending.set(_bucketsPending.load(std::memory_order_relaxed) +
getLimiter().numPending());
diff --git a/searchcore/src/vespa/searchcore/proton/server/bucketmovejobv2.h b/searchcore/src/vespa/searchcore/proton/server/bucketmovejobv2.h
index 6a9f28f0a01..df75c8c9766 100644
--- a/searchcore/src/vespa/searchcore/proton/server/bucketmovejobv2.h
+++ b/searchcore/src/vespa/searchcore/proton/server/bucketmovejobv2.h
@@ -10,6 +10,8 @@
#include "maintenancedocumentsubdb.h"
#include <vespa/searchcore/proton/bucketdb/bucketscaniterator.h>
#include <vespa/searchcore/proton/bucketdb/i_bucket_create_listener.h>
+#include <vespa/searchcore/proton/common/monitored_refcount.h>
+
namespace storage::spi { struct BucketExecutor; }
namespace searchcorespi::index { struct IThreadService; }
@@ -55,9 +57,9 @@ private:
using BucketMoverSP = std::shared_ptr<BucketMover>;
using Bucket2Mover = std::map<BucketId, BucketMoverSP>;
using Movers = std::vector<BucketMoverSP>;
- using MoveKey = BucketMover::MoveKey;
using GuardedMoveOps = BucketMover::GuardedMoveOps;
std::shared_ptr<IBucketStateCalculator> _calc;
+ RetainGuard _dbRetainer;
IDocumentMoveHandler &_moveHandler;
IBucketModifiedHandler &_modifiedHandler;
IThreadService &_master;
@@ -79,6 +81,7 @@ private:
IDiskMemUsageNotifier &_diskMemUsageNotifier;
BucketMoveJobV2(const std::shared_ptr<IBucketStateCalculator> &calc,
+ RetainGuard dbRetainer,
IDocumentMoveHandler &moveHandler,
IBucketModifiedHandler &modifiedHandler,
IThreadService & master,
@@ -93,10 +96,9 @@ private:
const vespalib::string &docTypeName,
document::BucketSpace bucketSpace);
- void startMove(BucketMoverSP mover, size_t maxDocsToMove);
- static void prepareMove(std::shared_ptr<BucketMoveJobV2> job, BucketMoverSP mover,
- std::vector<MoveKey> keysToMove, IDestructorCallbackSP context);
- void completeMove(BucketMoverSP mover, GuardedMoveOps moveOps, IDestructorCallbackSP context);
+ void startMove(BucketMover & mover, size_t maxDocsToMove);
+ static void prepareMove(std::shared_ptr<BucketMoveJobV2> job, BucketMover::MoveKeys keys, IDestructorCallbackSP context);
+ void completeMove(GuardedMoveOps moveOps, IDestructorCallbackSP context);
bool checkIfMoverComplete(const BucketMover & mover);
void considerBucket(const bucketdb::Guard & guard, BucketId bucket);
void reconsiderBucket(const bucketdb::Guard & guard, BucketId bucket);
@@ -114,6 +116,7 @@ private:
public:
static std::shared_ptr<BucketMoveJobV2>
create(const std::shared_ptr<IBucketStateCalculator> &calc,
+ RetainGuard dbRetainer,
IDocumentMoveHandler &moveHandler,
IBucketModifiedHandler &modifiedHandler,
IThreadService & master,
@@ -126,13 +129,7 @@ public:
IDiskMemUsageNotifier &diskMemUsageNotifier,
const BlockableMaintenanceJobConfig &blockableConfig,
const vespalib::string &docTypeName,
- document::BucketSpace bucketSpace)
- {
- return std::shared_ptr<BucketMoveJobV2>(
- new BucketMoveJobV2(calc, moveHandler, modifiedHandler, master, bucketExecutor, ready, notReady,
- bucketCreateNotifier, clusterStateChangedNotifier, bucketStateChangedNotifier,
- diskMemUsageNotifier, blockableConfig, docTypeName, bucketSpace));
- }
+ document::BucketSpace bucketSpace);
~BucketMoveJobV2() override;
@@ -146,7 +143,7 @@ public:
void notifyDiskMemUsage(DiskMemUsageState state) override;
void notifyCreateBucket(const bucketdb::Guard & guard, const BucketId &bucket) override;
void onStop() override;
- void updateMetrics(DocumentDBTaggedMetrics & metrics) override;
+ void updateMetrics(DocumentDBTaggedMetrics & metrics) const override;
};
} // namespace proton
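Both BucketMoveJobV2::create() above and CompactionJob::create() further down construct the job with a custom shared_ptr deleter: whichever thread drops the last reference, the actual delete is re-posted to the master thread. A stripped-down sketch of that pattern (Job is a stand-in for the concrete job type; IThreadService and makeLambdaTask are the ones already used in this diff):

#include <vespa/vespalib/util/lambdatask.h>
#include <vespa/searchcorespi/index/i_thread_service.h>
#include <cassert>
#include <memory>

struct Job { /* stands in for BucketMoveJobV2 / CompactionJob */ };

std::shared_ptr<Job>
create_job(searchcorespi::index::IThreadService &master)
{
    return std::shared_ptr<Job>(new Job(),
        [&master](Job *job) {
            // The last owner may release on any thread; re-post the delete so
            // teardown always happens on the master thread.
            auto failed = master.execute(vespalib::makeLambdaTask([job]() { delete job; }));
            assert(!failed);
        });
}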
diff --git a/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_forwarder.cpp b/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_forwarder.cpp
index 39716f93bc6..b75e3455162 100644
--- a/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_forwarder.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_forwarder.cpp
@@ -2,7 +2,6 @@
#include "disk_mem_usage_forwarder.h"
#include <vespa/vespalib/util/lambdatask.h>
-#include <cassert>
using vespalib::makeLambdaTask;
@@ -14,17 +13,14 @@ DiskMemUsageForwarder::DiskMemUsageForwarder(searchcorespi::index::IThreadServic
_executor(executor),
_listeners(),
_state()
-{
-}
+{ }
-DiskMemUsageForwarder::~DiskMemUsageForwarder()
-{
-}
+DiskMemUsageForwarder::~DiskMemUsageForwarder() = default;
void
DiskMemUsageForwarder::addDiskMemUsageListener(IDiskMemUsageListener *listener)
{
- assert(_executor.isCurrentThread());
+ std::lock_guard guard(_lock);
_listeners.push_back(listener);
listener->notifyDiskMemUsage(_state);
}
@@ -32,7 +28,7 @@ DiskMemUsageForwarder::addDiskMemUsageListener(IDiskMemUsageListener *listener)
void
DiskMemUsageForwarder::removeDiskMemUsageListener(IDiskMemUsageListener *listener)
{
- assert(_executor.isCurrentThread());
+ std::lock_guard guard(_lock);
for (auto itr = _listeners.begin(); itr != _listeners.end(); ++itr) {
if (*itr == listener) {
_listeners.erase(itr);
@@ -51,6 +47,7 @@ DiskMemUsageForwarder::notifyDiskMemUsage(DiskMemUsageState state)
void
DiskMemUsageForwarder::forward(DiskMemUsageState state)
{
+ std::lock_guard guard(_lock);
if (_state != state) {
_state = state;
for (const auto &listener : _listeners) {
diff --git a/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_forwarder.h b/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_forwarder.h
index 618b9af91eb..431bbad4675 100644
--- a/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_forwarder.h
+++ b/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_forwarder.h
@@ -6,6 +6,7 @@
#include "i_disk_mem_usage_listener.h"
#include <vespa/searchcorespi/index/i_thread_service.h>
#include <vector>
+#include <mutex>
namespace proton {
@@ -18,6 +19,7 @@ class DiskMemUsageForwarder : public IDiskMemUsageNotifier,
{
searchcorespi::index::IThreadService &_executor;
std::vector<IDiskMemUsageListener *> _listeners;
+ std::mutex _lock;
DiskMemUsageState _state;
void forward(DiskMemUsageState state);
public:
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentbucketmover.cpp b/searchcore/src/vespa/searchcore/proton/server/documentbucketmover.cpp
index f08cd4d7ab7..d0581a5b13f 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentbucketmover.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/documentbucketmover.cpp
@@ -52,6 +52,26 @@ BucketMover::MoveKey::MoveKey(uint32_t lid, const document::GlobalId &gid, Times
BucketMover::MoveKey::~MoveKey() = default;
+BucketMover::MoveKeys::~MoveKeys() = default;
+
+std::shared_ptr<BucketMover>
+BucketMover::MoveKeys::stealMover() {
+ return std::move(_mover);
+}
+
+BucketMover::GuardedMoveOps
+BucketMover::MoveKeys::createMoveOperations() {
+ auto & mover = *_mover;
+ return mover.createMoveOperations(std::move(*this));
+}
+
+BucketMover::GuardedMoveOps::GuardedMoveOps(std::shared_ptr<BucketMover> mover) noexcept
+ : _mover(std::move(mover)),
+ _success(),
+ _failed()
+{}
+BucketMover::GuardedMoveOps::~GuardedMoveOps() = default;
+
BucketMover::BucketMover(const BucketId &bucket, const MaintenanceDocumentSubDB *source,
uint32_t targetSubDbId, IDocumentMoveHandler &handler) noexcept
: _source(source),
@@ -70,9 +90,9 @@ BucketMover::~BucketMover() {
assert(inSync());
}
-std::pair<std::vector<BucketMover::MoveKey>, bool>
+std::pair<BucketMover::MoveKeys, bool>
BucketMover::getKeysToMove(size_t maxDocsToMove) {
- std::pair<std::vector<BucketMover::MoveKey>, bool> result;
+ std::pair<MoveKeys, bool> result(MoveKeys(shared_from_this()), false);
Iterator itr = (_lastGidValid ? _source->meta_store()->upperBound(_lastGid)
: _source->meta_store()->lowerBound(_bucket));
const Iterator end = _source->meta_store()->upperBound(_bucket);
@@ -81,7 +101,7 @@ BucketMover::getKeysToMove(size_t maxDocsToMove) {
uint32_t lid = itr.getKey().get_lid();
const RawDocumentMetaData &metaData = _source->meta_store()->getRawMetaData(lid);
if (metaData.getBucketUsedBits() == _bucket.getUsedBits()) {
- result.first.emplace_back(lid, metaData.getGid(), metaData.getTimestamp(), MoveGuard(*this));
+ result.first.keys().emplace_back(lid, metaData.getGid(), metaData.getTimestamp(), MoveGuard(*this));
++docsMoved;
}
}
@@ -90,22 +110,22 @@ BucketMover::getKeysToMove(size_t maxDocsToMove) {
}
BucketMover::GuardedMoveOps
-BucketMover::createMoveOperations(std::vector<MoveKey> toMove) {
- GuardedMoveOps moveOps;
- moveOps.success.reserve(toMove.size());
- for (MoveKey &key : toMove) {
- if (moveOps.failed.empty()) {
+BucketMover::createMoveOperations(MoveKeys toMove) {
+ GuardedMoveOps moveOps(toMove.stealMover());
+ moveOps.success().reserve(toMove.size());
+ for (MoveKey &key : toMove.keys()) {
+ if (moveOps.failed().empty()) {
auto moveOp = createMoveOperation(key);
if (moveOp) {
- moveOps.success.emplace_back(std::move(moveOp), std::move(key._guard));
+ moveOps.success().emplace_back(std::move(moveOp), std::move(key._guard));
} else {
- moveOps.failed.push_back(std::move(key._guard));
+ moveOps.failed().push_back(std::move(key._guard));
}
} else {
- moveOps.failed.push_back(std::move(key._guard));
+ moveOps.failed().push_back(std::move(key._guard));
}
}
- if ( ! moveOps.failed.empty()) {
+ if ( ! moveOps.failed().empty()) {
_needReschedule.store(true, std::memory_order_relaxed);
}
return moveOps;
@@ -141,7 +161,7 @@ void
DocumentBucketMover::setupForBucket(const document::BucketId &bucket, const MaintenanceDocumentSubDB *source,
uint32_t targetSubDbId, IDocumentMoveHandler &handler)
{
- _impl = std::make_unique<BucketMover>(bucket, source, targetSubDbId, handler);
+ _impl = BucketMover::create(bucket, source, targetSubDbId, handler);
}
bool
@@ -157,15 +177,15 @@ DocumentBucketMover::moveDocuments(size_t maxDocsToMove, IMoveOperationLimiter &
}
auto [keys, done] = _impl->getKeysToMove(maxDocsToMove);
auto moveOps = _impl->createMoveOperations(std::move(keys));
- bool allOk = moveOps.failed.empty();
+ bool allOk = moveOps.failed().empty();
if (done && allOk) {
_impl->setAllScheduled();
}
- if (moveOps.success.empty()) return allOk;
+ if (moveOps.success().empty()) return allOk;
- _impl->updateLastValidGid(moveOps.success.back().first->getDocument()->getId().getGlobalId());
+ _impl->updateLastValidGid(moveOps.success().back().first->getDocument()->getId().getGlobalId());
- for (auto & moveOp : moveOps.success) {
+ for (auto & moveOp : moveOps.success()) {
// We cache the bucket for the document we are going to move to avoid getting
// inconsistent bucket info (getBucketInfo()) while moving between ready and not-ready
// sub dbs as the bucket info is not updated atomically in this case.
@@ -176,4 +196,11 @@ DocumentBucketMover::moveDocuments(size_t maxDocsToMove, IMoveOperationLimiter &
return allOk;
}
+std::shared_ptr<BucketMover>
+BucketMover::create(const document::BucketId &bucket, const MaintenanceDocumentSubDB *source,
+ uint32_t targetSubDbId, IDocumentMoveHandler &handler)
+{
+ return std::shared_ptr<BucketMover>(new BucketMover(bucket, source, targetSubDbId, handler));
+}
+
}
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentbucketmover.h b/searchcore/src/vespa/searchcore/proton/server/documentbucketmover.h
index fc7760a4dc4..d150ebf7436 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentbucketmover.h
+++ b/searchcore/src/vespa/searchcore/proton/server/documentbucketmover.h
@@ -25,7 +25,7 @@ class BucketDBOwner;
* to a target sub database. The actual moving is handled by a given instance
* of IDocumentMoveHandler.
*/
-class BucketMover
+class BucketMover : public std::enable_shared_from_this<BucketMover>
{
public:
using MoveOperationUP = std::unique_ptr<MoveOperation>;
@@ -65,13 +65,48 @@ public:
};
using GuardedMoveOp = std::pair<MoveOperationUP, MoveGuard>;
- struct GuardedMoveOps {
- std::vector<GuardedMoveOp> success;
- std::vector<MoveGuard> failed;
+ class GuardedMoveOps {
+ public:
+ GuardedMoveOps(std::shared_ptr<BucketMover> mover) noexcept;
+ GuardedMoveOps(GuardedMoveOps &&) = default;
+ GuardedMoveOps & operator =(GuardedMoveOps &&) = default;
+ GuardedMoveOps(const GuardedMoveOps &) = delete;
+ GuardedMoveOps & operator = (const GuardedMoveOps &) = delete;
+ ~GuardedMoveOps();
+ std::vector<GuardedMoveOp> & success() { return _success; }
+ std::vector<MoveGuard> & failed() { return _failed; }
+ BucketMover & mover() { return *_mover; }
+ private:
+ // It is important to keep the order so the mover is destructed last
+ std::shared_ptr<BucketMover> _mover;
+ std::vector<GuardedMoveOp> _success;
+ std::vector<MoveGuard> _failed;
};
- BucketMover(const document::BucketId &bucket, const MaintenanceDocumentSubDB *source,
- uint32_t targetSubDbId, IDocumentMoveHandler &handler) noexcept;
+ class MoveKeys {
+ public:
+ MoveKeys(std::shared_ptr<BucketMover> mover) noexcept : _mover(std::move(mover)) {}
+ MoveKeys(MoveKeys &&) noexcept = default;
+ MoveKeys & operator =(MoveKeys &&) noexcept = default;
+ MoveKeys(const MoveKeys &) noexcept = delete;
+ MoveKeys & operator =(const MoveKeys &) noexcept = delete;
+ ~MoveKeys();
+ GuardedMoveOps createMoveOperations();
+ std::shared_ptr<BucketMover> stealMover();
+ std::vector<MoveKey> & keys() { return _keys; }
+ size_t size() const { return _keys.size(); }
+ bool empty() const { return _keys.empty(); }
+ const MoveKey & back() const { return _keys.back(); }
+ const BucketMover & mover() const { return *_mover; }
+ private:
+ // It is important to keep the order so the mover is destructed last
+ std::shared_ptr<BucketMover> _mover;
+ std::vector<MoveKey> _keys;
+ };
+
+ static std::shared_ptr<BucketMover>
+ create(const document::BucketId &bucket, const MaintenanceDocumentSubDB *source,
+ uint32_t targetSubDbId, IDocumentMoveHandler &handler);
BucketMover(BucketMover &&) noexcept = delete;
BucketMover & operator=(BucketMover &&) noexcept = delete;
BucketMover(const BucketMover &) = delete;
@@ -79,9 +114,9 @@ public:
~BucketMover();
/// Must be called in master thread
- std::pair<std::vector<MoveKey>, bool> getKeysToMove(size_t maxDocsToMove);
+ std::pair<MoveKeys, bool> getKeysToMove(size_t maxDocsToMove);
/// Call from any thread
- GuardedMoveOps createMoveOperations(std::vector<MoveKey> toMove);
+ GuardedMoveOps createMoveOperations(MoveKeys toMove);
/// Must be called in master thread
void moveDocuments(std::vector<GuardedMoveOp> moveOps, IDestructorCallbackSP onDone);
void moveDocument(MoveOperationUP moveOp, IDestructorCallbackSP onDone);
@@ -102,6 +137,8 @@ public:
return pending() == 0;
}
private:
+ BucketMover(const document::BucketId &bucket, const MaintenanceDocumentSubDB *source,
+ uint32_t targetSubDbId, IDocumentMoveHandler &handler) noexcept;
const MaintenanceDocumentSubDB *_source;
IDocumentMoveHandler *_handler;
const document::BucketId _bucket;
@@ -130,7 +167,7 @@ class DocumentBucketMover
private:
IMoveOperationLimiter &_limiter;
bucketdb::BucketDBOwner *_bucketDb;
- std::unique_ptr<bucketdb::BucketMover> _impl;
+ std::shared_ptr<bucketdb::BucketMover> _impl;
bool moveDocuments(size_t maxDocsToMove, IMoveOperationLimiter &limiter);
public:
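On the "It is important to keep the order so the mover is destructed last" comments above: C++ destroys data members in reverse declaration order, so declaring the owning shared_ptr<BucketMover> first guarantees the mover outlives the guards and keys it protects. A minimal, self-contained illustration (generic types, not project code):

#include <memory>
#include <vector>

struct Holder {
    std::shared_ptr<int> owner;    // declared first  -> destroyed last
    std::vector<int>     guarded;  // declared later  -> destroyed first, while 'owner' is still alive
};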
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp b/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
index d99e579b89f..3f3fad55bd6 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
@@ -110,8 +110,33 @@ DocumentDB::masterExecute(FunctionType &&function) {
_writeService.master().execute(makeLambdaTask(std::forward<FunctionType>(function)));
}
+DocumentDB::SP
+DocumentDB::create(const vespalib::string &baseDir,
+ DocumentDBConfig::SP currentSnapshot,
+ const vespalib::string &tlsSpec,
+ matching::QueryLimiter &queryLimiter,
+ const vespalib::Clock &clock,
+ const DocTypeName &docTypeName,
+ document::BucketSpace bucketSpace,
+ const ProtonConfig &protonCfg,
+ IDocumentDBOwner &owner,
+ vespalib::SyncableThreadExecutor &warmupExecutor,
+ vespalib::ThreadExecutor &sharedExecutor,
+ storage::spi::BucketExecutor &bucketExecutor,
+ const search::transactionlog::WriterFactory &tlsWriterFactory,
+ MetricsWireService &metricsWireService,
+ const search::common::FileHeaderContext &fileHeaderContext,
+ ConfigStore::UP config_store,
+ InitializeThreads initializeThreads,
+ const HwInfo &hwInfo)
+{
+ return DocumentDB::SP(
+ new DocumentDB(baseDir, std::move(currentSnapshot), tlsSpec, queryLimiter, clock, docTypeName, bucketSpace,
+ protonCfg, owner, warmupExecutor, sharedExecutor, bucketExecutor, tlsWriterFactory,
+ metricsWireService, fileHeaderContext, std::move(config_store), initializeThreads, hwInfo));
+}
DocumentDB::DocumentDB(const vespalib::string &baseDir,
- const DocumentDBConfig::SP &configSnapshot,
+ DocumentDBConfig::SP configSnapshot,
const vespalib::string &tlsSpec,
matching::QueryLimiter &queryLimiter,
const vespalib::Clock &clock,
@@ -120,7 +145,7 @@ DocumentDB::DocumentDB(const vespalib::string &baseDir,
const ProtonConfig &protonCfg,
IDocumentDBOwner &owner,
vespalib::SyncableThreadExecutor &warmupExecutor,
- vespalib::ThreadStackExecutorBase &sharedExecutor,
+ vespalib::ThreadExecutor &sharedExecutor,
storage::spi::BucketExecutor & bucketExecutor,
const search::transactionlog::WriterFactory &tlsWriterFactory,
MetricsWireService &metricsWireService,
@@ -134,6 +159,7 @@ DocumentDB::DocumentDB(const vespalib::string &baseDir,
IDocumentSubDBOwner(),
IClusterStateChangedHandler(),
search::transactionlog::SyncProxy(),
+ std::enable_shared_from_this<DocumentDB>(),
_docTypeName(docTypeName),
_bucketSpace(bucketSpace),
_baseDir(baseDir + "/" + _docTypeName.toString()),
@@ -172,7 +198,7 @@ DocumentDB::DocumentDB(const vespalib::string &baseDir,
metricsWireService, getMetrics(), queryLimiter, clock, _configMutex, _baseDir,
DocumentSubDBCollection::Config(protonCfg.numsearcherthreads),
hwInfo),
- _maintenanceController(_writeService.master(), sharedExecutor, _docTypeName),
+ _maintenanceController(_writeService.master(), sharedExecutor, _refCount, _docTypeName),
_jobTrackers(),
_calc(),
_metricsUpdater(_subDBs, _writeService, _jobTrackers, *_sessionManager, _writeFilter)
@@ -382,8 +408,8 @@ DocumentDB::applySubDBConfig(const DocumentDBConfig &newConfigSnapshot,
auto newRepo = newConfigSnapshot.getDocumentTypeRepoSP();
auto newDocType = newRepo->getDocumentType(_docTypeName.getName());
assert(newDocType != nullptr);
- DocumentDBReferenceResolver resolver(*registry, *newDocType, newConfigSnapshot.getImportedFieldsConfig(),
- *oldDocType, _refCount, _writeService.attributeFieldWriter(), _state.getAllowReconfig());
+ DocumentDBReferenceResolver resolver(*registry, *newDocType, newConfigSnapshot.getImportedFieldsConfig(), *oldDocType,
+ _refCount, _writeService.attributeFieldWriter(), _state.getAllowReconfig());
_subDBs.applyConfig(newConfigSnapshot, *_activeConfigSnapshot, serialNum, params, resolver);
}
@@ -525,13 +551,8 @@ DocumentDB::tearDownReferences()
auto repo = activeConfig->getDocumentTypeRepoSP();
auto docType = repo->getDocumentType(_docTypeName.getName());
assert(docType != nullptr);
- DocumentDBReferenceResolver resolver(*registry,
- *docType,
- activeConfig->getImportedFieldsConfig(),
- *docType,
- _refCount,
- _writeService.attributeFieldWriter(),
- false);
+ DocumentDBReferenceResolver resolver(*registry, *docType, activeConfig->getImportedFieldsConfig(), *docType,
+ _refCount, _writeService.attributeFieldWriter(), false);
_subDBs.tearDownReferences(resolver);
registry->remove(_docTypeName.getName());
}
@@ -545,14 +566,14 @@ DocumentDB::close()
_state.enterShutdownState();
_configCV.notify_all();
}
+ // Abort any ongoing maintenance
+ stopMaintenance();
_writeService.master().sync(); // Complete all tasks that didn't observe shutdown
masterExecute([this]() { tearDownReferences(); });
_writeService.master().sync();
// Wait until inflight feed operations to this document db has left.
// Caller should have removed document DB from feed router.
_refCount.waitForZeroRefCount();
- // Abort any ongoing maintenance
- stopMaintenance();
_writeService.sync();
@@ -933,7 +954,6 @@ DocumentDB::injectMaintenanceJobs(const DocumentDBMaintenanceConfig &config, std
*_feedHandler, // IOperationStorer
_maintenanceController, // IFrozenBucketHandler
_subDBs.getBucketCreateNotifier(),
- _docTypeName.getName(),
_bucketSpace,
*_feedHandler, // IPruneRemovedDocumentsHandler
*_feedHandler, // IDocumentMoveHandler
@@ -1051,6 +1071,13 @@ DocumentDB::updateMetrics(const metrics::MetricLockGuard & guard)
}
_metricsUpdater.updateMetrics(guard, _metrics);
_maintenanceController.updateMetrics(_metrics);
+ auto heart_beat_time = _feedHandler->get_heart_beat_time();
+ if (heart_beat_time != vespalib::steady_time()) {
+ vespalib::steady_time now = vespalib::steady_clock::now();
+ _metrics.heart_beat_age.set(vespalib::to_s(now - heart_beat_time));
+ } else {
+ _metrics.heart_beat_age.set(0.0);
+ }
}
void
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdb.h b/searchcore/src/vespa/searchcore/proton/server/documentdb.h
index dd3d821e291..ad0225d6f86 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentdb.h
+++ b/searchcore/src/vespa/searchcore/proton/server/documentdb.h
@@ -68,7 +68,8 @@ class DocumentDB : public DocumentDBConfigOwner,
public IFeedHandlerOwner,
public IDocumentSubDBOwner,
public IClusterStateChangedHandler,
- public search::transactionlog::SyncProxy
+ public search::transactionlog::SyncProxy,
+ public std::enable_shared_from_this<DocumentDB>
{
private:
using InitializeThreads = std::shared_ptr<vespalib::SyncableThreadExecutor>;
@@ -89,42 +90,42 @@ private:
typedef search::index::Schema Schema;
using lock_guard = std::lock_guard<std::mutex>;
// variables related to reconfig
- DocumentDBConfig::SP _initConfigSnapshot;
- SerialNum _initConfigSerialNum;
+ DocumentDBConfig::SP _initConfigSnapshot;
+ SerialNum _initConfigSerialNum;
vespalib::VarHolder<DocumentDBConfig::SP> _pendingConfigSnapshot;
- mutable std::mutex _configMutex; // protects _active* below.
- mutable std::condition_variable _configCV;
- DocumentDBConfig::SP _activeConfigSnapshot;
- int64_t _activeConfigSnapshotGeneration;
- const bool _validateAndSanitizeDocStore;
+ mutable std::mutex _configMutex; // protects _active* below.
+ mutable std::condition_variable _configCV;
+ DocumentDBConfig::SP _activeConfigSnapshot;
+ int64_t _activeConfigSnapshotGeneration;
+ const bool _validateAndSanitizeDocStore;
vespalib::Gate _initGate;
typedef DocumentDBConfig::ComparisonResult ConfigComparisonResult;
- ClusterStateHandler _clusterStateHandler;
- BucketHandler _bucketHandler;
- index::IndexConfig _indexCfg;
- ConfigStore::UP _config_store;
- std::shared_ptr<matching::SessionManager> _sessionManager; // TODO: This should not have to be a shared pointer.
- MetricsWireService &_metricsWireService;
- DocumentDBTaggedMetrics _metrics;
- std::unique_ptr<metrics::UpdateHook> _metricsHook;
- vespalib::VarHolder<IFeedView::SP> _feedView;
- MonitoredRefCount _refCount;
- bool _syncFeedViewEnabled;
- IDocumentDBOwner &_owner;
- storage::spi::BucketExecutor &_bucketExecutor;
- DDBState _state;
- DiskMemUsageForwarder _dmUsageForwarder;
- AttributeUsageFilter _writeFilter;
+ ClusterStateHandler _clusterStateHandler;
+ BucketHandler _bucketHandler;
+ index::IndexConfig _indexCfg;
+ ConfigStore::UP _config_store;
+ std::shared_ptr<matching::SessionManager> _sessionManager; // TODO: This should not have to be a shared pointer.
+ MetricsWireService &_metricsWireService;
+ DocumentDBTaggedMetrics _metrics;
+ std::unique_ptr<metrics::UpdateHook> _metricsHook;
+ vespalib::VarHolder<IFeedView::SP> _feedView;
+ MonitoredRefCount _refCount;
+ bool _syncFeedViewEnabled;
+ IDocumentDBOwner &_owner;
+ storage::spi::BucketExecutor &_bucketExecutor;
+ DDBState _state;
+ DiskMemUsageForwarder _dmUsageForwarder;
+ AttributeUsageFilter _writeFilter;
std::shared_ptr<TransientResourceUsageProvider> _transient_usage_provider;
- std::unique_ptr<FeedHandler> _feedHandler;
- DocumentSubDBCollection _subDBs;
- MaintenanceController _maintenanceController;
- DocumentDBJobTrackers _jobTrackers;
- std::shared_ptr<IBucketStateCalculator> _calc;
- DocumentDBMetricsUpdater _metricsUpdater;
+ std::unique_ptr<FeedHandler> _feedHandler;
+ DocumentSubDBCollection _subDBs;
+ MaintenanceController _maintenanceController;
+ DocumentDBJobTrackers _jobTrackers;
+ std::shared_ptr<IBucketStateCalculator> _calc;
+ DocumentDBMetricsUpdater _metricsUpdater;
void registerReference();
void setActiveConfig(const DocumentDBConfig::SP &config, int64_t generation);
@@ -205,26 +206,8 @@ private:
// Invokes initFinish() on self
friend class InitDoneTask;
-public:
- typedef std::unique_ptr<DocumentDB> UP;
- typedef std::shared_ptr<DocumentDB> SP;
-
- /**
- * Constructs a new document database for the given document type.
- *
- * @param baseDir The base directory to use for persistent data.
- * @param configId The config id used to subscribe to config for this
- * database.
- * @param tlsSpec The frt connection spec for the TLS.
- * @param docType The document type that this database will handle.
- * @param docMgrCfg Current document manager config
- * @param docMgrSP The document manager holding the document type.
- * @param protonCfg The global proton config this database is a part of.
- * @param tuneFileDocumentDB file tune config for this database.
- * @param config_store Access to read and write configs.
- */
DocumentDB(const vespalib::string &baseDir,
- const DocumentDBConfig::SP &currentSnapshot,
+ DocumentDBConfig::SP currentSnapshot,
const vespalib::string &tlsSpec,
matching::QueryLimiter &queryLimiter,
const vespalib::Clock &clock,
@@ -233,14 +216,46 @@ public:
const ProtonConfig &protonCfg,
IDocumentDBOwner &owner,
vespalib::SyncableThreadExecutor &warmupExecutor,
- vespalib::ThreadStackExecutorBase &sharedExecutor,
- storage::spi::BucketExecutor & bucketExecutor,
+ vespalib::ThreadExecutor &sharedExecutor,
+ storage::spi::BucketExecutor &bucketExecutor,
const search::transactionlog::WriterFactory &tlsWriterFactory,
MetricsWireService &metricsWireService,
const search::common::FileHeaderContext &fileHeaderContext,
ConfigStore::UP config_store,
InitializeThreads initializeThreads,
const HwInfo &hwInfo);
+public:
+ using SP = std::shared_ptr<DocumentDB>;
+
+ /**
+ * Constructs a new document database for the given document type.
+ *
+ * @param baseDir The base directory to use for persistent data.
+ * @param tlsSpec The frt connection spec for the TLS.
+ * @param docType The document type that this database will handle.
+ * @param docMgrSP The document manager holding the document type.
+ * @param protonCfg The global proton config this database is a part of.
+ * @param config_store Access to read and write configs.
+ */
+ static DocumentDB::SP
+ create(const vespalib::string &baseDir,
+ DocumentDBConfig::SP currentSnapshot,
+ const vespalib::string &tlsSpec,
+ matching::QueryLimiter &queryLimiter,
+ const vespalib::Clock &clock,
+ const DocTypeName &docTypeName,
+ document::BucketSpace bucketSpace,
+ const ProtonConfig &protonCfg,
+ IDocumentDBOwner &owner,
+ vespalib::SyncableThreadExecutor &warmupExecutor,
+ vespalib::ThreadExecutor &sharedExecutor,
+ storage::spi::BucketExecutor & bucketExecutor,
+ const search::transactionlog::WriterFactory &tlsWriterFactory,
+ MetricsWireService &metricsWireService,
+ const search::common::FileHeaderContext &fileHeaderContext,
+ ConfigStore::UP config_store,
+ InitializeThreads initializeThreads,
+ const HwInfo &hwInfo);
/**
* Expose a cost view of the session manager. This is used by the
@@ -370,8 +385,7 @@ public:
/**
* Reference counting
*/
- void retain() { _refCount.retain(); }
- void release() { _refCount.release(); }
+ RetainGuard retain() { return RetainGuard(_refCount); }
bool getDelayedConfig() const { return _state.getDelayedConfig(); }
void replayConfig(SerialNum serialNum) override;
diff --git a/searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp b/searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp
index 1a97972645f..e4d2d3b3f16 100644
--- a/searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp
@@ -435,7 +435,8 @@ FeedHandler::FeedHandler(IThreadingService &writeService,
_bucketDBHandler(nullptr),
_syncLock(),
_syncedSerialNum(0),
- _allowSync(false)
+ _allowSync(false),
+ _heart_beat_time(vespalib::steady_time())
{ }
@@ -764,6 +765,7 @@ void
FeedHandler::heartBeat()
{
assert(_writeService.master().isCurrentThread());
+ _heart_beat_time.store(vespalib::steady_clock::now());
_activeFeedView->heartBeat(_serialNum);
}
@@ -824,4 +826,10 @@ FeedHandler::syncTls(SerialNum syncTo)
}
}
+vespalib::steady_time
+FeedHandler::get_heart_beat_time() const
+{
+ return _heart_beat_time.load(std::memory_order_relaxed);
+}
+
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/feedhandler.h b/searchcore/src/vespa/searchcore/proton/server/feedhandler.h
index 6a014c761cc..2bee6b8d7fa 100644
--- a/searchcore/src/vespa/searchcore/proton/server/feedhandler.h
+++ b/searchcore/src/vespa/searchcore/proton/server/feedhandler.h
@@ -96,6 +96,7 @@ private:
std::mutex _syncLock;
SerialNum _syncedSerialNum;
bool _allowSync; // Sanity check
+ std::atomic<vespalib::steady_time> _heart_beat_time;
/**
* Delayed handling of feed operations, in master write thread.
@@ -245,6 +246,7 @@ public:
[[nodiscard]] CommitResult startCommit(DoneCallback onDone) override;
[[nodiscard]] CommitResult storeOperationSync(const FeedOperation & op);
void considerDelayedPrune();
+ vespalib::steady_time get_heart_beat_time() const;
};
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/flushhandlerproxy.cpp b/searchcore/src/vespa/searchcore/proton/server/flushhandlerproxy.cpp
index 0178dec86e4..e205700143b 100644
--- a/searchcore/src/vespa/searchcore/proton/server/flushhandlerproxy.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/flushhandlerproxy.cpp
@@ -9,16 +9,12 @@ namespace proton {
FlushHandlerProxy::FlushHandlerProxy(const DocumentDB::SP &documentDB)
: IFlushHandler(documentDB->getDocTypeName().toString()),
- _documentDB(documentDB)
-{
- _documentDB->retain();
-}
+ _documentDB(documentDB),
+ _retainGuard(_documentDB->retain())
+{ }
-FlushHandlerProxy::~FlushHandlerProxy()
-{
- _documentDB->release();
-}
+FlushHandlerProxy::~FlushHandlerProxy() = default;
std::vector<IFlushTarget::SP>
diff --git a/searchcore/src/vespa/searchcore/proton/server/flushhandlerproxy.h b/searchcore/src/vespa/searchcore/proton/server/flushhandlerproxy.h
index ededfe977e3..530a3997d84 100644
--- a/searchcore/src/vespa/searchcore/proton/server/flushhandlerproxy.h
+++ b/searchcore/src/vespa/searchcore/proton/server/flushhandlerproxy.h
@@ -3,6 +3,7 @@
#pragma once
#include <vespa/searchcore/proton/flushengine/iflushhandler.h>
+#include <vespa/searchcore/proton/common/monitored_refcount.h>
namespace proton {
@@ -12,10 +13,11 @@ class FlushHandlerProxy : public IFlushHandler
{
private:
std::shared_ptr<DocumentDB> _documentDB;
+ RetainGuard _retainGuard;
public:
FlushHandlerProxy(const std::shared_ptr<DocumentDB> &documentDB);
- virtual ~FlushHandlerProxy();
+ ~FlushHandlerProxy() override;
/**
* Implements IFlushHandler.
diff --git a/searchcore/src/vespa/searchcore/proton/server/i_maintenance_job.h b/searchcore/src/vespa/searchcore/proton/server/i_maintenance_job.h
index 3972072b41d..6bea9855c82 100644
--- a/searchcore/src/vespa/searchcore/proton/server/i_maintenance_job.h
+++ b/searchcore/src/vespa/searchcore/proton/server/i_maintenance_job.h
@@ -42,7 +42,7 @@ public:
virtual bool isBlocked() const { return false; }
virtual IBlockableMaintenanceJob *asBlockable() { return nullptr; }
virtual void onStop() = 0;
- virtual void updateMetrics(DocumentDBTaggedMetrics &) {}
+ virtual void updateMetrics(DocumentDBTaggedMetrics &) const {}
/**
* Register maintenance job runner, in case event passed to the
diff --git a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.h b/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.h
index e29472ea154..4116a4cedf0 100644
--- a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.h
+++ b/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.h
@@ -28,7 +28,6 @@ public:
IClusterStateChangedNotifier &clusterStateChangedNotifier,
bool nodeRetired);
~LidSpaceCompactionJob() override;
- void onStop() override { }
};
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_take2.cpp b/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_take2.cpp
index 52fd18cd971..6ac8f803800 100644
--- a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_take2.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_take2.cpp
@@ -50,6 +50,7 @@ public:
void fail(const Bucket & bucket) override {
assert(bucket.getBucketId() == _meta.bucketId);
auto & master = _job->_master;
+ if (_job->_stopped) return;
master.execute(makeLambdaTask([job=std::move(_job)] { job->_scanItr.reset(); }));
}
private:
@@ -78,6 +79,7 @@ void
CompactionJob::moveDocument(std::shared_ptr<CompactionJob> job, const search::DocumentMetaData & metaThen,
std::shared_ptr<IDestructorCallback> context)
{
+ if (job->_stopped) return; //TODO Remove once lidtracker is no longer in use.
// The real lid must be sampled in the master thread.
//TODO remove target lid from createMoveOperation interface
auto op = job->_handler->createMoveOperation(metaThen, 0);
@@ -86,6 +88,7 @@ CompactionJob::moveDocument(std::shared_ptr<CompactionJob> job, const search::Do
if (metaThen.gid != op->getDocument()->getId().getGlobalId()) return;
auto & master = job->_master;
+ if (job->_stopped) return;
master.execute(makeLambdaTask([self=std::move(job), meta=metaThen, moveOp=std::move(op), onDone=std::move(context)]() mutable {
if (self->_stopped.load(std::memory_order_relaxed)) return;
self->completeMove(meta, std::move(moveOp), std::move(onDone));
@@ -111,6 +114,7 @@ CompactionJob::completeMove(const search::DocumentMetaData & metaThen, std::uniq
}
CompactionJob::CompactionJob(const DocumentDBLidSpaceCompactionConfig &config,
+ RetainGuard dbRetainer,
std::shared_ptr<ILidSpaceCompactionHandler> handler,
IOperationStorer &opStorer,
IThreadService & master,
@@ -125,14 +129,38 @@ CompactionJob::CompactionJob(const DocumentDBLidSpaceCompactionConfig &config,
std::enable_shared_from_this<CompactionJob>(),
_master(master),
_bucketExecutor(bucketExecutor),
+ _dbRetainer(std::move(dbRetainer)),
_bucketSpace(bucketSpace),
_stopped(false)
{ }
CompactionJob::~CompactionJob() = default;
+std::shared_ptr<CompactionJob>
+CompactionJob::create(const DocumentDBLidSpaceCompactionConfig &config,
+ RetainGuard dbRetainer,
+ std::shared_ptr<ILidSpaceCompactionHandler> handler,
+ IOperationStorer &opStorer,
+ IThreadService & master,
+ BucketExecutor & bucketExecutor,
+ IDiskMemUsageNotifier &diskMemUsageNotifier,
+ const BlockableMaintenanceJobConfig &blockableConfig,
+ IClusterStateChangedNotifier &clusterStateChangedNotifier,
+ bool nodeRetired,
+ document::BucketSpace bucketSpace)
+{
+ return std::shared_ptr<CompactionJob>(
+ new CompactionJob(config, std::move(dbRetainer), std::move(handler), opStorer, master, bucketExecutor,
+ diskMemUsageNotifier, blockableConfig, clusterStateChangedNotifier, nodeRetired, bucketSpace),
+ [&master](auto job) {
+ auto failed = master.execute(makeLambdaTask([job]() { delete job; }));
+ assert(!failed);
+ });
+}
+
void
CompactionJob::onStop() {
+ BlockableMaintenanceJob::onStop();
_stopped = true;
}
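Editor's sketch: CompactionJob::create() above gives the shared_ptr a custom deleter so that, regardless of which thread drops the last reference, the job object is deleted on the master thread. The self-contained sketch below illustrates the idea with a hypothetical MiniExecutor standing in for IThreadService and makeLambdaTask; the real execute() returns the rejected task (empty on success), which the sketch simplifies to a bool.

#include <cassert>
#include <functional>
#include <memory>
#include <vector>

struct MiniExecutor {                             // stand-in for the master thread service
    std::vector<std::function<void()>> queued;
    bool execute(std::function<void()> task) { queued.push_back(std::move(task)); return true; }
    void drain() { for (auto &task : queued) { task(); } queued.clear(); }
};

struct Job { /* maintenance job state */ };

std::shared_ptr<Job> create_job(MiniExecutor &master) {
    return std::shared_ptr<Job>(new Job(), [&master](Job *raw) {
        // The deleter runs on whichever thread releases the last reference,
        // but the actual delete is re-posted to the master executor.
        bool accepted = master.execute([raw] { delete raw; });
        assert(accepted);
        (void) accepted;
    });
}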
diff --git a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_take2.h b/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_take2.h
index 12c0e30c61e..aa72d2e84bc 100644
--- a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_take2.h
+++ b/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_take2.h
@@ -3,6 +3,7 @@
#pragma once
#include "lid_space_compaction_job_base.h"
+#include <vespa/searchcore/proton/common/monitored_refcount.h>
#include <vespa/document/bucket/bucketspace.h>
#include <atomic>
@@ -27,8 +28,9 @@ private:
using BucketExecutor = storage::spi::BucketExecutor;
using IDestructorCallback = vespalib::IDestructorCallback;
using IThreadService = searchcorespi::index::IThreadService;
- IThreadService & _master;
+ IThreadService &_master;
BucketExecutor &_bucketExecutor;
+ RetainGuard _dbRetainer;
document::BucketSpace _bucketSpace;
std::atomic<bool> _stopped;
@@ -41,6 +43,7 @@ private:
class MoveTask;
CompactionJob(const DocumentDBLidSpaceCompactionConfig &config,
+ RetainGuard dbRetainer,
std::shared_ptr<ILidSpaceCompactionHandler> handler,
IOperationStorer &opStorer,
IThreadService & master,
@@ -53,6 +56,7 @@ private:
public:
static std::shared_ptr<CompactionJob>
create(const DocumentDBLidSpaceCompactionConfig &config,
+ RetainGuard dbRetainer,
std::shared_ptr<ILidSpaceCompactionHandler> handler,
IOperationStorer &opStorer,
IThreadService & master,
@@ -61,12 +65,7 @@ public:
const BlockableMaintenanceJobConfig &blockableConfig,
IClusterStateChangedNotifier &clusterStateChangedNotifier,
bool nodeRetired,
- document::BucketSpace bucketSpace)
- {
- return std::shared_ptr<CompactionJob>(
- new CompactionJob(config, std::move(handler), opStorer, master, bucketExecutor, diskMemUsageNotifier,
- blockableConfig, clusterStateChangedNotifier, nodeRetired, bucketSpace));
- }
+ document::BucketSpace bucketSpace);
~CompactionJob() override;
};
diff --git a/searchcore/src/vespa/searchcore/proton/server/maintenance_controller_explorer.cpp b/searchcore/src/vespa/searchcore/proton/server/maintenance_controller_explorer.cpp
index 6ab7fe373c4..b446c5d07ba 100644
--- a/searchcore/src/vespa/searchcore/proton/server/maintenance_controller_explorer.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/maintenance_controller_explorer.cpp
@@ -14,7 +14,7 @@ void
convertRunningJobsToSlime(const std::vector<MaintenanceJobRunner::SP> &jobs, Cursor &array)
{
for (const auto &jobRunner : jobs) {
- if (jobRunner->isRunning()) {
+ if (jobRunner->isRunnable()) {
Cursor &object = array.addObject();
object.setString("name", jobRunner->getJob().getName());
}
diff --git a/searchcore/src/vespa/searchcore/proton/server/maintenance_controller_explorer.h b/searchcore/src/vespa/searchcore/proton/server/maintenance_controller_explorer.h
index 6aa2528b4f9..7fa4415a8e6 100644
--- a/searchcore/src/vespa/searchcore/proton/server/maintenance_controller_explorer.h
+++ b/searchcore/src/vespa/searchcore/proton/server/maintenance_controller_explorer.h
@@ -18,8 +18,7 @@ private:
public:
MaintenanceControllerExplorer(std::vector<MaintenanceJobRunner::SP> jobs);
- // Implements vespalib::StateExplorer
- virtual void get_state(const vespalib::slime::Inserter &inserter, bool full) const override;
+ void get_state(const vespalib::slime::Inserter &inserter, bool full) const override;
};
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.cpp b/searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.cpp
index a6f408a984c..7a5a42b5608 100644
--- a/searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.cpp
@@ -41,10 +41,10 @@ injectLidSpaceCompactionJobs(MaintenanceController &controller,
for (auto &lidHandler : lscHandlers) {
std::shared_ptr<IMaintenanceJob> job;
if (config.getLidSpaceCompactionConfig().useBucketExecutor()) {
- job = lidspace::CompactionJob::create(config.getLidSpaceCompactionConfig(), std::move(lidHandler), opStorer,
- controller.masterThread(), bucketExecutor, diskMemUsageNotifier,
- config.getBlockableJobConfig(), clusterStateChangedNotifier,
- (calc ? calc->nodeRetired() : false), bucketSpace);
+ job = lidspace::CompactionJob::create(config.getLidSpaceCompactionConfig(), controller.retainDB(),
+ std::move(lidHandler), opStorer, controller.masterThread(),
+ bucketExecutor, diskMemUsageNotifier, config.getBlockableJobConfig(),
+ clusterStateChangedNotifier, (calc ? calc->nodeRetired() : false), bucketSpace);
} else {
job = std::make_shared<LidSpaceCompactionJob>(
config.getLidSpaceCompactionConfig(),
@@ -76,7 +76,7 @@ injectBucketMoveJob(MaintenanceController &controller,
{
std::shared_ptr<IMaintenanceJob> bmj;
if (config.getBucketMoveConfig().useBucketExecutor()) {
- bmj = BucketMoveJobV2::create(calc, moveHandler, bucketModifiedHandler, controller.masterThread(),
+ bmj = BucketMoveJobV2::create(calc, controller.retainDB(), moveHandler, bucketModifiedHandler, controller.masterThread(),
bucketExecutor, controller.getReadySubDB(), controller.getNotReadySubDB(),
bucketCreateNotifier, clusterStateChangedNotifier, bucketStateChangedNotifier,
diskMemUsageNotifier, config.getBlockableJobConfig(), docTypeName, bucketSpace);
@@ -108,7 +108,6 @@ MaintenanceJobsInjector::injectJobs(MaintenanceController &controller,
IOperationStorer &opStorer,
IFrozenBucketHandler &fbHandler,
bucketdb::IBucketCreateNotifier &bucketCreateNotifier,
- const vespalib::string &docTypeName,
document::BucketSpace bucketSpace,
IPruneRemovedDocumentsHandler &prdHandler,
IDocumentMoveHandler &moveHandler,
@@ -127,6 +126,7 @@ MaintenanceJobsInjector::injectJobs(MaintenanceController &controller,
controller.registerJobInMasterThread(std::make_unique<HeartBeatJob>(hbHandler, config.getHeartBeatConfig()));
controller.registerJobInDefaultPool(std::make_unique<PruneSessionCacheJob>(scPruner, config.getSessionCachePruneInterval()));
+ const auto & docTypeName = controller.getDocTypeName().getName();
const MaintenanceDocumentSubDB &mRemSubDB(controller.getRemSubDB());
auto pruneRDjob = std::make_unique<PruneRemovedDocumentsJob>(config.getPruneRemovedDocumentsConfig(), *mRemSubDB.meta_store(),
mRemSubDB.sub_db_id(), docTypeName, prdHandler, fbHandler);
diff --git a/searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.h b/searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.h
index 563ef227fcf..fb7117d2e66 100644
--- a/searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.h
+++ b/searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.h
@@ -40,7 +40,6 @@ struct MaintenanceJobsInjector
IOperationStorer &opStorer,
IFrozenBucketHandler &fbHandler,
bucketdb::IBucketCreateNotifier &bucketCreateNotifier,
- const vespalib::string &docTypeName,
document::BucketSpace bucketSpace,
IPruneRemovedDocumentsHandler &prdHandler,
IDocumentMoveHandler &moveHandler,
diff --git a/searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.cpp b/searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.cpp
index c71e9d832f3..3b4526e6f7c 100644
--- a/searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.cpp
@@ -7,6 +7,7 @@
#include <vespa/searchcorespi/index/i_thread_service.h>
#include <vespa/vespalib/util/lambdatask.h>
#include <vespa/vespalib/util/scheduledexecutor.h>
+#include <thread>
#include <vespa/log/log.h>
LOG_SETUP(".proton.server.maintenancecontroller");
@@ -28,14 +29,23 @@ public:
void run() override { _job->run(); }
};
+bool
+isRunningOrRunnable(const MaintenanceJobRunner & job, const Executor * master) {
+ return (&job.getExecutor() == master)
+ ? job.isRunning()
+ : job.isRunnable();
+}
+
}
MaintenanceController::MaintenanceController(IThreadService &masterThread,
- vespalib::SyncableThreadExecutor & defaultExecutor,
+ vespalib::Executor & defaultExecutor,
+ MonitoredRefCount & refCount,
const DocTypeName &docTypeName)
: IBucketFreezeListener(),
_masterThread(masterThread),
_defaultExecutor(defaultExecutor),
+ _refCount(refCount),
_readySubDB(),
_remSubDB(),
_notReadySubDB(),
@@ -78,7 +88,6 @@ MaintenanceController::registerJob(Executor & executor, IMaintenanceJob::UP job)
_jobs.push_back(std::make_shared<MaintenanceJobRunner>(executor, std::move(job)));
}
-
void
MaintenanceController::killJobs()
{
@@ -93,8 +102,11 @@ MaintenanceController::killJobs()
for (auto &job : _jobs) {
job->stop(); // Make sure no more tasks are added to the executor
}
- _defaultExecutor.sync();
- _defaultExecutor.sync();
+ for (auto &job : _jobs) {
+ while (isRunningOrRunnable(*job, &_masterThread)) {
+ std::this_thread::sleep_for(1ms);
+ }
+ }
JobList tmpJobs = _jobs;
{
Guard guard(_jobsLock);
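Editor's sketch: killJobs() above no longer calls _defaultExecutor.sync() twice; it polls each runner until it has no work left, via the new isRunningOrRunnable() helper. One plausible reason for the master-thread special case (an assumption, not stated in the patch): the wait itself runs in the master thread, so a task still queued there cannot start until the wait returns, and only a currently running job is worth waiting for. A self-contained sketch with a hypothetical MiniRunner:

#include <atomic>
#include <chrono>
#include <thread>
#include <vector>

struct MiniRunner {                              // stand-in for MaintenanceJobRunner
    std::atomic<bool> running{false};
    std::atomic<bool> queued{false};
    bool isRunning()  const { return running.load(); }
    bool isRunnable() const { return running.load() || queued.load(); }
};

// Mirrors isRunningOrRunnable(): the runner bound to the calling (master) thread
// is only checked for "running"; every other runner must also drain its queue.
void drain_jobs(const std::vector<MiniRunner *> &jobs, const MiniRunner *master_bound) {
    using namespace std::chrono_literals;
    for (const auto *job : jobs) {
        while ((job == master_bound) ? job->isRunning() : job->isRunnable()) {
            std::this_thread::sleep_for(1ms);
        }
    }
}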
diff --git a/searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.h b/searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.h
index fdb6f4fa880..6415c51eeed 100644
--- a/searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.h
+++ b/searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.h
@@ -7,12 +7,13 @@
#include "frozenbuckets.h"
#include "ibucketfreezelistener.h"
#include <vespa/searchcore/proton/common/doctypename.h>
-#include <mutex>
+#include <vespa/searchcore/proton/common/monitored_refcount.h>
#include <vespa/vespalib/util/scheduledexecutor.h>
+#include <mutex>
+
namespace vespalib {
class Timer;
- class SyncableThreadExecutor;
class Executor;
}
namespace searchcorespi::index { struct IThreadService; }
@@ -21,6 +22,7 @@ namespace proton {
class MaintenanceJobRunner;
class DocumentDBMaintenanceConfig;
+class MonitoredRefCount;
/**
* Class that controls the bucket moving between ready and notready sub databases
@@ -36,7 +38,7 @@ public:
using UP = std::unique_ptr<MaintenanceController>;
enum class State {INITIALIZING, STARTED, PAUSED, STOPPING};
- MaintenanceController(IThreadService &masterThread, vespalib::SyncableThreadExecutor & defaultExecutor, const DocTypeName &docTypeName);
+ MaintenanceController(IThreadService &masterThread, vespalib::Executor & defaultExecutor, MonitoredRefCount & refCount, const DocTypeName &docTypeName);
~MaintenanceController() override;
void registerJobInMasterThread(IMaintenanceJob::UP job);
@@ -73,12 +75,15 @@ public:
const MaintenanceDocumentSubDB & getRemSubDB() const { return _remSubDB; }
const MaintenanceDocumentSubDB & getNotReadySubDB() const { return _notReadySubDB; }
IThreadService & masterThread() { return _masterThread; }
+ const DocTypeName & getDocTypeName() const { return _docTypeName; }
+ RetainGuard retainDB() { return RetainGuard(_refCount); }
private:
using Mutex = std::mutex;
using Guard = std::lock_guard<Mutex>;
IThreadService &_masterThread;
- vespalib::SyncableThreadExecutor &_defaultExecutor;
+ vespalib::Executor &_defaultExecutor;
+ MonitoredRefCount &_refCount;
MaintenanceDocumentSubDB _readySubDB;
MaintenanceDocumentSubDB _remSubDB;
MaintenanceDocumentSubDB _notReadySubDB;
diff --git a/searchcore/src/vespa/searchcore/proton/server/maintenancedocumentsubdb.cpp b/searchcore/src/vespa/searchcore/proton/server/maintenancedocumentsubdb.cpp
index f91ed2bf608..ffbf42304d2 100644
--- a/searchcore/src/vespa/searchcore/proton/server/maintenancedocumentsubdb.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/maintenancedocumentsubdb.cpp
@@ -39,6 +39,7 @@ MaintenanceDocumentSubDB::clear()
_meta_store.reset();
_retriever.reset();
_feed_view.reset();
+ _pendingLidsForCommit = nullptr;
}
bool
diff --git a/searchcore/src/vespa/searchcore/proton/server/maintenancejobrunner.cpp b/searchcore/src/vespa/searchcore/proton/server/maintenancejobrunner.cpp
index 9518342a37f..658fa9f7482 100644
--- a/searchcore/src/vespa/searchcore/proton/server/maintenancejobrunner.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/maintenancejobrunner.cpp
@@ -80,6 +80,13 @@ bool
MaintenanceJobRunner::isRunning() const
{
Guard guard(_lock);
+ return _running;
+}
+
+bool
+MaintenanceJobRunner::isRunnable() const
+{
+ Guard guard(_lock);
return _running || _queued;
}
diff --git a/searchcore/src/vespa/searchcore/proton/server/maintenancejobrunner.h b/searchcore/src/vespa/searchcore/proton/server/maintenancejobrunner.h
index dc13cdfc7d9..6d51a4418ee 100644
--- a/searchcore/src/vespa/searchcore/proton/server/maintenancejobrunner.h
+++ b/searchcore/src/vespa/searchcore/proton/server/maintenancejobrunner.h
@@ -32,6 +32,7 @@ public:
void run() override;
void stop();
bool isRunning() const;
+ bool isRunnable() const;
const vespalib::Executor & getExecutor() const { return _executor; }
const IMaintenanceJob &getJob() const { return *_job; }
IMaintenanceJob &getJob() { return *_job; }
diff --git a/searchcore/src/vespa/searchcore/proton/server/persistencehandlerproxy.cpp b/searchcore/src/vespa/searchcore/proton/server/persistencehandlerproxy.cpp
index c2f80f78b8a..d298c0fac24 100644
--- a/searchcore/src/vespa/searchcore/proton/server/persistencehandlerproxy.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/persistencehandlerproxy.cpp
@@ -21,15 +21,11 @@ PersistenceHandlerProxy::PersistenceHandlerProxy(DocumentDB::SP documentDB)
: _documentDB(std::move(documentDB)),
_feedHandler(_documentDB->getFeedHandler()),
_bucketHandler(_documentDB->getBucketHandler()),
- _clusterStateHandler(_documentDB->getClusterStateHandler())
-{
- _documentDB->retain();
-}
+ _clusterStateHandler(_documentDB->getClusterStateHandler()),
+ _retainGuard(_documentDB->retain())
+{ }
-PersistenceHandlerProxy::~PersistenceHandlerProxy()
-{
- _documentDB->release();
-}
+PersistenceHandlerProxy::~PersistenceHandlerProxy() = default;
void
PersistenceHandlerProxy::initialize()
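Editor's sketch: the constructor rewrite above relies on DocumentDB::retain() now returning a guard by value rather than bumping a counter that must be paired with release() in the destructor. A hypothetical, simplified sketch of that shape, reusing the MonitoredRefCount/RetainGuard sketch given earlier in this diff:

#include <memory>
#include <utility>

class DocumentDBLike {                            // hypothetical stand-in for DocumentDB
    MonitoredRefCount _refCount;                  // see the earlier sketch
public:
    RetainGuard retain() { return RetainGuard(_refCount); }
};

class HandlerProxyLike {                          // shape shared by the *HandlerProxy classes
    std::shared_ptr<DocumentDBLike> _documentDB;
    RetainGuard                     _retainGuard; // released automatically by the defaulted destructor
public:
    explicit HandlerProxyLike(std::shared_ptr<DocumentDBLike> documentDB)
        : _documentDB(std::move(documentDB)),
          _retainGuard(_documentDB->retain())
    { }
};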
diff --git a/searchcore/src/vespa/searchcore/proton/server/persistencehandlerproxy.h b/searchcore/src/vespa/searchcore/proton/server/persistencehandlerproxy.h
index ee6e8a7ee42..ce95cc3bddd 100644
--- a/searchcore/src/vespa/searchcore/proton/server/persistencehandlerproxy.h
+++ b/searchcore/src/vespa/searchcore/proton/server/persistencehandlerproxy.h
@@ -3,6 +3,7 @@
#pragma once
#include <vespa/searchcore/proton/persistenceengine/ipersistencehandler.h>
+#include <vespa/searchcore/proton/common/monitored_refcount.h>
namespace proton {
@@ -13,10 +14,11 @@ class ClusterStateHandler;
class PersistenceHandlerProxy : public IPersistenceHandler
{
private:
- std::shared_ptr<DocumentDB> _documentDB;
- FeedHandler &_feedHandler;
- BucketHandler &_bucketHandler;
- ClusterStateHandler &_clusterStateHandler;
+ std::shared_ptr<DocumentDB> _documentDB;
+ FeedHandler &_feedHandler;
+ BucketHandler &_bucketHandler;
+ ClusterStateHandler &_clusterStateHandler;
+ RetainGuard _retainGuard;
public:
explicit PersistenceHandlerProxy(std::shared_ptr<DocumentDB> documentDB);
diff --git a/searchcore/src/vespa/searchcore/proton/server/proton.cpp b/searchcore/src/vespa/searchcore/proton/server/proton.cpp
index 1beb0a04de0..afeaec5333f 100644
--- a/searchcore/src/vespa/searchcore/proton/server/proton.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/proton.cpp
@@ -615,11 +615,11 @@ Proton::addDocumentDB(const document::DocumentType &docType,
// 1 thread per document type.
initializeThreads = std::make_shared<vespalib::ThreadStackExecutor>(1, 128_Ki);
}
- auto ret = std::make_shared<DocumentDB>(config.basedir + "/documents", documentDBConfig, config.tlsspec,
- _queryLimiter, _clock, docTypeName, bucketSpace, config, *this,
- *_warmupExecutor, *_sharedExecutor, *_persistenceEngine, *_tls->getTransLogServer(),
- *_metricsEngine, _fileHeaderContext, std::move(config_store),
- initializeThreads, bootstrapConfig->getHwInfo());
+ auto ret = DocumentDB::create(config.basedir + "/documents", documentDBConfig, config.tlsspec,
+ _queryLimiter, _clock, docTypeName, bucketSpace, config, *this,
+ *_warmupExecutor, *_sharedExecutor, *_persistenceEngine, *_tls->getTransLogServer(),
+ *_metricsEngine, _fileHeaderContext, std::move(config_store),
+ initializeThreads, bootstrapConfig->getHwInfo());
try {
ret->start();
} catch (vespalib::Exception &e) {
diff --git a/searchcore/src/vespa/searchcore/proton/server/pruneremoveddocumentsjob.h b/searchcore/src/vespa/searchcore/proton/server/pruneremoveddocumentsjob.h
index ba6faf556c0..76967635f4a 100644
--- a/searchcore/src/vespa/searchcore/proton/server/pruneremoveddocumentsjob.h
+++ b/searchcore/src/vespa/searchcore/proton/server/pruneremoveddocumentsjob.h
@@ -42,7 +42,6 @@ public:
// Implements IMaintenanceJob
bool run() override;
- void onStop() override { }
};
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/searchhandlerproxy.cpp b/searchcore/src/vespa/searchcore/proton/server/searchhandlerproxy.cpp
index 6da6c09cdba..2c1133ad1d6 100644
--- a/searchcore/src/vespa/searchcore/proton/server/searchhandlerproxy.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/searchhandlerproxy.cpp
@@ -8,15 +8,11 @@
namespace proton {
SearchHandlerProxy::SearchHandlerProxy(DocumentDB::SP documentDB)
- : _documentDB(std::move(documentDB))
-{
- _documentDB->retain();
-}
+ : _documentDB(std::move(documentDB)),
+ _retainGuard(_documentDB->retain())
+{ }
-SearchHandlerProxy::~SearchHandlerProxy()
-{
- _documentDB->release();
-}
+SearchHandlerProxy::~SearchHandlerProxy() = default;
std::unique_ptr<search::engine::DocsumReply>
SearchHandlerProxy::getDocsums(const DocsumRequest & request)
diff --git a/searchcore/src/vespa/searchcore/proton/server/searchhandlerproxy.h b/searchcore/src/vespa/searchcore/proton/server/searchhandlerproxy.h
index fc4f517fb36..d6690fda47e 100644
--- a/searchcore/src/vespa/searchcore/proton/server/searchhandlerproxy.h
+++ b/searchcore/src/vespa/searchcore/proton/server/searchhandlerproxy.h
@@ -3,6 +3,7 @@
#pragma once
#include <vespa/searchcore/proton/summaryengine/isearchhandler.h>
+#include <vespa/searchcore/proton/common/monitored_refcount.h>
namespace proton {
@@ -12,6 +13,7 @@ class SearchHandlerProxy : public ISearchHandler
{
private:
std::shared_ptr<DocumentDB> _documentDB;
+ RetainGuard _retainGuard;
public:
SearchHandlerProxy(std::shared_ptr<DocumentDB> documentDB);
diff --git a/searchcorespi/src/vespa/searchcorespi/index/iindexcollection.h b/searchcorespi/src/vespa/searchcorespi/index/iindexcollection.h
index 6402cc4b1ef..99cf27dea41 100644
--- a/searchcorespi/src/vespa/searchcorespi/index/iindexcollection.h
+++ b/searchcorespi/src/vespa/searchcorespi/index/iindexcollection.h
@@ -4,11 +4,7 @@
#include "indexsearchable.h"
-namespace search {
- namespace queryeval {
- class ISourceSelector;
- }
-}
+namespace search::queryeval { class ISourceSelector; }
namespace searchcorespi {
/**
diff --git a/searchcorespi/src/vespa/searchcorespi/index/warmupindexcollection.cpp b/searchcorespi/src/vespa/searchcorespi/index/warmupindexcollection.cpp
index f7abcedbf89..168a6680f45 100644
--- a/searchcorespi/src/vespa/searchcorespi/index/warmupindexcollection.cpp
+++ b/searchcorespi/src/vespa/searchcorespi/index/warmupindexcollection.cpp
@@ -164,8 +164,8 @@ WarmupIndexCollection::createBlueprint(const IRequestContext & requestContext,
needWarmUp = needWarmUp || ! handledBefore(fs.getFieldId(), term);
}
if (needWarmUp) {
- Task::UP task(new WarmupTask(mdl.createMatchData(), *this));
- static_cast<WarmupTask &>(*task).createBlueprint(fsl, term);
+ auto task = std::make_unique<WarmupTask>(mdl.createMatchData(), *this);
+ task->createBlueprint(fsl, term);
fireWarmup(std::move(task));
}
return _prev->createBlueprint(requestContext, fields, term);
diff --git a/searchlib/CMakeLists.txt b/searchlib/CMakeLists.txt
index 7dbad86f76c..ea47dddb99b 100644
--- a/searchlib/CMakeLists.txt
+++ b/searchlib/CMakeLists.txt
@@ -91,6 +91,7 @@ vespa_define_module(
src/tests/attribute/posting_list_merger
src/tests/attribute/postinglist
src/tests/attribute/postinglistattribute
+ src/tests/attribute/posting_store
src/tests/attribute/reference_attribute
src/tests/attribute/save_target
src/tests/attribute/searchable
diff --git a/searchlib/src/apps/tests/CMakeLists.txt b/searchlib/src/apps/tests/CMakeLists.txt
index 9125d529800..5507a78b8c3 100644
--- a/searchlib/src/apps/tests/CMakeLists.txt
+++ b/searchlib/src/apps/tests/CMakeLists.txt
@@ -20,3 +20,11 @@ vespa_add_executable(searchlib_memoryindexstress_test_app
searchlib
)
vespa_add_test(NAME searchlib_memoryindexstress_test_app COMMAND searchlib_memoryindexstress_test_app BENCHMARK)
+vespa_add_executable(searchlib_document_weight_attribute_lookup_stress_test_app
+ SOURCES
+ document_weight_attribute_lookup_stress_test.cpp
+ DEPENDS
+ searchlib
+ GTest::GTest
+)
+vespa_add_test(NAME searchlib_document_weight_attribute_lookup_stress_test_app COMMAND searchlib_document_weight_attribute_lookup_stress_test_app BENCHMARK)
diff --git a/searchlib/src/apps/tests/document_weight_attribute_lookup_stress_test.cpp b/searchlib/src/apps/tests/document_weight_attribute_lookup_stress_test.cpp
new file mode 100644
index 00000000000..8aa09e261b5
--- /dev/null
+++ b/searchlib/src/apps/tests/document_weight_attribute_lookup_stress_test.cpp
@@ -0,0 +1,150 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/searchlib/attribute/attributefactory.h>
+#include <vespa/searchlib/attribute/integerbase.h>
+#include <vespa/searchlib/attribute/i_document_weight_attribute.h>
+#include <vespa/vespalib/gtest/gtest.h>
+#include <random>
+#include <chrono>
+#include <iostream>
+
+using search::AttributeFactory;
+using search::AttributeVector;
+using search::DictionaryConfig;
+using search::IntegerAttribute;
+using search::attribute::Config;
+using search::attribute::BasicType;
+using search::attribute::CollectionType;
+using std::chrono::steady_clock;
+
+namespace {
+
+Config make_config(bool hash)
+{
+ Config cfg(BasicType::INT64, CollectionType::WSET);
+ cfg.setFastSearch(true);
+ cfg.set_dictionary_config(DictionaryConfig(hash ? DictionaryConfig::Type::HASH : DictionaryConfig::Type::BTREE));
+ return cfg;
+}
+
+class MyKey : public search::IDocumentWeightAttribute::LookupKey
+{
+ int64_t _key;
+public:
+ MyKey(int64_t key)
+ : _key(key)
+ {
+ }
+ vespalib::stringref asString() const override { return ""; }
+ bool asInteger(int64_t &value) const override { value=_key; return true; }
+
+};
+
+static constexpr uint32_t num_test_docs = 100000000;
+static constexpr int64_t value_multiplier = 10;
+static constexpr uint32_t num_lookup_keys = 100000000;
+static constexpr uint32_t lookup_loops = 2;
+
+}
+
+
+class DocumentWeightAttributeLookupStressTest : public ::testing::Test
+{
+protected:
+ std::shared_ptr<AttributeVector> _btree_av;
+ std::shared_ptr<AttributeVector> _hash_av;
+ std::vector<int64_t> _lookup_keys;
+
+public:
+ DocumentWeightAttributeLookupStressTest();
+ ~DocumentWeightAttributeLookupStressTest() override;
+ void populate(AttributeVector& attr);
+ void make_lookup_keys();
+ std::pair<uint64_t, uint64_t> lookup_loop(AttributeVector& attr, uint32_t loops);
+};
+
+DocumentWeightAttributeLookupStressTest::DocumentWeightAttributeLookupStressTest()
+ : _btree_av(AttributeFactory::createAttribute("btree", make_config(false))),
+ _hash_av(AttributeFactory::createAttribute("hash", make_config(true))),
+ _lookup_keys()
+{
+ populate(*_btree_av);
+ populate(*_hash_av);
+ make_lookup_keys();
+}
+
+DocumentWeightAttributeLookupStressTest::~DocumentWeightAttributeLookupStressTest() = default;
+
+void
+DocumentWeightAttributeLookupStressTest::populate(AttributeVector& attr)
+{
+ std::cout << "Populate " << attr.getName() << " with " << num_test_docs << " values" << std::endl;
+ auto before = steady_clock::now();
+ auto& iattr = dynamic_cast<IntegerAttribute&>(attr);
+ attr.addReservedDoc();
+ attr.addDocs(num_test_docs);
+ for (uint32_t lid = 1; lid <= num_test_docs; ++lid) {
+ attr.clearDoc(lid);
+ iattr.append(lid, lid * value_multiplier, 42);
+ if ((lid % 1000) == 0) {
+ attr.commit();
+ }
+ }
+ attr.commit();
+ std::chrono::duration<double> elapsed = steady_clock::now() - before;
+ std::cout << elapsed.count() << " seconds elapsed" << std::endl;
+}
+
+void
+DocumentWeightAttributeLookupStressTest::make_lookup_keys()
+{
+ std::cout << "making lookup keys" << std::endl;
+ auto before = steady_clock::now();
+ std::mt19937_64 mt;
+ std::uniform_int_distribution<int64_t> distrib(1, num_test_docs * value_multiplier);
+ _lookup_keys.reserve(num_lookup_keys);
+ for (uint32_t n = 0; n < num_lookup_keys; ++n) {
+ _lookup_keys.emplace_back(distrib(mt));
+ }
+ std::chrono::duration<double> elapsed = steady_clock::now() - before;
+ std::cout << elapsed.count() << " seconds elapsed" << std::endl;
+}
+
+
+std::pair<uint64_t, uint64_t>
+DocumentWeightAttributeLookupStressTest::lookup_loop(AttributeVector& attr, uint32_t loops)
+{
+ size_t lookups = loops * _lookup_keys.size();
+ std::cout << "Performing " << lookups << " " << attr.getName() << " lookups" << std::endl;
+ auto before = steady_clock::now();
+ auto dwa = attr.asDocumentWeightAttribute();
+ uint64_t hits = 0;
+ uint64_t misses = 0;
+ for (uint32_t loop = 0; loop < loops; ++loop) {
+ auto root = dwa->get_dictionary_snapshot();
+ for (auto key : _lookup_keys) {
+ MyKey my_key(key);
+ auto result = dwa->lookup(my_key, root);
+ if (result.posting_idx.valid()) {
+ ++hits;
+ } else {
+ ++misses;
+ }
+ }
+ }
+ std::chrono::duration<double> elapsed = steady_clock::now() - before;
+ std::cout.precision(12);
+ std::cout << (lookups / elapsed.count()) << " " << attr.getName() << " lookups/s" << std::endl;
+ std::cout << hits << " hits, " << misses << " misses" << std::endl;
+ std::cout << elapsed.count() << " seconds elapsed" << std::endl;
+ return std::make_pair(hits, misses);
+}
+
+TEST_F(DocumentWeightAttributeLookupStressTest, lookup)
+{
+ auto btree_result = lookup_loop(*_btree_av, lookup_loops);
+ auto hash_result = lookup_loop(*_hash_av, lookup_loops);
+ EXPECT_EQ(btree_result, hash_result);
+}
+
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/evaluation/MapTypeContext.java b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/evaluation/MapTypeContext.java
index 2a42e2d92f7..959eb16a2ac 100644
--- a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/evaluation/MapTypeContext.java
+++ b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/evaluation/MapTypeContext.java
@@ -24,7 +24,7 @@ public class MapTypeContext implements TypeContext<Reference> {
@Override
public TensorType getType(String reference) {
- throw new UnsupportedOperationException("Not able to parse gereral references from string form");
+ throw new UnsupportedOperationException("Not able to parse general references from string form");
}
@Override
diff --git a/searchlib/src/main/java/com/yahoo/searchlib/tensor/EvaluateTensorConformance.java b/searchlib/src/main/java/com/yahoo/searchlib/tensor/EvaluateTensorConformance.java
index cf506efcb78..bd6fb47be1e 100644
--- a/searchlib/src/main/java/com/yahoo/searchlib/tensor/EvaluateTensorConformance.java
+++ b/searchlib/src/main/java/com/yahoo/searchlib/tensor/EvaluateTensorConformance.java
@@ -103,10 +103,7 @@ public class EvaluateTensorConformance {
private Tensor evaluate(String expression, MapContext context) throws ParseException {
Value value = new RankingExpression(expression).evaluate(context);
- if (!(value instanceof TensorValue)) {
- throw new IllegalArgumentException("Result is not a tensor");
- }
- return ((TensorValue)value).asTensor();
+ return value.asTensor();
}
private MapContext getInput(Inspector inputs) {
diff --git a/searchlib/src/main/sh/vespa-evaluate-tensor-conformance.sh b/searchlib/src/main/sh/vespa-evaluate-tensor-conformance.sh
index 5205f32e61d..43844e07cd4 100755
--- a/searchlib/src/main/sh/vespa-evaluate-tensor-conformance.sh
+++ b/searchlib/src/main/sh/vespa-evaluate-tensor-conformance.sh
@@ -76,4 +76,4 @@ findhost
LJ=${VESPA_HOME}/lib/jars
-exec java -cp ${LJ}/searchlib.jar:${LJ}/vespaclient-java-jar-with-dependencies.jar com.yahoo.searchlib.tensor.EvaluateTensorConformance "$@"
+exec java -cp ${LJ}/vespajlib.jar:${LJ}/searchlib.jar:${LJ}/vespaclient-java-jar-with-dependencies.jar com.yahoo.searchlib.tensor.EvaluateTensorConformance "$@"
diff --git a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/evaluation/EvaluationTestCase.java b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/evaluation/EvaluationTestCase.java
index e1daf8c8fe2..d6302d7026e 100644
--- a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/evaluation/EvaluationTestCase.java
+++ b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/evaluation/EvaluationTestCase.java
@@ -234,6 +234,7 @@ public class EvaluationTestCase {
"sum(tensor0, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
tester.assertEvaluates("{ {}:16 }",
"sum(tensor0, x, y)", "{ {x:0,y:0}:1.0, {x:1,y:0}:3.0, {x:0,y:1}:5.0, {x:1,y:1}:7.0 }");
+ tester.assertEvaluates("{ {}: -1 }", "reduce(tensor0, max)", "tensor(x[2]):[-2,-1]");
// tensor join
tester.assertEvaluates("{ {x:0,y:0}:15, {x:1,y:0}:35 }", "join(tensor0, tensor1, f(x,y) (x*y))", "{ {x:0}:3, {x:1}:7 }", "{ {y:0}:5 }");
@@ -341,6 +342,12 @@ public class EvaluationTestCase {
tester.assertEvaluates("3.0", "tensor0{bar}", true, "{ {x:foo}:1, {x:bar}:3 }");
tester.assertEvaluates("3.3", "tensor0[2]", "tensor(values[4]):[1.1, 2.2, 3.3, 4.4]]");
+ // concat
+ tester.assertEvaluates("tensor(x[5]):[0, 1, 2, 3, 4]",
+ "concat(tensor0, tensor1, x)",
+ "tensor(x[2]):[0, 1]",
+ "tensor(x[3]):[2, 3, 4])");
+
// composite functions
tester.assertEvaluates("{ {x:0}:0.25, {x:1}:0.75 }", "l1_normalize(tensor0, x)", "{ {x:0}:1, {x:1}:3 }");
tester.assertEvaluates("{ {x:0}:0.31622776601683794, {x:1}:0.9486832980505138 }", "l2_normalize(tensor0, x)", "{ {x:0}:1, {x:1}:3 }");
@@ -402,8 +409,8 @@ public class EvaluationTestCase {
tester.assertEvaluates("tensor<float>(x[3]):[1.0, 2.0, 3.0]",
"cell_cast(tensor0, float)",
"tensor<double>(x[3]):[1, 2, 3]");
- tester.assertEvaluates("tensor<float>():{1}",
- "cell_cast(tensor0{x:1}, float)",
+ tester.assertEvaluates("tensor<float>(x[2]):[1.0, 2.0]",
+ "cell_cast(tensor(x[2]):[tensor0{x:1}, tensor0{x:2}], float)",
"tensor<double>(x{}):{1:1, 2:2, 3:3}");
tester.assertEvaluates("tensor<float>(x[2]):[3,8]",
"cell_cast(tensor0 * tensor1, float)",
diff --git a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/evaluation/EvaluationTester.java b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/evaluation/EvaluationTester.java
index 488930a8eb9..e974bcb47cd 100644
--- a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/evaluation/EvaluationTester.java
+++ b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/evaluation/EvaluationTester.java
@@ -75,7 +75,7 @@ public class EvaluationTester {
RankingExpression expression = new RankingExpression(expressionString);
if ( ! explanation.isEmpty())
explanation = explanation + ": ";
- assertEquals(explanation + expression.toString(), value, expression.evaluate(context));
+ assertEquals(explanation + expression, value, expression.evaluate(context));
return expression;
}
catch (ParseException e) {
diff --git a/searchlib/src/tests/attribute/document_weight_iterator/document_weight_iterator_test.cpp b/searchlib/src/tests/attribute/document_weight_iterator/document_weight_iterator_test.cpp
index 715df028e8d..bbac15828a7 100644
--- a/searchlib/src/tests/attribute/document_weight_iterator/document_weight_iterator_test.cpp
+++ b/searchlib/src/tests/attribute/document_weight_iterator/document_weight_iterator_test.cpp
@@ -161,23 +161,6 @@ TEST_F("require that string iterators are created correctly", StringFixture) {
verify_posting(*f1.api, "foo");
}
-TEST_F("require that dictionary snapshot works", LongFixture)
-{
- auto read_guard = f1.attr->makeReadGuard(false);
- auto dictionary_snapshot = f1.api->get_dictionary_snapshot();
- auto lookup1 = f1.api->lookup("111", dictionary_snapshot);
- EXPECT_TRUE(lookup1.enum_idx.valid());
- f1.attr->clearDoc(1);
- f1.attr->clearDoc(5);
- f1.attr->clearDoc(7);
- f1.attr->commit();
- auto lookup2 = f1.api->lookup("111", f1.api->get_dictionary_snapshot());
- EXPECT_FALSE(lookup2.enum_idx.valid());
- auto lookup3 = f1.api->lookup("111", dictionary_snapshot);
- EXPECT_TRUE(lookup3.enum_idx.valid());
- EXPECT_EQUAL(lookup1.enum_idx.ref(), lookup3.enum_idx.ref());
-}
-
TEST_F("require that collect_folded works for string", StringFixture)
{
StringAttribute *attr = static_cast<StringAttribute *>(f1.attr.get());
diff --git a/searchlib/src/tests/attribute/enum_comparator/enum_comparator_test.cpp b/searchlib/src/tests/attribute/enum_comparator/enum_comparator_test.cpp
index d999a6f37a2..3ab29797120 100644
--- a/searchlib/src/tests/attribute/enum_comparator/enum_comparator_test.cpp
+++ b/searchlib/src/tests/attribute/enum_comparator/enum_comparator_test.cpp
@@ -30,7 +30,7 @@ TEST("requireThatNumericLessIsWorking")
NumericEnumStore es(false, DictionaryConfig::Type::BTREE);
EnumIndex e1 = es.insert(10);
EnumIndex e2 = es.insert(30);
- auto cmp1 = es.make_comparator();
+ const auto & cmp1 = es.get_comparator();
EXPECT_TRUE(cmp1.less(e1, e2));
EXPECT_FALSE(cmp1.less(e2, e1));
EXPECT_FALSE(cmp1.less(e1, e1));
@@ -44,7 +44,7 @@ TEST("requireThatNumericEqualIsWorking")
NumericEnumStore es(false, DictionaryConfig::Type::BTREE);
EnumIndex e1 = es.insert(10);
EnumIndex e2 = es.insert(30);
- auto cmp1 = es.make_comparator();
+ const auto & cmp1 = es.get_comparator();
EXPECT_FALSE(cmp1.equal(e1, e2));
EXPECT_FALSE(cmp1.equal(e2, e1));
EXPECT_TRUE(cmp1.equal(e1, e1));
@@ -60,7 +60,7 @@ TEST("requireThatFloatLessIsWorking")
EnumIndex e1 = es.insert(10.5);
EnumIndex e2 = es.insert(30.5);
EnumIndex e3 = es.insert(std::numeric_limits<float>::quiet_NaN());
- auto cmp1 = es.make_comparator();
+ const auto & cmp1 = es.get_comparator();
EXPECT_TRUE(cmp1.less(e1, e2));
EXPECT_FALSE(cmp1.less(e2, e1));
EXPECT_FALSE(cmp1.less(e1, e1));
@@ -78,7 +78,7 @@ TEST("requireThatFloatEqualIsWorking")
EnumIndex e1 = es.insert(10.5);
EnumIndex e2 = es.insert(30.5);
EnumIndex e3 = es.insert(std::numeric_limits<float>::quiet_NaN());
- auto cmp1 = es.make_comparator();
+ const auto & cmp1 = es.get_comparator();
EXPECT_FALSE(cmp1.equal(e1, e2));
EXPECT_FALSE(cmp1.equal(e2, e1));
EXPECT_TRUE(cmp1.equal(e1, e1));
@@ -97,7 +97,7 @@ TEST("requireThatStringLessIsWorking")
EnumIndex e1 = es.insert("Aa");
EnumIndex e2 = es.insert("aa");
EnumIndex e3 = es.insert("aB");
- auto cmp1 = es.make_comparator();
+ const auto & cmp1 = es.get_comparator();
EXPECT_TRUE(cmp1.less(e1, e2)); // similar folded, fallback to regular
EXPECT_FALSE(cmp1.less(e2, e1));
EXPECT_FALSE(cmp1.less(e1, e1));
@@ -114,7 +114,7 @@ TEST("requireThatStringEqualIsWorking")
EnumIndex e1 = es.insert("Aa");
EnumIndex e2 = es.insert("aa");
EnumIndex e3 = es.insert("aB");
- auto cmp1 = es.make_comparator();
+ const auto & cmp1 = es.get_comparator();
EXPECT_FALSE(cmp1.equal(e1, e2)); // similar folded, fallback to regular
EXPECT_FALSE(cmp1.equal(e2, e1));
EXPECT_TRUE(cmp1.equal(e1, e1));
@@ -157,13 +157,13 @@ TEST("requireThatFoldedLessIsWorking")
EnumIndex e2 = es.insert("aa");
EnumIndex e3 = es.insert("aB");
EnumIndex e4 = es.insert("Folded");
- auto cmp1 = es.make_folded_comparator();
+ const auto & cmp1 = es.get_folded_comparator();
EXPECT_FALSE(cmp1.less(e1, e2)); // similar folded
EXPECT_FALSE(cmp1.less(e2, e1)); // similar folded
EXPECT_TRUE(cmp1.less(e2, e3)); // folded compare
EXPECT_FALSE(cmp1.less(e3, e2)); // folded compare
- auto cmp2 = es.make_folded_comparator("fol", false);
- auto cmp3 = es.make_folded_comparator("fol", true);
+ auto cmp2 = es.make_folded_comparator("fol");
+ auto cmp3 = es.make_folded_comparator_prefix("fol");
EXPECT_TRUE(cmp2.less(EnumIndex(), e4));
EXPECT_FALSE(cmp2.less(e4, EnumIndex()));
EXPECT_FALSE(cmp3.less(EnumIndex(), e4)); // similar when prefix
@@ -177,21 +177,20 @@ TEST("requireThatFoldedEqualIsWorking")
EnumIndex e2 = es.insert("aa");
EnumIndex e3 = es.insert("aB");
EnumIndex e4 = es.insert("Folded");
- auto cmp1 = es.make_folded_comparator();
+ const auto & cmp1 = es.get_folded_comparator();
EXPECT_TRUE(cmp1.equal(e1, e1)); // similar folded
EXPECT_TRUE(cmp1.equal(e2, e1)); // similar folded
EXPECT_TRUE(cmp1.equal(e2, e1));
EXPECT_FALSE(cmp1.equal(e2, e3)); // folded compare
EXPECT_FALSE(cmp1.equal(e3, e2)); // folded compare
- auto cmp2 = es.make_folded_comparator("fol", false);
- auto cmp3 = es.make_folded_comparator("fol", true);
+ auto cmp2 = es.make_folded_comparator("fol");
+ auto cmp3 = es.make_folded_comparator_prefix("fol");
EXPECT_FALSE(cmp2.equal(EnumIndex(), e4));
EXPECT_FALSE(cmp2.equal(e4, EnumIndex()));
EXPECT_TRUE(cmp2.equal(EnumIndex(), EnumIndex()));
EXPECT_FALSE(cmp3.equal(EnumIndex(), e4)); // similar when prefix
EXPECT_FALSE(cmp3.equal(e4, EnumIndex())); // similar when prefix
EXPECT_TRUE(cmp3.equal(EnumIndex(), EnumIndex())); // similar when prefix
-
}
}
diff --git a/searchlib/src/tests/attribute/enumstore/enumstore_test.cpp b/searchlib/src/tests/attribute/enumstore/enumstore_test.cpp
index 0f15c288f63..ada39f9d2de 100644
--- a/searchlib/src/tests/attribute/enumstore/enumstore_test.cpp
+++ b/searchlib/src/tests/attribute/enumstore/enumstore_test.cpp
@@ -606,7 +606,7 @@ EnumStoreDictionaryTest<EnumStoreTypeAndDictionaryType>::update_posting_idx(Enum
{
auto& dict = store.get_dictionary();
EntryRef old_posting_idx_check;
- dict.update_posting_list(enum_idx, store.make_comparator(), [&old_posting_idx_check, new_posting_idx](EntryRef posting_idx) noexcept -> EntryRef { old_posting_idx_check = posting_idx; return new_posting_idx; });
+ dict.update_posting_list(enum_idx, store.get_comparator(), [&old_posting_idx_check, new_posting_idx](EntryRef posting_idx) noexcept -> EntryRef { old_posting_idx_check = posting_idx; return new_posting_idx; });
EXPECT_EQ(old_posting_idx, old_posting_idx_check);
}
@@ -700,6 +700,71 @@ TYPED_TEST(EnumStoreDictionaryTest, normalize_posting_lists_works)
EXPECT_EQ(EntryRef(), find_result.second);
}
+namespace {
+
+void inc_generation(generation_t &gen, NumericEnumStore &store)
+{
+ store.freeze_dictionary();
+ store.transfer_hold_lists(gen);
+ ++gen;
+ store.trim_hold_lists(gen);
+}
+
+}
+
+TYPED_TEST(EnumStoreDictionaryTest, compact_worst_works)
+{
+ size_t entry_count = (search::CompactionStrategy::DEAD_BYTES_SLACK / 8) + 40;
+ auto updater = this->store.make_batch_updater();
+ for (int32_t i = 0; (size_t) i < entry_count; ++i) {
+ auto idx = updater.insert(i);
+ if (i < 20) {
+ updater.inc_ref_count(idx);
+ }
+ }
+ updater.commit();
+ generation_t gen = 3;
+ inc_generation(gen, this->store);
+ auto& dict = this->store.get_dictionary();
+ if (dict.get_has_btree_dictionary()) {
+ EXPECT_LT(search::CompactionStrategy::DEAD_BYTES_SLACK, dict.get_btree_memory_usage().deadBytes());
+ }
+ if (dict.get_has_hash_dictionary()) {
+ EXPECT_LT(search::CompactionStrategy::DEAD_BYTES_SLACK, dict.get_hash_memory_usage().deadBytes());
+ }
+ int compact_count = 0;
+ search::CompactionStrategy compaction_strategy;
+ for (uint32_t i = 0; i < 15; ++i) {
+ this->store.update_stat();
+ if (this->store.consider_compact_dictionary(compaction_strategy)) {
+ ++compact_count;
+ } else {
+ break;
+ }
+ EXPECT_FALSE(this->store.consider_compact_dictionary(compaction_strategy));
+ inc_generation(gen, this->store);
+ }
+ EXPECT_LT((TypeParam::type == Type::BTREE_AND_HASH) ? 1 : 0, compact_count);
+ EXPECT_GT(15, compact_count);
+ if (dict.get_has_btree_dictionary()) {
+ EXPECT_GT(search::CompactionStrategy::DEAD_BYTES_SLACK, dict.get_btree_memory_usage().deadBytes());
+ }
+ if (dict.get_has_hash_dictionary()) {
+ EXPECT_GT(search::CompactionStrategy::DEAD_BYTES_SLACK, dict.get_hash_memory_usage().deadBytes());
+ }
+ std::vector<int32_t> exp_values;
+ std::vector<int32_t> values;
+ for (int32_t i = 0; i < 20; ++i) {
+ exp_values.push_back(i);
+ }
+ auto read_snapshot = dict.get_read_snapshot();
+ auto& mystore = this->store;
+ read_snapshot->fill();
+ read_snapshot->sort();
+ read_snapshot->foreach_key([&values, &mystore](EntryRef idx) { values.push_back(mystore.get_value(idx)); });
+ EXPECT_EQ(exp_values, values);
+}
+
#pragma GCC diagnostic pop
}
diff --git a/searchlib/src/tests/attribute/posting_store/CMakeLists.txt b/searchlib/src/tests/attribute/posting_store/CMakeLists.txt
new file mode 100644
index 00000000000..96e6ee5d49b
--- /dev/null
+++ b/searchlib/src/tests/attribute/posting_store/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchlib_posting_store_test_app TEST
+ SOURCES
+ posting_store_test.cpp
+ DEPENDS
+ searchlib
+ GTest::GTest
+)
+vespa_add_test(NAME searchlib_posting_store_test_app COMMAND searchlib_posting_store_test_app COST 30)
diff --git a/searchlib/src/tests/attribute/posting_store/posting_store_test.cpp b/searchlib/src/tests/attribute/posting_store/posting_store_test.cpp
new file mode 100644
index 00000000000..a502dbcaf79
--- /dev/null
+++ b/searchlib/src/tests/attribute/posting_store/posting_store_test.cpp
@@ -0,0 +1,257 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/searchcommon/attribute/config.h>
+#include <vespa/searchcommon/attribute/status.h>
+#include <vespa/searchlib/attribute/postingstore.h>
+#include <vespa/searchlib/attribute/enumstore.hpp>
+#include <vespa/vespalib/btree/btreenodeallocator.hpp>
+#include <vespa/vespalib/btree/btreerootbase.hpp>
+#include <vespa/vespalib/btree/btreeroot.hpp>
+#include <vespa/searchlib/attribute/postingstore.hpp>
+#include <vespa/vespalib/datastore/buffer_type.hpp>
+#include <vespa/vespalib/gtest/gtest.h>
+#include <ostream>
+
+using vespalib::GenerationHandler;
+using vespalib::datastore::EntryRef;
+
+namespace search::attribute {
+
+using MyValueStore = EnumStoreT<int32_t>;
+using MyPostingStore = PostingStore<int32_t>;
+
+namespace {
+
+static constexpr uint32_t lid_limit = 20000;
+static constexpr uint32_t huge_sequence_length = 800;
+
+struct PostingStoreSetup {
+ bool enable_bitvectors;
+ bool enable_only_bitvector;
+ PostingStoreSetup(bool enable_bitvectors_in, bool enable_only_bitvector_in)
+ : enable_bitvectors(enable_bitvectors_in),
+ enable_only_bitvector(enable_only_bitvector_in)
+ {
+ }
+};
+
+std::ostream& operator<<(std::ostream& os, const PostingStoreSetup setup)
+{
+ os << (setup.enable_bitvectors ? "bv" : "nobv") << "_" << (setup.enable_only_bitvector ? "onlybv" : "mixed");
+ return os;
+}
+
+Config make_config(PostingStoreSetup param) {
+ Config cfg;
+ cfg.setEnableBitVectors(param.enable_bitvectors);
+ cfg.setEnableOnlyBitVector(param.enable_only_bitvector);
+ return cfg;
+}
+
+}
+
+class PostingStoreTest : public ::testing::TestWithParam<PostingStoreSetup>
+{
+protected:
+ GenerationHandler _gen_handler;
+ Config _config;
+ Status _status;
+ MyValueStore _value_store;
+ MyPostingStore _store;
+
+ PostingStoreTest();
+ ~PostingStoreTest() override;
+
+ void inc_generation()
+ {
+ _value_store.freeze_dictionary();
+ _store.freeze();
+ _value_store.transfer_hold_lists(_gen_handler.getCurrentGeneration());
+ _store.transferHoldLists(_gen_handler.getCurrentGeneration());
+ _gen_handler.incGeneration();
+ _value_store.trim_hold_lists(_gen_handler.getFirstUsedGeneration());
+ _store.trimHoldLists(_gen_handler.getFirstUsedGeneration());
+ }
+
+ EntryRef add_sequence(int start_key, int end_key)
+ {
+ std::vector<MyPostingStore::KeyDataType> additions;
+ std::vector<MyPostingStore::KeyType> removals;
+ EntryRef root;
+ for (int i = start_key; i < end_key; ++i) {
+ additions.emplace_back(i, 0);
+ }
+ _store.apply(root,
+ &additions[0], &additions[0] + additions.size(),
+ &removals[0], &removals[0] + removals.size());
+ return root;
+ }
+ static std::vector<int> make_exp_sequence(int start_key, int end_key)
+ {
+ std::vector<int> sequence;
+ for (int i = start_key; i < end_key; ++i) {
+ sequence.emplace_back(i);
+ }
+ return sequence;
+ }
+ std::vector<int> get_sequence(EntryRef root) const {
+ std::vector<int> sequence;
+ _store.foreach_frozen_key(root, [&sequence](int key) { sequence.emplace_back(key); });
+ return sequence;
+ }
+
+ void populate(uint32_t sequence_length);
+ EntryRef get_posting_ref(int key);
+ void test_compact_btree_nodes(uint32_t sequence_length);
+ void test_compact_sequence(uint32_t sequence_length);
+};
+
+PostingStoreTest::PostingStoreTest()
+ : _gen_handler(),
+ _config(make_config(GetParam())),
+ _status(),
+ _value_store(true, _config.get_dictionary_config()),
+ _store(_value_store.get_dictionary(), _status, _config)
+{
+ _store.resizeBitVectors(lid_limit, lid_limit);
+}
+
+PostingStoreTest::~PostingStoreTest()
+{
+ _value_store.get_dictionary().clear_all_posting_lists([this](EntryRef posting_idx) { _store.clear(posting_idx); });
+ _store.clearBuilder();
+ inc_generation();
+}
+
+void
+PostingStoreTest::populate(uint32_t sequence_length)
+{
+ auto& store = _store;
+ auto& dictionary = _value_store.get_dictionary();
+ std::vector<EntryRef> refs;
+ for (int i = 0; i < 9000; ++i) {
+ refs.emplace_back(add_sequence(i + 6, i + 6 + sequence_length));
+ }
+ dictionary.update_posting_list(_value_store.insert(1), _value_store.get_comparator(), [this, sequence_length](EntryRef) { return add_sequence(4, 4 + sequence_length); });
+ dictionary.update_posting_list(_value_store.insert(2), _value_store.get_comparator(), [this, sequence_length](EntryRef) { return add_sequence(5, 5 + sequence_length); });
+ for (int i = 9000; i < 11000; ++i) {
+ refs.emplace_back(add_sequence(i + 6, i + 6 + sequence_length));
+ }
+ for (auto& ref : refs) {
+ store.clear(ref);
+ }
+ inc_generation();
+}
+
+EntryRef
+PostingStoreTest::get_posting_ref(int key)
+{
+ auto &dictionary = _value_store.get_dictionary();
+ auto root = dictionary.get_frozen_root();
+ return dictionary.find_posting_list(_value_store.make_comparator(key), root).second;
+}
+
+void
+PostingStoreTest::test_compact_sequence(uint32_t sequence_length)
+{
+ populate(sequence_length);
+ auto &store = _store;
+ EntryRef old_ref1 = get_posting_ref(1);
+ EntryRef old_ref2 = get_posting_ref(2);
+ auto usage_before = store.getMemoryUsage();
+ bool compaction_done = false;
+ search::CompactionStrategy compaction_strategy(0.05, 0.2);
+ for (uint32_t pass = 0; pass < 45; ++pass) {
+ store.update_stat();
+ auto guard = _gen_handler.takeGuard();
+ if (!store.consider_compact_worst_buffers(compaction_strategy)) {
+ compaction_done = true;
+ break;
+ }
+ inc_generation();
+ EXPECT_FALSE(store.consider_compact_worst_buffers(compaction_strategy));
+ guard = GenerationHandler::Guard();
+ inc_generation();
+ }
+ EXPECT_TRUE(compaction_done);
+ EntryRef ref1 = get_posting_ref(1);
+ EntryRef ref2 = get_posting_ref(2);
+ EXPECT_NE(old_ref1, ref1);
+ EXPECT_NE(old_ref2, ref2);
+ EXPECT_EQ(make_exp_sequence(4, 4 + sequence_length), get_sequence(ref1));
+ EXPECT_EQ(make_exp_sequence(5, 5 + sequence_length), get_sequence(ref2));
+ auto usage_after = store.getMemoryUsage();
+ EXPECT_GT(usage_before.deadBytes(), usage_after.deadBytes());
+}
+
+void
+PostingStoreTest::test_compact_btree_nodes(uint32_t sequence_length)
+{
+ populate(sequence_length);
+ auto &store = _store;
+ EntryRef old_ref1 = get_posting_ref(1);
+ EntryRef old_ref2 = get_posting_ref(2);
+ auto usage_before = store.getMemoryUsage();
+ bool compaction_done = false;
+ search::CompactionStrategy compaction_strategy(0.05, 0.2);
+ for (uint32_t pass = 0; pass < 55; ++pass) {
+ store.update_stat();
+ auto guard = _gen_handler.takeGuard();
+ if (!store.consider_compact_worst_btree_nodes(compaction_strategy)) {
+ compaction_done = true;
+ break;
+ }
+ inc_generation();
+ EXPECT_FALSE(store.consider_compact_worst_btree_nodes(compaction_strategy));
+ guard = GenerationHandler::Guard();
+ inc_generation();
+ }
+ EXPECT_TRUE(compaction_done);
+ EntryRef ref1 = get_posting_ref(1);
+ EntryRef ref2 = get_posting_ref(2);
+ EXPECT_EQ(old_ref1, ref1);
+ EXPECT_EQ(old_ref2, ref2);
+ EXPECT_EQ(make_exp_sequence(4, 4 + sequence_length), get_sequence(ref1));
+ EXPECT_EQ(make_exp_sequence(5, 5 + sequence_length), get_sequence(ref2));
+ auto usage_after = store.getMemoryUsage();
+ if (sequence_length < huge_sequence_length ||
+ !_config.getEnableBitVectors() ||
+ !_config.getEnableOnlyBitVector()) {
+ EXPECT_GT(usage_before.deadBytes(), usage_after.deadBytes());
+ } else {
+ EXPECT_EQ(usage_before.deadBytes(), usage_after.deadBytes());
+ }
+}
+
+VESPA_GTEST_INSTANTIATE_TEST_SUITE_P(PostingStoreMultiTest,
+ PostingStoreTest,
+ testing::Values(PostingStoreSetup(false, false), PostingStoreSetup(true, false), PostingStoreSetup(true, true)), testing::PrintToStringParamName());
+
+TEST_P(PostingStoreTest, require_that_nodes_for_multiple_small_btrees_are_compacted)
+{
+ test_compact_btree_nodes(30);
+}
+
+TEST_P(PostingStoreTest, require_that_nodes_for_multiple_large_btrees_are_compacted)
+{
+ test_compact_btree_nodes(huge_sequence_length);
+}
+
+TEST_P(PostingStoreTest, require_that_short_arrays_are_compacted)
+{
+ test_compact_sequence(4);
+}
+
+TEST_P(PostingStoreTest, require_that_btree_roots_are_compacted)
+{
+ test_compact_sequence(10);
+}
+
+TEST_P(PostingStoreTest, require_that_bitvectors_are_compacted)
+{
+ test_compact_sequence(huge_sequence_length);
+}
+
+}
+
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/searchlib/src/tests/attribute/stringattribute/stringattribute_test.cpp b/searchlib/src/tests/attribute/stringattribute/stringattribute_test.cpp
index aaae2772687..84ed72d42ca 100644
--- a/searchlib/src/tests/attribute/stringattribute/stringattribute_test.cpp
+++ b/searchlib/src/tests/attribute/stringattribute/stringattribute_test.cpp
@@ -386,6 +386,7 @@ testSingleValue(Attribute & svsa, Config &cfg)
TEST("testSingleValue")
{
EXPECT_EQUAL(24u, sizeof(AttributeVector::SearchContext));
+ EXPECT_EQUAL(24u, sizeof(StringSearchHelper));
EXPECT_EQUAL(56u, sizeof(SingleValueStringAttribute::StringSingleImplSearchContext));
{
Config cfg(BasicType::STRING, CollectionType::SINGLE);
@@ -408,4 +409,89 @@ TEST("testSingleValue")
}
}
+TEST("test uncased match") {
+ QueryTermUCS4 xyz("xyz", QueryTermSimple::Type::WORD);
+ StringSearchHelper helper(xyz, false);
+ EXPECT_FALSE(helper.isCased());
+ EXPECT_FALSE(helper.isPrefix());
+ EXPECT_FALSE(helper.isRegex());
+ EXPECT_FALSE(helper.isMatch("axyz"));
+ EXPECT_FALSE(helper.isMatch("xyza"));
+ EXPECT_TRUE(helper.isMatch("xyz"));
+ EXPECT_TRUE(helper.isMatch("XyZ"));
+ EXPECT_FALSE(helper.isMatch("Xy"));
+}
+
+TEST("test uncased prefix match") {
+ QueryTermUCS4 xyz("xyz", QueryTermSimple::Type::PREFIXTERM);
+ StringSearchHelper helper(xyz, false);
+ EXPECT_FALSE(helper.isCased());
+ EXPECT_TRUE(helper.isPrefix());
+ EXPECT_FALSE(helper.isRegex());
+ EXPECT_FALSE(helper.isMatch("axyz"));
+ EXPECT_TRUE(helper.isMatch("xyza"));
+ EXPECT_TRUE(helper.isMatch("xYza"));
+ EXPECT_TRUE(helper.isMatch("xyz"));
+ EXPECT_TRUE(helper.isMatch("XyZ"));
+ EXPECT_FALSE(helper.isMatch("Xy"));
+}
+
+TEST("test cased match") {
+ QueryTermUCS4 xyz("XyZ", QueryTermSimple::Type::WORD);
+ StringSearchHelper helper(xyz, true);
+ EXPECT_TRUE(helper.isCased());
+ EXPECT_FALSE(helper.isPrefix());
+ EXPECT_FALSE(helper.isRegex());
+ EXPECT_FALSE(helper.isMatch("aXyZ"));
+ EXPECT_FALSE(helper.isMatch("XyZa"));
+ EXPECT_FALSE(helper.isMatch("xyz"));
+ EXPECT_FALSE(helper.isMatch("Xyz"));
+ EXPECT_TRUE(helper.isMatch("XyZ"));
+ EXPECT_FALSE(helper.isMatch("Xy"));
+}
+
+TEST("test cased prefix match") {
+ QueryTermUCS4 xyz("XyZ", QueryTermSimple::Type::PREFIXTERM);
+ StringSearchHelper helper(xyz, true);
+ EXPECT_TRUE(helper.isCased());
+ EXPECT_TRUE(helper.isPrefix());
+ EXPECT_FALSE(helper.isRegex());
+ EXPECT_FALSE(helper.isMatch("aXyZ"));
+ EXPECT_TRUE(helper.isMatch("XyZa"));
+ EXPECT_FALSE(helper.isMatch("xyZa"));
+ EXPECT_FALSE(helper.isMatch("xyz"));
+ EXPECT_FALSE(helper.isMatch("Xyz"));
+ EXPECT_TRUE(helper.isMatch("XyZ"));
+ EXPECT_FALSE(helper.isMatch("Xy"));
+}
+
+TEST("test uncased regex match") {
+ QueryTermUCS4 xyz("x[yY]+Z", QueryTermSimple::Type::REGEXP);
+ StringSearchHelper helper(xyz, false);
+ EXPECT_FALSE(helper.isCased());
+ EXPECT_FALSE(helper.isPrefix());
+ EXPECT_TRUE(helper.isRegex());
+ EXPECT_TRUE(helper.isMatch("axyZ"));
+ EXPECT_TRUE(helper.isMatch("xyZa"));
+ EXPECT_TRUE(helper.isMatch("xyZ"));
+ EXPECT_TRUE(helper.isMatch("xyz"));
+ EXPECT_FALSE(helper.isMatch("xyaZ"));
+ EXPECT_FALSE(helper.isMatch("xy"));
+}
+
+TEST("test cased regex match") {
+ QueryTermUCS4 xyz("x[Y]+Z", QueryTermSimple::Type::REGEXP);
+ StringSearchHelper helper(xyz, true);
+ EXPECT_TRUE(helper.isCased());
+ EXPECT_FALSE(helper.isPrefix());
+ EXPECT_TRUE(helper.isRegex());
+ EXPECT_TRUE(helper.isMatch("axYZ"));
+ EXPECT_TRUE(helper.isMatch("xYZa"));
+ EXPECT_FALSE(helper.isMatch("xyZ"));
+ EXPECT_TRUE(helper.isMatch("xYZ"));
+ EXPECT_FALSE(helper.isMatch("xYz"));
+ EXPECT_FALSE(helper.isMatch("xaYZ"));
+ EXPECT_FALSE(helper.isMatch("xY"));
+}
+
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp b/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp
index f1d910f2635..9621b93fd37 100644
--- a/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp
+++ b/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp
@@ -230,7 +230,7 @@ public:
const search::tensor::DistanceFunction *distance_function() const override {
- static search::tensor::SquaredEuclideanDistance<double> my_dist_fun;
+ static search::tensor::SquaredEuclideanDistance my_dist_fun(vespalib::eval::CellType::DOUBLE);
return &my_dist_fun;
}
};
diff --git a/searchlib/src/tests/query/streaming_query_test.cpp b/searchlib/src/tests/query/streaming_query_test.cpp
index 5ce34cfcc3f..21d55b485c0 100644
--- a/searchlib/src/tests/query/streaming_query_test.cpp
+++ b/searchlib/src/tests/query/streaming_query_test.cpp
@@ -708,6 +708,24 @@ TEST("require that we do not break the stack on bad query") {
EXPECT_FALSE(term.isValid());
}
+TEST("a unhandled sameElement stack") {
+ const char * stack = "\022\002\026xyz_abcdefghij_xyzxyzxQ\001\vxxxxxx_name\034xxxxxx_xxxx_xxxxxxx_xxxxxxxxE\002\005delta\b<0.00393";
+ vespalib::stringref stackDump(stack);
+ EXPECT_EQUAL(85u, stackDump.size());
+ AllowRewrite empty;
+ const Query q(empty, stackDump);
+ EXPECT_TRUE(q.valid());
+ const QueryNode & root = q.getRoot();
+ auto sameElement = dynamic_cast<const SameElementQueryNode *>(&root);
+ EXPECT_TRUE(sameElement != nullptr);
+ EXPECT_EQUAL(2u, sameElement->size());
+ EXPECT_EQUAL("xyz_abcdefghij_xyzxyzx", sameElement->getIndex());
+ auto term0 = dynamic_cast<const QueryTerm *>((*sameElement)[0].get());
+ EXPECT_TRUE(term0 != nullptr);
+ auto term1 = dynamic_cast<const QueryTerm *>((*sameElement)[1].get());
+ EXPECT_TRUE(term1 != nullptr);
+}
+
namespace {
void verifyQueryTermNode(const vespalib::string & index, const QueryNode *node) {
EXPECT_TRUE(dynamic_cast<const QueryTerm *>(node) != nullptr);
diff --git a/searchlib/src/tests/tensor/distance_functions/distance_functions_test.cpp b/searchlib/src/tests/tensor/distance_functions/distance_functions_test.cpp
index ee0a2aff80e..701f4c91ff2 100644
--- a/searchlib/src/tests/tensor/distance_functions/distance_functions_test.cpp
+++ b/searchlib/src/tests/tensor/distance_functions/distance_functions_test.cpp
@@ -10,10 +10,12 @@
LOG_SETUP("distance_function_test");
using namespace search::tensor;
+using vespalib::eval::Int8Float;
using vespalib::eval::TypedCells;
using search::attribute::DistanceMetric;
-TypedCells t(const std::vector<double> &v) { return TypedCells(v); }
+template <typename T>
+TypedCells t(const std::vector<T> &v) { return TypedCells(v); }
void verify_geo_miles(const DistanceFunction *dist_fun,
const std::vector<double> &p1,
@@ -58,6 +60,31 @@ TEST(DistanceFunctionsTest, euclidean_gives_expected_score)
EXPECT_EQ(threshold, 0.25);
}
+TEST(DistanceFunctionsTest, euclidean_int8_smoketest)
+{
+ auto ct = vespalib::eval::CellType::INT8;
+
+ auto euclid = make_distance_function(DistanceMetric::Euclidean, ct);
+
+ std::vector<double> p00{0.0, 0.0, 0.0};
+ std::vector<Int8Float> p0{0.0, 0.0, 0.0};
+ std::vector<Int8Float> p1{1.0, 0.0, 0.0};
+ std::vector<Int8Float> p5{0.0,-1.0, 0.0};
+ std::vector<Int8Float> p7{-1.0, 2.0, -2.0};
+
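+ // Distances are squared Euclidean, e.g. p0 vs p7: 1 + 4 + 4 = 9, and p1 vs p7: 4 + 4 + 4 = 12.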
+ EXPECT_DOUBLE_EQ(1.0, euclid->calc(t(p0), t(p1)));
+ EXPECT_DOUBLE_EQ(1.0, euclid->calc(t(p0), t(p5)));
+ EXPECT_DOUBLE_EQ(9.0, euclid->calc(t(p0), t(p7)));
+
+ EXPECT_DOUBLE_EQ(2.0, euclid->calc(t(p1), t(p5)));
+ EXPECT_DOUBLE_EQ(12.0, euclid->calc(t(p1), t(p7)));
+ EXPECT_DOUBLE_EQ(14.0, euclid->calc(t(p5), t(p7)));
+
+ EXPECT_DOUBLE_EQ(1.0, euclid->calc(t(p00), t(p1)));
+ EXPECT_DOUBLE_EQ(1.0, euclid->calc(t(p00), t(p5)));
+ EXPECT_DOUBLE_EQ(9.0, euclid->calc(t(p00), t(p7)));
+}
+
TEST(DistanceFunctionsTest, angular_gives_expected_score)
{
auto ct = vespalib::eval::CellType::DOUBLE;
@@ -212,6 +239,11 @@ TEST(DistanceFunctionsTest, hamming_gives_expected_score)
EXPECT_DOUBLE_EQ(threshold, 0.5);
threshold = hamming->convert_threshold(1.0);
EXPECT_DOUBLE_EQ(threshold, 1.0);
+
+ std::vector<Int8Float> bytes_a = { 0, 1, 2, 4, 8, 16, 32, 64, -128, 0, 1, 2, 4, 8, 16, 32, 64, -128, 0, 1, 2 };
+ std::vector<Int8Float> bytes_b = { 1, 2, 2, 4, 8, 16, 32, 65, -128, 0, 1, 0, 4, 8, 16, 32, 64, -128, 0, 1, -1 };
+ // expected per-byte bit differences: 1, 2, 1, 1, 7 -> total hamming distance 12
+ EXPECT_EQ(hamming->calc(TypedCells(bytes_a), TypedCells(bytes_b)), 12.0);
}
TEST(GeoDegreesTest, gives_expected_score)
diff --git a/searchlib/src/tests/tensor/hnsw_index/hnsw_index_test.cpp b/searchlib/src/tests/tensor/hnsw_index/hnsw_index_test.cpp
index 20dc55df329..3f7ec140781 100644
--- a/searchlib/src/tests/tensor/hnsw_index/hnsw_index_test.cpp
+++ b/searchlib/src/tests/tensor/hnsw_index/hnsw_index_test.cpp
@@ -51,7 +51,6 @@ struct LevelGenerator : public RandomLevelGenerator {
};
using FloatVectors = MyDocVectorAccess<float>;
-using FloatSqEuclideanDistance = SquaredEuclideanDistance<float>;
using HnswIndexUP = std::unique_ptr<HnswIndex>;
class HnswIndexTest : public ::testing::Test {
@@ -79,7 +78,7 @@ public:
void init(bool heuristic_select_neighbors) {
auto generator = std::make_unique<LevelGenerator>();
level_generator = generator.get();
- index = std::make_unique<HnswIndex>(vectors, std::make_unique<FloatSqEuclideanDistance>(),
+ index = std::make_unique<HnswIndex>(vectors, std::make_unique<SquaredEuclideanDistance>(vespalib::eval::CellType::FLOAT),
std::move(generator),
HnswIndex::Config(5, 2, 10, 0, heuristic_select_neighbors));
}
diff --git a/searchlib/src/tests/tensor/hnsw_index/stress_hnsw_mt.cpp b/searchlib/src/tests/tensor/hnsw_index/stress_hnsw_mt.cpp
index 8e3bb95a776..154950822ee 100644
--- a/searchlib/src/tests/tensor/hnsw_index/stress_hnsw_mt.cpp
+++ b/searchlib/src/tests/tensor/hnsw_index/stress_hnsw_mt.cpp
@@ -117,7 +117,7 @@ public:
}
};
-using FloatSqEuclideanDistance = SquaredEuclideanDistance<float>;
+using FloatSqEuclideanDistance = SquaredEuclideanDistanceHW<float>;
using HnswIndexUP = std::unique_ptr<HnswIndex>;
class Stressor : public ::testing::Test {
diff --git a/searchlib/src/vespa/searchlib/attribute/configconverter.cpp b/searchlib/src/vespa/searchlib/attribute/configconverter.cpp
index ced5ccfdc85..6387edc588b 100644
--- a/searchlib/src/vespa/searchlib/attribute/configconverter.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/configconverter.cpp
@@ -3,13 +3,11 @@
#include "configconverter.h"
using namespace vespa::config::search;
-using namespace search;
+namespace search::attribute {
namespace {
-using search::attribute::CollectionType;
-using search::attribute::BasicType;
using vespalib::eval::ValueType;
using vespalib::eval::CellType;
@@ -81,9 +79,18 @@ convert_dictionary(const AttributesConfig::Attribute::Dictionary & dictionary) {
return DictionaryConfig(convert(dictionary.type), convert(dictionary.match));
}
+Config::Match
+convertMatch(AttributesConfig::Attribute::Match match_cfg) {
+ switch (match_cfg) {
+ case AttributesConfig::Attribute::Match::CASED:
+ return Config::Match::CASED;
+ case AttributesConfig::Attribute::Match::UNCASED:
+ return Config::Match::UNCASED;
+ }
+ assert(false);
}
-namespace search::attribute {
+}
Config
ConfigConverter::convert(const AttributesConfig::Attribute & cfg)
@@ -106,6 +113,7 @@ ConfigConverter::convert(const AttributesConfig::Attribute & cfg)
predicateParams.setDensePostingListThreshold(cfg.densepostinglistthreshold);
retval.setPredicateParams(predicateParams);
retval.set_dictionary_config(convert_dictionary(cfg.dictionary));
+ retval.set_match(convertMatch(cfg.match));
using CfgDm = AttributesConfig::Attribute::Distancemetric;
DistanceMetric dm(DistanceMetric::Euclidean);
switch (cfg.distancemetric) {
diff --git a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp
index 8bb4e3fc47c..d867ae9f211 100644
--- a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp
@@ -259,7 +259,9 @@ EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::update_posting_list(Inde
assert(itr.valid() && itr.getKey() == idx);
EntryRef old_posting_idx(itr.getData());
EntryRef new_posting_idx = updater(old_posting_idx);
- dict.thaw(itr);
+ // Note: Needs review when porting to other platforms
+ // Assumes that other CPUs observe stores from this CPU in order
+ std::atomic_thread_fence(std::memory_order_release);
itr.writeData(new_posting_idx.ref());
if constexpr (has_hash_dictionary) {
auto find_result = this->_hash_dict.find(this->_hash_dict.get_default_comparator(), idx);
@@ -295,7 +297,9 @@ EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::normalize_posting_lists(
EntryRef new_posting_idx = normalize(old_posting_idx);
if (new_posting_idx != old_posting_idx) {
changed = true;
- dict.thaw(itr);
+ // Note: Needs review when porting to other platforms
+ // Assumes that other CPUs observe stores from this CPU in order
+ std::atomic_thread_fence(std::memory_order_release);
itr.writeData(new_posting_idx.ref());
if constexpr (has_hash_dictionary) {
auto find_result = this->_hash_dict.find(this->_hash_dict.get_default_comparator(), itr.getKey());
diff --git a/searchlib/src/vespa/searchlib/attribute/enumcomparator.cpp b/searchlib/src/vespa/searchlib/attribute/enumcomparator.cpp
index a428ac77d87..74eae51729f 100644
--- a/searchlib/src/vespa/searchlib/attribute/enumcomparator.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/enumcomparator.cpp
@@ -5,17 +5,10 @@
namespace search {
-namespace {
-
-FoldedStringCompare _strCmp;
-
-}
-
template <typename EntryT>
-EnumStoreComparator<EntryT>::EnumStoreComparator(const DataStoreType& data_store, const EntryT& fallback_value, bool prefix)
+EnumStoreComparator<EntryT>::EnumStoreComparator(const DataStoreType& data_store, const EntryT& fallback_value)
: ParentType(data_store, fallback_value)
{
- (void) prefix;
}
template <typename EntryT>
@@ -31,52 +24,50 @@ EnumStoreComparator<EntryT>::equal_helper(const EntryT& lhs, const EntryT& rhs)
return vespalib::datastore::UniqueStoreComparatorHelper<EntryT>::equal(lhs, rhs);
}
-EnumStoreStringComparator::EnumStoreStringComparator(const DataStoreType& data_store)
- : ParentType(data_store, nullptr)
-{
-}
-
-EnumStoreStringComparator::EnumStoreStringComparator(const DataStoreType& data_store, const char* fallback_value)
- : ParentType(data_store, fallback_value)
+EnumStoreStringComparator::EnumStoreStringComparator(const DataStoreType& data_store, bool fold)
+ : ParentType(data_store, nullptr),
+ _fold(fold),
+ _prefix(false),
+ _prefix_len(0)
{
}
-EnumStoreFoldedStringComparator::EnumStoreFoldedStringComparator(const DataStoreType& data_store, bool prefix)
- : ParentType(data_store, nullptr),
- _prefix(prefix),
- _prefix_len(0u)
+EnumStoreStringComparator::EnumStoreStringComparator(const DataStoreType& data_store, bool fold, const char* fallback_value)
+ : ParentType(data_store, fallback_value),
+ _fold(fold),
+ _prefix(false),
+ _prefix_len(0)
{
}
-EnumStoreFoldedStringComparator::EnumStoreFoldedStringComparator(const DataStoreType& data_store,
- const char* fallback_value, bool prefix)
+EnumStoreStringComparator::EnumStoreStringComparator(const DataStoreType& data_store, bool fold, const char* fallback_value, bool prefix)
: ParentType(data_store, fallback_value),
+ _fold(fold),
_prefix(prefix),
- _prefix_len(0u)
+ _prefix_len(0)
{
if (use_prefix()) {
- _prefix_len = _strCmp.size(fallback_value);
+ _prefix_len = FoldedStringCompare::size(fallback_value);
}
}
-int
-EnumStoreStringComparator::compare(const char* lhs, const char* rhs)
-{
- return _strCmp.compare(lhs, rhs);
-}
+bool
+EnumStoreStringComparator::less(const vespalib::datastore::EntryRef lhs, const vespalib::datastore::EntryRef rhs) const {
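+ // _fold selects case-folded vs. exact comparison; use_prefix() restricts it to the prefix length derived from the fallback value.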
+ return _fold
+ ? (use_prefix()
+ ? (FoldedStringCompare::compareFoldedPrefix(get(lhs), get(rhs), _prefix_len) < 0)
+ : (FoldedStringCompare::compareFolded(get(lhs), get(rhs)) < 0))
+ : (use_prefix()
+ ? (FoldedStringCompare::comparePrefix(get(lhs), get(rhs), _prefix_len) < 0)
+ : (FoldedStringCompare::compare(get(lhs), get(rhs)) < 0));
-int
-EnumStoreFoldedStringComparator::compare_folded(const char* lhs, const char* rhs)
-{
- return _strCmp.compareFolded(lhs, rhs);
}
-int
-EnumStoreFoldedStringComparator::compare_folded_prefix(const char* lhs,
- const char* rhs,
- size_t prefix_len)
-{
- return _strCmp.compareFoldedPrefix(lhs, rhs, prefix_len);
+bool
+EnumStoreStringComparator::equal(const vespalib::datastore::EntryRef lhs, const vespalib::datastore::EntryRef rhs) const {
+ return _fold
+ ? (FoldedStringCompare::compareFolded(get(lhs), get(rhs)) == 0)
+ : (FoldedStringCompare::compare(get(lhs), get(rhs)) == 0);
}
template class EnumStoreComparator<int8_t>;
diff --git a/searchlib/src/vespa/searchlib/attribute/enumcomparator.h b/searchlib/src/vespa/searchlib/attribute/enumcomparator.h
index 0215053ba3a..e822e08cfdc 100644
--- a/searchlib/src/vespa/searchlib/attribute/enumcomparator.h
+++ b/searchlib/src/vespa/searchlib/attribute/enumcomparator.h
@@ -18,7 +18,7 @@ public:
using ParentType = vespalib::datastore::UniqueStoreComparator<EntryT, IEnumStore::InternalIndex>;
using DataStoreType = typename ParentType::DataStoreType;
- EnumStoreComparator(const DataStoreType& data_store, const EntryT& fallback_value, bool prefix = false);
+ EnumStoreComparator(const DataStoreType& data_store, const EntryT& fallback_value);
EnumStoreComparator(const DataStoreType& data_store);
static bool equal_helper(const EntryT& lhs, const EntryT& rhs);
@@ -34,79 +34,32 @@ class EnumStoreStringComparator : public vespalib::datastore::UniqueStoreStringC
protected:
using ParentType = vespalib::datastore::UniqueStoreStringComparator<IEnumStore::InternalIndex>;
using DataStoreType = ParentType::DataStoreType;
+private:
using ParentType::get;
- static int compare(const char* lhs, const char* rhs);
-
public:
- EnumStoreStringComparator(const DataStoreType& data_store);
+ EnumStoreStringComparator(const DataStoreType& data_store)
+ : EnumStoreStringComparator(data_store, false)
+ {}
+ EnumStoreStringComparator(const DataStoreType& data_store, bool fold);
/**
* Creates a comparator using the given low-level data store and that uses the
* given value during compare if the enum index is invalid.
*/
- EnumStoreStringComparator(const DataStoreType& data_store, const char* fallback_value);
-
- static bool equal(const char* lhs, const char* rhs) {
- return compare(lhs, rhs) == 0;
- }
-
- bool less(const vespalib::datastore::EntryRef lhs, const vespalib::datastore::EntryRef rhs) const override {
- return compare(get(lhs), get(rhs)) < 0;
- }
- bool equal(const vespalib::datastore::EntryRef lhs, const vespalib::datastore::EntryRef rhs) const override {
- return compare(get(lhs), get(rhs)) == 0;
- }
-};
-
-
-/**
- * Less-than comparator used for folded-only comparing strings stored in an enum store.
- *
- * The input string values are first folded, then compared.
- * There is NO fallback if they are equal.
- */
-class EnumStoreFoldedStringComparator : public EnumStoreStringComparator {
+ EnumStoreStringComparator(const DataStoreType& data_store, const char* fallback_value)
+ : EnumStoreStringComparator(data_store, false, fallback_value)
+ {}
+ EnumStoreStringComparator(const DataStoreType& data_store, bool fold, const char* fallback_value);
+ EnumStoreStringComparator(const DataStoreType& data_store, bool fold, const char* fallback_value, bool prefix);
+
+ bool less(const vespalib::datastore::EntryRef lhs, const vespalib::datastore::EntryRef rhs) const override;
+ bool equal(const vespalib::datastore::EntryRef lhs, const vespalib::datastore::EntryRef rhs) const override;
private:
- using ParentType = EnumStoreStringComparator;
-
- bool _prefix;
- size_t _prefix_len;
-
inline bool use_prefix() const { return _prefix; }
- static int compare_folded(const char* lhs, const char* rhs);
- static int compare_folded_prefix(const char* lhs, const char* rhs, size_t prefix_len);
-
-public:
- /**
- * Creates a comparator using the given low-level data store.
- *
- * @param prefix whether we should perform prefix compare.
- */
- EnumStoreFoldedStringComparator(const DataStoreType& data_store, bool prefix = false);
-
- /**
- * Creates a comparator using the given low-level data store and that uses the
- * given value during compare if the enum index is invalid.
- *
- * @param prefix whether we should perform prefix compare.
- */
- EnumStoreFoldedStringComparator(const DataStoreType& data_store,
- const char* fallback_value, bool prefix = false);
-
- static bool equal(const char* lhs, const char* rhs) {
- return compare_folded(lhs, rhs) == 0;
- }
-
- bool less(const vespalib::datastore::EntryRef lhs, const vespalib::datastore::EntryRef rhs) const override {
- if (use_prefix()) {
- return compare_folded_prefix(get(lhs), get(rhs), _prefix_len) < 0;
- }
- return compare_folded(get(lhs), get(rhs)) < 0;
- }
- bool equal(const vespalib::datastore::EntryRef lhs, const vespalib::datastore::EntryRef rhs) const override {
- return compare_folded(get(lhs), get(rhs)) == 0;
- }
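+ // _fold: compare case-folded; _prefix/_prefix_len: restrict comparison to the fallback value's prefix (used for prefix and regex lookups).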
+ const bool _fold;
+ const bool _prefix;
+ uint32_t _prefix_len;
};
extern template class EnumStoreComparator<int8_t>;
diff --git a/searchlib/src/vespa/searchlib/attribute/enumstore.cpp b/searchlib/src/vespa/searchlib/attribute/enumstore.cpp
index 3a008557676..b07515a675e 100644
--- a/searchlib/src/vespa/searchlib/attribute/enumstore.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/enumstore.cpp
@@ -21,9 +21,7 @@ EnumStoreT<const char*>::write_value(BufferWriter& writer, Index idx) const
template <>
ssize_t
-EnumStoreT<const char*>::load_unique_value(const void* src,
- size_t available,
- Index& idx)
+EnumStoreT<const char*>::load_unique_value(const void* src, size_t available, Index& idx)
{
const char* value = static_cast<const char*>(src);
size_t slen = strlen(value);
@@ -42,9 +40,9 @@ EnumStoreT<const char*>::load_unique_value(const void* src,
}
std::unique_ptr<vespalib::datastore::IUniqueStoreDictionary>
-make_enum_store_dictionary(IEnumStore &store, bool has_postings, const search::DictionaryConfig & dict_cfg,
- std::unique_ptr<vespalib::datastore::EntryComparator> compare,
- std::unique_ptr<vespalib::datastore::EntryComparator> folded_compare)
+make_enum_store_dictionary(IEnumStore &store, bool has_postings, const DictionaryConfig & dict_cfg,
+ std::unique_ptr<EntryComparator> compare,
+ std::unique_ptr<EntryComparator> folded_compare)
{
using NoBTreeDictionary = vespalib::datastore::NoBTreeDictionary;
using ShardedHashMap = vespalib::datastore::ShardedHashMap;
@@ -53,9 +51,9 @@ make_enum_store_dictionary(IEnumStore &store, bool has_postings, const search::D
return std::make_unique<EnumStoreFoldedDictionary>(store, std::move(compare), std::move(folded_compare));
} else {
switch (dict_cfg.getType()) {
- case search::DictionaryConfig::Type::HASH:
+ case DictionaryConfig::Type::HASH:
return std::make_unique<EnumStoreDictionary<NoBTreeDictionary, ShardedHashMap>>(store, std::move(compare));
- case search::DictionaryConfig::Type::BTREE_AND_HASH:
+ case DictionaryConfig::Type::BTREE_AND_HASH:
return std::make_unique<EnumStoreDictionary<EnumPostingTree, ShardedHashMap>>(store, std::move(compare));
default:
return std::make_unique<EnumStoreDictionary<EnumPostingTree>>(store, std::move(compare));
diff --git a/searchlib/src/vespa/searchlib/attribute/enumstore.h b/searchlib/src/vespa/searchlib/attribute/enumstore.h
index 4f5454c15b2..326e0916039 100644
--- a/searchlib/src/vespa/searchlib/attribute/enumstore.h
+++ b/searchlib/src/vespa/searchlib/attribute/enumstore.h
@@ -8,7 +8,6 @@
#include "i_enum_store.h"
#include "loadedenumvalue.h"
#include <vespa/searchcommon/common/dictionary_config.h>
-#include <vespa/searchlib/util/foldedstringcompare.h>
#include <vespa/vespalib/btree/btreenode.h>
#include <vespa/vespalib/btree/btreenodeallocator.h>
#include <vespa/vespalib/btree/btree.h>
@@ -43,19 +42,23 @@ public:
vespalib::datastore::UniqueStoreStringAllocator<InternalIndex>,
vespalib::datastore::UniqueStoreAllocator<EntryT, InternalIndex>>;
using UniqueStoreType = vespalib::datastore::UniqueStore<EntryT, InternalIndex, ComparatorType, AllocatorType>;
- using FoldedComparatorType = std::conditional_t<std::is_same_v<EntryT, const char *>,
- EnumStoreFoldedStringComparator,
- ComparatorType>;
+
using EntryType = EntryT;
using EnumStoreType = EnumStoreT<EntryT>;
using EntryRef = vespalib::datastore::EntryRef;
+ using EntryComparator = vespalib::datastore::EntryComparator;
using generation_t = vespalib::GenerationHandler::generation_t;
private:
- UniqueStoreType _store;
- IEnumStoreDictionary* _dict;
- vespalib::MemoryUsage _cached_values_memory_usage;
+ UniqueStoreType _store;
+ IEnumStoreDictionary* _dict;
+ bool _is_folded;
+ ComparatorType _comparator;
+ ComparatorType _foldedComparator;
+ vespalib::MemoryUsage _cached_values_memory_usage;
vespalib::AddressSpace _cached_values_address_space_usage;
+ vespalib::MemoryUsage _cached_dictionary_btree_usage;
+ vespalib::MemoryUsage _cached_dictionary_hash_usage;
EnumStoreT(const EnumStoreT & rhs) = delete;
EnumStoreT & operator=(const EnumStoreT & rhs) = delete;
@@ -73,6 +76,8 @@ private:
ssize_t load_unique_values_internal(const void* src, size_t available, IndexVector& idx);
ssize_t load_unique_value(const void* src, size_t available, Index& idx);
+ std::unique_ptr<EntryComparator> allocate_optionally_folded_comparator(bool folded) const;
+ ComparatorType make_optionally_folded_comparator(bool folded) const;
public:
EnumStoreT(bool has_postings, const search::DictionaryConfig & dict_cfg);
~EnumStoreT() override;
@@ -86,6 +91,7 @@ public:
}
uint32_t get_num_uniques() const override { return _dict->get_num_uniques(); }
+ bool is_folded() const { return _is_folded; }
vespalib::MemoryUsage get_values_memory_usage() const override { return _store.get_allocator().get_data_store().getMemoryUsage(); }
vespalib::MemoryUsage get_dictionary_memory_usage() const override { return _dict->get_memory_usage(); }
@@ -174,33 +180,29 @@ public:
return BatchUpdater(*this);
}
- ComparatorType make_comparator() const {
- return ComparatorType(_store.get_data_store());
+ const EntryComparator & get_comparator() const {
+ return _comparator;
}
ComparatorType make_comparator(const EntryType& fallback_value) const {
return ComparatorType(_store.get_data_store(), fallback_value);
}
- FoldedComparatorType make_folded_comparator() const {
- return FoldedComparatorType(_store.get_data_store());
- }
-
- FoldedComparatorType make_folded_comparator(const EntryType& fallback_value, bool prefix = false) const {
- return FoldedComparatorType(_store.get_data_store(), fallback_value, prefix);
+ const EntryComparator & get_folded_comparator() const {
+ return _foldedComparator;
}
void write_value(BufferWriter& writer, Index idx) const override;
bool is_folded_change(Index idx1, Index idx2) const override;
bool find_enum(EntryType value, IEnumStore::EnumHandle& e) const;
- std::vector<IEnumStore::EnumHandle> find_folded_enums(EntryType value) const;
Index insert(EntryType value);
bool find_index(EntryType value, Index& idx) const;
void free_unused_values() override;
void free_unused_values(const IndexSet& to_remove);
vespalib::MemoryUsage update_stat() override;
- std::unique_ptr<EnumIndexRemapper> consider_compact(const CompactionStrategy& compaction_strategy) override;
- std::unique_ptr<EnumIndexRemapper> compact_worst(bool compact_memory, bool compact_address_space) override;
+ std::unique_ptr<EnumIndexRemapper> consider_compact_values(const CompactionStrategy& compaction_strategy) override;
+ std::unique_ptr<EnumIndexRemapper> compact_worst_values(bool compact_memory, bool compact_address_space) override;
+ bool consider_compact_dictionary(const CompactionStrategy& compaction_strategy) override;
uint64_t get_compaction_count() const override {
return _store.get_data_store().get_compaction_count();
}
@@ -208,14 +210,26 @@ public:
_store.get_allocator().get_data_store().inc_compaction_count();
}
std::unique_ptr<Enumerator> make_enumerator() const override;
- std::unique_ptr<vespalib::datastore::EntryComparator> allocate_comparator() const override;
-};
-
-std::unique_ptr<vespalib::datastore::IUniqueStoreDictionary>
-make_enum_store_dictionary(IEnumStore &store, bool has_postings, const search::DictionaryConfig & dict_cfg,
- std::unique_ptr<vespalib::datastore::EntryComparator> compare,
- std::unique_ptr<vespalib::datastore::EntryComparator> folded_compare);
+ std::unique_ptr<EntryComparator> allocate_comparator() const override;
+ // Methods below are only relevant for strings, and are templated so they are only instantiated on demand.
+ template <typename Type>
+ ComparatorType
+ make_folded_comparator(const Type& fallback_value) const {
+ return ComparatorType(_store.get_data_store(), is_folded(), fallback_value);
+ }
+ template<typename Type>
+ ComparatorType
+ make_folded_comparator_prefix(const Type& fallback_value) const {
+ return ComparatorType(_store.get_data_store(), is_folded(), fallback_value, true);
+ }
+ template<typename Type>
+ std::vector<IEnumStore::EnumHandle>
+ find_folded_enums(Type value) const {
+ auto cmp = make_folded_comparator(value);
+ return _dict->find_matching_enums(cmp);
+ }
+};
template <>
void
@@ -223,9 +237,7 @@ EnumStoreT<const char*>::write_value(BufferWriter& writer, Index idx) const;
template <>
ssize_t
-EnumStoreT<const char*>::load_unique_value(const void* src,
- size_t available,
- Index& idx);
+EnumStoreT<const char*>::load_unique_value(const void* src, size_t available, Index& idx);
}
diff --git a/searchlib/src/vespa/searchlib/attribute/enumstore.hpp b/searchlib/src/vespa/searchlib/attribute/enumstore.hpp
index 99d9a7fdfee..90bcf92a103 100644
--- a/searchlib/src/vespa/searchlib/attribute/enumstore.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/enumstore.hpp
@@ -22,6 +22,13 @@
namespace search {
+using vespalib::datastore::EntryComparator;
+
+std::unique_ptr<vespalib::datastore::IUniqueStoreDictionary>
+make_enum_store_dictionary(IEnumStore &store, bool has_postings, const search::DictionaryConfig & dict_cfg,
+ std::unique_ptr<EntryComparator> compare,
+ std::unique_ptr<EntryComparator> folded_compare);
+
template <typename EntryT>
void EnumStoreT<EntryT>::free_value_if_unused(Index idx, IndexSet& unused)
{
@@ -34,9 +41,7 @@ void EnumStoreT<EntryT>::free_value_if_unused(Index idx, IndexSet& unused)
template <typename EntryT>
ssize_t
-EnumStoreT<EntryT>::load_unique_values_internal(const void* src,
- size_t available,
- IndexVector& idx)
+EnumStoreT<EntryT>::load_unique_values_internal(const void* src, size_t available, IndexVector& idx)
{
size_t left = available;
const char* p = static_cast<const char*>(src);
@@ -66,17 +71,18 @@ EnumStoreT<EntryT>::load_unique_value(const void* src, size_t available, Index&
}
template <typename EntryT>
-EnumStoreT<EntryT>::EnumStoreT(bool has_postings, const search::DictionaryConfig & dict_cfg)
+EnumStoreT<EntryT>::EnumStoreT(bool has_postings, const DictionaryConfig & dict_cfg)
: _store(),
_dict(),
+ _is_folded(dict_cfg.getMatch() == DictionaryConfig::Match::UNCASED),
+ _comparator(_store.get_data_store()),
+ _foldedComparator(make_optionally_folded_comparator(is_folded())),
_cached_values_memory_usage(),
_cached_values_address_space_usage(0, 0, (1ull << 32))
{
_store.set_dictionary(make_enum_store_dictionary(*this, has_postings, dict_cfg,
- std::make_unique<ComparatorType>(_store.get_data_store()),
- (has_string_type() ?
- std::make_unique<FoldedComparatorType>(_store.get_data_store()) :
- std::unique_ptr<vespalib::datastore::EntryComparator>())));
+ allocate_comparator(),
+ allocate_optionally_folded_comparator(is_folded())));
_dict = static_cast<IEnumStoreDictionary*>(&_store.get_dictionary());
}
@@ -150,7 +156,7 @@ template <class EntryT>
bool
EnumStoreT<EntryT>::is_folded_change(Index idx1, Index idx2) const
{
- auto cmp = make_folded_comparator();
+ const auto & cmp = get_folded_comparator();
assert(!cmp.less(idx2, idx1));
return cmp.less(idx1, idx2);
}
@@ -169,14 +175,6 @@ EnumStoreT<EntryT>::find_enum(EntryType value, IEnumStore::EnumHandle& e) const
}
template <typename EntryT>
-std::vector<IEnumStore::EnumHandle>
-EnumStoreT<EntryT>::find_folded_enums(EntryType value) const
-{
- auto cmp = make_folded_comparator(value);
- return _dict->find_matching_enums(cmp);
-}
-
-template <typename EntryT>
bool
EnumStoreT<EntryT>::find_index(EntryType value, Index& idx) const
{
@@ -188,16 +186,14 @@ template <typename EntryT>
void
EnumStoreT<EntryT>::free_unused_values()
{
- auto cmp = make_comparator();
- _dict->free_unused_values(cmp);
+ _dict->free_unused_values(get_comparator());
}
template <typename EntryT>
void
EnumStoreT<EntryT>::free_unused_values(const IndexSet& to_remove)
{
- auto cmp = make_comparator();
- _dict->free_unused_values(to_remove, cmp);
+ _dict->free_unused_values(to_remove, get_comparator());
}
template <typename EntryT>
@@ -214,14 +210,17 @@ EnumStoreT<EntryT>::update_stat()
auto &store = _store.get_allocator().get_data_store();
_cached_values_memory_usage = store.getMemoryUsage();
_cached_values_address_space_usage = store.getAddressSpaceUsage();
+ _cached_dictionary_btree_usage = _dict->get_btree_memory_usage();
+ _cached_dictionary_hash_usage = _dict->get_hash_memory_usage();
auto retval = _cached_values_memory_usage;
- retval.merge(_dict->get_memory_usage());
+ retval.merge(_cached_dictionary_btree_usage);
+ retval.merge(_cached_dictionary_hash_usage);
return retval;
}
template <typename EntryT>
std::unique_ptr<IEnumStore::EnumIndexRemapper>
-EnumStoreT<EntryT>::consider_compact(const CompactionStrategy& compaction_strategy)
+EnumStoreT<EntryT>::consider_compact_values(const CompactionStrategy& compaction_strategy)
{
size_t used_bytes = _cached_values_memory_usage.usedBytes();
size_t dead_bytes = _cached_values_memory_usage.deadBytes();
@@ -230,19 +229,41 @@ EnumStoreT<EntryT>::consider_compact(const CompactionStrategy& compaction_strate
bool compact_memory = compaction_strategy.should_compact_memory(used_bytes, dead_bytes);
bool compact_address_space = compaction_strategy.should_compact_address_space(used_address_space, dead_address_space);
if (compact_memory || compact_address_space) {
- return compact_worst(compact_memory, compact_address_space);
+ return compact_worst_values(compact_memory, compact_address_space);
}
return std::unique_ptr<IEnumStore::EnumIndexRemapper>();
}
template <typename EntryT>
std::unique_ptr<IEnumStore::EnumIndexRemapper>
-EnumStoreT<EntryT>::compact_worst(bool compact_memory, bool compact_address_space)
+EnumStoreT<EntryT>::compact_worst_values(bool compact_memory, bool compact_address_space)
{
return _store.compact_worst(compact_memory, compact_address_space);
}
template <typename EntryT>
+bool
+EnumStoreT<EntryT>::consider_compact_dictionary(const CompactionStrategy& compaction_strategy)
+{
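+ // Compact at most one structure per call (btree nodes first, then the hash map), and only when no dictionary buffers are still held.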
+ if (_dict->has_held_buffers()) {
+ return false;
+ }
+ if (compaction_strategy.should_compact_memory(_cached_dictionary_btree_usage.usedBytes(),
+ _cached_dictionary_btree_usage.deadBytes()))
+ {
+ _dict->compact_worst(true, false);
+ return true;
+ }
+ if (compaction_strategy.should_compact_memory(_cached_dictionary_hash_usage.usedBytes(),
+ _cached_dictionary_hash_usage.deadBytes()))
+ {
+ _dict->compact_worst(false, true);
+ return true;
+ }
+ return false;
+}
+
+template <typename EntryT>
std::unique_ptr<IEnumStore::Enumerator>
EnumStoreT<EntryT>::make_enumerator() const
{
@@ -250,10 +271,28 @@ EnumStoreT<EntryT>::make_enumerator() const
}
template <typename EntryT>
-std::unique_ptr<vespalib::datastore::EntryComparator>
+std::unique_ptr<EntryComparator>
EnumStoreT<EntryT>::allocate_comparator() const
{
return std::make_unique<ComparatorType>(_store.get_data_store());
}
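+// A separate folded comparator is only allocated for string enum stores configured as uncased; other stores get an empty pointer.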
+template <typename EntryT>
+std::unique_ptr<EntryComparator>
+EnumStoreT<EntryT>::allocate_optionally_folded_comparator(bool folded) const
+{
+ return (has_string_type() && folded)
+ ? std::make_unique<ComparatorType>(_store.get_data_store(), true)
+ : std::unique_ptr<EntryComparator>();
+}
+
+template <typename EntryT>
+typename EnumStoreT<EntryT>::ComparatorType
+EnumStoreT<EntryT>::make_optionally_folded_comparator(bool folded) const
+{
+ return (has_string_type() && folded)
+ ? ComparatorType(_store.get_data_store(), true)
+ : ComparatorType(_store.get_data_store());
+}
+
}
diff --git a/searchlib/src/vespa/searchlib/attribute/fixedsourceselector.cpp b/searchlib/src/vespa/searchlib/attribute/fixedsourceselector.cpp
index 9f01ce0cf9f..db3df4648c7 100644
--- a/searchlib/src/vespa/searchlib/attribute/fixedsourceselector.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/fixedsourceselector.cpp
@@ -35,7 +35,7 @@ FixedSourceSelector::Iterator::Iterator(const FixedSourceSelector & sourceSelect
FixedSourceSelector::FixedSourceSelector(queryeval::Source defaultSource,
const vespalib::string & attrBaseFileName,
uint32_t initialNumDocs) :
- SourceSelector(defaultSource, AttributeVector::SP(new SourceStore(attrBaseFileName, getConfig()))),
+ SourceSelector(defaultSource, std::make_shared<SourceStore>(attrBaseFileName, getConfig())),
_source(static_cast<SourceStore &>(*_realSource))
{
if (initialNumDocs != std::numeric_limits<uint32_t>::max()) {
@@ -44,16 +44,13 @@ FixedSourceSelector::FixedSourceSelector(queryeval::Source defaultSource,
}
}
-FixedSourceSelector::~FixedSourceSelector()
-{
-}
+FixedSourceSelector::~FixedSourceSelector() = default;
FixedSourceSelector::UP
-FixedSourceSelector::cloneAndSubtract(const vespalib::string & attrBaseFileName,
- uint32_t diff)
+FixedSourceSelector::cloneAndSubtract(const vespalib::string & attrBaseFileName, uint32_t diff)
{
queryeval::Source newDefault = getNewSource(getDefaultSource(), diff);
- FixedSourceSelector::UP selector(new FixedSourceSelector(newDefault, attrBaseFileName, _source.getNumDocs()-1));
+ auto selector = std::make_unique<FixedSourceSelector>(newDefault, attrBaseFileName, _source.getNumDocs()-1);
for (uint32_t docId = 0; docId < _source.getNumDocs(); ++docId) {
queryeval::Source src = _source.get(docId);
src = getNewSource(src, diff);
@@ -78,10 +75,9 @@ FixedSourceSelector::load(const vespalib::string & baseFileName, uint32_t curren
(uint32_t) info->header()._defaultSource, defaultSource,
baseFileName.c_str());
}
- FixedSourceSelector::UP selector(new FixedSourceSelector(
- defaultSource,
- info->header()._baseFileName,
- std::numeric_limits<uint32_t>::max()));
+ auto selector = std::make_unique<FixedSourceSelector>(defaultSource,
+ info->header()._baseFileName,
+ std::numeric_limits<uint32_t>::max());
selector->setBaseId(info->header()._baseId);
selector->_source.load();
uint32_t cappedSources = capSelector(selector->_source, selector->getDefaultSource());
diff --git a/searchlib/src/vespa/searchlib/attribute/fixedsourceselector.h b/searchlib/src/vespa/searchlib/attribute/fixedsourceselector.h
index 055d216dda1..9cffe5b001b 100644
--- a/searchlib/src/vespa/searchlib/attribute/fixedsourceselector.h
+++ b/searchlib/src/vespa/searchlib/attribute/fixedsourceselector.h
@@ -30,7 +30,7 @@ public:
FixedSourceSelector(queryeval::Source defaultSource,
const vespalib::string & attrBaseFileName,
uint32_t initialNumDocs = 0);
- ~FixedSourceSelector();
+ ~FixedSourceSelector() override;
FixedSourceSelector::UP cloneAndSubtract(const vespalib::string & attrBaseFileName, uint32_t diff);
static FixedSourceSelector::UP load(const vespalib::string & baseFileName, uint32_t currentId);
diff --git a/searchlib/src/vespa/searchlib/attribute/i_enum_store.h b/searchlib/src/vespa/searchlib/attribute/i_enum_store.h
index 55cd4f88c25..6d714ec25ba 100644
--- a/searchlib/src/vespa/searchlib/attribute/i_enum_store.h
+++ b/searchlib/src/vespa/searchlib/attribute/i_enum_store.h
@@ -64,8 +64,9 @@ public:
virtual vespalib::MemoryUsage get_values_memory_usage() const = 0;
virtual vespalib::MemoryUsage get_dictionary_memory_usage() const = 0;
virtual vespalib::MemoryUsage update_stat() = 0;
- virtual std::unique_ptr<EnumIndexRemapper> consider_compact(const CompactionStrategy& compaction_strategy) = 0;
- virtual std::unique_ptr<EnumIndexRemapper> compact_worst(bool compact_memory, bool compact_address_space) = 0;
+ virtual std::unique_ptr<EnumIndexRemapper> consider_compact_values(const CompactionStrategy& compaction_strategy) = 0;
+ virtual std::unique_ptr<EnumIndexRemapper> compact_worst_values(bool compact_memory, bool compact_address_space) = 0;
+ virtual bool consider_compact_dictionary(const CompactionStrategy& compaction_strategy) = 0;
virtual uint64_t get_compaction_count() const = 0;
// Should only be used by unit tests.
virtual void inc_compaction_count() = 0;
diff --git a/searchlib/src/vespa/searchlib/attribute/ipostinglistattributebase.h b/searchlib/src/vespa/searchlib/attribute/ipostinglistattributebase.h
index 4876f43cd5d..3936ed381be 100644
--- a/searchlib/src/vespa/searchlib/attribute/ipostinglistattributebase.h
+++ b/searchlib/src/vespa/searchlib/attribute/ipostinglistattributebase.h
@@ -4,6 +4,8 @@
#include <vespa/searchcommon/attribute/iattributevector.h>
+namespace search { class CompactionStrategy; }
+
namespace vespalib { class MemoryUsage; }
namespace search::attribute {
@@ -23,6 +25,8 @@ public:
virtual void forwardedShrinkLidSpace(uint32_t newSize) = 0;
virtual vespalib::MemoryUsage getMemoryUsage() const = 0;
+ virtual bool consider_compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy) = 0;
+ virtual bool consider_compact_worst_buffers(const CompactionStrategy& compaction_strategy) = 0;
};
} // namespace search::attribute
diff --git a/searchlib/src/vespa/searchlib/attribute/multienumattribute.hpp b/searchlib/src/vespa/searchlib/attribute/multienumattribute.hpp
index d320ecfaa85..6646446c3a7 100644
--- a/searchlib/src/vespa/searchlib/attribute/multienumattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/multienumattribute.hpp
@@ -7,6 +7,7 @@
#include "multienumattributesaver.h"
#include "load_utils.h"
#include "enum_store_loaders.h"
+#include "ipostinglistattributebase.h"
#include <vespa/vespalib/stllike/hashtable.hpp>
#include <vespa/vespalib/datastore/unique_store_remapper.h>
@@ -175,7 +176,7 @@ MultiValueEnumAttribute<B, M>::onCommit()
this->incGeneration();
this->updateStat(true);
}
- auto remapper = this->_enumStore.consider_compact(this->getConfig().getCompactionStrategy());
+ auto remapper = this->_enumStore.consider_compact_values(this->getConfig().getCompactionStrategy());
if (remapper) {
multienumattribute::remap_enum_store_refs(*remapper, *this, this->_mvMapping);
remapper->done();
@@ -183,6 +184,21 @@ MultiValueEnumAttribute<B, M>::onCommit()
this->incGeneration();
this->updateStat(true);
}
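+ // Also consider compacting the enum store dictionary and the posting list btree nodes/buffers; each compaction bumps the generation and refreshes stats.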
+ if (this->_enumStore.consider_compact_dictionary(this->getConfig().getCompactionStrategy())) {
+ this->incGeneration();
+ this->updateStat(true);
+ }
+ auto *pab = this->getIPostingListAttributeBase();
+ if (pab != nullptr) {
+ if (pab->consider_compact_worst_btree_nodes(this->getConfig().getCompactionStrategy())) {
+ this->incGeneration();
+ this->updateStat(true);
+ }
+ if (pab->consider_compact_worst_buffers(this->getConfig().getCompactionStrategy())) {
+ this->incGeneration();
+ this->updateStat(true);
+ }
+ }
}
template <typename B, typename M>
diff --git a/searchlib/src/vespa/searchlib/attribute/multinumericpostattribute.hpp b/searchlib/src/vespa/searchlib/attribute/multinumericpostattribute.hpp
index 697fa33a060..e3fbed1ecc2 100644
--- a/searchlib/src/vespa/searchlib/attribute/multinumericpostattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/multinumericpostattribute.hpp
@@ -18,7 +18,7 @@ template <typename B, typename M>
void
MultiValueNumericPostingAttribute<B, M>::mergeMemoryStats(vespalib::MemoryUsage & total)
{
- total.merge(this->getPostingList().getMemoryUsage());
+ total.merge(this->getPostingList().update_stat());
}
template <typename B, typename M>
@@ -27,11 +27,10 @@ MultiValueNumericPostingAttribute<B, M>::applyValueChanges(const DocIndices& doc
EnumStoreBatchUpdater& updater)
{
using PostingChangeComputer = PostingChangeComputerT<WeightedIndex, PostingMap>;
- EnumStore & enumStore = this->getEnumStore();
- auto comp = enumStore.make_comparator();
EnumIndexMapper mapper;
- PostingMap changePost(PostingChangeComputer::compute(this->getMultiValueMapping(), docIndices, comp, mapper));
+ PostingMap changePost(PostingChangeComputer::compute(this->getMultiValueMapping(), docIndices,
+ this->getEnumStore().get_comparator(), mapper));
this->updatePostings(changePost);
MultiValueNumericEnumAttribute<B, M>::applyValueChanges(docIndices, updater);
}
diff --git a/searchlib/src/vespa/searchlib/attribute/multistringattribute.hpp b/searchlib/src/vespa/searchlib/attribute/multistringattribute.hpp
index 003fb9fa6b7..f1508cfa631 100644
--- a/searchlib/src/vespa/searchlib/attribute/multistringattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/multistringattribute.hpp
@@ -120,11 +120,11 @@ StringTemplSearchContext(QueryTermSimpleUP qTerm, const AttrType & toBeSearched)
this->_plsc = static_cast<attribute::IPostingListSearchContext *>(this);
if (this->valid()) {
if (this->isPrefix()) {
- auto comp = enumStore.make_folded_comparator(queryTerm()->getTerm(), true);
+ auto comp = enumStore.make_folded_comparator_prefix(queryTerm()->getTerm());
lookupRange(comp, comp);
} else if (this->isRegex()) {
vespalib::string prefix(vespalib::RegexpUtil::get_prefix(this->queryTerm()->getTerm()));
- auto comp = enumStore.make_folded_comparator(prefix.c_str(), true);
+ auto comp = enumStore.make_folded_comparator_prefix(prefix.c_str());
lookupRange(comp, comp);
} else {
auto comp = enumStore.make_folded_comparator(queryTerm()->getTerm());
diff --git a/searchlib/src/vespa/searchlib/attribute/multistringpostattribute.hpp b/searchlib/src/vespa/searchlib/attribute/multistringpostattribute.hpp
index d427df69903..381d5b6339b 100644
--- a/searchlib/src/vespa/searchlib/attribute/multistringpostattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/multistringpostattribute.hpp
@@ -31,7 +31,7 @@ class StringEnumIndexMapper : public EnumIndexMapper
public:
StringEnumIndexMapper(IEnumStoreDictionary & dictionary) : _dictionary(dictionary) { }
IEnumStore::Index map(IEnumStore::Index original) const override;
- virtual bool hasFold() const override { return true; }
+ bool hasFold() const override { return true; }
private:
IEnumStoreDictionary& _dictionary;
};
@@ -44,10 +44,10 @@ applyValueChanges(const DocIndices& docIndices, EnumStoreBatchUpdater &updater)
using PostingChangeComputer = PostingChangeComputerT<WeightedIndex, PostingMap>;
EnumStore &enumStore(this->getEnumStore());
IEnumStoreDictionary& dictionary(enumStore.get_dictionary());
- auto compare = enumStore.make_folded_comparator();
StringEnumIndexMapper mapper(dictionary);
- PostingMap changePost(PostingChangeComputer::compute(this->getMultiValueMapping(), docIndices, compare, mapper));
+ PostingMap changePost(PostingChangeComputer::compute(this->getMultiValueMapping(), docIndices,
+ enumStore.get_folded_comparator(), mapper));
this->updatePostings(changePost);
MultiValueStringAttributeT<B, T>::applyValueChanges(docIndices, updater);
}
@@ -63,7 +63,7 @@ template <typename B, typename T>
void
MultiValueStringPostingAttributeT<B, T>::mergeMemoryStats(vespalib::MemoryUsage &total)
{
- total.merge(this->_postingList.getMemoryUsage());
+ total.merge(this->_postingList.update_stat());
}
template <typename B, typename T>
diff --git a/searchlib/src/vespa/searchlib/attribute/postinglistattribute.cpp b/searchlib/src/vespa/searchlib/attribute/postinglistattribute.cpp
index 3cf51f43613..e988091db45 100644
--- a/searchlib/src/vespa/searchlib/attribute/postinglistattribute.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/postinglistattribute.cpp
@@ -102,7 +102,7 @@ PostingListAttributeBase<P>::handle_load_posting_lists_and_update_enum_store(enu
template <typename P>
void
PostingListAttributeBase<P>::updatePostings(PostingMap &changePost,
- vespalib::datastore::EntryComparator &cmp)
+ const vespalib::datastore::EntryComparator &cmp)
{
for (auto& elem : changePost) {
EnumIndex idx = elem.first.getEnumIdx();
@@ -145,7 +145,7 @@ PostingListAttributeBase<P>::
clearPostings(attribute::IAttributeVector::EnumHandle eidx,
uint32_t fromLid,
uint32_t toLid,
- vespalib::datastore::EntryComparator &cmp)
+ const vespalib::datastore::EntryComparator &cmp)
{
PostingChange<P> postings;
@@ -180,6 +180,20 @@ PostingListAttributeBase<P>::getMemoryUsage() const
return _postingList.getMemoryUsage();
}
+template <typename P>
+bool
+PostingListAttributeBase<P>::consider_compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy)
+{
+ return _postingList.consider_compact_worst_btree_nodes(compaction_strategy);
+}
+
+template <typename P>
+bool
+PostingListAttributeBase<P>::consider_compact_worst_buffers(const CompactionStrategy& compaction_strategy)
+{
+ return _postingList.consider_compact_worst_buffers(compaction_strategy);
+}
+
template <typename P, typename LoadedVector, typename LoadedValueType,
typename EnumStoreType>
PostingListAttributeSubBase<P, LoadedVector, LoadedValueType, EnumStoreType>::
@@ -213,7 +227,7 @@ handle_load_posting_lists(LoadedVector& loaded)
LoadedValueType prev = value.getValue();
for (size_t i(0), m(loaded.size()); i < m; i++, loaded.next()) {
value = loaded.read();
- if (FoldedComparatorType::equal_helper(prev, value.getValue())) {
+ if (ComparatorType::equal_helper(prev, value.getValue())) {
// for single value attributes loaded[numDocs] is used
// for default value but we don't want to add an
// invalid docId to the posting list.
@@ -267,8 +281,7 @@ void
PostingListAttributeSubBase<P, LoadedVector, LoadedValueType, EnumStoreType>::
updatePostings(PostingMap &changePost)
{
- auto cmp = _es.make_folded_comparator();
- updatePostings(changePost, cmp);
+ updatePostings(changePost, _es.get_folded_comparator());
}
@@ -276,11 +289,9 @@ template <typename P, typename LoadedVector, typename LoadedValueType,
typename EnumStoreType>
void
PostingListAttributeSubBase<P, LoadedVector, LoadedValueType, EnumStoreType>::
-clearPostings(attribute::IAttributeVector::EnumHandle eidx,
- uint32_t fromLid, uint32_t toLid)
+clearPostings(attribute::IAttributeVector::EnumHandle eidx, uint32_t fromLid, uint32_t toLid)
{
- auto cmp = _es.make_folded_comparator();
- clearPostings(eidx, fromLid, toLid, cmp);
+ clearPostings(eidx, fromLid, toLid, _es.get_folded_comparator());
}
diff --git a/searchlib/src/vespa/searchlib/attribute/postinglistattribute.h b/searchlib/src/vespa/searchlib/attribute/postinglistattribute.h
index 03348f08486..8cd9d1d6bbd 100644
--- a/searchlib/src/vespa/searchlib/attribute/postinglistattribute.h
+++ b/searchlib/src/vespa/searchlib/attribute/postinglistattribute.h
@@ -52,11 +52,11 @@ protected:
IEnumStoreDictionary& _dictionary;
PostingListAttributeBase(AttributeVector &attr, IEnumStore &enumStore);
- virtual ~PostingListAttributeBase();
+ ~PostingListAttributeBase() override;
virtual void updatePostings(PostingMap & changePost) = 0;
- void updatePostings(PostingMap &changePost, vespalib::datastore::EntryComparator &cmp);
+ void updatePostings(PostingMap &changePost, const vespalib::datastore::EntryComparator &cmp);
void clearAllPostings();
void disableFreeLists() { _postingList.disableFreeLists(); }
void disableElemHoldList() { _postingList.disableElemHoldList(); }
@@ -64,10 +64,12 @@ protected:
bool forwardedOnAddDoc(DocId doc, size_t wantSize, size_t wantCapacity);
void clearPostings(attribute::IAttributeVector::EnumHandle eidx, uint32_t fromLid,
- uint32_t toLid, vespalib::datastore::EntryComparator &cmp);
+ uint32_t toLid, const vespalib::datastore::EntryComparator &cmp);
void forwardedShrinkLidSpace(uint32_t newSize) override;
- virtual vespalib::MemoryUsage getMemoryUsage() const override;
+ vespalib::MemoryUsage getMemoryUsage() const override;
+ bool consider_compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy) override;
+ bool consider_compact_worst_buffers(const CompactionStrategy& compaction_strategy) override;
public:
const PostingList & getPostingList() const { return _postingList; }
@@ -84,7 +86,7 @@ public:
using EntryRef = vespalib::datastore::EntryRef;
using EnumIndex = IEnumStore::Index;
using EnumStore = EnumStoreType;
- using FoldedComparatorType = typename EnumStore::FoldedComparatorType;
+ using ComparatorType = typename EnumStore::ComparatorType;
using LoadedEnumAttributeVector = attribute::LoadedEnumAttributeVector;
using PostingList = typename Parent::PostingList;
using PostingMap = typename Parent::PostingMap;
@@ -102,7 +104,7 @@ private:
public:
PostingListAttributeSubBase(AttributeVector &attr, EnumStore &enumStore);
- virtual ~PostingListAttributeSubBase();
+ ~PostingListAttributeSubBase() override;
void handle_load_posting_lists(LoadedVector &loaded);
void updatePostings(PostingMap &changePost) override;
diff --git a/searchlib/src/vespa/searchlib/attribute/postinglistsearchcontext.h b/searchlib/src/vespa/searchlib/attribute/postinglistsearchcontext.h
index 22e9987aa9e..ceb8926bbe3 100644
--- a/searchlib/src/vespa/searchlib/attribute/postinglistsearchcontext.h
+++ b/searchlib/src/vespa/searchlib/attribute/postinglistsearchcontext.h
@@ -180,18 +180,11 @@ class StringPostingSearchContext
: public PostingSearchContext<BaseSC, PostingListFoldedSearchContextT<DataT>, AttrT>
{
private:
- using AggregationTraits = PostingListTraits<DataT>;
- using PostingList = typename AggregationTraits::PostingList;
using Parent = PostingSearchContext<BaseSC, PostingListFoldedSearchContextT<DataT>, AttrT>;
using RegexpUtil = vespalib::RegexpUtil;
using QueryTermSimpleUP = typename Parent::QueryTermSimpleUP;
- using Parent::_toBeSearched;
using Parent::_enumStore;
- using Parent::isRegex;
- using Parent::getRegex;
- bool useThis(const PostingListSearchContext::DictionaryConstIterator & it) const override {
- return isRegex() ? (getRegex().valid() ? getRegex().partial_match(_enumStore.get_value(it.getKey())) : false ) : true;
- }
+ bool useThis(const PostingListSearchContext::DictionaryConstIterator & it) const override;
public:
StringPostingSearchContext(QueryTermSimpleUP qTerm, bool useBitVector, const AttrT &toBeSearched);
};
@@ -201,10 +194,7 @@ class NumericPostingSearchContext
: public PostingSearchContext<BaseSC, PostingListSearchContextT<DataT>, AttrT>
{
private:
- typedef PostingSearchContext<BaseSC, PostingListSearchContextT<DataT>, AttrT> Parent;
- typedef PostingListTraits<DataT> AggregationTraits;
- typedef typename AggregationTraits::PostingList PostingList;
- typedef typename Parent::EnumStore::ComparatorType ComparatorType;
+ using Parent = PostingSearchContext<BaseSC, PostingListSearchContextT<DataT>, AttrT>;
typedef typename AttrT::T BaseType;
using Params = attribute::SearchContextParams;
using QueryTermSimpleUP = typename Parent::QueryTermSimpleUP;
@@ -284,11 +274,11 @@ StringPostingSearchContext(QueryTermSimpleUP qTerm, bool useBitVector, const Att
if (this->valid()) {
if (this->isPrefix()) {
- auto comp = _enumStore.make_folded_comparator(this->queryTerm()->getTerm(), true);
+ auto comp = _enumStore.make_folded_comparator_prefix(this->queryTerm()->getTerm());
this->lookupRange(comp, comp);
} else if (this->isRegex()) {
vespalib::string prefix(RegexpUtil::get_prefix(this->queryTerm()->getTerm()));
- auto comp = _enumStore.make_folded_comparator(prefix.c_str(), true);
+ auto comp = _enumStore.make_folded_comparator_prefix(prefix.c_str());
this->lookupRange(comp, comp);
} else {
auto comp = _enumStore.make_folded_comparator(this->queryTerm()->getTerm());
@@ -300,6 +290,18 @@ StringPostingSearchContext(QueryTermSimpleUP qTerm, bool useBitVector, const Att
}
}
+template <typename BaseSC, typename AttrT, typename DataT>
+bool
+StringPostingSearchContext<BaseSC, AttrT, DataT>::useThis(const PostingListSearchContext::DictionaryConstIterator & it) const {
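+ // Regex terms filter dictionary entries by partial match; cased terms re-check the stored value, since the dictionary range was looked up with the folded comparator.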
+ if ( this->isRegex() ) {
+ return this->getRegex().valid()
+ ? this->getRegex().partial_match(_enumStore.get_value(it.getKey()))
+ : false;
+ } else if ( this->isCased() ) {
+ return this->isMatch(_enumStore.get_value(it.getKey()));
+ }
+ return true;
+}
template <typename BaseSC, typename AttrT, typename DataT>
NumericPostingSearchContext<BaseSC, AttrT, DataT>::
diff --git a/searchlib/src/vespa/searchlib/attribute/postingstore.cpp b/searchlib/src/vespa/searchlib/attribute/postingstore.cpp
index fee2520b132..6c62e650345 100644
--- a/searchlib/src/vespa/searchlib/attribute/postingstore.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/postingstore.cpp
@@ -5,6 +5,7 @@
#include <vespa/searchcommon/attribute/config.h>
#include <vespa/searchcommon/attribute/status.h>
#include <vespa/vespalib/btree/btreeiterator.hpp>
+#include <vespa/vespalib/btree/btreerootbase.cpp>
#include <vespa/vespalib/datastore/datastore.hpp>
#include <vespa/vespalib/datastore/buffer_type.hpp>
@@ -31,7 +32,9 @@ PostingStoreBase2::PostingStoreBase2(IEnumStoreDictionary& dictionary, Status &s
_bvs(),
_dictionary(dictionary),
_status(status),
- _bvExtraBytes(0)
+ _bvExtraBytes(0),
+ _cached_allocator_memory_usage(),
+ _cached_store_memory_usage()
{
}
@@ -627,6 +630,141 @@ PostingStore<DataT>::getMemoryUsage() const
return usage;
}
+template <typename DataT>
+vespalib::MemoryUsage
+PostingStore<DataT>::update_stat()
+{
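+ // Cache allocator and store memory usage so the consider_compact_worst_* heuristics can reuse numbers from the latest stat update.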
+ vespalib::MemoryUsage usage;
+ _cached_allocator_memory_usage = _allocator.getMemoryUsage();
+ _cached_store_memory_usage = _store.getMemoryUsage();
+ usage.merge(_cached_allocator_memory_usage);
+ usage.merge(_cached_store_memory_usage);
+ uint64_t bvExtraBytes = _bvExtraBytes;
+ usage.incUsedBytes(bvExtraBytes);
+ usage.incAllocatedBytes(bvExtraBytes);
+ return usage;
+}
+
+template <typename DataT>
+void
+PostingStore<DataT>::move_btree_nodes(EntryRef ref)
+{
+ if (ref.valid()) {
+ RefType iRef(ref);
+ uint32_t typeId = getTypeId(iRef);
+ uint32_t clusterSize = getClusterSize(typeId);
+ if (clusterSize == 0) {
+ if (isBitVector(typeId)) {
+ BitVectorEntry *bve = getWBitVectorEntry(iRef);
+ RefType iRef2(bve->_tree);
+ if (iRef2.valid()) {
+ assert(isBTree(iRef2));
+ BTreeType *tree = getWTreeEntry(iRef2);
+ tree->move_nodes(_allocator);
+ }
+ } else {
+ BTreeType *tree = getWTreeEntry(iRef);
+ tree->move_nodes(_allocator);
+ }
+ }
+ }
+}
+
+template <typename DataT>
+typename PostingStore<DataT>::EntryRef
+PostingStore<DataT>::move(EntryRef ref)
+{
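+ // Copy the entry into a non-compacting buffer when its current buffer is being compacted; a btree embedded in a bitvector entry is copied first. Otherwise the original reference is returned unchanged.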
+ if (!ref.valid()) {
+ return EntryRef();
+ }
+ RefType iRef(ref);
+ uint32_t typeId = getTypeId(iRef);
+ uint32_t clusterSize = getClusterSize(typeId);
+ if (clusterSize == 0) {
+ if (isBitVector(typeId)) {
+ BitVectorEntry *bve = getWBitVectorEntry(iRef);
+ RefType iRef2(bve->_tree);
+ if (iRef2.valid()) {
+ assert(isBTree(iRef2));
+ if (_store.getCompacting(iRef2)) {
+ BTreeType *tree = getWTreeEntry(iRef2);
+ auto ref_and_ptr = allocBTreeCopy(*tree);
+ tree->prepare_hold();
+ bve->_tree = ref_and_ptr.ref;
+ }
+ }
+ if (!_store.getCompacting(ref)) {
+ return ref;
+ }
+ return allocBitVectorCopy(*bve).ref;
+ } else {
+ if (!_store.getCompacting(ref)) {
+ return ref;
+ }
+ BTreeType *tree = getWTreeEntry(iRef);
+ auto ref_and_ptr = allocBTreeCopy(*tree);
+ tree->prepare_hold();
+ return ref_and_ptr.ref;
+ }
+ }
+ if (!_store.getCompacting(ref)) {
+ return ref;
+ }
+ const KeyDataType *shortArray = getKeyDataEntry(iRef, clusterSize);
+ return allocKeyDataCopy(shortArray, clusterSize).ref;
+}
+
+template <typename DataT>
+void
+PostingStore<DataT>::compact_worst_btree_nodes()
+{
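+ // Move btree nodes out of the worst node buffers, re-linking every posting list tree via the dictionary before the old buffers are handed to hold.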
+ auto to_hold = this->start_compact_worst_btree_nodes();
+ _dictionary.normalize_posting_lists([this](EntryRef posting_idx) -> EntryRef
+ {
+ move_btree_nodes(posting_idx);
+ return posting_idx;
+ });
+ this->finish_compact_worst_btree_nodes(to_hold);
+}
+
+template <typename DataT>
+void
+PostingStore<DataT>::compact_worst_buffers()
+{
+ auto to_hold = this->start_compact_worst_buffers();
+ _dictionary.normalize_posting_lists([this](EntryRef posting_idx) -> EntryRef
+ { return move(posting_idx); });
+ this->finishCompact(to_hold);
+}
+
+template <typename DataT>
+bool
+PostingStore<DataT>::consider_compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy)
+{
+ if (_allocator.getNodeStore().has_held_buffers()) {
+ return false;
+ }
+ if (compaction_strategy.should_compact_memory(_cached_allocator_memory_usage.usedBytes(), _cached_allocator_memory_usage.deadBytes())) {
+ compact_worst_btree_nodes();
+ return true;
+ }
+ return false;
+}
+
+template <typename DataT>
+bool
+PostingStore<DataT>::consider_compact_worst_buffers(const CompactionStrategy& compaction_strategy)
+{
+ if (_store.has_held_buffers()) {
+ return false;
+ }
+ if (compaction_strategy.should_compact_memory(_cached_store_memory_usage.usedBytes(), _cached_store_memory_usage.deadBytes())) {
+ compact_worst_buffers();
+ return true;
+ }
+ return false;
+}
+
template class PostingStore<BTreeNoLeafData>;
template class PostingStore<int32_t>;
diff --git a/searchlib/src/vespa/searchlib/attribute/postingstore.h b/searchlib/src/vespa/searchlib/attribute/postingstore.h
index 5ee1465d933..d1d86dc9f91 100644
--- a/searchlib/src/vespa/searchlib/attribute/postingstore.h
+++ b/searchlib/src/vespa/searchlib/attribute/postingstore.h
@@ -47,6 +47,8 @@ protected:
IEnumStoreDictionary& _dictionary;
Status &_status;
uint64_t _bvExtraBytes;
+ vespalib::MemoryUsage _cached_allocator_memory_usage;
+ vespalib::MemoryUsage _cached_store_memory_usage;
static constexpr uint32_t BUFFERTYPE_BITVECTOR = 9u;
@@ -89,6 +91,8 @@ public:
using Parent::getKeyDataEntry;
using Parent::clusterLimit;
using Parent::allocBTree;
+ using Parent::allocBTreeCopy;
+ using Parent::allocKeyDataCopy;
using Parent::_builder;
using Parent::_store;
using Parent::_allocator;
@@ -113,6 +117,11 @@ public:
vespalib::datastore::DefaultReclaimer<BitVectorEntry> >(BUFFERTYPE_BITVECTOR).alloc();
}
+ BitVectorRefPair allocBitVectorCopy(const BitVectorEntry& bve) {
+ return _store.template freeListAllocator<BitVectorEntry,
+ vespalib::datastore::DefaultReclaimer<BitVectorEntry> >(BUFFERTYPE_BITVECTOR).alloc(bve);
+ }
+
/*
* Recreate btree from bitvector. Weight information is not recreated.
*/
@@ -177,7 +186,15 @@ public:
static inline DataT bitVectorWeight();
vespalib::MemoryUsage getMemoryUsage() const;
+ vespalib::MemoryUsage update_stat();
+
+ void move_btree_nodes(EntryRef ref);
+ EntryRef move(EntryRef ref);
+ void compact_worst_btree_nodes();
+ void compact_worst_buffers();
+ bool consider_compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy);
+ bool consider_compact_worst_buffers(const CompactionStrategy& compaction_strategy);
private:
size_t internalSize(uint32_t typeId, const RefType & iRef) const;
size_t internalFrozenSize(uint32_t typeId, const RefType & iRef) const;
diff --git a/searchlib/src/vespa/searchlib/attribute/reference_attribute.cpp b/searchlib/src/vespa/searchlib/attribute/reference_attribute.cpp
index f9faed34d90..8261f483e55 100644
--- a/searchlib/src/vespa/searchlib/attribute/reference_attribute.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/reference_attribute.cpp
@@ -43,6 +43,7 @@ ReferenceAttribute::ReferenceAttribute(const vespalib::stringref baseFileName,
_store(),
_indices(getGenerationHolder()),
_cached_unique_store_values_memory_usage(),
+ _cached_unique_store_dictionary_memory_usage(),
_gidToLidMapperFactory(),
_referenceMappings(getGenerationHolder(), getCommittedDocIdLimitRef())
{
@@ -177,7 +178,11 @@ ReferenceAttribute::onCommit()
{
// Note: Cost can be reduced if unneeded generation increments are dropped
incGeneration();
- if (considerCompact(getConfig().getCompactionStrategy())) {
+ if (consider_compact_values(getConfig().getCompactionStrategy())) {
+ incGeneration();
+ updateStat(true);
+ }
+ if (consider_compact_dictionary(getConfig().getCompactionStrategy())) {
incGeneration();
updateStat(true);
}
@@ -188,7 +193,9 @@ ReferenceAttribute::onUpdateStat()
{
vespalib::MemoryUsage total = _store.get_values_memory_usage();
_cached_unique_store_values_memory_usage = total;
- total.merge(_store.get_dictionary_memory_usage());
+ auto& dictionary = _store.get_dictionary();
+ _cached_unique_store_dictionary_memory_usage = dictionary.get_memory_usage();
+ total.merge(_cached_unique_store_dictionary_memory_usage);
total.mergeGenerationHeldBytes(getGenerationHolder().getHeldBytes());
total.merge(_indices.getMemoryUsage());
total.merge(_referenceMappings.getMemoryUsage());
@@ -282,20 +289,20 @@ ReferenceAttribute::getReference(DocId doc) const
}
bool
-ReferenceAttribute::considerCompact(const CompactionStrategy &compactionStrategy)
+ReferenceAttribute::consider_compact_values(const CompactionStrategy &compactionStrategy)
{
- size_t usedBytes = _cached_unique_store_values_memory_usage.usedBytes();
- size_t deadBytes = _cached_unique_store_values_memory_usage.deadBytes();
- bool compactMemory = compactionStrategy.should_compact_memory(usedBytes, deadBytes);
- if (compactMemory) {
- compactWorst();
+ size_t used_bytes = _cached_unique_store_values_memory_usage.usedBytes();
+ size_t dead_bytes = _cached_unique_store_values_memory_usage.deadBytes();
+ bool compact_memory = compactionStrategy.should_compact_memory(used_bytes, dead_bytes);
+ if (compact_memory) {
+ compact_worst_values();
return true;
}
return false;
}
void
-ReferenceAttribute::compactWorst()
+ReferenceAttribute::compact_worst_values()
{
auto remapper(_store.compact_worst(true, true));
if (remapper) {
@@ -304,6 +311,22 @@ ReferenceAttribute::compactWorst()
}
}
+bool
+ReferenceAttribute::consider_compact_dictionary(const CompactionStrategy &compaction_strategy)
+{
+ auto& dictionary = _store.get_dictionary();
+ if (dictionary.has_held_buffers()) {
+ return false;
+ }
+ if (compaction_strategy.should_compact_memory(_cached_unique_store_dictionary_memory_usage.usedBytes(),
+ _cached_unique_store_dictionary_memory_usage.deadBytes()))
+ {
+ dictionary.compact_worst(true, true);
+ return true;
+ }
+ return false;
+}
+
uint64_t
ReferenceAttribute::getUniqueValueCount() const
{
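
onUpdateStat above now caches the unique-store values usage and the dictionary usage separately, so consider_compact_values and consider_compact_dictionary can each gate on their own dead/used numbers before everything is merged into the reported total. A rough sketch of that bookkeeping with hypothetical stand-in types (not vespalib::MemoryUsage):

#include <cstddef>
#include <cstdio>

struct Usage {
    std::size_t used = 0, dead = 0, held = 0;
    void merge(const Usage& o) { used += o.used; dead += o.dead; held += o.held; }
};

struct AttributeStats {
    Usage cached_values;        // gates consider_compact_values()
    Usage cached_dictionary;    // gates consider_compact_dictionary()

    Usage update_stat(const Usage& values, const Usage& dictionary,
                      const Usage& indices, std::size_t generation_held_bytes) {
        cached_values = values;
        cached_dictionary = dictionary;
        Usage total = values;               // total reported upward is the merge of all parts
        total.merge(dictionary);
        total.held += generation_held_bytes;
        total.merge(indices);
        return total;
    }
};

int main() {
    AttributeStats stats;
    Usage total = stats.update_stat({1000, 300, 0}, {500, 10, 0}, {200, 0, 0}, 64);
    std::printf("used=%zu dead=%zu held=%zu\n", total.used, total.dead, total.held);
}
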
diff --git a/searchlib/src/vespa/searchlib/attribute/reference_attribute.h b/searchlib/src/vespa/searchlib/attribute/reference_attribute.h
index 892966b16c7..856dd0a9f9f 100644
--- a/searchlib/src/vespa/searchlib/attribute/reference_attribute.h
+++ b/searchlib/src/vespa/searchlib/attribute/reference_attribute.h
@@ -43,6 +43,7 @@ private:
ReferenceStore _store;
ReferenceStoreIndices _indices;
vespalib::MemoryUsage _cached_unique_store_values_memory_usage;
+ vespalib::MemoryUsage _cached_unique_store_dictionary_memory_usage;
std::shared_ptr<IGidToLidMapperFactory> _gidToLidMapperFactory;
ReferenceMappings _referenceMappings;
@@ -55,8 +56,9 @@ private:
bool onLoad() override;
uint64_t getUniqueValueCount() const override;
- bool considerCompact(const CompactionStrategy &compactionStrategy);
- void compactWorst();
+ bool consider_compact_values(const CompactionStrategy &compactionStrategy);
+ void compact_worst_values();
+ bool consider_compact_dictionary(const CompactionStrategy& compaction_strategy);
IndicesCopyVector getIndicesCopy(uint32_t size) const;
void removeReverseMapping(EntryRef oldRef, uint32_t lid);
void addReverseMapping(EntryRef newRef, uint32_t lid);
diff --git a/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp b/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp
index 96dda48c043..39a12cb57d5 100644
--- a/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp
@@ -94,7 +94,7 @@ SingleValueEnumAttribute<B>::onCommit()
freezeEnumDictionary();
std::atomic_thread_fence(std::memory_order_release);
this->removeAllOldGenerations();
- auto remapper = this->_enumStore.consider_compact(this->getConfig().getCompactionStrategy());
+ auto remapper = this->_enumStore.consider_compact_values(this->getConfig().getCompactionStrategy());
if (remapper) {
remap_enum_store_refs(*remapper, *this);
remapper->done();
@@ -102,6 +102,21 @@ SingleValueEnumAttribute<B>::onCommit()
this->incGeneration();
this->updateStat(true);
}
+ if (this->_enumStore.consider_compact_dictionary(this->getConfig().getCompactionStrategy())) {
+ this->incGeneration();
+ this->updateStat(true);
+ }
+ auto *pab = this->getIPostingListAttributeBase();
+ if (pab != nullptr) {
+ if (pab->consider_compact_worst_btree_nodes(this->getConfig().getCompactionStrategy())) {
+ this->incGeneration();
+ this->updateStat(true);
+ }
+ if (pab->consider_compact_worst_buffers(this->getConfig().getCompactionStrategy())) {
+ this->incGeneration();
+ this->updateStat(true);
+ }
+ }
}
template <typename B>
diff --git a/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.h b/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.h
index 634ac7ba024..4e7f8040f7f 100644
--- a/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.h
+++ b/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.h
@@ -63,7 +63,7 @@ private:
void mergeMemoryStats(vespalib::MemoryUsage & total) override;
void applyUpdateValueChange(const Change & c, EnumStore & enumStore,
std::map<DocId, EnumIndex> & currEnumIndices);
- void makePostingChange(const vespalib::datastore::EntryComparator *cmp,
+ void makePostingChange(const vespalib::datastore::EntryComparator &cmp,
const std::map<DocId, EnumIndex> &currEnumIndices,
PostingMap &changePost);
diff --git a/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.hpp b/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.hpp
index 325874091ef..f5ab855565c 100644
--- a/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.hpp
@@ -36,7 +36,7 @@ template <typename B>
void
SingleValueNumericPostingAttribute<B>::mergeMemoryStats(vespalib::MemoryUsage & total)
{
- total.merge(this->_postingList.getMemoryUsage());
+ total.merge(this->_postingList.update_stat());
}
template <typename B>
@@ -53,7 +53,7 @@ SingleValueNumericPostingAttribute<B>::applyUpdateValueChange(const Change & c,
template <typename B>
void
SingleValueNumericPostingAttribute<B>::
-makePostingChange(const vespalib::datastore::EntryComparator *cmpa,
+makePostingChange(const vespalib::datastore::EntryComparator &cmpa,
const std::map<DocId, EnumIndex> &currEnumIndices,
PostingMap &changePost)
{
@@ -63,11 +63,11 @@ makePostingChange(const vespalib::datastore::EntryComparator *cmpa,
EnumIndex newIdx = elem.second;
// add new posting
- changePost[EnumPostingPair(newIdx, cmpa)].add(docId, 1);
+ changePost[EnumPostingPair(newIdx, &cmpa)].add(docId, 1);
// remove old posting
if ( oldIdx.valid()) {
- changePost[EnumPostingPair(oldIdx, cmpa)].remove(docId);
+ changePost[EnumPostingPair(oldIdx, &cmpa)].remove(docId);
}
}
}
@@ -79,7 +79,6 @@ SingleValueNumericPostingAttribute<B>::applyValueChanges(EnumStoreBatchUpdater&
{
EnumStore & enumStore = this->getEnumStore();
IEnumStoreDictionary& dictionary = enumStore.get_dictionary();
- auto cmp = enumStore.make_comparator();
PostingMap changePost;
// used to make sure several arithmetic operations on the same document in a single commit works
@@ -95,8 +94,7 @@ SingleValueNumericPostingAttribute<B>::applyValueChanges(EnumStoreBatchUpdater&
}
if (change._type == ChangeBase::UPDATE) {
- applyUpdateValueChange(change, enumStore,
- currEnumIndices);
+ applyUpdateValueChange(change, enumStore, currEnumIndices);
} else if (change._type >= ChangeBase::ADD && change._type <= ChangeBase::DIV) {
if (oldIdx.valid()) {
T oldValue = enumStore.get_value(oldIdx);
@@ -107,12 +105,11 @@ SingleValueNumericPostingAttribute<B>::applyValueChanges(EnumStoreBatchUpdater&
}
} else if(change._type == ChangeBase::CLEARDOC) {
this->_defaultValue._doc = change._doc;
- applyUpdateValueChange(this->_defaultValue, enumStore,
- currEnumIndices);
+ applyUpdateValueChange(this->_defaultValue, enumStore, currEnumIndices);
}
}
- makePostingChange(&cmp, currEnumIndices, changePost);
+ makePostingChange(enumStore.get_comparator(), currEnumIndices, changePost);
this->updatePostings(changePost);
SingleValueNumericEnumAttribute<B>::applyValueChanges(updater);
diff --git a/searchlib/src/vespa/searchlib/attribute/singlestringattribute.hpp b/searchlib/src/vespa/searchlib/attribute/singlestringattribute.hpp
index e362ecff6cd..532b83ea03c 100644
--- a/searchlib/src/vespa/searchlib/attribute/singlestringattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/singlestringattribute.hpp
@@ -56,11 +56,11 @@ SingleValueStringAttributeT<B>::StringTemplSearchContext::StringTemplSearchConte
this->_plsc = static_cast<attribute::IPostingListSearchContext *>(this);
if (this->valid()) {
if (this->isPrefix()) {
- auto comp = enumStore.make_folded_comparator(queryTerm()->getTerm(), true);
+ auto comp = enumStore.make_folded_comparator_prefix(queryTerm()->getTerm());
lookupRange(comp, comp);
} else if (this->isRegex()) {
vespalib::string prefix(vespalib::RegexpUtil::get_prefix(this->queryTerm()->getTerm()));
- auto comp = enumStore.make_folded_comparator(prefix.c_str(), true);
+ auto comp = enumStore.make_folded_comparator_prefix(prefix.c_str());
lookupRange(comp, comp);
} else {
auto comp = enumStore.make_folded_comparator(queryTerm()->getTerm());
diff --git a/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.h b/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.h
index 397fad60be3..748d5bc4567 100644
--- a/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.h
+++ b/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.h
@@ -65,11 +65,10 @@ private:
EnumStore & enumStore,
std::map<DocId, EnumIndex> &currEnumIndices);
- void
- makePostingChange(const vespalib::datastore::EntryComparator *cmp,
- IEnumStoreDictionary& dictionary,
- const std::map<DocId, EnumIndex> &currEnumIndices,
- PostingMap &changePost);
+ void makePostingChange(const vespalib::datastore::EntryComparator &cmp,
+ IEnumStoreDictionary& dictionary,
+ const std::map<DocId, EnumIndex> &currEnumIndices,
+ PostingMap &changePost);
void applyValueChanges(EnumStoreBatchUpdater& updater) override;
public:
diff --git a/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.hpp b/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.hpp
index 50ed621b13e..39ad8d71021 100644
--- a/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.hpp
@@ -34,7 +34,7 @@ template <typename B>
void
SingleValueStringPostingAttributeT<B>::mergeMemoryStats(vespalib::MemoryUsage & total)
{
- total.merge(this->_postingList.getMemoryUsage());
+ total.merge(this->_postingList.update_stat());
}
template <typename B>
@@ -52,7 +52,7 @@ SingleValueStringPostingAttributeT<B>::applyUpdateValueChange(const Change & c,
template <typename B>
void
SingleValueStringPostingAttributeT<B>::
-makePostingChange(const vespalib::datastore::EntryComparator *cmpa,
+makePostingChange(const vespalib::datastore::EntryComparator &cmpa,
IEnumStoreDictionary& dictionary,
const std::map<DocId, EnumIndex> &currEnumIndices,
PostingMap &changePost)
@@ -64,12 +64,12 @@ makePostingChange(const vespalib::datastore::EntryComparator *cmpa,
// add new posting
auto remapped_new_idx = dictionary.remap_index(newIdx);
- changePost[EnumPostingPair(remapped_new_idx, cmpa)].add(docId, 1);
+ changePost[EnumPostingPair(remapped_new_idx, &cmpa)].add(docId, 1);
// remove old posting
if ( oldIdx.valid()) {
auto remapped_old_idx = dictionary.remap_index(oldIdx);
- changePost[EnumPostingPair(remapped_old_idx, cmpa)].remove(docId);
+ changePost[EnumPostingPair(remapped_old_idx, &cmpa)].remove(docId);
}
}
}
@@ -80,7 +80,6 @@ SingleValueStringPostingAttributeT<B>::applyValueChanges(EnumStoreBatchUpdater&
{
EnumStore & enumStore = this->getEnumStore();
IEnumStoreDictionary& dictionary = enumStore.get_dictionary();
- auto cmp = enumStore.make_folded_comparator();
PostingMap changePost;
// used to make sure several arithmetic operations on the same document in a single commit works
@@ -95,16 +94,14 @@ SingleValueStringPostingAttributeT<B>::applyValueChanges(EnumStoreBatchUpdater&
oldIdx = this->_enumIndices[change._doc];
}
if (change._type == ChangeBase::UPDATE) {
- applyUpdateValueChange(change, enumStore,
- currEnumIndices);
+ applyUpdateValueChange(change, enumStore, currEnumIndices);
} else if (change._type == ChangeBase::CLEARDOC) {
this->_defaultValue._doc = change._doc;
- applyUpdateValueChange(this->_defaultValue, enumStore,
- currEnumIndices);
+ applyUpdateValueChange(this->_defaultValue, enumStore, currEnumIndices);
}
}
- makePostingChange(&cmp, dictionary, currEnumIndices, changePost);
+ makePostingChange(enumStore.get_folded_comparator(), dictionary, currEnumIndices, changePost);
this->updatePostings(changePost);
diff --git a/searchlib/src/vespa/searchlib/attribute/stringattribute.h b/searchlib/src/vespa/searchlib/attribute/stringattribute.h
index 795e4c74467..9c6d2d40f73 100644
--- a/searchlib/src/vespa/searchlib/attribute/stringattribute.h
+++ b/searchlib/src/vespa/searchlib/attribute/stringattribute.h
@@ -4,7 +4,6 @@
#include <vespa/searchlib/attribute/stringbase.h>
#include <vespa/searchlib/attribute/enumstore.h>
-#include <vespa/searchlib/util/foldedstringcompare.h>
namespace search {
diff --git a/searchlib/src/vespa/searchlib/attribute/stringbase.cpp b/searchlib/src/vespa/searchlib/attribute/stringbase.cpp
index a308fc06af0..4be86652541 100644
--- a/searchlib/src/vespa/searchlib/attribute/stringbase.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/stringbase.cpp
@@ -15,6 +15,57 @@ LOG_SETUP(".searchlib.attribute.stringbase");
namespace search {
+StringSearchHelper::StringSearchHelper(QueryTermUCS4 & term, bool cased)
+ : _regex(),
+ _term(),
+ _termLen(),
+ _isPrefix(term.isPrefix()),
+ _isRegex(term.isRegex()),
+ _isCased(cased)
+{
+ if (isRegex()) {
+ if (isCased()) {
+ _regex = vespalib::Regex::from_pattern(term.getTerm(), vespalib::Regex::Options::None);
+ } else {
+ _regex = vespalib::Regex::from_pattern(term.getTerm(), vespalib::Regex::Options::IgnoreCase);
+ }
+ } else if (isCased()) {
+ _term._char = term.getTerm();
+ _termLen = term.getTermLen();
+ } else {
+ term.term(_term._ucs4);
+ }
+}
+
+StringSearchHelper::~StringSearchHelper()
+{
+ if (isRegex()) {
+
+ }
+}
+
+bool
+StringSearchHelper::isMatch(const char *src) const {
+ if (__builtin_expect(isRegex(), false)) {
+ return getRegex().valid() ? getRegex().partial_match(std::string_view(src)) : false;
+ }
+ if (__builtin_expect(isCased(), false)) {
+ int res = strncmp(_term._char, src, _termLen);
+ return (res == 0) && (src[_termLen] == 0 || isPrefix());
+ }
+ vespalib::Utf8ReaderForZTS u8reader(src);
+ uint32_t j = 0;
+ uint32_t val;
+ for (;; ++j) {
+ val = u8reader.getChar();
+ val = vespalib::LowerCase::convert(val);
+ if (_term._ucs4[j] == 0 || _term._ucs4[j] != val) {
+ break;
+ }
+ }
+ return (_term._ucs4[j] == 0 && (val == 0 || isPrefix()));
+}
+
IMPLEMENT_IDENTIFIABLE_ABSTRACT(StringAttribute, AttributeVector);
using attribute::LoadedEnumAttribute;
@@ -225,16 +276,8 @@ StringAttribute::StringSearchContext::StringSearchContext(QueryTermSimple::UP qT
const StringAttribute & toBeSearched) :
SearchContext(toBeSearched),
_queryTerm(static_cast<QueryTermUCS4 *>(qTerm.release())),
- _termUCS4(nullptr),
- _regex(),
- _isPrefix(_queryTerm->isPrefix()),
- _isRegex(_queryTerm->isRegex())
+ _helper(*_queryTerm, toBeSearched.getConfig().get_match() == Config::Match::CASED)
{
- if (isRegex()) {
- _regex = vespalib::Regex::from_pattern(_queryTerm->getTerm(), vespalib::Regex::Options::IgnoreCase);
- } else {
- _queryTerm->term(_termUCS4);
- }
}
StringAttribute::StringSearchContext::~StringSearchContext() = default;
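
StringSearchHelper centralizes the per-term matching rules (prefix, regex, cased vs. lowercased) that previously lived inline in StringSearchContext. The simplified illustration below covers only the exact/prefix and case-folding cases using plain std::string; the real helper additionally handles regex terms and UCS-4 case folding, so names and behavior here are an approximation only.

#include <cctype>
#include <cstdio>
#include <string>

struct TinyStringMatcher {
    std::string term;
    bool prefix = false;
    bool cased = false;

    static char fold(char c) { return static_cast<char>(std::tolower(static_cast<unsigned char>(c))); }

    bool isMatch(const std::string& src) const {
        if (src.size() < term.size()) return false;
        if (!prefix && src.size() != term.size()) return false;   // exact match must consume the whole value
        for (std::size_t i = 0; i < term.size(); ++i) {
            char a = cased ? term[i] : fold(term[i]);
            char b = cased ? src[i]  : fold(src[i]);
            if (a != b) return false;
        }
        return true;
    }
};

int main() {
    TinyStringMatcher m{"Foo", true, false};   // prefix search, case-insensitive
    std::printf("%d %d\n", m.isMatch("foobar"), m.isMatch("barfoo"));   // prints: 1 0
}
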
diff --git a/searchlib/src/vespa/searchlib/attribute/stringbase.h b/searchlib/src/vespa/searchlib/attribute/stringbase.h
index b8fef783d58..b15dc597fe9 100644
--- a/searchlib/src/vespa/searchlib/attribute/stringbase.h
+++ b/searchlib/src/vespa/searchlib/attribute/stringbase.h
@@ -8,7 +8,6 @@
#include <vespa/searchlib/attribute/changevector.h>
#include <vespa/searchlib/attribute/i_enum_store.h>
#include <vespa/searchlib/attribute/loadedenumvalue.h>
-#include <vespa/searchlib/util/foldedstringcompare.h>
#include <vespa/vespalib/regex/regex.h>
#include <vespa/vespalib/text/lowercase.h>
#include <vespa/vespalib/text/utf8.h>
@@ -16,8 +15,36 @@
namespace search {
+/**
+ * Helper class for search context when scanning string fields
+ * It handles different search settings like prefix, regex and cased/uncased.
+ */
+class StringSearchHelper {
+public:
+ StringSearchHelper(QueryTermUCS4 & qTerm, bool cased);
+ ~StringSearchHelper();
+ bool isMatch(const char *src) const;
+ bool isPrefix() const { return _isPrefix; }
+ bool isRegex() const { return _isRegex; }
+ bool isCased() const { return _isCased; }
+ const vespalib::Regex & getRegex() const { return _regex; }
+private:
+ vespalib::Regex _regex;
+ union {
+ const ucs4_t *_ucs4;
+ const char *_char;
+ } _term;
+ uint32_t _termLen;
+ bool _isPrefix;
+ bool _isRegex;
+ bool _isCased;
+};
+
class ReaderBase;
+/**
+ * Base class for all string attributes.
+ */
class StringAttribute : public AttributeVector
{
public:
@@ -97,24 +124,13 @@ protected:
~StringSearchContext() override;
protected:
bool valid() const override;
-
const QueryTermUCS4 * queryTerm() const override;
- bool isMatch(const char *src) const {
- if (__builtin_expect(isRegex(), false)) {
- return _regex.valid() ? _regex.partial_match(std::string_view(src)) : false;
- }
- vespalib::Utf8ReaderForZTS u8reader(src);
- uint32_t j = 0;
- uint32_t val;
- for (;; ++j) {
- val = u8reader.getChar();
- val = vespalib::LowerCase::convert(val);
- if (_termUCS4[j] == 0 || _termUCS4[j] != val) {
- break;
- }
- }
- return (_termUCS4[j] == 0 && (val == 0 || isPrefix()));
- }
+ bool isMatch(const char *src) const { return _helper.isMatch(src); }
+ bool isPrefix() const { return _helper.isPrefix(); }
+ bool isRegex() const { return _helper.isRegex(); }
+ bool isCased() const { return _helper.isCased(); }
+ const vespalib::Regex & getRegex() const { return _helper.getRegex(); }
+
class CollectHitCount {
public:
CollectHitCount() : _hitCount(0) { }
@@ -151,16 +167,9 @@ protected:
}
return -1;
}
-
- bool isPrefix() const { return _isPrefix; }
- bool isRegex() const { return _isRegex; }
- const vespalib::Regex & getRegex() const { return _regex; }
private:
std::unique_ptr<QueryTermUCS4> _queryTerm;
- const ucs4_t *_termUCS4;
- vespalib::Regex _regex;
- bool _isPrefix;
- bool _isRegex;
+ StringSearchHelper _helper;
};
};
diff --git a/searchlib/src/vespa/searchlib/engine/proto_converter.cpp b/searchlib/src/vespa/searchlib/engine/proto_converter.cpp
index f58bcb58949..53756bfcac6 100644
--- a/searchlib/src/vespa/searchlib/engine/proto_converter.cpp
+++ b/searchlib/src/vespa/searchlib/engine/proto_converter.cpp
@@ -99,7 +99,7 @@ ProtoConverter::search_reply_to_proto(const SearchReply &reply, ProtoSearchReply
uint32_t asked_hits = reply.request->maxhits;
size_t got_hits = reply.hits.size();
if (got_hits < asked_hits && asked_offset + got_hits < reply.totalHitCount) {
- LOG(warning, "asked for %u hits [at offset %u] but only returning %zu hits from %zu available",
+ LOG(warning, "asked for %u hits [at offset %u] but only returning %zu hits from %" PRIu64 " available",
asked_hits, asked_offset, got_hits, reply.totalHitCount);
}
}
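
The log-format change above is needed because reply.totalHitCount is a 64-bit count while %zu matches size_t, which is narrower on some platforms; the portable spelling uses the PRIu64 macro from <cinttypes>, as in this minimal sketch:

#include <cinttypes>
#include <cstddef>
#include <cstdio>

int main() {
    std::uint64_t total_hits = 12345;   // 64-bit counter, like reply.totalHitCount
    std::size_t got_hits = 10;
    std::printf("returning %zu hits from %" PRIu64 " available\n", got_hits, total_hits);
}
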
diff --git a/searchlib/src/vespa/searchlib/query/query_term_simple.h b/searchlib/src/vespa/searchlib/query/query_term_simple.h
index 93b19212926..d0dfbee010f 100644
--- a/searchlib/src/vespa/searchlib/query/query_term_simple.h
+++ b/searchlib/src/vespa/searchlib/query/query_term_simple.h
@@ -63,7 +63,6 @@ public:
virtual void visitMembers(vespalib::ObjectVisitor &visitor) const;
vespalib::string getClassName() const;
bool isValid() const { return _valid; }
-protected:
const string & getTermString() const { return _term; }
private:
bool getRangeInternal(int64_t & low, int64_t & high) const;
diff --git a/searchlib/src/vespa/searchlib/query/query_term_ucs4.h b/searchlib/src/vespa/searchlib/query/query_term_ucs4.h
index 00ac59d729e..b8735eb30f0 100644
--- a/searchlib/src/vespa/searchlib/query/query_term_ucs4.h
+++ b/searchlib/src/vespa/searchlib/query/query_term_ucs4.h
@@ -12,7 +12,6 @@ namespace search {
*/
class QueryTermUCS4 : public QueryTermSimple {
public:
- typedef std::unique_ptr<QueryTermUCS4> UP;
QueryTermUCS4(const QueryTermUCS4 &) = delete;
QueryTermUCS4 & operator = (const QueryTermUCS4 &) = delete;
QueryTermUCS4(QueryTermUCS4 &&) = delete;
diff --git a/searchlib/src/vespa/searchlib/query/streaming/query.cpp b/searchlib/src/vespa/searchlib/query/streaming/query.cpp
index bec1bdfc8ae..a189b5cbfab 100644
--- a/searchlib/src/vespa/searchlib/query/streaming/query.cpp
+++ b/searchlib/src/vespa/searchlib/query/streaming/query.cpp
@@ -2,24 +2,33 @@
#include "query.h"
#include <vespa/searchlib/parsequery/stackdumpiterator.h>
#include <vespa/vespalib/objects/visit.hpp>
+#include <cassert>
namespace search::streaming {
-void QueryConnector::visitMembers(vespalib::ObjectVisitor &visitor) const
+void
+QueryConnector::visitMembers(vespalib::ObjectVisitor &visitor) const
{
visit(visitor, "Operator", _opName);
}
-QueryConnector::QueryConnector(const char * opName) :
- QueryNode(),
- _opName(opName),
- _index()
+QueryConnector::QueryConnector(const char * opName)
+ : QueryNode(),
+ _opName(opName),
+ _index(),
+ _children()
{
}
+void
+QueryConnector::addChild(QueryNode::UP child) {
+ _children.push_back(std::move(child));
+}
+
QueryConnector::~QueryConnector() = default;
-const HitList & QueryConnector::evaluateHits(HitList & hl) const
+const HitList &
+QueryConnector::evaluateHits(HitList & hl) const
{
if (evaluate()) {
hl.push_back(Hit(1, 0, 0, 1));
@@ -27,45 +36,51 @@ const HitList & QueryConnector::evaluateHits(HitList & hl) const
return hl;
}
-void QueryConnector::reset()
+void
+QueryConnector::reset()
{
- for(const auto & node : *this) {
+ for (const auto & node : _children) {
node->reset();
}
}
-void QueryConnector::getLeafs(QueryTermList & tl)
+void
+QueryConnector::getLeafs(QueryTermList & tl)
{
- for(const auto & node : *this) {
+ for (const auto & node : _children) {
node->getLeafs(tl);
}
}
-void QueryConnector::getLeafs(ConstQueryTermList & tl) const
+void
+QueryConnector::getLeafs(ConstQueryTermList & tl) const
{
- for(const auto & node : *this) {
+ for (const auto & node : _children) {
node->getLeafs(tl);
}
}
-void QueryConnector::getPhrases(QueryNodeRefList & tl)
+void
+QueryConnector::getPhrases(QueryNodeRefList & tl)
{
- for(const auto & node : *this) {
+ for (const auto & node : _children) {
node->getPhrases(tl);
}
}
-void QueryConnector::getPhrases(ConstQueryNodeRefList & tl) const
+void
+QueryConnector::getPhrases(ConstQueryNodeRefList & tl) const
{
- for(const auto & node : *this) {
+ for (const auto & node : _children) {
node->getPhrases(tl);
}
}
-size_t QueryConnector::depth() const
+size_t
+QueryConnector::depth() const
{
size_t d(0);
- for(const auto & node : *this) {
+ for (const auto & node : _children) {
size_t t = node->depth();
if (t > d) {
d = t;
@@ -74,10 +89,11 @@ size_t QueryConnector::depth() const
return d+1;
}
-size_t QueryConnector::width() const
+size_t
+QueryConnector::width() const
{
size_t w(0);
- for(const auto & node : *this) {
+ for (const auto & node : _children) {
w += node->width();
}
@@ -104,54 +120,62 @@ QueryConnector::create(ParseItem::ItemType type)
}
}
-bool TrueNode::evaluate() const
+bool
+TrueNode::evaluate() const
{
return true;
}
-bool AndQueryNode::evaluate() const
+bool
+AndQueryNode::evaluate() const
{
- bool ok(true);
- for (const_iterator it=begin(), mt=end(); ok && (it!=mt); it++) {
- const QueryNode & qn = **it;
- ok = ok && qn.evaluate();
- }
- return ok;
+ for (const auto & qn : getChildren()) {
+ if ( ! qn->evaluate() ) return false;
+ }
+ return true;
}
-bool AndNotQueryNode::evaluate() const
-{
- bool ok(empty() ? true : front()->evaluate());
- if (!empty()) {
- for (const_iterator it=begin()+1, mt=end(); ok && (it!=mt); it++) {
- const QueryNode & qn = **it;
- ok = ok && ! qn.evaluate();
+bool
+AndNotQueryNode::evaluate() const {
+ if (getChildren().empty()) return true;
+ auto it = getChildren().begin();
+ auto mt = getChildren().end();
+ if ((*it)->evaluate()) {
+ for (++it; it != mt; it++) {
+ if ((*it)->evaluate()) return false;
+ }
+ return true;
}
- }
- return ok;
+ return false;
}
-bool OrQueryNode::evaluate() const
-{
- bool ok(false);
- for (const_iterator it=begin(), mt=end(); !ok && (it!=mt); it++) {
- const QueryNode & qn = **it;
- ok = qn.evaluate();
- }
- return ok;
+bool
+OrQueryNode::evaluate() const {
+ for (const auto & qn : getChildren()) {
+ if (qn->evaluate()) return true;
+ }
+ return false;
}
-bool EquivQueryNode::evaluate() const
+bool
+EquivQueryNode::evaluate() const
{
return OrQueryNode::evaluate();
}
-bool SameElementQueryNode::evaluate() const {
+bool
+SameElementQueryNode::evaluate() const {
HitList hl;
return ! evaluateHits(hl).empty();
}
+void
+SameElementQueryNode::addChild(QueryNode::UP child) {
+ assert(dynamic_cast<const QueryTerm *>(child.get()) != nullptr);
+ AndQueryNode::addChild(std::move(child));
+}
+
const HitList &
SameElementQueryNode::evaluateHits(HitList & hl) const
{
@@ -159,13 +183,14 @@ SameElementQueryNode::evaluateHits(HitList & hl) const
if ( !AndQueryNode::evaluate()) return hl;
HitList tmpHL;
- unsigned int numFields = size();
+ const auto & children = getChildren();
+ unsigned int numFields = children.size();
unsigned int currMatchCount = 0;
std::vector<unsigned int> indexVector(numFields, 0);
- auto curr = static_cast<const QueryTerm *> ((*this)[currMatchCount].get());
+ auto curr = static_cast<const QueryTerm *> (children[currMatchCount].get());
bool exhausted( curr->evaluateHits(tmpHL).empty());
for (; !exhausted; ) {
- auto next = static_cast<const QueryTerm *>((*this)[currMatchCount+1].get());
+ auto next = static_cast<const QueryTerm *>(children[currMatchCount+1].get());
unsigned int & currIndex = indexVector[currMatchCount];
unsigned int & nextIndex = indexVector[currMatchCount+1];
@@ -190,13 +215,14 @@ SameElementQueryNode::evaluateHits(HitList & hl) const
currMatchCount = 0;
indexVector[currMatchCount]++;
}
- curr = static_cast<const QueryTerm *>((*this)[currMatchCount].get());
+ curr = static_cast<const QueryTerm *>(children[currMatchCount].get());
exhausted = (nextIndex >= nextIndexMax) || (indexVector[currMatchCount] >= curr->evaluateHits(tmpHL).size());
}
return hl;
}
-bool PhraseQueryNode::evaluate() const
+bool
+PhraseQueryNode::evaluate() const
{
HitList hl;
return ! evaluateHits(hl).empty();
@@ -205,6 +231,12 @@ bool PhraseQueryNode::evaluate() const
void PhraseQueryNode::getPhrases(QueryNodeRefList & tl) { tl.push_back(this); }
void PhraseQueryNode::getPhrases(ConstQueryNodeRefList & tl) const { tl.push_back(this); }
+void
+PhraseQueryNode::addChild(QueryNode::UP child) {
+ assert(dynamic_cast<const QueryTerm *>(child.get()) != nullptr);
+ AndQueryNode::addChild(std::move(child));
+}
+
const HitList &
PhraseQueryNode::evaluateHits(HitList & hl) const
{
@@ -213,13 +245,14 @@ PhraseQueryNode::evaluateHits(HitList & hl) const
if ( ! AndQueryNode::evaluate()) return hl;
HitList tmpHL;
- unsigned int fullPhraseLen = size();
+ const auto & children = getChildren();
+ unsigned int fullPhraseLen = children.size();
unsigned int currPhraseLen = 0;
std::vector<unsigned int> indexVector(fullPhraseLen, 0);
- auto curr = static_cast<const QueryTerm *> ((*this)[currPhraseLen].get());
+ auto curr = static_cast<const QueryTerm *> (children[currPhraseLen].get());
bool exhausted( curr->evaluateHits(tmpHL).empty());
for (; !exhausted; ) {
- auto next = static_cast<const QueryTerm *>((*this)[currPhraseLen+1].get());
+ auto next = static_cast<const QueryTerm *>(children[currPhraseLen+1].get());
unsigned int & currIndex = indexVector[currPhraseLen];
unsigned int & nextIndex = indexVector[currPhraseLen+1];
@@ -253,7 +286,7 @@ PhraseQueryNode::evaluateHits(HitList & hl) const
currPhraseLen = 0;
indexVector[currPhraseLen]++;
}
- curr = static_cast<const QueryTerm *>((*this)[currPhraseLen].get());
+ curr = static_cast<const QueryTerm *>(children[currPhraseLen].get());
exhausted = (nextIndex >= nextIndexMax) || (indexVector[currPhraseLen] >= curr->evaluateHits(tmpHL).size());
}
return hl;
@@ -273,29 +306,22 @@ PhraseQueryNode::updateFieldInfo(size_t fid, size_t offset, size_t fieldLength)
fi.setHitCount(fi.getHitCount() + 1);
}
-bool NotQueryNode::evaluate() const
+bool
+NearQueryNode::evaluate() const
{
- bool ok(false);
- for (const auto & node : *this) {
- ok |= ! node->evaluate();
- }
- return ok;
+ return AndQueryNode::evaluate();
}
-bool NearQueryNode::evaluate() const
-{
- bool ok(AndQueryNode::evaluate());
- return ok;
-}
-
-void NearQueryNode::visitMembers(vespalib::ObjectVisitor &visitor) const
+void
+NearQueryNode::visitMembers(vespalib::ObjectVisitor &visitor) const
{
AndQueryNode::visitMembers(visitor);
visit(visitor, "distance", static_cast<uint64_t>(_distance));
}
-bool ONearQueryNode::evaluate() const
+bool
+ONearQueryNode::evaluate() const
{
bool ok(NearQueryNode::evaluate());
return ok;
@@ -303,19 +329,19 @@ bool ONearQueryNode::evaluate() const
Query::Query() = default;
-Query::Query(const QueryNodeResultFactory & factory, const QueryPacketT & queryRep) :
- _root()
+Query::Query(const QueryNodeResultFactory & factory, const QueryPacketT & queryRep)
+ : _root()
{
- build(factory, queryRep);
+ build(factory, queryRep);
}
-bool Query::evaluate() const
-{
- bool ok = valid() ? _root->evaluate() : false;
- return ok;
+bool
+Query::evaluate() const {
+ return valid() ? _root->evaluate() : false;
}
-bool Query::build(const QueryNodeResultFactory & factory, const QueryPacketT & queryRep)
+bool
+Query::build(const QueryNodeResultFactory & factory, const QueryPacketT & queryRep)
{
search::SimpleQueryStackDumpIterator stack(queryRep);
if (stack.next()) {
@@ -324,49 +350,49 @@ bool Query::build(const QueryNodeResultFactory & factory, const QueryPacketT & q
return valid();
}
-void Query::getLeafs(QueryTermList & tl)
-{
- if (valid()) {
- _root->getLeafs(tl);
- }
+void
+Query::getLeafs(QueryTermList & tl) {
+ if (valid()) {
+ _root->getLeafs(tl);
+ }
}
-void Query::getLeafs(ConstQueryTermList & tl) const
-{
- if (valid()) {
- _root->getLeafs(tl);
- }
+void
+Query::getLeafs(ConstQueryTermList & tl) const {
+ if (valid()) {
+ _root->getLeafs(tl);
+ }
}
-void Query::getPhrases(QueryNodeRefList & tl)
-{
- if (valid()) {
- _root->getPhrases(tl);
- }
+void
+Query::getPhrases(QueryNodeRefList & tl) {
+ if (valid()) {
+ _root->getPhrases(tl);
+ }
}
-void Query::getPhrases(ConstQueryNodeRefList & tl) const
-{
- if (valid()) {
- _root->getPhrases(tl);
- }
+void
+Query::getPhrases(ConstQueryNodeRefList & tl) const {
+ if (valid()) {
+ _root->getPhrases(tl);
+ }
}
-void Query::reset()
-{
- if (valid()) {
- _root->reset();
- }
+void
+Query::reset() {
+ if (valid()) {
+ _root->reset();
+ }
}
-size_t Query::depth() const
-{
- return valid() ? _root->depth() : 0;
+size_t
+Query::depth() const {
+ return valid() ? _root->depth() : 0;
}
-size_t Query::width() const
-{
- return valid() ? _root->width() : 0;
+size_t
+Query::width() const {
+ return valid() ? _root->width() : 0;
}
}
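
QueryConnector no longer inherits from QueryNodeList; it owns its children in a private vector and exposes a virtual addChild(), which PhraseQueryNode and SameElementQueryNode override to assert that only QueryTerm children are added. A sketch of that ownership-plus-validation pattern with hypothetical types (not the streaming query classes):

#include <cassert>
#include <memory>
#include <vector>

struct Node { virtual ~Node() = default; };
struct Term : Node {};

class Connector : public Node {
public:
    virtual void addChild(std::unique_ptr<Node> child) { _children.push_back(std::move(child)); }
    const std::vector<std::unique_ptr<Node>>& getChildren() const { return _children; }
private:
    std::vector<std::unique_ptr<Node>> _children;   // owned here, instead of inheriting the list type
};

class Phrase : public Connector {
public:
    void addChild(std::unique_ptr<Node> child) override {
        assert(dynamic_cast<Term*>(child.get()) != nullptr);   // phrases only accept leaf terms
        Connector::addChild(std::move(child));
    }
};

int main() {
    Phrase p;
    p.addChild(std::make_unique<Term>());
    return p.getChildren().size() == 1 ? 0 : 1;
}
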
diff --git a/searchlib/src/vespa/searchlib/query/streaming/query.h b/searchlib/src/vespa/searchlib/query/streaming/query.h
index 7e30e7428f6..c0123e26cf2 100644
--- a/searchlib/src/vespa/searchlib/query/streaming/query.h
+++ b/searchlib/src/vespa/searchlib/query/streaming/query.h
@@ -11,7 +11,7 @@ namespace search::streaming {
Base class for all N-ary query operators.
Implements the width, depth, print, and collect all leafs operators(terms).
*/
-class QueryConnector : public QueryNode, public QueryNodeList
+class QueryConnector : public QueryNode
{
public:
QueryConnector(const char * opName);
@@ -29,9 +29,14 @@ public:
const vespalib::string & getIndex() const override { return _index; }
static std::unique_ptr<QueryConnector> create(ParseItem::ItemType type);
virtual bool isFlattenable(ParseItem::ItemType type) const { (void) type; return false; }
+ const QueryNodeList & getChildren() const { return _children; }
+ virtual void addChild(QueryNode::UP child);
+ size_t size() const { return _children.size(); }
+ const QueryNode::UP & operator [](size_t index) const { return _children[index]; }
private:
vespalib::string _opName;
vespalib::string _index;
+ QueryNodeList _children;
};
/**
@@ -113,6 +118,7 @@ public:
const QueryTerm::FieldInfo & getFieldInfo(size_t fid) const { return _fieldInfo[fid]; }
size_t getFieldInfoSize() const { return _fieldInfo.size(); }
bool isFlattenable(ParseItem::ItemType type) const override { return type == ParseItem::ITEM_NOT; }
+ void addChild(QueryNode::UP child) override;
private:
mutable std::vector<QueryTerm::FieldInfo> _fieldInfo;
void updateFieldInfo(size_t fid, size_t offset, size_t fieldLength) const;
@@ -129,16 +135,7 @@ public:
bool evaluate() const override;
const HitList & evaluateHits(HitList & hl) const override;
bool isFlattenable(ParseItem::ItemType type) const override { return type == ParseItem::ITEM_NOT; }
-};
-
-/**
- Unary Not operator. Just inverts the nodes result.
-*/
-class NotQueryNode : public QueryConnector
-{
-public:
- NotQueryNode() : QueryConnector("NOT") { }
- bool evaluate() const override;
+ void addChild(QueryNode::UP child) override;
};
/**
diff --git a/searchlib/src/vespa/searchlib/query/streaming/querynode.cpp b/searchlib/src/vespa/searchlib/query/streaming/querynode.cpp
index cabc9b6dae4..9fd6dd0ced9 100644
--- a/searchlib/src/vespa/searchlib/query/streaming/querynode.cpp
+++ b/searchlib/src/vespa/searchlib/query/streaming/querynode.cpp
@@ -10,8 +10,10 @@ namespace search::streaming {
namespace {
vespalib::stringref DEFAULT("default");
- bool isPhraseOrNear(const QueryNode * qn) {
- return dynamic_cast<const NearQueryNode *> (qn) || dynamic_cast<const PhraseQueryNode *> (qn);
+ bool disableRewrite(const QueryNode * qn) {
+ return dynamic_cast<const NearQueryNode *> (qn) ||
+ dynamic_cast<const PhraseQueryNode *> (qn) ||
+ dynamic_cast<const SameElementQueryNode *>(qn);
}
}
@@ -56,8 +58,7 @@ QueryNode::Build(const QueryNode * parent, const QueryNodeResultFactory & factor
if (qc->isFlattenable(queryRep.getType())) {
arity += queryRep.getArity();
} else {
- UP child = Build(qc, factory, queryRep, allowRewrite && !isPhraseOrNear(qn.get()));
- qc->push_back(std::move(child));
+ qc->addChild(Build(qc, factory, queryRep, allowRewrite && !disableRewrite(qn.get())));
}
}
}
@@ -128,15 +129,20 @@ QueryNode::Build(const QueryNode * parent, const QueryNodeResultFactory & factor
auto qt = std::make_unique<QueryTerm>(factory.create(), ssTerm, ssIndex, sTerm);
qt->setWeight(queryRep.GetWeight());
qt->setUniqueId(queryRep.getUniqueId());
- if ( qt->encoding().isBase10Integer() || ! qt->encoding().isFloat() || ! factory.getRewriteFloatTerms() || !allowRewrite || (ssTerm.find('.') == vespalib::string::npos)) {
+ if (qt->encoding().isBase10Integer() ||
+ ! qt->encoding().isFloat() ||
+ ! factory.getRewriteFloatTerms() ||
+ ! allowRewrite ||
+ (ssTerm.find('.') == vespalib::string::npos))
+ {
qn = std::move(qt);
} else {
auto phrase = std::make_unique<PhraseQueryNode>();
- phrase->push_back(std::make_unique<QueryTerm>(factory.create(), ssTerm.substr(0, ssTerm.find('.')), ssIndex, TermType::WORD));
- phrase->push_back(std::make_unique<QueryTerm>(factory.create(), ssTerm.substr(ssTerm.find('.') + 1), ssIndex, TermType::WORD));
+ phrase->addChild(std::make_unique<QueryTerm>(factory.create(), ssTerm.substr(0, ssTerm.find('.')), ssIndex, TermType::WORD));
+ phrase->addChild(std::make_unique<QueryTerm>(factory.create(), ssTerm.substr(ssTerm.find('.') + 1), ssIndex, TermType::WORD));
auto orqn = std::make_unique<EquivQueryNode>();
- orqn->push_back(std::move(qt));
- orqn->push_back(std::move(phrase));
+ orqn->addChild(std::move(qt));
+ orqn->addChild(std::move(phrase));
qn = std::move(orqn);
}
}
diff --git a/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.cpp b/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.cpp
index 4ec23b993b6..68af090afb6 100644
--- a/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.cpp
@@ -4,7 +4,7 @@
#include "nearest_neighbor_blueprint.h"
#include "nearest_neighbor_iterator.h"
#include "nns_index_iterator.h"
-#include <vespa/eval/eval/dense_cells_value.h>
+#include <vespa/eval/eval/fast_value.h>
#include <vespa/searchlib/fef/termfieldmatchdataarray.h>
#include <vespa/searchlib/tensor/dense_tensor_attribute.h>
#include <vespa/searchlib/tensor/distance_function_factory.h>
@@ -12,35 +12,43 @@
LOG_SETUP(".searchlib.queryeval.nearest_neighbor_blueprint");
-using vespalib::eval::DenseCellsValue;
+using vespalib::eval::CellType;
+using vespalib::eval::FastValueBuilderFactory;
+using vespalib::eval::TypedCells;
using vespalib::eval::Value;
+using vespalib::eval::ValueType;
namespace search::queryeval {
namespace {
template<typename LCT, typename RCT>
-void
-convert_cells(std::unique_ptr<Value> &original, const vespalib::eval::ValueType &want_type)
+std::unique_ptr<Value>
+convert_cells(const ValueType &new_type, std::unique_ptr<Value> old_value)
{
- if constexpr (std::is_same<LCT,RCT>::value) {
- return;
- } else {
- auto old_cells = original->cells().typify<LCT>();
- std::vector<RCT> new_cells;
- new_cells.reserve(old_cells.size());
- for (LCT value : old_cells) {
- RCT conv(value);
- new_cells.push_back(conv);
- }
- original = std::make_unique<DenseCellsValue<RCT>>(want_type, std::move(new_cells));
+ auto old_cells = old_value->cells().typify<LCT>();
+ auto builder = FastValueBuilderFactory::get().create_value_builder<RCT>(new_type);
+ auto new_cells = builder->add_subspace();
+ assert(old_cells.size() == new_cells.size());
+ auto p = new_cells.begin();
+ for (LCT value : old_cells) {
+ RCT conv(value);
+ *p++ = conv;
}
+ return builder->build(std::move(builder));
}
struct ConvertCellsSelector
{
template <typename LCT, typename RCT>
- static auto invoke() { return convert_cells<LCT, RCT>; }
+ static auto invoke(const ValueType &new_type, std::unique_ptr<Value> old_value) {
+ return convert_cells<LCT, RCT>(new_type, std::move(old_value));
+ }
+ auto operator() (CellType from, CellType to, std::unique_ptr<Value> old_value) const {
+ using MyTypify = vespalib::eval::TypifyCellType;
+ ValueType new_type = old_value->type().cell_cast(to);
+ return vespalib::typify_invoke<2,MyTypify,ConvertCellsSelector>(from, to, new_type, std::move(old_value));
+ }
};
} // namespace <unnamed>
@@ -63,12 +71,8 @@ NearestNeighborBlueprint::NearestNeighborBlueprint(const queryeval::FieldSpec& f
_found_hits(),
_global_filter(GlobalFilter::create())
{
- auto lct = _query_tensor->cells().type;
- auto rct = _attr_tensor.getTensorType().cell_type();
- using MyTypify = vespalib::eval::TypifyCellType;
- auto fixup_fun = vespalib::typify_invoke<2,MyTypify,ConvertCellsSelector>(lct, rct);
- fixup_fun(_query_tensor, _attr_tensor.getTensorType());
- _fallback_dist_fun = search::tensor::make_distance_function(_attr_tensor.distance_metric(), rct);
+ CellType attr_ct = _attr_tensor.getTensorType().cell_type();
+ _fallback_dist_fun = search::tensor::make_distance_function(_attr_tensor.distance_metric(), attr_ct);
_dist_fun = _fallback_dist_fun.get();
assert(_dist_fun);
auto nns_index = _attr_tensor.nearest_neighbor_index();
@@ -76,6 +80,12 @@ NearestNeighborBlueprint::NearestNeighborBlueprint(const queryeval::FieldSpec& f
_dist_fun = nns_index->distance_function();
assert(_dist_fun);
}
+ auto query_ct = _query_tensor->cells().type;
+ CellType required_ct = _dist_fun->expected_cell_type();
+ if (query_ct != required_ct) {
+ ConvertCellsSelector converter;
+ _query_tensor = converter(query_ct, required_ct, std::move(_query_tensor));
+ }
if (distance_threshold < std::numeric_limits<double>::max()) {
_distance_threshold = _dist_fun->convert_threshold(distance_threshold);
_distance_heap.set_distance_threshold(_distance_threshold);
@@ -123,18 +133,13 @@ NearestNeighborBlueprint::perform_top_k()
{
auto nns_index = _attr_tensor.nearest_neighbor_index();
if (_approximate && nns_index) {
- auto lhs_type = _query_tensor->type();
- auto rhs_type = _attr_tensor.getTensorType();
- // different cell types should be converted already
- if (lhs_type == rhs_type) {
- auto lhs = _query_tensor->cells();
- uint32_t k = _target_num_hits;
- if (_global_filter->has_filter()) {
- auto filter = _global_filter->filter();
- _found_hits = nns_index->find_top_k_with_filter(k, lhs, *filter, k + _explore_additional_hits, _distance_threshold);
- } else {
- _found_hits = nns_index->find_top_k(k, lhs, k + _explore_additional_hits, _distance_threshold);
- }
+ auto lhs = _query_tensor->cells();
+ uint32_t k = _target_num_hits;
+ if (_global_filter->has_filter()) {
+ auto filter = _global_filter->filter();
+ _found_hits = nns_index->find_top_k_with_filter(k, lhs, *filter, k + _explore_additional_hits, _distance_threshold);
+ } else {
+ _found_hits = nns_index->find_top_k(k, lhs, k + _explore_additional_hits, _distance_threshold);
}
}
}
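
The blueprint above now asks the selected distance function for its expected cell type and converts the query tensor only when the types differ, instead of unconditionally casting it to the attribute's cell type. The following simplified illustration shows the convert-only-when-needed decision using std::vector and std::variant rather than the eval::Value builder API, so it is a sketch of the idea, not the actual conversion code:

#include <cstdio>
#include <variant>
#include <vector>

using Cells = std::variant<std::vector<float>, std::vector<double>>;

Cells convert_if_needed(Cells query, bool want_double) {
    if (want_double && std::holds_alternative<std::vector<float>>(query)) {
        const auto& src = std::get<std::vector<float>>(query);
        std::vector<double> dst(src.begin(), src.end());    // widen each cell
        return dst;
    }
    if (!want_double && std::holds_alternative<std::vector<double>>(query)) {
        const auto& src = std::get<std::vector<double>>(query);
        std::vector<float> dst(src.begin(), src.end());     // narrow each cell
        return dst;
    }
    return query;                                            // already the expected cell type
}

int main() {
    Cells q = std::vector<float>{1.0f, 2.0f};
    Cells converted = convert_if_needed(std::move(q), true);
    std::printf("%zu\n", std::get<std::vector<double>>(converted).size());   // prints: 2
}
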
diff --git a/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_iterator.cpp b/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_iterator.cpp
index d85da49c2f7..89ca4f1cfef 100644
--- a/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_iterator.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_iterator.cpp
@@ -16,7 +16,7 @@ bool
is_compatible(const vespalib::eval::ValueType& lhs,
const vespalib::eval::ValueType& rhs)
{
- return (lhs == rhs);
+ return (lhs.dimensions() == rhs.dimensions());
}
}
@@ -90,9 +90,6 @@ template <bool has_filter>
std::unique_ptr<NearestNeighborIterator>
resolve_strict(bool strict, const NearestNeighborIterator::Params &params)
{
- CellType lct = params.queryTensor.type().cell_type();
- CellType rct = params.tensorAttribute.getTensorType().cell_type();
- if (lct != rct) abort();
if (strict) {
using NNI = NearestNeighborImpl<true, has_filter>;
return std::make_unique<NNI>(params);
diff --git a/searchlib/src/vespa/searchlib/tensor/CMakeLists.txt b/searchlib/src/vespa/searchlib/tensor/CMakeLists.txt
index 8af77a57a44..46bafa189e1 100644
--- a/searchlib/src/vespa/searchlib/tensor/CMakeLists.txt
+++ b/searchlib/src/vespa/searchlib/tensor/CMakeLists.txt
@@ -1,6 +1,7 @@
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_library(searchlib_tensor OBJECT
SOURCES
+ angular_distance.cpp
default_nearest_neighbor_index_factory.cpp
dense_tensor_attribute.cpp
dense_tensor_attribute_saver.cpp
@@ -9,13 +10,16 @@ vespa_add_library(searchlib_tensor OBJECT
direct_tensor_saver.cpp
direct_tensor_store.cpp
distance_function_factory.cpp
- distance_functions.cpp
+ euclidean_distance.cpp
+ geo_degrees_distance.cpp
+ hamming_distance.cpp
hnsw_graph.cpp
hnsw_index.cpp
hnsw_index_loader.cpp
hnsw_index_saver.cpp
imported_tensor_attribute_vector.cpp
imported_tensor_attribute_vector_read_guard.cpp
+ inner_product_distance.cpp
inv_log_level_generator.cpp
nearest_neighbor_index.cpp
nearest_neighbor_index_saver.cpp
diff --git a/searchlib/src/vespa/searchlib/tensor/angular_distance.cpp b/searchlib/src/vespa/searchlib/tensor/angular_distance.cpp
new file mode 100644
index 00000000000..21b2622283c
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/tensor/angular_distance.cpp
@@ -0,0 +1,52 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "angular_distance.h"
+
+using vespalib::typify_invoke;
+using vespalib::eval::TypifyCellType;
+
+namespace search::tensor {
+
+namespace {
+
+struct CalcAngular {
+ template <typename LCT, typename RCT>
+ static double invoke(const vespalib::eval::TypedCells& lhs,
+ const vespalib::eval::TypedCells& rhs)
+ {
+ auto lhs_vector = lhs.unsafe_typify<LCT>();
+ auto rhs_vector = rhs.unsafe_typify<RCT>();
+
+ size_t sz = lhs_vector.size();
+ assert(sz == rhs_vector.size());
+ double a_norm_sq = 0.0;
+ double b_norm_sq = 0.0;
+ double dot_product = 0.0;
+ for (size_t i = 0; i < sz; ++i) {
+ double a = lhs_vector[i];
+ double b = rhs_vector[i];
+ a_norm_sq += a*a;
+ b_norm_sq += b*b;
+ dot_product += a*b;
+ }
+ double squared_norms = a_norm_sq * b_norm_sq;
+ double div = (squared_norms > 0) ? sqrt(squared_norms) : 1.0;
+ double cosine_similarity = dot_product / div;
+ double distance = 1.0 - cosine_similarity; // in range [0,2]
+ return std::max(0.0, distance);
+ }
+};
+
+}
+
+double
+AngularDistance::calc(const vespalib::eval::TypedCells& lhs,
+ const vespalib::eval::TypedCells& rhs) const
+{
+ return typify_invoke<2,TypifyCellType,CalcAngular>(lhs.type, rhs.type, lhs, rhs);
+}
+
+template class AngularDistanceHW<float>;
+template class AngularDistanceHW<double>;
+
+}
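
For reference, the distance computed by CalcAngular and AngularDistanceHW above is one minus the cosine similarity of the two vectors:

    d(a, b) = 1 - \frac{a \cdot b}{\lVert a \rVert \, \lVert b \rVert},
    \qquad a \cdot b = \sum_i a_i b_i, \quad \lVert a \rVert = \sqrt{a \cdot a}

with the division guarded (div = 1) when either norm is zero, and to_rawscore mapping the distance back through the angle as score = \frac{1}{1 + \arccos(1 - d)} after clamping the cosine to [-1, 1] against round-off.
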
diff --git a/searchlib/src/vespa/searchlib/tensor/angular_distance.h b/searchlib/src/vespa/searchlib/tensor/angular_distance.h
new file mode 100644
index 00000000000..ce1fa40b2a4
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/tensor/angular_distance.h
@@ -0,0 +1,76 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "distance_function.h"
+#include <vespa/eval/eval/typed_cells.h>
+#include <vespa/vespalib/hwaccelrated/iaccelrated.h>
+#include <cmath>
+
+namespace search::tensor {
+
+/**
+ * Calculates angular distance between vectors
+ */
+class AngularDistance : public DistanceFunction {
+public:
+ AngularDistance(vespalib::eval::CellType expected) : DistanceFunction(expected) {}
+ double calc(const vespalib::eval::TypedCells& lhs, const vespalib::eval::TypedCells& rhs) const override;
+ double convert_threshold(double threshold) const override {
+ double cosine_similarity = cos(threshold);
+ return 1.0 - cosine_similarity;
+ }
+ double to_rawscore(double distance) const override {
+ double cosine_similarity = 1.0 - distance;
+ // should be in range [-1,1] but roundoff may cause problems:
+ cosine_similarity = std::min(1.0, cosine_similarity);
+ cosine_similarity = std::max(-1.0, cosine_similarity);
+ double angle_distance = acos(cosine_similarity); // in range [0,pi]
+ double score = 1.0 / (1.0 + angle_distance);
+ return score;
+ }
+ double calc_with_limit(const vespalib::eval::TypedCells& lhs,
+ const vespalib::eval::TypedCells& rhs,
+ double /*limit*/) const override
+ {
+ return calc(lhs, rhs);
+ }
+};
+
+/**
+ * Calculates angular distance between vectors.
+ * Uses the instruction set optimal for the CPU it is running on
+ * when both vectors have the expected cell type.
+ */
+template <typename FloatType>
+class AngularDistanceHW : public AngularDistance {
+public:
+ AngularDistanceHW()
+ : AngularDistance(vespalib::eval::get_cell_type<FloatType>()),
+ _computer(vespalib::hwaccelrated::IAccelrated::getAccelerator())
+ {
+ assert(expected_cell_type() == vespalib::eval::get_cell_type<FloatType>());
+ }
+ double calc(const vespalib::eval::TypedCells& lhs, const vespalib::eval::TypedCells& rhs) const override {
+ constexpr vespalib::eval::CellType expected = vespalib::eval::get_cell_type<FloatType>();
+ assert(lhs.type == expected && rhs.type == expected);
+ auto lhs_vector = lhs.typify<FloatType>();
+ auto rhs_vector = rhs.typify<FloatType>();
+ size_t sz = lhs_vector.size();
+ assert(sz == rhs_vector.size());
+ auto a = &lhs_vector[0];
+ auto b = &rhs_vector[0];
+ double a_norm_sq = _computer.dotProduct(a, a, sz);
+ double b_norm_sq = _computer.dotProduct(b, b, sz);
+ double squared_norms = a_norm_sq * b_norm_sq;
+ double dot_product = _computer.dotProduct(a, b, sz);
+ double div = (squared_norms > 0) ? sqrt(squared_norms) : 1.0;
+ double cosine_similarity = dot_product / div;
+ double distance = 1.0 - cosine_similarity; // in range [0,2]
+ return distance;
+ }
+private:
+ const vespalib::hwaccelrated::IAccelrated & _computer;
+};
+
+}
diff --git a/searchlib/src/vespa/searchlib/tensor/distance_function.h b/searchlib/src/vespa/searchlib/tensor/distance_function.h
index 724f83b6129..4c1cd2c5608 100644
--- a/searchlib/src/vespa/searchlib/tensor/distance_function.h
+++ b/searchlib/src/vespa/searchlib/tensor/distance_function.h
@@ -1,8 +1,9 @@
-// Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
#include <memory>
+#include <vespa/eval/eval/cell_type.h>
namespace vespalib::eval { struct TypedCells; }
@@ -15,10 +16,20 @@ namespace search::tensor {
* The actual implementation must know which type the vectors are.
*/
class DistanceFunction {
+private:
+ vespalib::eval::CellType _expect_cell_type;
public:
using UP = std::unique_ptr<DistanceFunction>;
+
+ DistanceFunction(vespalib::eval::CellType expected) : _expect_cell_type(expected) {}
+
virtual ~DistanceFunction() {}
+ // input (query) vectors must be converted to this cell type:
+ vespalib::eval::CellType expected_cell_type() const {
+ return _expect_cell_type;
+ }
+
// calculate internal distance (comparable)
virtual double calc(const vespalib::eval::TypedCells& lhs, const vespalib::eval::TypedCells& rhs) const = 0;
diff --git a/searchlib/src/vespa/searchlib/tensor/distance_function_factory.cpp b/searchlib/src/vespa/searchlib/tensor/distance_function_factory.cpp
index 81b27b56258..8ae9441ff11 100644
--- a/searchlib/src/vespa/searchlib/tensor/distance_function_factory.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/distance_function_factory.cpp
@@ -2,6 +2,10 @@
#include "distance_function_factory.h"
#include "distance_functions.h"
+#include <vespa/vespalib/util/typify.h>
+#include <vespa/log/log.h>
+
+LOG_SETUP(".searchlib.tensor.distance_function_factory");
using search::attribute::DistanceMetric;
using vespalib::eval::CellType;
@@ -13,41 +17,28 @@ DistanceFunction::UP
make_distance_function(DistanceMetric variant, CellType cell_type)
{
switch (variant) {
- case DistanceMetric::Euclidean:
- if (cell_type == CellType::FLOAT) {
- return std::make_unique<SquaredEuclideanDistance<float>>();
- } else {
- return std::make_unique<SquaredEuclideanDistance<double>>();
- }
- break;
- case DistanceMetric::Angular:
- if (cell_type == CellType::FLOAT) {
- return std::make_unique<AngularDistance<float>>();
- } else {
- return std::make_unique<AngularDistance<double>>();
- }
- break;
- case DistanceMetric::GeoDegrees:
- if (cell_type == CellType::FLOAT) {
- return std::make_unique<GeoDegreesDistance<float>>();
- } else {
- return std::make_unique<GeoDegreesDistance<double>>();
- }
- break;
- case DistanceMetric::InnerProduct:
- if (cell_type == CellType::FLOAT) {
- return std::make_unique<InnerProductDistance<float>>();
- } else {
- return std::make_unique<InnerProductDistance<double>>();
- }
- break;
- case DistanceMetric::Hamming:
- if (cell_type == CellType::FLOAT) {
- return std::make_unique<HammingDistance<float>>();
- } else {
- return std::make_unique<HammingDistance<double>>();
- }
- break;
+ case DistanceMetric::Euclidean:
+ switch (cell_type) {
+ case CellType::FLOAT: return std::make_unique<SquaredEuclideanDistanceHW<float>>();
+ case CellType::DOUBLE: return std::make_unique<SquaredEuclideanDistanceHW<double>>();
+ default: return std::make_unique<SquaredEuclideanDistance>(CellType::FLOAT);
+ }
+ case DistanceMetric::Angular:
+ switch (cell_type) {
+ case CellType::FLOAT: return std::make_unique<AngularDistanceHW<float>>();
+ case CellType::DOUBLE: return std::make_unique<AngularDistanceHW<double>>();
+ default: return std::make_unique<AngularDistance>(CellType::FLOAT);
+ }
+ case DistanceMetric::GeoDegrees:
+ return std::make_unique<GeoDegreesDistance>(CellType::DOUBLE);
+ case DistanceMetric::InnerProduct:
+ switch (cell_type) {
+ case CellType::FLOAT: return std::make_unique<InnerProductDistanceHW<float>>();
+ case CellType::DOUBLE: return std::make_unique<InnerProductDistanceHW<double>>();
+ default: return std::make_unique<InnerProductDistance>(CellType::FLOAT);
+ }
+ case DistanceMetric::Hamming:
+ return std::make_unique<HammingDistance>(cell_type);
}
// not reached:
return DistanceFunction::UP();
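
make_distance_function now dispatches on both the distance metric and the attribute cell type: float and double cells get the hardware-accelerated *HW variants, anything else falls back to a generic implementation whose expected cell type tells callers what to convert query vectors to. A sketch of that dispatch with hypothetical enums and classes (not the Vespa types):

#include <memory>
#include <type_traits>

enum class Metric { Euclidean, Angular };
enum class Cell { Float, Double, Int8 };

struct Distance {
    explicit Distance(Cell expected) : _expected(expected) {}
    virtual ~Distance() = default;
    Cell expected_cell_type() const { return _expected; }    // queries are converted to this type
private:
    Cell _expected;
};

template <typename FloatT>
struct AngularHW : Distance {                                 // cell-type-specialized, HW-accelerated
    AngularHW() : Distance(std::is_same_v<FloatT, float> ? Cell::Float : Cell::Double) {}
};

struct AngularGeneric : Distance { using Distance::Distance; };   // handles any cell type

std::unique_ptr<Distance> make_distance(Metric m, Cell c) {
    if (m == Metric::Angular) {
        switch (c) {
            case Cell::Float:  return std::make_unique<AngularHW<float>>();
            case Cell::Double: return std::make_unique<AngularHW<double>>();
            default:           return std::make_unique<AngularGeneric>(Cell::Float);
        }
    }
    return std::make_unique<AngularGeneric>(Cell::Float);    // other metrics omitted for brevity
}

int main() {
    auto f = make_distance(Metric::Angular, Cell::Int8);
    return f->expected_cell_type() == Cell::Float ? 0 : 1;   // generic fallback expects float queries
}
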
diff --git a/searchlib/src/vespa/searchlib/tensor/distance_functions.cpp b/searchlib/src/vespa/searchlib/tensor/distance_functions.cpp
deleted file mode 100644
index 8cf3a95db19..00000000000
--- a/searchlib/src/vespa/searchlib/tensor/distance_functions.cpp
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "distance_functions.h"
-
-namespace search::tensor {
-
-template class SquaredEuclideanDistance<float>;
-template class SquaredEuclideanDistance<double>;
-
-template class AngularDistance<float>;
-template class AngularDistance<double>;
-
-template class InnerProductDistance<float>;
-template class InnerProductDistance<double>;
-
-template class GeoDegreesDistance<float>;
-template class GeoDegreesDistance<double>;
-
-template class HammingDistance<float>;
-template class HammingDistance<double>;
-
-}
diff --git a/searchlib/src/vespa/searchlib/tensor/distance_functions.h b/searchlib/src/vespa/searchlib/tensor/distance_functions.h
index 2557a51e0d7..8ad8c07bd3c 100644
--- a/searchlib/src/vespa/searchlib/tensor/distance_functions.h
+++ b/searchlib/src/vespa/searchlib/tensor/distance_functions.h
@@ -1,245 +1,10 @@
-// Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
#include "distance_function.h"
-#include <vespa/eval/eval/typed_cells.h>
-#include <vespa/vespalib/hwaccelrated/iaccelrated.h>
-#include <cmath>
-
-namespace search::tensor {
-
-/**
- * Calculates the square of the standard Euclidean distance.
- * Will use instruction optimal for the cpu it is running on.
- */
-template <typename FloatType>
-class SquaredEuclideanDistance : public DistanceFunction {
-public:
- SquaredEuclideanDistance()
- : _computer(vespalib::hwaccelrated::IAccelrated::getAccelerator())
- {}
- double calc(const vespalib::eval::TypedCells& lhs, const vespalib::eval::TypedCells& rhs) const override {
- auto lhs_vector = lhs.typify<FloatType>();
- auto rhs_vector = rhs.typify<FloatType>();
- size_t sz = lhs_vector.size();
- assert(sz == rhs_vector.size());
- return _computer.squaredEuclideanDistance(&lhs_vector[0], &rhs_vector[0], sz);
- }
- double convert_threshold(double threshold) const override {
- return threshold*threshold;
- }
- double to_rawscore(double distance) const override {
- double d = sqrt(distance);
- double score = 1.0 / (1.0 + d);
- return score;
- }
- double calc_with_limit(const vespalib::eval::TypedCells& lhs,
- const vespalib::eval::TypedCells& rhs,
- double limit) const override
- {
- auto lhs_vector = lhs.typify<FloatType>();
- auto rhs_vector = rhs.typify<FloatType>();
- double sum = 0.0;
- size_t sz = lhs_vector.size();
- assert(sz == rhs_vector.size());
- for (size_t i = 0; i < sz && sum <= limit; ++i) {
- double diff = lhs_vector[i] - rhs_vector[i];
- sum += diff*diff;
- }
- return sum;
- }
-
- const vespalib::hwaccelrated::IAccelrated & _computer;
-};
-
-/**
- * Calculates angular distance between vectors
- */
-template <typename FloatType>
-class AngularDistance : public DistanceFunction {
-public:
- AngularDistance()
- : _computer(vespalib::hwaccelrated::IAccelrated::getAccelerator())
- {}
- double calc(const vespalib::eval::TypedCells& lhs, const vespalib::eval::TypedCells& rhs) const override {
- auto lhs_vector = lhs.typify<FloatType>();
- auto rhs_vector = rhs.typify<FloatType>();
- size_t sz = lhs_vector.size();
- assert(sz == rhs_vector.size());
- auto a = &lhs_vector[0];
- auto b = &rhs_vector[0];
- double a_norm_sq = _computer.dotProduct(a, a, sz);
- double b_norm_sq = _computer.dotProduct(b, b, sz);
- double squared_norms = a_norm_sq * b_norm_sq;
- double dot_product = _computer.dotProduct(a, b, sz);
- double div = (squared_norms > 0) ? sqrt(squared_norms) : 1.0;
- double cosine_similarity = dot_product / div;
- double distance = 1.0 - cosine_similarity; // in range [0,2]
- return distance;
- }
- double convert_threshold(double threshold) const override {
- double cosine_similarity = cos(threshold);
- return 1.0 - cosine_similarity;
- }
- double to_rawscore(double distance) const override {
- double cosine_similarity = 1.0 - distance;
- // should be in in range [-1,1] but roundoff may cause problems:
- cosine_similarity = std::min(1.0, cosine_similarity);
- cosine_similarity = std::max(-1.0, cosine_similarity);
- double angle_distance = acos(cosine_similarity); // in range [0,pi]
- double score = 1.0 / (1.0 + angle_distance);
- return score;
- }
- double calc_with_limit(const vespalib::eval::TypedCells& lhs,
- const vespalib::eval::TypedCells& rhs,
- double /*limit*/) const override
- {
- return calc(lhs, rhs);
- }
-
- const vespalib::hwaccelrated::IAccelrated & _computer;
-};
-
-/**
- * Calculates inner-product "distance" between vectors with assumed norm 1.
- * Should give same ordering as Angular distance, but is less expensive.
- */
-template <typename FloatType>
-class InnerProductDistance : public DistanceFunction {
-public:
- InnerProductDistance()
- : _computer(vespalib::hwaccelrated::IAccelrated::getAccelerator())
- {}
- double calc(const vespalib::eval::TypedCells& lhs, const vespalib::eval::TypedCells& rhs) const override {
- auto lhs_vector = lhs.typify<FloatType>();
- auto rhs_vector = rhs.typify<FloatType>();
- size_t sz = lhs_vector.size();
- assert(sz == rhs_vector.size());
- double score = 1.0 - _computer.dotProduct(&lhs_vector[0], &rhs_vector[0], sz);
- return std::max(0.0, score);
- }
- double convert_threshold(double threshold) const override {
- return threshold;
- }
- double to_rawscore(double distance) const override {
- double score = 1.0 / (1.0 + distance);
- return score;
- }
- double calc_with_limit(const vespalib::eval::TypedCells& lhs,
- const vespalib::eval::TypedCells& rhs,
- double /*limit*/) const override
- {
- return calc(lhs, rhs);
- }
-
- const vespalib::hwaccelrated::IAccelrated & _computer;
-};
-
-/**
- * Calculates great-circle distance between Latitude/Longitude pairs,
- * measured in degrees. Output distance is measured in meters.
- * Uses the haversine formula directly from:
- * https://en.wikipedia.org/wiki/Haversine_formula
- **/
-template <typename FloatType>
-class GeoDegreesDistance : public DistanceFunction {
-public:
- // in km, as defined by IUGG, see:
- // https://en.wikipedia.org/wiki/Earth_radius#Mean_radius
- static constexpr double earth_mean_radius = 6371.0088;
- static constexpr double degrees_to_radians = M_PI / 180.0;
-
- GeoDegreesDistance() {}
- // haversine function:
- static double hav(double angle) {
- double s = sin(0.5*angle);
- return s*s;
- }
- double calc(const vespalib::eval::TypedCells& lhs, const vespalib::eval::TypedCells& rhs) const override {
- auto lhs_vector = lhs.typify<FloatType>();
- auto rhs_vector = rhs.typify<FloatType>();
- assert(2 == lhs_vector.size());
- assert(2 == rhs_vector.size());
- // convert to radians:
- double lat_A = lhs_vector[0] * degrees_to_radians;
- double lat_B = rhs_vector[0] * degrees_to_radians;
- double lon_A = lhs_vector[1] * degrees_to_radians;
- double lon_B = rhs_vector[1] * degrees_to_radians;
-
- double lat_diff = lat_A - lat_B;
- double lon_diff = lon_A - lon_B;
-
- // haversines of differences:
- double hav_lat = hav(lat_diff);
- double hav_lon = hav(lon_diff);
-
- // haversine of central angle between the two points:
- double hav_central_angle = hav_lat + cos(lat_A)*cos(lat_B)*hav_lon;
- return hav_central_angle;
- }
- double convert_threshold(double threshold) const override {
- double half_angle = threshold / (2 * earth_mean_radius);
- double rt_hav = sin(half_angle);
- return rt_hav * rt_hav;
- }
- double to_rawscore(double distance) const override {
- double hav_diff = sqrt(distance);
- // distance in kilometers:
- double d = 2 * asin(hav_diff) * earth_mean_radius;
- // km to rawscore:
- return 1.0 / (1.0 + d);
- }
- double calc_with_limit(const vespalib::eval::TypedCells& lhs,
- const vespalib::eval::TypedCells& rhs,
- double /*limit*/) const override
- {
- return calc(lhs, rhs);
- }
-
-};
-
-/**
- * Calculates the Hamming distance defined as
- * "number of cells where the values are different"
- */
-template <typename FloatType>
-class HammingDistance : public DistanceFunction {
-public:
- HammingDistance() {}
- double calc(const vespalib::eval::TypedCells& lhs, const vespalib::eval::TypedCells& rhs) const override {
- auto lhs_vector = lhs.typify<FloatType>();
- auto rhs_vector = rhs.typify<FloatType>();
- size_t sz = lhs_vector.size();
- assert(sz == rhs_vector.size());
- size_t sum = 0;
- for (size_t i = 0; i < sz; ++i) {
- sum += (lhs_vector[i] == rhs_vector[i]) ? 0 : 1;
- }
- return (double)sum;
- }
- double convert_threshold(double threshold) const override {
- return threshold;
- }
- double to_rawscore(double distance) const override {
- double score = 1.0 / (1.0 + distance);
- return score;
- }
- double calc_with_limit(const vespalib::eval::TypedCells& lhs,
- const vespalib::eval::TypedCells& rhs,
- double limit) const override
- {
- auto lhs_vector = lhs.typify<FloatType>();
- auto rhs_vector = rhs.typify<FloatType>();
- size_t sz = lhs_vector.size();
- assert(sz == rhs_vector.size());
- size_t sum = 0;
- for (size_t i = 0; i < sz && sum <= limit; ++i) {
- sum += (lhs_vector[i] == rhs_vector[i]) ? 0 : 1;
- }
- return (double)sum;
- }
-};
-
-
-}
+#include "angular_distance.h"
+#include "euclidean_distance.h"
+#include "geo_degrees_distance.h"
+#include "hamming_distance.h"
+#include "inner_product_distance.h"
diff --git a/searchlib/src/vespa/searchlib/tensor/euclidean_distance.cpp b/searchlib/src/vespa/searchlib/tensor/euclidean_distance.cpp
new file mode 100644
index 00000000000..223ceeed940
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/tensor/euclidean_distance.cpp
@@ -0,0 +1,51 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "euclidean_distance.h"
+
+using vespalib::typify_invoke;
+using vespalib::eval::TypifyCellType;
+
+namespace search::tensor {
+
+namespace {
+
+struct CalcEuclidean {
+ template <typename LCT, typename RCT>
+ static double invoke(const vespalib::eval::TypedCells& lhs,
+ const vespalib::eval::TypedCells& rhs)
+ {
+ auto lhs_vector = lhs.unsafe_typify<LCT>();
+ auto rhs_vector = rhs.unsafe_typify<RCT>();
+ double sum = 0.0;
+ size_t sz = lhs_vector.size();
+ assert(sz == rhs_vector.size());
+ for (size_t i = 0; i < sz; ++i) {
+ double diff = lhs_vector[i] - rhs_vector[i];
+ sum += diff*diff;
+ }
+ return sum;
+ }
+};
+
+}
+
+double
+SquaredEuclideanDistance::calc(const vespalib::eval::TypedCells& lhs,
+ const vespalib::eval::TypedCells& rhs) const
+{
+ return typify_invoke<2,TypifyCellType,CalcEuclidean>(lhs.type, rhs.type, lhs, rhs);
+}
+
+double
+SquaredEuclideanDistance::calc_with_limit(const vespalib::eval::TypedCells& lhs,
+ const vespalib::eval::TypedCells& rhs,
+ double) const
+{
+ // maybe optimize this:
+ return typify_invoke<2,TypifyCellType,CalcEuclidean>(lhs.type, rhs.type, lhs, rhs);
+}
+
+template class SquaredEuclideanDistanceHW<float>;
+template class SquaredEuclideanDistanceHW<double>;
+
+}
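
Editor's note: the new .cpp files route calls through vespalib's typify_invoke, which inspects the runtime cell type of each operand and picks a templated kernel compiled for that combination. The standalone sketch below illustrates the same double-dispatch idea with a plain switch; the enum, struct and function names are illustrative only and are not part of the Vespa API.

// Simplified stand-in for typify_invoke-style dispatch on runtime cell types.
#include <cassert>
#include <cstddef>
#include <cstdio>
#include <vector>

enum class CellType { FLOAT, DOUBLE };

struct Cells {
    const void *data;
    std::size_t size;
    CellType    type;
};

struct CalcEuclidean {
    template <typename LCT, typename RCT>
    static double invoke(const Cells &lhs, const Cells &rhs) {
        auto *a = static_cast<const LCT *>(lhs.data);
        auto *b = static_cast<const RCT *>(rhs.data);
        assert(lhs.size == rhs.size);
        double sum = 0.0;
        for (std::size_t i = 0; i < lhs.size; ++i) {
            double diff = a[i] - b[i];
            sum += diff * diff;
        }
        return sum;
    }
};

template <typename Fun>
double dispatch(const Cells &lhs, const Cells &rhs) {
    // Two runtime decisions select one of four compiled kernels.
    if (lhs.type == CellType::FLOAT) {
        return (rhs.type == CellType::FLOAT)
            ? Fun::template invoke<float, float>(lhs, rhs)
            : Fun::template invoke<float, double>(lhs, rhs);
    }
    return (rhs.type == CellType::FLOAT)
        ? Fun::template invoke<double, float>(lhs, rhs)
        : Fun::template invoke<double, double>(lhs, rhs);
}

int main() {
    std::vector<float>  a{1.0f, 2.0f, 3.0f};
    std::vector<double> b{1.0, 0.0, 3.0};
    Cells lhs{a.data(), a.size(), CellType::FLOAT};
    Cells rhs{b.data(), b.size(), CellType::DOUBLE};
    std::printf("%.1f\n", dispatch<CalcEuclidean>(lhs, rhs)); // prints 4.0
    return 0;
}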
diff --git a/searchlib/src/vespa/searchlib/tensor/euclidean_distance.h b/searchlib/src/vespa/searchlib/tensor/euclidean_distance.h
new file mode 100644
index 00000000000..f0eb03293cd
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/tensor/euclidean_distance.h
@@ -0,0 +1,78 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "distance_function.h"
+#include <vespa/eval/eval/typed_cells.h>
+#include <vespa/vespalib/hwaccelrated/iaccelrated.h>
+#include <cmath>
+
+namespace search::tensor {
+
+/**
+ * Calculates the square of the standard Euclidean distance.
+ */
+class SquaredEuclideanDistance : public DistanceFunction {
+public:
+ SquaredEuclideanDistance(vespalib::eval::CellType expected) : DistanceFunction(expected) {}
+ double calc(const vespalib::eval::TypedCells& lhs, const vespalib::eval::TypedCells& rhs) const override;
+ double calc_with_limit(const vespalib::eval::TypedCells& lhs,
+ const vespalib::eval::TypedCells& rhs,
+ double limit) const override;
+ double convert_threshold(double threshold) const override {
+ return threshold*threshold;
+ }
+ double to_rawscore(double distance) const override {
+ double d = sqrt(distance);
+ double score = 1.0 / (1.0 + d);
+ return score;
+ }
+};
+
+/**
+ * Calculates the square of the standard Euclidean distance.
+ * Will use instructions optimal for the CPU it is running on
+ * when both vectors have the expected cell type.
+ */
+template <typename FloatType>
+class SquaredEuclideanDistanceHW : public SquaredEuclideanDistance {
+public:
+ SquaredEuclideanDistanceHW()
+ : SquaredEuclideanDistance(vespalib::eval::get_cell_type<FloatType>()),
+ _computer(vespalib::hwaccelrated::IAccelrated::getAccelerator())
+ {
+ assert(expected_cell_type() == vespalib::eval::get_cell_type<FloatType>());
+ }
+
+ double calc(const vespalib::eval::TypedCells& lhs, const vespalib::eval::TypedCells& rhs) const override {
+ constexpr vespalib::eval::CellType expected = vespalib::eval::get_cell_type<FloatType>();
+ assert(lhs.type == expected && rhs.type == expected);
+ auto lhs_vector = lhs.typify<FloatType>();
+ auto rhs_vector = rhs.typify<FloatType>();
+ size_t sz = lhs_vector.size();
+ assert(sz == rhs_vector.size());
+ return _computer.squaredEuclideanDistance(&lhs_vector[0], &rhs_vector[0], sz);
+ }
+
+ double calc_with_limit(const vespalib::eval::TypedCells& lhs,
+ const vespalib::eval::TypedCells& rhs,
+ double limit) const override
+ {
+ constexpr vespalib::eval::CellType expected = vespalib::eval::get_cell_type<FloatType>();
+ assert(lhs.type == expected && rhs.type == expected);
+ auto lhs_vector = lhs.typify<FloatType>();
+ auto rhs_vector = rhs.typify<FloatType>();
+ double sum = 0.0;
+ size_t sz = lhs_vector.size();
+ assert(sz == rhs_vector.size());
+ for (size_t i = 0; i < sz && sum <= limit; ++i) {
+ double diff = lhs_vector[i] - rhs_vector[i];
+ sum += diff*diff;
+ }
+ return sum;
+ }
+private:
+ const vespalib::hwaccelrated::IAccelrated & _computer;
+};
+
+}
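
Editor's note: calc_with_limit in the hardware-accelerated variant can stop as soon as the running sum exceeds the caller's limit, which pays off when only candidates below a distance threshold matter (presumably during nearest-neighbor search). A minimal standalone sketch of that early-exit loop, using plain arrays instead of the Vespa TypedCells type:

#include <cstddef>
#include <cstdio>

// Squared Euclidean distance with early exit: once the partial sum passes
// `limit`, the exact value no longer matters to the caller.
double squared_euclidean_with_limit(const float *a, const float *b,
                                    std::size_t n, double limit) {
    double sum = 0.0;
    for (std::size_t i = 0; i < n && sum <= limit; ++i) {
        double diff = a[i] - b[i];
        sum += diff * diff;
    }
    return sum;
}

int main() {
    float a[4] = {0.0f, 0.0f, 0.0f, 0.0f};
    float b[4] = {3.0f, 4.0f, 5.0f, 6.0f};
    // Full distance is 86, but with limit 10 the loop stops after two terms (25 > 10).
    std::printf("%.1f\n", squared_euclidean_with_limit(a, b, 4, 10.0));
    return 0;
}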
diff --git a/searchlib/src/vespa/searchlib/tensor/geo_degrees_distance.cpp b/searchlib/src/vespa/searchlib/tensor/geo_degrees_distance.cpp
new file mode 100644
index 00000000000..946caa0d2cb
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/tensor/geo_degrees_distance.cpp
@@ -0,0 +1,50 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "geo_degrees_distance.h"
+
+using vespalib::typify_invoke;
+using vespalib::eval::TypifyCellType;
+
+namespace search::tensor {
+
+namespace {
+
+struct CalcGeoDegrees {
+ template <typename LCT, typename RCT>
+ static double invoke(const vespalib::eval::TypedCells& lhs,
+ const vespalib::eval::TypedCells& rhs)
+ {
+ auto lhs_vector = lhs.unsafe_typify<LCT>();
+ auto rhs_vector = rhs.unsafe_typify<RCT>();
+
+ assert(2 == lhs_vector.size());
+ assert(2 == rhs_vector.size());
+ // convert to radians:
+ double lat_A = lhs_vector[0] * GeoDegreesDistance::degrees_to_radians;
+ double lat_B = rhs_vector[0] * GeoDegreesDistance::degrees_to_radians;
+ double lon_A = lhs_vector[1] * GeoDegreesDistance::degrees_to_radians;
+ double lon_B = rhs_vector[1] * GeoDegreesDistance::degrees_to_radians;
+
+ double lat_diff = lat_A - lat_B;
+ double lon_diff = lon_A - lon_B;
+
+ // haversines of differences:
+ double hav_lat = GeoDegreesDistance::hav(lat_diff);
+ double hav_lon = GeoDegreesDistance::hav(lon_diff);
+
+ // haversine of central angle between the two points:
+ double hav_central_angle = hav_lat + cos(lat_A)*cos(lat_B)*hav_lon;
+ return hav_central_angle;
+ }
+};
+
+}
+
+double
+GeoDegreesDistance::calc(const vespalib::eval::TypedCells& lhs,
+ const vespalib::eval::TypedCells& rhs) const
+{
+ return typify_invoke<2,TypifyCellType,CalcGeoDegrees>(lhs.type, rhs.type, lhs, rhs);
+}
+
+}
diff --git a/searchlib/src/vespa/searchlib/tensor/geo_degrees_distance.h b/searchlib/src/vespa/searchlib/tensor/geo_degrees_distance.h
new file mode 100644
index 00000000000..7ce69ef8aae
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/tensor/geo_degrees_distance.h
@@ -0,0 +1,53 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "distance_function.h"
+#include <vespa/eval/eval/typed_cells.h>
+#include <vespa/vespalib/hwaccelrated/iaccelrated.h>
+#include <vespa/vespalib/util/typify.h>
+#include <cmath>
+
+namespace search::tensor {
+
+/**
+ * Calculates great-circle distance between Latitude/Longitude pairs,
+ * measured in degrees. Output distance is measured in meters.
+ * Uses the haversine formula directly from:
+ * https://en.wikipedia.org/wiki/Haversine_formula
+ **/
+class GeoDegreesDistance : public DistanceFunction {
+public:
+ // in km, as defined by IUGG, see:
+ // https://en.wikipedia.org/wiki/Earth_radius#Mean_radius
+ static constexpr double earth_mean_radius = 6371.0088;
+ static constexpr double degrees_to_radians = M_PI / 180.0;
+
+ GeoDegreesDistance(vespalib::eval::CellType expected) : DistanceFunction(expected) {}
+ // haversine function:
+ static double hav(double angle) {
+ double s = sin(0.5*angle);
+ return s*s;
+ }
+ double calc(const vespalib::eval::TypedCells& lhs, const vespalib::eval::TypedCells& rhs) const override;
+ double convert_threshold(double threshold) const override {
+ double half_angle = threshold / (2 * earth_mean_radius);
+ double rt_hav = sin(half_angle);
+ return rt_hav * rt_hav;
+ }
+ double to_rawscore(double distance) const override {
+ double hav_diff = sqrt(distance);
+ // distance in kilometers:
+ double d = 2 * asin(hav_diff) * earth_mean_radius;
+ // km to rawscore:
+ return 1.0 / (1.0 + d);
+ }
+ double calc_with_limit(const vespalib::eval::TypedCells& lhs,
+ const vespalib::eval::TypedCells& rhs,
+ double /*limit*/) const override
+ {
+ return calc(lhs, rhs);
+ }
+};
+
+}
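
Editor's note: for reference, the quantities in GeoDegreesDistance fit together as follows (written here in LaTeX; R is the IUGG mean Earth radius used above, phi are latitudes and lambda are longitudes in radians):

\[
\operatorname{hav}(\theta) = \sin^2\frac{\theta}{2}, \qquad
\mathrm{calc}(A,B) = h = \operatorname{hav}(\Delta\varphi) + \cos\varphi_A\,\cos\varphi_B\,\operatorname{hav}(\Delta\lambda)
\]
\[
d_{\mathrm{km}} = 2R\,\arcsin\sqrt{h}, \qquad
\mathrm{convert\_threshold}(t_{\mathrm{km}}) = \sin^2\frac{t_{\mathrm{km}}}{2R}, \qquad
\mathrm{to\_rawscore}(h) = \frac{1}{1 + 2R\,\arcsin\sqrt{h}}
\]

So calc returns the haversine of the central angle, convert_threshold maps a threshold given in kilometers into that same internal unit, and to_rawscore maps it back to kilometers before applying the usual 1/(1+d) rawscore transform.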
diff --git a/searchlib/src/vespa/searchlib/tensor/hamming_distance.cpp b/searchlib/src/vespa/searchlib/tensor/hamming_distance.cpp
new file mode 100644
index 00000000000..ef00321a145
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/tensor/hamming_distance.cpp
@@ -0,0 +1,61 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "hamming_distance.h"
+
+using vespalib::typify_invoke;
+using vespalib::eval::TypifyCellType;
+
+namespace search::tensor {
+
+namespace {
+
+struct CalcHamming {
+ template <typename LCT, typename RCT>
+ static double invoke(const vespalib::eval::TypedCells& lhs,
+ const vespalib::eval::TypedCells& rhs)
+ {
+ auto lhs_vector = lhs.unsafe_typify<LCT>();
+ auto rhs_vector = rhs.unsafe_typify<RCT>();
+ size_t sz = lhs_vector.size();
+ assert(sz == rhs_vector.size());
+ size_t sum = 0;
+ for (size_t i = 0; i < sz; ++i) {
+ sum += (lhs_vector[i] == rhs_vector[i]) ? 0 : 1;
+ }
+ return (double)sum;
+ }
+};
+
+}
+
+double
+HammingDistance::calc(const vespalib::eval::TypedCells& lhs,
+ const vespalib::eval::TypedCells& rhs) const
+{
+ constexpr auto expected = vespalib::eval::CellType::INT8;
+ if (__builtin_expect((lhs.type == expected && rhs.type == expected), true)) {
+ const uint64_t *words_a = static_cast<const uint64_t *>(lhs.data);
+ const uint64_t *words_b = static_cast<const uint64_t *>(rhs.data);
+ size_t sum = 0;
+ size_t sz = lhs.size;
+ assert(sz == rhs.size);
+ size_t i = 0;
+ for (; i * 8 + 7 < sz; ++i) {
+ uint64_t xor_bits = words_a[i] ^ words_b[i];
+ sum += __builtin_popcountl(xor_bits);
+ }
+ if (__builtin_expect((i * 8 < sz), false)) {
+ const uint8_t *bytes_a = static_cast<const uint8_t *>(lhs.data);
+ const uint8_t *bytes_b = static_cast<const uint8_t *>(rhs.data);
+ for (i *= 8; i < sz; ++i) {
+ uint64_t xor_bits = bytes_a[i] ^ bytes_b[i];
+ sum += __builtin_popcountl(xor_bits);
+ }
+ }
+ return (double)sum;
+ } else {
+ return typify_invoke<2,TypifyCellType,CalcHamming>(lhs.type, rhs.type, lhs, rhs);
+ }
+}
+
+}
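
Editor's note: for int8 cells the new HammingDistance::calc treats the buffers as bit strings and counts differing bits with XOR plus popcount, eight bytes per iteration, with a byte-wise tail loop; other cell types fall back to the element-wise comparison. A standalone sketch of the same trick, using memcpy instead of the direct uint64_t reads in the diff and the GCC/Clang popcount builtins:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Bitwise Hamming distance over a byte buffer: XOR 64-bit words and popcount,
// then finish the remaining tail bytes one at a time.
std::size_t bitwise_hamming(const uint8_t *a, const uint8_t *b, std::size_t n) {
    std::size_t sum = 0;
    std::size_t i = 0;
    for (; i + 8 <= n; i += 8) {
        uint64_t wa, wb;
        std::memcpy(&wa, a + i, 8);
        std::memcpy(&wb, b + i, 8);
        sum += __builtin_popcountll(wa ^ wb);
    }
    for (; i < n; ++i) {
        sum += __builtin_popcount(static_cast<unsigned>(a[i] ^ b[i]));
    }
    return sum;
}

int main() {
    uint8_t a[3] = {0b10110010, 0b00000000, 0xFF};
    uint8_t b[3] = {0b00110110, 0b00000001, 0xFF};
    std::printf("%zu\n", bitwise_hamming(a, b, 3)); // 2 + 1 + 0 = 3 differing bits
    return 0;
}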
diff --git a/searchlib/src/vespa/searchlib/tensor/hamming_distance.h b/searchlib/src/vespa/searchlib/tensor/hamming_distance.h
new file mode 100644
index 00000000000..d92671e4922
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/tensor/hamming_distance.h
@@ -0,0 +1,38 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "distance_function.h"
+#include <vespa/eval/eval/typed_cells.h>
+#include <vespa/vespalib/util/typify.h>
+#include <cmath>
+
+namespace search::tensor {
+
+/**
+ * Calculates the Hamming distance defined as
+ * "number of cells where the values are different"
+ * or (for int8 cells, aka binary data only)
+ * "number of bits that are different"
+ */
+class HammingDistance : public DistanceFunction {
+public:
+ HammingDistance(vespalib::eval::CellType expected) : DistanceFunction(expected) {}
+ double calc(const vespalib::eval::TypedCells& lhs, const vespalib::eval::TypedCells& rhs) const override;
+ double convert_threshold(double threshold) const override {
+ return threshold;
+ }
+ double to_rawscore(double distance) const override {
+ double score = 1.0 / (1.0 + distance);
+ return score;
+ }
+ double calc_with_limit(const vespalib::eval::TypedCells& lhs,
+ const vespalib::eval::TypedCells& rhs,
+ double) const override
+ {
+ // consider optimizing:
+ return calc(lhs, rhs);
+ }
+};
+
+}
diff --git a/searchlib/src/vespa/searchlib/tensor/inner_product_distance.cpp b/searchlib/src/vespa/searchlib/tensor/inner_product_distance.cpp
new file mode 100644
index 00000000000..8a48c6c936a
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/tensor/inner_product_distance.cpp
@@ -0,0 +1,44 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "inner_product_distance.h"
+
+using vespalib::typify_invoke;
+using vespalib::eval::TypifyCellType;
+
+namespace search::tensor {
+
+namespace {
+
+struct CalcInnerProduct {
+ template <typename LCT, typename RCT>
+ static double invoke(const vespalib::eval::TypedCells& lhs,
+ const vespalib::eval::TypedCells& rhs)
+ {
+ auto lhs_vector = lhs.unsafe_typify<LCT>();
+ auto rhs_vector = rhs.unsafe_typify<RCT>();
+ size_t sz = lhs_vector.size();
+ assert(sz == rhs_vector.size());
+ double dot_product = 0.0;
+ for (size_t i = 0; i < sz; ++i) {
+ double a = lhs_vector[i];
+ double b = rhs_vector[i];
+ dot_product += a*b;
+ }
+ double score = 1.0 - dot_product; // in range [0,2]
+ return std::max(0.0, score);
+ }
+};
+
+}
+
+double
+InnerProductDistance::calc(const vespalib::eval::TypedCells& lhs,
+ const vespalib::eval::TypedCells& rhs) const
+{
+ return typify_invoke<2,TypifyCellType,CalcInnerProduct>(lhs.type, rhs.type, lhs, rhs);
+}
+
+template class InnerProductDistanceHW<float>;
+template class InnerProductDistanceHW<double>;
+
+}
diff --git a/searchlib/src/vespa/searchlib/tensor/inner_product_distance.h b/searchlib/src/vespa/searchlib/tensor/inner_product_distance.h
new file mode 100644
index 00000000000..981dedea141
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/tensor/inner_product_distance.h
@@ -0,0 +1,64 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "distance_function.h"
+#include <vespa/eval/eval/typed_cells.h>
+#include <vespa/vespalib/hwaccelrated/iaccelrated.h>
+#include <cmath>
+
+namespace search::tensor {
+
+/**
+ * Calculates inner-product "distance" between vectors with assumed norm 1.
+ * Should give same ordering as Angular distance, but is less expensive.
+ */
+class InnerProductDistance : public DistanceFunction {
+public:
+ InnerProductDistance(vespalib::eval::CellType expected) : DistanceFunction(expected) {}
+ double calc(const vespalib::eval::TypedCells& lhs, const vespalib::eval::TypedCells& rhs) const override;
+ double convert_threshold(double threshold) const override {
+ return threshold;
+ }
+ double to_rawscore(double distance) const override {
+ double score = 1.0 / (1.0 + distance);
+ return score;
+ }
+ double calc_with_limit(const vespalib::eval::TypedCells& lhs,
+ const vespalib::eval::TypedCells& rhs,
+ double /*limit*/) const override
+ {
+ return calc(lhs, rhs);
+ }
+};
+
+/**
+ * Calculates inner-product "distance" between vectors with assumed norm 1.
+ * Should give same ordering as Angular distance, but is less expensive.
+ * Will use instructions optimal for the CPU it is running on
+ * when both vectors have the expected cell type.
+ */
+template <typename FloatType>
+class InnerProductDistanceHW : public InnerProductDistance {
+public:
+ InnerProductDistanceHW()
+ : InnerProductDistance(vespalib::eval::get_cell_type<FloatType>()),
+ _computer(vespalib::hwaccelrated::IAccelrated::getAccelerator())
+ {
+ assert(expected_cell_type() == vespalib::eval::get_cell_type<FloatType>());
+ }
+ double calc(const vespalib::eval::TypedCells& lhs, const vespalib::eval::TypedCells& rhs) const override {
+ constexpr vespalib::eval::CellType expected = vespalib::eval::get_cell_type<FloatType>();
+ assert(lhs.type == expected && rhs.type == expected);
+ auto lhs_vector = lhs.typify<FloatType>();
+ auto rhs_vector = rhs.typify<FloatType>();
+ size_t sz = lhs_vector.size();
+ assert(sz == rhs_vector.size());
+ double score = 1.0 - _computer.dotProduct(&lhs_vector[0], &rhs_vector[0], sz);
+ return std::max(0.0, score);
+ }
+private:
+ const vespalib::hwaccelrated::IAccelrated & _computer;
+};
+
+}
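
Editor's note: the inner-product variants rely on the caller feeding unit-norm vectors. Under that assumption the score relates to the other metrics as

\[
1 - a\cdot b \;=\; 1 - \cos\theta \;=\; \tfrac{1}{2}\,\lVert a - b\rVert^2
\qquad \text{when } \lVert a\rVert = \lVert b\rVert = 1,
\]

so it orders candidates exactly like the angular distance while skipping the two norm computations, and the max(0, ...) clamp only guards against slightly negative values caused by floating-point rounding.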
diff --git a/searchlib/src/vespa/searchlib/util/foldedstringcompare.cpp b/searchlib/src/vespa/searchlib/util/foldedstringcompare.cpp
index aaecd174112..234b880bf23 100644
--- a/searchlib/src/vespa/searchlib/util/foldedstringcompare.cpp
+++ b/searchlib/src/vespa/searchlib/util/foldedstringcompare.cpp
@@ -5,22 +5,22 @@
#include <vespa/vespalib/text/lowercase.h>
using vespalib::LowerCase;
-
+using vespalib::Utf8ReaderForZTS;
namespace search {
size_t
FoldedStringCompare::
-size(const char *key) const
+size(const char *key)
{
- return vespalib::Utf8ReaderForZTS::countChars(key);
+ return Utf8ReaderForZTS::countChars(key);
}
int
FoldedStringCompare::
-compareFolded(const char *key, const char *okey) const
+compareFolded(const char *key, const char *okey)
{
- vespalib::Utf8ReaderForZTS kreader(key);
- vespalib::Utf8ReaderForZTS oreader(okey);
+ Utf8ReaderForZTS kreader(key);
+ Utf8ReaderForZTS oreader(okey);
for (;;) {
uint32_t kval = LowerCase::convert(kreader.getChar());
@@ -42,10 +42,10 @@ compareFolded(const char *key, const char *okey) const
int
FoldedStringCompare::
-compareFoldedPrefix(const char *key, const char *okey, size_t prefixLen) const
+compareFoldedPrefix(const char *key, const char *okey, size_t prefixLen)
{
- vespalib::Utf8ReaderForZTS kreader(key);
- vespalib::Utf8ReaderForZTS oreader(okey);
+ Utf8ReaderForZTS kreader(key);
+ Utf8ReaderForZTS oreader(okey);
for (size_t j = 0; j < prefixLen; ++j ) {
uint32_t kval = LowerCase::convert(kreader.getChar());
@@ -64,16 +64,22 @@ compareFoldedPrefix(const char *key, const char *okey, size_t prefixLen) const
return 0;
}
-
int
FoldedStringCompare::
-compare(const char *key, const char *okey) const
+comparePrefix(const char *key, const char *okey, size_t prefixLen)
{
- int res;
+ int res = compareFoldedPrefix(key, okey, prefixLen);
+ if (res != 0) return res;
+ return strncmp(key, okey, prefixLen);
+}
+
- res = compareFolded(key, okey);
- if (res != 0)
- return res;
+int
+FoldedStringCompare::
+compare(const char *key, const char *okey)
+{
+ int res = compareFolded(key, okey);
+ if (res != 0) return res;
return strcmp(key, okey);
}
diff --git a/searchlib/src/vespa/searchlib/util/foldedstringcompare.h b/searchlib/src/vespa/searchlib/util/foldedstringcompare.h
index 5da198ddc55..76267c26801 100644
--- a/searchlib/src/vespa/searchlib/util/foldedstringcompare.h
+++ b/searchlib/src/vespa/searchlib/util/foldedstringcompare.h
@@ -9,15 +9,13 @@ namespace search {
class FoldedStringCompare
{
public:
- FoldedStringCompare() {}
-
/**
* count number of UCS-4 characters in utf8 string
*
* @param key NUL terminated utf8 string
* @return integer number of symbols in utf8 string before NUL
*/
- size_t size(const char *key) const;
+ static size_t size(const char *key);
/**
* Compare utf8 key with utf8 other key after folding both
@@ -26,7 +24,7 @@ public:
* @param okey NUL terminated utf8 string
* @return integer -1 if key < okey, 0 if key == okey, 1 if key > okey
**/
- int compareFolded(const char *key, const char *okey) const;
+ static int compareFolded(const char *key, const char *okey);
/**
* Compare utf8 key with utf8 other key after folding both.
@@ -38,9 +36,7 @@ public:
*
* @return integer -1 if key < okey, 0 if key == okey, 1 if key > okey
*/
- int compareFoldedPrefix(const char *key,
- const char *okey,
- size_t prefixLen) const;
+ static int compareFoldedPrefix(const char *key, const char *okey, size_t prefixLen);
/*
* Compare utf8 key with utf8 other key after folding both, if
@@ -50,7 +46,17 @@ public:
* @param okey NUL terminated utf8 string
* @return integer -1 if key < okey, 0 if key == okey, 1 if key > okey
*/
- int compare(const char *key, const char *okey) const;
+ static int compare(const char *key, const char *okey);
+
+ /*
+ * Compare utf8 key with utf8 other key after folding both for prefix, if
+ * they seem equal then fall back to comparing without folding.
+ *
+ * @param key NUL terminated utf8 string
+ * @param okey NUL terminated utf8 string
+ * @return integer -1 if key < okey, 0 if key == okey, 1 if key > okey
+ */
+ static int comparePrefix(const char *key, const char *okey, size_t prefixLen);
};
} // namespace search
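
Editor's note: the new static comparePrefix follows the same pattern as compare: order by case-folded UTF-8 characters first, and only fall back to a raw byte comparison to break ties, so keys differing only in case still get a stable order. A simplified ASCII-only sketch of that fold-then-tiebreak pattern (the real code folds full Unicode via vespalib::LowerCase and Utf8ReaderForZTS; the function below is illustrative):

#include <cctype>
#include <cstddef>
#include <cstdio>
#include <cstring>

// ASCII-only stand-in for FoldedStringCompare::compare.
int folded_compare(const char *key, const char *okey) {
    for (std::size_t i = 0; ; ++i) {
        int k = std::tolower(static_cast<unsigned char>(key[i]));
        int o = std::tolower(static_cast<unsigned char>(okey[i]));
        if (k != o) return (k < o) ? -1 : 1;
        if (k == 0) break;  // both strings ended, folded-equal
    }
    // Folded-equal: break the tie with an exact byte comparison.
    return std::strcmp(key, okey);
}

int main() {
    std::printf("%d\n", folded_compare("Foo", "foo"));  // folded-equal, byte tiebreak: 'F' < 'f' -> negative
    std::printf("%d\n", folded_compare("bar", "Baz"));  // 'r' < 'z' after folding -> negative
    return 0;
}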
diff --git a/simplemetrics/.gitignore b/simplemetrics/.gitignore
deleted file mode 100644
index 12251442258..00000000000
--- a/simplemetrics/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/target
-/pom.xml.build
diff --git a/simplemetrics/CMakeLists.txt b/simplemetrics/CMakeLists.txt
deleted file mode 100644
index 782e9c7e02a..00000000000
--- a/simplemetrics/CMakeLists.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-install_fat_java_artifact(simplemetrics)
-
-install_config_definitions()
diff --git a/simplemetrics/OWNERS b/simplemetrics/OWNERS
deleted file mode 100644
index 67cd2820bb8..00000000000
--- a/simplemetrics/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-arnej27959
diff --git a/simplemetrics/README b/simplemetrics/README
deleted file mode 100644
index 1ea13ac6221..00000000000
--- a/simplemetrics/README
+++ /dev/null
@@ -1 +0,0 @@
-A simple implementation of metrics for Java
diff --git a/simplemetrics/abi-spec.json b/simplemetrics/abi-spec.json
deleted file mode 100644
index fc6cc1765e5..00000000000
--- a/simplemetrics/abi-spec.json
+++ /dev/null
@@ -1,304 +0,0 @@
-{
- "com.yahoo.metrics.simple.Bucket": {
- "superClass": "java.lang.Object",
- "interfaces": [],
- "attributes": [
- "public"
- ],
- "methods": [
- "public void <init>()",
- "public void <init>(long, long)",
- "public java.util.Set entrySet()",
- "public java.util.Collection getAllMetricNames()",
- "public java.util.Collection getValuesForMetric(java.lang.String)",
- "public java.util.Map getMapForMetric(java.lang.String)",
- "public java.util.Map getValuesByMetricName()",
- "public java.lang.String toString()",
- "public long getFromMillis()",
- "public long getToMillis()"
- ],
- "fields": []
- },
- "com.yahoo.metrics.simple.Counter": {
- "superClass": "java.lang.Object",
- "interfaces": [],
- "attributes": [
- "public"
- ],
- "methods": [
- "public void add()",
- "public void add(long)",
- "public void add(com.yahoo.metrics.simple.Point)",
- "public void add(long, com.yahoo.metrics.simple.Point)",
- "public com.yahoo.metrics.simple.PointBuilder builder()"
- ],
- "fields": []
- },
- "com.yahoo.metrics.simple.Gauge": {
- "superClass": "java.lang.Object",
- "interfaces": [],
- "attributes": [
- "public"
- ],
- "methods": [
- "public void sample(double)",
- "public void sample(double, com.yahoo.metrics.simple.Point)",
- "public com.yahoo.metrics.simple.PointBuilder builder()"
- ],
- "fields": []
- },
- "com.yahoo.metrics.simple.Identifier": {
- "superClass": "java.lang.Object",
- "interfaces": [],
- "attributes": [
- "public"
- ],
- "methods": [
- "public void <init>(java.lang.String, com.yahoo.metrics.simple.Point)",
- "public int hashCode()",
- "public boolean equals(java.lang.Object)",
- "public java.lang.String toString()",
- "public java.lang.String getName()",
- "public com.yahoo.metrics.simple.Point getLocation()"
- ],
- "fields": []
- },
- "com.yahoo.metrics.simple.Measurement": {
- "superClass": "java.lang.Object",
- "interfaces": [],
- "attributes": [
- "public"
- ],
- "methods": [
- "public void <init>(java.lang.Number)"
- ],
- "fields": []
- },
- "com.yahoo.metrics.simple.MetricManager": {
- "superClass": "com.yahoo.component.AbstractComponent",
- "interfaces": [
- "com.yahoo.container.di.componentgraph.Provider"
- ],
- "attributes": [
- "public"
- ],
- "methods": [
- "public void <init>(com.yahoo.metrics.ManagerConfig)",
- "public void deconstruct()",
- "public com.yahoo.metrics.simple.MetricReceiver get()",
- "public bridge synthetic java.lang.Object get()"
- ],
- "fields": []
- },
- "com.yahoo.metrics.simple.MetricReceiver$MockReceiver": {
- "superClass": "com.yahoo.metrics.simple.MetricReceiver",
- "interfaces": [],
- "attributes": [
- "public",
- "final"
- ],
- "methods": [
- "public void <init>()",
- "public com.yahoo.metrics.simple.Bucket getSnapshot()",
- "public com.yahoo.metrics.simple.Point point(java.lang.String, java.lang.String)"
- ],
- "fields": []
- },
- "com.yahoo.metrics.simple.MetricReceiver": {
- "superClass": "java.lang.Object",
- "interfaces": [],
- "attributes": [
- "public"
- ],
- "methods": [
- "public void <init>(com.yahoo.concurrent.ThreadLocalDirectory, java.util.concurrent.atomic.AtomicReference)",
- "public void update(com.yahoo.metrics.simple.Sample)",
- "public com.yahoo.metrics.simple.Counter declareCounter(java.lang.String)",
- "public com.yahoo.metrics.simple.Counter declareCounter(java.lang.String, com.yahoo.metrics.simple.Point)",
- "public com.yahoo.metrics.simple.Gauge declareGauge(java.lang.String)",
- "public com.yahoo.metrics.simple.Gauge declareGauge(java.lang.String, com.yahoo.metrics.simple.Point)",
- "public com.yahoo.metrics.simple.Gauge declareGauge(java.lang.String, java.util.Optional, com.yahoo.metrics.simple.MetricSettings)",
- "public com.yahoo.metrics.simple.PointBuilder pointBuilder()",
- "public com.yahoo.metrics.simple.Bucket getSnapshot()"
- ],
- "fields": [
- "public static final com.yahoo.metrics.simple.MetricReceiver nullImplementation"
- ]
- },
- "com.yahoo.metrics.simple.MetricSettings$Builder": {
- "superClass": "java.lang.Object",
- "interfaces": [],
- "attributes": [
- "public",
- "final"
- ],
- "methods": [
- "public void <init>()",
- "public com.yahoo.metrics.simple.MetricSettings$Builder histogram(boolean)",
- "public com.yahoo.metrics.simple.MetricSettings build()"
- ],
- "fields": []
- },
- "com.yahoo.metrics.simple.MetricSettings": {
- "superClass": "java.lang.Object",
- "interfaces": [],
- "attributes": [
- "public",
- "final"
- ],
- "methods": [],
- "fields": []
- },
- "com.yahoo.metrics.simple.Point": {
- "superClass": "java.lang.Object",
- "interfaces": [
- "com.yahoo.jdisc.Metric$Context"
- ],
- "attributes": [
- "public",
- "final"
- ],
- "methods": [
- "public void <init>(java.util.Map)",
- "public static com.yahoo.metrics.simple.Point emptyPoint()",
- "public boolean equals(java.lang.Object)",
- "public int hashCode()",
- "public java.lang.String toString()",
- "public java.util.List location()",
- "public java.util.List dimensions()",
- "public int dimensionality()"
- ],
- "fields": []
- },
- "com.yahoo.metrics.simple.PointBuilder$Discriminator": {
- "superClass": "java.lang.Enum",
- "interfaces": [],
- "attributes": [
- "public",
- "final",
- "enum"
- ],
- "methods": [
- "public static com.yahoo.metrics.simple.PointBuilder$Discriminator[] values()",
- "public static com.yahoo.metrics.simple.PointBuilder$Discriminator valueOf(java.lang.String)"
- ],
- "fields": [
- "public static final enum com.yahoo.metrics.simple.PointBuilder$Discriminator LONG",
- "public static final enum com.yahoo.metrics.simple.PointBuilder$Discriminator DOUBLE",
- "public static final enum com.yahoo.metrics.simple.PointBuilder$Discriminator STRING"
- ]
- },
- "com.yahoo.metrics.simple.PointBuilder": {
- "superClass": "java.lang.Object",
- "interfaces": [],
- "attributes": [
- "public",
- "final"
- ],
- "methods": [
- "public com.yahoo.metrics.simple.PointBuilder set(java.lang.String, long)",
- "public com.yahoo.metrics.simple.PointBuilder set(java.lang.String, double)",
- "public com.yahoo.metrics.simple.PointBuilder set(java.lang.String, java.lang.String)",
- "public com.yahoo.metrics.simple.Point build()",
- "public java.lang.String toString()"
- ],
- "fields": []
- },
- "com.yahoo.metrics.simple.Sample": {
- "superClass": "java.lang.Object",
- "interfaces": [],
- "attributes": [
- "public"
- ],
- "methods": [
- "public void <init>(com.yahoo.metrics.simple.Measurement, com.yahoo.metrics.simple.Identifier, com.yahoo.metrics.simple.UntypedMetric$AssumedType)"
- ],
- "fields": []
- },
- "com.yahoo.metrics.simple.UnitTestSetup": {
- "superClass": "java.lang.Object",
- "interfaces": [],
- "attributes": [
- "public"
- ],
- "methods": [
- "public void <init>()",
- "public com.yahoo.metrics.simple.Bucket getUpdatedSnapshot()",
- "public com.yahoo.metrics.simple.MetricReceiver getReceiver()"
- ],
- "fields": []
- },
- "com.yahoo.metrics.simple.UntypedMetric$AssumedType": {
- "superClass": "java.lang.Enum",
- "interfaces": [],
- "attributes": [
- "public",
- "final",
- "enum"
- ],
- "methods": [
- "public static com.yahoo.metrics.simple.UntypedMetric$AssumedType[] values()",
- "public static com.yahoo.metrics.simple.UntypedMetric$AssumedType valueOf(java.lang.String)"
- ],
- "fields": [
- "public static final enum com.yahoo.metrics.simple.UntypedMetric$AssumedType NONE",
- "public static final enum com.yahoo.metrics.simple.UntypedMetric$AssumedType GAUGE",
- "public static final enum com.yahoo.metrics.simple.UntypedMetric$AssumedType COUNTER"
- ]
- },
- "com.yahoo.metrics.simple.UntypedMetric": {
- "superClass": "java.lang.Object",
- "interfaces": [],
- "attributes": [
- "public"
- ],
- "methods": [
- "public boolean isCounter()",
- "public long getCount()",
- "public double getLast()",
- "public double getMax()",
- "public double getMin()",
- "public double getSum()",
- "public org.HdrHistogram.DoubleHistogram getHistogram()",
- "public java.lang.String toString()"
- ],
- "fields": []
- },
- "com.yahoo.metrics.simple.Value$Discriminator": {
- "superClass": "java.lang.Enum",
- "interfaces": [],
- "attributes": [
- "public",
- "final",
- "enum"
- ],
- "methods": [
- "public static com.yahoo.metrics.simple.Value$Discriminator[] values()",
- "public static com.yahoo.metrics.simple.Value$Discriminator valueOf(java.lang.String)"
- ],
- "fields": [
- "public static final enum com.yahoo.metrics.simple.Value$Discriminator LONG",
- "public static final enum com.yahoo.metrics.simple.Value$Discriminator DOUBLE",
- "public static final enum com.yahoo.metrics.simple.Value$Discriminator STRING"
- ]
- },
- "com.yahoo.metrics.simple.Value": {
- "superClass": "java.lang.Object",
- "interfaces": [],
- "attributes": [
- "public",
- "abstract"
- ],
- "methods": [
- "public void <init>()",
- "public long longValue()",
- "public double doubleValue()",
- "public java.lang.String stringValue()",
- "public abstract com.yahoo.metrics.simple.Value$Discriminator getType()",
- "public static com.yahoo.metrics.simple.Value of(long)",
- "public static com.yahoo.metrics.simple.Value of(double)",
- "public static com.yahoo.metrics.simple.Value of(java.lang.String)"
- ],
- "fields": []
- }
-} \ No newline at end of file
diff --git a/slobrok/src/tests/backoff/testbackoff.cpp b/slobrok/src/tests/backoff/testbackoff.cpp
index e3d799bfb72..40ca1ac17ba 100644
--- a/slobrok/src/tests/backoff/testbackoff.cpp
+++ b/slobrok/src/tests/backoff/testbackoff.cpp
@@ -11,85 +11,47 @@ TEST_SETUP(Test);
//-----------------------------------------------------------------------------
-static double expectWait[21] = {
- 0.5, 1.0, 1.5, 2.0, 2.5,
- 3.0, 3.5, 4.0, 4.5,
- 5.0, 6.0, 7.0, 8.0, 9.0,
- 10, 15, 20, 25, 30, 30, 30
-};
-
int
Test::Main()
{
TEST_INIT("backoff_test");
BackOff one;
- EXPECT_FALSE(one.shouldWarn());
- EXPECT_EQUAL(0.500, one.get());
- EXPECT_FALSE(one.shouldWarn());
- EXPECT_EQUAL(1.000, one.get());
- EXPECT_FALSE(one.shouldWarn());
- EXPECT_EQUAL(1.500, one.get());
- EXPECT_FALSE(one.shouldWarn());
- EXPECT_EQUAL(2.000, one.get());
- EXPECT_TRUE(one.shouldWarn());
-
- EXPECT_EQUAL(2.500, one.get());
- EXPECT_FALSE(one.shouldWarn());
- EXPECT_EQUAL(3.000, one.get());
- EXPECT_FALSE(one.shouldWarn());
- EXPECT_EQUAL(3.500, one.get());
- EXPECT_FALSE(one.shouldWarn());
- EXPECT_EQUAL(4.000, one.get());
- EXPECT_FALSE(one.shouldWarn());
- EXPECT_EQUAL(4.500, one.get());
EXPECT_TRUE(one.shouldWarn());
-
- EXPECT_EQUAL(5.000, one.get());
- EXPECT_FALSE(one.shouldWarn());
- EXPECT_EQUAL(6.000, one.get());
- EXPECT_FALSE(one.shouldWarn());
- EXPECT_EQUAL(7.000, one.get());
- EXPECT_FALSE(one.shouldWarn());
- EXPECT_EQUAL(8.000, one.get());
- EXPECT_FALSE(one.shouldWarn());
- EXPECT_EQUAL(9.000, one.get());
- EXPECT_FALSE(one.shouldWarn());
- EXPECT_EQUAL(10.00, one.get());
- EXPECT_FALSE(one.shouldWarn());
- EXPECT_EQUAL(15.00, one.get());
- EXPECT_FALSE(one.shouldWarn());
- EXPECT_EQUAL(20.00, one.get());
- EXPECT_TRUE(one.shouldWarn());
-
- EXPECT_EQUAL(25.00, one.get());
- EXPECT_FALSE(one.shouldWarn());
- EXPECT_EQUAL(30.00, one.get());
- EXPECT_FALSE(one.shouldWarn());
- EXPECT_EQUAL(30.00, one.get());
- EXPECT_FALSE(one.shouldWarn());
- EXPECT_EQUAL(30.00, one.get());
- EXPECT_FALSE(one.shouldWarn());
- EXPECT_EQUAL(30.00, one.get());
- EXPECT_FALSE(one.shouldWarn());
- EXPECT_EQUAL(30.00, one.get());
- EXPECT_FALSE(one.shouldWarn());
-
+ EXPECT_EQUAL(0.500, one.get());
+ for (int i = 2; i < 41; i++) {
+ EXPECT_EQUAL(0.5 * i, one.get());
+ }
+ for (int i = 1; i < 1000; i++) {
+ EXPECT_EQUAL(20.0, one.get());
+ }
TEST_FLUSH();
BackOff two;
- for (int i = 0; i < 21; i++) {
- EXPECT_EQUAL(expectWait[i], two.get());
- if (i == 3 || i == 8 || i == 16) {
+ for (int i = 1; i < 50; i++) {
+ double expect = 0.5 * i;
+ if (expect > 20.0) expect = 20.0;
+ EXPECT_EQUAL(expect, two.get());
+ if (i == 1 || i == 7 || i == 18) {
EXPECT_TRUE(two.shouldWarn());
} else {
EXPECT_FALSE(two.shouldWarn());
}
}
two.reset();
- for (int i = 0; i < 21; i++) {
- EXPECT_EQUAL(expectWait[i], two.get());
- if (i == 7 || i == 15) {
+ for (int i = 1; i < 50; i++) {
+ double expect = 0.5 * i;
+ if (expect > 20.0) expect = 20.0;
+ EXPECT_EQUAL(expect, two.get());
+ if (i == 1 || i == 7 || i == 18) {
+ EXPECT_TRUE(two.shouldWarn());
+ } else {
+ EXPECT_FALSE(two.shouldWarn());
+ }
+ }
+ for (int i = 0; i < 50000; i++) {
+ EXPECT_EQUAL(20.0, two.get());
+ if ((i % 180) == 5) {
EXPECT_TRUE(two.shouldWarn());
} else {
EXPECT_FALSE(two.shouldWarn());
diff --git a/slobrok/src/vespa/slobrok/CMakeLists.txt b/slobrok/src/vespa/slobrok/CMakeLists.txt
index 92edaf79c7d..00ccadb1226 100644
--- a/slobrok/src/vespa/slobrok/CMakeLists.txt
+++ b/slobrok/src/vespa/slobrok/CMakeLists.txt
@@ -1,6 +1,7 @@
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_library(slobrok
SOURCES
+ backoff.cpp
sblist.cpp
cfg.cpp
sbmirror.cpp
diff --git a/slobrok/src/vespa/slobrok/backoff.cpp b/slobrok/src/vespa/slobrok/backoff.cpp
new file mode 100644
index 00000000000..e09e688f646
--- /dev/null
+++ b/slobrok/src/vespa/slobrok/backoff.cpp
@@ -0,0 +1,39 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "backoff.h"
+
+namespace slobrok::api {
+
+namespace {
+constexpr size_t num_warn_intervals = 5;
+const double warn_intervals[num_warn_intervals] = { 0.0, 10.0, 60.0, 600.0, 3600.0 };
+}
+
+BackOff::BackOff() { reset(); }
+
+void BackOff::reset() {
+ _time = 0.0;
+ _since_last_warn = 0.0;
+ _nextwarn_idx = 0;
+}
+
+double BackOff::get() {
+ _since_last_warn += _time;
+ if (_time < 20.0) {
+ _time += 0.5;
+ }
+ return _time;
+}
+
+bool BackOff::shouldWarn() {
+ if (_since_last_warn >= warn_intervals[_nextwarn_idx]) {
+ if (_nextwarn_idx + 1 < num_warn_intervals) {
+ ++_nextwarn_idx;
+ }
+ _since_last_warn = 0.0;
+ return true;
+ }
+ return false;
+}
+
+} // namespace slobrok::api
diff --git a/slobrok/src/vespa/slobrok/backoff.h b/slobrok/src/vespa/slobrok/backoff.h
index 9bfc4667e6d..53193a9de6e 100644
--- a/slobrok/src/vespa/slobrok/backoff.h
+++ b/slobrok/src/vespa/slobrok/backoff.h
@@ -1,6 +1,8 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
+#include <stdio.h>
+
namespace slobrok {
namespace api {
@@ -8,39 +10,14 @@ class BackOff
{
private:
double _time;
- double _warntime;
- double _nextwarn;
+ double _since_last_warn;
+ size_t _nextwarn_idx;
public:
- BackOff() : _time(0.50), _warntime(0.0), _nextwarn(4.0) {}
- void reset() { _time = 0.50; _warntime = 0.0; _nextwarn = 15.0; }
- double get() {
- double ret = _time;
- _warntime += ret;
- if (_time < 5.0) {
- _time += 0.5;
- } else if (_time < 10.0) {
- _time += 1.0;
- } else if (_time < 30.0) {
- _time += 5;
- } else {
- // max retry time is 30 seconds
- _time = 30.0;
- }
- return ret;
- }
- bool shouldWarn() {
- if (_warntime > _nextwarn) {
- _warntime = 0.0;
- _nextwarn *= 4.0;
- if (_nextwarn > 86400.0) {
- _nextwarn = 86400.0;
- }
- return true;
- } else {
- return false;
- }
- }
+ BackOff();
+ void reset();
+ double get();
+ bool shouldWarn();
};
} // namespace api
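
Editor's note: after this rewrite the retry delay grows by 0.5 s per call and saturates at 20 s (previously it stepped up to a 30 s cap), and warnings are rate-limited by the accumulated delay since the last warning (thresholds 0, 10, 60, 600 and 3600 seconds) rather than by call count. A small usage sketch of the new API; the include path is an assumption about where the installed header lands:

#include <vespa/slobrok/backoff.h>
#include <cstdio>

int main() {
    slobrok::api::BackOff bo;
    for (int i = 0; i < 45; ++i) {
        double delay = bo.get();      // 0.5, 1.0, ..., 19.5, 20.0, 20.0, ...
        bool warn = bo.shouldWarn();  // true immediately, then only after enough delay has accumulated
        std::printf("retry in %.1f s%s\n", delay, warn ? " (warn)" : "");
    }
    return 0;
}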
diff --git a/slobrok/src/vespa/slobrok/sblist.cpp b/slobrok/src/vespa/slobrok/sblist.cpp
index e7528fb6db2..2cb5f0b3d60 100644
--- a/slobrok/src/vespa/slobrok/sblist.cpp
+++ b/slobrok/src/vespa/slobrok/sblist.cpp
@@ -60,12 +60,12 @@ SlobrokList::logString()
if (_slobrokSpecs.size() == 0) {
return "[empty service location broker list]";
}
- std::string v;
- v = _slobrokSpecs[0];
- for (size_t i = 1 ; i < _slobrokSpecs.size(); ++i) {
- v += " or ";
+ std::string v = "[";
+ for (size_t i = 0 ; i < _slobrokSpecs.size(); ++i) {
+ if (i > 0) v += ", ";
v += _slobrokSpecs[i];
}
+ v += "]";
return v;
}
diff --git a/slobrok/src/vespa/slobrok/sbmirror.cpp b/slobrok/src/vespa/slobrok/sbmirror.cpp
index 460d61cb2a8..6aff47e13fd 100644
--- a/slobrok/src/vespa/slobrok/sbmirror.cpp
+++ b/slobrok/src/vespa/slobrok/sbmirror.cpp
@@ -16,6 +16,7 @@ MirrorAPI::MirrorAPI(FRT_Supervisor &orb, const ConfiguratorFactory & config)
_reqPending(false),
_scheduled(false),
_reqDone(false),
+ _logOnSuccess(true),
_specs(),
_specsGen(),
_updates(),
@@ -234,6 +235,11 @@ MirrorAPI::handleReqDone()
} else {
_backOff.reset();
// req done OK
+ if (_logOnSuccess) {
+ LOG(info, "successfully connected to location broker %s (mirror initialized with %zu service names)",
+ _currSlobrok.c_str(), _specs.size());
+ _logOnSuccess = false;
+ }
return true;
}
}
@@ -245,6 +251,7 @@ void
MirrorAPI::handleReconnect()
{
if (_target == 0) {
+ _logOnSuccess = true;
_currSlobrok = _slobrokSpecs.nextSlobrokSpec();
if (_currSlobrok.size() > 0) {
_target = _orb.GetTarget(_currSlobrok.c_str());
@@ -256,10 +263,12 @@ MirrorAPI::handleReconnect()
}
double delay = _backOff.get();
reSched(delay);
+ std::string cps = _slobrokSpecs.logString();
+ const char * const msgfmt = "no location brokers available, retrying: %s (in %.1f seconds)";
if (_backOff.shouldWarn()) {
- std::string cps = _slobrokSpecs.logString();
- LOG(warning, "cannot connect to location broker at %s (retry in %f seconds)",
- cps.c_str(), delay);
+ LOG(warning, msgfmt, cps.c_str(), delay);
+ } else {
+ LOG(debug, msgfmt, cps.c_str(), delay);
}
}
}
diff --git a/slobrok/src/vespa/slobrok/sbmirror.h b/slobrok/src/vespa/slobrok/sbmirror.h
index 437ef334af6..c1c7009ce12 100644
--- a/slobrok/src/vespa/slobrok/sbmirror.h
+++ b/slobrok/src/vespa/slobrok/sbmirror.h
@@ -92,6 +92,7 @@ private:
bool _reqPending;
bool _scheduled;
bool _reqDone;
+ bool _logOnSuccess;
SpecList _specs;
vespalib::GenCnt _specsGen;
vespalib::GenCnt _updates;
diff --git a/slobrok/src/vespa/slobrok/sbregister.cpp b/slobrok/src/vespa/slobrok/sbregister.cpp
index 72be5f69538..06a9fb2d79e 100644
--- a/slobrok/src/vespa/slobrok/sbregister.cpp
+++ b/slobrok/src/vespa/slobrok/sbregister.cpp
@@ -58,6 +58,7 @@ RegisterAPI::RegisterAPI(FRT_Supervisor &orb, const ConfiguratorFactory & config
_hooks(*this),
_lock(),
_reqDone(false),
+ _logOnSuccess(true),
_busy(false),
_slobrokSpecs(),
_configurator(config.create(_slobrokSpecs)),
@@ -146,6 +147,11 @@ RegisterAPI::handleReqDone()
_req->GetErrorMessage());
}
} else {
+ if (_logOnSuccess && (_pending.size() == 0) && (_names.size() > 0)) {
+ LOG(info, "[RPC @ %s] registering %s with location broker %s completed successfully",
+ createSpec(_orb).c_str(), _names[0].c_str(), _currSlobrok.c_str());
+ _logOnSuccess = false;
+ }
// reset backoff strategy on any successful request
_backOff.reset();
}
@@ -161,13 +167,14 @@ RegisterAPI::handleReconnect()
if (_configurator->poll() && _target != 0) {
if (! _slobrokSpecs.contains(_currSlobrok)) {
vespalib::string cps = _slobrokSpecs.logString();
- LOG(warning, "current server %s not in list of location brokers: %s",
- _currSlobrok.c_str(), cps.c_str());
+ LOG(warning, "[RPC @ %s] location broker %s removed, will disconnect and use one of: %s",
+ createSpec(_orb).c_str(), _currSlobrok.c_str(), cps.c_str());
_target->SubRef();
_target = 0;
}
}
if (_target == 0) {
+ _logOnSuccess = true;
_currSlobrok = _slobrokSpecs.nextSlobrokSpec();
if (_currSlobrok.size() > 0) {
// try next possible server.
@@ -185,13 +192,13 @@ RegisterAPI::handleReconnect()
// possibly with a warning.
double delay = _backOff.get();
Schedule(delay);
+ const char * const msgfmt = "[RPC @ %s] no location brokers available, retrying: %s (in %.1f seconds)";
+ vespalib::string cps = _slobrokSpecs.logString();
if (_backOff.shouldWarn()) {
- vespalib::string cps = _slobrokSpecs.logString();
- LOG(warning, "cannot connect to location broker at %s "
- "(retry in %f seconds)", cps.c_str(), delay);
+ LOG(warning, msgfmt, createSpec(_orb).c_str(), cps.c_str(), delay);
} else {
- LOG(debug, "slobrok retry in %f seconds", delay);
- }
+ LOG(debug, msgfmt, createSpec(_orb).c_str(), cps.c_str(), delay);
+ }
return;
}
}
diff --git a/slobrok/src/vespa/slobrok/sbregister.h b/slobrok/src/vespa/slobrok/sbregister.h
index 8e8614fdf0a..8e849e1ea13 100644
--- a/slobrok/src/vespa/slobrok/sbregister.h
+++ b/slobrok/src/vespa/slobrok/sbregister.h
@@ -87,6 +87,7 @@ private:
RPCHooks _hooks;
std::mutex _lock;
bool _reqDone;
+ bool _logOnSuccess;
std::atomic<bool> _busy;
SlobrokList _slobrokSpecs;
Configurator::UP _configurator;
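
Editor's note: both MirrorAPI and RegisterAPI now carry a _logOnSuccess flag that is re-armed whenever a (re)connect is attempted and cleared after the first successful round-trip, so the "successfully connected / registered" info line is emitted once per connection instead of on every OK reply. The pattern, boiled down to a minimal latch (names below are illustrative, not from the diff):

#include <cstdio>

// Log on the first success after each reconnect, then stay quiet.
class ConnectionLogger {
public:
    void on_reconnect() { _log_on_success = true; }
    void on_success(const char *spec) {
        if (_log_on_success) {
            std::printf("successfully connected to %s\n", spec);
            _log_on_success = false;
        }
    }
private:
    bool _log_on_success = true;
};

int main() {
    ConnectionLogger log;
    log.on_success("tcp/host:2773");   // logs
    log.on_success("tcp/host:2773");   // silent
    log.on_reconnect();
    log.on_success("tcp/host:2773");   // logs again
    return 0;
}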
diff --git a/storage/src/tests/distributor/bucketdbupdatertest.cpp b/storage/src/tests/distributor/bucketdbupdatertest.cpp
index 4ec49b5c6f8..97fccf58901 100644
--- a/storage/src/tests/distributor/bucketdbupdatertest.cpp
+++ b/storage/src/tests/distributor/bucketdbupdatertest.cpp
@@ -1116,7 +1116,7 @@ TEST_F(BucketDBUpdaterTest, notify_bucket_change_from_node_down) {
getBucketDBUpdater().onNotifyBucketChange(cmd);
}
// Enable here to avoid having request bucket info be silently swallowed
- // (sendRequestBucketInfo drops message if node is down).
+ // (send_request_bucket_info drops message if node is down).
enableDistributorClusterState("distributor:1 storage:2 .0.s:d");
ASSERT_EQ(std::string("BucketId(0x4000000000000001) : "
diff --git a/storage/src/tests/distributor/distributortest.cpp b/storage/src/tests/distributor/distributortest.cpp
index 4c574609df5..7958306db5f 100644
--- a/storage/src/tests/distributor/distributortest.cpp
+++ b/storage/src/tests/distributor/distributortest.cpp
@@ -239,6 +239,7 @@ DistributorTest::DistributorTest()
DistributorTest::~DistributorTest() = default;
+// TODO -> stripe test
TEST_F(DistributorTest, operation_generation) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
@@ -257,6 +258,7 @@ TEST_F(DistributorTest, operation_generation) {
EXPECT_EQ("Visitor Create", testOp(cmd));
}
+// TODO -> stripe test
TEST_F(DistributorTest, operations_generated_and_started_without_duplicates) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
@@ -271,6 +273,8 @@ TEST_F(DistributorTest, operations_generated_and_started_without_duplicates) {
ASSERT_EQ(6, _sender.commands().size());
}
+// TODO -> stripe test
+// TODO also need to impl/test cross-stripe cluster state changes
TEST_F(DistributorTest, recovery_mode_on_cluster_state_change) {
setupDistributor(Redundancy(1), NodeCount(2),
"storage:1 .0.s:d distributor:1");
@@ -291,6 +295,8 @@ TEST_F(DistributorTest, recovery_mode_on_cluster_state_change) {
EXPECT_TRUE(_distributor->isInRecoveryMode());
}
+// TODO -> stripe test
+// TODO how to throttle across stripes?
TEST_F(DistributorTest, operations_are_throttled) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
getConfig().setMinPendingMaintenanceOps(1);
@@ -303,6 +309,7 @@ TEST_F(DistributorTest, operations_are_throttled) {
ASSERT_EQ(1, _sender.commands().size());
}
+// TODO -> stripe test
TEST_F(DistributorTest, handle_unknown_maintenance_reply) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
@@ -322,6 +329,7 @@ TEST_F(DistributorTest, handle_unknown_maintenance_reply) {
}
}
+// TODO -> generic, non distr/stripe test
TEST_F(DistributorTest, contains_time_statement) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
@@ -333,6 +341,7 @@ TEST_F(DistributorTest, contains_time_statement) {
EXPECT_TRUE(getConfig().containsTimeStatement("testdoctype1.headerfield == now() - 3600"));
}
+// TODO -> stripe test
TEST_F(DistributorTest, update_bucket_database) {
enableDistributorClusterState("distributor:1 storage:3");
@@ -402,6 +411,8 @@ public:
}
+// TODO -> stripe test
+// TODO need to impl/test cross-stripe status requests
TEST_F(DistributorTest, tick_processes_status_requests) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
@@ -430,6 +441,8 @@ TEST_F(DistributorTest, tick_processes_status_requests) {
EXPECT_THAT(thread.getResult(), HasSubstr("BucketId(0x4000000000000001)"));
}
+// TODO -> distributor test since it owns metric hook
+// TODO need to impl/test cross-stripe metrics aggregation
TEST_F(DistributorTest, metric_update_hook_updates_pending_maintenance_metrics) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
// To ensure we count all operations, not just those fitting within the
@@ -484,6 +497,7 @@ uint64_t db_sample_interval_sec(const Distributor& d) noexcept {
}
+// TODO -> stripe test
TEST_F(DistributorTest, bucket_db_memory_usage_metrics_only_updated_at_fixed_time_intervals) {
getClock().setAbsoluteTimeInSeconds(1000);
@@ -521,6 +535,8 @@ TEST_F(DistributorTest, bucket_db_memory_usage_metrics_only_updated_at_fixed_tim
EXPECT_GT(now_used, last_used);
}
+// TODO -> stripe test
+// TODO need to impl/test cross-stripe config propagation
TEST_F(DistributorTest, priority_config_is_propagated_to_distributor_configuration) {
using namespace vespa::config::content::core;
@@ -557,6 +573,7 @@ TEST_F(DistributorTest, priority_config_is_propagated_to_distributor_configurati
EXPECT_EQ(12, static_cast<int>(mp.mergeGlobalBuckets));
}
+// TODO -> stripe test
TEST_F(DistributorTest, no_db_resurrection_for_bucket_not_owned_in_pending_state) {
setupDistributor(Redundancy(1), NodeCount(10), "storage:2 distributor:2");
lib::ClusterState newState("storage:10 distributor:10");
@@ -578,6 +595,7 @@ TEST_F(DistributorTest, no_db_resurrection_for_bucket_not_owned_in_pending_state
EXPECT_EQ("NONEXISTING", dumpBucket(nonOwnedBucket));
}
+// TODO -> stripe test
TEST_F(DistributorTest, added_db_buckets_without_gc_timestamp_implicitly_get_current_time) {
setupDistributor(Redundancy(1), NodeCount(10), "storage:2 distributor:2");
getClock().setAbsoluteTimeInSeconds(101234);
@@ -591,6 +609,7 @@ TEST_F(DistributorTest, added_db_buckets_without_gc_timestamp_implicitly_get_cur
EXPECT_EQ(101234, e->getLastGarbageCollectionTime());
}
+// TODO -> stripe test
TEST_F(DistributorTest, merge_stats_are_accumulated_during_database_iteration) {
setupDistributor(Redundancy(2), NodeCount(3), "storage:3 distributor:1");
// Copies out of sync. Not possible for distributor to _reliably_ tell
@@ -662,6 +681,7 @@ DistributorTest::assertBucketSpaceStats(size_t expBucketPending, size_t expBucke
* their state checkers at all, we won't get any statistics from any other
* operations for the bucket.
*/
+// TODO -> stripe test
TEST_F(DistributorTest, stats_generated_for_preempted_operations) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
// For this test it suffices to have a single bucket with multiple aspects
@@ -686,6 +706,7 @@ TEST_F(DistributorTest, stats_generated_for_preempted_operations) {
}
}
+// TODO -> distributor test
TEST_F(DistributorTest, host_info_reporter_config_is_propagated_to_reporter) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
@@ -699,11 +720,13 @@ TEST_F(DistributorTest, host_info_reporter_config_is_propagated_to_reporter) {
EXPECT_FALSE(distributor_host_info_reporter().isReportingEnabled());
}
+// TODO -> stripe test (though config is a bit of a special case...)
TEST_F(DistributorTest, replica_counting_mode_is_configured_to_trusted_by_default) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
EXPECT_EQ(ConfigBuilder::MinimumReplicaCountingMode::TRUSTED, currentReplicaCountingMode());
}
+// TODO -> stripe test
TEST_F(DistributorTest, replica_counting_mode_config_is_propagated_to_metric_updater) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
ConfigBuilder builder;
@@ -712,6 +735,7 @@ TEST_F(DistributorTest, replica_counting_mode_config_is_propagated_to_metric_upd
EXPECT_EQ(ConfigBuilder::MinimumReplicaCountingMode::ANY, currentReplicaCountingMode());
}
+// TODO -> stripe test
TEST_F(DistributorTest, max_consecutively_inhibited_maintenance_ticks_config_is_propagated_to_internal_config) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
ConfigBuilder builder;
@@ -720,11 +744,13 @@ TEST_F(DistributorTest, max_consecutively_inhibited_maintenance_ticks_config_is_
EXPECT_EQ(getConfig().max_consecutively_inhibited_maintenance_ticks(), 123);
}
+// TODO -> stripe test
TEST_F(DistributorTest, bucket_activation_is_enabled_by_default) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
EXPECT_FALSE(getConfig().isBucketActivationDisabled());
}
+// TODO -> stripe test
TEST_F(DistributorTest, bucket_activation_config_is_propagated_to_distributor_configuration) {
using namespace vespa::config::content::core;
@@ -747,6 +773,7 @@ DistributorTest::configureMaxClusterClockSkew(int seconds) {
_distributor->enableNextConfig();
}
+// TODO -> stripe test
TEST_F(DistributorTest, max_clock_skew_config_is_propagated_to_distributor_config) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
@@ -817,6 +844,7 @@ void DistributorTest::assertNoMessageBounced() {
// TODO refactor this to set proper highest timestamp as part of bucket info
// reply once we have the "highest timestamp across all owned buckets" feature
// in place.
+// TODO where does this truly belong?
TEST_F(DistributorTest, configured_safe_time_point_rejection_works_end_to_end) {
setupDistributor(Redundancy(2), NodeCount(2),
"bits:1 storage:1 distributor:2");
@@ -846,6 +874,7 @@ void DistributorTest::configure_mutation_sequencing(bool enabled) {
_distributor->enableNextConfig();
}
+// TODO -> stripe test
TEST_F(DistributorTest, sequencing_config_is_propagated_to_distributor_config) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
@@ -871,6 +900,7 @@ DistributorTest::configure_merge_busy_inhibit_duration(int seconds) {
_distributor->enableNextConfig();
}
+// TODO -> stripe test
TEST_F(DistributorTest, merge_busy_inhibit_duration_config_is_propagated_to_distributor_config) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
@@ -878,6 +908,7 @@ TEST_F(DistributorTest, merge_busy_inhibit_duration_config_is_propagated_to_dist
EXPECT_EQ(getConfig().getInhibitMergesOnBusyNodeDuration(), std::chrono::seconds(7));
}
+// TODO -> stripe test
TEST_F(DistributorTest, merge_busy_inhibit_duration_is_propagated_to_pending_message_tracker) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:1 distributor:1");
addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t");
@@ -903,6 +934,7 @@ TEST_F(DistributorTest, merge_busy_inhibit_duration_is_propagated_to_pending_mes
EXPECT_FALSE(node_info.isBusy(0));
}
+// TODO -> stripe test
TEST_F(DistributorTest, external_client_requests_are_handled_individually_in_priority_order) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t/a");
@@ -931,6 +963,7 @@ TEST_F(DistributorTest, external_client_requests_are_handled_individually_in_pri
EXPECT_THAT(actual, ContainerEq(expected));
}
+// TODO -> stripe test
TEST_F(DistributorTest, internal_messages_are_started_in_fifo_order_batch) {
// To test internal request ordering, we use NotifyBucketChangeCommand
// for the reason that it explicitly updates the bucket database for
@@ -959,6 +992,8 @@ TEST_F(DistributorTest, internal_messages_are_started_in_fifo_order_batch) {
EXPECT_EQ(api::BucketInfo(1, 1, 1), e.getBucketInfo().getNode(0)->getBucketInfo());
}
+// TODO -> stripe test
+// TODO also test that closing distributor closes stripes
TEST_F(DistributorTest, closing_aborts_priority_queued_client_requests) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
document::BucketId bucket(16, 1);
@@ -997,6 +1032,9 @@ void assert_invalid_stats_for_all_spaces(
}
+// TODO -> stripe test
+// TODO must impl/test cross-stripe bucket space stats
+// TODO cross-stripe recovery mode handling how?
TEST_F(DistributorTest, entering_recovery_mode_resets_bucket_space_stats) {
// Set up a cluster state + DB contents which implies merge maintenance ops
setupDistributor(Redundancy(2), NodeCount(2), "version:1 distributor:1 storage:2");
@@ -1018,6 +1056,7 @@ TEST_F(DistributorTest, entering_recovery_mode_resets_bucket_space_stats) {
assert_invalid_stats_for_all_spaces(stats, 2);
}
+// TODO figure out interaction between stripes and distributors on this one
TEST_F(DistributorTest, leaving_recovery_mode_immediately_sends_getnodestate_replies) {
setupDistributor(Redundancy(2), NodeCount(2), "version:1 distributor:1 storage:2");
// Should not send explicit replies during init stage
diff --git a/storage/src/tests/distributor/distributortestutil.cpp b/storage/src/tests/distributor/distributortestutil.cpp
index 7929cc1c906..43870dd2c3e 100644
--- a/storage/src/tests/distributor/distributortestutil.cpp
+++ b/storage/src/tests/distributor/distributortestutil.cpp
@@ -32,7 +32,7 @@ DistributorTestUtil::createLinks()
_node->node_identity(),
*_threadPool,
*this,
- true,
+ 0,
_hostInfo,
&_messageSender));
_component.reset(new storage::DistributorComponent(_node->getComponentRegister(), "distrtestutil"));
@@ -66,7 +66,7 @@ DistributorTestUtil::setup_distributor(int redundancy,
// This is for all intents and purposes a hack to avoid having the
// distributor treat setting the distribution explicitly as a signal that
// it should send RequestBucketInfo to all configured nodes.
- // If we called storageDistributionChanged followed by enableDistribution
+ // If we called storage_distribution_changed followed by enableDistribution
// explicitly (which is what happens in "real life"), that is what would
// take place.
// The inverse case of this can be explicitly accomplished by calling
@@ -338,7 +338,7 @@ DistributorTestUtil::disableBucketActivationInConfig(bool disable)
getConfig().configure(config);
}
-BucketDBUpdater&
+StripeBucketDBUpdater&
DistributorTestUtil::getBucketDBUpdater() {
return _distributor->bucket_db_updater();
}
diff --git a/storage/src/tests/distributor/distributortestutil.h b/storage/src/tests/distributor/distributortestutil.h
index d3c0445d5b5..ddf153a1406 100644
--- a/storage/src/tests/distributor/distributortestutil.h
+++ b/storage/src/tests/distributor/distributortestutil.h
@@ -17,7 +17,7 @@ namespace framework { struct TickingThreadPool; }
namespace distributor {
-class BucketDBUpdater;
+class StripeBucketDBUpdater;
class Distributor;
class DistributorBucketSpace;
class DistributorBucketSpaceRepo;
@@ -112,7 +112,7 @@ public:
int idx = -1,
api::ReturnCode::Result result = api::ReturnCode::OK);
- BucketDBUpdater& getBucketDBUpdater();
+ StripeBucketDBUpdater& getBucketDBUpdater();
IdealStateManager& getIdealStateManager();
ExternalOperationHandler& getExternalOperationHandler();
storage::distributor::DistributorStripeComponent& distributor_component();
diff --git a/storage/src/vespa/storage/common/distributorcomponent.cpp b/storage/src/vespa/storage/common/distributorcomponent.cpp
index 328281e5f2b..c5fe099635b 100644
--- a/storage/src/vespa/storage/common/distributorcomponent.cpp
+++ b/storage/src/vespa/storage/common/distributorcomponent.cpp
@@ -8,12 +8,23 @@ DistributorComponent::DistributorComponent(DistributorComponentRegister& compReg
vespalib::stringref name)
: StorageComponent(compReg, name),
_timeCalculator(0),
- _totalConfig(*this)
+ _internal_config_generation(0),
+ _config_snapshot(std::make_shared<DistributorConfiguration>(*this))
{
compReg.registerDistributorComponent(*this);
}
-DistributorComponent::~DistributorComponent() { }
+DistributorComponent::~DistributorComponent() = default;
+
+void DistributorComponent::update_config_snapshot() {
+ auto new_snapshot = std::make_shared<DistributorConfiguration>(*this);
+ new_snapshot->configure(_visitorConfig);
+ new_snapshot->configure(_distributorConfig);
+ // TODO make thread safe if necessary; access currently synchronized by config updates
+ // and checks all being routed through the same "critical tick" global lock.
+ ++_internal_config_generation;
+ _config_snapshot = std::move(new_snapshot);
+}
} // storage
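The hunk above replaces the in-place-mutated _totalConfig with an immutable snapshot that is rebuilt on every config callback, plus an internal generation counter. A minimal standalone sketch of that copy-on-update pattern, assuming a simplified holder type (Config and ConfigHolder are illustrative names, not the actual Vespa types):

    #include <cstdint>
    #include <memory>
    #include <string>

    // Hypothetical config payload; stands in for DistributorConfiguration.
    struct Config {
        int num_stripes = 0;
        std::string name;
    };

    class ConfigHolder {
        uint64_t _generation = 0;                      // internal generation, unrelated to the config system's
        std::shared_ptr<const Config> _snapshot = std::make_shared<Config>();
    public:
        // Assumed to be called from a single reconfiguration context, e.g. under a "critical tick" lock.
        void update(const Config& next) {
            auto fresh = std::make_shared<Config>(next); // build a new immutable snapshot
            ++_generation;                               // readers can cheaply detect that something changed
            _snapshot = std::move(fresh);                // publish; previously handed-out snapshots remain valid
        }
        uint64_t generation() const noexcept { return _generation; }
        std::shared_ptr<const Config> snapshot() const noexcept { return _snapshot; }
    };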
diff --git a/storage/src/vespa/storage/common/distributorcomponent.h b/storage/src/vespa/storage/common/distributorcomponent.h
index 8d5920ecc77..d5eb3fa56c8 100644
--- a/storage/src/vespa/storage/common/distributorcomponent.h
+++ b/storage/src/vespa/storage/common/distributorcomponent.h
@@ -69,20 +69,23 @@ class DistributorComponent : public StorageComponent,
mutable UniqueTimeCalculator* _timeCalculator;
DistributorConfig _distributorConfig;
VisitorConfig _visitorConfig;
- DistributorConfiguration _totalConfig;
+ uint64_t _internal_config_generation; // Note: NOT related to config system generations
+ std::shared_ptr<const DistributorConfiguration> _config_snapshot;
void setTimeCalculator(UniqueTimeCalculator& utc) override { _timeCalculator = &utc; }
void setDistributorConfig(const DistributorConfig& c) override {
_distributorConfig = c;
- _totalConfig.configure(c);
+ update_config_snapshot();
}
void setVisitorConfig(const VisitorConfig& c) override {
_visitorConfig = c;
- _totalConfig.configure(c);
+ update_config_snapshot();
}
+ void update_config_snapshot();
+
public:
- typedef std::unique_ptr<DistributorComponent> UP;
+ using UP = std::unique_ptr<DistributorComponent>;
DistributorComponent(DistributorComponentRegister& compReg, vespalib::stringref name);
~DistributorComponent() override;
@@ -96,9 +99,11 @@ public:
const VisitorConfig& getVisitorConfig() const {
return _visitorConfig;
}
- const DistributorConfiguration&
- getTotalDistributorConfig() const {
- return _totalConfig;
+ uint64_t internal_config_generation() const noexcept {
+ return _internal_config_generation;
+ }
+ std::shared_ptr<const DistributorConfiguration> total_distributor_config_sp() const noexcept {
+ return _config_snapshot;
}
};
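On the consumer side, total_distributor_config_sp() hands out a shared_ptr to the current snapshot and internal_config_generation() lets a reader detect a newer snapshot without comparing contents. A hedged sketch of how a per-tick consumer might use such an interface, reusing the illustrative ConfigHolder from the previous sketch:

    // Illustrative consumer; not actual distributor or stripe code.
    class StripeLikeConsumer {
        uint64_t _seen_generation = 0;
        std::shared_ptr<const Config> _active;
    public:
        void tick(const ConfigHolder& holder) {
            if (holder.generation() != _seen_generation) { // cheap per-tick check
                _active = holder.snapshot();               // latch the new immutable snapshot
                _seen_generation = holder.generation();
            }
            // ... use *_active for the rest of the tick without re-reading config ...
        }
    };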
diff --git a/storage/src/vespa/storage/config/distributorconfiguration.cpp b/storage/src/vespa/storage/config/distributorconfiguration.cpp
index ea963b227e2..90fbb35d18d 100644
--- a/storage/src/vespa/storage/config/distributorconfiguration.cpp
+++ b/storage/src/vespa/storage/config/distributorconfiguration.cpp
@@ -31,6 +31,7 @@ DistributorConfiguration::DistributorConfiguration(StorageComponent& component)
_maxPendingMaintenanceOps(1000),
_maxVisitorsPerNodePerClientVisitor(4),
_minBucketsPerVisitor(5),
+ _num_distributor_stripes(0),
_maxClusterClockSkew(0),
_inhibitMergeSendingOnBusyNodeDuration(60s),
_simulated_db_pruning_latency(0),
@@ -95,18 +96,18 @@ DistributorConfiguration::configureMaintenancePriorities(
const vespa::config::content::core::StorDistributormanagerConfig& cfg)
{
MaintenancePriorities& mp(_maintenancePriorities);
- mp.mergeMoveToIdealNode = cfg.priorityMergeMoveToIdealNode;
- mp.mergeOutOfSyncCopies = cfg.priorityMergeOutOfSyncCopies;
- mp.mergeTooFewCopies = cfg.priorityMergeTooFewCopies;
- mp.mergeGlobalBuckets = cfg.priorityMergeGlobalBuckets;
- mp.activateNoExistingActive = cfg.priorityActivateNoExistingActive;
+ mp.mergeMoveToIdealNode = cfg.priorityMergeMoveToIdealNode;
+ mp.mergeOutOfSyncCopies = cfg.priorityMergeOutOfSyncCopies;
+ mp.mergeTooFewCopies = cfg.priorityMergeTooFewCopies;
+ mp.mergeGlobalBuckets = cfg.priorityMergeGlobalBuckets;
+ mp.activateNoExistingActive = cfg.priorityActivateNoExistingActive;
mp.activateWithExistingActive = cfg.priorityActivateWithExistingActive;
- mp.deleteBucketCopy = cfg.priorityDeleteBucketCopy;
- mp.joinBuckets = cfg.priorityJoinBuckets;
- mp.splitDistributionBits = cfg.prioritySplitDistributionBits;
- mp.splitLargeBucket = cfg.prioritySplitLargeBucket;
- mp.splitInconsistentBucket = cfg.prioritySplitInconsistentBucket;
- mp.garbageCollection = cfg.priorityGarbageCollection;
+ mp.deleteBucketCopy = cfg.priorityDeleteBucketCopy;
+ mp.joinBuckets = cfg.priorityJoinBuckets;
+ mp.splitDistributionBits = cfg.prioritySplitDistributionBits;
+ mp.splitLargeBucket = cfg.prioritySplitLargeBucket;
+ mp.splitInconsistentBucket = cfg.prioritySplitInconsistentBucket;
+ mp.garbageCollection = cfg.priorityGarbageCollection;
}
void
@@ -181,6 +182,8 @@ DistributorConfiguration::configure(const vespa::config::content::core::StorDist
}
_simulated_db_pruning_latency = std::chrono::milliseconds(std::max(0, config.simulatedDbPruningLatencyMsec));
_simulated_db_merging_latency = std::chrono::milliseconds(std::max(0, config.simulatedDbMergingLatencyMsec));
+
+ _num_distributor_stripes = std::max(0, config.numDistributorStripes); // TODO STRIPE test
LOG(debug,
"Distributor now using new configuration parameters. Split limits: %d docs/%d bytes. "
diff --git a/storage/src/vespa/storage/config/distributorconfiguration.h b/storage/src/vespa/storage/config/distributorconfiguration.h
index 9c1456fa9fd..479298ff082 100644
--- a/storage/src/vespa/storage/config/distributorconfiguration.h
+++ b/storage/src/vespa/storage/config/distributorconfiguration.h
@@ -265,6 +265,8 @@ public:
return _enable_revert;
}
+ uint32_t num_distributor_stripes() const noexcept { return _num_distributor_stripes; }
+
bool containsTimeStatement(const std::string& documentSelection) const;
private:
@@ -297,6 +299,8 @@ private:
uint32_t _maxVisitorsPerNodePerClientVisitor;
uint32_t _minBucketsPerVisitor;
+ uint32_t _num_distributor_stripes;
+
MaintenancePriorities _maintenancePriorities;
std::chrono::seconds _maxClusterClockSkew;
std::chrono::seconds _inhibitMergeSendingOnBusyNodeDuration;
diff --git a/storage/src/vespa/storage/config/stor-distributormanager.def b/storage/src/vespa/storage/config/stor-distributormanager.def
index d1c4f35e929..887761ab3b5 100644
--- a/storage/src/vespa/storage/config/stor-distributormanager.def
+++ b/storage/src/vespa/storage/config/stor-distributormanager.def
@@ -273,3 +273,7 @@ prioritize_global_bucket_merges bool default=true
## Note: this feature only kicks in if the number of groups in the cluster is greater
## than 1.
max_activation_inhibited_out_of_sync_groups int default=0
+
+## TODO STRIPE document
+## If 0, legacy single stripe behavior is used. Currently supports 0 or 1.
+num_distributor_stripes int default=0 restart
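As noted above, the value is clamped to be non-negative when read into DistributorConfiguration, and 0 selects the legacy single-stripe code path. A small hedged sketch of that interpretation (the helper name is illustrative, not part of the patch):

    #include <algorithm>
    #include <cstdint>

    // Illustrative only: maps the raw config value onto an effective stripe count,
    // mirroring the "0 means legacy single-stripe behavior" convention documented above.
    inline uint32_t effective_stripe_count(int configured_stripes) {
        uint32_t stripes = static_cast<uint32_t>(std::max(0, configured_stripes));
        return (stripes == 0) ? 1 : stripes; // legacy mode still runs one logical stripe
    }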
diff --git a/storage/src/vespa/storage/distributor/CMakeLists.txt b/storage/src/vespa/storage/distributor/CMakeLists.txt
index d82d14831f4..57d6a23c79f 100644
--- a/storage/src/vespa/storage/distributor/CMakeLists.txt
+++ b/storage/src/vespa/storage/distributor/CMakeLists.txt
@@ -4,6 +4,7 @@ vespa_add_library(storage_distributor
activecopy.cpp
blockingoperationstarter.cpp
bucket_db_prune_elision.cpp
+ bucket_space_distribution_configs.cpp
bucket_space_distribution_context.cpp
bucketdbupdater.cpp
bucketgctimecalculator.cpp
@@ -23,6 +24,7 @@ vespa_add_library(storage_distributor
ideal_service_layer_nodes_bundle.cpp
idealstatemanager.cpp
idealstatemetricsset.cpp
+ legacy_single_stripe_accessor.cpp
messagetracker.cpp
nodeinfo.cpp
operation_routing_snapshot.cpp
@@ -40,6 +42,7 @@ vespa_add_library(storage_distributor
statechecker.cpp
statecheckers.cpp
statusreporterdelegate.cpp
+ stripe_bucket_db_updater.cpp
throttlingoperationstarter.cpp
update_metric_set.cpp
visitormetricsset.cpp
diff --git a/storage/src/vespa/storage/distributor/bucket_space_distribution_configs.cpp b/storage/src/vespa/storage/distributor/bucket_space_distribution_configs.cpp
new file mode 100644
index 00000000000..da5769411d4
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/bucket_space_distribution_configs.cpp
@@ -0,0 +1,17 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include "bucket_space_distribution_configs.h"
+#include <vespa/document/bucket/fixed_bucket_spaces.h>
+#include <vespa/storage/common/global_bucket_space_distribution_converter.h>
+#include <vespa/vdslib/distribution/distribution.h>
+
+namespace storage::distributor {
+
+BucketSpaceDistributionConfigs
+BucketSpaceDistributionConfigs::from_default_distribution(std::shared_ptr<const lib::Distribution> distribution) {
+ BucketSpaceDistributionConfigs ret;
+ ret.space_configs.emplace(document::FixedBucketSpaces::global_space(), GlobalBucketSpaceDistributionConverter::convert_to_global(*distribution));
+ ret.space_configs.emplace(document::FixedBucketSpaces::default_space(), std::move(distribution));
+ return ret;
+}
+
+}
diff --git a/storage/src/vespa/storage/distributor/bucket_space_distribution_configs.h b/storage/src/vespa/storage/distributor/bucket_space_distribution_configs.h
new file mode 100644
index 00000000000..9ebd8ef9732
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/bucket_space_distribution_configs.h
@@ -0,0 +1,27 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <vespa/document/bucket/bucketspace.h>
+#include <map>
+#include <memory>
+
+namespace storage::lib { class Distribution; }
+
+namespace storage::distributor {
+
+/**
+ * Represents a complete mapping of all known bucket spaces to their appropriate
+ * (possibly derived) distribution config.
+ */
+struct BucketSpaceDistributionConfigs {
+ std::map<document::BucketSpace, std::shared_ptr<const lib::Distribution>> space_configs;
+
+ std::shared_ptr<const lib::Distribution> get_or_nullptr(document::BucketSpace space) const noexcept {
+ auto iter = space_configs.find(space);
+ return (iter != space_configs.end()) ? iter->second : std::shared_ptr<const lib::Distribution>();
+ }
+
+ static BucketSpaceDistributionConfigs from_default_distribution(std::shared_ptr<const lib::Distribution>);
+};
+
+}
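Since the new struct is a plain value type, a caller can derive the full per-space mapping from the default-space distribution and then look up individual spaces without caring whether a derived config exists. A hedged usage sketch against the interface shown above; the surrounding function is illustrative and assumes the headers introduced in this patch are available:

    #include <vespa/document/bucket/fixed_bucket_spaces.h>
    #include <vespa/storage/distributor/bucket_space_distribution_configs.h>
    #include <utility>

    namespace storage::distributor {

    bool has_global_space_config(std::shared_ptr<const lib::Distribution> default_distr) {
        auto configs = BucketSpaceDistributionConfigs::from_default_distribution(std::move(default_distr));
        // get_or_nullptr() returns an empty shared_ptr for spaces without a derived config.
        return configs.get_or_nullptr(document::FixedBucketSpaces::global_space()) != nullptr;
    }

    }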
diff --git a/storage/src/vespa/storage/distributor/bucketdbupdater.cpp b/storage/src/vespa/storage/distributor/bucketdbupdater.cpp
index 6735ea1e533..f328b599604 100644
--- a/storage/src/vespa/storage/distributor/bucketdbupdater.cpp
+++ b/storage/src/vespa/storage/distributor/bucketdbupdater.cpp
@@ -2,12 +2,15 @@
#include "bucketdbupdater.h"
#include "bucket_db_prune_elision.h"
+#include "bucket_space_distribution_configs.h"
#include "bucket_space_distribution_context.h"
#include "distributor.h"
#include "distributor_bucket_space.h"
#include "distributormetricsset.h"
#include "simpleclusterinformation.h"
+#include "stripe_access_guard.h"
#include <vespa/document/bucket/fixed_bucket_spaces.h>
+#include <vespa/storage/common/global_bucket_space_distribution_converter.h>
#include <vespa/storageapi/message/persistence.h>
#include <vespa/storageapi/message/removelocation.h>
#include <vespa/vdslib/distribution/distribution.h>
@@ -24,74 +27,71 @@ using document::BucketSpace;
namespace storage::distributor {
-BucketDBUpdater::BucketDBUpdater(DistributorStripeInterface& owner,
- DistributorBucketSpaceRepo& bucketSpaceRepo,
- DistributorBucketSpaceRepo& readOnlyBucketSpaceRepo,
+BucketDBUpdater::BucketDBUpdater(DistributorStripeInterface& owner, // FIXME STRIPE!
DistributorMessageSender& sender,
- DistributorComponentRegister& compReg)
- : framework::StatusReporter("bucketdb", "Bucket DB Updater"),
- _distributorComponent(owner, bucketSpaceRepo, readOnlyBucketSpaceRepo, compReg, "Bucket DB Updater"),
- _node_ctx(_distributorComponent),
- _op_ctx(_distributorComponent),
- _distributor_interface(_distributorComponent.getDistributor()),
- _delayedRequests(),
- _sentMessages(),
- _pendingClusterState(),
+ DistributorComponentRegister& comp_reg,
+ StripeAccessor& stripe_accessor)
+ : framework::StatusReporter("temp_bucketdb", "Bucket DB Updater"), // TODO STRIPE rename once duplication is removed
+ _stripe_accessor(stripe_accessor),
+ _active_state_bundle(lib::ClusterState()),
+ _dummy_mutable_bucket_space_repo(std::make_unique<DistributorBucketSpaceRepo>(owner.getDistributorIndex())),
+ _dummy_read_only_bucket_space_repo(std::make_unique<DistributorBucketSpaceRepo>(owner.getDistributorIndex())),
+ _distributor_component(owner, *_dummy_mutable_bucket_space_repo, *_dummy_read_only_bucket_space_repo, comp_reg, "Bucket DB Updater"),
+ _node_ctx(_distributor_component),
+ _op_ctx(_distributor_component),
+ _distributor_interface(_distributor_component.getDistributor()),
+ _delayed_requests(),
+ _sent_messages(),
+ _pending_cluster_state(),
_history(),
_sender(sender),
- _enqueuedRechecks(),
- _outdatedNodesMap(),
- _transitionTimer(_node_ctx.clock()),
- _stale_reads_enabled(false),
- _active_distribution_contexts(),
- _explicit_transition_read_guard(),
- _distribution_context_mutex()
+ _enqueued_rechecks(),
+ _outdated_nodes_map(),
+ _transition_timer(_node_ctx.clock()),
+ _stale_reads_enabled(false)
{
- for (auto& elem : _op_ctx.bucket_space_repo()) {
- _active_distribution_contexts.emplace(
- elem.first,
- BucketSpaceDistributionContext::make_not_yet_initialized(_node_ctx.node_index()));
- _explicit_transition_read_guard.emplace(elem.first, std::shared_ptr<BucketDatabase::ReadGuard>());
- }
+ // FIXME STRIPE top-level Distributor needs a proper way to track the current cluster state bundle!
+ propagate_active_state_bundle_internally();
+ bootstrap_distribution_config(_distributor_component.getDistribution());
}
BucketDBUpdater::~BucketDBUpdater() = default;
-OperationRoutingSnapshot BucketDBUpdater::read_snapshot_for_bucket(const document::Bucket& bucket) const {
- const auto bucket_space = bucket.getBucketSpace();
- std::lock_guard lock(_distribution_context_mutex);
- auto active_state_iter = _active_distribution_contexts.find(bucket_space);
- assert(active_state_iter != _active_distribution_contexts.cend());
- auto& state = *active_state_iter->second;
- if (!state.bucket_owned_in_active_state(bucket.getBucketId())) {
- return OperationRoutingSnapshot::make_not_routable_in_state(active_state_iter->second);
+void
+BucketDBUpdater::propagate_active_state_bundle_internally() {
+ for (auto* repo : {_dummy_mutable_bucket_space_repo.get(), _dummy_read_only_bucket_space_repo.get()}) {
+ for (auto& iter : *repo) {
+ iter.second->setClusterState(_active_state_bundle.getDerivedClusterState(iter.first));
+ }
}
- const bool bucket_present_in_mutable_db = state.bucket_owned_in_pending_state(bucket.getBucketId());
- if (!bucket_present_in_mutable_db && !stale_reads_enabled()) {
- return OperationRoutingSnapshot::make_not_routable_in_state(active_state_iter->second);
+}
+
+void
+BucketDBUpdater::bootstrap_distribution_config(std::shared_ptr<const lib::Distribution> distribution) {
+ auto global_distr = GlobalBucketSpaceDistributionConverter::convert_to_global(*distribution);
+ for (auto* repo : {_dummy_mutable_bucket_space_repo.get(), _dummy_read_only_bucket_space_repo.get()}) {
+ repo->get(document::FixedBucketSpaces::default_space()).setDistribution(distribution);
+ repo->get(document::FixedBucketSpaces::global_space()).setDistribution(global_distr);
}
- const auto& space_repo = bucket_present_in_mutable_db
- ? _op_ctx.bucket_space_repo()
- : _op_ctx.read_only_bucket_space_repo();
- auto existing_guard_iter = _explicit_transition_read_guard.find(bucket_space);
- assert(existing_guard_iter != _explicit_transition_read_guard.cend());
- auto db_guard = existing_guard_iter->second
- ? existing_guard_iter-> second
- : space_repo.get(bucket_space).getBucketDatabase().acquire_read_guard();
- return OperationRoutingSnapshot::make_routable_with_guard(active_state_iter->second, std::move(db_guard), space_repo);
+ // TODO STRIPE do we need to bootstrap the stripes as well here? Or do they do this of their own volition?
+ // ... need to take a guard if so, so can probably not be done at ctor time..?
}
+// TODO STRIPE what to do with merge guards...
+// FIXME what about bucket DB replica update timestamp allocations?! Replace with u64 counter..?
+// Must at the very least ensure we use stripe-local TS generation for DB inserts...! i.e. no global TS
+// Or do we have to touch these at all here? Just defer all this via stripe interface?
void
BucketDBUpdater::flush()
{
- for (auto & entry : _sentMessages) {
+ for (auto & entry : _sent_messages) {
// Cannot sendDown MergeBucketReplies during flushing, since
// all lower links have been closed
if (entry.second._mergeReplyGuard) {
entry.second._mergeReplyGuard->resetReply();
}
}
- _sentMessages.clear();
+ _sent_messages.clear();
}
void
@@ -102,26 +102,19 @@ BucketDBUpdater::print(std::ostream& out, bool verbose, const std::string& inden
}
bool
-BucketDBUpdater::shouldDeferStateEnabling() const noexcept
+BucketDBUpdater::should_defer_state_enabling() const noexcept
{
return stale_reads_enabled();
}
bool
-BucketDBUpdater::hasPendingClusterState() const
+BucketDBUpdater::has_pending_cluster_state() const
{
- return static_cast<bool>(_pendingClusterState);
-}
-
-const lib::ClusterState*
-BucketDBUpdater::pendingClusterStateOrNull(const document::BucketSpace& space) const {
- return (hasPendingClusterState()
- ? _pendingClusterState->getNewClusterStateBundle().getDerivedClusterState(space).get()
- : nullptr);
+ return static_cast<bool>(_pending_cluster_state);
}
void
-BucketDBUpdater::sendRequestBucketInfo(
+BucketDBUpdater::send_request_bucket_info(
uint16_t node,
const document::Bucket& bucket,
const std::shared_ptr<MergeReplyGuard>& mergeReplyGuard)
@@ -135,116 +128,50 @@ BucketDBUpdater::sendRequestBucketInfo(
auto msg = std::make_shared<api::RequestBucketInfoCommand>(bucket.getBucketSpace(), buckets);
- LOG(debug,
- "Sending request bucket info command %" PRIu64 " for "
- "bucket %s to node %u",
- msg->getMsgId(),
- bucket.toString().c_str(),
- node);
+ LOG(debug, "Sending request bucket info command %" PRIu64 " for bucket %s to node %u",
+ msg->getMsgId(), bucket.toString().c_str(), node);
msg->setPriority(50);
msg->setAddress(_node_ctx.node_address(node));
- _sentMessages[msg->getMsgId()] =
+ _sent_messages[msg->getMsgId()] =
BucketRequest(node, _op_ctx.generate_unique_timestamp(),
bucket, mergeReplyGuard);
_sender.sendCommand(msg);
}
void
-BucketDBUpdater::recheckBucketInfo(uint32_t nodeIdx,
- const document::Bucket& bucket)
-{
- sendRequestBucketInfo(nodeIdx, bucket, std::shared_ptr<MergeReplyGuard>());
-}
-
-namespace {
-
-class ReadOnlyDbMergingInserter : public BucketDatabase::MergingProcessor {
- using NewEntries = std::vector<BucketDatabase::Entry>;
- NewEntries::const_iterator _current;
- const NewEntries::const_iterator _last;
-public:
- explicit ReadOnlyDbMergingInserter(const NewEntries& new_entries)
- : _current(new_entries.cbegin()),
- _last(new_entries.cend())
- {}
-
- Result merge(BucketDatabase::Merger& m) override {
- const uint64_t key_to_insert = m.bucket_key();
- uint64_t key_at_cursor = 0;
- while (_current != _last) {
- key_at_cursor = _current->getBucketId().toKey();
- if (key_at_cursor >= key_to_insert) {
- break;
- }
- m.insert_before_current(_current->getBucketId(), *_current);
- ++_current;
- }
- if ((_current != _last) && (key_at_cursor == key_to_insert)) {
- // If we encounter a bucket that already exists, replace value wholesale.
- // Don't try to cleverly merge replicas, as the values we currently hold
- // in the read-only DB may be stale.
- // Note that this case shouldn't really happen, since we only add previously
- // owned buckets to the read-only DB, and subsequent adds to a non-empty DB
- // can only happen for state preemptions. Since ownership is not regained
- // before a state is stable, a bucket is only added once. But we handle it
- // anyway in case this changes at some point in the future.
- m.current_entry() = *_current;
- return Result::Update;
- }
- return Result::KeepUnchanged;
- }
-
- void insert_remaining_at_end(BucketDatabase::TrailingInserter& inserter) override {
- for (; _current != _last; ++_current) {
- inserter.insert_at_end(_current->getBucketId(), *_current);
- }
- }
-};
-
-}
-
-void
-BucketDBUpdater::removeSuperfluousBuckets(
- const lib::ClusterStateBundle& newState,
+BucketDBUpdater::remove_superfluous_buckets(
+ StripeAccessGuard& guard,
+ const lib::ClusterStateBundle& new_state,
bool is_distribution_config_change)
{
- const bool move_to_read_only_db = shouldDeferStateEnabling();
const char* up_states = _op_ctx.storage_node_up_states();
+ // TODO STRIPE explicit space -> config mapping, don't get via repo
+ // ... but we need to get the current cluster state per space..!
for (auto& elem : _op_ctx.bucket_space_repo()) {
- const auto& newDistribution(elem.second->getDistribution());
- const auto& oldClusterState(elem.second->getClusterState());
- const auto& new_cluster_state = newState.getDerivedClusterState(elem.first);
+ const auto& old_cluster_state(elem.second->getClusterState());
+ const auto& new_cluster_state = new_state.getDerivedClusterState(elem.first);
// Running a full DB sweep is expensive, so if the cluster state transition does
// not actually indicate that buckets should possibly be removed, we elide it entirely.
if (!is_distribution_config_change
- && db_pruning_may_be_elided(oldClusterState, *new_cluster_state, up_states))
+ && db_pruning_may_be_elided(old_cluster_state, *new_cluster_state, up_states))
{
LOG(debug, "[bucket space '%s']: eliding DB pruning for state transition '%s' -> '%s'",
document::FixedBucketSpaces::to_string(elem.first).data(),
- oldClusterState.toString().c_str(), new_cluster_state->toString().c_str());
+ old_cluster_state.toString().c_str(), new_cluster_state->toString().c_str());
continue;
}
-
- auto& bucketDb(elem.second->getBucketDatabase());
- auto& readOnlyDb(_op_ctx.read_only_bucket_space_repo().get(elem.first).getBucketDatabase());
-
- // Remove all buckets not belonging to this distributor, or
- // being on storage nodes that are no longer up.
- MergingNodeRemover proc(
- oldClusterState,
- *new_cluster_state,
- _node_ctx.node_index(),
- newDistribution,
- up_states,
- move_to_read_only_db);
-
- bucketDb.merge(proc);
- if (move_to_read_only_db) {
- ReadOnlyDbMergingInserter read_only_merger(proc.getNonOwnedEntries());
- readOnlyDb.merge(read_only_merger);
+ // TODO STRIPE should we also pass old state and distr config? Must ensure we're in sync with stripe...
+ // .. but config is set synchronously via the guard upon pending state creation edge
+ auto maybe_lost = guard.remove_superfluous_buckets(elem.first, *new_cluster_state, is_distribution_config_change);
+ if (maybe_lost.buckets != 0) {
+ LOGBM(info, "After cluster state change %s, %zu buckets no longer "
+ "have available replicas. %zu documents in these buckets will "
+ "be unavailable until nodes come back up",
+ old_cluster_state.getTextualDifference(*new_cluster_state).c_str(),
+ maybe_lost.buckets, maybe_lost.documents);
}
maybe_inject_simulated_db_pruning_delay();
}
@@ -271,63 +198,58 @@ BucketDBUpdater::maybe_inject_simulated_db_merging_delay() {
}
void
-BucketDBUpdater::ensureTransitionTimerStarted()
+BucketDBUpdater::ensure_transition_timer_started()
{
// Don't overwrite start time if we're already processing a state, as
// that will make transition times appear artificially low.
- if (!hasPendingClusterState()) {
- _transitionTimer = framework::MilliSecTimer(
- _node_ctx.clock());
+ if (!has_pending_cluster_state()) {
+ _transition_timer = framework::MilliSecTimer(_node_ctx.clock());
}
}
void
-BucketDBUpdater::completeTransitionTimer()
+BucketDBUpdater::complete_transition_timer()
{
_distributor_interface.getMetrics()
- .stateTransitionTime.addValue(_transitionTimer.getElapsedTimeAsDouble());
-}
-
-void
-BucketDBUpdater::clearReadOnlyBucketRepoDatabases()
-{
- for (auto& space : _op_ctx.read_only_bucket_space_repo()) {
- space.second->getBucketDatabase().clear();
- }
+ .stateTransitionTime.addValue(_transition_timer.getElapsedTimeAsDouble());
}
void
-BucketDBUpdater::storageDistributionChanged()
+BucketDBUpdater::storage_distribution_changed(const BucketSpaceDistributionConfigs& configs)
{
- ensureTransitionTimerStarted();
+ ensure_transition_timer_started();
- removeSuperfluousBuckets(_op_ctx.cluster_state_bundle(), true);
+ auto guard = _stripe_accessor.rendezvous_and_hold_all();
+ // FIXME STRIPE might this cause a mismatch with the component stuff's own distribution config..?!
+ guard->update_distribution_config(configs);
+ remove_superfluous_buckets(*guard, _op_ctx.cluster_state_bundle(), true);
auto clusterInfo = std::make_shared<const SimpleClusterInformation>(
_node_ctx.node_index(),
_op_ctx.cluster_state_bundle(),
_op_ctx.storage_node_up_states());
- _pendingClusterState = PendingClusterState::createForDistributionChange(
+ _pending_cluster_state = PendingClusterState::createForDistributionChange(
_node_ctx.clock(),
std::move(clusterInfo),
_sender,
- _op_ctx.bucket_space_repo(),
- _op_ctx.generate_unique_timestamp());
- _outdatedNodesMap = _pendingClusterState->getOutdatedNodesMap();
- _op_ctx.bucket_space_repo().set_pending_cluster_state_bundle(_pendingClusterState->getNewClusterStateBundle());
+ _op_ctx.bucket_space_repo(), // TODO STRIPE cannot use!
+ _op_ctx.generate_unique_timestamp()); // TODO STRIPE must ensure no stripes can generate < this
+ _outdated_nodes_map = _pending_cluster_state->getOutdatedNodesMap();
+
+ guard->set_pending_cluster_state_bundle(_pending_cluster_state->getNewClusterStateBundle());
}
void
-BucketDBUpdater::replyToPreviousPendingClusterStateIfAny()
+BucketDBUpdater::reply_to_previous_pending_cluster_state_if_any()
{
- if (_pendingClusterState.get() && _pendingClusterState->hasCommand()) {
+ if (_pending_cluster_state.get() && _pending_cluster_state->hasCommand()) {
_distributor_interface.getMessageSender().sendUp(
- std::make_shared<api::SetSystemStateReply>(*_pendingClusterState->getCommand()));
+ std::make_shared<api::SetSystemStateReply>(*_pending_cluster_state->getCommand()));
}
}
void
-BucketDBUpdater::replyToActivationWithActualVersion(
+BucketDBUpdater::reply_to_activation_with_actual_version(
const api::ActivateClusterStateVersionCommand& cmd,
uint32_t actualVersion)
{
@@ -336,104 +258,49 @@ BucketDBUpdater::replyToActivationWithActualVersion(
_distributor_interface.getMessageSender().sendUp(reply); // TODO let API accept rvalues
}
-void BucketDBUpdater::update_read_snapshot_before_db_pruning() {
- std::lock_guard lock(_distribution_context_mutex);
- for (auto& elem : _op_ctx.bucket_space_repo()) {
- // At this point, we're still operating with a distribution context _without_ a
- // pending state, i.e. anyone using the context will expect to find buckets
- // in the DB that correspond to how the database looked like prior to pruning
- // buckets from the DB. To ensure this is not violated, take a snapshot of the
- // _mutable_ DB and expose this. This snapshot only lives until we atomically
- // flip to expose a distribution context that includes the new, pending state.
- // At that point, the read-only DB is known to contain the buckets that have
- // been pruned away, so we can release the mutable DB snapshot safely.
- // TODO test for, and handle, state preemption case!
- _explicit_transition_read_guard[elem.first] = elem.second->getBucketDatabase().acquire_read_guard();
- }
-}
-
-
-void BucketDBUpdater::update_read_snapshot_after_db_pruning(const lib::ClusterStateBundle& new_state) {
- std::lock_guard lock(_distribution_context_mutex);
- const auto old_default_state = _op_ctx.bucket_space_repo().get(
- document::FixedBucketSpaces::default_space()).cluster_state_sp();
- for (auto& elem : _op_ctx.bucket_space_repo()) {
- auto new_distribution = elem.second->distribution_sp();
- auto old_cluster_state = elem.second->cluster_state_sp();
- auto new_cluster_state = new_state.getDerivedClusterState(elem.first);
- _active_distribution_contexts.insert_or_assign(
- elem.first,
- BucketSpaceDistributionContext::make_state_transition(
- std::move(old_cluster_state),
- old_default_state,
- std::move(new_cluster_state),
- std::move(new_distribution),
- _node_ctx.node_index()));
- // We can now remove the explicit mutable DB snapshot, as the buckets that have been
- // pruned away are visible in the read-only DB.
- _explicit_transition_read_guard[elem.first] = std::shared_ptr<BucketDatabase::ReadGuard>();
- }
-}
-
-void BucketDBUpdater::update_read_snapshot_after_activation(const lib::ClusterStateBundle& activated_state) {
- std::lock_guard lock(_distribution_context_mutex);
- const auto& default_cluster_state = activated_state.getDerivedClusterState(document::FixedBucketSpaces::default_space());
- for (auto& elem : _op_ctx.bucket_space_repo()) {
- auto new_distribution = elem.second->distribution_sp();
- auto new_cluster_state = activated_state.getDerivedClusterState(elem.first);
- _active_distribution_contexts.insert_or_assign(
- elem.first,
- BucketSpaceDistributionContext::make_stable_state(
- std::move(new_cluster_state),
- default_cluster_state,
- std::move(new_distribution),
- _node_ctx.node_index()));
- }
-}
-
bool
BucketDBUpdater::onSetSystemState(
const std::shared_ptr<api::SetSystemStateCommand>& cmd)
{
- LOG(debug,
- "Received new cluster state %s",
+ LOG(debug, "Received new cluster state %s",
cmd->getSystemState().toString().c_str());
- const lib::ClusterStateBundle oldState = _op_ctx.cluster_state_bundle();
const lib::ClusterStateBundle& state = cmd->getClusterStateBundle();
- if (state == oldState) {
+ if (state == _active_state_bundle) {
return false;
}
- ensureTransitionTimerStarted();
- // Separate timer since _transitionTimer might span multiple pending states.
+ ensure_transition_timer_started();
+ // Separate timer since _transition_timer might span multiple pending states.
framework::MilliSecTimer process_timer(_node_ctx.clock());
- update_read_snapshot_before_db_pruning();
+
+ auto guard = _stripe_accessor.rendezvous_and_hold_all();
+ guard->update_read_snapshot_before_db_pruning();
const auto& bundle = cmd->getClusterStateBundle();
- removeSuperfluousBuckets(bundle, false);
- update_read_snapshot_after_db_pruning(bundle);
- replyToPreviousPendingClusterStateIfAny();
+ remove_superfluous_buckets(*guard, bundle, false);
+ guard->update_read_snapshot_after_db_pruning(bundle);
+ reply_to_previous_pending_cluster_state_if_any();
auto clusterInfo = std::make_shared<const SimpleClusterInformation>(
_node_ctx.node_index(),
_op_ctx.cluster_state_bundle(),
_op_ctx.storage_node_up_states());
- _pendingClusterState = PendingClusterState::createForClusterStateChange(
+ _pending_cluster_state = PendingClusterState::createForClusterStateChange(
_node_ctx.clock(),
std::move(clusterInfo),
_sender,
- _op_ctx.bucket_space_repo(),
+ _op_ctx.bucket_space_repo(), // TODO STRIPE remove
cmd,
- _outdatedNodesMap,
- _op_ctx.generate_unique_timestamp());
- _outdatedNodesMap = _pendingClusterState->getOutdatedNodesMap();
+ _outdated_nodes_map,
+ _op_ctx.generate_unique_timestamp()); // FIXME STRIPE must be atomic across all threads
+ _outdated_nodes_map = _pending_cluster_state->getOutdatedNodesMap();
_distributor_interface.getMetrics().set_cluster_state_processing_time.addValue(
process_timer.getElapsedTimeAsDouble());
- _op_ctx.bucket_space_repo().set_pending_cluster_state_bundle(_pendingClusterState->getNewClusterStateBundle());
- if (isPendingClusterStateCompleted()) {
- processCompletedPendingClusterState();
+ guard->set_pending_cluster_state_bundle(_pending_cluster_state->getNewClusterStateBundle());
+ if (is_pending_cluster_state_completed()) {
+ process_completed_pending_cluster_state(*guard);
}
return true;
}
@@ -441,25 +308,26 @@ BucketDBUpdater::onSetSystemState(
bool
BucketDBUpdater::onActivateClusterStateVersion(const std::shared_ptr<api::ActivateClusterStateVersionCommand>& cmd)
{
- if (hasPendingClusterState() && _pendingClusterState->isVersionedTransition()) {
- const auto pending_version = _pendingClusterState->clusterStateVersion();
+ if (has_pending_cluster_state() && _pending_cluster_state->isVersionedTransition()) {
+ const auto pending_version = _pending_cluster_state->clusterStateVersion();
if (pending_version == cmd->version()) {
- if (isPendingClusterStateCompleted()) {
- assert(_pendingClusterState->isDeferred());
- activatePendingClusterState();
+ if (is_pending_cluster_state_completed()) {
+ assert(_pending_cluster_state->isDeferred());
+ auto guard = _stripe_accessor.rendezvous_and_hold_all();
+ activate_pending_cluster_state(*guard);
} else {
LOG(error, "Received cluster state activation for pending version %u "
"without pending state being complete yet. This is not expected, "
"as no activation should be sent before all distributors have "
"reported that state processing is complete.", pending_version);
- replyToActivationWithActualVersion(*cmd, 0); // Invalid version, will cause re-send (hopefully when completed).
+ reply_to_activation_with_actual_version(*cmd, 0); // Invalid version, will cause re-send (hopefully when completed).
return true;
}
} else {
- replyToActivationWithActualVersion(*cmd, pending_version);
+ reply_to_activation_with_actual_version(*cmd, pending_version);
return true;
}
- } else if (shouldDeferStateEnabling()) {
+ } else if (should_defer_state_enabling()) {
// Likely just a resend, but log warn for now to get a feel of how common it is.
LOG(warning, "Received cluster state activation command for version %u, which "
"has no corresponding pending state. Likely resent operation.", cmd->version());
@@ -471,6 +339,7 @@ BucketDBUpdater::onActivateClusterStateVersion(const std::shared_ptr<api::Activa
return false;
}
+// TODO remove entirely from this abstraction level?
BucketDBUpdater::MergeReplyGuard::~MergeReplyGuard()
{
if (_reply) {
@@ -488,71 +357,25 @@ BucketDBUpdater::onMergeBucketReply(
// actually merged (source-only nodes?) we request the bucket info of the
// bucket again to make sure it's ok.
for (uint32_t i = 0; i < reply->getNodes().size(); i++) {
- sendRequestBucketInfo(reply->getNodes()[i].index,
- reply->getBucket(),
- replyGuard);
+ send_request_bucket_info(reply->getNodes()[i].index,
+ reply->getBucket(),
+ replyGuard);
}
return true;
}
void
-BucketDBUpdater::enqueueRecheckUntilPendingStateEnabled(
- uint16_t node,
- const document::Bucket& bucket)
-{
- LOG(spam,
- "DB updater has a pending cluster state, enqueuing recheck "
- "of bucket %s on node %u until state is done processing",
- bucket.toString().c_str(),
- node);
- _enqueuedRechecks.insert(EnqueuedBucketRecheck(node, bucket));
-}
-
-void
-BucketDBUpdater::sendAllQueuedBucketRechecks()
+BucketDBUpdater::send_all_queued_bucket_rechecks()
{
- LOG(spam,
- "Sending %zu queued bucket rechecks previously received "
- "via NotifyBucketChange commands",
- _enqueuedRechecks.size());
+ LOG(spam, "Sending %zu queued bucket rechecks previously received "
+ "via NotifyBucketChange commands",
+ _enqueued_rechecks.size());
- for (const auto & entry :_enqueuedRechecks) {
- sendRequestBucketInfo(entry.node, entry.bucket, std::shared_ptr<MergeReplyGuard>());
+ for (const auto & entry :_enqueued_rechecks) {
+ send_request_bucket_info(entry.node, entry.bucket, std::shared_ptr<MergeReplyGuard>());
}
- _enqueuedRechecks.clear();
-}
-
-bool
-BucketDBUpdater::onNotifyBucketChange(
- const std::shared_ptr<api::NotifyBucketChangeCommand>& cmd)
-{
- // Immediately schedule reply to ensure it is sent.
- _sender.sendReply(std::make_shared<api::NotifyBucketChangeReply>(*cmd));
-
- if (!cmd->getBucketInfo().valid()) {
- LOG(error,
- "Received invalid bucket info for bucket %s from notify bucket "
- "change! Not updating bucket.",
- cmd->getBucketId().toString().c_str());
- return true;
- }
- LOG(debug,
- "Received notify bucket change from node %u for bucket %s with %s.",
- cmd->getSourceIndex(),
- cmd->getBucketId().toString().c_str(),
- cmd->getBucketInfo().toString().c_str());
-
- if (hasPendingClusterState()) {
- enqueueRecheckUntilPendingStateEnabled(cmd->getSourceIndex(),
- cmd->getBucket());
- } else {
- sendRequestBucketInfo(cmd->getSourceIndex(),
- cmd->getBucket(),
- std::shared_ptr<MergeReplyGuard>());
- }
-
- return true;
+ _enqueued_rechecks.clear();
}
bool sort_pred(const BucketListMerger::BucketEntry& left,
@@ -563,178 +386,64 @@ bool sort_pred(const BucketListMerger::BucketEntry& left,
bool
BucketDBUpdater::onRequestBucketInfoReply(
- const std::shared_ptr<api::RequestBucketInfoReply> & repl)
+ const std::shared_ptr<api::RequestBucketInfoReply>& repl)
{
- if (pendingClusterStateAccepted(repl)) {
+ if (pending_cluster_state_accepted(repl)) {
return true;
}
- return processSingleBucketInfoReply(repl);
+ return false;
}
bool
-BucketDBUpdater::pendingClusterStateAccepted(
- const std::shared_ptr<api::RequestBucketInfoReply> & repl)
+BucketDBUpdater::pending_cluster_state_accepted(
+ const std::shared_ptr<api::RequestBucketInfoReply>& repl)
{
- if (_pendingClusterState.get()
- && _pendingClusterState->onRequestBucketInfoReply(repl))
+ if (_pending_cluster_state.get()
+ && _pending_cluster_state->onRequestBucketInfoReply(repl))
{
- if (isPendingClusterStateCompleted()) {
- processCompletedPendingClusterState();
+ if (is_pending_cluster_state_completed()) {
+ auto guard = _stripe_accessor.rendezvous_and_hold_all();
+ process_completed_pending_cluster_state(*guard);
}
return true;
}
- LOG(spam,
- "Reply %s was not accepted by pending cluster state",
+ LOG(spam, "Reply %s was not accepted by pending cluster state",
repl->toString().c_str());
return false;
}
void
-BucketDBUpdater::handleSingleBucketInfoFailure(
- const std::shared_ptr<api::RequestBucketInfoReply>& repl,
- const BucketRequest& req)
+BucketDBUpdater::resend_delayed_messages()
{
- LOG(debug, "Request bucket info failed towards node %d: error was %s",
- req.targetNode, repl->getResult().toString().c_str());
-
- if (req.bucket.getBucketId() != document::BucketId(0)) {
- framework::MilliSecTime sendTime(_node_ctx.clock());
- sendTime += framework::MilliSecTime(100);
- _delayedRequests.emplace_back(sendTime, req);
+ if (_pending_cluster_state) {
+ _pending_cluster_state->resendDelayedMessages();
}
-}
-
-void
-BucketDBUpdater::resendDelayedMessages()
-{
- if (_pendingClusterState) {
- _pendingClusterState->resendDelayedMessages();
- }
- if (_delayedRequests.empty()) {
+ if (_delayed_requests.empty()) {
return; // Don't fetch time if not needed
}
framework::MilliSecTime currentTime(_node_ctx.clock());
- while (!_delayedRequests.empty()
- && currentTime >= _delayedRequests.front().first)
+ while (!_delayed_requests.empty()
+ && currentTime >= _delayed_requests.front().first)
{
- BucketRequest& req(_delayedRequests.front().second);
- sendRequestBucketInfo(req.targetNode, req.bucket, std::shared_ptr<MergeReplyGuard>());
- _delayedRequests.pop_front();
- }
-}
-
-void
-BucketDBUpdater::convertBucketInfoToBucketList(
- const std::shared_ptr<api::RequestBucketInfoReply>& repl,
- uint16_t targetNode, BucketListMerger::BucketList& newList)
-{
- for (const auto & entry : repl->getBucketInfo()) {
- LOG(debug, "Received bucket information from node %u for bucket %s: %s", targetNode,
- entry._bucketId.toString().c_str(), entry._info.toString().c_str());
-
- newList.emplace_back(entry._bucketId, entry._info);
- }
-}
-
-void
-BucketDBUpdater::mergeBucketInfoWithDatabase(
- const std::shared_ptr<api::RequestBucketInfoReply>& repl,
- const BucketRequest& req)
-{
- BucketListMerger::BucketList existing;
- BucketListMerger::BucketList newList;
-
- findRelatedBucketsInDatabase(req.targetNode, req.bucket, existing);
- convertBucketInfoToBucketList(repl, req.targetNode, newList);
-
- std::sort(existing.begin(), existing.end(), sort_pred);
- std::sort(newList.begin(), newList.end(), sort_pred);
-
- BucketListMerger merger(newList, existing, req.timestamp);
- updateDatabase(req.bucket.getBucketSpace(), req.targetNode, merger);
-}
-
-bool
-BucketDBUpdater::processSingleBucketInfoReply(
- const std::shared_ptr<api::RequestBucketInfoReply> & repl)
-{
- auto iter = _sentMessages.find(repl->getMsgId());
-
- // Has probably been deleted for some reason earlier.
- if (iter == _sentMessages.end()) {
- return true;
- }
-
- BucketRequest req = iter->second;
- _sentMessages.erase(iter);
-
- if (!_op_ctx.storage_node_is_up(req.bucket.getBucketSpace(), req.targetNode)) {
- // Ignore replies from nodes that are down.
- return true;
- }
- if (repl->getResult().getResult() != api::ReturnCode::OK) {
- handleSingleBucketInfoFailure(repl, req);
- return true;
- }
- mergeBucketInfoWithDatabase(repl, req);
- return true;
-}
-
-void
-BucketDBUpdater::addBucketInfoForNode(
- const BucketDatabase::Entry& e,
- uint16_t node,
- BucketListMerger::BucketList& existing) const
-{
- const BucketCopy* copy(e->getNode(node));
- if (copy) {
- existing.emplace_back(e.getBucketId(), copy->getBucketInfo());
- }
-}
-
-void
-BucketDBUpdater::findRelatedBucketsInDatabase(uint16_t node, const document::Bucket& bucket,
- BucketListMerger::BucketList& existing)
-{
- auto &distributorBucketSpace(_op_ctx.bucket_space_repo().get(bucket.getBucketSpace()));
- std::vector<BucketDatabase::Entry> entries;
- distributorBucketSpace.getBucketDatabase().getAll(bucket.getBucketId(), entries);
-
- for (const BucketDatabase::Entry & entry : entries) {
- addBucketInfoForNode(entry, node, existing);
- }
-}
-
-void
-BucketDBUpdater::updateDatabase(document::BucketSpace bucketSpace, uint16_t node, BucketListMerger& merger)
-{
- for (const document::BucketId & bucketId : merger.getRemovedEntries()) {
- document::Bucket bucket(bucketSpace, bucketId);
- _op_ctx.remove_node_from_bucket_database(bucket, node);
- }
-
- for (const BucketListMerger::BucketEntry& entry : merger.getAddedEntries()) {
- document::Bucket bucket(bucketSpace, entry.first);
- _op_ctx.update_bucket_database(
- bucket,
- BucketCopy(merger.getTimestamp(), node, entry.second),
- DatabaseUpdate::CREATE_IF_NONEXISTING);
+ BucketRequest& req(_delayed_requests.front().second);
+ send_request_bucket_info(req.targetNode, req.bucket, std::shared_ptr<MergeReplyGuard>());
+ _delayed_requests.pop_front();
}
}
bool
-BucketDBUpdater::isPendingClusterStateCompleted() const
+BucketDBUpdater::is_pending_cluster_state_completed() const
{
- return _pendingClusterState.get() && _pendingClusterState->done();
+ return _pending_cluster_state.get() && _pending_cluster_state->done();
}
void
-BucketDBUpdater::processCompletedPendingClusterState()
+BucketDBUpdater::process_completed_pending_cluster_state(StripeAccessGuard& guard)
{
- if (_pendingClusterState->isDeferred()) {
+ if (_pending_cluster_state->isDeferred()) {
LOG(debug, "Deferring completion of pending cluster state version %u until explicitly activated",
- _pendingClusterState->clusterStateVersion());
- assert(_pendingClusterState->hasCommand()); // Deferred transitions should only ever be created by state commands.
+ _pending_cluster_state->clusterStateVersion());
+ assert(_pending_cluster_state->hasCommand()); // Deferred transitions should only ever be created by state commands.
// Sending down SetSystemState command will reach the state manager and a reply
// will be auto-sent back to the cluster controller in charge. Once this happens,
// it will send an explicit activation command once all distributors have reported
@@ -743,73 +452,81 @@ BucketDBUpdater::processCompletedPendingClusterState()
// taken effect via activation. External operation handler will keep operations from
// actually being scheduled until state has been activated. The external operation handler
// needs to be explicitly aware of the case where no state has yet to be activated.
- _distributor_interface.getMessageSender().sendDown(
- _pendingClusterState->getCommand());
- _pendingClusterState->clearCommand();
+ _distributor_interface.getMessageSender().sendDown(_pending_cluster_state->getCommand());
+ _pending_cluster_state->clearCommand();
return;
}
// Distribution config change or non-deferred cluster state. Immediately activate
// the pending state without being told to do so explicitly.
- activatePendingClusterState();
+ activate_pending_cluster_state(guard);
}
void
-BucketDBUpdater::activatePendingClusterState()
+BucketDBUpdater::activate_pending_cluster_state(StripeAccessGuard& guard)
{
framework::MilliSecTimer process_timer(_node_ctx.clock());
- _pendingClusterState->mergeIntoBucketDatabases();
+ _pending_cluster_state->merge_into_bucket_databases(guard);
maybe_inject_simulated_db_merging_delay();
- if (_pendingClusterState->isVersionedTransition()) {
- LOG(debug, "Activating pending cluster state version %u", _pendingClusterState->clusterStateVersion());
- enableCurrentClusterStateBundleInDistributor();
- if (_pendingClusterState->hasCommand()) {
- _distributor_interface.getMessageSender().sendDown(
- _pendingClusterState->getCommand());
+ if (_pending_cluster_state->isVersionedTransition()) {
+ LOG(debug, "Activating pending cluster state version %u", _pending_cluster_state->clusterStateVersion());
+ enable_current_cluster_state_bundle_in_distributor_and_stripes(guard);
+ if (_pending_cluster_state->hasCommand()) {
+ _distributor_interface.getMessageSender().sendDown(_pending_cluster_state->getCommand());
}
- addCurrentStateToClusterStateHistory();
+ add_current_state_to_cluster_state_history();
} else {
LOG(debug, "Activating pending distribution config");
// TODO distribution changes cannot currently be deferred as they are not
// initiated by the cluster controller!
- _distributor_interface.notifyDistributionChangeEnabled();
+ _distributor_interface.notifyDistributionChangeEnabled(); // TODO factor these two out into one func?
+ guard.notify_distribution_change_enabled();
}
- update_read_snapshot_after_activation(_pendingClusterState->getNewClusterStateBundle());
- _pendingClusterState.reset();
- _outdatedNodesMap.clear();
- _op_ctx.bucket_space_repo().clear_pending_cluster_state_bundle();
- sendAllQueuedBucketRechecks();
- completeTransitionTimer();
- clearReadOnlyBucketRepoDatabases();
+ guard.update_read_snapshot_after_activation(_pending_cluster_state->getNewClusterStateBundle());
+ _pending_cluster_state.reset();
+ _outdated_nodes_map.clear();
+ guard.clear_pending_cluster_state_bundle();
+ send_all_queued_bucket_rechecks();
+ complete_transition_timer();
+ guard.clear_read_only_bucket_repo_databases();
_distributor_interface.getMetrics().activate_cluster_state_processing_time.addValue(
process_timer.getElapsedTimeAsDouble());
}
void
-BucketDBUpdater::enableCurrentClusterStateBundleInDistributor()
+BucketDBUpdater::enable_current_cluster_state_bundle_in_distributor_and_stripes(StripeAccessGuard& guard)
{
- const lib::ClusterStateBundle& state(
- _pendingClusterState->getNewClusterStateBundle());
+ const lib::ClusterStateBundle& state = _pending_cluster_state->getNewClusterStateBundle();
+
+ _active_state_bundle = _pending_cluster_state->getNewClusterStateBundle();
+ propagate_active_state_bundle_internally();
- LOG(debug,
- "BucketDBUpdater finished processing state %s",
+ LOG(debug, "BucketDBUpdater finished processing state %s",
state.getBaselineClusterState()->toString().c_str());
+ // First enable the cluster state for the _top-level_ distributor component.
_distributor_interface.enableClusterStateBundle(state);
+ // And then subsequently for all underlying stripes. Technically the order doesn't matter
+ // since all threads are blocked at this point.
+ guard.enable_cluster_state_bundle(state);
}
void BucketDBUpdater::simulate_cluster_state_bundle_activation(const lib::ClusterStateBundle& activated_state) {
- update_read_snapshot_after_activation(activated_state);
+ auto guard = _stripe_accessor.rendezvous_and_hold_all();
_distributor_interface.enableClusterStateBundle(activated_state);
+ guard->enable_cluster_state_bundle(activated_state);
+
+ _active_state_bundle = activated_state;
+ propagate_active_state_bundle_internally();
}
void
-BucketDBUpdater::addCurrentStateToClusterStateHistory()
+BucketDBUpdater::add_current_state_to_cluster_state_history()
{
- _history.push_back(_pendingClusterState->getSummary());
+ _history.push_back(_pending_cluster_state->getSummary());
if (_history.size() > 50) {
_history.pop_front();
@@ -857,22 +574,22 @@ BucketDBUpdater::reportStatus(std::ostream& out,
xos << XmlTag("status")
<< XmlAttribute("id", BUCKETDB)
<< XmlAttribute("name", BUCKETDB_UPDATER);
- reportXmlStatus(xos, path);
+ report_xml_status(xos, path);
xos << XmlEndTag();
return true;
}
vespalib::string
-BucketDBUpdater::reportXmlStatus(vespalib::xml::XmlOutputStream& xos,
- const framework::HttpUrlPath&) const
+BucketDBUpdater::report_xml_status(vespalib::xml::XmlOutputStream& xos,
+ const framework::HttpUrlPath&) const
{
using namespace vespalib::xml;
xos << XmlTag("bucketdb")
<< XmlTag("systemstate_active")
<< XmlContent(_op_ctx.cluster_state_bundle().getBaselineClusterState()->toString())
<< XmlEndTag();
- if (_pendingClusterState) {
- xos << *_pendingClusterState;
+ if (_pending_cluster_state) {
+ xos << *_pending_cluster_state;
}
xos << XmlTag("systemstate_history");
for (auto i(_history.rbegin()), e(_history.rend()); i != e; ++i) {
@@ -884,13 +601,13 @@ BucketDBUpdater::reportXmlStatus(vespalib::xml::XmlOutputStream& xos,
}
xos << XmlEndTag()
<< XmlTag("single_bucket_requests");
- for (const auto & entry : _sentMessages)
+ for (const auto & entry : _sent_messages)
{
entry.second.print_xml_tag(xos, XmlAttribute("sendtimestamp", entry.second.timestamp));
}
xos << XmlEndTag()
<< XmlTag("delayed_single_bucket_requests");
- for (const auto & entry : _delayedRequests)
+ for (const auto & entry : _delayed_requests)
{
entry.second.print_xml_tag(xos, XmlAttribute("resendtimestamp", entry.first.getTime()));
}
@@ -898,166 +615,4 @@ BucketDBUpdater::reportXmlStatus(vespalib::xml::XmlOutputStream& xos,
return "";
}
-BucketDBUpdater::MergingNodeRemover::MergingNodeRemover(
- const lib::ClusterState& oldState,
- const lib::ClusterState& s,
- uint16_t localIndex,
- const lib::Distribution& distribution,
- const char* upStates,
- bool track_non_owned_entries)
- : _oldState(oldState),
- _state(s),
- _available_nodes(),
- _nonOwnedBuckets(),
- _removed_buckets(0),
- _removed_documents(0),
- _localIndex(localIndex),
- _distribution(distribution),
- _upStates(upStates),
- _track_non_owned_entries(track_non_owned_entries),
- _cachedDecisionSuperbucket(UINT64_MAX),
- _cachedOwned(false)
-{
- // TODO intersection of cluster state and distribution config
- const uint16_t storage_count = s.getNodeCount(lib::NodeType::STORAGE);
- _available_nodes.resize(storage_count);
- for (uint16_t i = 0; i < storage_count; ++i) {
- if (s.getNodeState(lib::Node(lib::NodeType::STORAGE, i)).getState().oneOf(_upStates)) {
- _available_nodes[i] = true;
- }
- }
-}
-
-void
-BucketDBUpdater::MergingNodeRemover::logRemove(const document::BucketId& bucketId, const char* msg) const
-{
- LOG(spam, "Removing bucket %s: %s", bucketId.toString().c_str(), msg);
-}
-
-namespace {
-
-uint64_t superbucket_from_id(const document::BucketId& id, uint16_t distribution_bits) noexcept {
- // The n LSBs of the bucket ID contain the superbucket number. Mask off the rest.
- return id.getRawId() & ~(UINT64_MAX << distribution_bits);
-}
-
-}
-
-bool
-BucketDBUpdater::MergingNodeRemover::distributorOwnsBucket(
- const document::BucketId& bucketId) const
-{
- // TODO "no distributors available" case is the same for _all_ buckets; cache once in constructor.
- // TODO "too few bits used" case can be cheaply checked without needing exception
- try {
- const auto bits = _state.getDistributionBitCount();
- const auto this_superbucket = superbucket_from_id(bucketId, bits);
- if (_cachedDecisionSuperbucket == this_superbucket) {
- if (!_cachedOwned) {
- logRemove(bucketId, "bucket now owned by another distributor (cached)");
- }
- return _cachedOwned;
- }
-
- uint16_t distributor = _distribution.getIdealDistributorNode(_state, bucketId, "uim");
- _cachedDecisionSuperbucket = this_superbucket;
- _cachedOwned = (distributor == _localIndex);
- if (!_cachedOwned) {
- logRemove(bucketId, "bucket now owned by another distributor");
- return false;
- }
- return true;
- } catch (lib::TooFewBucketBitsInUseException& exc) {
- logRemove(bucketId, "using too few distribution bits now");
- } catch (lib::NoDistributorsAvailableException& exc) {
- logRemove(bucketId, "no distributors are available");
- }
- return false;
-}
-
-void
-BucketDBUpdater::MergingNodeRemover::setCopiesInEntry(
- BucketDatabase::Entry& e,
- const std::vector<BucketCopy>& copies) const
-{
- e->clear();
-
- std::vector<uint16_t> order =
- _distribution.getIdealStorageNodes(_state, e.getBucketId(), _upStates);
-
- e->addNodes(copies, order);
-
- LOG(spam, "Changed %s", e->toString().c_str());
-}
-
-bool
-BucketDBUpdater::MergingNodeRemover::has_unavailable_nodes(const storage::BucketDatabase::Entry& e) const
-{
- const uint16_t n_nodes = e->getNodeCount();
- for (uint16_t i = 0; i < n_nodes; i++) {
- const uint16_t node_idx = e->getNodeRef(i).getNode();
- if (!storage_node_is_available(node_idx)) {
- return true;
- }
- }
- return false;
-}
-
-BucketDatabase::MergingProcessor::Result
-BucketDBUpdater::MergingNodeRemover::merge(storage::BucketDatabase::Merger& merger)
-{
- document::BucketId bucketId(merger.bucket_id());
- LOG(spam, "Check for remove: bucket %s", bucketId.toString().c_str());
- if (!distributorOwnsBucket(bucketId)) {
- // TODO remove in favor of DB snapshotting
- if (_track_non_owned_entries) {
- _nonOwnedBuckets.emplace_back(merger.current_entry());
- }
- return Result::Skip;
- }
- auto& e = merger.current_entry();
-
- if (e->getNodeCount() == 0) { // TODO when should this edge ever trigger?
- return Result::Skip;
- }
-
- if (!has_unavailable_nodes(e)) {
- return Result::KeepUnchanged;
- }
-
- std::vector<BucketCopy> remainingCopies;
- for (uint16_t i = 0; i < e->getNodeCount(); i++) {
- const uint16_t node_idx = e->getNodeRef(i).getNode();
- if (storage_node_is_available(node_idx)) {
- remainingCopies.push_back(e->getNodeRef(i));
- }
- }
-
- if (remainingCopies.empty()) {
- ++_removed_buckets;
- _removed_documents += e->getHighestDocumentCount();
- return Result::Skip;
- } else {
- setCopiesInEntry(e, remainingCopies);
- return Result::Update;
- }
-}
-
-bool
-BucketDBUpdater::MergingNodeRemover::storage_node_is_available(uint16_t index) const noexcept
-{
- return ((index < _available_nodes.size()) && _available_nodes[index]);
-}
-
-BucketDBUpdater::MergingNodeRemover::~MergingNodeRemover()
-{
- if (_removed_buckets != 0) {
- LOGBM(info, "After cluster state change %s, %zu buckets no longer "
- "have available replicas. %zu documents in these buckets will "
- "be unavailable until nodes come back up",
- _oldState.getTextualDifference(_state).c_str(),
- _removed_buckets, _removed_documents);
- }
-}
-
} // distributor
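
The MergingNodeRemover removed above keys its ownership decisions on superbuckets: the distribution-bit-count least significant bits of a bucket ID select the superbucket, and every bucket in one superbucket resolves to the same owning distributor, so the (comparatively expensive) ideal-state lookup can be memoized per superbucket while iterating the database. A minimal stand-alone sketch of that caching idea, with illustrative names rather than the Vespa API:

// Illustrative sketch (not Vespa API) of a superbucket-level ownership cache.
#include <cstdint>
#include <functional>

class SuperbucketOwnershipCache {
    uint64_t _cached_superbucket = UINT64_MAX; // sentinel: nothing cached yet
    bool     _cached_owned       = false;
public:
    static uint64_t superbucket_of(uint64_t raw_bucket_id, uint16_t distribution_bits) noexcept {
        // The distribution_bits least significant bits select the superbucket.
        // Assumes 0 < distribution_bits < 64, as in practice.
        return raw_bucket_id & ~(UINT64_MAX << distribution_bits);
    }
    // `compute_ownership` stands in for the expensive ideal-state distributor lookup.
    bool owns(uint64_t raw_bucket_id, uint16_t distribution_bits,
              const std::function<bool(uint64_t)>& compute_ownership) {
        const uint64_t sb = superbucket_of(raw_bucket_id, distribution_bits);
        if (sb != _cached_superbucket) {
            _cached_superbucket = sb;
            _cached_owned = compute_ownership(raw_bucket_id);
        }
        return _cached_owned; // reused for all consecutive buckets in the same superbucket
    }
};
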
diff --git a/storage/src/vespa/storage/distributor/bucketdbupdater.h b/storage/src/vespa/storage/distributor/bucketdbupdater.h
index 375a5cee4e7..b990f094e9c 100644
--- a/storage/src/vespa/storage/distributor/bucketdbupdater.h
+++ b/storage/src/vespa/storage/distributor/bucketdbupdater.h
@@ -26,8 +26,11 @@ class XmlAttribute;
namespace storage::distributor {
-class DistributorStripeInterface;
+struct BucketSpaceDistributionConfigs;
class BucketSpaceDistributionContext;
+class DistributorStripeInterface;
+class StripeAccessor;
+class StripeAccessGuard;
class BucketDBUpdater : public framework::StatusReporter,
public api::MessageHandler
@@ -35,41 +38,31 @@ class BucketDBUpdater : public framework::StatusReporter,
public:
using OutdatedNodesMap = dbtransition::OutdatedNodesMap;
BucketDBUpdater(DistributorStripeInterface& owner,
- DistributorBucketSpaceRepo& bucketSpaceRepo,
- DistributorBucketSpaceRepo& readOnlyBucketSpaceRepo,
DistributorMessageSender& sender,
- DistributorComponentRegister& compReg);
+ DistributorComponentRegister& comp_reg,
+ StripeAccessor& stripe_accessor);
~BucketDBUpdater() override;
void flush();
- const lib::ClusterState* pendingClusterStateOrNull(const document::BucketSpace&) const;
- void recheckBucketInfo(uint32_t nodeIdx, const document::Bucket& bucket);
bool onSetSystemState(const std::shared_ptr<api::SetSystemStateCommand>& cmd) override;
bool onActivateClusterStateVersion(const std::shared_ptr<api::ActivateClusterStateVersionCommand>& cmd) override;
bool onRequestBucketInfoReply(const std::shared_ptr<api::RequestBucketInfoReply> & repl) override;
bool onMergeBucketReply(const std::shared_ptr<api::MergeBucketReply>& reply) override;
- bool onNotifyBucketChange(const std::shared_ptr<api::NotifyBucketChangeCommand>&) override;
- void resendDelayedMessages();
- void storageDistributionChanged();
- vespalib::string reportXmlStatus(vespalib::xml::XmlOutputStream&, const framework::HttpUrlPath&) const;
vespalib::string getReportContentType(const framework::HttpUrlPath&) const override;
bool reportStatus(std::ostream&, const framework::HttpUrlPath&) const override;
+
+ void resend_delayed_messages();
+ void storage_distribution_changed(const BucketSpaceDistributionConfigs& configs);
+ void bootstrap_distribution_config(std::shared_ptr<const lib::Distribution>);
+
+ vespalib::string report_xml_status(vespalib::xml::XmlOutputStream& xos, const framework::HttpUrlPath&) const;
+
void print(std::ostream& out, bool verbose, const std::string& indent) const;
const DistributorNodeContext& node_context() const { return _node_ctx; }
DistributorOperationContext& operation_context() { return _op_ctx; }
- /**
- * Returns whether the current PendingClusterState indicates that there has
- * been a transfer of bucket ownership amongst the distributors in the
- * cluster. This method only makes sense to call when _pendingClusterState
- * is active, such as from within a enableClusterState() call.
- */
- bool bucketOwnershipHasChanged() const {
- return ((_pendingClusterState.get() != nullptr)
- && _pendingClusterState->hasBucketOwnershipTransfer());
- }
void set_stale_reads_enabled(bool enabled) noexcept {
_stale_reads_enabled.store(enabled, std::memory_order_relaxed);
}
@@ -77,7 +70,6 @@ public:
return _stale_reads_enabled.load(std::memory_order_relaxed);
}
- OperationRoutingSnapshot read_snapshot_for_bucket(const document::Bucket&) const;
private:
class MergeReplyGuard {
public:
@@ -141,126 +133,54 @@ private:
// Transitively invokes Distributor::enableClusterStateBundle
void simulate_cluster_state_bundle_activation(const lib::ClusterStateBundle& activated_state);
- bool shouldDeferStateEnabling() const noexcept;
- bool hasPendingClusterState() const;
- bool pendingClusterStateAccepted(const std::shared_ptr<api::RequestBucketInfoReply>& repl);
- bool processSingleBucketInfoReply(const std::shared_ptr<api::RequestBucketInfoReply>& repl);
- void handleSingleBucketInfoFailure(const std::shared_ptr<api::RequestBucketInfoReply>& repl,
- const BucketRequest& req);
- bool isPendingClusterStateCompleted() const;
- void processCompletedPendingClusterState();
- void activatePendingClusterState();
- void mergeBucketInfoWithDatabase(const std::shared_ptr<api::RequestBucketInfoReply>& repl,
- const BucketRequest& req);
- void convertBucketInfoToBucketList(const std::shared_ptr<api::RequestBucketInfoReply>& repl,
- uint16_t targetNode, BucketListMerger::BucketList& newList);
- void sendRequestBucketInfo(uint16_t node, const document::Bucket& bucket,
- const std::shared_ptr<MergeReplyGuard>& mergeReply);
- void addBucketInfoForNode(const BucketDatabase::Entry& e, uint16_t node,
- BucketListMerger::BucketList& existing) const;
- void ensureTransitionTimerStarted();
- void completeTransitionTimer();
- void clearReadOnlyBucketRepoDatabases();
- /**
- * Adds all buckets contained in the bucket database
- * that are either contained
- * in bucketId, or that bucketId is contained in, that have copies
- * on the given node.
- */
- void findRelatedBucketsInDatabase(uint16_t node, const document::Bucket& bucket,
- BucketListMerger::BucketList& existing);
-
- /**
- Updates the bucket database from the information generated by the given
- bucket list merger.
- */
- void updateDatabase(document::BucketSpace bucketSpace, uint16_t node, BucketListMerger& merger);
-
- void updateState(const lib::ClusterState& oldState, const lib::ClusterState& newState);
-
- void update_read_snapshot_before_db_pruning();
- void removeSuperfluousBuckets(const lib::ClusterStateBundle& newState,
- bool is_distribution_config_change);
- void update_read_snapshot_after_db_pruning(const lib::ClusterStateBundle& new_state);
- void update_read_snapshot_after_activation(const lib::ClusterStateBundle& activated_state);
-
- void replyToPreviousPendingClusterStateIfAny();
- void replyToActivationWithActualVersion(
+ bool should_defer_state_enabling() const noexcept;
+ bool has_pending_cluster_state() const;
+ bool pending_cluster_state_accepted(const std::shared_ptr<api::RequestBucketInfoReply>& repl);
+ bool is_pending_cluster_state_completed() const;
+ void process_completed_pending_cluster_state(StripeAccessGuard& guard);
+ void activate_pending_cluster_state(StripeAccessGuard& guard);
+ void send_request_bucket_info(uint16_t node, const document::Bucket& bucket,
+ const std::shared_ptr<MergeReplyGuard>& mergeReplyGuard);
+ void ensure_transition_timer_started();
+ void complete_transition_timer();
+
+ void remove_superfluous_buckets(StripeAccessGuard& guard,
+ const lib::ClusterStateBundle& new_state,
+ bool is_distribution_config_change);
+
+ void reply_to_previous_pending_cluster_state_if_any();
+ void reply_to_activation_with_actual_version(
const api::ActivateClusterStateVersionCommand& cmd,
uint32_t actualVersion);
- void enableCurrentClusterStateBundleInDistributor();
- void addCurrentStateToClusterStateHistory();
- void enqueueRecheckUntilPendingStateEnabled(uint16_t node, const document::Bucket&);
- void sendAllQueuedBucketRechecks();
+ void enable_current_cluster_state_bundle_in_distributor_and_stripes(StripeAccessGuard& guard);
+ void add_current_state_to_cluster_state_history();
+ void send_all_queued_bucket_rechecks();
+
+ void propagate_active_state_bundle_internally();
void maybe_inject_simulated_db_pruning_delay();
void maybe_inject_simulated_db_merging_delay();
- /**
- Removes all copies of buckets that are on nodes that are down.
- */
- class MergingNodeRemover : public BucketDatabase::MergingProcessor {
- public:
- MergingNodeRemover(const lib::ClusterState& oldState,
- const lib::ClusterState& s,
- uint16_t localIndex,
- const lib::Distribution& distribution,
- const char* upStates,
- bool track_non_owned_entries);
- ~MergingNodeRemover() override;
-
- Result merge(BucketDatabase::Merger&) override;
- void logRemove(const document::BucketId& bucketId, const char* msg) const;
- bool distributorOwnsBucket(const document::BucketId&) const;
-
- const std::vector<BucketDatabase::Entry>& getNonOwnedEntries() const noexcept {
- return _nonOwnedBuckets;
- }
- private:
- void setCopiesInEntry(BucketDatabase::Entry& e, const std::vector<BucketCopy>& copies) const;
-
- bool has_unavailable_nodes(const BucketDatabase::Entry&) const;
- bool storage_node_is_available(uint16_t index) const noexcept;
-
- const lib::ClusterState _oldState;
- const lib::ClusterState _state;
- std::vector<bool> _available_nodes;
- std::vector<BucketDatabase::Entry> _nonOwnedBuckets;
- size_t _removed_buckets;
- size_t _removed_documents;
-
- uint16_t _localIndex;
- const lib::Distribution& _distribution;
- const char* _upStates;
- bool _track_non_owned_entries;
-
- mutable uint64_t _cachedDecisionSuperbucket;
- mutable bool _cachedOwned;
- };
+ // TODO STRIPE remove once distributor component dependencies have been pruned
+ StripeAccessor& _stripe_accessor;
+ lib::ClusterStateBundle _active_state_bundle;
+ std::unique_ptr<DistributorBucketSpaceRepo> _dummy_mutable_bucket_space_repo;
+ std::unique_ptr<DistributorBucketSpaceRepo> _dummy_read_only_bucket_space_repo;
- DistributorStripeComponent _distributorComponent;
+ DistributorStripeComponent _distributor_component;
const DistributorNodeContext& _node_ctx;
DistributorOperationContext& _op_ctx;
DistributorStripeInterface& _distributor_interface;
- std::deque<std::pair<framework::MilliSecTime, BucketRequest> > _delayedRequests;
- std::map<uint64_t, BucketRequest> _sentMessages;
- std::unique_ptr<PendingClusterState> _pendingClusterState;
+ std::deque<std::pair<framework::MilliSecTime, BucketRequest>> _delayed_requests;
+ std::map<uint64_t, BucketRequest> _sent_messages;
+ std::unique_ptr<PendingClusterState> _pending_cluster_state;
std::list<PendingClusterState::Summary> _history;
DistributorMessageSender& _sender;
- std::set<EnqueuedBucketRecheck> _enqueuedRechecks;
- OutdatedNodesMap _outdatedNodesMap;
- framework::MilliSecTimer _transitionTimer;
+ std::set<EnqueuedBucketRecheck> _enqueued_rechecks;
+ OutdatedNodesMap _outdated_nodes_map;
+ framework::MilliSecTimer _transition_timer;
std::atomic<bool> _stale_reads_enabled;
- using DistributionContexts = std::unordered_map<document::BucketSpace,
- std::shared_ptr<BucketSpaceDistributionContext>,
- document::BucketSpace::hash>;
- DistributionContexts _active_distribution_contexts;
- using DbGuards = std::unordered_map<document::BucketSpace,
- std::shared_ptr<BucketDatabase::ReadGuard>,
- document::BucketSpace::hash>;
- DbGuards _explicit_transition_read_guard;
- mutable std::mutex _distribution_context_mutex;
};
}
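
The stale-reads toggle kept in this header is a plain atomic flag accessed with relaxed ordering; the flag carries no ordering relationship to other data, so relaxed loads and stores suffice. A tiny sketch of the same pattern, with illustrative names:

// Sketch of a relaxed-atomic on/off toggle (assumed names, not Vespa API).
#include <atomic>

class StaleReadsFlag {
    std::atomic<bool> _enabled{false};
public:
    void set(bool enabled) noexcept { _enabled.store(enabled, std::memory_order_relaxed); }
    bool get() const noexcept { return _enabled.load(std::memory_order_relaxed); }
};
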
diff --git a/storage/src/vespa/storage/distributor/distributor.cpp b/storage/src/vespa/storage/distributor/distributor.cpp
index f886640d1f9..8dd414e8def 100644
--- a/storage/src/vespa/storage/distributor/distributor.cpp
+++ b/storage/src/vespa/storage/distributor/distributor.cpp
@@ -1,12 +1,15 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
//
#include "blockingoperationstarter.h"
+#include "bucket_space_distribution_configs.h"
+#include "bucketdbupdater.h"
#include "distributor.h"
#include "distributor_bucket_space.h"
#include "distributor_status.h"
#include "distributor_stripe.h"
#include "distributormetricsset.h"
#include "idealstatemetricsset.h"
+#include "legacy_single_stripe_accessor.h"
#include "operation_sequencer.h"
#include "ownership_transfer_safe_time_point_calculator.h"
#include "throttlingoperationstarter.h"
@@ -42,24 +45,36 @@ Distributor::Distributor(DistributorComponentRegister& compReg,
const NodeIdentity& node_identity,
framework::TickingThreadPool& threadPool,
DoneInitializeHandler& doneInitHandler,
- bool manageActiveBucketCopies,
+ uint32_t num_distributor_stripes,
HostInfo& hostInfoReporterRegistrar,
ChainedMessageSender* messageSender)
: StorageLink("distributor"),
framework::StatusReporter("distributor", "Distributor"),
+ _comp_reg(compReg),
_metrics(std::make_shared<DistributorMetricSet>()),
_messageSender(messageSender),
- _stripe(std::make_unique<DistributorStripe>(compReg, *_metrics, node_identity, threadPool, doneInitHandler,
- manageActiveBucketCopies, *this)),
+ _stripe(std::make_unique<DistributorStripe>(compReg, *_metrics, node_identity, threadPool,
+ doneInitHandler, *this, (num_distributor_stripes == 0))),
+ _stripe_accessor(std::make_unique<LegacySingleStripeAccessor>(*_stripe)),
_component(compReg, "distributor"),
+ _bucket_db_updater(),
_distributorStatusDelegate(compReg, *this, *this),
_threadPool(threadPool),
_tickResult(framework::ThreadWaitInfo::NO_MORE_CRITICAL_WORK_KNOWN),
_metricUpdateHook(*this),
- _hostInfoReporter(*this, *this)
+ _hostInfoReporter(*this, *this),
+ _distribution(),
+ _next_distribution(),
+ _current_internal_config_generation(_component.internal_config_generation())
{
_component.registerMetric(*_metrics);
_component.registerMetricUpdateHook(_metricUpdateHook, framework::SecondTime(0));
+ if (num_distributor_stripes > 0) {
+ LOG(info, "Setting up distributor with %u stripes", num_distributor_stripes); // TODO STRIPE remove once legacy gone
+ // FIXME STRIPE using the singular stripe here is a temporary Hack McHack Deluxe 3000!
+ _bucket_db_updater = std::make_unique<BucketDBUpdater>(*_stripe, *_stripe, _comp_reg, *_stripe_accessor);
+ }
+ _hostInfoReporter.enableReporting(getConfig().getEnableHostInfoReporting());
_distributorStatusDelegate.registerStatusPage();
hostInfoReporterRegistrar.registerReporter(&_hostInfoReporter);
propagateDefaultDistribution(_component.getDistribution());
@@ -113,12 +128,12 @@ Distributor::distributor_component() noexcept {
return _stripe->_component;
}
-BucketDBUpdater&
+StripeBucketDBUpdater&
Distributor::bucket_db_updater() {
return _stripe->bucket_db_updater();
}
-const BucketDBUpdater&
+const StripeBucketDBUpdater&
Distributor::bucket_db_updater() const {
return _stripe->bucket_db_updater();
}
@@ -171,7 +186,6 @@ void
Distributor::onOpen()
{
LOG(debug, "Distributor::onOpen invoked");
- _stripe->open();
setNodeStateUp();
framework::MilliSecTime maxProcessingTime(60 * 1000);
framework::MilliSecTime waitTime(1000);
@@ -187,7 +201,10 @@ Distributor::onOpen()
void Distributor::onClose() {
LOG(debug, "Distributor::onClose invoked");
- _stripe->close();
+ _stripe->flush_and_close();
+ if (_bucket_db_updater) {
+ _bucket_db_updater->flush();
+ }
}
void
@@ -210,18 +227,47 @@ Distributor::sendDown(const std::shared_ptr<api::StorageMessage>& msg)
}
}
+namespace {
+
+bool should_be_handled_by_top_level_bucket_db_updater(const api::StorageMessage& msg) noexcept {
+ switch (msg.getType().getId()) {
+ case api::MessageType::SETSYSTEMSTATE_ID:
+ case api::MessageType::ACTIVATE_CLUSTER_STATE_VERSION_ID:
+ return true;
+ case api::MessageType::REQUESTBUCKETINFO_REPLY_ID:
+ // Top-level component should only handle replies for full bucket info fetches.
+ // Bucket-specific requests should go to the stripes that sent them.
+ return dynamic_cast<const api::RequestBucketInfoReply&>(msg).full_bucket_fetch();
+ default:
+ return false;
+ }
+}
+
+}
+
bool
Distributor::onDown(const std::shared_ptr<api::StorageMessage>& msg)
{
- return _stripe->onDown(msg);
+ // FIXME STRIPE this MUST be in a separate thread to enforce processing in a single thread
+ // regardless of what RPC thread (comm mgr, FRT...) this is called from!
+ if (_bucket_db_updater && should_be_handled_by_top_level_bucket_db_updater(*msg)) {
+ return msg->callHandler(*_bucket_db_updater, msg);
+ }
+ // TODO STRIPE can we route both requests and responses that are BucketCommand|Reply based on their bucket alone?
+ // that covers most operations already...
+ return _stripe->handle_or_enqueue_message(msg);
}
bool
Distributor::handleReply(const std::shared_ptr<api::StorageReply>& reply)
{
+ if (_bucket_db_updater && should_be_handled_by_top_level_bucket_db_updater(*reply)) {
+ return reply->callHandler(*_bucket_db_updater, reply);
+ }
return _stripe->handleReply(reply);
}
+// TODO STRIPE we need to reintroduce the top-level message queue...
bool
Distributor::handleMessage(const std::shared_ptr<api::StorageMessage>& msg)
{
@@ -245,21 +291,44 @@ Distributor::enableClusterStateBundle(const lib::ClusterStateBundle& state)
void
Distributor::storageDistributionChanged()
{
- // May happen from any thread.
- _stripe->storageDistributionChanged();
+ if (_bucket_db_updater) {
+ if (!_distribution || (*_component.getDistribution() != *_distribution)) {
+ LOG(debug, "Distribution changed to %s, must re-fetch bucket information",
+ _component.getDistribution()->toString().c_str());
+ _next_distribution = _component.getDistribution(); // FIXME this is not thread safe
+ } else {
+ LOG(debug, "Got distribution change, but the distribution %s was the same as before: %s",
+ _component.getDistribution()->toString().c_str(),
+ _distribution->toString().c_str());
+ }
+ } else {
+ // May happen from any thread.
+ _stripe->storage_distribution_changed();
+ }
}
void
Distributor::enableNextDistribution()
{
- _stripe->enableNextDistribution();
+ if (_bucket_db_updater) {
+ if (_next_distribution) {
+ _distribution = _next_distribution;
+ _next_distribution = std::shared_ptr<lib::Distribution>();
+ auto new_configs = BucketSpaceDistributionConfigs::from_default_distribution(_distribution);
+ _bucket_db_updater->storage_distribution_changed(new_configs);
+ }
+ } else {
+ _stripe->enableNextDistribution();
+ }
}
// TODO STRIPE only used by tests to directly inject new distribution config
+// - actually, also by ctor
+// - actually, also invoked from the constructor
void
Distributor::propagateDefaultDistribution(
std::shared_ptr<const lib::Distribution> distribution)
{
+ // TODO STRIPE top-level bucket DB updater
_stripe->propagateDefaultDistribution(std::move(distribution));
}
@@ -299,6 +368,9 @@ framework::ThreadWaitInfo
Distributor::doCriticalTick(framework::ThreadIndex idx)
{
_tickResult = framework::ThreadWaitInfo::NO_MORE_CRITICAL_WORK_KNOWN;
+ if (_bucket_db_updater) {
+ enableNextDistribution();
+ }
// Propagates any new configs down to stripe(s)
enableNextConfig();
_stripe->doCriticalTick(idx);
@@ -309,6 +381,9 @@ Distributor::doCriticalTick(framework::ThreadIndex idx)
framework::ThreadWaitInfo
Distributor::doNonCriticalTick(framework::ThreadIndex idx)
{
+ if (_bucket_db_updater) {
+ _bucket_db_updater->resend_delayed_messages();
+ }
// TODO STRIPE stripes need their own thread loops!
_stripe->doNonCriticalTick(idx);
_tickResult = _stripe->_tickResult;
@@ -318,8 +393,22 @@ Distributor::doNonCriticalTick(framework::ThreadIndex idx)
void
Distributor::enableNextConfig()
{
- _hostInfoReporter.enableReporting(getConfig().getEnableHostInfoReporting());
- _stripe->enableNextConfig(); // TODO STRIPE avoid redundant call
+ // Only lazily trigger a config propagation and internal update if something has _actually changed_.
+ if (_component.internal_config_generation() != _current_internal_config_generation) {
+ if (_bucket_db_updater) {
+ auto guard = _stripe_accessor->rendezvous_and_hold_all();
+ guard->update_total_distributor_config(_component.total_distributor_config_sp());
+ } else {
+ _stripe->update_total_distributor_config(_component.total_distributor_config_sp());
+ }
+ _hostInfoReporter.enableReporting(getConfig().getEnableHostInfoReporting());
+ _current_internal_config_generation = _component.internal_config_generation();
+ }
+ if (!_bucket_db_updater) {
+ // TODO STRIPE remove these once tests are fixed to trigger reconfig properly
+ _hostInfoReporter.enableReporting(getConfig().getEnableHostInfoReporting());
+ _stripe->enableNextConfig(); // TODO STRIPE avoid redundant call
+ }
}
vespalib::string
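
enableNextConfig() above only fans a new configuration snapshot out to subcomponents when the internally tracked config generation has actually advanced. A compact sketch of that generation-check pattern, using hypothetical names:

// Sketch (hypothetical names) of generation-gated config propagation.
#include <cstdint>
#include <memory>

struct TotalConfig { /* full distributor configuration snapshot */ };

class LazyConfigPropagator {
    uint64_t _applied_generation;
public:
    explicit LazyConfigPropagator(uint64_t initial_generation)
        : _applied_generation(initial_generation) {}

    // Returns true if the snapshot was pushed to subcomponents, false if it was a no-op.
    bool maybe_propagate(uint64_t current_generation,
                         const std::shared_ptr<const TotalConfig>& snapshot) {
        if (current_generation == _applied_generation) {
            return false; // nothing changed since the last tick
        }
        push_to_subcomponents(snapshot);
        _applied_generation = current_generation;
        return true;
    }
private:
    void push_to_subcomponents(const std::shared_ptr<const TotalConfig>&) {
        // e.g. stripes, host info reporter, metric updaters ...
    }
};
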
diff --git a/storage/src/vespa/storage/distributor/distributor.h b/storage/src/vespa/storage/distributor/distributor.h
index bfffe126b44..074f5fe27d4 100644
--- a/storage/src/vespa/storage/distributor/distributor.h
+++ b/storage/src/vespa/storage/distributor/distributor.h
@@ -11,6 +11,7 @@
#include "min_replica_provider.h"
#include "pendingmessagetracker.h"
#include "statusreporterdelegate.h"
+#include "stripe_bucket_db_updater.h" // TODO this is temporary
#include <vespa/config/config.h>
#include <vespa/storage/common/distributorcomponent.h>
#include <vespa/storage/common/doneinitializehandler.h>
@@ -33,10 +34,12 @@ namespace storage::distributor {
class BlockingOperationStarter;
class BucketPriorityDatabase;
+class BucketDBUpdater;
class DistributorBucketSpaceRepo;
class DistributorStatus;
class DistributorStripe;
class OperationSequencer;
+class LegacySingleStripeAccessor;
class OwnershipTransferSafeTimePointCalculator;
class SimpleMaintenanceScanner;
class ThrottlingOperationStarter;
@@ -54,7 +57,7 @@ public:
const NodeIdentity& node_identity,
framework::TickingThreadPool&,
DoneInitializeHandler&,
- bool manageActiveBucketCopies,
+ uint32_t num_distributor_stripes,
HostInfo& hostInfoReporterRegistrar,
ChainedMessageSender* = nullptr);
@@ -135,8 +138,8 @@ private:
bool handleMessage(const std::shared_ptr<api::StorageMessage>& msg);
// Accessors used by tests
- BucketDBUpdater& bucket_db_updater();
- const BucketDBUpdater& bucket_db_updater() const;
+ StripeBucketDBUpdater& bucket_db_updater();
+ const StripeBucketDBUpdater& bucket_db_updater() const;
IdealStateManager& ideal_state_manager();
const IdealStateManager& ideal_state_manager() const;
ExternalOperationHandler& external_operation_handler();
@@ -162,16 +165,24 @@ private:
void enableNextDistribution();
void propagateDefaultDistribution(std::shared_ptr<const lib::Distribution>);
+ DistributorComponentRegister& _comp_reg;
std::shared_ptr<DistributorMetricSet> _metrics;
ChainedMessageSender* _messageSender;
// TODO STRIPE multiple stripes...! This is for proof of concept of wiring.
std::unique_ptr<DistributorStripe> _stripe;
+ std::unique_ptr<LegacySingleStripeAccessor> _stripe_accessor;
storage::DistributorComponent _component;
+ std::unique_ptr<BucketDBUpdater> _bucket_db_updater;
StatusReporterDelegate _distributorStatusDelegate;
framework::TickingThreadPool& _threadPool;
framework::ThreadWaitInfo _tickResult;
MetricUpdateHook _metricUpdateHook;
DistributorHostInfoReporter _hostInfoReporter;
+
+ std::shared_ptr<lib::Distribution> _distribution;
+ std::shared_ptr<lib::Distribution> _next_distribution;
+
+ uint64_t _current_internal_config_generation;
};
}
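
The _distribution/_next_distribution pair added above implements a staged swap: a distribution change is recorded when it arrives and only activated later from the tick thread (the FIXME in the .cpp notes the hand-off is not yet thread safe). A minimal sketch of the staging idea, with illustrative names:

// Sketch of staged distribution activation (assumed names; no synchronization,
// mirroring the FIXME about thread safety in the real code).
#include <memory>

struct Distribution { /* distribution config */ };

class StagedDistribution {
    std::shared_ptr<const Distribution> _active;
    std::shared_ptr<const Distribution> _next; // written on change, consumed on tick
public:
    // Called when a distribution change is observed. The real code compares configs
    // by value; pointer comparison keeps the sketch small.
    void stage(std::shared_ptr<const Distribution> incoming) {
        if (incoming != _active) {
            _next = std::move(incoming);
        }
    }
    // Called from the tick thread; activates the staged config if one is pending.
    // Returns the newly active distribution, or nullptr if nothing was pending.
    std::shared_ptr<const Distribution> activate_pending() {
        if (!_next) {
            return nullptr;
        }
        _active = std::move(_next);
        return _active;
    }
};
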
diff --git a/storage/src/vespa/storage/distributor/distributor_bucket_space.h b/storage/src/vespa/storage/distributor/distributor_bucket_space.h
index f4d5bd6f5aa..558cbada31f 100644
--- a/storage/src/vespa/storage/distributor/distributor_bucket_space.h
+++ b/storage/src/vespa/storage/distributor/distributor_bucket_space.h
@@ -80,6 +80,7 @@ public:
}
void set_pending_cluster_state(std::shared_ptr<const lib::ClusterState> pending_cluster_state);
+ bool has_pending_cluster_state() const noexcept { return static_cast<bool>(_pending_cluster_state); }
const lib::ClusterState& get_pending_cluster_state() const noexcept { return *_pending_cluster_state; }
/**
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe.cpp b/storage/src/vespa/storage/distributor/distributor_stripe.cpp
index e41c7940a0d..87e938efd71 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_stripe.cpp
@@ -38,24 +38,24 @@ DistributorStripe::DistributorStripe(DistributorComponentRegister& compReg,
const NodeIdentity& node_identity,
framework::TickingThreadPool& threadPool,
DoneInitializeHandler& doneInitHandler,
- bool manageActiveBucketCopies,
- ChainedMessageSender& messageSender)
- : StorageLink("distributor"),
- DistributorStripeInterface(),
+ ChainedMessageSender& messageSender,
+ bool use_legacy_mode)
+ : DistributorStripeInterface(),
framework::StatusReporter("distributor", "Distributor"),
_clusterStateBundle(lib::ClusterState()),
_bucketSpaceRepo(std::make_unique<DistributorBucketSpaceRepo>(node_identity.node_index())),
_readOnlyBucketSpaceRepo(std::make_unique<DistributorBucketSpaceRepo>(node_identity.node_index())),
_component(*this, *_bucketSpaceRepo, *_readOnlyBucketSpaceRepo, compReg, "distributor"),
+ _total_config(_component.total_distributor_config_sp()),
_metrics(metrics),
_operationOwner(*this, _component.getClock()),
_maintenanceOperationOwner(*this, _component.getClock()),
_operation_sequencer(std::make_unique<OperationSequencer>()),
_pendingMessageTracker(compReg),
- _bucketDBUpdater(*this, *_bucketSpaceRepo, *_readOnlyBucketSpaceRepo, *this, compReg),
+ _bucketDBUpdater(*this, *_bucketSpaceRepo, *_readOnlyBucketSpaceRepo, *this, compReg, use_legacy_mode),
_distributorStatusDelegate(compReg, *this, *this),
_bucketDBStatusDelegate(compReg, *this, _bucketDBUpdater),
- _idealStateManager(*this, *_bucketSpaceRepo, *_readOnlyBucketSpaceRepo, compReg, manageActiveBucketCopies),
+ _idealStateManager(*this, *_bucketSpaceRepo, *_readOnlyBucketSpaceRepo, compReg),
_messageSender(messageSender),
_externalOperationHandler(_component, _component, getMetrics(), getMessageSender(),
*_operation_sequencer, *this, _component,
@@ -81,7 +81,8 @@ DistributorStripe::DistributorStripe(DistributorComponentRegister& compReg,
_db_memory_sample_interval(30s),
_last_db_memory_sample_time_point(),
_inhibited_maintenance_tick_count(0),
- _must_send_updated_host_info(false)
+ _must_send_updated_host_info(false),
+ _use_legacy_mode(use_legacy_mode)
{
_bucketDBStatusDelegate.registerStatusPage();
propagateDefaultDistribution(_component.getDistribution());
@@ -114,36 +115,23 @@ DistributorStripe::sendCommand(const std::shared_ptr<api::StorageCommand>& cmd)
api::MergeBucketCommand& merge(static_cast<api::MergeBucketCommand&>(*cmd));
_idealStateManager.getMetrics().nodesPerMerge.addValue(merge.getNodes().size());
}
- sendUp(cmd);
+ send_up_with_tracking(cmd);
}
void
DistributorStripe::sendReply(const std::shared_ptr<api::StorageReply>& reply)
{
- sendUp(reply);
-}
-
-void
-DistributorStripe::onOpen()
-{
- LOG(debug, "DistributorStripe::onOpen invoked");
- if (_component.getDistributorConfig().startDistributorThread) {
- // TODO STRIPE own thread per stripe!
- } else {
- LOG(warning, "Not starting distributor stripe thread as it's not configured to "
- "run. Unless you are just running a test tool, this is a "
- "fatal error.");
- }
+ send_up_with_tracking(reply);
}
void DistributorStripe::send_shutdown_abort_reply(const std::shared_ptr<api::StorageMessage>& msg) {
api::StorageReply::UP reply(
std::dynamic_pointer_cast<api::StorageCommand>(msg)->makeReply());
reply->setResult(api::ReturnCode(api::ReturnCode::ABORTED, "Distributor is shutting down"));
- sendUp(std::shared_ptr<api::StorageMessage>(reply.release()));
+ send_up_with_tracking(std::shared_ptr<api::StorageMessage>(reply.release()));
}
-void DistributorStripe::onClose() {
+void DistributorStripe::flush_and_close() {
for (auto& msg : _messageQueue) {
if (!msg->getType().isReply()) {
send_shutdown_abort_reply(msg);
@@ -168,18 +156,20 @@ void DistributorStripe::send_up_without_tracking(const std::shared_ptr<api::Stor
}
void
-DistributorStripe::sendUp(const std::shared_ptr<api::StorageMessage>& msg)
+DistributorStripe::send_up_with_tracking(const std::shared_ptr<api::StorageMessage>& msg)
{
_pendingMessageTracker.insert(msg);
send_up_without_tracking(msg);
}
bool
-DistributorStripe::onDown(const std::shared_ptr<api::StorageMessage>& msg)
+DistributorStripe::handle_or_enqueue_message(const std::shared_ptr<api::StorageMessage>& msg)
{
if (_externalOperationHandler.try_handle_message_outside_main_thread(msg)) {
return true;
}
+ // TODO STRIPE redesign how message queue guarding and wakeup is performed.
+ // Currently involves a _thread pool global_ lock transitively via tick guard!
framework::TickingLockGuard guard(_threadPool.freezeCriticalTicks());
MBUS_TRACE(msg->getTrace(), 9,
"Distributor: Added to message queue. Thread state: "
@@ -400,8 +390,9 @@ void DistributorStripe::invalidate_bucket_spaces_stats() {
}
void
-DistributorStripe::storageDistributionChanged()
+DistributorStripe::storage_distribution_changed()
{
+ assert(_use_legacy_mode);
if (!_distribution.get()
|| *_component.getDistribution() != *_distribution)
{
@@ -478,17 +469,22 @@ DistributorStripe::checkBucketForSplit(document::BucketSpace bucketSpace,
}
}
+// TODO STRIPE must only be called when operating in legacy single stripe mode!
+// In other cases, distribution config switching is controlled by top-level distributor, not via framework(tm).
void
DistributorStripe::enableNextDistribution()
{
+ assert(_use_legacy_mode);
if (_nextDistribution.get()) {
_distribution = _nextDistribution;
propagateDefaultDistribution(_distribution);
_nextDistribution = std::shared_ptr<lib::Distribution>();
+ // TODO conditional on whether top-level DB updater is in charge
_bucketDBUpdater.storageDistributionChanged();
}
}
+// TODO STRIPE must be invoked by top-level bucket db updater probably
+// TODO STRIPE should probably be invoked by the top-level bucket DB updater
void
DistributorStripe::propagateDefaultDistribution(
std::shared_ptr<const lib::Distribution> distribution)
@@ -500,6 +496,20 @@ DistributorStripe::propagateDefaultDistribution(
}
}
+// Only called when stripe is in rendezvous freeze
+void
+DistributorStripe::update_distribution_config(const BucketSpaceDistributionConfigs& new_configs) {
+ assert(!_use_legacy_mode);
+ auto default_distr = new_configs.get_or_nullptr(document::FixedBucketSpaces::default_space());
+ auto global_distr = new_configs.get_or_nullptr(document::FixedBucketSpaces::global_space());
+ assert(default_distr && global_distr);
+
+ for (auto* repo : {_bucketSpaceRepo.get(), _readOnlyBucketSpaceRepo.get()}) {
+ repo->get(document::FixedBucketSpaces::default_space()).setDistribution(default_distr);
+ repo->get(document::FixedBucketSpaces::global_space()).setDistribution(global_distr);
+ }
+}
+
void
DistributorStripe::propagateClusterStates()
{
@@ -730,12 +740,15 @@ DistributorStripe::startNextMaintenanceOperation()
_scheduler->tick(_schedulingMode);
}
+// TODO STRIPE begone with this!
framework::ThreadWaitInfo
DistributorStripe::doCriticalTick(framework::ThreadIndex)
{
_tickResult = framework::ThreadWaitInfo::NO_MORE_CRITICAL_WORK_KNOWN;
- enableNextDistribution();
- enableNextConfig();
+ if (_use_legacy_mode) {
+ enableNextDistribution();
+ enableNextConfig();
+ }
fetchStatusRequests();
fetchExternalMessages();
return _tickResult;
@@ -783,6 +796,21 @@ void DistributorStripe::mark_maintenance_tick_as_no_longer_inhibited() noexcept
void
DistributorStripe::enableNextConfig()
{
+ assert(_use_legacy_mode);
+ propagate_config_snapshot_to_internal_components();
+
+}
+
+void
+DistributorStripe::update_total_distributor_config(std::shared_ptr<const DistributorConfiguration> config)
+{
+ _total_config = std::move(config);
+ propagate_config_snapshot_to_internal_components();
+}
+
+void
+DistributorStripe::propagate_config_snapshot_to_internal_components()
+{
_bucketDBMetricUpdater.setMinimumReplicaCountingMode(getConfig().getMinimumReplicaCountingMode());
_ownershipSafeTimeCalc->setMaxClusterClockSkew(getConfig().getMaxClusterClockSkew());
_pendingMessageTracker.setNodeBusyDuration(getConfig().getInhibitMergesOnBusyNodeDuration());
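
Both configuration entry points above, the legacy tick-driven enableNextConfig() and the guard-driven update_total_distributor_config(), funnel into a single propagate_config_snapshot_to_internal_components() step, so the derived settings are applied in one place regardless of which path delivered the snapshot. A sketch of that shape, with hypothetical names:

// Sketch (hypothetical names) of two config entry points sharing one propagation step.
#include <cassert>
#include <memory>

struct DistributorConfigSnapshot { /* ... */ };

class StripeConfigSink {
    std::shared_ptr<const DistributorConfigSnapshot> _total_config;
    const bool _use_legacy_mode;
public:
    explicit StripeConfigSink(bool use_legacy_mode) : _use_legacy_mode(use_legacy_mode) {}

    // Legacy path: the stripe picks up new config itself during its critical tick.
    void enable_next_config_legacy() {
        assert(_use_legacy_mode);
        propagate_snapshot();
    }

    // New path: the top-level distributor pushes a snapshot while holding a stripe guard.
    void update_total_config(std::shared_ptr<const DistributorConfigSnapshot> config) {
        _total_config = std::move(config);
        propagate_snapshot();
    }
private:
    void propagate_snapshot() {
        // Single place where derived settings are pushed to trackers, schedulers, etc.
    }
};
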
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe.h b/storage/src/vespa/storage/distributor/distributor_stripe.h
index 10b3f54d834..bc058305c09 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe.h
+++ b/storage/src/vespa/storage/distributor/distributor_stripe.h
@@ -3,7 +3,6 @@
#pragma once
#include "bucket_spaces_stats_provider.h"
-#include "bucketdbupdater.h"
#include "distributor_host_info_reporter.h"
#include "distributor_stripe_interface.h"
#include "externaloperationhandler.h"
@@ -11,6 +10,8 @@
#include "min_replica_provider.h"
#include "pendingmessagetracker.h"
#include "statusreporterdelegate.h"
+#include "stripe_access_guard.h"
+#include "stripe_bucket_db_updater.h"
#include <vespa/config/config.h>
#include <vespa/storage/common/doneinitializehandler.h>
#include <vespa/storage/common/messagesender.h>
@@ -44,11 +45,9 @@ class ThrottlingOperationStarter;
* TODO STRIPE add class comment.
*/
class DistributorStripe final
- : public StorageLink, // TODO decouple
- public DistributorStripeInterface,
+ : public DistributorStripeInterface,
public StatusDelegator,
public framework::StatusReporter,
- public framework::TickingThread,
public MinReplicaProvider,
public BucketSpacesStatsProvider,
public NonTrackingMessageSender
@@ -59,18 +58,17 @@ public:
const NodeIdentity& node_identity,
framework::TickingThreadPool&,
DoneInitializeHandler&,
- bool manageActiveBucketCopies,
- ChainedMessageSender& messageSender);
+ ChainedMessageSender& messageSender,
+ bool use_legacy_mode);
~DistributorStripe() override;
const ClusterContext& cluster_context() const override {
return _component.cluster_context();
}
- void onOpen() override;
- void onClose() override;
- bool onDown(const std::shared_ptr<api::StorageMessage>&) override;
- void sendUp(const std::shared_ptr<api::StorageMessage>&) override;
+ void flush_and_close();
+ bool handle_or_enqueue_message(const std::shared_ptr<api::StorageMessage>&);
+ void send_up_with_tracking(const std::shared_ptr<api::StorageMessage>&);
// Bypasses message tracker component. Thread safe.
void send_up_without_tracking(const std::shared_ptr<api::StorageMessage>&) override;
@@ -98,13 +96,13 @@ public:
/**
* Invoked when a pending cluster state for a distribution (config)
- * change has been enabled. An invocation of storageDistributionChanged
+ * change has been enabled. An invocation of storage_distribution_changed
* will eventually cause this method to be called, assuming the pending
* cluster state completed successfully.
*/
void notifyDistributionChangeEnabled() override;
- void storageDistributionChanged() override;
+ void storage_distribution_changed();
void recheckBucketInfo(uint16_t nodeIdx, const document::Bucket &bucket) override;
@@ -119,8 +117,8 @@ public:
std::string getActiveIdealStateOperations() const;
std::string getActiveOperations() const;
- virtual framework::ThreadWaitInfo doCriticalTick(framework::ThreadIndex) override;
- virtual framework::ThreadWaitInfo doNonCriticalTick(framework::ThreadIndex) override;
+ framework::ThreadWaitInfo doCriticalTick(framework::ThreadIndex);
+ framework::ThreadWaitInfo doNonCriticalTick(framework::ThreadIndex);
/**
* Checks whether a bucket needs to be split, and sends a split
@@ -152,7 +150,7 @@ public:
}
const DistributorConfiguration& getConfig() const override {
- return _component.getTotalDistributorConfig();
+ return *_total_config;
}
bool isInRecoveryMode() const noexcept {
@@ -169,8 +167,8 @@ public:
return *_bucketIdHasher;
}
- BucketDBUpdater& bucket_db_updater() { return _bucketDBUpdater; }
- const BucketDBUpdater& bucket_db_updater() const { return _bucketDBUpdater; }
+ StripeBucketDBUpdater& bucket_db_updater() { return _bucketDBUpdater; }
+ const StripeBucketDBUpdater& bucket_db_updater() const { return _bucketDBUpdater; }
IdealStateManager& ideal_state_manager() { return _idealStateManager; }
const IdealStateManager& ideal_state_manager() const { return _idealStateManager; }
ExternalOperationHandler& external_operation_handler() { return _externalOperationHandler; }
@@ -198,6 +196,7 @@ private:
friend class DistributorTestUtil;
friend class MetricUpdateHook;
friend class Distributor;
+ friend class LegacySingleStripeAccessGuard;
bool handleMessage(const std::shared_ptr<api::StorageMessage>& msg);
bool isMaintenanceReply(const api::StorageReply& reply) const;
@@ -251,24 +250,27 @@ private:
bool generateOperation(const std::shared_ptr<api::StorageMessage>& msg,
Operation::SP& operation);
- void enableNextDistribution();
- void propagateDefaultDistribution(std::shared_ptr<const lib::Distribution>);
+ void enableNextDistribution(); // TODO STRIPE remove once legacy is gone
+ void propagateDefaultDistribution(std::shared_ptr<const lib::Distribution>); // TODO STRIPE remove once legacy is gone
void propagateClusterStates();
+ void update_distribution_config(const BucketSpaceDistributionConfigs& new_configs);
+ void update_total_distributor_config(std::shared_ptr<const DistributorConfiguration> config);
BucketSpacesStatsProvider::BucketSpacesStats make_invalid_stats_per_configured_space() const;
template <typename NodeFunctor>
void for_each_available_content_node_in(const lib::ClusterState&, NodeFunctor&&);
void invalidate_bucket_spaces_stats();
void send_updated_host_info_if_required();
+ void propagate_config_snapshot_to_internal_components();
lib::ClusterStateBundle _clusterStateBundle;
-
std::unique_ptr<DistributorBucketSpaceRepo> _bucketSpaceRepo;
// Read-only bucket space repo with DBs that only contain buckets transiently
// during cluster state transitions. Bucket set does not overlap that of _bucketSpaceRepo
// and the DBs are empty during non-transition phases.
std::unique_ptr<DistributorBucketSpaceRepo> _readOnlyBucketSpaceRepo;
storage::distributor::DistributorStripeComponent _component;
+ std::shared_ptr<const DistributorConfiguration> _total_config;
DistributorMetricSet& _metrics;
OperationOwner _operationOwner;
@@ -276,7 +278,7 @@ private:
std::unique_ptr<OperationSequencer> _operation_sequencer;
PendingMessageTracker _pendingMessageTracker;
- BucketDBUpdater _bucketDBUpdater;
+ StripeBucketDBUpdater _bucketDBUpdater;
StatusReporterDelegate _distributorStatusDelegate;
StatusReporterDelegate _bucketDBStatusDelegate;
IdealStateManager _idealStateManager;
@@ -333,6 +335,7 @@ private:
std::chrono::steady_clock::time_point _last_db_memory_sample_time_point;
size_t _inhibited_maintenance_tick_count;
bool _must_send_updated_host_info;
+ bool _use_legacy_mode;
};
}
diff --git a/storage/src/vespa/storage/distributor/idealstatemanager.cpp b/storage/src/vespa/storage/distributor/idealstatemanager.cpp
index 84fef955feb..a090f00300b 100644
--- a/storage/src/vespa/storage/distributor/idealstatemanager.cpp
+++ b/storage/src/vespa/storage/distributor/idealstatemanager.cpp
@@ -28,8 +28,7 @@ IdealStateManager::IdealStateManager(
DistributorStripeInterface& owner,
DistributorBucketSpaceRepo& bucketSpaceRepo,
DistributorBucketSpaceRepo& readOnlyBucketSpaceRepo,
- DistributorComponentRegister& compReg,
- bool manageActiveBucketCopies)
+ DistributorComponentRegister& compReg)
: HtmlStatusReporter("idealstateman", "Ideal state manager"),
_metrics(new IdealStateMetricSet),
_distributorComponent(owner, bucketSpaceRepo, readOnlyBucketSpaceRepo, compReg, "Ideal state manager"),
@@ -39,11 +38,8 @@ IdealStateManager::IdealStateManager(
_distributorComponent.registerStatusPage(*this);
_distributorComponent.registerMetric(*_metrics);
- if (manageActiveBucketCopies) {
- LOG(debug, "Adding BucketStateStateChecker to state checkers");
- _stateCheckers.push_back(
- StateChecker::SP(new BucketStateStateChecker()));
- }
+ LOG(debug, "Adding BucketStateStateChecker to state checkers");
+ _stateCheckers.push_back(StateChecker::SP(new BucketStateStateChecker()));
_splitBucketStateChecker = new SplitBucketStateChecker();
_stateCheckers.push_back(StateChecker::SP(_splitBucketStateChecker));
diff --git a/storage/src/vespa/storage/distributor/idealstatemanager.h b/storage/src/vespa/storage/distributor/idealstatemanager.h
index 363d66d8174..a5f80343512 100644
--- a/storage/src/vespa/storage/distributor/idealstatemanager.h
+++ b/storage/src/vespa/storage/distributor/idealstatemanager.h
@@ -37,8 +37,7 @@ public:
IdealStateManager(DistributorStripeInterface& owner,
DistributorBucketSpaceRepo& bucketSpaceRepo,
DistributorBucketSpaceRepo& readOnlyBucketSpaceRepo,
- DistributorComponentRegister& compReg,
- bool manageActiveBucketCopies);
+ DistributorComponentRegister& compReg);
~IdealStateManager() override;
diff --git a/storage/src/vespa/storage/distributor/legacy_single_stripe_accessor.cpp b/storage/src/vespa/storage/distributor/legacy_single_stripe_accessor.cpp
new file mode 100644
index 00000000000..0c6c0206608
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/legacy_single_stripe_accessor.cpp
@@ -0,0 +1,92 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include "legacy_single_stripe_accessor.h"
+#include "distributor_stripe.h"
+
+namespace storage::distributor {
+
+LegacySingleStripeAccessGuard::LegacySingleStripeAccessGuard(LegacySingleStripeAccessor& accessor,
+ DistributorStripe& stripe)
+ : _accessor(accessor),
+ _stripe(stripe)
+{}
+
+LegacySingleStripeAccessGuard::~LegacySingleStripeAccessGuard() {
+ _accessor.mark_guard_released();
+}
+
+void LegacySingleStripeAccessGuard::update_total_distributor_config(std::shared_ptr<const DistributorConfiguration> config) {
+ _stripe.update_total_distributor_config(std::move(config));
+}
+
+void LegacySingleStripeAccessGuard::update_distribution_config(const BucketSpaceDistributionConfigs& new_configs) {
+ _stripe.update_distribution_config(new_configs);
+}
+
+void LegacySingleStripeAccessGuard::set_pending_cluster_state_bundle(const lib::ClusterStateBundle& pending_state) {
+ _stripe.getBucketSpaceRepo().set_pending_cluster_state_bundle(pending_state);
+ // TODO STRIPE also read only repo?
+}
+
+void LegacySingleStripeAccessGuard::clear_pending_cluster_state_bundle() {
+ _stripe.getBucketSpaceRepo().clear_pending_cluster_state_bundle();
+ // TODO STRIPE also read only repo?
+}
+
+void LegacySingleStripeAccessGuard::enable_cluster_state_bundle(const lib::ClusterStateBundle& new_state) {
+ _stripe.enableClusterStateBundle(new_state);
+}
+
+void LegacySingleStripeAccessGuard::notify_distribution_change_enabled() {
+ _stripe.notifyDistributionChangeEnabled();
+}
+
+PotentialDataLossReport
+LegacySingleStripeAccessGuard::remove_superfluous_buckets(document::BucketSpace bucket_space,
+ const lib::ClusterState& new_state,
+ bool is_distribution_change)
+{
+ return _stripe.bucket_db_updater().remove_superfluous_buckets(bucket_space, new_state, is_distribution_change);
+}
+
+void
+LegacySingleStripeAccessGuard::merge_entries_into_db(document::BucketSpace bucket_space,
+ api::Timestamp gathered_at_timestamp,
+ const lib::Distribution& distribution,
+ const lib::ClusterState& new_state,
+ const char* storage_up_states,
+ const std::unordered_set<uint16_t>& outdated_nodes,
+ const std::vector<dbtransition::Entry>& entries)
+{
+ _stripe.bucket_db_updater().merge_entries_into_db(bucket_space, gathered_at_timestamp, distribution,
+ new_state, storage_up_states, outdated_nodes, entries);
+}
+
+void LegacySingleStripeAccessGuard::update_read_snapshot_before_db_pruning() {
+ _stripe.bucket_db_updater().update_read_snapshot_before_db_pruning();
+}
+
+void LegacySingleStripeAccessGuard::update_read_snapshot_after_db_pruning(const lib::ClusterStateBundle& new_state) {
+ _stripe.bucket_db_updater().update_read_snapshot_after_db_pruning(new_state);
+}
+
+void LegacySingleStripeAccessGuard::update_read_snapshot_after_activation(const lib::ClusterStateBundle& activated_state) {
+ _stripe.bucket_db_updater().update_read_snapshot_after_activation(activated_state);
+}
+
+void LegacySingleStripeAccessGuard::clear_read_only_bucket_repo_databases() {
+ _stripe.bucket_db_updater().clearReadOnlyBucketRepoDatabases();
+}
+
+std::unique_ptr<StripeAccessGuard> LegacySingleStripeAccessor::rendezvous_and_hold_all() {
+ // For sanity checking during development.
+ assert(!_guard_held);
+ _guard_held = true;
+ return std::make_unique<LegacySingleStripeAccessGuard>(*this, _stripe);
+}
+
+void LegacySingleStripeAccessor::mark_guard_released() {
+ assert(_guard_held);
+ _guard_held = false;
+}
+
+}
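
The accessor/guard pair above is essentially a single-owner RAII handshake: rendezvous_and_hold_all() hands out at most one guard at a time, and the guard's destructor releases the accessor again. A condensed sketch of that pattern (illustrative names; like the legacy single-stripe case, it performs no real thread synchronization):

// Sketch of a single-guard accessor with RAII release (assumed names, not Vespa API).
#include <cassert>
#include <memory>

class SingleGuardAccessor;

class AccessGuard {
    SingleGuardAccessor& _accessor;
public:
    explicit AccessGuard(SingleGuardAccessor& accessor) : _accessor(accessor) {}
    ~AccessGuard();
    // Mutating operations forwarded to the guarded component would live here.
};

class SingleGuardAccessor {
    bool _guard_held = false;
    friend class AccessGuard;
    void mark_guard_released() {
        assert(_guard_held);
        _guard_held = false;
    }
public:
    std::unique_ptr<AccessGuard> rendezvous_and_hold_all() {
        assert(!_guard_held); // only one outstanding guard allowed
        _guard_held = true;
        return std::make_unique<AccessGuard>(*this);
    }
};

inline AccessGuard::~AccessGuard() { _accessor.mark_guard_released(); }

// Usage: { auto guard = accessor.rendezvous_and_hold_all(); /* mutate state */ } // released on scope exit
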
diff --git a/storage/src/vespa/storage/distributor/legacy_single_stripe_accessor.h b/storage/src/vespa/storage/distributor/legacy_single_stripe_accessor.h
new file mode 100644
index 00000000000..caf1e397e5b
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/legacy_single_stripe_accessor.h
@@ -0,0 +1,69 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include "stripe_access_guard.h"
+
+namespace storage::distributor {
+
+class DistributorStripe;
+class LegacySingleStripeAccessor;
+
+/**
+ * Very simple stripe access guard which expects the caller and its single stripe to run in the
+ * same thread. This means there's no actual striping of operations or any thread synchronization
+ * performed. Only intended as a stop-gap while we have legacy stripe behavior.
+ */
+class LegacySingleStripeAccessGuard : public StripeAccessGuard {
+ LegacySingleStripeAccessor& _accessor;
+ DistributorStripe& _stripe;
+public:
+ LegacySingleStripeAccessGuard(LegacySingleStripeAccessor& accessor,
+ DistributorStripe& stripe);
+ ~LegacySingleStripeAccessGuard() override;
+
+ void update_total_distributor_config(std::shared_ptr<const DistributorConfiguration> config) override;
+
+ void update_distribution_config(const BucketSpaceDistributionConfigs& new_configs) override;
+ void set_pending_cluster_state_bundle(const lib::ClusterStateBundle& pending_state) override;
+ void clear_pending_cluster_state_bundle() override;
+ void enable_cluster_state_bundle(const lib::ClusterStateBundle& new_state) override;
+ void notify_distribution_change_enabled() override;
+
+ PotentialDataLossReport remove_superfluous_buckets(document::BucketSpace bucket_space,
+ const lib::ClusterState& new_state,
+ bool is_distribution_change) override;
+ void merge_entries_into_db(document::BucketSpace bucket_space,
+ api::Timestamp gathered_at_timestamp,
+ const lib::Distribution& distribution,
+ const lib::ClusterState& new_state,
+ const char* storage_up_states,
+ const std::unordered_set<uint16_t>& outdated_nodes,
+ const std::vector<dbtransition::Entry>& entries) override;
+
+ void update_read_snapshot_before_db_pruning() override;
+ void update_read_snapshot_after_db_pruning(const lib::ClusterStateBundle& new_state) override;
+ void update_read_snapshot_after_activation(const lib::ClusterStateBundle& activated_state) override;
+ void clear_read_only_bucket_repo_databases() override;
+};
+
+/**
+ * Impl of StripeAccessor which creates LegacySingleStripeAccessGuards bound to a single stripe.
+ */
+class LegacySingleStripeAccessor : public StripeAccessor {
+ DistributorStripe& _stripe;
+ bool _guard_held;
+
+ friend class LegacySingleStripeAccessGuard;
+public:
+ explicit LegacySingleStripeAccessor(DistributorStripe& stripe)
+ : _stripe(stripe),
+ _guard_held(false)
+ {}
+ ~LegacySingleStripeAccessor() override = default;
+
+ std::unique_ptr<StripeAccessGuard> rendezvous_and_hold_all() override;
+private:
+ void mark_guard_released();
+};
+
+}
diff --git a/storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.cpp b/storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.cpp
index af6223afe28..335d070ad7b 100644
--- a/storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.cpp
+++ b/storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.cpp
@@ -4,6 +4,7 @@
#include "clusterinformation.h"
#include "pendingclusterstate.h"
#include "distributor_bucket_space.h"
+#include "stripe_access_guard.h"
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vdslib/state/clusterstate.h>
#include <algorithm>
@@ -17,15 +18,15 @@ using lib::Node;
using lib::NodeType;
using lib::NodeState;
-PendingBucketSpaceDbTransition::PendingBucketSpaceDbTransition(const PendingClusterState &pendingClusterState,
+PendingBucketSpaceDbTransition::PendingBucketSpaceDbTransition(document::BucketSpace bucket_space,
DistributorBucketSpace &distributorBucketSpace,
bool distributionChanged,
const OutdatedNodes &outdatedNodes,
std::shared_ptr<const ClusterInformation> clusterInfo,
const lib::ClusterState &newClusterState,
api::Timestamp creationTimestamp)
- : _entries(),
- _iter(0),
+ : _bucket_space(bucket_space),
+ _entries(),
_removedBuckets(),
_missingEntries(),
_clusterInfo(std::move(clusterInfo)),
@@ -33,7 +34,6 @@ PendingBucketSpaceDbTransition::PendingBucketSpaceDbTransition(const PendingClus
_prevClusterState(distributorBucketSpace.getClusterState()),
_newClusterState(newClusterState),
_creationTimestamp(creationTimestamp),
- _pendingClusterState(pendingClusterState),
_distributorBucketSpace(distributorBucketSpace),
_distributorIndex(_clusterInfo->getDistributorIndex()),
_bucketOwnershipTransfer(distributionChanged),
@@ -53,7 +53,7 @@ PendingBucketSpaceDbTransition::PendingBucketSpaceDbTransition(const PendingClus
PendingBucketSpaceDbTransition::~PendingBucketSpaceDbTransition() = default;
PendingBucketSpaceDbTransition::Range
-PendingBucketSpaceDbTransition::skipAllForSameBucket()
+PendingBucketSpaceDbTransition::DbMerger::skipAllForSameBucket()
{
Range r(_iter, _iter);
@@ -68,7 +68,7 @@ PendingBucketSpaceDbTransition::skipAllForSameBucket()
}
std::vector<BucketCopy>
-PendingBucketSpaceDbTransition::getCopiesThatAreNewOrAltered(BucketDatabase::Entry& info, const Range& range)
+PendingBucketSpaceDbTransition::DbMerger::getCopiesThatAreNewOrAltered(BucketDatabase::Entry& info, const Range& range)
{
std::vector<BucketCopy> copiesToAdd;
for (uint32_t i = range.first; i < range.second; ++i) {
@@ -83,28 +83,21 @@ PendingBucketSpaceDbTransition::getCopiesThatAreNewOrAltered(BucketDatabase::Ent
}
void
-PendingBucketSpaceDbTransition::insertInfo(BucketDatabase::Entry& info, const Range& range)
+PendingBucketSpaceDbTransition::DbMerger::insertInfo(BucketDatabase::Entry& info, const Range& range)
{
std::vector<BucketCopy> copiesToAddOrUpdate(
getCopiesThatAreNewOrAltered(info, range));
- const auto &dist(_distributorBucketSpace.getDistribution());
std::vector<uint16_t> order(
- dist.getIdealStorageNodes(
- _newClusterState,
+ _distribution.getIdealStorageNodes(
+ _new_state,
_entries[range.first].bucket_id(),
- _clusterInfo->getStorageUpStates()));
+ _storage_up_states));
info->addNodes(copiesToAddOrUpdate, order, TrustedUpdate::DEFER);
}
-std::string
-PendingBucketSpaceDbTransition::requestNodesToString()
-{
- return _pendingClusterState.requestNodesToString();
-}
-
bool
-PendingBucketSpaceDbTransition::removeCopiesFromNodesThatWereRequested(BucketDatabase::Entry& e, const document::BucketId& bucketId)
+PendingBucketSpaceDbTransition::DbMerger::removeCopiesFromNodesThatWereRequested(BucketDatabase::Entry& e, const document::BucketId& bucketId)
{
bool updated = false;
for (uint32_t i = 0; i < e->getNodeCount();) {
@@ -116,7 +109,7 @@ PendingBucketSpaceDbTransition::removeCopiesFromNodesThatWereRequested(BucketDat
// mark a single remaining replica as trusted even though there might
// be one or more additional replicas pending merge into the database.
if (nodeIsOutdated(entryNode)
- && (info.getTimestamp() < _creationTimestamp)
+ && (info.getTimestamp() < _creation_timestamp)
&& e->removeNode(entryNode, TrustedUpdate::DEFER))
{
LOG(spam,
@@ -133,21 +126,21 @@ PendingBucketSpaceDbTransition::removeCopiesFromNodesThatWereRequested(BucketDat
}
bool
-PendingBucketSpaceDbTransition::databaseIteratorHasPassedBucketInfoIterator(uint64_t bucket_key) const
+PendingBucketSpaceDbTransition::DbMerger::databaseIteratorHasPassedBucketInfoIterator(uint64_t bucket_key) const
{
return ((_iter < _entries.size())
&& (_entries[_iter].bucket_key < bucket_key));
}
bool
-PendingBucketSpaceDbTransition::bucketInfoIteratorPointsToBucket(uint64_t bucket_key) const
+PendingBucketSpaceDbTransition::DbMerger::bucketInfoIteratorPointsToBucket(uint64_t bucket_key) const
{
return _iter < _entries.size() && _entries[_iter].bucket_key == bucket_key;
}
using MergeResult = BucketDatabase::MergingProcessor::Result;
-MergeResult PendingBucketSpaceDbTransition::merge(BucketDatabase::Merger& merger) {
+MergeResult PendingBucketSpaceDbTransition::DbMerger::merge(BucketDatabase::Merger& merger) {
const uint64_t bucket_key = merger.bucket_key();
while (databaseIteratorHasPassedBucketInfoIterator(bucket_key)) {
@@ -158,9 +151,7 @@ MergeResult PendingBucketSpaceDbTransition::merge(BucketDatabase::Merger& merger
auto& e = merger.current_entry();
document::BucketId bucketId(e.getBucketId());
- LOG(spam,
- "Before merging info from nodes [%s], bucket %s had info %s",
- requestNodesToString().c_str(),
+ LOG(spam, "Before merging info, bucket %s had info %s",
bucketId.toString().c_str(),
e.getBucketInfo().toString().c_str());
@@ -185,14 +176,14 @@ MergeResult PendingBucketSpaceDbTransition::merge(BucketDatabase::Merger& merger
return MergeResult::KeepUnchanged;
}
-void PendingBucketSpaceDbTransition::insert_remaining_at_end(BucketDatabase::TrailingInserter& inserter) {
+void PendingBucketSpaceDbTransition::DbMerger::insert_remaining_at_end(BucketDatabase::TrailingInserter& inserter) {
while (_iter < _entries.size()) {
addToInserter(inserter, skipAllForSameBucket());
}
}
void
-PendingBucketSpaceDbTransition::addToMerger(BucketDatabase::Merger& merger, const Range& range)
+PendingBucketSpaceDbTransition::DbMerger::addToMerger(BucketDatabase::Merger& merger, const Range& range)
{
const auto bucket_id = _entries[range.first].bucket_id();
LOG(spam, "Adding new bucket %s with %d copies",
@@ -202,16 +193,14 @@ PendingBucketSpaceDbTransition::addToMerger(BucketDatabase::Merger& merger, cons
BucketDatabase::Entry e(bucket_id, BucketInfo());
insertInfo(e, range);
if (e->getLastGarbageCollectionTime() == 0) {
- e->setLastGarbageCollectionTime(
- framework::MicroSecTime(_creationTimestamp)
- .getSeconds().getTime());
+ e->setLastGarbageCollectionTime(framework::MicroSecTime(_creation_timestamp).getSeconds().getTime());
}
e.getBucketInfo().updateTrusted();
merger.insert_before_current(bucket_id, e);
}
void
-PendingBucketSpaceDbTransition::addToInserter(BucketDatabase::TrailingInserter& inserter, const Range& range)
+PendingBucketSpaceDbTransition::DbMerger::addToInserter(BucketDatabase::TrailingInserter& inserter, const Range& range)
{
// TODO dedupe
const auto bucket_id = _entries[range.first].bucket_id();
@@ -222,20 +211,32 @@ PendingBucketSpaceDbTransition::addToInserter(BucketDatabase::TrailingInserter&
BucketDatabase::Entry e(bucket_id, BucketInfo());
insertInfo(e, range);
if (e->getLastGarbageCollectionTime() == 0) {
- e->setLastGarbageCollectionTime(
- framework::MicroSecTime(_creationTimestamp)
- .getSeconds().getTime());
+ e->setLastGarbageCollectionTime(framework::MicroSecTime(_creation_timestamp).getSeconds().getTime());
}
e.getBucketInfo().updateTrusted();
inserter.insert_at_end(bucket_id, e);
}
+// TODO STRIPE remove legacy single stripe stuff
void
PendingBucketSpaceDbTransition::mergeIntoBucketDatabase()
{
BucketDatabase &db(_distributorBucketSpace.getBucketDatabase());
std::sort(_entries.begin(), _entries.end());
- db.merge(*this);
+
+ const auto& dist = _distributorBucketSpace.getDistribution();
+ DbMerger merger(_creationTimestamp, dist, _newClusterState, _clusterInfo->getStorageUpStates(), _outdatedNodes, _entries);
+
+ db.merge(merger);
+}
+
+void
+PendingBucketSpaceDbTransition::merge_into_bucket_databases(StripeAccessGuard& guard)
+{
+ std::sort(_entries.begin(), _entries.end());
+ const auto& dist = _distributorBucketSpace.getDistribution();
+ guard.merge_entries_into_db(_bucket_space, _creationTimestamp, dist, _newClusterState,
+ _clusterInfo->getStorageUpStates(), _outdatedNodes, _entries);
}
void
diff --git a/storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.h b/storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.h
index 232f1186879..f7766cb265d 100644
--- a/storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.h
+++ b/storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.h
@@ -3,25 +3,30 @@
#include "pending_bucket_space_db_transition_entry.h"
#include "outdated_nodes.h"
+#include <vespa/document/bucket/bucketspace.h>
#include <vespa/storage/bucketdb/bucketdatabase.h>
#include <unordered_map>
namespace storage::api { class RequestBucketInfoReply; }
-namespace storage::lib { class ClusterState; class State; }
+namespace storage::lib {
+class ClusterState;
+class Distribution;
+class State;
+}
namespace storage::distributor {
class ClusterInformation;
class PendingClusterState;
class DistributorBucketSpace;
+class StripeAccessGuard;
/**
* Class used by PendingClusterState to track request bucket info
* reply result within a bucket space and apply it to the distributor
* bucket database when switching to the pending cluster state.
*/
-class PendingBucketSpaceDbTransition : public BucketDatabase::MergingProcessor
-{
+class PendingBucketSpaceDbTransition {
public:
using Entry = dbtransition::Entry;
using EntryList = std::vector<Entry>;
@@ -29,8 +34,8 @@ public:
private:
using Range = std::pair<uint32_t, uint32_t>;
+ document::BucketSpace _bucket_space;
EntryList _entries;
- uint32_t _iter;
std::vector<document::BucketId> _removedBuckets;
std::vector<Range> _missingEntries;
std::shared_ptr<const ClusterInformation> _clusterInfo;
@@ -42,45 +47,15 @@ private:
// may be down and thus cannot get a request.
OutdatedNodes _outdatedNodes;
- const lib::ClusterState &_prevClusterState;
- const lib::ClusterState &_newClusterState;
+ const lib::ClusterState& _prevClusterState;
+ const lib::ClusterState& _newClusterState;
const api::Timestamp _creationTimestamp;
- const PendingClusterState &_pendingClusterState;
- DistributorBucketSpace &_distributorBucketSpace;
+ DistributorBucketSpace& _distributorBucketSpace;
uint16_t _distributorIndex;
bool _bucketOwnershipTransfer;
std::unordered_map<uint16_t, size_t> _rejectedRequests;
std::unordered_map<uint16_t, size_t> _failed_requests; // Also includes rejections
- BucketDatabase::MergingProcessor::Result merge(BucketDatabase::Merger&) override;
- void insert_remaining_at_end(BucketDatabase::TrailingInserter&) override;
-
- /**
- * Skips through all entries for the same bucket and returns
- * the range in the entry list for which they were found.
- * The range is [from, to>
- */
- Range skipAllForSameBucket();
-
- std::vector<BucketCopy> getCopiesThatAreNewOrAltered(BucketDatabase::Entry& info, const Range& range);
- void insertInfo(BucketDatabase::Entry& info, const Range& range);
- void addToMerger(BucketDatabase::Merger& merger, const Range& range);
- void addToInserter(BucketDatabase::TrailingInserter& inserter, const Range& range);
-
- bool nodeIsOutdated(uint16_t node) const {
- return (_outdatedNodes.find(node) != _outdatedNodes.end());
- }
-
- // Returns whether at least one replica was removed from the entry.
- // Does NOT implicitly update trusted status on remaining replicas; caller must do
- // this explicitly.
- bool removeCopiesFromNodesThatWereRequested(BucketDatabase::Entry& e, const document::BucketId& bucketId);
-
- // Helper methods for iterating over _entries
- bool databaseIteratorHasPassedBucketInfoIterator(uint64_t bucket_key) const;
- bool bucketInfoIteratorPointsToBucket(uint64_t bucket_key) const;
- std::string requestNodesToString();
-
bool distributorChanged();
static bool nodeWasUpButNowIsDown(const lib::State &old, const lib::State &nw);
bool storageNodeUpInNewState(uint16_t node) const;
@@ -94,17 +69,74 @@ private:
void updateSetOfNodesThatAreOutdated();
public:
- PendingBucketSpaceDbTransition(const PendingClusterState &pendingClusterState,
+ // Abstracts away the details of how an entry list gathered from content nodes
+ // is actually diffed and merged into a database.
+ class DbMerger : public BucketDatabase::MergingProcessor {
+ api::Timestamp _creation_timestamp;
+ const lib::Distribution& _distribution;
+ const lib::ClusterState& _new_state;
+ const char* _storage_up_states;
+ const std::unordered_set<uint16_t>& _outdated_nodes; // TODO hash_set
+ const std::vector<dbtransition::Entry>& _entries;
+ uint32_t _iter;
+ public:
+ DbMerger(api::Timestamp creation_timestamp,
+ const lib::Distribution& distribution,
+ const lib::ClusterState& new_state,
+ const char* storage_up_states,
+ const std::unordered_set<uint16_t>& outdated_nodes,
+ const std::vector<dbtransition::Entry>& entries)
+ : _creation_timestamp(creation_timestamp),
+ _distribution(distribution),
+ _new_state(new_state),
+ _storage_up_states(storage_up_states),
+ _outdated_nodes(outdated_nodes),
+ _entries(entries),
+ _iter(0)
+ {}
+ ~DbMerger() override = default;
+
+ BucketDatabase::MergingProcessor::Result merge(BucketDatabase::Merger&) override;
+ void insert_remaining_at_end(BucketDatabase::TrailingInserter&) override;
+
+ /**
+ * Skips through all entries for the same bucket and returns
+ * the range in the entry list for which they were found.
+ * The range is [from, to>
+ */
+ Range skipAllForSameBucket();
+
+ std::vector<BucketCopy> getCopiesThatAreNewOrAltered(BucketDatabase::Entry& info, const Range& range);
+ void insertInfo(BucketDatabase::Entry& info, const Range& range);
+ void addToMerger(BucketDatabase::Merger& merger, const Range& range);
+ void addToInserter(BucketDatabase::TrailingInserter& inserter, const Range& range);
+
+ // Returns whether at least one replica was removed from the entry.
+ // Does NOT implicitly update trusted status on remaining replicas; caller must do
+ // this explicitly.
+ bool removeCopiesFromNodesThatWereRequested(BucketDatabase::Entry& e, const document::BucketId& bucketId);
+
+ // Helper methods for iterating over _entries
+ bool databaseIteratorHasPassedBucketInfoIterator(uint64_t bucket_key) const;
+ bool bucketInfoIteratorPointsToBucket(uint64_t bucket_key) const;
+
+ bool nodeIsOutdated(uint16_t node) const {
+ return (_outdated_nodes.find(node) != _outdated_nodes.end());
+ }
+ };
+
+ PendingBucketSpaceDbTransition(document::BucketSpace bucket_space,
DistributorBucketSpace &distributorBucketSpace,
bool distributionChanged,
const OutdatedNodes &outdatedNodes,
std::shared_ptr<const ClusterInformation> clusterInfo,
const lib::ClusterState &newClusterState,
api::Timestamp creationTimestamp);
- ~PendingBucketSpaceDbTransition() override;
+ ~PendingBucketSpaceDbTransition();
// Merges all the results with the corresponding bucket database.
void mergeIntoBucketDatabase();
+ void merge_into_bucket_databases(StripeAccessGuard& guard);
// Adds the info from the reply to our list of information.
void onRequestBucketInfoReply(const api::RequestBucketInfoReply &reply, uint16_t node);
diff --git a/storage/src/vespa/storage/distributor/pendingclusterstate.cpp b/storage/src/vespa/storage/distributor/pendingclusterstate.cpp
index a7fd5a5af53..77760174253 100644
--- a/storage/src/vespa/storage/distributor/pendingclusterstate.cpp
+++ b/storage/src/vespa/storage/distributor/pendingclusterstate.cpp
@@ -84,9 +84,9 @@ PendingClusterState::initializeBucketSpaceTransitions(bool distributionChanged,
auto onItr = outdatedNodesMap.find(elem.first);
const auto &outdatedNodes = (onItr == outdatedNodesMap.end()) ? emptyOutdatedNodes : onItr->second;
auto pendingTransition =
- std::make_unique<PendingBucketSpaceDbTransition>
- (*this, *elem.second, distributionChanged, outdatedNodes,
- _clusterInfo, *_newClusterStateBundle.getDerivedClusterState(elem.first), _creationTimestamp);
+ std::make_unique<PendingBucketSpaceDbTransition>(
+ elem.first, *elem.second, distributionChanged, outdatedNodes,
+ _clusterInfo, *_newClusterStateBundle.getDerivedClusterState(elem.first), _creationTimestamp);
if (pendingTransition->getBucketOwnershipTransfer()) {
_bucketOwnershipTransfer = true;
}
@@ -331,6 +331,14 @@ PendingClusterState::mergeIntoBucketDatabases()
}
void
+PendingClusterState::merge_into_bucket_databases(StripeAccessGuard& guard)
+{
+ for (auto &elem : _pendingTransitions) {
+ elem.second->merge_into_bucket_databases(guard);
+ }
+}
+
+void
PendingClusterState::printXml(vespalib::XmlOutputStream& xos) const
{
using namespace vespalib::xml;
diff --git a/storage/src/vespa/storage/distributor/pendingclusterstate.h b/storage/src/vespa/storage/distributor/pendingclusterstate.h
index 42b7bf0dcf2..af0c85fab95 100644
--- a/storage/src/vespa/storage/distributor/pendingclusterstate.h
+++ b/storage/src/vespa/storage/distributor/pendingclusterstate.h
@@ -18,6 +18,7 @@ namespace storage::distributor {
class DistributorMessageSender;
class PendingBucketSpaceDbTransition;
class DistributorBucketSpaceRepo;
+class StripeAccessGuard;
/**
* Class used by BucketDBUpdater to track request bucket info
@@ -146,6 +147,8 @@ public:
* Merges all the results with the corresponding bucket databases.
*/
void mergeIntoBucketDatabases();
+ void merge_into_bucket_databases(StripeAccessGuard& guard);
+
// Get pending transition for a specific bucket space. Only used by unit test.
PendingBucketSpaceDbTransition &getPendingBucketSpaceDbTransition(document::BucketSpace bucketSpace);
diff --git a/storage/src/vespa/storage/distributor/potential_data_loss_report.h b/storage/src/vespa/storage/distributor/potential_data_loss_report.h
new file mode 100644
index 00000000000..96abd787649
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/potential_data_loss_report.h
@@ -0,0 +1,22 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <cstdint>
+
+namespace storage::distributor {
+
+/**
+ * Represents the amount of data a distributor reasons _may_ have become unavailable
+ * due to all bucket replicas no longer being present.
+ */
+struct PotentialDataLossReport {
+ size_t buckets = 0;
+ size_t documents = 0;
+
+ void merge(const PotentialDataLossReport& other) noexcept {
+ buckets += other.buckets;
+ documents += other.documents;
+ }
+};
+
+}
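
As a usage illustration (a minimal standalone sketch; the aggregation below is hypothetical and not part of this patch), a caller that prunes several bucket spaces can merge the per-space reports into one total before logging it:

#include <cstdio>
#include "potential_data_loss_report.h"

int main() {
    using storage::distributor::PotentialDataLossReport;
    // Hypothetical per-space results, e.g. as returned by remove_superfluous_buckets().
    PotentialDataLossReport default_space{5, 1200};
    PotentialDataLossReport global_space{2, 40};
    PotentialDataLossReport total;
    total.merge(default_space);
    total.merge(global_space);
    std::printf("%zu buckets / %zu documents potentially unavailable\n",
                total.buckets, total.documents);
    return 0;
}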
diff --git a/storage/src/vespa/storage/distributor/stripe_access_guard.h b/storage/src/vespa/storage/distributor/stripe_access_guard.h
new file mode 100644
index 00000000000..69aae755dec
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/stripe_access_guard.h
@@ -0,0 +1,71 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include "bucket_space_distribution_configs.h"
+#include "pending_bucket_space_db_transition_entry.h"
+#include "potential_data_loss_report.h"
+#include <vespa/document/bucket/bucketspace.h>
+#include <vespa/storageapi/defs.h>
+#include <unordered_set> // TODO use hash_set instead
+
+namespace storage::lib {
+class ClusterState;
+class ClusterStateBundle;
+class Distribution;
+}
+
+namespace storage { class DistributorConfiguration; }
+
+namespace storage::distributor {
+
+/**
+ * A stripe access guard guarantees that the holder of a guard can access underlying
+ * stripes via it in a thread safe manner. In particular, while any access guard is
+ * held, all stripe threads must be in a safe rendezvous location with no race conditions
+ * possible. When a guard goes out of scope, the stripe threads may resume operation.
+ */
+class StripeAccessGuard {
+public:
+ virtual ~StripeAccessGuard() = default;
+
+ virtual void update_total_distributor_config(std::shared_ptr<const DistributorConfiguration> config) = 0;
+
+ virtual void update_distribution_config(const BucketSpaceDistributionConfigs& new_configs) = 0;
+ virtual void set_pending_cluster_state_bundle(const lib::ClusterStateBundle& pending_state) = 0;
+ virtual void clear_pending_cluster_state_bundle() = 0;
+ virtual void enable_cluster_state_bundle(const lib::ClusterStateBundle& new_state) = 0;
+ virtual void notify_distribution_change_enabled() = 0;
+
+ virtual PotentialDataLossReport remove_superfluous_buckets(document::BucketSpace bucket_space,
+ const lib::ClusterState& new_state,
+ bool is_distribution_change) = 0;
+ virtual void merge_entries_into_db(document::BucketSpace bucket_space,
+ api::Timestamp gathered_at_timestamp,
+ const lib::Distribution& distribution,
+ const lib::ClusterState& new_state,
+ const char* storage_up_states,
+ const std::unordered_set<uint16_t>& outdated_nodes,
+ const std::vector<dbtransition::Entry>& entries) = 0;
+
+ virtual void update_read_snapshot_before_db_pruning() = 0;
+ virtual void update_read_snapshot_after_db_pruning(const lib::ClusterStateBundle& new_state) = 0;
+ virtual void update_read_snapshot_after_activation(const lib::ClusterStateBundle& activated_state) = 0;
+ virtual void clear_read_only_bucket_repo_databases() = 0;
+
+};
+
+/**
+ * Provides a factory for guards that protect access to underlying stripes.
+ *
+ * Important: at most one StripeAccessGuard may exist at any given time. Creating
+ * concurrent guards is undefined behavior.
+ */
+class StripeAccessor {
+public:
+ virtual ~StripeAccessor() = default;
+
+ virtual std::unique_ptr<StripeAccessGuard> rendezvous_and_hold_all() = 0;
+ // TODO also accessor for a single particular stripe?
+};
+
+}
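
The intended call pattern for the accessor/guard pair is sketched below (illustrative only; the helper function is hypothetical, but it uses only the interfaces declared above plus PendingClusterState::merge_into_bucket_databases() from this patch):

#include "stripe_access_guard.h"
#include "pendingclusterstate.h"

namespace storage::distributor {

// Hypothetical helper: park all stripe threads, apply the gathered per-space
// bucket info through the guard, then let the threads resume when the guard
// goes out of scope.
void apply_pending_transition(StripeAccessor& accessor, PendingClusterState& pending_state) {
    auto guard = accessor.rendezvous_and_hold_all(); // all stripes held at a safe rendezvous
    pending_state.merge_into_bucket_databases(*guard);
} // guard destroyed here; stripe threads resume operation

}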
diff --git a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp
new file mode 100644
index 00000000000..61ff11d5ac3
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp
@@ -0,0 +1,1139 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "stripe_bucket_db_updater.h"
+#include "bucket_db_prune_elision.h"
+#include "bucket_space_distribution_context.h"
+#include "distributor.h"
+#include "distributor_bucket_space.h"
+#include "distributormetricsset.h"
+#include "pending_bucket_space_db_transition.h"
+#include "potential_data_loss_report.h"
+#include "simpleclusterinformation.h"
+#include "stripe_access_guard.h"
+#include <vespa/document/bucket/fixed_bucket_spaces.h>
+#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storageapi/message/removelocation.h>
+#include <vespa/vdslib/distribution/distribution.h>
+#include <vespa/vdslib/state/clusterstate.h>
+#include <vespa/vespalib/util/xmlstream.h>
+#include <thread>
+
+#include <vespa/log/bufferedlogger.h>
+LOG_SETUP(".distributor.stripe_bucket_db_updater");
+
+using storage::lib::Node;
+using storage::lib::NodeType;
+using document::BucketSpace;
+
+namespace storage::distributor {
+
+StripeBucketDBUpdater::StripeBucketDBUpdater(DistributorStripeInterface& owner,
+ DistributorBucketSpaceRepo& bucketSpaceRepo,
+ DistributorBucketSpaceRepo& readOnlyBucketSpaceRepo,
+ DistributorMessageSender& sender,
+ DistributorComponentRegister& compReg,
+ bool use_legacy_mode)
+ : framework::StatusReporter("bucketdb", "Bucket DB Updater"),
+ _distributorComponent(owner, bucketSpaceRepo, readOnlyBucketSpaceRepo, compReg, "Bucket DB Updater"),
+ _node_ctx(_distributorComponent),
+ _op_ctx(_distributorComponent),
+ _distributor_interface(_distributorComponent.getDistributor()),
+ _delayedRequests(),
+ _sentMessages(),
+ _pendingClusterState(),
+ _history(),
+ _sender(sender),
+ _enqueuedRechecks(),
+ _outdatedNodesMap(),
+ _transitionTimer(_node_ctx.clock()),
+ _stale_reads_enabled(false),
+ _active_distribution_contexts(),
+ _explicit_transition_read_guard(),
+ _distribution_context_mutex(),
+ _use_legacy_mode(use_legacy_mode)
+{
+ for (auto& elem : _op_ctx.bucket_space_repo()) {
+ _active_distribution_contexts.emplace(
+ elem.first,
+ BucketSpaceDistributionContext::make_not_yet_initialized(_node_ctx.node_index()));
+ _explicit_transition_read_guard.emplace(elem.first, std::shared_ptr<BucketDatabase::ReadGuard>());
+ }
+}
+
+StripeBucketDBUpdater::~StripeBucketDBUpdater() = default;
+
+OperationRoutingSnapshot StripeBucketDBUpdater::read_snapshot_for_bucket(const document::Bucket& bucket) const {
+ const auto bucket_space = bucket.getBucketSpace();
+ std::lock_guard lock(_distribution_context_mutex);
+ auto active_state_iter = _active_distribution_contexts.find(bucket_space);
+ assert(active_state_iter != _active_distribution_contexts.cend());
+ auto& state = *active_state_iter->second;
+ if (!state.bucket_owned_in_active_state(bucket.getBucketId())) {
+ return OperationRoutingSnapshot::make_not_routable_in_state(active_state_iter->second);
+ }
+ const bool bucket_present_in_mutable_db = state.bucket_owned_in_pending_state(bucket.getBucketId());
+ if (!bucket_present_in_mutable_db && !stale_reads_enabled()) {
+ return OperationRoutingSnapshot::make_not_routable_in_state(active_state_iter->second);
+ }
+ const auto& space_repo = bucket_present_in_mutable_db
+ ? _op_ctx.bucket_space_repo()
+ : _op_ctx.read_only_bucket_space_repo();
+ auto existing_guard_iter = _explicit_transition_read_guard.find(bucket_space);
+ assert(existing_guard_iter != _explicit_transition_read_guard.cend());
+ auto db_guard = existing_guard_iter->second
+ ? existing_guard_iter->second
+ : space_repo.get(bucket_space).getBucketDatabase().acquire_read_guard();
+ return OperationRoutingSnapshot::make_routable_with_guard(active_state_iter->second, std::move(db_guard), space_repo);
+}
+
+void
+StripeBucketDBUpdater::flush()
+{
+ for (auto & entry : _sentMessages) {
+ // Cannot sendDown MergeBucketReplies during flushing, since
+ // all lower links have been closed
+ if (entry.second._mergeReplyGuard) {
+ entry.second._mergeReplyGuard->resetReply();
+ }
+ }
+ _sentMessages.clear();
+}
+
+void
+StripeBucketDBUpdater::print(std::ostream& out, bool verbose, const std::string& indent) const
+{
+ (void) verbose; (void) indent;
+ out << "BucketDBUpdater";
+}
+
+bool
+StripeBucketDBUpdater::shouldDeferStateEnabling() const noexcept
+{
+ return stale_reads_enabled();
+}
+
+bool
+StripeBucketDBUpdater::hasPendingClusterState() const
+{
+ // Defer to the repo instead of checking our own internal pending cluster state,
+ // as we won't have one if the top level distributor handles this for all stripes.
+ // But if we're operating in "legacy" mode with this stripe bucket DB updater as
+ // the authoritative source, there should always be an internal pending cluster
+ // state if the repo is tagged as having one as well.
+ // Since we also set a pending cluster state bundle when triggered by a distribution
+ // config change, this check also covers that case.
+ return _op_ctx.bucket_space_repo().get(document::FixedBucketSpaces::default_space()).has_pending_cluster_state();
+}
+
+const lib::ClusterState*
+StripeBucketDBUpdater::pendingClusterStateOrNull(const document::BucketSpace& space) const {
+ auto& distr_space = _op_ctx.bucket_space_repo().get(space);
+ return (distr_space.has_pending_cluster_state()
+ ? &distr_space.get_pending_cluster_state()
+ : nullptr);
+}
+
+void
+StripeBucketDBUpdater::sendRequestBucketInfo(
+ uint16_t node,
+ const document::Bucket& bucket,
+ const std::shared_ptr<MergeReplyGuard>& mergeReplyGuard)
+{
+ if (!_op_ctx.storage_node_is_up(bucket.getBucketSpace(), node)) {
+ return;
+ }
+
+ std::vector<document::BucketId> buckets;
+ buckets.push_back(bucket.getBucketId());
+
+ auto msg = std::make_shared<api::RequestBucketInfoCommand>(bucket.getBucketSpace(), buckets);
+
+ LOG(debug,
+ "Sending request bucket info command %" PRIu64 " for "
+ "bucket %s to node %u",
+ msg->getMsgId(),
+ bucket.toString().c_str(),
+ node);
+
+ msg->setPriority(50);
+ msg->setAddress(_node_ctx.node_address(node));
+
+ _sentMessages[msg->getMsgId()] =
+ BucketRequest(node, _op_ctx.generate_unique_timestamp(),
+ bucket, mergeReplyGuard);
+ _sender.sendCommand(msg);
+}
+
+void
+StripeBucketDBUpdater::recheckBucketInfo(uint32_t nodeIdx,
+ const document::Bucket& bucket)
+{
+ sendRequestBucketInfo(nodeIdx, bucket, std::shared_ptr<MergeReplyGuard>());
+}
+
+namespace {
+
+class ReadOnlyDbMergingInserter : public BucketDatabase::MergingProcessor {
+ using NewEntries = std::vector<BucketDatabase::Entry>;
+ NewEntries::const_iterator _current;
+ const NewEntries::const_iterator _last;
+public:
+ explicit ReadOnlyDbMergingInserter(const NewEntries& new_entries)
+ : _current(new_entries.cbegin()),
+ _last(new_entries.cend())
+ {}
+
+ Result merge(BucketDatabase::Merger& m) override {
+ const uint64_t key_to_insert = m.bucket_key();
+ uint64_t key_at_cursor = 0;
+ while (_current != _last) {
+ key_at_cursor = _current->getBucketId().toKey();
+ if (key_at_cursor >= key_to_insert) {
+ break;
+ }
+ m.insert_before_current(_current->getBucketId(), *_current);
+ ++_current;
+ }
+ if ((_current != _last) && (key_at_cursor == key_to_insert)) {
+ // If we encounter a bucket that already exists, replace value wholesale.
+ // Don't try to cleverly merge replicas, as the values we currently hold
+ // in the read-only DB may be stale.
+ // Note that this case shouldn't really happen, since we only add previously
+ // owned buckets to the read-only DB, and subsequent adds to a non-empty DB
+ // can only happen for state preemptions. Since ownership is not regained
+ // before a state is stable, a bucket is only added once. But we handle it
+ // anyway in case this changes at some point in the future.
+ m.current_entry() = *_current;
+ return Result::Update;
+ }
+ return Result::KeepUnchanged;
+ }
+
+ void insert_remaining_at_end(BucketDatabase::TrailingInserter& inserter) override {
+ for (; _current != _last; ++_current) {
+ inserter.insert_at_end(_current->getBucketId(), *_current);
+ }
+ }
+};
+
+}
+
+void
+StripeBucketDBUpdater::removeSuperfluousBuckets(
+ const lib::ClusterStateBundle& newState,
+ bool is_distribution_config_change)
+{
+ assert(_use_legacy_mode);
+ const bool move_to_read_only_db = shouldDeferStateEnabling();
+ const char* up_states = _op_ctx.storage_node_up_states();
+ for (auto& elem : _op_ctx.bucket_space_repo()) {
+ const auto& newDistribution(elem.second->getDistribution());
+ const auto& oldClusterState(elem.second->getClusterState());
+ const auto& new_cluster_state = newState.getDerivedClusterState(elem.first);
+
+ // Running a full DB sweep is expensive, so if the cluster state transition does
+ // not actually indicate that buckets should possibly be removed, we elide it entirely.
+ if (!is_distribution_config_change
+ && db_pruning_may_be_elided(oldClusterState, *new_cluster_state, up_states))
+ {
+ LOG(debug, "[bucket space '%s']: eliding DB pruning for state transition '%s' -> '%s'",
+ document::FixedBucketSpaces::to_string(elem.first).data(),
+ oldClusterState.toString().c_str(), new_cluster_state->toString().c_str());
+ continue;
+ }
+
+ auto& bucketDb(elem.second->getBucketDatabase());
+ auto& readOnlyDb(_op_ctx.read_only_bucket_space_repo().get(elem.first).getBucketDatabase());
+
+ // Remove all buckets not belonging to this distributor, or
+ // being on storage nodes that are no longer up.
+ MergingNodeRemover proc(
+ oldClusterState,
+ *new_cluster_state,
+ _node_ctx.node_index(),
+ newDistribution,
+ up_states,
+ move_to_read_only_db);
+
+ bucketDb.merge(proc);
+ if (move_to_read_only_db) {
+ ReadOnlyDbMergingInserter read_only_merger(proc.getNonOwnedEntries());
+ readOnlyDb.merge(read_only_merger);
+ }
+ maybe_inject_simulated_db_pruning_delay();
+ }
+}
+
+PotentialDataLossReport
+StripeBucketDBUpdater::remove_superfluous_buckets(
+ document::BucketSpace bucket_space,
+ const lib::ClusterState& new_state,
+ bool is_distribution_change)
+{
+ assert(!_use_legacy_mode);
+ (void)is_distribution_change; // TODO remove if not needed
+ const bool move_to_read_only_db = shouldDeferStateEnabling();
+ const char* up_states = _op_ctx.storage_node_up_states();
+
+ auto& s = _op_ctx.bucket_space_repo().get(bucket_space);
+ const auto& new_distribution = s.getDistribution();
+ const auto& old_cluster_state = s.getClusterState();
+ // Elision of DB sweep is done at a higher level, so we don't have to do that here.
+ auto& bucket_db = s.getBucketDatabase();
+ auto& read_only_db = _op_ctx.read_only_bucket_space_repo().get(bucket_space).getBucketDatabase();
+
+ // Remove all buckets not belonging to this distributor, or
+ // being on storage nodes that are no longer up.
+ MergingNodeRemover proc(
+ old_cluster_state,
+ new_state,
+ _node_ctx.node_index(),
+ new_distribution,
+ up_states,
+ move_to_read_only_db);
+
+ bucket_db.merge(proc);
+ if (move_to_read_only_db) {
+ ReadOnlyDbMergingInserter read_only_merger(proc.getNonOwnedEntries());
+ read_only_db.merge(read_only_merger);
+ }
+ PotentialDataLossReport report;
+ report.buckets = proc.removed_buckets();
+ report.documents = proc.removed_documents();
+ return report;
+}
+
+void
+StripeBucketDBUpdater::merge_entries_into_db(document::BucketSpace bucket_space,
+ api::Timestamp gathered_at_timestamp,
+ const lib::Distribution& distribution,
+ const lib::ClusterState& new_state,
+ const char* storage_up_states,
+ const std::unordered_set<uint16_t>& outdated_nodes,
+ const std::vector<dbtransition::Entry>& entries)
+{
+ assert(!_use_legacy_mode);
+ auto& s = _op_ctx.bucket_space_repo().get(bucket_space);
+ auto& bucket_db = s.getBucketDatabase();
+
+ PendingBucketSpaceDbTransition::DbMerger merger(gathered_at_timestamp, distribution, new_state,
+ storage_up_states, outdated_nodes, entries);
+ bucket_db.merge(merger);
+}
+
+namespace {
+
+void maybe_sleep_for(std::chrono::milliseconds ms) {
+ if (ms.count() > 0) {
+ std::this_thread::sleep_for(ms);
+ }
+}
+
+}
+
+void
+StripeBucketDBUpdater::maybe_inject_simulated_db_pruning_delay() {
+ maybe_sleep_for(_op_ctx.distributor_config().simulated_db_pruning_latency());
+}
+
+void
+StripeBucketDBUpdater::maybe_inject_simulated_db_merging_delay() {
+ maybe_sleep_for(_op_ctx.distributor_config().simulated_db_merging_latency());
+}
+
+void
+StripeBucketDBUpdater::ensureTransitionTimerStarted()
+{
+ // Don't overwrite start time if we're already processing a state, as
+ // that will make transition times appear artificially low.
+ if (!hasPendingClusterState()) {
+ _transitionTimer = framework::MilliSecTimer(
+ _node_ctx.clock());
+ }
+}
+
+void
+StripeBucketDBUpdater::completeTransitionTimer()
+{
+ _distributor_interface.getMetrics()
+ .stateTransitionTime.addValue(_transitionTimer.getElapsedTimeAsDouble());
+}
+
+void
+StripeBucketDBUpdater::clearReadOnlyBucketRepoDatabases()
+{
+ for (auto& space : _op_ctx.read_only_bucket_space_repo()) {
+ space.second->getBucketDatabase().clear();
+ }
+}
+
+void
+StripeBucketDBUpdater::storageDistributionChanged()
+{
+ ensureTransitionTimerStarted();
+
+ removeSuperfluousBuckets(_op_ctx.cluster_state_bundle(), true);
+
+ auto clusterInfo = std::make_shared<const SimpleClusterInformation>(
+ _node_ctx.node_index(),
+ _op_ctx.cluster_state_bundle(),
+ _op_ctx.storage_node_up_states());
+ _pendingClusterState = PendingClusterState::createForDistributionChange(
+ _node_ctx.clock(),
+ std::move(clusterInfo),
+ _sender,
+ _op_ctx.bucket_space_repo(),
+ _op_ctx.generate_unique_timestamp());
+ _outdatedNodesMap = _pendingClusterState->getOutdatedNodesMap();
+ _op_ctx.bucket_space_repo().set_pending_cluster_state_bundle(_pendingClusterState->getNewClusterStateBundle());
+}
+
+void
+StripeBucketDBUpdater::replyToPreviousPendingClusterStateIfAny()
+{
+ if (_pendingClusterState.get() && _pendingClusterState->hasCommand()) {
+ _distributor_interface.getMessageSender().sendUp(
+ std::make_shared<api::SetSystemStateReply>(*_pendingClusterState->getCommand()));
+ }
+}
+
+void
+StripeBucketDBUpdater::replyToActivationWithActualVersion(
+ const api::ActivateClusterStateVersionCommand& cmd,
+ uint32_t actualVersion)
+{
+ auto reply = std::make_shared<api::ActivateClusterStateVersionReply>(cmd);
+ reply->setActualVersion(actualVersion);
+ _distributor_interface.getMessageSender().sendUp(reply); // TODO let API accept rvalues
+}
+
+void StripeBucketDBUpdater::update_read_snapshot_before_db_pruning() {
+ std::lock_guard lock(_distribution_context_mutex);
+ for (auto& elem : _op_ctx.bucket_space_repo()) {
+ // At this point, we're still operating with a distribution context _without_ a
+ // pending state, i.e. anyone using the context will expect to find buckets
+ // in the DB that correspond to how the database looked prior to pruning
+ // buckets from the DB. To ensure this is not violated, take a snapshot of the
+ // _mutable_ DB and expose this. This snapshot only lives until we atomically
+ // flip to expose a distribution context that includes the new, pending state.
+ // At that point, the read-only DB is known to contain the buckets that have
+ // been pruned away, so we can release the mutable DB snapshot safely.
+ // TODO test for, and handle, state preemption case!
+ _explicit_transition_read_guard[elem.first] = elem.second->getBucketDatabase().acquire_read_guard();
+ }
+}
+
+
+void StripeBucketDBUpdater::update_read_snapshot_after_db_pruning(const lib::ClusterStateBundle& new_state) {
+ std::lock_guard lock(_distribution_context_mutex);
+ const auto old_default_state = _op_ctx.bucket_space_repo().get(
+ document::FixedBucketSpaces::default_space()).cluster_state_sp();
+ for (auto& elem : _op_ctx.bucket_space_repo()) {
+ auto new_distribution = elem.second->distribution_sp();
+ auto old_cluster_state = elem.second->cluster_state_sp();
+ auto new_cluster_state = new_state.getDerivedClusterState(elem.first);
+ _active_distribution_contexts.insert_or_assign(
+ elem.first,
+ BucketSpaceDistributionContext::make_state_transition(
+ std::move(old_cluster_state),
+ old_default_state,
+ std::move(new_cluster_state),
+ std::move(new_distribution),
+ _node_ctx.node_index()));
+ // We can now remove the explicit mutable DB snapshot, as the buckets that have been
+ // pruned away are visible in the read-only DB.
+ _explicit_transition_read_guard[elem.first] = std::shared_ptr<BucketDatabase::ReadGuard>();
+ }
+}
+
+void StripeBucketDBUpdater::update_read_snapshot_after_activation(const lib::ClusterStateBundle& activated_state) {
+ std::lock_guard lock(_distribution_context_mutex);
+ const auto& default_cluster_state = activated_state.getDerivedClusterState(document::FixedBucketSpaces::default_space());
+ for (auto& elem : _op_ctx.bucket_space_repo()) {
+ auto new_distribution = elem.second->distribution_sp();
+ auto new_cluster_state = activated_state.getDerivedClusterState(elem.first);
+ _active_distribution_contexts.insert_or_assign(
+ elem.first,
+ BucketSpaceDistributionContext::make_stable_state(
+ std::move(new_cluster_state),
+ default_cluster_state,
+ std::move(new_distribution),
+ _node_ctx.node_index()));
+ }
+}
+
+bool
+StripeBucketDBUpdater::onSetSystemState(
+ const std::shared_ptr<api::SetSystemStateCommand>& cmd)
+{
+ assert(_use_legacy_mode);
+ LOG(debug,
+ "Received new cluster state %s",
+ cmd->getSystemState().toString().c_str());
+
+ const lib::ClusterStateBundle oldState = _op_ctx.cluster_state_bundle();
+ const lib::ClusterStateBundle& state = cmd->getClusterStateBundle();
+
+ if (state == oldState) {
+ return false;
+ }
+ ensureTransitionTimerStarted();
+ // Separate timer since _transitionTimer might span multiple pending states.
+ framework::MilliSecTimer process_timer(_node_ctx.clock());
+ update_read_snapshot_before_db_pruning();
+ const auto& bundle = cmd->getClusterStateBundle();
+ removeSuperfluousBuckets(bundle, false);
+ update_read_snapshot_after_db_pruning(bundle);
+ replyToPreviousPendingClusterStateIfAny();
+
+ auto clusterInfo = std::make_shared<const SimpleClusterInformation>(
+ _node_ctx.node_index(),
+ _op_ctx.cluster_state_bundle(),
+ _op_ctx.storage_node_up_states());
+ _pendingClusterState = PendingClusterState::createForClusterStateChange(
+ _node_ctx.clock(),
+ std::move(clusterInfo),
+ _sender,
+ _op_ctx.bucket_space_repo(),
+ cmd,
+ _outdatedNodesMap,
+ _op_ctx.generate_unique_timestamp());
+ _outdatedNodesMap = _pendingClusterState->getOutdatedNodesMap();
+
+ _distributor_interface.getMetrics().set_cluster_state_processing_time.addValue(
+ process_timer.getElapsedTimeAsDouble());
+
+ _op_ctx.bucket_space_repo().set_pending_cluster_state_bundle(_pendingClusterState->getNewClusterStateBundle());
+ if (isPendingClusterStateCompleted()) {
+ processCompletedPendingClusterState();
+ }
+ return true;
+}
+
+bool
+StripeBucketDBUpdater::onActivateClusterStateVersion(const std::shared_ptr<api::ActivateClusterStateVersionCommand>& cmd)
+{
+ assert(_use_legacy_mode);
+ if (hasPendingClusterState() && _pendingClusterState->isVersionedTransition()) {
+ const auto pending_version = _pendingClusterState->clusterStateVersion();
+ if (pending_version == cmd->version()) {
+ if (isPendingClusterStateCompleted()) {
+ assert(_pendingClusterState->isDeferred());
+ activatePendingClusterState();
+ } else {
+ LOG(error, "Received cluster state activation for pending version %u "
+ "without pending state being complete yet. This is not expected, "
+ "as no activation should be sent before all distributors have "
+ "reported that state processing is complete.", pending_version);
+ replyToActivationWithActualVersion(*cmd, 0); // Invalid version, will cause re-send (hopefully when completed).
+ return true;
+ }
+ } else {
+ replyToActivationWithActualVersion(*cmd, pending_version);
+ return true;
+ }
+ } else if (shouldDeferStateEnabling()) {
+ // Likely just a resend, but log a warning for now to get a feel for how common it is.
+ LOG(warning, "Received cluster state activation command for version %u, which "
+ "has no corresponding pending state. Likely resent operation.", cmd->version());
+ } else {
+ LOG(debug, "Received cluster state activation command for version %u, but distributor "
+ "config does not have deferred activation enabled. Treating as no-op.", cmd->version());
+ }
+ // Fall through to next link in call chain that cares about this message.
+ return false;
+}
+
+StripeBucketDBUpdater::MergeReplyGuard::~MergeReplyGuard()
+{
+ if (_reply) {
+ _distributor_interface.handleCompletedMerge(_reply);
+ }
+}
+
+bool
+StripeBucketDBUpdater::onMergeBucketReply(
+ const std::shared_ptr<api::MergeBucketReply>& reply)
+{
+ auto replyGuard = std::make_shared<MergeReplyGuard>(_distributor_interface, reply);
+
+ // In case the merge was unsuccessful somehow, or some nodes weren't
+ // actually merged (source-only nodes?), we request the bucket info of the
+ // bucket again to make sure it's ok.
+ for (uint32_t i = 0; i < reply->getNodes().size(); i++) {
+ sendRequestBucketInfo(reply->getNodes()[i].index,
+ reply->getBucket(),
+ replyGuard);
+ }
+
+ return true;
+}
+
+void
+StripeBucketDBUpdater::enqueueRecheckUntilPendingStateEnabled(
+ uint16_t node,
+ const document::Bucket& bucket)
+{
+ LOG(spam,
+ "DB updater has a pending cluster state, enqueuing recheck "
+ "of bucket %s on node %u until state is done processing",
+ bucket.toString().c_str(),
+ node);
+ _enqueuedRechecks.insert(EnqueuedBucketRecheck(node, bucket));
+}
+
+void
+StripeBucketDBUpdater::sendAllQueuedBucketRechecks()
+{
+ LOG(spam,
+ "Sending %zu queued bucket rechecks previously received "
+ "via NotifyBucketChange commands",
+ _enqueuedRechecks.size());
+
+ for (const auto & entry :_enqueuedRechecks) {
+ sendRequestBucketInfo(entry.node, entry.bucket, std::shared_ptr<MergeReplyGuard>());
+ }
+ _enqueuedRechecks.clear();
+}
+
+bool
+StripeBucketDBUpdater::onNotifyBucketChange(
+ const std::shared_ptr<api::NotifyBucketChangeCommand>& cmd)
+{
+ // Immediately schedule reply to ensure it is sent.
+ _sender.sendReply(std::make_shared<api::NotifyBucketChangeReply>(*cmd));
+
+ if (!cmd->getBucketInfo().valid()) {
+ LOG(error,
+ "Received invalid bucket info for bucket %s from notify bucket "
+ "change! Not updating bucket.",
+ cmd->getBucketId().toString().c_str());
+ return true;
+ }
+ LOG(debug,
+ "Received notify bucket change from node %u for bucket %s with %s.",
+ cmd->getSourceIndex(),
+ cmd->getBucketId().toString().c_str(),
+ cmd->getBucketInfo().toString().c_str());
+
+ if (hasPendingClusterState()) {
+ enqueueRecheckUntilPendingStateEnabled(cmd->getSourceIndex(),
+ cmd->getBucket());
+ } else {
+ sendRequestBucketInfo(cmd->getSourceIndex(),
+ cmd->getBucket(),
+ std::shared_ptr<MergeReplyGuard>());
+ }
+
+ return true;
+}
+
+namespace {
+
+bool sort_pred(const BucketListMerger::BucketEntry& left,
+ const BucketListMerger::BucketEntry& right) {
+ return left.first < right.first;
+}
+
+}
+
+bool
+StripeBucketDBUpdater::onRequestBucketInfoReply(
+ const std::shared_ptr<api::RequestBucketInfoReply> & repl)
+{
+ if (pendingClusterStateAccepted(repl)) {
+ return true;
+ }
+ return processSingleBucketInfoReply(repl);
+}
+
+bool
+StripeBucketDBUpdater::pendingClusterStateAccepted(
+ const std::shared_ptr<api::RequestBucketInfoReply> & repl)
+{
+ if (_pendingClusterState.get()
+ && _pendingClusterState->onRequestBucketInfoReply(repl))
+ {
+ if (isPendingClusterStateCompleted()) {
+ processCompletedPendingClusterState();
+ }
+ return true;
+ }
+ LOG(spam,
+ "Reply %s was not accepted by pending cluster state",
+ repl->toString().c_str());
+ return false;
+}
+
+void
+StripeBucketDBUpdater::handleSingleBucketInfoFailure(
+ const std::shared_ptr<api::RequestBucketInfoReply>& repl,
+ const BucketRequest& req)
+{
+ LOG(debug, "Request bucket info failed towards node %d: error was %s",
+ req.targetNode, repl->getResult().toString().c_str());
+
+ if (req.bucket.getBucketId() != document::BucketId(0)) {
+ framework::MilliSecTime sendTime(_node_ctx.clock());
+ sendTime += framework::MilliSecTime(100);
+ _delayedRequests.emplace_back(sendTime, req);
+ }
+}
+
+void
+StripeBucketDBUpdater::resendDelayedMessages()
+{
+ if (_pendingClusterState) {
+ _pendingClusterState->resendDelayedMessages();
+ }
+ if (_delayedRequests.empty()) {
+ return; // Don't fetch time if not needed
+ }
+ framework::MilliSecTime currentTime(_node_ctx.clock());
+ while (!_delayedRequests.empty()
+ && currentTime >= _delayedRequests.front().first)
+ {
+ BucketRequest& req(_delayedRequests.front().second);
+ sendRequestBucketInfo(req.targetNode, req.bucket, std::shared_ptr<MergeReplyGuard>());
+ _delayedRequests.pop_front();
+ }
+}
+
+void
+StripeBucketDBUpdater::convertBucketInfoToBucketList(
+ const std::shared_ptr<api::RequestBucketInfoReply>& repl,
+ uint16_t targetNode, BucketListMerger::BucketList& newList)
+{
+ for (const auto & entry : repl->getBucketInfo()) {
+ LOG(debug, "Received bucket information from node %u for bucket %s: %s", targetNode,
+ entry._bucketId.toString().c_str(), entry._info.toString().c_str());
+
+ newList.emplace_back(entry._bucketId, entry._info);
+ }
+}
+
+void
+StripeBucketDBUpdater::mergeBucketInfoWithDatabase(
+ const std::shared_ptr<api::RequestBucketInfoReply>& repl,
+ const BucketRequest& req)
+{
+ BucketListMerger::BucketList existing;
+ BucketListMerger::BucketList newList;
+
+ findRelatedBucketsInDatabase(req.targetNode, req.bucket, existing);
+ convertBucketInfoToBucketList(repl, req.targetNode, newList);
+
+ std::sort(existing.begin(), existing.end(), sort_pred);
+ std::sort(newList.begin(), newList.end(), sort_pred);
+
+ BucketListMerger merger(newList, existing, req.timestamp);
+ updateDatabase(req.bucket.getBucketSpace(), req.targetNode, merger);
+}
+
+bool
+StripeBucketDBUpdater::processSingleBucketInfoReply(
+ const std::shared_ptr<api::RequestBucketInfoReply> & repl)
+{
+ auto iter = _sentMessages.find(repl->getMsgId());
+
+ // Has probably been deleted for some reason earlier.
+ if (iter == _sentMessages.end()) {
+ return true;
+ }
+
+ BucketRequest req = iter->second;
+ _sentMessages.erase(iter);
+
+ if (!_op_ctx.storage_node_is_up(req.bucket.getBucketSpace(), req.targetNode)) {
+ // Ignore replies from nodes that are down.
+ return true;
+ }
+ if (repl->getResult().getResult() != api::ReturnCode::OK) {
+ handleSingleBucketInfoFailure(repl, req);
+ return true;
+ }
+ mergeBucketInfoWithDatabase(repl, req);
+ return true;
+}
+
+void
+StripeBucketDBUpdater::addBucketInfoForNode(
+ const BucketDatabase::Entry& e,
+ uint16_t node,
+ BucketListMerger::BucketList& existing) const
+{
+ const BucketCopy* copy(e->getNode(node));
+ if (copy) {
+ existing.emplace_back(e.getBucketId(), copy->getBucketInfo());
+ }
+}
+
+void
+StripeBucketDBUpdater::findRelatedBucketsInDatabase(uint16_t node, const document::Bucket& bucket,
+ BucketListMerger::BucketList& existing)
+{
+ auto &distributorBucketSpace(_op_ctx.bucket_space_repo().get(bucket.getBucketSpace()));
+ std::vector<BucketDatabase::Entry> entries;
+ distributorBucketSpace.getBucketDatabase().getAll(bucket.getBucketId(), entries);
+
+ for (const BucketDatabase::Entry & entry : entries) {
+ addBucketInfoForNode(entry, node, existing);
+ }
+}
+
+void
+StripeBucketDBUpdater::updateDatabase(document::BucketSpace bucketSpace, uint16_t node, BucketListMerger& merger)
+{
+ for (const document::BucketId & bucketId : merger.getRemovedEntries()) {
+ document::Bucket bucket(bucketSpace, bucketId);
+ _op_ctx.remove_node_from_bucket_database(bucket, node);
+ }
+
+ for (const BucketListMerger::BucketEntry& entry : merger.getAddedEntries()) {
+ document::Bucket bucket(bucketSpace, entry.first);
+ _op_ctx.update_bucket_database(
+ bucket,
+ BucketCopy(merger.getTimestamp(), node, entry.second),
+ DatabaseUpdate::CREATE_IF_NONEXISTING);
+ }
+}
+
+bool
+StripeBucketDBUpdater::isPendingClusterStateCompleted() const
+{
+ return _pendingClusterState.get() && _pendingClusterState->done();
+}
+
+void
+StripeBucketDBUpdater::processCompletedPendingClusterState()
+{
+ if (_pendingClusterState->isDeferred()) {
+ LOG(debug, "Deferring completion of pending cluster state version %u until explicitly activated",
+ _pendingClusterState->clusterStateVersion());
+ assert(_pendingClusterState->hasCommand()); // Deferred transitions should only ever be created by state commands.
+ // Sending down SetSystemState command will reach the state manager and a reply
+ // will be auto-sent back to the cluster controller in charge. Once this happens,
+ // it will send an explicit activation command once all distributors have reported
+ // that their pending cluster states have completed.
+ // A booting distributor will treat itself as "system Up" before the state has actually
+ // taken effect via activation. External operation handler will keep operations from
+ // actually being scheduled until state has been activated. The external operation handler
+ // needs to be explicitly aware of the case where no state has yet been activated.
+ _distributor_interface.getMessageSender().sendDown(
+ _pendingClusterState->getCommand());
+ _pendingClusterState->clearCommand();
+ return;
+ }
+ // Distribution config change or non-deferred cluster state. Immediately activate
+ // the pending state without being told to do so explicitly.
+ activatePendingClusterState();
+}
+
+void
+StripeBucketDBUpdater::activatePendingClusterState()
+{
+ framework::MilliSecTimer process_timer(_node_ctx.clock());
+
+ _pendingClusterState->mergeIntoBucketDatabases();
+ maybe_inject_simulated_db_merging_delay();
+
+ if (_pendingClusterState->isVersionedTransition()) {
+ LOG(debug, "Activating pending cluster state version %u", _pendingClusterState->clusterStateVersion());
+ enableCurrentClusterStateBundleInDistributor();
+ if (_pendingClusterState->hasCommand()) {
+ _distributor_interface.getMessageSender().sendDown(
+ _pendingClusterState->getCommand());
+ }
+ addCurrentStateToClusterStateHistory();
+ } else {
+ LOG(debug, "Activating pending distribution config");
+ // TODO distribution changes cannot currently be deferred as they are not
+ // initiated by the cluster controller!
+ _distributor_interface.notifyDistributionChangeEnabled();
+ }
+
+ update_read_snapshot_after_activation(_pendingClusterState->getNewClusterStateBundle());
+ _pendingClusterState.reset();
+ _outdatedNodesMap.clear();
+ _op_ctx.bucket_space_repo().clear_pending_cluster_state_bundle(); // TODO also read only bucket space..?
+ sendAllQueuedBucketRechecks();
+ completeTransitionTimer();
+ clearReadOnlyBucketRepoDatabases();
+
+ _distributor_interface.getMetrics().activate_cluster_state_processing_time.addValue(
+ process_timer.getElapsedTimeAsDouble());
+}
+
+void
+StripeBucketDBUpdater::enableCurrentClusterStateBundleInDistributor()
+{
+ const lib::ClusterStateBundle& state(
+ _pendingClusterState->getNewClusterStateBundle());
+
+ LOG(debug,
+ "BucketDBUpdater finished processing state %s",
+ state.getBaselineClusterState()->toString().c_str());
+
+ _distributor_interface.enableClusterStateBundle(state);
+}
+
+void StripeBucketDBUpdater::simulate_cluster_state_bundle_activation(const lib::ClusterStateBundle& activated_state) {
+ update_read_snapshot_after_activation(activated_state);
+ _distributor_interface.enableClusterStateBundle(activated_state);
+}
+
+void
+StripeBucketDBUpdater::addCurrentStateToClusterStateHistory()
+{
+ _history.push_back(_pendingClusterState->getSummary());
+
+ if (_history.size() > 50) {
+ _history.pop_front();
+ }
+}
+
+vespalib::string
+StripeBucketDBUpdater::getReportContentType(const framework::HttpUrlPath&) const
+{
+ return "text/xml";
+}
+
+namespace {
+
+const vespalib::string ALL = "all";
+const vespalib::string BUCKETDB = "bucketdb";
+const vespalib::string BUCKETDB_UPDATER = "Bucket Database Updater";
+
+}
+
+void
+StripeBucketDBUpdater::BucketRequest::print_xml_tag(vespalib::xml::XmlOutputStream &xos, const vespalib::xml::XmlAttribute &timestampAttribute) const
+{
+ using namespace vespalib::xml;
+ xos << XmlTag("storagenode")
+ << XmlAttribute("index", targetNode);
+ xos << XmlAttribute("bucketspace", bucket.getBucketSpace().getId(), XmlAttribute::HEX);
+ if (bucket.getBucketId().getRawId() == 0) {
+ xos << XmlAttribute("bucket", ALL);
+ } else {
+ xos << XmlAttribute("bucket", bucket.getBucketId().getId(), XmlAttribute::HEX);
+ }
+ xos << timestampAttribute << XmlEndTag();
+}
+
+bool
+StripeBucketDBUpdater::reportStatus(std::ostream& out,
+ const framework::HttpUrlPath& path) const
+{
+ using namespace vespalib::xml;
+ XmlOutputStream xos(out);
+ // FIXME(vekterli): have to do this manually since we cannot inherit
+ // directly from XmlStatusReporter due to data races when BucketDBUpdater
+ // gets status requests directly.
+ xos << XmlTag("status")
+ << XmlAttribute("id", BUCKETDB)
+ << XmlAttribute("name", BUCKETDB_UPDATER);
+ reportXmlStatus(xos, path);
+ xos << XmlEndTag();
+ return true;
+}
+
+vespalib::string
+StripeBucketDBUpdater::reportXmlStatus(vespalib::xml::XmlOutputStream& xos,
+ const framework::HttpUrlPath&) const
+{
+ using namespace vespalib::xml;
+ xos << XmlTag("bucketdb")
+ << XmlTag("systemstate_active")
+ << XmlContent(_op_ctx.cluster_state_bundle().getBaselineClusterState()->toString())
+ << XmlEndTag();
+ if (_pendingClusterState) {
+ xos << *_pendingClusterState;
+ }
+ xos << XmlTag("systemstate_history");
+ for (auto i(_history.rbegin()), e(_history.rend()); i != e; ++i) {
+ xos << XmlTag("change")
+ << XmlAttribute("from", i->_prevClusterState)
+ << XmlAttribute("to", i->_newClusterState)
+ << XmlAttribute("processingtime", i->_processingTime)
+ << XmlEndTag();
+ }
+ xos << XmlEndTag()
+ << XmlTag("single_bucket_requests");
+ for (const auto & entry : _sentMessages)
+ {
+ entry.second.print_xml_tag(xos, XmlAttribute("sendtimestamp", entry.second.timestamp));
+ }
+ xos << XmlEndTag()
+ << XmlTag("delayed_single_bucket_requests");
+ for (const auto & entry : _delayedRequests)
+ {
+ entry.second.print_xml_tag(xos, XmlAttribute("resendtimestamp", entry.first.getTime()));
+ }
+ xos << XmlEndTag() << XmlEndTag();
+ return "";
+}
+
+StripeBucketDBUpdater::MergingNodeRemover::MergingNodeRemover(
+ const lib::ClusterState& oldState,
+ const lib::ClusterState& s,
+ uint16_t localIndex,
+ const lib::Distribution& distribution,
+ const char* upStates,
+ bool track_non_owned_entries)
+ : _oldState(oldState),
+ _state(s),
+ _available_nodes(),
+ _nonOwnedBuckets(),
+ _removed_buckets(0),
+ _removed_documents(0),
+ _localIndex(localIndex),
+ _distribution(distribution),
+ _upStates(upStates),
+ _track_non_owned_entries(track_non_owned_entries),
+ _cachedDecisionSuperbucket(UINT64_MAX),
+ _cachedOwned(false)
+{
+ // TODO intersection of cluster state and distribution config
+ const uint16_t storage_count = s.getNodeCount(lib::NodeType::STORAGE);
+ _available_nodes.resize(storage_count);
+ for (uint16_t i = 0; i < storage_count; ++i) {
+ if (s.getNodeState(lib::Node(lib::NodeType::STORAGE, i)).getState().oneOf(_upStates)) {
+ _available_nodes[i] = true;
+ }
+ }
+}
+
+void
+StripeBucketDBUpdater::MergingNodeRemover::logRemove(const document::BucketId& bucketId, const char* msg) const
+{
+ LOG(spam, "Removing bucket %s: %s", bucketId.toString().c_str(), msg);
+}
+
+namespace {
+
+uint64_t superbucket_from_id(const document::BucketId& id, uint16_t distribution_bits) noexcept {
+ // The n LSBs of the bucket ID contain the superbucket number. Mask off the rest.
+ return id.getRawId() & ~(UINT64_MAX << distribution_bits);
+}
+
+}
+
+bool
+StripeBucketDBUpdater::MergingNodeRemover::distributorOwnsBucket(
+ const document::BucketId& bucketId) const
+{
+ // TODO "no distributors available" case is the same for _all_ buckets; cache once in constructor.
+ // TODO "too few bits used" case can be cheaply checked without needing exception
+ try {
+ const auto bits = _state.getDistributionBitCount();
+ const auto this_superbucket = superbucket_from_id(bucketId, bits);
+ if (_cachedDecisionSuperbucket == this_superbucket) {
+ if (!_cachedOwned) {
+ logRemove(bucketId, "bucket now owned by another distributor (cached)");
+ }
+ return _cachedOwned;
+ }
+
+ uint16_t distributor = _distribution.getIdealDistributorNode(_state, bucketId, "uim");
+ _cachedDecisionSuperbucket = this_superbucket;
+ _cachedOwned = (distributor == _localIndex);
+ if (!_cachedOwned) {
+ logRemove(bucketId, "bucket now owned by another distributor");
+ return false;
+ }
+ return true;
+ } catch (lib::TooFewBucketBitsInUseException& exc) {
+ logRemove(bucketId, "using too few distribution bits now");
+ } catch (lib::NoDistributorsAvailableException& exc) {
+ logRemove(bucketId, "no distributors are available");
+ }
+ return false;
+}
+
+void
+StripeBucketDBUpdater::MergingNodeRemover::setCopiesInEntry(
+ BucketDatabase::Entry& e,
+ const std::vector<BucketCopy>& copies) const
+{
+ e->clear();
+
+ std::vector<uint16_t> order =
+ _distribution.getIdealStorageNodes(_state, e.getBucketId(), _upStates);
+
+ e->addNodes(copies, order);
+
+ LOG(spam, "Changed %s", e->toString().c_str());
+}
+
+bool
+StripeBucketDBUpdater::MergingNodeRemover::has_unavailable_nodes(const storage::BucketDatabase::Entry& e) const
+{
+ const uint16_t n_nodes = e->getNodeCount();
+ for (uint16_t i = 0; i < n_nodes; i++) {
+ const uint16_t node_idx = e->getNodeRef(i).getNode();
+ if (!storage_node_is_available(node_idx)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+BucketDatabase::MergingProcessor::Result
+StripeBucketDBUpdater::MergingNodeRemover::merge(storage::BucketDatabase::Merger& merger)
+{
+ document::BucketId bucketId(merger.bucket_id());
+ LOG(spam, "Check for remove: bucket %s", bucketId.toString().c_str());
+ if (!distributorOwnsBucket(bucketId)) {
+ // TODO remove in favor of DB snapshotting
+ if (_track_non_owned_entries) {
+ _nonOwnedBuckets.emplace_back(merger.current_entry());
+ }
+ return Result::Skip;
+ }
+ auto& e = merger.current_entry();
+
+ if (e->getNodeCount() == 0) { // TODO when should this edge ever trigger?
+ return Result::Skip;
+ }
+
+ if (!has_unavailable_nodes(e)) {
+ return Result::KeepUnchanged;
+ }
+
+ std::vector<BucketCopy> remainingCopies;
+ for (uint16_t i = 0; i < e->getNodeCount(); i++) {
+ const uint16_t node_idx = e->getNodeRef(i).getNode();
+ if (storage_node_is_available(node_idx)) {
+ remainingCopies.push_back(e->getNodeRef(i));
+ }
+ }
+
+ if (remainingCopies.empty()) {
+ ++_removed_buckets;
+ _removed_documents += e->getHighestDocumentCount();
+ return Result::Skip;
+ } else {
+ setCopiesInEntry(e, remainingCopies);
+ return Result::Update;
+ }
+}
+
+bool
+StripeBucketDBUpdater::MergingNodeRemover::storage_node_is_available(uint16_t index) const noexcept
+{
+ return ((index < _available_nodes.size()) && _available_nodes[index]);
+}
+
+StripeBucketDBUpdater::MergingNodeRemover::~MergingNodeRemover()
+{
+ if (_removed_buckets != 0) {
+ LOGBM(info, "After cluster state change %s, %zu buckets no longer "
+ "have available replicas. %zu documents in these buckets will "
+ "be unavailable until nodes come back up",
+ _oldState.getTextualDifference(_state).c_str(),
+ _removed_buckets, _removed_documents);
+ }
+}
+
+} // distributor
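
For reference, the BucketDatabase::MergingProcessor pattern used by DbMerger, ReadOnlyDbMergingInserter and MergingNodeRemover above boils down to the shape sketched here (a minimal illustration inferred from the call sites in this file; the class is hypothetical and not part of this patch):

#include <vespa/storage/bucketdb/bucketdatabase.h>

namespace storage::distributor {

// Hypothetical processor: drop every DB entry that has no replicas, keep the rest.
// merge() is called for each existing entry; insert_remaining_at_end() is called
// once afterwards and lets the processor append trailing entries (as DbMerger does).
class DropEmptyEntriesProcessor : public BucketDatabase::MergingProcessor {
public:
    Result merge(BucketDatabase::Merger& merger) override {
        if (merger.current_entry()->getNodeCount() == 0) {
            return Result::Skip;      // skipped entries are not kept (cf. MergingNodeRemover)
        }
        return Result::KeepUnchanged; // leave the entry untouched
    }
    void insert_remaining_at_end(BucketDatabase::TrailingInserter&) override {
        // Nothing to append in this sketch.
    }
};

}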
diff --git a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h
new file mode 100644
index 00000000000..decaa964a59
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h
@@ -0,0 +1,286 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include "bucketlistmerger.h"
+#include "distributor_stripe_component.h"
+#include "distributormessagesender.h"
+#include "messageguard.h"
+#include "operation_routing_snapshot.h"
+#include "outdated_nodes_map.h"
+#include "pendingclusterstate.h"
+#include "potential_data_loss_report.h"
+#include <vespa/document/bucket/bucket.h>
+#include <vespa/storage/common/storagelink.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <vespa/storageapi/messageapi/messagehandler.h>
+#include <vespa/storageframework/generic/clock/timer.h>
+#include <vespa/storageframework/generic/status/statusreporter.h>
+#include <vespa/vdslib/state/clusterstate.h>
+#include <atomic>
+#include <list>
+#include <mutex>
+
+namespace vespalib::xml {
+class XmlOutputStream;
+class XmlAttribute;
+}
+
+namespace storage::distributor {
+
+class DistributorStripeInterface;
+class BucketSpaceDistributionContext;
+
+class StripeBucketDBUpdater final
+ : public framework::StatusReporter,
+ public api::MessageHandler
+{
+public:
+ using OutdatedNodesMap = dbtransition::OutdatedNodesMap;
+ StripeBucketDBUpdater(DistributorStripeInterface& owner,
+ DistributorBucketSpaceRepo& bucketSpaceRepo,
+ DistributorBucketSpaceRepo& readOnlyBucketSpaceRepo,
+ DistributorMessageSender& sender,
+ DistributorComponentRegister& compReg,
+ bool use_legacy_mode);
+ ~StripeBucketDBUpdater() override;
+
+ void flush();
+ const lib::ClusterState* pendingClusterStateOrNull(const document::BucketSpace&) const;
+ void recheckBucketInfo(uint32_t nodeIdx, const document::Bucket& bucket);
+
+ bool onSetSystemState(const std::shared_ptr<api::SetSystemStateCommand>& cmd) override;
+ bool onActivateClusterStateVersion(const std::shared_ptr<api::ActivateClusterStateVersionCommand>& cmd) override;
+ bool onRequestBucketInfoReply(const std::shared_ptr<api::RequestBucketInfoReply> & repl) override;
+ bool onMergeBucketReply(const std::shared_ptr<api::MergeBucketReply>& reply) override;
+ bool onNotifyBucketChange(const std::shared_ptr<api::NotifyBucketChangeCommand>&) override;
+ void resendDelayedMessages();
+ void storageDistributionChanged();
+
+ vespalib::string reportXmlStatus(vespalib::xml::XmlOutputStream&, const framework::HttpUrlPath&) const;
+ vespalib::string getReportContentType(const framework::HttpUrlPath&) const override;
+ bool reportStatus(std::ostream&, const framework::HttpUrlPath&) const override;
+ void print(std::ostream& out, bool verbose, const std::string& indent) const;
+ const DistributorNodeContext& node_context() const { return _node_ctx; }
+ DistributorOperationContext& operation_context() { return _op_ctx; }
+
+ /**
+ * Returns whether the current PendingClusterState indicates that there has
+ * been a transfer of bucket ownership amongst the distributors in the
+ * cluster. This method only makes sense to call when _pendingClusterState
+ * is active, such as from within an enableClusterState() call.
+ */
+ bool bucketOwnershipHasChanged() const {
+ return ((_pendingClusterState.get() != nullptr)
+ && _pendingClusterState->hasBucketOwnershipTransfer());
+ }
+ void set_stale_reads_enabled(bool enabled) noexcept {
+ _stale_reads_enabled.store(enabled, std::memory_order_relaxed);
+ }
+ bool stale_reads_enabled() const noexcept {
+ return _stale_reads_enabled.load(std::memory_order_relaxed);
+ }
+
+ OperationRoutingSnapshot read_snapshot_for_bucket(const document::Bucket&) const;
+private:
+ class MergeReplyGuard {
+ public:
+ MergeReplyGuard(DistributorStripeInterface& distributor_interface, const std::shared_ptr<api::MergeBucketReply>& reply) noexcept
+ : _distributor_interface(distributor_interface), _reply(reply) {}
+
+ ~MergeReplyGuard();
+
+ // Used when we're flushing and simply want to drop the reply rather
+ // than send it down
+ void resetReply() { _reply.reset(); }
+ private:
+ DistributorStripeInterface& _distributor_interface;
+ std::shared_ptr<api::MergeBucketReply> _reply;
+ };
+
+ struct BucketRequest {
+ BucketRequest()
+ : targetNode(0), bucket(), timestamp(0) {}
+
+ BucketRequest(uint16_t t, uint64_t currentTime, const document::Bucket& b,
+ const std::shared_ptr<MergeReplyGuard>& guard)
+ : targetNode(t),
+ bucket(b),
+ timestamp(currentTime),
+ _mergeReplyGuard(guard) {}
+
+ void print_xml_tag(vespalib::xml::XmlOutputStream &xos, const vespalib::xml::XmlAttribute &timestampAttribute) const;
+ uint16_t targetNode;
+ document::Bucket bucket;
+ uint64_t timestamp;
+
+ std::shared_ptr<MergeReplyGuard> _mergeReplyGuard;
+ };
+
+ struct EnqueuedBucketRecheck {
+ uint16_t node;
+ document::Bucket bucket;
+
+ EnqueuedBucketRecheck() : node(0), bucket() {}
+
+ EnqueuedBucketRecheck(uint16_t _node, const document::Bucket& _bucket)
+ : node(_node),
+ bucket(_bucket)
+ {}
+
+ bool operator<(const EnqueuedBucketRecheck& o) const {
+ if (node != o.node) {
+ return node < o.node;
+ }
+ return bucket < o.bucket;
+ }
+ bool operator==(const EnqueuedBucketRecheck& o) const {
+ return node == o.node && bucket == o.bucket;
+ }
+ };
+
+ friend class DistributorTestUtil;
+ // TODO refactor and rewire to avoid needing this direct meddling
+ friend class LegacySingleStripeAccessGuard;
+ // Only to be used by tests that want to ensure both the BucketDBUpdater _and_ the Distributor
+ // components agree on the currently active cluster state bundle.
+ // Transitively invokes Distributor::enableClusterStateBundle
+ void simulate_cluster_state_bundle_activation(const lib::ClusterStateBundle& activated_state);
+
+ bool shouldDeferStateEnabling() const noexcept;
+ bool hasPendingClusterState() const;
+ bool pendingClusterStateAccepted(const std::shared_ptr<api::RequestBucketInfoReply>& repl);
+ bool processSingleBucketInfoReply(const std::shared_ptr<api::RequestBucketInfoReply>& repl);
+ void handleSingleBucketInfoFailure(const std::shared_ptr<api::RequestBucketInfoReply>& repl,
+ const BucketRequest& req);
+ bool isPendingClusterStateCompleted() const;
+ void processCompletedPendingClusterState();
+ void activatePendingClusterState();
+ void mergeBucketInfoWithDatabase(const std::shared_ptr<api::RequestBucketInfoReply>& repl,
+ const BucketRequest& req);
+ void convertBucketInfoToBucketList(const std::shared_ptr<api::RequestBucketInfoReply>& repl,
+ uint16_t targetNode, BucketListMerger::BucketList& newList);
+ void sendRequestBucketInfo(uint16_t node, const document::Bucket& bucket,
+ const std::shared_ptr<MergeReplyGuard>& mergeReply);
+ void addBucketInfoForNode(const BucketDatabase::Entry& e, uint16_t node,
+ BucketListMerger::BucketList& existing) const;
+ void ensureTransitionTimerStarted();
+ void completeTransitionTimer();
+ void clearReadOnlyBucketRepoDatabases();
+ /**
+ * Adds all buckets in the bucket database that are either contained in
+ * the given bucket, or that contain it, and that have copies on the
+ * given node.
+ */
+ void findRelatedBucketsInDatabase(uint16_t node, const document::Bucket& bucket,
+ BucketListMerger::BucketList& existing);
+
+ /**
+ Updates the bucket database from the information generated by the given
+ bucket list merger.
+ */
+ void updateDatabase(document::BucketSpace bucketSpace, uint16_t node, BucketListMerger& merger);
+
+ void updateState(const lib::ClusterState& oldState, const lib::ClusterState& newState);
+
+ void update_read_snapshot_before_db_pruning();
+ void removeSuperfluousBuckets(const lib::ClusterStateBundle& newState,
+ bool is_distribution_config_change);
+ void update_read_snapshot_after_db_pruning(const lib::ClusterStateBundle& new_state);
+ void update_read_snapshot_after_activation(const lib::ClusterStateBundle& activated_state);
+
+ // TODO STRIPE only called when stripe guard is held
+ PotentialDataLossReport remove_superfluous_buckets(document::BucketSpace bucket_space,
+ const lib::ClusterState& new_state,
+ bool is_distribution_change);
+ void merge_entries_into_db(document::BucketSpace bucket_space,
+ api::Timestamp gathered_at_timestamp,
+ const lib::Distribution& distribution,
+ const lib::ClusterState& new_state,
+ const char* storage_up_states,
+ const std::unordered_set<uint16_t>& outdated_nodes,
+ const std::vector<dbtransition::Entry>& entries);
+
+ void replyToPreviousPendingClusterStateIfAny();
+ void replyToActivationWithActualVersion(
+ const api::ActivateClusterStateVersionCommand& cmd,
+ uint32_t actualVersion);
+
+ void enableCurrentClusterStateBundleInDistributor();
+ void addCurrentStateToClusterStateHistory();
+ void enqueueRecheckUntilPendingStateEnabled(uint16_t node, const document::Bucket&);
+ void sendAllQueuedBucketRechecks();
+
+ void maybe_inject_simulated_db_pruning_delay();
+ void maybe_inject_simulated_db_merging_delay();
+
+ /**
+ Removes all bucket replicas that reside on nodes that are down.
+ */
+ class MergingNodeRemover : public BucketDatabase::MergingProcessor {
+ public:
+ MergingNodeRemover(const lib::ClusterState& oldState,
+ const lib::ClusterState& s,
+ uint16_t localIndex,
+ const lib::Distribution& distribution,
+ const char* upStates,
+ bool track_non_owned_entries);
+ ~MergingNodeRemover() override;
+
+ Result merge(BucketDatabase::Merger&) override;
+ void logRemove(const document::BucketId& bucketId, const char* msg) const;
+ bool distributorOwnsBucket(const document::BucketId&) const;
+
+ const std::vector<BucketDatabase::Entry>& getNonOwnedEntries() const noexcept {
+ return _nonOwnedBuckets;
+ }
+ size_t removed_buckets() const noexcept { return _removed_buckets; }
+ size_t removed_documents() const noexcept { return _removed_documents; }
+ private:
+ void setCopiesInEntry(BucketDatabase::Entry& e, const std::vector<BucketCopy>& copies) const;
+
+ bool has_unavailable_nodes(const BucketDatabase::Entry&) const;
+ bool storage_node_is_available(uint16_t index) const noexcept;
+
+ const lib::ClusterState _oldState;
+ const lib::ClusterState _state;
+ std::vector<bool> _available_nodes;
+ std::vector<BucketDatabase::Entry> _nonOwnedBuckets;
+ size_t _removed_buckets;
+ size_t _removed_documents;
+
+ uint16_t _localIndex;
+ const lib::Distribution& _distribution;
+ const char* _upStates;
+ bool _track_non_owned_entries;
+
+ mutable uint64_t _cachedDecisionSuperbucket;
+ mutable bool _cachedOwned;
+ };
+
+ DistributorStripeComponent _distributorComponent;
+ const DistributorNodeContext& _node_ctx;
+ DistributorOperationContext& _op_ctx;
+ DistributorStripeInterface& _distributor_interface;
+ std::deque<std::pair<framework::MilliSecTime, BucketRequest> > _delayedRequests;
+ std::map<uint64_t, BucketRequest> _sentMessages;
+ std::unique_ptr<PendingClusterState> _pendingClusterState;
+ std::list<PendingClusterState::Summary> _history;
+ DistributorMessageSender& _sender;
+ std::set<EnqueuedBucketRecheck> _enqueuedRechecks;
+ OutdatedNodesMap _outdatedNodesMap;
+ framework::MilliSecTimer _transitionTimer;
+ std::atomic<bool> _stale_reads_enabled;
+ using DistributionContexts = std::unordered_map<document::BucketSpace,
+ std::shared_ptr<BucketSpaceDistributionContext>,
+ document::BucketSpace::hash>;
+ DistributionContexts _active_distribution_contexts;
+ using DbGuards = std::unordered_map<document::BucketSpace,
+ std::shared_ptr<BucketDatabase::ReadGuard>,
+ document::BucketSpace::hash>;
+ DbGuards _explicit_transition_read_guard;
+ mutable std::mutex _distribution_context_mutex;
+ bool _use_legacy_mode;
+};
+
+}
diff --git a/storage/src/vespa/storage/storageserver/distributornode.cpp b/storage/src/vespa/storage/storageserver/distributornode.cpp
index fbbc7c366fd..8f4f0422f44 100644
--- a/storage/src/vespa/storage/storageserver/distributornode.cpp
+++ b/storage/src/vespa/storage/storageserver/distributornode.cpp
@@ -19,7 +19,7 @@ DistributorNode::DistributorNode(
const config::ConfigUri& configUri,
DistributorNodeContext& context,
ApplicationGenerationFetcher& generationFetcher,
- NeedActiveState activeState,
+ uint32_t num_distributor_stripes,
StorageLink::UP communicationManager,
std::unique_ptr<IStorageChainBuilder> storage_chain_builder)
: StorageNode(configUri, context, generationFetcher,
@@ -29,7 +29,7 @@ DistributorNode::DistributorNode(
_context(context),
_lastUniqueTimestampRequested(0),
_uniqueTimestampCounter(0),
- _manageActiveBucketCopies(activeState == NEED_ACTIVE_BUCKET_STATES_SET),
+ _num_distributor_stripes(num_distributor_stripes),
_retrievedCommunicationManager(std::move(communicationManager))
{
if (storage_chain_builder) {
@@ -101,12 +101,13 @@ DistributorNode::createChain(IStorageChainBuilder &builder)
// extends to the end of the process.
builder.add(std::make_unique<storage::distributor::Distributor>
(dcr, *_node_identity, *_threadPool, getDoneInitializeHandler(),
- _manageActiveBucketCopies,
+ _num_distributor_stripes,
stateManager->getHostInfo()));
builder.add(std::move(stateManager));
}
+// FIXME STRIPE not thread safe!!
api::Timestamp
DistributorNode::getUniqueTimestamp()
{
diff --git a/storage/src/vespa/storage/storageserver/distributornode.h b/storage/src/vespa/storage/storageserver/distributornode.h
index 17636efc7d3..267d4400ac7 100644
--- a/storage/src/vespa/storage/storageserver/distributornode.h
+++ b/storage/src/vespa/storage/storageserver/distributornode.h
@@ -25,21 +25,16 @@ class DistributorNode
DistributorNodeContext& _context;
uint64_t _lastUniqueTimestampRequested;
uint32_t _uniqueTimestampCounter;
- bool _manageActiveBucketCopies;
+ uint32_t _num_distributor_stripes;
std::unique_ptr<StorageLink> _retrievedCommunicationManager;
public:
typedef std::unique_ptr<DistributorNode> UP;
- enum NeedActiveState
- {
- NEED_ACTIVE_BUCKET_STATES_SET,
- NO_NEED_FOR_ACTIVE_STATES
- };
DistributorNode(const config::ConfigUri & configUri,
DistributorNodeContext&,
ApplicationGenerationFetcher& generationFetcher,
- NeedActiveState,
+ uint32_t num_distributor_stripes,
std::unique_ptr<StorageLink> communicationManager,
std::unique_ptr<IStorageChainBuilder> storage_chain_builder);
~DistributorNode() override;
diff --git a/storageapi/src/vespa/storageapi/message/bucket.cpp b/storageapi/src/vespa/storageapi/message/bucket.cpp
index fdc19d63134..2e2ca82079d 100644
--- a/storageapi/src/vespa/storageapi/message/bucket.cpp
+++ b/storageapi/src/vespa/storageapi/message/bucket.cpp
@@ -509,7 +509,8 @@ std::ostream& operator<<(std::ostream& out, const RequestBucketInfoReply::Entry&
RequestBucketInfoReply::RequestBucketInfoReply(const RequestBucketInfoCommand& cmd)
: StorageReply(cmd),
- _buckets()
+ _buckets(),
+ _full_bucket_fetch(cmd.hasSystemState())
{ }
RequestBucketInfoReply::~RequestBucketInfoReply() = default;
@@ -519,6 +520,9 @@ RequestBucketInfoReply::print(std::ostream& out, bool verbose,
const std::string& indent) const
{
out << "RequestBucketInfoReply(" << _buckets.size();
+ if (_full_bucket_fetch) {
+ out << ", full fetch";
+ }
if (verbose) {
out << "\n" << indent << " ";
std::copy(_buckets.begin(), _buckets.end(),
diff --git a/storageapi/src/vespa/storageapi/message/bucket.h b/storageapi/src/vespa/storageapi/message/bucket.h
index cde440b91de..61766fb1f11 100644
--- a/storageapi/src/vespa/storageapi/message/bucket.h
+++ b/storageapi/src/vespa/storageapi/message/bucket.h
@@ -338,9 +338,8 @@ class RequestBucketInfoCommand : public StorageCommand {
vespalib::string _distributionHash;
public:
- explicit RequestBucketInfoCommand(
- document::BucketSpace bucketSpace,
- const std::vector<document::BucketId>& buckets);
+ RequestBucketInfoCommand(document::BucketSpace bucketSpace,
+ const std::vector<document::BucketId>& buckets);
RequestBucketInfoCommand(document::BucketSpace bucketSpace,
uint16_t distributor,
const lib::ClusterState& state,
@@ -388,6 +387,7 @@ public:
typedef vespalib::Array<Entry> EntryVector;
private:
EntryVector _buckets;
+ bool _full_bucket_fetch;
public:
@@ -395,6 +395,7 @@ public:
~RequestBucketInfoReply();
const EntryVector & getBucketInfo() const { return _buckets; }
EntryVector & getBucketInfo() { return _buckets; }
+ [[nodiscard]] bool full_bucket_fetch() const noexcept { return _full_bucket_fetch; }
void print(std::ostream& out, bool verbose, const std::string& indent) const override;
DECLARE_STORAGEREPLY(RequestBucketInfoReply, onRequestBucketInfoReply)
};
diff --git a/storageserver/src/vespa/storageserver/app/distributorprocess.cpp b/storageserver/src/vespa/storageserver/app/distributorprocess.cpp
index e448c7c68bc..ede7fd1c9c0 100644
--- a/storageserver/src/vespa/storageserver/app/distributorprocess.cpp
+++ b/storageserver/src/vespa/storageserver/app/distributorprocess.cpp
@@ -12,7 +12,11 @@ namespace storage {
DistributorProcess::DistributorProcess(const config::ConfigUri & configUri)
: Process(configUri),
- _activeFlag(DistributorNode::NO_NEED_FOR_ACTIVE_STATES),
+ _context(),
+ _num_distributor_stripes(0), // TODO STRIPE: change default when legacy single stripe mode is removed
+ _node(),
+ _distributorConfigHandler(),
+ _visitDispatcherConfigHandler(),
_storage_chain_builder()
{
}
@@ -31,15 +35,12 @@ DistributorProcess::shutdown()
void
DistributorProcess::setupConfig(milliseconds subscribeTimeout)
{
- using vespa::config::content::core::StorServerConfig;
using vespa::config::content::core::StorDistributormanagerConfig;
using vespa::config::content::core::StorVisitordispatcherConfig;
- auto stor_config = config::ConfigGetter<StorServerConfig>::getConfig(
+ auto distr_cfg = config::ConfigGetter<StorDistributormanagerConfig>::getConfig(
_configUri.getConfigId(), _configUri.getContext(), subscribeTimeout);
- if (stor_config->persistenceProvider.type != StorServerConfig::PersistenceProvider::Type::STORAGE) {
- _activeFlag = DistributorNode::NEED_ACTIVE_BUCKET_STATES_SET;
- }
+ _num_distributor_stripes = distr_cfg->numDistributorStripes;
_distributorConfigHandler = _configSubscriber.subscribe<StorDistributormanagerConfig>(_configUri.getConfigId(), subscribeTimeout);
_visitDispatcherConfigHandler = _configSubscriber.subscribe<StorVisitordispatcherConfig>(_configUri.getConfigId(), subscribeTimeout);
Process::setupConfig(subscribeTimeout);
@@ -75,7 +76,7 @@ DistributorProcess::configUpdated()
void
DistributorProcess::createNode()
{
- _node = std::make_unique<DistributorNode>(_configUri, _context, *this, _activeFlag, StorageLink::UP(), std::move(_storage_chain_builder));
+ _node = std::make_unique<DistributorNode>(_configUri, _context, *this, _num_distributor_stripes, StorageLink::UP(), std::move(_storage_chain_builder));
_node->handleConfigChange(*_distributorConfigHandler->getConfig());
_node->handleConfigChange(*_visitDispatcherConfigHandler->getConfig());
}
diff --git a/storageserver/src/vespa/storageserver/app/distributorprocess.h b/storageserver/src/vespa/storageserver/app/distributorprocess.h
index 21e7e9b534a..f07082c0d21 100644
--- a/storageserver/src/vespa/storageserver/app/distributorprocess.h
+++ b/storageserver/src/vespa/storageserver/app/distributorprocess.h
@@ -16,7 +16,7 @@ class IStorageChainBuilder;
class DistributorProcess final : public Process {
DistributorNodeContext _context;
- DistributorNode::NeedActiveState _activeFlag;
+ uint32_t _num_distributor_stripes;
DistributorNode::UP _node;
config::ConfigHandle<vespa::config::content::core::StorDistributormanagerConfig>::UP
_distributorConfigHandler;
diff --git a/vespa-athenz/pom.xml b/vespa-athenz/pom.xml
index 7d2ad924ae3..653eb58d76d 100644
--- a/vespa-athenz/pom.xml
+++ b/vespa-athenz/pom.xml
@@ -65,6 +65,14 @@
</exclusion>
<!--Exclude all Jackson bundles provided by JDisc -->
<exclusion>
+ <groupId>jakarta.activation</groupId>
+ <artifactId>jakarta.activation-api</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>jakarta.xml.bind</groupId>
+ <artifactId>jakarta.xml.bind-api</artifactId>
+ </exclusion>
+ <exclusion>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
</exclusion>
@@ -78,6 +86,22 @@
</exclusion>
</exclusions>
</dependency>
+ <dependency> <!-- needed by auth-core -->
+ <groupId>io.jsonwebtoken</groupId>
+ <artifactId>jjwt-impl</artifactId>
+ <scope>compile</scope>
+ </dependency>
+ <dependency> <!-- needed by auth-core -->
+ <groupId>io.jsonwebtoken</groupId>
+ <artifactId>jjwt-jackson</artifactId>
+ <scope>compile</scope>
+ <exclusions>
+ <exclusion>
+ <groupId>com.fasterxml.jackson.core</groupId>
+ <artifactId>jackson-databind</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
<dependency>
<groupId>com.yahoo.athenz</groupId>
<artifactId>athenz-zpe-java-client</artifactId>
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/aws/AwsCredentials.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/aws/AwsCredentials.java
index 30ff63fb108..b5473929184 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/aws/AwsCredentials.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/aws/AwsCredentials.java
@@ -25,12 +25,18 @@ public class AwsCredentials {
private final AthenzDomain athenzDomain;
private final AwsRole awsRole;
private final ZtsClient ztsClient;
+ private final String externalId;
private volatile AwsTemporaryCredentials credentials;
public AwsCredentials(ZtsClient ztsClient, AthenzDomain athenzDomain, AwsRole awsRole) {
+ this(ztsClient, athenzDomain, awsRole, null);
+ }
+
+ public AwsCredentials(ZtsClient ztsClient, AthenzDomain athenzDomain, AwsRole awsRole, String externalId) {
this.ztsClient = ztsClient;
this.athenzDomain = athenzDomain;
this.awsRole = awsRole;
+ this.externalId = externalId;
this.credentials = get();
}
@@ -42,12 +48,16 @@ public class AwsCredentials {
this(new DefaultZtsClient.Builder(ztsUrl).withSslContext(sslContext).build(), athenzDomain, awsRole);
}
+ public AwsCredentials(URI ztsUrl, SSLContext sslContext, AthenzDomain athenzDomain, AwsRole awsRole, String externalId) {
+ this(new DefaultZtsClient.Builder(ztsUrl).withSslContext(sslContext).build(), athenzDomain, awsRole, externalId);
+ }
+
/**
* Requests temporary credentials from ZTS or return cached credentials
*/
public AwsTemporaryCredentials get() {
if(shouldRefresh(credentials)) {
- this.credentials = ztsClient.getAwsTemporaryCredentials(athenzDomain, awsRole);
+ this.credentials = ztsClient.getAwsTemporaryCredentials(athenzDomain, awsRole, externalId);
}
return credentials;
}
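The new external-id overloads simply thread an optional value through to ZtsClient.getAwsTemporaryCredentials. A minimal usage sketch; the import paths are assumed from the surrounding module layout, and the domain, role and external-id values are purely hypothetical:

    // Sketch only: package paths, "my.domain", "my-aws-role" and the external id are assumptions.
    import com.yahoo.vespa.athenz.api.AthenzDomain;
    import com.yahoo.vespa.athenz.api.AwsRole;
    import com.yahoo.vespa.athenz.api.AwsTemporaryCredentials;
    import com.yahoo.vespa.athenz.aws.AwsCredentials;
    import com.yahoo.vespa.athenz.client.zts.ZtsClient;

    public class AwsCredentialsExample {

        static AwsTemporaryCredentials fetch(ZtsClient ztsClient) {
            AwsCredentials credentials = new AwsCredentials(
                    ztsClient,
                    new AthenzDomain("my.domain"),
                    new AwsRole("my-aws-role"),
                    "my-external-id");   // pass null to keep the old behaviour
            return credentials.get();    // refreshed via ZTS when close to expiry
        }
    }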
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/bindings/IdentityDocumentApi.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/bindings/IdentityDocumentApi.java
deleted file mode 100644
index fc5392411c1..00000000000
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/bindings/IdentityDocumentApi.java
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.athenz.identityprovider.api.bindings;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.MediaType;
-
-/**
- * @author bjorncs
- */
-@Path("/identity-document")
-public interface IdentityDocumentApi {
-
- @GET
- @Produces(MediaType.APPLICATION_JSON)
- @Path("/node/{host}")
- SignedIdentityDocumentEntity getNodeIdentityDocument(@PathParam("host") String host);
-
-
- @GET
- @Produces(MediaType.APPLICATION_JSON)
- @Path("/tenant/{host}")
- SignedIdentityDocumentEntity getTenantIdentityDocument(@PathParam("host") String host);
-}
diff --git a/vespa_feed_perf/src/main/java/com/yahoo/vespa/feed/perf/SimpleFeeder.java b/vespa_feed_perf/src/main/java/com/yahoo/vespa/feed/perf/SimpleFeeder.java
index aac7ab750bd..3ee5f1df37e 100644
--- a/vespa_feed_perf/src/main/java/com/yahoo/vespa/feed/perf/SimpleFeeder.java
+++ b/vespa_feed_perf/src/main/java/com/yahoo/vespa/feed/perf/SimpleFeeder.java
@@ -54,6 +54,8 @@ import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
+import java.util.logging.Level;
+import java.util.logging.Logger;
/**
* @author Simon Thoresen Hult
@@ -118,6 +120,7 @@ public class SimpleFeeder implements ReplyHandler {
public static void main(String[] args) throws Throwable {
+ Logger.getLogger("").setLevel(Level.WARNING);
new SimpleFeeder(new FeederParams().parseArgs(args)).run().close();
}
diff --git a/vespaclient-java/src/main/java/com/yahoo/vespaget/Main.java b/vespaclient-java/src/main/java/com/yahoo/vespaget/Main.java
index 6b3938f9cf8..da688c80252 100644
--- a/vespaclient-java/src/main/java/com/yahoo/vespaget/Main.java
+++ b/vespaclient-java/src/main/java/com/yahoo/vespaget/Main.java
@@ -1,10 +1,12 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespaget;
-
import com.yahoo.documentapi.messagebus.loadtypes.LoadTypeSet;
import com.yahoo.vespaclient.ClusterList;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
/**
* The vespa-get tool retrieves documents from a Vespa Document Storage cluster, and prints them to stdout as XML.
*
@@ -14,6 +16,7 @@ public class Main {
public static void main(String[] args) {
try {
+ Logger.getLogger("").setLevel(Level.WARNING);
CommandLineOptions options = new CommandLineOptions();
ClientParameters params = options.parseCommandLineArguments(args);
diff --git a/vespaclient-java/src/main/java/com/yahoo/vespastat/Main.java b/vespaclient-java/src/main/java/com/yahoo/vespastat/Main.java
index d3f05e18841..c7698a81667 100644
--- a/vespaclient-java/src/main/java/com/yahoo/vespastat/Main.java
+++ b/vespaclient-java/src/main/java/com/yahoo/vespastat/Main.java
@@ -1,6 +1,9 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespastat;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
/**
* Main application class
*
@@ -12,6 +15,7 @@ public class Main {
}
public static void main(String[] args) {
+ Logger.getLogger("").setLevel(Level.WARNING);
CommandLineOptions options = new CommandLineOptions();
try {
ClientParameters params = options.parseCommandLineArguments(args);
diff --git a/vespaclient-java/src/main/sh/vespa-visit.sh b/vespaclient-java/src/main/sh/vespa-visit.sh
index 34610fbbd8c..92d6bc67f3f 100755
--- a/vespaclient-java/src/main/sh/vespa-visit.sh
+++ b/vespaclient-java/src/main/sh/vespa-visit.sh
@@ -74,6 +74,10 @@ findhost
# END environment bootstrap section
+if [ "${VESPA_LOG_LEVEL}" = "" ]; then
+ export VESPA_LOG_LEVEL=error,warning
+fi
+
export MALLOC_ARENA_MAX=1 #Does not need fast allocation
exec java \
-server -enableassertions \
diff --git a/vespajlib/abi-spec.json b/vespajlib/abi-spec.json
index 6d99eeac816..ebca0a4d852 100644
--- a/vespajlib/abi-spec.json
+++ b/vespajlib/abi-spec.json
@@ -679,7 +679,8 @@
"public static void ensureSmaller(java.lang.String, java.lang.Comparable, java.lang.String, java.lang.Comparable)",
"public static void ensure(java.lang.String, boolean)",
"public static varargs void ensure(boolean, java.lang.Object[])",
- "public static void ensureInstanceOf(java.lang.String, java.lang.Object, java.lang.Class)"
+ "public static void ensureInstanceOf(java.lang.String, java.lang.Object, java.lang.Class)",
+ "public static void ensureNotInstanceOf(java.lang.String, java.lang.Object, java.lang.Class)"
],
"fields": []
},
@@ -1459,6 +1460,25 @@
],
"fields": []
},
+ "com.yahoo.tensor.TypeResolver": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public static com.yahoo.tensor.TensorType map(com.yahoo.tensor.TensorType)",
+ "public static com.yahoo.tensor.TensorType reduce(com.yahoo.tensor.TensorType, java.util.List)",
+ "public static com.yahoo.tensor.TensorType peek(com.yahoo.tensor.TensorType, java.util.List)",
+ "public static com.yahoo.tensor.TensorType rename(com.yahoo.tensor.TensorType, java.util.List, java.util.List)",
+ "public static com.yahoo.tensor.TensorType cell_cast(com.yahoo.tensor.TensorType, com.yahoo.tensor.TensorType$Value)",
+ "public static com.yahoo.tensor.TensorType join(com.yahoo.tensor.TensorType, com.yahoo.tensor.TensorType)",
+ "public static com.yahoo.tensor.TensorType merge(com.yahoo.tensor.TensorType, com.yahoo.tensor.TensorType)",
+ "public static com.yahoo.tensor.TensorType concat(com.yahoo.tensor.TensorType, com.yahoo.tensor.TensorType, java.lang.String)"
+ ],
+ "fields": []
+ },
"com.yahoo.tensor.evaluation.EvaluationContext": {
"superClass": "java.lang.Object",
"interfaces": [
diff --git a/vespajlib/src/main/java/com/yahoo/protect/Validator.java b/vespajlib/src/main/java/com/yahoo/protect/Validator.java
index 49fe7716ba2..ee4a93c2f01 100644
--- a/vespajlib/src/main/java/com/yahoo/protect/Validator.java
+++ b/vespajlib/src/main/java/com/yahoo/protect/Validator.java
@@ -68,14 +68,10 @@ public abstract class Validator {
* Throws an IllegalArgumentException if the first argument is not strictly
* smaller than the second argument
*
- * @param smallDescription
- * description of the smallest argument
- * @param small
- * the smallest argument
- * @param largeDescription
- * description of the largest argument
- * @param large
- * the largest argument
+ * @param smallDescription description of the smallest argument
+ * @param small the smallest argument
+ * @param largeDescription description of the largest argument
+ * @param large the largest argument
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
public static void ensureSmaller(String smallDescription, Comparable small, String largeDescription, Comparable large) {
@@ -115,14 +111,10 @@ public abstract class Validator {
/**
* Ensures that an item is of a particular class
*
- * @param description
- * a description of the item to be checked
- * @param item
- * the item to check the type of
- * @param type
- * the type the given item should be instanceof
- * @throws IllegalArgumentException
- * if the given item is not of the correct type
+ * @param description a description of the item to be checked
+ * @param item the item to check the type of
+ * @param type the type the given item should be instanceof
+ * @throws IllegalArgumentException if the given item is not of the correct type
*/
public static void ensureInstanceOf(String description, Object item, Class<?> type) {
if ( ! type.isAssignableFrom(item.getClass())) {
@@ -131,4 +123,19 @@ public abstract class Validator {
}
}
+ /**
+ * Ensures that an item is not of a particular class
+ *
+ * @param description a description of the item to be checked
+ * @param item the item to check the type of
+ * @param type the type the given item should NOT be instanceof
+ * @throws IllegalArgumentException if the given item is an instance of the given type
+ */
+ public static void ensureNotInstanceOf(String description, Object item, Class<?> type) {
+ if ( type.isAssignableFrom(item.getClass())) {
+ throw new IllegalArgumentException(description + " " + item + " should NOT be an instance of " + type +
+ " but is " + item.getClass());
+ }
+ }
+
}
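For context, a minimal sketch of how the new ensureNotInstanceOf is meant to be used next to the existing ensureInstanceOf; the key/Number/CharSequence example values are illustrative only:

    import com.yahoo.protect.Validator;

    public class ValidatorExample {

        static void acceptKey(Object key) {
            // Passes for a String: a String is not a Number
            Validator.ensureNotInstanceOf("key", key, Number.class);
            // Passes for a String: a String is a CharSequence
            Validator.ensureInstanceOf("key", key, CharSequence.class);
        }

        public static void main(String[] args) {
            acceptKey("user-42");          // ok
            acceptKey(Integer.valueOf(7)); // throws IllegalArgumentException from ensureNotInstanceOf
        }
    }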
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/TypeResolver.java b/vespajlib/src/main/java/com/yahoo/tensor/TypeResolver.java
new file mode 100644
index 00000000000..2bb9a99d6e5
--- /dev/null
+++ b/vespajlib/src/main/java/com/yahoo/tensor/TypeResolver.java
@@ -0,0 +1,264 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.tensor;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import static com.yahoo.tensor.TensorType.Dimension;
+import static com.yahoo.tensor.TensorType.Value;
+
+/**
+ * Common type resolving for basic tensor operations.
+ *
+ * @author arnej
+ */
+public class TypeResolver {
+
+ private static final Logger logger = Logger.getLogger(TypeResolver.class.getName());
+
+ static private TensorType scalar() {
+ return TensorType.empty;
+ }
+
+ static public TensorType map(TensorType inputType) {
+ Value orig = inputType.valueType();
+ Value cellType = Value.largestOf(orig, Value.FLOAT);
+ if (cellType == orig) {
+ return inputType;
+ }
+ return new TensorType(cellType, inputType.dimensions());
+ }
+
+ static public TensorType reduce(TensorType inputType, List<String> reduceDimensions) {
+ if (reduceDimensions.isEmpty()) {
+ return scalar();
+ }
+ Map<String, Dimension> map = new HashMap<>();
+ for (Dimension dim : inputType.dimensions()) {
+ map.put(dim.name(), dim);
+ }
+ for (String name : reduceDimensions) {
+ if (map.containsKey(name)) {
+ map.remove(name);
+ } else {
+ logger.log(Level.WARNING, "reducing non-existing dimension "+name+" in type "+inputType);
+ // throw new IllegalArgumentException("reducing non-existing dimension "+name+" in type "+inputType);
+ }
+ }
+ if (map.isEmpty()) {
+ return scalar();
+ }
+ Value cellType = Value.largestOf(inputType.valueType(), Value.FLOAT);
+ return new TensorType(cellType, map.values());
+ }
+
+ static public TensorType peek(TensorType inputType, List<String> peekDimensions) {
+ if (peekDimensions.isEmpty()) {
+ throw new IllegalArgumentException("peeking no dimensions makes no sense");
+ }
+ Map<String, Dimension> map = new HashMap<>();
+ for (Dimension dim : inputType.dimensions()) {
+ map.put(dim.name(), dim);
+ }
+ for (String name : peekDimensions) {
+ if (map.containsKey(name)) {
+ map.remove(name);
+ } else {
+ throw new IllegalArgumentException("peeking non-existing dimension "+name+" in type "+inputType);
+ }
+ }
+ if (map.isEmpty()) {
+ return scalar();
+ }
+ Value cellType = inputType.valueType();
+ return new TensorType(cellType, map.values());
+ }
+
+ static public TensorType rename(TensorType inputType, List<String> from, List<String> to) {
+ if (from.isEmpty()) {
+ throw new IllegalArgumentException("renaming no dimensions");
+ }
+ if (from.size() != to.size()) {
+ throw new IllegalArgumentException("bad rename, from size "+from.size()+" != to.size "+to.size());
+ }
+ Map<String,Dimension> oldDims = new HashMap<>();
+ for (Dimension dim : inputType.dimensions()) {
+ oldDims.put(dim.name(), dim);
+ }
+ Map<String,Dimension> newDims = new HashMap<>();
+ for (int i = 0; i < from.size(); ++i) {
+ String oldName = from.get(i);
+ String newName = to.get(i);
+ if (oldDims.containsKey(oldName)) {
+ var dim = oldDims.remove(oldName);
+ newDims.put(newName, dim.withName(newName));
+ } else {
+ logger.log(Level.WARNING, "renaming non-existing dimension "+oldName+" in type "+inputType);
+ // throw new IllegalArgumentException("bad rename, dimension "+oldName+" not found");
+ }
+ }
+ for (var keep : oldDims.values()) {
+ newDims.put(keep.name(), keep);
+ }
+ if (inputType.dimensions().size() == newDims.size()) {
+ return new TensorType(inputType.valueType(), newDims.values());
+ } else {
+ throw new IllegalArgumentException("bad rename, lost some dimenions");
+ }
+ }
+
+ static public TensorType cell_cast(TensorType inputType, Value toCellType) {
+ if (toCellType != Value.DOUBLE && inputType.dimensions().isEmpty()) {
+ throw new IllegalArgumentException("cannot cast "+inputType+" to valueType"+toCellType);
+ }
+ return new TensorType(toCellType, inputType.dimensions());
+ }
+
+ private static boolean firstIsBoundSecond(Dimension first, Dimension second) {
+ return (first.type() == Dimension.Type.indexedBound &&
+ second.type() == Dimension.Type.indexedUnbound &&
+ first.name().equals(second.name()));
+ }
+
+ private static boolean firstIsSmaller(Dimension first, Dimension second) {
+ return (first.type() == Dimension.Type.indexedBound &&
+ second.type() == Dimension.Type.indexedBound &&
+ first.name().equals(second.name()) &&
+ first.size().isPresent() && second.size().isPresent() &&
+ first.size().get() < second.size().get());
+ }
+
+ static public TensorType join(TensorType lhs, TensorType rhs) {
+ Value cellType = Value.DOUBLE;
+ if (lhs.rank() > 0 && rhs.rank() > 0) {
+ // both types decide the new cell type
+ cellType = Value.largestOf(lhs.valueType(), rhs.valueType());
+ } else if (lhs.rank() > 0) {
+ // only the tensor argument decides the new cell type
+ cellType = lhs.valueType();
+ } else if (rhs.rank() > 0) {
+ // only the tensor argument decides the new cell type
+ cellType = rhs.valueType();
+ }
+ // result of computation must be at least float
+ cellType = Value.largestOf(cellType, Value.FLOAT);
+
+ Map<String, Dimension> map = new HashMap<>();
+ for (Dimension dim : lhs.dimensions()) {
+ map.put(dim.name(), dim);
+ }
+ for (Dimension dim : rhs.dimensions()) {
+ if (map.containsKey(dim.name())) {
+ Dimension other = map.get(dim.name());
+ if (! other.equals(dim)) {
+ if (firstIsBoundSecond(dim, other)) {
+ map.put(dim.name(), dim);
+ } else if (firstIsBoundSecond(other, dim)) {
+ map.put(dim.name(), other);
+ } else if (dim.isMapped() && other.isIndexed()) {
+ map.put(dim.name(), dim); // {} and [] -> {}. Note: this is not allowed in C++
+ } else if (dim.isIndexed() && other.isMapped()) {
+ map.put(dim.name(), other); // {} and [] -> {}. Note: this is not allowed in C++
+ } else {
+ throw new IllegalArgumentException("Unequal dimension " + dim.name() + " in " + lhs+ " and "+rhs);
+ }
+ }
+ } else {
+ map.put(dim.name(), dim);
+ }
+ }
+ return new TensorType(cellType, map.values());
+ }
+
+ static public TensorType merge(TensorType lhs, TensorType rhs) {
+ int sz = lhs.dimensions().size();
+ boolean allOk = (rhs.dimensions().size() == sz);
+ if (allOk) {
+ for (int i = 0; i < sz; i++) {
+ String lName = lhs.dimensions().get(i).name();
+ String rName = rhs.dimensions().get(i).name();
+ if (! lName.equals(rName)) {
+ allOk = false;
+ }
+ }
+ }
+ if (allOk) {
+ return join(lhs, rhs);
+ } else {
+ throw new IllegalArgumentException("types in merge() dimensions mismatch: "+lhs+" != "+rhs);
+ }
+ }
+
+ static public TensorType concat(TensorType lhs, TensorType rhs, String concatDimension) {
+ Value cellType = Value.DOUBLE;
+ if (lhs.rank() > 0 && rhs.rank() > 0) {
+ if (lhs.valueType() == rhs.valueType()) {
+ cellType = lhs.valueType();
+ } else {
+ cellType = Value.largestOf(lhs.valueType(), rhs.valueType());
+ // when changing cell type, make it at least float
+ cellType = Value.largestOf(cellType, Value.FLOAT);
+ }
+ } else if (lhs.rank() > 0) {
+ cellType = lhs.valueType();
+ } else if (rhs.rank() > 0) {
+ cellType = rhs.valueType();
+ }
+ Dimension first = Dimension.indexed(concatDimension, 1);
+ Dimension second = Dimension.indexed(concatDimension, 1);
+ Map<String, Dimension> map = new HashMap<>();
+ for (Dimension dim : lhs.dimensions()) {
+ if (dim.name().equals(concatDimension)) {
+ first = dim;
+ } else {
+ map.put(dim.name(), dim);
+ }
+ }
+ for (Dimension dim : rhs.dimensions()) {
+ if (dim.name().equals(concatDimension)) {
+ second = dim;
+ } else if (map.containsKey(dim.name())) {
+ Dimension other = map.get(dim.name());
+ if (! other.equals(dim)) {
+ if (firstIsBoundSecond(dim, other)) {
+ map.put(dim.name(), other); // [N] and [] -> []
+ } else if (firstIsBoundSecond(other, dim)) {
+ map.put(dim.name(), dim); // [N] and [] -> []
+ } else if (firstIsSmaller(dim, other)) {
+ map.put(dim.name(), dim); // [N] and [M] -> [min(N,M)]
+ } else if (firstIsSmaller(other, dim)) {
+ map.put(dim.name(), other); // [N] and [M] -> [min(N,M)]
+ } else {
+ throw new IllegalArgumentException("Unequal dimension " + dim.name() + " in " + lhs+ " and "+rhs);
+ }
+ }
+ } else {
+ map.put(dim.name(), dim);
+ }
+ }
+ if (first.type() == Dimension.Type.mapped) {
+ throw new IllegalArgumentException("Bad concat dimension "+concatDimension+" in lhs: "+lhs);
+ }
+ if (second.type() == Dimension.Type.mapped) {
+ throw new IllegalArgumentException("Bad concat dimension "+concatDimension+" in rhs: "+rhs);
+ }
+ if (first.type() == Dimension.Type.indexedUnbound) {
+ map.put(concatDimension, first);
+ } else if (second.type() == Dimension.Type.indexedUnbound) {
+ map.put(concatDimension, second);
+ } else {
+ long concatSize = first.size().get() + second.size().get();
+ map.put(concatDimension, Dimension.indexed(concatDimension, concatSize));
+ }
+ return new TensorType(cellType, map.values());
+ }
+
+}
+
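Taken together, these rules promote the cell type of computed results to at least float and resolve mismatching dimensions toward the more specific (bound) or more general (unbound/mapped) dimension, depending on the operation. A small sketch of what the resolver returns for a few representative inputs; the results in the comments follow the code above and are not taken from tests:

    import com.yahoo.tensor.TensorType;
    import com.yahoo.tensor.TypeResolver;

    import java.util.List;

    public class TypeResolverExample {
        public static void main(String[] args) {
            TensorType a = TensorType.fromSpec("tensor<float>(x[3],y{})");
            TensorType b = TensorType.fromSpec("tensor<double>(x[],z[2])");

            // map: cell type already at least float, so the type is unchanged
            System.out.println(TypeResolver.map(a));                        // tensor<float>(x[3],y{})

            // reduce: removed dimensions disappear; reducing everything yields a scalar
            System.out.println(TypeResolver.reduce(a, List.of("y")));       // tensor<float>(x[3])
            System.out.println(TypeResolver.reduce(a, List.of("x", "y")));  // tensor() (scalar)

            // join: the common dimension x resolves x[3] vs x[] to the bound x[3]
            System.out.println(TypeResolver.join(a, b));                    // tensor<double>(x[3],y{},z[2])

            // concat along x: a bound and an unbound x concatenate to an unbound x
            System.out.println(TypeResolver.concat(a, b, "x"));             // tensor<double>(x[],y{},z[2])
        }
    }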
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/functions/CellCast.java b/vespajlib/src/main/java/com/yahoo/tensor/functions/CellCast.java
index c6f8171bd18..fe8b2f417aa 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/functions/CellCast.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/functions/CellCast.java
@@ -3,6 +3,7 @@ package com.yahoo.tensor.functions;
import com.yahoo.tensor.Tensor;
import com.yahoo.tensor.TensorType;
+import com.yahoo.tensor.TypeResolver;
import com.yahoo.tensor.evaluation.EvaluationContext;
import com.yahoo.tensor.evaluation.Name;
import com.yahoo.tensor.evaluation.TypeContext;
@@ -47,7 +48,7 @@ public class CellCast<NAMETYPE extends Name> extends PrimitiveTensorFunction<NAM
@Override
public TensorType type(TypeContext<NAMETYPE> context) {
- return new TensorType(valueType, argument.type(context).dimensions());
+ return TypeResolver.cell_cast(argument.type(context), valueType);
}
@Override
@@ -56,12 +57,11 @@ public class CellCast<NAMETYPE extends Name> extends PrimitiveTensorFunction<NAM
if (tensor.type().valueType() == valueType) {
return tensor;
}
- TensorType type = new TensorType(valueType, tensor.type().dimensions());
+ TensorType type = TypeResolver.cell_cast(tensor.type(), valueType);
return cast(tensor, type);
}
private Tensor cast(Tensor tensor, TensorType type) {
- Tensor.Builder builder = Tensor.Builder.of(type);
TensorType.Value fromValueType = tensor.type().valueType();
switch (fromValueType) {
case DOUBLE:
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/functions/Concat.java b/vespajlib/src/main/java/com/yahoo/tensor/functions/Concat.java
index fff2ddaf320..ebb4dbcc3b6 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/functions/Concat.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/functions/Concat.java
@@ -7,13 +7,18 @@ import com.yahoo.tensor.IndexedTensor;
import com.yahoo.tensor.Tensor;
import com.yahoo.tensor.TensorAddress;
import com.yahoo.tensor.TensorType;
+import com.yahoo.tensor.TypeResolver;
import com.yahoo.tensor.evaluation.EvaluationContext;
import com.yahoo.tensor.evaluation.Name;
import com.yahoo.tensor.evaluation.TypeContext;
import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
import java.util.Iterator;
+import java.util.ArrayList;
import java.util.List;
+import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
@@ -26,6 +31,232 @@ import java.util.stream.Collectors;
*/
public class Concat<NAMETYPE extends Name> extends PrimitiveTensorFunction<NAMETYPE> {
+ static class CellVector {
+ ArrayList<Double> values = new ArrayList<>();
+ void setValue(int ccDimIndex, double value) {
+ while (values.size() <= ccDimIndex) {
+ values.add(0.0);
+ }
+ values.set(ccDimIndex, value);
+ }
+ }
+
+ static class CellVectorMap {
+ Map<TensorAddress, CellVector> map = new HashMap<>();
+ CellVector lookupCreate(TensorAddress addr) {
+ return map.computeIfAbsent(addr, k -> new CellVector());
+ }
+ }
+
+ static class CellVectorMapMap {
+ Map<TensorAddress, CellVectorMap> map = new HashMap<>();
+
+ CellVectorMap lookupCreate(TensorAddress addr) {
+ return map.computeIfAbsent(addr, k -> new CellVectorMap());
+ }
+
+ }
+
+ enum DimType { common, separate, concat }
+
+ static class SplitHow {
+ List<DimType> handleDims = new ArrayList<>();
+ long numCommon() { return handleDims.stream().filter(t -> (t == DimType.common)).count(); }
+ long numSeparate() { return handleDims.stream().filter(t -> (t == DimType.separate)).count(); }
+ }
+
+ static class ConcatPlan {
+
+ final TensorType resultType;
+ final String concatDimension;
+
+ SplitHow splitInfoA = new SplitHow();
+ SplitHow splitInfoB = new SplitHow();
+
+ enum CombineHow { left, right, both, concat }
+
+ List<CombineHow> combineHow = new ArrayList<>();
+
+ void aOnly(String dimName) {
+ if (dimName.equals(concatDimension)) {
+ splitInfoA.handleDims.add(DimType.concat);
+ combineHow.add(CombineHow.concat);
+ } else {
+ splitInfoA.handleDims.add(DimType.separate);
+ combineHow.add(CombineHow.left);
+ }
+ }
+
+ void bOnly(String dimName) {
+ if (dimName.equals(concatDimension)) {
+ splitInfoB.handleDims.add(DimType.concat);
+ combineHow.add(CombineHow.concat);
+ } else {
+ splitInfoB.handleDims.add(DimType.separate);
+ combineHow.add(CombineHow.right);
+ }
+ }
+
+ void bothAandB(String dimName) {
+ if (dimName.equals(concatDimension)) {
+ splitInfoA.handleDims.add(DimType.concat);
+ splitInfoB.handleDims.add(DimType.concat);
+ combineHow.add(CombineHow.concat);
+ } else {
+ splitInfoA.handleDims.add(DimType.common);
+ splitInfoB.handleDims.add(DimType.common);
+ combineHow.add(CombineHow.both);
+ }
+ }
+
+ ConcatPlan(TensorType aType, TensorType bType, String concatDimension) {
+ this.resultType = TypeResolver.concat(aType, bType, concatDimension);
+ this.concatDimension = concatDimension;
+ var aDims = aType.dimensions();
+ var bDims = bType.dimensions();
+ int i = 0;
+ int j = 0;
+ while (i < aDims.size() && j < bDims.size()) {
+ String aName = aDims.get(i).name();
+ String bName = bDims.get(j).name();
+ int cmp = aName.compareTo(bName);
+ if (cmp == 0) {
+ bothAandB(aName);
+ ++i;
+ ++j;
+ } else if (cmp < 0) {
+ aOnly(aName);
+ ++i;
+ } else {
+ bOnly(bName);
+ ++j;
+ }
+ }
+ while (i < aDims.size()) {
+ aOnly(aDims.get(i++).name());
+ }
+ while (j < bDims.size()) {
+ bOnly(bDims.get(j++).name());
+ }
+ if (combineHow.size() < resultType.rank()) {
+ var idx = resultType.indexOfDimension(concatDimension);
+ combineHow.add(idx.get(), CombineHow.concat);
+ }
+ }
+
+ }
+
+ static class Helper {
+ ConcatPlan plan;
+ Tensor result;
+
+ Helper(Tensor a, Tensor b, String dimension) {
+ this.plan = new ConcatPlan(a.type(), b.type(), dimension);
+ CellVectorMapMap aData = decompose(a, plan.splitInfoA);
+ CellVectorMapMap bData = decompose(b, plan.splitInfoB);
+ this.result = merge(aData, bData);
+ }
+
+ static int concatDimensionSize(CellVectorMapMap data) {
+ Set<Integer> sizes = new HashSet<>();
+ data.map.forEach((m, cvmap) ->
+ cvmap.map.forEach((e, vector) ->
+ sizes.add(vector.values.size())));
+ if (sizes.isEmpty()) {
+ return 1;
+ }
+ if (sizes.size() == 1) {
+ return sizes.iterator().next();
+ }
+ throw new IllegalArgumentException("inconsistent size of concat dimension, had "+sizes.size()+" different values");
+ }
+
+ TensorAddress combine(TensorAddress match, TensorAddress leftOnly, TensorAddress rightOnly, int concatDimIdx) {
+ String[] labels = new String[plan.resultType.rank()];
+ int out = 0;
+ int m = 0;
+ int a = 0;
+ int b = 0;
+ for (var how : plan.combineHow) {
+ switch (how) {
+ case left:
+ labels[out++] = leftOnly.label(a++);
+ break;
+ case right:
+ labels[out++] = rightOnly.label(b++);
+ break;
+ case both:
+ labels[out++] = match.label(m++);
+ break;
+ case concat:
+ labels[out++] = String.valueOf(concatDimIdx);
+ break;
+ default:
+ throw new IllegalArgumentException("cannot handle: "+how);
+ }
+ }
+ return TensorAddress.of(labels);
+ }
+
+ Tensor merge(CellVectorMapMap a, CellVectorMapMap b) {
+ var builder = Tensor.Builder.of(plan.resultType);
+ int aConcatSize = concatDimensionSize(a);
+ for (var entry : a.map.entrySet()) {
+ TensorAddress common = entry.getKey();
+ if (b.map.containsKey(common)) {
+ var lhs = entry.getValue();
+ var rhs = b.map.get(common);
+ lhs.map.forEach((leftOnly, leftCells) -> {
+ rhs.map.forEach((rightOnly, rightCells) -> {
+ for (int i = 0; i < leftCells.values.size(); i++) {
+ TensorAddress addr = combine(common, leftOnly, rightOnly, i);
+ builder.cell(addr, leftCells.values.get(i));
+ }
+ for (int i = 0; i < rightCells.values.size(); i++) {
+ TensorAddress addr = combine(common, leftOnly, rightOnly, i + aConcatSize);
+ builder.cell(addr, rightCells.values.get(i));
+ }
+ });
+ });
+ }
+ }
+ return builder.build();
+ }
+
+ CellVectorMapMap decompose(Tensor input, SplitHow how) {
+ var iter = input.cellIterator();
+ String[] commonLabels = new String[(int)how.numCommon()];
+ String[] separateLabels = new String[(int)how.numSeparate()];
+ CellVectorMapMap result = new CellVectorMapMap();
+ while (iter.hasNext()) {
+ var cell = iter.next();
+ var addr = cell.getKey();
+ long ccDimIndex = 0;
+ int commonIdx = 0;
+ int separateIdx = 0;
+ for (int i = 0; i < how.handleDims.size(); i++) {
+ switch (how.handleDims.get(i)) {
+ case common:
+ commonLabels[commonIdx++] = addr.label(i);
+ break;
+ case separate:
+ separateLabels[separateIdx++] = addr.label(i);
+ break;
+ case concat:
+ ccDimIndex = addr.numericLabel(i);
+ break;
+ default:
+ throw new IllegalArgumentException("cannot handle: "+how.handleDims.get(i));
+ }
+ }
+ TensorAddress commonAddr = TensorAddress.of(commonLabels);
+ TensorAddress separateAddr = TensorAddress.of(separateLabels);
+ result.lookupCreate(commonAddr).lookupCreate(separateAddr).setValue((int)ccDimIndex, cell.getValue());
+ }
+ return result;
+ }
+ }
+
private final TensorFunction<NAMETYPE> argumentA, argumentB;
private final String dimension;
@@ -60,44 +291,28 @@ public class Concat<NAMETYPE extends Name> extends PrimitiveTensorFunction<NAMET
@Override
public TensorType type(TypeContext<NAMETYPE> context) {
- return type(argumentA.type(context), argumentB.type(context));
- }
-
- /** Returns the type resulting from concatenating a and b */
- private TensorType type(TensorType a, TensorType b) {
- // TODO: Fail if concat dimension is present but not indexed in a or b
- TensorType.Builder builder = new TensorType.Builder(a, b);
- if ( ! unboundIn(a, dimension) && ! unboundIn(b, dimension)) {
- builder.set(TensorType.Dimension.indexed(dimension, a.sizeOfDimension(dimension).orElse(1L) +
- b.sizeOfDimension(dimension).orElse(1L)));
- /*
- MutableLong concatSize = new MutableLong(0);
- a.sizeOfDimension(dimension).ifPresent(concatSize::add);
- b.sizeOfDimension(dimension).ifPresent(concatSize::add);
- builder.set(TensorType.Dimension.indexed(dimension, concatSize.get()));
- */
- }
- return builder.build();
- }
-
- /** Returns true if this dimension is present and unbound */
- private boolean unboundIn(TensorType type, String dimensionName) {
- Optional<TensorType.Dimension> dimension = type.dimension(dimensionName);
- return dimension.isPresent() && ! dimension.get().size().isPresent();
+ return TypeResolver.concat(argumentA.type(context), argumentB.type(context), dimension);
}
@Override
public Tensor evaluate(EvaluationContext<NAMETYPE> context) {
Tensor a = argumentA.evaluate(context);
Tensor b = argumentB.evaluate(context);
- TensorType.Value combinedValueType = TensorType.combinedValueType(a.type(), b.type());
- a = ensureIndexedDimension(dimension, a, combinedValueType);
- b = ensureIndexedDimension(dimension, b, combinedValueType);
+ if (a instanceof IndexedTensor && b instanceof IndexedTensor) {
+ return oldEvaluate(a, b);
+ }
+ var helper = new Helper(a, b, dimension);
+ return helper.result;
+ }
+
+ private Tensor oldEvaluate(Tensor a, Tensor b) {
+ TensorType concatType = TypeResolver.concat(a.type(), b.type(), dimension);
+
+ a = ensureIndexedDimension(dimension, a, concatType.valueType());
+ b = ensureIndexedDimension(dimension, b, concatType.valueType());
IndexedTensor aIndexed = (IndexedTensor) a; // If you get an exception here you have implemented a mixed tensor
IndexedTensor bIndexed = (IndexedTensor) b;
-
- TensorType concatType = type(a.type(), b.type());
DimensionSizes concatSize = concatSize(concatType, aIndexed, bIndexed, dimension);
Tensor.Builder builder = Tensor.Builder.of(concatType, concatSize);
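The new Helper/ConcatPlan path handles the general (non-indexed) case by decomposing each argument into common, separate and concat dimensions and re-assembling the result addresses, while purely dense arguments still take the old indexed path. A minimal sketch of the behaviour this enables for a mixed-plus-dense concatenation, using the Tensor.concat convenience method that wraps this function; the tensor literals are illustrative only:

    import com.yahoo.tensor.Tensor;

    public class ConcatExample {
        public static void main(String[] args) {
            // Mixed tensor: mapped dimension cat{}, indexed dimension x[2]
            Tensor a = Tensor.from("tensor(cat{},x[2]):{{cat:a,x:0}:1.0,{cat:a,x:1}:2.0}");
            // Dense tensor: x[1]
            Tensor b = Tensor.from("tensor(x[1]):[10.0]");

            // Concatenates along x; TypeResolver.concat gives tensor(cat{},x[3])
            Tensor result = a.concat(b, "x");
            System.out.println(result.type());
            System.out.println(result); // the cat:a row becomes the values 1.0, 2.0, 10.0
        }
    }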
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/functions/Join.java b/vespajlib/src/main/java/com/yahoo/tensor/functions/Join.java
index 5419d04a4fb..d43b7889982 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/functions/Join.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/functions/Join.java
@@ -9,6 +9,7 @@ import com.yahoo.tensor.PartialAddress;
import com.yahoo.tensor.Tensor;
import com.yahoo.tensor.TensorAddress;
import com.yahoo.tensor.TensorType;
+import com.yahoo.tensor.TypeResolver;
import com.yahoo.tensor.evaluation.EvaluationContext;
import com.yahoo.tensor.evaluation.Name;
import com.yahoo.tensor.evaluation.TypeContext;
@@ -49,7 +50,7 @@ public class Join<NAMETYPE extends Name> extends PrimitiveTensorFunction<NAMETYP
/** Returns the type resulting from applying Join to the two given types */
public static TensorType outputType(TensorType a, TensorType b) {
try {
- return new TensorType.Builder(false, a, b).build();
+ return TypeResolver.join(a, b);
}
catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Can not join " + a + " and " + b, e);
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/functions/Map.java b/vespajlib/src/main/java/com/yahoo/tensor/functions/Map.java
index 0ddf0bb4e63..77c23f2e603 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/functions/Map.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/functions/Map.java
@@ -4,6 +4,7 @@ package com.yahoo.tensor.functions;
import com.yahoo.tensor.Tensor;
import com.yahoo.tensor.TensorAddress;
import com.yahoo.tensor.TensorType;
+import com.yahoo.tensor.TypeResolver;
import com.yahoo.tensor.evaluation.EvaluationContext;
import com.yahoo.tensor.evaluation.Name;
import com.yahoo.tensor.evaluation.TypeContext;
@@ -31,7 +32,9 @@ public class Map<NAMETYPE extends Name> extends PrimitiveTensorFunction<NAMETYPE
this.mapper = mapper;
}
- public static TensorType outputType(TensorType inputType) { return inputType; }
+ public static TensorType outputType(TensorType inputType) {
+ return TypeResolver.map(inputType);
+ }
public TensorFunction<NAMETYPE> argument() { return argument; }
public DoubleUnaryOperator mapper() { return mapper; }
@@ -53,14 +56,14 @@ public class Map<NAMETYPE extends Name> extends PrimitiveTensorFunction<NAMETYPE
@Override
public TensorType type(TypeContext<NAMETYPE> context) {
- return argument.type(context);
+ return outputType(argument.type(context));
}
@Override
public Tensor evaluate(EvaluationContext<NAMETYPE> context) {
- Tensor argument = argument().evaluate(context);
- Tensor.Builder builder = Tensor.Builder.of(argument.type());
- for (Iterator<Tensor.Cell> i = argument.cellIterator(); i.hasNext(); ) {
+ Tensor input = argument().evaluate(context);
+ Tensor.Builder builder = Tensor.Builder.of(outputType(input.type()));
+ for (Iterator<Tensor.Cell> i = input.cellIterator(); i.hasNext(); ) {
java.util.Map.Entry<TensorAddress, Double> cell = i.next();
builder.cell(cell.getKey(), mapper.applyAsDouble(cell.getValue()));
}
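Because outputType now goes through TypeResolver.map, evaluate() must also build the result with the (possibly promoted) output type, which the hunk above does. Roughly, and assuming a cell type narrower than float (such as bfloat16) is available in this version:

    import com.yahoo.tensor.Tensor;

    public class MapExample {
        public static void main(String[] args) {
            // Assumes the bfloat16 cell type exists; the promotion rule is the point here.
            Tensor t = Tensor.from("tensor<bfloat16>(x[2]):[1.0, 2.0]");
            Tensor doubled = t.map(v -> v * 2);
            // TypeResolver.map promotes the result cell type to at least float
            System.out.println(doubled.type()); // tensor<float>(x[2])
            System.out.println(doubled);
        }
    }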
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/functions/Merge.java b/vespajlib/src/main/java/com/yahoo/tensor/functions/Merge.java
index d5633bde36c..4aa09f3f4e3 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/functions/Merge.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/functions/Merge.java
@@ -9,6 +9,7 @@ import com.yahoo.tensor.PartialAddress;
import com.yahoo.tensor.Tensor;
import com.yahoo.tensor.TensorAddress;
import com.yahoo.tensor.TensorType;
+import com.yahoo.tensor.TypeResolver;
import com.yahoo.tensor.evaluation.EvaluationContext;
import com.yahoo.tensor.evaluation.Name;
import com.yahoo.tensor.evaluation.TypeContext;
@@ -48,9 +49,7 @@ public class Merge<NAMETYPE extends Name> extends PrimitiveTensorFunction<NAMETY
/** Returns the type resulting from applying Merge to the two given types */
public static TensorType outputType(TensorType a, TensorType b) {
- Optional<TensorType> outputType = a.dimensionwiseGeneralizationWith(b);
- if (outputType.isPresent()) return outputType.get();
- throw new IllegalArgumentException("Cannot merge " + a + " and " + b + ": Arguments must have compatible types");
+ return TypeResolver.merge(a, b);
}
public DoubleBinaryOperator merger() { return merger; }
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/functions/Reduce.java b/vespajlib/src/main/java/com/yahoo/tensor/functions/Reduce.java
index 48604df87e4..86f23487efb 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/functions/Reduce.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/functions/Reduce.java
@@ -6,6 +6,7 @@ import com.yahoo.tensor.IndexedTensor;
import com.yahoo.tensor.Tensor;
import com.yahoo.tensor.TensorAddress;
import com.yahoo.tensor.TensorType;
+import com.yahoo.tensor.TypeResolver;
import com.yahoo.tensor.evaluation.EvaluationContext;
import com.yahoo.tensor.evaluation.Name;
import com.yahoo.tensor.evaluation.TypeContext;
@@ -60,13 +61,7 @@ public class Reduce<NAMETYPE extends Name> extends PrimitiveTensorFunction<NAMET
}
public static TensorType outputType(TensorType inputType, List<String> reduceDimensions) {
- TensorType.Builder b = new TensorType.Builder(inputType.valueType());
- if (reduceDimensions.isEmpty()) return b.build(); // means reduce all
- for (TensorType.Dimension dimension : inputType.dimensions()) {
- if ( ! reduceDimensions.contains(dimension.name()))
- b.dimension(dimension);
- }
- return b.build();
+ return TypeResolver.reduce(inputType, reduceDimensions);
}
public TensorFunction<NAMETYPE> argument() { return argument; }
@@ -104,16 +99,7 @@ public class Reduce<NAMETYPE extends Name> extends PrimitiveTensorFunction<NAMET
@Override
public TensorType type(TypeContext<NAMETYPE> context) {
- return type(argument.type(context), dimensions);
- }
-
- private static TensorType type(TensorType argumentType, List<String> dimensions) {
- TensorType.Builder builder = new TensorType.Builder(argumentType.valueType());
- if (dimensions.isEmpty()) return builder.build(); // means reduce all
- for (TensorType.Dimension dimension : argumentType.dimensions())
- if ( ! dimensions.contains(dimension.name())) // keep
- builder.dimension(dimension);
- return builder.build();
+ return outputType(argument.type(context), dimensions);
}
@Override
@@ -133,7 +119,7 @@ public class Reduce<NAMETYPE extends Name> extends PrimitiveTensorFunction<NAMET
else
return reduceAllGeneral(argument, aggregator);
- TensorType reducedType = type(argument.type(), dimensions);
+ TensorType reducedType = outputType(argument.type(), dimensions);
// Reduce cells
Map<TensorAddress, ValueAggregator> aggregatingCells = new HashMap<>();
@@ -250,7 +236,7 @@ public class Reduce<NAMETYPE extends Name> extends PrimitiveTensorFunction<NAMET
private static class MaxAggregator extends ValueAggregator {
- private double maxValue = Double.MIN_VALUE;
+ private double maxValue = Double.NEGATIVE_INFINITY;
@Override
public void aggregate(double value) {
@@ -265,7 +251,7 @@ public class Reduce<NAMETYPE extends Name> extends PrimitiveTensorFunction<NAMET
@Override
public void reset() {
- maxValue = Double.MIN_VALUE;
+ maxValue = Double.NEGATIVE_INFINITY;
}
}
@@ -304,7 +290,7 @@ public class Reduce<NAMETYPE extends Name> extends PrimitiveTensorFunction<NAMET
private static class MinAggregator extends ValueAggregator {
- private double minValue = Double.MAX_VALUE;
+ private double minValue = Double.POSITIVE_INFINITY;
@Override
public void aggregate(double value) {
@@ -319,7 +305,7 @@ public class Reduce<NAMETYPE extends Name> extends PrimitiveTensorFunction<NAMET
@Override
public void reset() {
- minValue = Double.MAX_VALUE;
+ minValue = Double.POSITIVE_INFINITY;
}
}
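The switch from Double.MIN_VALUE/MAX_VALUE to the infinities is a genuine fix: Double.MIN_VALUE is the smallest positive double (about 4.9e-324), not the most negative value, so a max over an all-negative tensor could never fall below it. A small illustrative sketch of the corrected behaviour:

    import com.yahoo.tensor.Tensor;
    import com.yahoo.tensor.functions.Reduce;

    public class ReduceExample {
        public static void main(String[] args) {
            Tensor t = Tensor.from("tensor(x[3]):[-5.0, -2.0, -7.0]");

            // With the identity at NEGATIVE_INFINITY, the max of an all-negative
            // tensor is the true maximum, -2.0; with the old MIN_VALUE identity
            // the aggregator would have stayed at ~4.9e-324.
            Tensor max = t.reduce(Reduce.Aggregator.max, "x");
            System.out.println(max); // rank-0 tensor holding -2.0

            Tensor min = t.reduce(Reduce.Aggregator.min, "x");
            System.out.println(min); // rank-0 tensor holding -7.0
        }
    }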
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/functions/Rename.java b/vespajlib/src/main/java/com/yahoo/tensor/functions/Rename.java
index 275b546c0aa..fc1e7737d83 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/functions/Rename.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/functions/Rename.java
@@ -5,6 +5,7 @@ import com.google.common.collect.ImmutableList;
import com.yahoo.tensor.Tensor;
import com.yahoo.tensor.TensorAddress;
import com.yahoo.tensor.TensorType;
+import com.yahoo.tensor.TypeResolver;
import com.yahoo.tensor.evaluation.EvaluationContext;
import com.yahoo.tensor.evaluation.Name;
import com.yahoo.tensor.evaluation.TypeContext;
@@ -76,10 +77,7 @@ public class Rename<NAMETYPE extends Name> extends PrimitiveTensorFunction<NAMET
}
private TensorType type(TensorType type) {
- TensorType.Builder builder = new TensorType.Builder(type.valueType());
- for (TensorType.Dimension dimension : type.dimensions())
- builder.dimension(dimension.withName(fromToMap.getOrDefault(dimension.name(), dimension.name())));
- return builder.build();
+ return TypeResolver.rename(type, fromDimensions, toDimensions);
}
@Override
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/functions/ScalarFunctions.java b/vespajlib/src/main/java/com/yahoo/tensor/functions/ScalarFunctions.java
index c19b07cf96f..3ee9e67cdd6 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/functions/ScalarFunctions.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/functions/ScalarFunctions.java
@@ -3,7 +3,9 @@ package com.yahoo.tensor.functions;
import com.google.common.collect.ImmutableList;
+import java.util.Comparator;
import java.util.List;
+import java.util.PriorityQueue;
import java.util.concurrent.ThreadLocalRandom;
import java.util.function.DoubleBinaryOperator;
import java.util.function.DoubleUnaryOperator;
@@ -332,27 +334,81 @@ public class ScalarFunctions {
}
public static class Erf implements DoubleUnaryOperator {
+ static final Comparator<Double> byAbs = (x,y) -> Double.compare(Math.abs(x), Math.abs(y));
+
+ static double kummer(double a, double b, double z) {
+ PriorityQueue<Double> terms = new PriorityQueue<>(byAbs);
+ double term = 1.0;
+ long n = 0;
+ while (Math.abs(term) > Double.MIN_NORMAL) {
+ terms.add(term);
+ term *= (a+n);
+ term /= (b+n);
+ ++n;
+ term *= z;
+ term /= n;
+ }
+ double sum = terms.remove();
+ while (! terms.isEmpty()) {
+ sum += terms.remove();
+ terms.add(sum);
+ sum = terms.remove();
+ }
+ return sum;
+ }
+
+ static double approx_erfc(double x) {
+ double sq = x*x;
+ double mult = Math.exp(-sq) / (x * Math.sqrt(Math.PI));
+ double term = 1.0;
+ long n = 1;
+ double sum = 0.0;
+ while ((sum + term) != sum) {
+ double pterm = term;
+ sum += term;
+ term = 0.5 * pterm * n / sq;
+ if (term > pterm) {
+ sum -= 0.5 * pterm;
+ return sum*mult;
+ }
+ n += 2;
+ pterm = term;
+ sum -= term;
+ term = 0.5 * pterm * n / sq;
+ if (term > pterm) {
+ sum += 0.5 * pterm;
+ return sum*mult;
+ }
+ n += 2;
+ }
+ return sum*mult;
+ }
+
@Override
public double applyAsDouble(double operand) { return erf(operand); }
@Override
public String toString() { return "f(a)(erf(a))"; }
- // Use Horner's method
- // From https://introcs.cs.princeton.edu/java/21function/ErrorFunction.java.html
+ static final double nearZeroMultiplier = 2.0 / Math.sqrt(Math.PI);
+
public static double erf(double v) {
- double t = 1.0 / (1.0 + 0.5 * Math.abs(v));
- double ans = 1 - t * Math.exp(-v*v - 1.26551223 +
- t * ( 1.00002368 +
- t * ( 0.37409196 +
- t * ( 0.09678418 +
- t * (-0.18628806 +
- t * ( 0.27886807 +
- t * (-1.13520398 +
- t * ( 1.48851587 +
- t * (-0.82215223 +
- t * ( 0.17087277))))))))));
- if (v >= 0) return ans;
- else return -ans;
+ if (v < 0) {
+ return -erf(Math.abs(v));
+ }
+ if (v < 1.0e-10) {
+            // Just use the derivative when very near zero:
+ return v * nearZeroMultiplier;
+ }
+ if (v <= 1.0) {
+ // works best when v is small
+ return v * nearZeroMultiplier * kummer(0.5, 1.5, -v*v);
+ }
+ if (v < 4.3) {
+ // slower, but works with bigger v
+ return v * nearZeroMultiplier * Math.exp(-v*v) * kummer(1.0, 1.5, v*v);
+ }
+ // works only with "very big" v
+ return 1.0 - approx_erfc(v);
}
}
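For context on the erf() rewrite above: the Horner-form rational approximation is replaced by exact series identities, with the branch chosen per range purely for numerical convergence. In terms of Kummer's confluent hypergeometric function M(a,b,z), the relevant standard formulas are (background only, written as LaTeX, not part of the diff):

M(a,b,z) = \sum_{n=0}^{\infty} \frac{(a)_n}{(b)_n}\,\frac{z^n}{n!}

\operatorname{erf}(v) = \frac{2v}{\sqrt{\pi}}\,M\!\left(\tfrac{1}{2},\tfrac{3}{2},-v^2\right)
                      = \frac{2v}{\sqrt{\pi}}\,e^{-v^2}\,M\!\left(1,\tfrac{3}{2},v^2\right)

\operatorname{erfc}(v) \sim \frac{e^{-v^2}}{v\sqrt{\pi}}\sum_{n=0}^{\infty}(-1)^n\,\frac{(2n-1)!!}{(2v^2)^n} \qquad (v \to \infty)

In the code, kummer(0.5, 1.5, -v*v) evaluates the first form (used for v <= 1), kummer(1.0, 1.5, v*v) together with the exp(-v*v) factor evaluates the second (used for 1 < v < 4.3), and approx_erfc sums the truncated asymptotic erfc series, stopping at the smallest term, so the last branch returns 1 - erfc(v) for large v.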
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/functions/Slice.java b/vespajlib/src/main/java/com/yahoo/tensor/functions/Slice.java
index bccd66acd31..607c9a0ab44 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/functions/Slice.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/functions/Slice.java
@@ -6,6 +6,7 @@ import com.yahoo.tensor.PartialAddress;
import com.yahoo.tensor.Tensor;
import com.yahoo.tensor.TensorAddress;
import com.yahoo.tensor.TensorType;
+import com.yahoo.tensor.TypeResolver;
import com.yahoo.tensor.evaluation.EvaluationContext;
import com.yahoo.tensor.evaluation.Name;
import com.yahoo.tensor.evaluation.TypeContext;
@@ -14,7 +15,7 @@ import java.util.Iterator;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
-import java.util.Set;
+import java.util.function.Predicate;
import java.util.stream.Collectors;
/**
@@ -113,44 +114,33 @@ public class Slice<NAMETYPE extends Name> extends PrimitiveTensorFunction<NAMETY
return resultType(argument.type(context));
}
+ private List<String> findDimensions(List<TensorType.Dimension> dims, Predicate<TensorType.Dimension> pred) {
+ return dims.stream().filter(pred).map(TensorType.Dimension::name).collect(Collectors.toList());
+ }
+
private TensorType resultType(TensorType argumentType) {
- TensorType.Builder b = new TensorType.Builder();
+ List<String> peekDimensions;
// Special case where a single indexed or mapped dimension is sliced
if (subspaceAddress.size() == 1 && subspaceAddress.get(0).dimension().isEmpty()) {
if (subspaceAddress.get(0).index().isPresent()) {
- if (argumentType.dimensions().stream().filter(d -> d.isIndexed()).count() > 1)
+ peekDimensions = findDimensions(argumentType.dimensions(), TensorType.Dimension::isIndexed);
+ if (peekDimensions.size() > 1) {
throw new IllegalArgumentException(this + " slices a single indexed dimension, cannot be applied " +
- " to " + argumentType + ", which have multiple");
- for (TensorType.Dimension dimension : argumentType.dimensions()) {
- if ( ! dimension.isIndexed())
- b.dimension(dimension);
+ "to " + argumentType + ", which has multiple");
}
}
else {
- if (argumentType.dimensions().stream().filter(d -> ! d.isIndexed()).count() > 1)
+ peekDimensions = findDimensions(argumentType.dimensions(), TensorType.Dimension::isMapped);
+ if (peekDimensions.size() > 1)
throw new IllegalArgumentException(this + " slices a single mapped dimension, cannot be applied " +
- " to " + argumentType + ", which have multiple");
- for (TensorType.Dimension dimension : argumentType.dimensions()) {
- if (dimension.isIndexed())
- b.dimension(dimension);
- }
-
+ "to " + argumentType + ", which has multiple");
}
}
else { // general slicing
- Set<String> slicedDimensions = subspaceAddress.stream().map(d -> d.dimension().get()).collect(Collectors.toSet());
- for (TensorType.Dimension dimension : argumentType.dimensions()) {
- if (slicedDimensions.contains(dimension.name()))
- slicedDimensions.remove(dimension.name());
- else
- b.dimension(dimension);
- }
- if ( ! slicedDimensions.isEmpty())
- throw new IllegalArgumentException(this + " slices " + slicedDimensions + " which are not present in " +
- argumentType);
+ peekDimensions = subspaceAddress.stream().map(d -> d.dimension().get()).collect(Collectors.toList());
}
- return b.build();
+ return TypeResolver.peek(argumentType, peekDimensions);
}
@Override
diff --git a/vespajlib/src/test/java/com/yahoo/tensor/TypeResolverTestCase.java b/vespajlib/src/test/java/com/yahoo/tensor/TypeResolverTestCase.java
new file mode 100644
index 00000000000..f0be02a1b53
--- /dev/null
+++ b/vespajlib/src/test/java/com/yahoo/tensor/TypeResolverTestCase.java
@@ -0,0 +1,475 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.tensor;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * @author arnej
+ */
+public class TypeResolverTestCase {
+
+ private static List<String> mkl(String ...values) {
+ return Arrays.asList(values);
+ }
+
+ @Test
+ public void verifyMap() {
+ checkMap("tensor()", "tensor()");
+ checkMap("tensor(x[10])", "tensor(x[10])");
+ checkMap("tensor(a[10],b[20],c[30])", "tensor(a[10],b[20],c[30])");
+ checkMap("tensor(y{})", "tensor(y{})");
+ checkMap("tensor(x[10],y{})", "tensor(x[10],y{})");
+ checkMap("tensor<float>(x[10])", "tensor<float>(x[10])");
+ checkMap("tensor<float>(y{})", "tensor<float>(y{})");
+ checkMap("tensor<bfloat16>(x[10])", "tensor<float>(x[10])");
+ checkMap("tensor<bfloat16>(y{})", "tensor<float>(y{})");
+ checkMap("tensor<int8>(x[10])", "tensor<float>(x[10])");
+ checkMap("tensor<int8>(y{})", "tensor<float>(y{})");
+ }
+
+ @Test
+ public void verifyJoin() {
+ checkJoin("tensor()", "tensor()", "tensor()");
+ checkJoin("tensor()", "tensor(x{})", "tensor(x{})");
+ checkJoin("tensor(x{})", "tensor()", "tensor(x{})");
+ checkJoin("tensor(x{})", "tensor(x{})", "tensor(x{})");
+ checkJoin("tensor(x{})", "tensor(y{})", "tensor(x{},y{})");
+ checkJoin("tensor(x{},y{})", "tensor(y{},z{})", "tensor(x{},y{},z{})");
+ checkJoin("tensor(y{})", "tensor()", "tensor(y{})");
+ checkJoin("tensor(y{})", "tensor(y{})", "tensor(y{})");
+ checkJoin("tensor(a[10])", "tensor(a[10])", "tensor(a[10])");
+ checkJoin("tensor(a[10])", "tensor()", "tensor(a[10])");
+ checkJoin("tensor(a[10])", "tensor(x{},y{},z{})", "tensor(a[10],x{},y{},z{})");
+ // with cell types
+ checkJoin("tensor<bfloat16>(x[5])", "tensor<bfloat16>(x[5])", "tensor<float>(x[5])");
+ checkJoin("tensor<bfloat16>(x[5])", "tensor<float>(x[5])", "tensor<float>(x[5])");
+ checkJoin("tensor<bfloat16>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])");
+ checkJoin("tensor<bfloat16>(x[5])", "tensor()", "tensor<float>(x[5])");
+ checkJoin("tensor<bfloat16>(x[5])", "tensor(x[5])", "tensor(x[5])");
+ checkJoin("tensor<bfloat16>(x{})", "tensor<bfloat16>(y{})", "tensor<float>(x{},y{})");
+ checkJoin("tensor<bfloat16>(x{})", "tensor<int8>(y{})", "tensor<float>(x{},y{})");
+ checkJoin("tensor<bfloat16>(x{})", "tensor()", "tensor<float>(x{})");
+ checkJoin("tensor<float>(x[5])", "tensor<float>(x[5])", "tensor<float>(x[5])");
+ checkJoin("tensor<float>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])");
+ checkJoin("tensor<float>(x[5])", "tensor()", "tensor<float>(x[5])");
+ checkJoin("tensor<float>(x[5])", "tensor(x[5])", "tensor(x[5])");
+ checkJoin("tensor<float>(x{})", "tensor<bfloat16>(y{})", "tensor<float>(x{},y{})");
+ checkJoin("tensor<float>(x{})", "tensor<float>(y{})", "tensor<float>(x{},y{})");
+ checkJoin("tensor<float>(x{})", "tensor<int8>(y{})", "tensor<float>(x{},y{})");
+ checkJoin("tensor<float>(x{})", "tensor()", "tensor<float>(x{})");
+ checkJoin("tensor<int8>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])");
+ checkJoin("tensor<int8>(x{})", "tensor<int8>(y{})", "tensor<float>(x{},y{})");
+ checkJoin("tensor<int8>(x{})", "tensor()", "tensor<float>(x{})");
+ checkJoin("tensor()", "tensor<int8>(x[5])", "tensor<float>(x[5])");
+ checkJoin("tensor(x[5])", "tensor<int8>(x[5])", "tensor(x[5])");
+ checkJoin("tensor(x[5])", "tensor(x[5])", "tensor(x[5])");
+ checkJoin("tensor(x{})", "tensor<bfloat16>(y{})", "tensor(x{},y{})");
+ checkJoin("tensor(x{})", "tensor<float>(y{})", "tensor(x{},y{})");
+ checkJoin("tensor(x{})", "tensor<int8>(y{})", "tensor(x{},y{})");
+ // specific for Java
+ checkJoin("tensor(x[])", "tensor(x{})", "tensor(x{})");
+ checkJoin("tensor(x[3])", "tensor(x{})", "tensor(x{})");
+ checkJoin("tensor(x{})", "tensor(x[])", "tensor(x{})");
+ checkJoin("tensor(x{})", "tensor(x[3])", "tensor(x{})");
+ // dimension mismatch should fail:
+ checkJoinFails("tensor(x[3])", "tensor(x[5])");
+ checkJoinFails("tensor(x[5])", "tensor(x[3])");
+ }
+
+ @Test
+ public void verifyReduce() {
+ checkFullReduce("tensor()");
+ checkReduce("tensor(x[10],y[20],z[30])", mkl("x"), "tensor(y[20],z[30])");
+ checkReduce("tensor(x[10],y[20],z[30])", mkl("y"), "tensor(x[10],z[30])");
+ checkReduce("tensor<float>(x[10],y[20],z[30])", mkl("z"), "tensor<float>(x[10],y[20])");
+ checkReduce("tensor<bfloat16>(x[10],y[20],z[30])", mkl("z"), "tensor<float>(x[10],y[20])");
+ checkReduce("tensor<int8>(x[10],y[20],z[30])", mkl("z"), "tensor<float>(x[10],y[20])");
+ checkReduce("tensor(x[10],y[20],z[30])", mkl("x", "z"), "tensor(y[20])");
+ checkReduce("tensor<float>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<float>(y[20])");
+ checkReduce("tensor<bfloat16>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<float>(y[20])");
+ checkReduce("tensor<int8>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<float>(y[20])");
+ checkFullReduce("tensor(x[10],y[20],z[30])");
+ checkFullReduce("tensor<float>(x[10],y[20],z[30])");
+ checkFullReduce("tensor<bfloat16>(x[10],y[20],z[30])");
+ checkFullReduce("tensor<int8>(x[10],y[20],z[30])");
+ checkReduce("tensor(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()");
+ checkReduce("tensor<float>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()");
+ checkReduce("tensor<bfloat16>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()");
+ checkReduce("tensor<int8>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()");
+ checkReduce("tensor(x[10],y{},z[30])", mkl("x"), "tensor(y{},z[30])");
+ checkReduce("tensor(x[10],y{},z[30])", mkl("y"), "tensor(x[10],z[30])");
+ checkReduce("tensor<float>(x[10],y{},z[30])", mkl("z"), "tensor<float>(x[10],y{})");
+ checkReduce("tensor<bfloat16>(x[10],y{},z[30])", mkl("z"), "tensor<float>(x[10],y{})");
+ checkReduce("tensor<int8>(x[10],y{},z[30])", mkl("z"), "tensor<float>(x[10],y{})");
+ checkReduce("tensor(x[10],y{},z[30])", mkl("x", "z"), "tensor(y{})");
+ checkReduce("tensor<float>(x[10],y{},z[30])", mkl("z", "x"), "tensor<float>(y{})");
+ checkReduce("tensor<bfloat16>(x[10],y{},z[30])", mkl("z", "x"), "tensor<float>(y{})");
+ checkReduce("tensor<int8>(x[10],y{},z[30])", mkl("z", "x"), "tensor<float>(y{})");
+ checkFullReduce("tensor(x[10],y{},z[30])");
+ checkFullReduce("tensor<float>(x[10],y{},z[30])");
+ checkFullReduce("tensor<bfloat16>(x[10],y{},z[30])");
+ checkFullReduce("tensor<int8>(x[10],y{},z[30])");
+ checkReduce("tensor(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()");
+ checkReduce("tensor<float>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()");
+ checkReduce("tensor<bfloat16>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()");
+ checkReduce("tensor<int8>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()");
+ // for now, these will just log a warning
+ //checkReduceFails("tensor()", "x");
+ //checkReduceFails("tensor(y{})", "x");
+ //checkReduceFails("tensor<float>(y[10])", "x");
+ //checkReduceFails("tensor<int8>(y[10])", "x");
+ checkReduce("tensor()", mkl("x"), "tensor()");
+ checkReduce("tensor(y{})", mkl("x"), "tensor(y{})");
+ checkReduce("tensor<float>(y[10])", mkl("x"), "tensor<float>(y[10])");
+ checkReduce("tensor<int8>(y[10])", mkl("x"), "tensor<float>(y[10])");
+ }
+
+ @Test
+ public void verifyMerge() {
+ checkMerge("tensor(a[10])", "tensor(a[10])", "tensor(a[10])");
+ checkMerge("tensor<bfloat16>(x[5])", "tensor<bfloat16>(x[5])", "tensor<float>(x[5])");
+ checkMerge("tensor<bfloat16>(x[5])", "tensor<float>(x[5])", "tensor<float>(x[5])");
+ checkMerge("tensor<bfloat16>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])");
+ checkMerge("tensor<bfloat16>(x[5])", "tensor(x[5])", "tensor(x[5])");
+ checkMerge("tensor<bfloat16>(y{})", "tensor<bfloat16>(y{})", "tensor<float>(y{})");
+ checkMerge("tensor<bfloat16>(y{})", "tensor<int8>(y{})", "tensor<float>(y{})");
+ checkMerge("tensor<float>(x[5])", "tensor<float>(x[5])", "tensor<float>(x[5])");
+ checkMerge("tensor<float>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])");
+ checkMerge("tensor<float>(x[5])", "tensor(x[5])", "tensor(x[5])");
+ checkMerge("tensor<float>(y{})", "tensor<bfloat16>(y{})", "tensor<float>(y{})");
+ checkMerge("tensor<float>(y{})", "tensor<float>(y{})", "tensor<float>(y{})");
+ checkMerge("tensor<float>(y{})", "tensor<int8>(y{})", "tensor<float>(y{})");
+ checkMerge("tensor<int8>(x[5])", "tensor<int8>(x[5])", "tensor<float>(x[5])");
+ checkMerge("tensor<int8>(y{})", "tensor<int8>(y{})", "tensor<float>(y{})");
+ checkMerge("tensor()", "tensor()", "tensor()");
+ checkMerge("tensor(x[5])", "tensor<int8>(x[5])", "tensor(x[5])");
+ checkMerge("tensor(x[5])", "tensor(x[5])", "tensor(x[5])");
+ checkMerge("tensor(x{})", "tensor(x{})", "tensor(x{})");
+ checkMerge("tensor(x{},y{})", "tensor<bfloat16>(x{},y{})", "tensor(x{},y{})");
+ checkMerge("tensor(x{},y{})", "tensor<float>(x{},y{})", "tensor(x{},y{})");
+ checkMerge("tensor(x{},y{})", "tensor<int8>(x{},y{})", "tensor(x{},y{})");
+ checkMerge("tensor(y{})", "tensor(y{})", "tensor(y{})");
+ checkMerge("tensor(x{})", "tensor(x[5])", "tensor(x{})");
+ checkMergeFails("tensor(a[10])", "tensor()");
+ checkMergeFails("tensor(a[10])", "tensor(x{},y{},z{})");
+ checkMergeFails("tensor<bfloat16>(x[5])", "tensor()");
+ checkMergeFails("tensor<bfloat16>(x{})", "tensor()");
+ checkMergeFails("tensor<float>(x[5])", "tensor()");
+ checkMergeFails("tensor<float>(x{})", "tensor()");
+ checkMergeFails("tensor<int8>(x{})", "tensor()");
+ checkMergeFails("tensor()", "tensor<int8>(x[5])");
+ checkMergeFails("tensor()", "tensor(x{})");
+ checkMergeFails("tensor(x[3])", "tensor(x[5])");
+ checkMergeFails("tensor(x[5])", "tensor(x[3])");
+ checkMergeFails("tensor(x{})", "tensor()");
+ checkMergeFails("tensor(x{},y{})", "tensor(x{},z{})");
+ checkMergeFails("tensor(y{})", "tensor()");
+ }
+
+ @Test
+ public void verifyRename() {
+ checkRename("tensor(x[10],y[20],z[30])", mkl("y"), mkl("a"), "tensor(a[20],x[10],z[30])");
+ checkRename("tensor(x{})", mkl("x"), mkl("y"), "tensor(y{})");
+ checkRename("tensor(x{},y[5])", mkl("x","y"), mkl("y","x"), "tensor(x[5],y{})");
+ checkRename("tensor(x[10],y[20],z[30])", mkl("x", "y", "z"), mkl("c", "a", "b"), "tensor(a[20],b[30],c[10])");
+ checkRename("tensor(x{})", mkl("x"), mkl("x"), "tensor(x{})");
+ checkRename("tensor(x{})", mkl("x"), mkl("y"), "tensor(y{})");
+ checkRename("tensor<float>(x{})", mkl("x"), mkl("y"), "tensor<float>(y{})");
+ checkRename("tensor<bfloat16>(x{})", mkl("x"), mkl("y"), "tensor<bfloat16>(y{})");
+ checkRename("tensor<int8>(x{})", mkl("x"), mkl("y"), "tensor<int8>(y{})");
+
+ checkRenameFails("tensor(x{})", mkl(), mkl());
+ checkRenameFails("tensor()", mkl(), mkl());
+ checkRenameFails("tensor(x{},y{})", mkl("x"), mkl("y","z"));
+ checkRenameFails("tensor(x{},y{})", mkl("x","y"), mkl("z"));
+ checkRenameFails("tensor(x[10],y[20],z[30])", mkl("y","z"), mkl("a", "x"));
+
+ // allowed (with warning) for now:
+ checkRename("tensor()", mkl("a"), mkl("b"), "tensor()");
+ checkRename("tensor(x{},y[10])", mkl("a"), mkl("b"), "tensor(x{},y[10])");
+ //checkRenameFails("tensor()", mkl("a"), mkl("b"));
+
+ }
+
+ @Test
+ public void verifyConcat() {
+ // types can be concatenated
+ checkConcat("tensor(y[7])", "tensor(x{})", "z", "tensor(x{},y[7],z[2])");
+ checkConcat("tensor()", "tensor()", "x", "tensor(x[2])");
+ checkConcat("tensor(x[2])", "tensor()", "x", "tensor(x[3])");
+ checkConcat("tensor(x[3])", "tensor(x[2])", "x", "tensor(x[5])");
+ checkConcat("tensor(x[2])", "tensor()", "y", "tensor(x[2],y[2])");
+ checkConcat("tensor(x[2])", "tensor(x[2])", "y", "tensor(x[2],y[2])");
+ checkConcat("tensor(x[2],y[2])", "tensor(x[3])", "x", "tensor(x[5],y[2])");
+ checkConcat("tensor(x[2],y[2])", "tensor(y[7])", "y", "tensor(x[2],y[9])");
+ checkConcat("tensor(x[5])", "tensor(y[7])", "z", "tensor(x[5],y[7],z[2])");
+ // cell type is handled correctly for concat
+ checkConcat("tensor(x[3])", "tensor(x[2])", "x", "tensor(x[5])");
+ checkConcat("tensor(x[3])", "tensor<float>(x[2])", "x", "tensor(x[5])");
+ checkConcat("tensor(x[3])", "tensor<bfloat16>(x[2])", "x", "tensor(x[5])");
+ checkConcat("tensor(x[3])", "tensor<int8>(x[2])", "x", "tensor(x[5])");
+ checkConcat("tensor<float>(x[3])", "tensor<float>(x[2])", "x", "tensor<float>(x[5])");
+ checkConcat("tensor<float>(x[3])", "tensor<bfloat16>(x[2])", "x", "tensor<float>(x[5])");
+ checkConcat("tensor<float>(x[3])", "tensor<int8>(x[2])", "x", "tensor<float>(x[5])");
+ checkConcat("tensor<bfloat16>(x[3])", "tensor<bfloat16>(x[2])", "x", "tensor<bfloat16>(x[5])");
+ checkConcat("tensor<bfloat16>(x[3])", "tensor<int8>(x[2])", "x", "tensor<float>(x[5])");
+ checkConcat("tensor<int8>(x[3])", "tensor<int8>(x[2])", "x", "tensor<int8>(x[5])");
+ // concat with number preserves cell type
+ checkConcat("tensor(x[3])", "tensor()", "x", "tensor(x[4])");
+ checkConcat("tensor<float>(x[3])", "tensor()", "x", "tensor<float>(x[4])");
+ checkConcat("tensor<bfloat16>(x[3])", "tensor()", "x", "tensor<bfloat16>(x[4])");
+ checkConcat("tensor<int8>(x[3])", "tensor()", "x", "tensor<int8>(x[4])");
+ // specific for Java
+ checkConcat("tensor(x[])", "tensor(x[2])", "x", "tensor(x[])");
+ checkConcat("tensor(x[])", "tensor(x[2])", "y", "tensor(x[],y[2])");
+ checkConcat("tensor(x[3])", "tensor(x[2])", "y", "tensor(x[2],y[2])");
+ // invalid combinations must fail
+ checkConcatFails("tensor(x{})", "tensor(x[2])", "x");
+ checkConcatFails("tensor(x{})", "tensor(x{})", "x");
+ checkConcatFails("tensor(x{})", "tensor()", "x");
+ }
+
+ @Test
+ public void verifyPeek() {
+ checkPeek("tensor(x[10],y[20],z[30])", mkl("x"), "tensor(y[20],z[30])");
+ checkPeek("tensor(x[10],y[20],z[30])", mkl("y"), "tensor(x[10],z[30])");
+ checkPeek("tensor<float>(x[10],y[20],z[30])", mkl("z"), "tensor<float>(x[10],y[20])");
+ checkPeek("tensor<bfloat16>(x[10],y[20],z[30])", mkl("z"), "tensor<bfloat16>(x[10],y[20])");
+ checkPeek("tensor<int8>(x[10],y[20],z[30])", mkl("z"), "tensor<int8>(x[10],y[20])");
+ checkPeek("tensor(x[10],y[20],z[30])", mkl("x", "z"), "tensor(y[20])");
+ checkPeek("tensor<float>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<float>(y[20])");
+ checkPeek("tensor<bfloat16>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<bfloat16>(y[20])");
+ checkPeek("tensor<int8>(x[10],y[20],z[30])", mkl("z", "x"), "tensor<int8>(y[20])");
+ checkPeek("tensor(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()");
+ checkPeek("tensor<float>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()");
+ checkPeek("tensor<bfloat16>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()");
+ checkPeek("tensor<int8>(x[10],y[20],z[30])", mkl("x", "y", "z"), "tensor()");
+ checkPeek("tensor(x[10],y{},z[30])", mkl("x"), "tensor(y{},z[30])");
+ checkPeek("tensor(x[10],y{},z[30])", mkl("y"), "tensor(x[10],z[30])");
+ checkPeek("tensor<float>(x[10],y{},z[30])", mkl("z"), "tensor<float>(x[10],y{})");
+ checkPeek("tensor<bfloat16>(x[10],y{},z[30])", mkl("z"), "tensor<bfloat16>(x[10],y{})");
+ checkPeek("tensor<int8>(x[10],y{},z[30])", mkl("z"), "tensor<int8>(x[10],y{})");
+ checkPeek("tensor(x[10],y{},z[30])", mkl("x", "z"), "tensor(y{})");
+ checkPeek("tensor<float>(x[10],y{},z[30])", mkl("z", "x"), "tensor<float>(y{})");
+ checkPeek("tensor<bfloat16>(x[10],y{},z[30])", mkl("z", "x"), "tensor<bfloat16>(y{})");
+ checkPeek("tensor<int8>(x[10],y{},z[30])", mkl("z", "x"), "tensor<int8>(y{})");
+ checkPeek("tensor(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()");
+ checkPeek("tensor<float>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()");
+ checkPeek("tensor<bfloat16>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()");
+ checkPeek("tensor<int8>(x[10],y{},z[30])", mkl("x", "y", "z"), "tensor()");
+ checkFullPeek("tensor(x[10],y[20],z[30])");
+ checkFullPeek("tensor<float>(x[10],y[20],z[30])");
+ checkFullPeek("tensor<bfloat16>(x[10],y[20],z[30])");
+ checkFullPeek("tensor<int8>(x[10],y[20],z[30])");
+ checkFullPeek("tensor(x[10],y{},z[30])");
+ checkFullPeek("tensor<float>(x[10],y{},z[30])");
+ checkFullPeek("tensor<bfloat16>(x[10],y{},z[30])");
+ checkFullPeek("tensor<int8>(x[10],y{},z[30])");
+ checkPeekFails("tensor()", mkl());
+ checkPeekFails("tensor()", mkl("x"));
+ checkPeekFails("tensor(y{})", mkl("x"));
+ checkPeekFails("tensor(y{})", mkl("y", "y"));
+ checkPeekFails("tensor<float>(y[10])", mkl("x"));
+ }
+
+ @Test
+ public void verifyCellCast() {
+ checkCast("tensor(x[10],y[20],z[30])", TensorType.Value.FLOAT, "tensor<float>(x[10],y[20],z[30])");
+ checkCasts("tensor<double>(x[10])");
+ checkCasts("tensor<float>(x[10])");
+ checkCasts("tensor<bfloat16>(x[10])");
+ checkCasts("tensor<int8>(x[10])");
+ checkCasts("tensor<double>(x{})");
+ checkCasts("tensor<float>(x{})");
+ checkCasts("tensor<bfloat16>(x{})");
+ checkCasts("tensor<int8>(x{})");
+ checkCasts("tensor<double>(x{},y[5])");
+ checkCasts("tensor<float>(x{},y[5])");
+ checkCasts("tensor<bfloat16>(x{},y[5])");
+ checkCasts("tensor<int8>(x{},y[5])");
+ checkCast("tensor()", TensorType.Value.DOUBLE, "tensor()");
+ checkCastFails("tensor()", TensorType.Value.FLOAT);
+ checkCastFails("tensor()", TensorType.Value.BFLOAT16);
+ checkCastFails("tensor()", TensorType.Value.INT8);
+ }
+
+ private static void checkMap(String specA, String expected) {
+ var a = TensorType.fromSpec(specA);
+ var result = TypeResolver.map(a);
+ assertEquals(expected, result.toString());
+ }
+
+ private static void checkJoin(String specA, String specB, String expected) {
+ var a = TensorType.fromSpec(specA);
+ var b = TensorType.fromSpec(specB);
+ var result = TypeResolver.join(a, b);
+ assertEquals(expected, result.toString());
+ }
+
+ private static void checkJoinFails(String specA, String specB) {
+ var a = TensorType.fromSpec(specA);
+ var b = TensorType.fromSpec(specB);
+ boolean caught = false;
+ try {
+ var result = TypeResolver.join(a, b);
+ System.err.println("join of "+a+" and "+b+" produces: "+result);
+ } catch (IllegalArgumentException e) {
+ caught = true;
+ }
+ assertTrue(caught);
+ }
+
+ private static void checkReduce(String specA, List<String> dims, String expected) {
+ var a = TensorType.fromSpec(specA);
+ var result = TypeResolver.reduce(a, dims);
+ assertEquals(expected, result.toString());
+ }
+
+ private static void checkFullReduce(String specA) {
+ String expected = "tensor()";
+ List<String> dims = new ArrayList<>();
+ checkReduce(specA, dims, expected);
+ var a = TensorType.fromSpec(specA);
+ for (var dim : a.dimensions()) {
+ dims.add(dim.name());
+ }
+ checkReduce(specA, dims, expected);
+ }
+
+ private static void checkReduceFails(String specA, String dim) {
+ var a = TensorType.fromSpec(specA);
+ boolean caught = false;
+ try {
+ var result = TypeResolver.reduce(a, mkl(dim));
+ System.err.println("Reduce "+specA+" with dim "+dim+" produces: "+result);
+ } catch (IllegalArgumentException e) {
+ caught = true;
+ }
+ assertTrue(caught);
+ }
+
+ private static void checkMerge(String specA, String specB, String expected) {
+ var a = TensorType.fromSpec(specA);
+ var b = TensorType.fromSpec(specB);
+ var result = TypeResolver.merge(a, b);
+ assertEquals(expected, result.toString());
+ }
+
+ private static void checkMergeFails(String specA, String specB) {
+ var a = TensorType.fromSpec(specA);
+ var b = TensorType.fromSpec(specB);
+ boolean caught = false;
+ try {
+ var result = TypeResolver.merge(a, b);
+ System.err.println("merge of "+a+" and "+b+" produces: "+result);
+ } catch (IllegalArgumentException e) {
+ caught = true;
+ }
+ assertTrue(caught);
+ }
+
+ private static void checkRename(String specA, List<String> fromDims, List<String> toDims, String expected) {
+ var a = TensorType.fromSpec(specA);
+ var result = TypeResolver.rename(a, fromDims, toDims);
+ assertEquals(expected, result.toString());
+ }
+
+ private static void checkRenameFails(String specA, List<String> fromDims, List<String> toDims) {
+ var a = TensorType.fromSpec(specA);
+ boolean caught = false;
+ try {
+ var result = TypeResolver.rename(a, fromDims, toDims);
+ System.err.println("rename "+a+" produces: "+result);
+ } catch (IllegalArgumentException e) {
+ caught = true;
+ }
+ assertTrue(caught);
+ }
+
+ private static void checkConcat(String specA, String specB, String dim, String expected) {
+ var a = TensorType.fromSpec(specA);
+ var b = TensorType.fromSpec(specB);
+ var result = TypeResolver.concat(a, b, dim);
+ assertEquals(expected, result.toString());
+ }
+
+ private static void checkConcatFails(String specA, String specB, String dim) {
+ var a = TensorType.fromSpec(specA);
+ var b = TensorType.fromSpec(specB);
+ boolean caught = false;
+ try {
+ var result = TypeResolver.concat(a, b, dim);
+ System.err.println("concat "+a+" and "+b+" along "+dim+" produces: "+result);
+ } catch (IllegalArgumentException e) {
+ caught = true;
+ }
+ assertTrue(caught);
+ }
+
+ private static void checkPeek(String specA, List<String> dims, String expected) {
+ var a = TensorType.fromSpec(specA);
+ var result = TypeResolver.peek(a, dims);
+ assertEquals(expected, result.toString());
+ }
+
+ private static void checkFullPeek(String specA) {
+ String expected = "tensor()";
+ List<String> dims = new ArrayList<>();
+ var a = TensorType.fromSpec(specA);
+ for (var dim : a.dimensions()) {
+ dims.add(dim.name());
+ }
+ checkPeek(specA, dims, expected);
+ }
+
+ private static void checkPeekFails(String specA, List<String> dims) {
+ var a = TensorType.fromSpec(specA);
+ boolean caught = false;
+ try {
+ var result = TypeResolver.peek(a, dims);
+ System.err.println("Peek "+specA+" with dims "+dims+" produces: "+result);
+ } catch (IllegalArgumentException e) {
+ caught = true;
+ }
+ assertTrue(caught);
+ }
+
+ private static void checkCast(String specA, TensorType.Value newValueType, String expected) {
+ var a = TensorType.fromSpec(specA);
+ var result = TypeResolver.cell_cast(a, newValueType);
+ assertEquals(expected, result.toString());
+ }
+
+ private static void checkCasts(String specA) {
+ var a = TensorType.fromSpec(specA);
+ for (var newValueType : TensorType.Value.values()) {
+ var result = TypeResolver.cell_cast(a, newValueType);
+ assertEquals(result.valueType(), newValueType);
+ assertEquals(result.dimensions(), a.dimensions());
+ }
+ }
+
+ private static void checkCastFails(String specA, TensorType.Value newValueType) {
+ var a = TensorType.fromSpec(specA);
+ boolean caught = false;
+ try {
+ var result = TypeResolver.cell_cast(a, newValueType);
+ System.err.println("cast of "+a+" to "+newValueType+" produces: "+result);
+ } catch (IllegalArgumentException e) {
+ caught = true;
+ }
+ assertTrue(caught);
+ }
+
+}
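The new test class above pins down the TypeResolver rules introduced in this change; the short standalone sketch below (TypeResolverDemo is a hypothetical class, assuming the TypeResolver API added here) highlights the cell-type behaviour the tests encode: int8 and bfloat16 inputs are promoted to float in computed results such as join and reduce, while peek preserves the input cell type.

import com.yahoo.tensor.TensorType;
import com.yahoo.tensor.TypeResolver;
import java.util.List;

public class TypeResolverDemo {
    public static void main(String[] args) {
        TensorType a = TensorType.fromSpec("tensor<int8>(y{})");
        TensorType b = TensorType.fromSpec("tensor<bfloat16>(x{})");
        TensorType c = TensorType.fromSpec("tensor<bfloat16>(x[10],y[20],z[30])");

        // Small cell types promote to float when values are combined:
        System.out.println(TypeResolver.join(b, a));              // tensor<float>(x{},y{})
        // Reducing computes new values, so the cell type is promoted as well:
        System.out.println(TypeResolver.reduce(c, List.of("z"))); // tensor<float>(x[10],y[20])
        // Peeking only selects existing cells, so the cell type is preserved:
        System.out.println(TypeResolver.peek(c, List.of("z")));   // tensor<bfloat16>(x[10],y[20])
    }
}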
diff --git a/vespajlib/src/test/java/com/yahoo/tensor/functions/ConcatTestCase.java b/vespajlib/src/test/java/com/yahoo/tensor/functions/ConcatTestCase.java
index 0476fe1c757..023beb6fb0d 100644
--- a/vespajlib/src/test/java/com/yahoo/tensor/functions/ConcatTestCase.java
+++ b/vespajlib/src/test/java/com/yahoo/tensor/functions/ConcatTestCase.java
@@ -95,6 +95,153 @@ public class ConcatTestCase {
assertConcat("tensor(x[],y[])", "tensor(x[2],y[4]):{{x:0,y:0}:1.0,{x:0,y:1}:3.0,{x:0,y:2}:5.0,{x:0,y:3}:6.0,{x:1,y:0}:2.0,{x:1,y:1}:4.0,{x:1,y:2}:5.0,{x:1,y:3}:6.0}", a, b, "y");
}
+ @Test
+ public void testAdvancedMixed() {
+ Tensor a = Tensor.from("tensor(a[2],b[2],c{},d[2],e{}):{"+
+ "{a:0,b:0,c:17,d:0,e:42}:1.0,"+
+ "{a:0,b:0,c:17,d:1,e:42}:2.0,"+
+ "{a:0,b:1,c:17,d:0,e:42}:3.0,"+
+ "{a:0,b:1,c:17,d:1,e:42}:4.0,"+
+ "{a:1,b:0,c:17,d:0,e:42}:5.0,"+
+ "{a:1,b:0,c:17,d:1,e:42}:6.0,"+
+ "{a:1,b:1,c:17,d:0,e:42}:7.0,"+
+ "{a:1,b:1,c:17,d:1,e:42}:8.0}");
+ Tensor b = Tensor.from("tensor(a[2],b[2],c{},f[2],g{}):{"+
+ "{a:0,b:0,c:17,f:0,g:666}:51.0,"+
+ "{a:0,b:0,c:17,f:1,g:666}:52.0,"+
+ "{a:0,b:1,c:17,f:0,g:666}:53.0,"+
+ "{a:0,b:1,c:17,f:1,g:666}:54.0,"+
+ "{a:1,b:0,c:17,f:0,g:666}:55.0,"+
+ "{a:1,b:0,c:17,f:1,g:666}:56.0,"+
+ "{a:1,b:1,c:17,f:0,g:666}:57.0,"+
+ "{a:1,b:1,c:17,f:1,g:666}:58.0}");
+
+ assertConcat("tensor(a[4],b[2],c{},d[2],e{},f[2],g{})",
+ "tensor(a[4],b[2],c{},d[2],e{},f[2],g{}):{"+
+ "{a:0,b:0,c:17,d:0,e:42,f:0,g:666}:1.0,"+
+ "{a:0,b:0,c:17,d:0,e:42,f:1,g:666}:1.0,"+
+ "{a:0,b:0,c:17,d:1,e:42,f:0,g:666}:2.0,"+
+ "{a:0,b:0,c:17,d:1,e:42,f:1,g:666}:2.0,"+
+ "{a:0,b:1,c:17,d:0,e:42,f:0,g:666}:3.0,"+
+ "{a:0,b:1,c:17,d:0,e:42,f:1,g:666}:3.0,"+
+ "{a:0,b:1,c:17,d:1,e:42,f:0,g:666}:4.0,"+
+ "{a:0,b:1,c:17,d:1,e:42,f:1,g:666}:4.0,"+
+ "{a:1,b:0,c:17,d:0,e:42,f:0,g:666}:5.0,"+
+ "{a:1,b:0,c:17,d:0,e:42,f:1,g:666}:5.0,"+
+ "{a:1,b:0,c:17,d:1,e:42,f:0,g:666}:6.0,"+
+ "{a:1,b:0,c:17,d:1,e:42,f:1,g:666}:6.0,"+
+ "{a:1,b:1,c:17,d:0,e:42,f:0,g:666}:7.0,"+
+ "{a:1,b:1,c:17,d:0,e:42,f:1,g:666}:7.0,"+
+ "{a:1,b:1,c:17,d:1,e:42,f:0,g:666}:8.0,"+
+ "{a:1,b:1,c:17,d:1,e:42,f:1,g:666}:8.0,"+
+ "{a:2,b:0,c:17,d:0,e:42,f:0,g:666}:51.0,"+
+ "{a:2,b:0,c:17,d:0,e:42,f:1,g:666}:52.0,"+
+ "{a:2,b:0,c:17,d:1,e:42,f:0,g:666}:51.0,"+
+ "{a:2,b:0,c:17,d:1,e:42,f:1,g:666}:52.0,"+
+ "{a:2,b:1,c:17,d:0,e:42,f:0,g:666}:53.0,"+
+ "{a:2,b:1,c:17,d:0,e:42,f:1,g:666}:54.0,"+
+ "{a:2,b:1,c:17,d:1,e:42,f:0,g:666}:53.0,"+
+ "{a:2,b:1,c:17,d:1,e:42,f:1,g:666}:54.0,"+
+ "{a:3,b:0,c:17,d:0,e:42,f:0,g:666}:55.0,"+
+ "{a:3,b:0,c:17,d:0,e:42,f:1,g:666}:56.0,"+
+ "{a:3,b:0,c:17,d:1,e:42,f:0,g:666}:55.0,"+
+ "{a:3,b:0,c:17,d:1,e:42,f:1,g:666}:56.0,"+
+ "{a:3,b:1,c:17,d:0,e:42,f:0,g:666}:57.0,"+
+ "{a:3,b:1,c:17,d:0,e:42,f:1,g:666}:58.0,"+
+ "{a:3,b:1,c:17,d:1,e:42,f:0,g:666}:57.0,"+
+ "{a:3,b:1,c:17,d:1,e:42,f:1,g:666}:58.0}",
+ a, b, "a");
+
+ assertConcat("tensor(a[2],b[2],c{},d[2],e{},f[2],g{},x[2])",
+ "tensor(a[2],b[2],c{},d[2],e{},f[2],g{},x[2]):{"+
+ "{a:0,b:0,c:17,d:0,e:42,f:0,g:666,x:0}:1.0,"+
+ "{a:0,b:0,c:17,d:0,e:42,f:1,g:666,x:0}:1.0,"+
+ "{a:0,b:0,c:17,d:1,e:42,f:0,g:666,x:0}:2.0,"+
+ "{a:0,b:0,c:17,d:1,e:42,f:1,g:666,x:0}:2.0,"+
+ "{a:0,b:1,c:17,d:0,e:42,f:0,g:666,x:0}:3.0,"+
+ "{a:0,b:1,c:17,d:0,e:42,f:1,g:666,x:0}:3.0,"+
+ "{a:0,b:1,c:17,d:1,e:42,f:0,g:666,x:0}:4.0,"+
+ "{a:0,b:1,c:17,d:1,e:42,f:1,g:666,x:0}:4.0,"+
+ "{a:1,b:0,c:17,d:0,e:42,f:0,g:666,x:0}:5.0,"+
+ "{a:1,b:0,c:17,d:0,e:42,f:1,g:666,x:0}:5.0,"+
+ "{a:1,b:0,c:17,d:1,e:42,f:0,g:666,x:0}:6.0,"+
+ "{a:1,b:0,c:17,d:1,e:42,f:1,g:666,x:0}:6.0,"+
+ "{a:1,b:1,c:17,d:0,e:42,f:0,g:666,x:0}:7.0,"+
+ "{a:1,b:1,c:17,d:0,e:42,f:1,g:666,x:0}:7.0,"+
+ "{a:1,b:1,c:17,d:1,e:42,f:0,g:666,x:0}:8.0,"+
+ "{a:1,b:1,c:17,d:1,e:42,f:1,g:666,x:0}:8.0,"+
+ "{a:0,b:0,c:17,d:0,e:42,f:0,g:666,x:1}:51.0,"+
+ "{a:0,b:0,c:17,d:0,e:42,f:1,g:666,x:1}:52.0,"+
+ "{a:0,b:0,c:17,d:1,e:42,f:0,g:666,x:1}:51.0,"+
+ "{a:0,b:0,c:17,d:1,e:42,f:1,g:666,x:1}:52.0,"+
+ "{a:0,b:1,c:17,d:0,e:42,f:0,g:666,x:1}:53.0,"+
+ "{a:0,b:1,c:17,d:0,e:42,f:1,g:666,x:1}:54.0,"+
+ "{a:0,b:1,c:17,d:1,e:42,f:0,g:666,x:1}:53.0,"+
+ "{a:0,b:1,c:17,d:1,e:42,f:1,g:666,x:1}:54.0,"+
+ "{a:1,b:0,c:17,d:0,e:42,f:0,g:666,x:1}:55.0,"+
+ "{a:1,b:0,c:17,d:0,e:42,f:1,g:666,x:1}:56.0,"+
+ "{a:1,b:0,c:17,d:1,e:42,f:0,g:666,x:1}:55.0,"+
+ "{a:1,b:0,c:17,d:1,e:42,f:1,g:666,x:1}:56.0,"+
+ "{a:1,b:1,c:17,d:0,e:42,f:0,g:666,x:1}:57.0,"+
+ "{a:1,b:1,c:17,d:0,e:42,f:1,g:666,x:1}:58.0,"+
+ "{a:1,b:1,c:17,d:1,e:42,f:0,g:666,x:1}:57.0,"+
+ "{a:1,b:1,c:17,d:1,e:42,f:1,g:666,x:1}:58.0}",
+ a, b, "x");
+
+ assertConcat("tensor(a[2],b[2],c{},d[3],e{},f[2],g{})",
+ "tensor(a[2],b[2],c{},d[3],e{},f[2],g{}):{"+
+ "{a:0,b:0,c:17,d:0,e:42,f:0,g:666}:1.0,"+
+ "{a:0,b:0,c:17,d:0,e:42,f:1,g:666}:1.0,"+
+ "{a:0,b:0,c:17,d:1,e:42,f:0,g:666}:2.0,"+
+ "{a:0,b:0,c:17,d:1,e:42,f:1,g:666}:2.0,"+
+ "{a:0,b:1,c:17,d:0,e:42,f:0,g:666}:3.0,"+
+ "{a:0,b:1,c:17,d:0,e:42,f:1,g:666}:3.0,"+
+ "{a:0,b:1,c:17,d:1,e:42,f:0,g:666}:4.0,"+
+ "{a:0,b:1,c:17,d:1,e:42,f:1,g:666}:4.0,"+
+ "{a:1,b:0,c:17,d:0,e:42,f:0,g:666}:5.0,"+
+ "{a:1,b:0,c:17,d:0,e:42,f:1,g:666}:5.0,"+
+ "{a:1,b:0,c:17,d:1,e:42,f:0,g:666}:6.0,"+
+ "{a:1,b:0,c:17,d:1,e:42,f:1,g:666}:6.0,"+
+ "{a:1,b:1,c:17,d:0,e:42,f:0,g:666}:7.0,"+
+ "{a:1,b:1,c:17,d:0,e:42,f:1,g:666}:7.0,"+
+ "{a:1,b:1,c:17,d:1,e:42,f:0,g:666}:8.0,"+
+ "{a:1,b:1,c:17,d:1,e:42,f:1,g:666}:8.0,"+
+ "{a:0,b:0,c:17,d:2,e:42,f:0,g:666}:51.0,"+
+ "{a:0,b:0,c:17,d:2,e:42,f:1,g:666}:52.0,"+
+ "{a:0,b:1,c:17,d:2,e:42,f:0,g:666}:53.0,"+
+ "{a:0,b:1,c:17,d:2,e:42,f:1,g:666}:54.0,"+
+ "{a:1,b:0,c:17,d:2,e:42,f:0,g:666}:55.0,"+
+ "{a:1,b:0,c:17,d:2,e:42,f:1,g:666}:56.0,"+
+ "{a:1,b:1,c:17,d:2,e:42,f:0,g:666}:57.0,"+
+ "{a:1,b:1,c:17,d:2,e:42,f:1,g:666}:58.0}",
+ a, b, "d");
+ }
+
+ @Test
+ public void testWithEmptyMixed() {
+ Tensor a = Tensor.from("tensor(a[2],c{},d[2]):{"+
+ "{a:0,c:17,d:0}:1.0,"+
+ "{a:0,c:17,d:1}:2.0,"+
+ "{a:1,c:17,d:0}:3.0,"+
+ "{a:1,c:17,d:1}:4.0}");
+ Tensor b = Tensor.from("tensor(b{}):{}");
+ Tensor c = Tensor.from("tensor(c{}):{}");
+ Tensor d = Tensor.from("tensor(c{},d[3]):{}");
+
+ assertConcat("tensor(a[3],b{},c{},d[2])", "tensor(a[3],b{},c{},d[2]):{}",
+ a, b, "a");
+ assertConcat("tensor(a[2],b{},c{},d[2],x[2])", "tensor(a[2],b{},c{},d[2],x[2]):{}",
+ a, b, "x");
+
+ assertConcat("tensor(a[3],c{},d[2])", "tensor(a[3],c{},d[2]):{}",
+ a, c, "a");
+ assertConcat("tensor(a[2],c{},d[2],x[2])", "tensor(a[2],c{},d[2],x[2]):{}",
+ a, c, "x");
+
+ assertConcat("tensor(a[2],c{},d[5])", "tensor(a[2],c{},d[5]):{}",
+ a, d, "d");
+ }
+
private void assertConcat(String expected, Tensor a, Tensor b, String dimension) {
assertConcat(null, expected, a, b, dimension);
}
diff --git a/vespajlib/src/test/java/com/yahoo/tensor/functions/ScalarFunctionsTestCase.java b/vespajlib/src/test/java/com/yahoo/tensor/functions/ScalarFunctionsTestCase.java
new file mode 100644
index 00000000000..5890bac6c96
--- /dev/null
+++ b/vespajlib/src/test/java/com/yahoo/tensor/functions/ScalarFunctionsTestCase.java
@@ -0,0 +1,66 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.tensor.functions;
+
+import java.util.function.DoubleUnaryOperator;
+
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+
+public class ScalarFunctionsTestCase {
+
+ void expect_oddf(DoubleUnaryOperator foo, double input, double output) {
+ double res = foo.applyAsDouble(input);
+ assertEquals("apply("+foo+","+input+") -> ", output, res, 0.000000001);
+ input *= -1;
+ output *= -1;
+ res = foo.applyAsDouble(input);
+ assertEquals("apply("+foo+","+input+") -> "+res, output, res, 0.000000001);
+ }
+
+ @Test
+ public void testErrorFunction() {
+ var func = ScalarFunctions.erf();
+ // from wikipedia:
+ expect_oddf(func, 0.0, 0.0);
+ expect_oddf(func, 0.02, 0.022564575);
+ expect_oddf(func, 0.04, 0.045111106);
+ expect_oddf(func, 0.06, 0.067621594);
+ expect_oddf(func, 0.08, 0.090078126);
+ expect_oddf(func, 0.1, 0.112462916);
+ expect_oddf(func, 0.2, 0.222702589);
+ expect_oddf(func, 0.3, 0.328626759);
+ expect_oddf(func, 0.4, 0.428392355);
+ expect_oddf(func, 0.5, 0.520499878);
+ expect_oddf(func, 0.6, 0.603856091);
+ expect_oddf(func, 0.7, 0.677801194);
+ expect_oddf(func, 0.8, 0.742100965);
+ expect_oddf(func, 0.9, 0.796908212);
+ expect_oddf(func, 1.0, 0.842700793);
+ expect_oddf(func, 1.1, 0.88020507);
+ expect_oddf(func, 1.2, 0.910313978);
+ expect_oddf(func, 1.3, 0.934007945);
+ expect_oddf(func, 1.4, 0.95228512);
+ expect_oddf(func, 1.5, 0.966105146);
+ expect_oddf(func, 1.6, 0.976348383);
+ expect_oddf(func, 1.7, 0.983790459);
+ expect_oddf(func, 1.8, 0.989090502);
+ expect_oddf(func, 1.9, 0.992790429);
+ expect_oddf(func, 2.0, 0.995322265);
+ expect_oddf(func, 2.1, 0.997020533);
+ expect_oddf(func, 2.2, 0.998137154);
+ expect_oddf(func, 2.3, 0.998856823);
+ expect_oddf(func, 2.4, 0.999311486);
+ expect_oddf(func, 2.5, 0.999593048);
+ expect_oddf(func, 3.0, 0.99997791);
+ expect_oddf(func, 3.5, 0.999999257);
+ // from MPFR:
+ expect_oddf(func, 4.0, 0.99999998458);
+ expect_oddf(func, 4.2412109375, 0.9999999980);
+ expect_oddf(func, 4.2734375, 0.99999999849);
+ expect_oddf(func, 4.3203125, 0.9999999990);
+ expect_oddf(func, 5.0, 0.999999999998);
+ expect_oddf(func, 5.921875, 1.0);
+ }
+
+}
diff --git a/vespalib/CMakeLists.txt b/vespalib/CMakeLists.txt
index 3bdd477fcb6..5cf06093977 100644
--- a/vespalib/CMakeLists.txt
+++ b/vespalib/CMakeLists.txt
@@ -26,6 +26,7 @@ vespa_define_module(
src/tests/benchmark_timer
src/tests/box
src/tests/btree
+ src/tests/btree/btree_store
src/tests/child_process
src/tests/component
src/tests/compress
diff --git a/vespalib/src/tests/btree/btree_store/CMakeLists.txt b/vespalib/src/tests/btree/btree_store/CMakeLists.txt
new file mode 100644
index 00000000000..2913267bea2
--- /dev/null
+++ b/vespalib/src/tests/btree/btree_store/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_btree_store_test_app TEST
+ SOURCES
+ btree_store_test.cpp
+ DEPENDS
+ vespalib
+ GTest::GTest
+)
+vespa_add_test(NAME vespalib_btree_store_test_app COMMAND vespalib_btree_store_test_app COST 30)
diff --git a/vespalib/src/tests/btree/btree_store/btree_store_test.cpp b/vespalib/src/tests/btree/btree_store/btree_store_test.cpp
new file mode 100644
index 00000000000..7eaf298ab40
--- /dev/null
+++ b/vespalib/src/tests/btree/btree_store/btree_store_test.cpp
@@ -0,0 +1,145 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/btree/btreestore.h>
+#include <vespa/vespalib/btree/btreenodeallocator.hpp>
+#include <vespa/vespalib/btree/btreeroot.hpp>
+#include <vespa/vespalib/btree/btreestore.hpp>
+#include <vespa/vespalib/datastore/buffer_type.hpp>
+#include <vespa/vespalib/gtest/gtest.h>
+
+using vespalib::GenerationHandler;
+using vespalib::datastore::EntryRef;
+
+namespace vespalib::btree {
+
+using MyTraits = BTreeTraits<4, 4, 31, false>;
+using TreeStore = BTreeStore<int, int, btree::NoAggregated, std::less<int>, MyTraits>;
+
+class BTreeStoreTest : public ::testing::Test {
+protected:
+ GenerationHandler _gen_handler;
+ TreeStore _store;
+
+ BTreeStoreTest();
+ ~BTreeStoreTest();
+
+ void inc_generation()
+ {
+ _store.freeze();
+ _store.transferHoldLists(_gen_handler.getCurrentGeneration());
+ _gen_handler.incGeneration();
+ _store.trimHoldLists(_gen_handler.getFirstUsedGeneration());
+ }
+
+ EntryRef add_sequence(int start_key, int end_key)
+ {
+ std::vector<TreeStore::KeyDataType> additions;
+ std::vector<TreeStore::KeyType> removals;
+ EntryRef root;
+ for (int i = start_key; i < end_key; ++i) {
+ additions.emplace_back(i, 0);
+ }
+ _store.apply(root,
+ &additions[0], &additions[0] + additions.size(),
+ &removals[0], &removals[0] + removals.size());
+ return root;
+ }
+ static std::vector<int> make_exp_sequence(int start_key, int end_key)
+ {
+ std::vector<int> sequence;
+ for (int i = start_key; i < end_key; ++i) {
+ sequence.emplace_back(i);
+ }
+ return sequence;
+ }
+ std::vector<int> get_sequence(EntryRef root) const {
+ std::vector<int> sequence;
+ _store.foreach_frozen_key(root, [&sequence](int key) { sequence.emplace_back(key); });
+ return sequence;
+ }
+
+ void test_compact_sequence(uint32_t sequence_length);
+};
+
+BTreeStoreTest::BTreeStoreTest()
+ : _gen_handler(),
+ _store()
+{
+}
+
+BTreeStoreTest::~BTreeStoreTest()
+{
+ _store.clearBuilder();
+ inc_generation();
+}
+
+void
+BTreeStoreTest::test_compact_sequence(uint32_t sequence_length)
+{
+ auto &store = _store;
+ EntryRef ref1 = add_sequence(4, 4 + sequence_length);
+ EntryRef ref2 = add_sequence(5, 5 + sequence_length);
+ EntryRef old_ref1 = ref1;
+ EntryRef old_ref2 = ref2;
+ std::vector<EntryRef> refs;
+ for (int i = 0; i < 1000; ++i) {
+ refs.emplace_back(add_sequence(i + 6, i + 6 + sequence_length));
+ }
+ for (auto& ref : refs) {
+ store.clear(ref);
+ }
+ inc_generation();
+ auto usage_before = store.getMemoryUsage();
+ for (uint32_t pass = 0; pass < 15; ++pass) {
+ auto to_hold = store.start_compact_worst_buffers();
+ ref1 = store.move(ref1);
+ ref2 = store.move(ref2);
+ store.finishCompact(to_hold);
+ inc_generation();
+ }
+ EXPECT_NE(old_ref1, ref1);
+ EXPECT_NE(old_ref2, ref2);
+ EXPECT_EQ(make_exp_sequence(4, 4 + sequence_length), get_sequence(ref1));
+ EXPECT_EQ(make_exp_sequence(5, 5 + sequence_length), get_sequence(ref2));
+ auto usage_after = store.getMemoryUsage();
+ EXPECT_GT(usage_before.deadBytes(), usage_after.deadBytes());
+ store.clear(ref1);
+ store.clear(ref2);
+}
+
+TEST_F(BTreeStoreTest, require_that_nodes_for_multiple_btrees_are_compacted)
+{
+ auto &store = this->_store;
+ EntryRef ref1 = add_sequence(4, 40);
+ EntryRef ref2 = add_sequence(100, 130);
+ store.clear(add_sequence(1000, 20000));
+ inc_generation();
+ auto usage_before = store.getMemoryUsage();
+ for (uint32_t pass = 0; pass < 15; ++pass) {
+ auto to_hold = store.start_compact_worst_btree_nodes();
+ store.move_btree_nodes(ref1);
+ store.move_btree_nodes(ref2);
+ store.finish_compact_worst_btree_nodes(to_hold);
+ inc_generation();
+ }
+ EXPECT_EQ(make_exp_sequence(4, 40), get_sequence(ref1));
+ EXPECT_EQ(make_exp_sequence(100, 130), get_sequence(ref2));
+ auto usage_after = store.getMemoryUsage();
+ EXPECT_GT(usage_before.deadBytes(), usage_after.deadBytes());
+ store.clear(ref1);
+ store.clear(ref2);
+}
+
+TEST_F(BTreeStoreTest, require_that_short_arrays_are_compacted)
+{
+ test_compact_sequence(4);
+}
+
+TEST_F(BTreeStoreTest, require_that_btree_roots_are_compacted)
+{
+ test_compact_sequence(10);
+}
+
+}
+
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/vespalib/src/tests/btree/btree_test.cpp b/vespalib/src/tests/btree/btree_test.cpp
index 82fa4b6b50a..640d07f7439 100644
--- a/vespalib/src/tests/btree/btree_test.cpp
+++ b/vespalib/src/tests/btree/btree_test.cpp
@@ -1562,14 +1562,10 @@ make_iterators(Tree& t, std::vector<int>& list, std::vector<typename Tree::Const
class KeyRangeValidator
{
std::vector<int> &_list;
- size_t _start_pos;
- size_t _end_pos;
size_t _curr_pos;
public:
- KeyRangeValidator(std::vector<int> &list, size_t start_pos, size_t end_pos)
+ KeyRangeValidator(std::vector<int> &list, size_t start_pos)
: _list(list),
- _start_pos(start_pos),
- _end_pos(end_pos),
_curr_pos(start_pos)
{
}
@@ -1619,13 +1615,13 @@ TEST_F(BTreeTest, require_that_compaction_works)
EXPECT_EQ(before_iterators[i] - after_iterators[j], static_cast<ssize_t>(i - j));
EXPECT_EQ(after_iterators[j] - before_iterators[i], static_cast<ssize_t>(j - i));
if (i <= j) {
- KeyRangeValidator validate_keys(before_list, i, j);
+ KeyRangeValidator validate_keys(before_list, i);
EXPECT_EQ(i, validate_keys.curr_pos());
before_iterators[i].foreach_key_range(after_iterators[j], [&validate_keys](int key) { validate_keys(key); });
EXPECT_EQ(j, validate_keys.curr_pos());
}
if (j <= i) {
- KeyRangeValidator validate_keys(before_list, j, i);
+ KeyRangeValidator validate_keys(before_list, j);
EXPECT_EQ(j, validate_keys.curr_pos());
after_iterators[j].foreach_key_range(before_iterators[i], [&validate_keys](int key) { validate_keys(key); });
EXPECT_EQ(i, validate_keys.curr_pos());
diff --git a/vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp b/vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp
index da9bc1284fa..67195ecd8dd 100644
--- a/vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp
+++ b/vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp
@@ -277,4 +277,17 @@ TEST_F(DataStoreShardedHashTest, normalize_values_works)
}
}
+TEST_F(DataStoreShardedHashTest, compact_worst_shard_works)
+{
+ populate_sample_data();
+ for (uint32_t i = 10; i < 50; ++i) {
+ remove(i);
+ }
+ commit();
+ auto usage_before = _hash_map.get_memory_usage();
+ _hash_map.compact_worst_shard();
+ auto usage_after = _hash_map.get_memory_usage();
+ EXPECT_GT(usage_before.deadBytes(), usage_after.deadBytes());
+}
+
GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp b/vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp
index ce1ebe395ce..7b46196780f 100644
--- a/vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp
+++ b/vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp
@@ -34,25 +34,37 @@ public:
return resolve(lhs).ref() == resolve(rhs).ref();
}
size_t hash(const EntryRef rhs) const override {
- return rhs.ref();
+ return rhs.valid() ? rhs.ref() : _to_find.ref();
}
};
template <typename UniqueStoreDictionaryType>
-struct DictionaryReadTest : public ::testing::Test {
+struct UniqueStoreDictionaryTest : public ::testing::Test {
UniqueStoreDictionaryType dict;
std::unique_ptr<IUniqueStoreDictionaryReadSnapshot> snapshot;
+ vespalib::GenerationHandler gen_handler;
- DictionaryReadTest()
+ UniqueStoreDictionaryTest()
: dict(std::make_unique<Comparator>(0)),
- snapshot()
+ snapshot(),
+ gen_handler()
{
}
- DictionaryReadTest& add(uint32_t value) {
+ UniqueStoreDictionaryTest& add(uint32_t value) {
auto result = dict.add(Comparator(value), [=]() noexcept { return EntryRef(value); });
assert(result.inserted());
return *this;
}
+ UniqueStoreDictionaryTest& remove(uint32_t value) {
+ dict.remove(Comparator(value), EntryRef(value));
+ return *this;
+ }
+ void inc_generation() {
+ dict.freeze();
+ dict.transfer_hold_lists(gen_handler.getCurrentGeneration());
+ gen_handler.incGeneration();
+ dict.trim_hold_lists(gen_handler.getFirstUsedGeneration());
+ }
void take_snapshot() {
dict.freeze();
snapshot = dict.get_read_snapshot();
@@ -61,8 +73,8 @@ struct DictionaryReadTest : public ::testing::Test {
}
};
-using DictionaryReadTestTypes = ::testing::Types<DefaultUniqueStoreDictionary, UniqueStoreDictionary<DefaultDictionary, IUniqueStoreDictionary, ShardedHashMap>, UniqueStoreDictionary<NoBTreeDictionary, IUniqueStoreDictionary, ShardedHashMap>>;
-VESPA_GTEST_TYPED_TEST_SUITE(DictionaryReadTest, DictionaryReadTestTypes);
+using UniqueStoreDictionaryTestTypes = ::testing::Types<DefaultUniqueStoreDictionary, UniqueStoreDictionary<DefaultDictionary, IUniqueStoreDictionary, ShardedHashMap>, UniqueStoreDictionary<NoBTreeDictionary, IUniqueStoreDictionary, ShardedHashMap>>;
+VESPA_GTEST_TYPED_TEST_SUITE(UniqueStoreDictionaryTest, UniqueStoreDictionaryTestTypes);
// Disable warnings emitted by gtest generated files when using typed tests
#pragma GCC diagnostic push
@@ -70,7 +82,7 @@ VESPA_GTEST_TYPED_TEST_SUITE(DictionaryReadTest, DictionaryReadTestTypes);
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif
-TYPED_TEST(DictionaryReadTest, can_count_occurrences_of_a_key)
+TYPED_TEST(UniqueStoreDictionaryTest, can_count_occurrences_of_a_key)
{
this->add(3).add(5).take_snapshot();
EXPECT_EQ(0, this->snapshot->count(Comparator(2)));
@@ -79,7 +91,7 @@ TYPED_TEST(DictionaryReadTest, can_count_occurrences_of_a_key)
EXPECT_EQ(1, this->snapshot->count(Comparator(5)));
}
-TYPED_TEST(DictionaryReadTest, can_count_occurrences_of_keys_in_a_range)
+TYPED_TEST(UniqueStoreDictionaryTest, can_count_occurrences_of_keys_in_a_range)
{
if (!this->dict.get_has_btree_dictionary()) {
return;
@@ -95,7 +107,7 @@ TYPED_TEST(DictionaryReadTest, can_count_occurrences_of_keys_in_a_range)
EXPECT_EQ(0, this->snapshot->count_in_range(Comparator(5), Comparator(3)));
}
-TYPED_TEST(DictionaryReadTest, can_iterate_all_keys)
+TYPED_TEST(UniqueStoreDictionaryTest, can_iterate_all_keys)
{
using EntryRefVector = std::vector<EntryRef>;
this->add(3).add(5).add(7).take_snapshot();
@@ -104,7 +116,7 @@ TYPED_TEST(DictionaryReadTest, can_iterate_all_keys)
EXPECT_EQ(EntryRefVector({EntryRef(3), EntryRef(5), EntryRef(7)}), refs);
}
-TYPED_TEST(DictionaryReadTest, memory_usage_is_reported)
+TYPED_TEST(UniqueStoreDictionaryTest, memory_usage_is_reported)
{
auto initial_usage = this->dict.get_memory_usage();
this->add(10);
@@ -114,6 +126,43 @@ TYPED_TEST(DictionaryReadTest, memory_usage_is_reported)
EXPECT_EQ(0, usage.allocatedBytesOnHold());
}
+TYPED_TEST(UniqueStoreDictionaryTest, compaction_works)
+{
+ for (uint32_t i = 1; i < 100; ++i) {
+ this->add(i);
+ }
+ for (uint32_t i = 10; i < 100; ++i) {
+ this->remove(i);
+ }
+ this->inc_generation();
+ auto btree_memory_usage_before = this->dict.get_btree_memory_usage();
+ auto hash_memory_usage_before = this->dict.get_hash_memory_usage();
+ for (uint32_t i = 0; i < 15; ++i) {
+ this->dict.compact_worst(true, true);
+ }
+ this->inc_generation();
+ auto btree_memory_usage_after = this->dict.get_btree_memory_usage();
+ auto hash_memory_usage_after = this->dict.get_hash_memory_usage();
+ if (this->dict.get_has_btree_dictionary()) {
+ EXPECT_LT(btree_memory_usage_after.deadBytes(), btree_memory_usage_before.deadBytes());
+ } else {
+ EXPECT_EQ(btree_memory_usage_after.deadBytes(), btree_memory_usage_before.deadBytes());
+ }
+ if (this->dict.get_has_hash_dictionary()) {
+ EXPECT_LT(hash_memory_usage_after.deadBytes(), hash_memory_usage_before.deadBytes());
+ } else {
+ EXPECT_EQ(hash_memory_usage_after.deadBytes(), hash_memory_usage_before.deadBytes());
+ }
+ std::vector<EntryRef> exp_refs;
+ for (uint32_t i = 1; i < 10; ++i) {
+ exp_refs.emplace_back(EntryRef(i));
+ }
+ this->take_snapshot();
+ std::vector<EntryRef> refs;
+ this->snapshot->foreach_key([&](EntryRef ref){ refs.emplace_back(ref); });
+ EXPECT_EQ(exp_refs, refs);
+}
+
#pragma GCC diagnostic pop
GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/vespalib/src/tests/require/require_test.cpp b/vespalib/src/tests/require/require_test.cpp
index 65f4d049843..b31cbd33723 100644
--- a/vespalib/src/tests/require/require_test.cpp
+++ b/vespalib/src/tests/require/require_test.cpp
@@ -156,4 +156,19 @@ TEST(RequireTest, uncomment_to_manually_check_uncompilable_code) {
//-----------------------------------------------------------------------------
+TEST(RequireTest, explicit_require_failure) {
+ EXPECT_THROW(
+ {
+ try { REQUIRE_FAILED("this is my message"); }
+ catch(const E &e) {
+ fprintf(stderr, "e.getMessage() is >>>%s<<<\n", e.getMessage().c_str());
+ fprintf(stderr, "e.getLocation() is >>>%s<<<\n", e.getLocation().c_str());
+ fprintf(stderr, "e.what() is >>>%s<<<\n", e.what());
+ throw;
+ }
+ }, E);
+}
+
+//-----------------------------------------------------------------------------
+
GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/vespalib/src/tests/stllike/hash_test.cpp b/vespalib/src/tests/stllike/hash_test.cpp
index 59081c0ab73..5643fac52bf 100644
--- a/vespalib/src/tests/stllike/hash_test.cpp
+++ b/vespalib/src/tests/stllike/hash_test.cpp
@@ -582,4 +582,11 @@ TEST("test that hash table clear does not resize hashtable") {
EXPECT_EQUAL(128u, a.capacity());
}
+TEST("test that hash nodes have expected sizes")
+{
+ EXPECT_EQUAL(8u, sizeof(hash_node<int8_t>));
+ EXPECT_EQUAL(8u, sizeof(hash_node<int32_t>));
+ EXPECT_EQUAL(16u, sizeof(hash_node<int64_t>));
+}
+
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/vespa/vespalib/btree/btreerootbase.h b/vespalib/src/vespa/vespalib/btree/btreerootbase.h
index 1813e6de3d9..f1f44b6aae6 100644
--- a/vespalib/src/vespa/vespalib/btree/btreerootbase.h
+++ b/vespalib/src/vespa/vespalib/btree/btreerootbase.h
@@ -52,6 +52,11 @@ public:
allocator.needFreeze(this);
}
+ void prepare_hold() {
+        // The entry for _root is owned by the new copy of BTreeRootBase.
+ _root = BTreeNode::Ref();
+ }
+
void setRoots(BTreeNode::Ref newRoot) {
_root = newRoot;
_frozenRoot = newRoot.ref();
diff --git a/vespalib/src/vespa/vespalib/btree/btreestore.h b/vespalib/src/vespa/vespalib/btree/btreestore.h
index d822c72de60..0125d74cfc8 100644
--- a/vespalib/src/vespa/vespalib/btree/btreestore.h
+++ b/vespalib/src/vespa/vespalib/btree/btreestore.h
@@ -389,6 +389,13 @@ public:
void
foreach_frozen(EntryRef ref, FunctionType func) const;
+ std::vector<uint32_t> start_compact_worst_btree_nodes();
+ void finish_compact_worst_btree_nodes(const std::vector<uint32_t>& to_hold);
+ void move_btree_nodes(EntryRef ref);
+
+ std::vector<uint32_t> start_compact_worst_buffers();
+ EntryRef move(EntryRef ref);
+
private:
static constexpr size_t MIN_BUFFER_ARRAYS = 128u;
template <typename FunctionType, bool Frozen>
diff --git a/vespalib/src/vespa/vespalib/btree/btreestore.hpp b/vespalib/src/vespa/vespalib/btree/btreestore.hpp
index bd7331bc996..9cde2979f68 100644
--- a/vespalib/src/vespa/vespalib/btree/btreestore.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btreestore.hpp
@@ -968,4 +968,70 @@ getAggregated(const EntryRef ref) const
return a;
}
+template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
+ typename TraitsT, typename AggrCalcT>
+std::vector<uint32_t>
+BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::
+start_compact_worst_btree_nodes()
+{
+ _builder.clear();
+ return _allocator.start_compact_worst();
+}
+
+template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
+ typename TraitsT, typename AggrCalcT>
+void
+BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::
+finish_compact_worst_btree_nodes(const std::vector<uint32_t>& to_hold)
+{
+ _allocator.finishCompact(to_hold);
+}
+
+template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
+ typename TraitsT, typename AggrCalcT>
+void
+BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::
+move_btree_nodes(EntryRef ref)
+{
+ if (ref.valid()) {
+ RefType iRef(ref);
+ uint32_t clusterSize = getClusterSize(iRef);
+ if (clusterSize == 0) {
+ BTreeType *tree = getWTreeEntry(iRef);
+ tree->move_nodes(_allocator);
+ }
+ }
+}
+
+template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
+ typename TraitsT, typename AggrCalcT>
+std::vector<uint32_t>
+BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::
+start_compact_worst_buffers()
+{
+ freeze();
+ return _store.startCompactWorstBuffers(true, false);
+}
+
+template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
+ typename TraitsT, typename AggrCalcT>
+typename BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::EntryRef
+BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::
+move(EntryRef ref)
+{
+ if (!ref.valid() || !_store.getCompacting(ref)) {
+ return ref;
+ }
+ RefType iRef(ref);
+ uint32_t clusterSize = getClusterSize(iRef);
+ if (clusterSize == 0) {
+ BTreeType *tree = getWTreeEntry(iRef);
+ auto ref_and_ptr = allocBTreeCopy(*tree);
+ tree->prepare_hold();
+ return ref_and_ptr.ref;
+ }
+ const KeyDataType *shortArray = getKeyDataEntry(iRef, clusterSize);
+ return allocKeyDataCopy(shortArray, clusterSize).ref;
+}
+
}
diff --git a/vespalib/src/vespa/vespalib/datastore/bufferstate.h b/vespalib/src/vespa/vespalib/datastore/bufferstate.h
index 4cf25de512a..f8738f17daa 100644
--- a/vespalib/src/vespa/vespalib/datastore/bufferstate.h
+++ b/vespalib/src/vespa/vespalib/datastore/bufferstate.h
@@ -166,6 +166,7 @@ public:
size_t getExtraHoldBytes() const { return _extraHoldBytes; }
bool getCompacting() const { return _compacting; }
void setCompacting() { _compacting = true; }
+ uint32_t get_used_arrays() const noexcept { return _usedElems / _arraySize; }
void fallbackResize(uint32_t bufferId, size_t elementsNeeded, void *&buffer, Alloc &holdBuffer);
bool isActive(uint32_t typeId) const {
diff --git a/vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h b/vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h
index 4f53a872822..13c389c9a31 100644
--- a/vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h
+++ b/vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h
@@ -39,6 +39,8 @@ public:
virtual bool get_has_hash_dictionary() const = 0;
virtual vespalib::MemoryUsage get_btree_memory_usage() const = 0;
virtual vespalib::MemoryUsage get_hash_memory_usage() const = 0;
+ virtual bool has_held_buffers() const = 0;
+ virtual void compact_worst(bool compact_btree_dictionary, bool compact_hash_dictionary) = 0;
};
}
diff --git a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp
index 36d68873176..c2f1bf29deb 100644
--- a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp
@@ -194,4 +194,30 @@ ShardedHashMap::normalize_values(std::function<EntryRef(EntryRef)> normalize)
return changed;
}
+bool
+ShardedHashMap::has_held_buffers() const
+{
+ return _gen_holder.getHeldBytes() != 0;
+}
+
+void
+ShardedHashMap::compact_worst_shard()
+{
+ size_t worst_index = 0u;
+ size_t worst_dead_bytes = 0u;
+ for (size_t i = 0; i < num_shards; ++i) {
+ auto map = _maps[i].load(std::memory_order_relaxed);
+ if (map != nullptr) {
+ auto memory_usage = map->get_memory_usage();
+ if (memory_usage.deadBytes() > worst_dead_bytes) {
+ worst_index = i;
+ worst_dead_bytes = memory_usage.deadBytes();
+ }
+ }
+ }
+ if (worst_dead_bytes > 0u) {
+ alloc_shard(worst_index);
+ }
+}
+
}
diff --git a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h
index aa787421634..a75e7baac5e 100644
--- a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h
+++ b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h
@@ -58,6 +58,8 @@ public:
void foreach_key(std::function<void(EntryRef)> callback) const;
void move_keys(std::function<EntryRef(EntryRef)> callback);
bool normalize_values(std::function<EntryRef(EntryRef)> normalize);
+ bool has_held_buffers() const;
+ void compact_worst_shard();
};
}
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store.hpp
index a883b2351de..cb8619831e1 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store.hpp
@@ -107,7 +107,7 @@ private:
for (const auto bufferId : _bufferIdsToCompact) {
BufferState &state = _dataStore.getBufferState(bufferId);
_compacting_buffer[bufferId] = true;
- _mapping[bufferId].resize(state.size());
+ _mapping[bufferId].resize(state.get_used_arrays());
}
}
@@ -116,8 +116,9 @@ private:
assert(iRef.valid());
uint32_t buffer_id = iRef.bufferId();
if (_compacting_buffer[buffer_id]) {
- assert(iRef.offset() < _mapping[buffer_id].size());
- EntryRef &mappedRef = _mapping[buffer_id][iRef.offset()];
+ auto &inner_mapping = _mapping[buffer_id];
+ assert(iRef.unscaled_offset() < inner_mapping.size());
+ EntryRef &mappedRef = inner_mapping[iRef.unscaled_offset()];
assert(!mappedRef.valid());
EntryRef newRef = _store.move(oldRef);
mappedRef = newRef;
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h b/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h
index faaa2294ff3..1775fda7ae3 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h
@@ -90,6 +90,8 @@ public:
bool get_has_hash_dictionary() const override;
vespalib::MemoryUsage get_btree_memory_usage() const override;
vespalib::MemoryUsage get_hash_memory_usage() const override;
+ bool has_held_buffers() const override;
+ void compact_worst(bool compact_btree_dictionary, bool compact_hash_dictionary) override;
};
}
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp
index 4a9cf3d96b2..87fe66d105a 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp
@@ -318,4 +318,41 @@ UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::get_hash_memo
return {};
}
+template <typename BTreeDictionaryT, typename ParentT, typename HashDictionaryT>
+bool
+UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::has_held_buffers() const
+{
+ if constexpr (has_btree_dictionary) {
+ if (this->_btree_dict.getAllocator().getNodeStore().has_held_buffers()) {
+ return true;
+ }
+ }
+ if constexpr (has_hash_dictionary) {
+ if (this->_hash_dict.has_held_buffers()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+template <typename BTreeDictionaryT, typename ParentT, typename HashDictionaryT>
+void
+UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::compact_worst(bool compact_btree_dictionary, bool compact_hash_dictionary)
+{
+ if constexpr (has_btree_dictionary) {
+ if (compact_btree_dictionary) {
+ this->_btree_dict.compact_worst();
+ }
+ } else {
+ (void) compact_btree_dictionary;
+ }
+ if constexpr (has_hash_dictionary) {
+ if (compact_hash_dictionary) {
+ this->_hash_dict.compact_worst_shard();
+ }
+ } else {
+ (void) compact_hash_dictionary;
+ }
+}
+
}
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.hpp
index 378fc54750d..c4443978a5d 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.hpp
@@ -46,7 +46,7 @@ UniqueStoreEnumerator<RefT>::allocate_enum_values()
for (uint32_t bufferId = 0; bufferId < RefType::numBuffers(); ++bufferId) {
const BufferState &state = _store.getBufferState(bufferId);
if (state.isActive()) {
- _enumValues[bufferId].resize(state.size() / state.getArraySize());
+ _enumValues[bufferId].resize(state.get_used_arrays());
}
}
}
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h b/vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h
index 1fa74f59f1a..e805e9c577a 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h
@@ -4,6 +4,7 @@
#include "entryref.h"
#include <vector>
+#include <vespa/vespalib/stllike/allocator.h>
namespace vespalib::datastore {
@@ -18,7 +19,7 @@ public:
protected:
std::vector<bool> _compacting_buffer;
- std::vector<std::vector<EntryRef>> _mapping;
+ std::vector<std::vector<EntryRef, allocator_large<EntryRef>>> _mapping;
public:
UniqueStoreRemapper()
: _compacting_buffer(),
diff --git a/vespalib/src/vespa/vespalib/stllike/hashtable.h b/vespalib/src/vespa/vespalib/stllike/hashtable.h
index 1a982b97208..b94672aaa06 100644
--- a/vespalib/src/vespa/vespalib/stllike/hashtable.h
+++ b/vespalib/src/vespa/vespalib/stllike/hashtable.h
@@ -172,7 +172,7 @@ private:
}
}
}
- char _node[sizeof(V)] alignas(V);
+ alignas(V) char _node[sizeof(V)];
next_t _next;
};
diff --git a/vespalib/src/vespa/vespalib/trace/tracenode.cpp b/vespalib/src/vespa/vespalib/trace/tracenode.cpp
index 12dd51ac677..02883eda808 100644
--- a/vespalib/src/vespa/vespalib/trace/tracenode.cpp
+++ b/vespalib/src/vespa/vespalib/trace/tracenode.cpp
@@ -37,6 +37,12 @@ struct Cmp {
}
};
+void sortChildren(std::vector<TraceNode> & children) __attribute((noinline));
+void
+sortChildren(std::vector<TraceNode> & children) {
+ std::sort(children.begin(), children.end(), Cmp());
+}
+
} // namespace <unnamed>
@@ -121,7 +127,7 @@ TraceNode::sort()
child.sort();
}
if (!isStrict()) {
- std::sort(_children.begin(), _children.end(), Cmp());
+ sortChildren(_children);
}
}
return *this;
diff --git a/vespalib/src/vespa/vespalib/util/require.h b/vespalib/src/vespa/vespalib/util/require.h
index a4283520314..1d2069aa226 100644
--- a/vespalib/src/vespa/vespalib/util/require.h
+++ b/vespalib/src/vespa/vespalib/util/require.h
@@ -131,4 +131,11 @@ void handle_require_eq_failure [[noreturn]] (const A& a, const B& b, const char
VESPA_STRINGIZE(a) " == " VESPA_STRINGIZE(b), \
__FILE__, __LINE__)
+/**
+ * Signal the failure of some requirement with a message.
+ * Can be used instead of abort()
+ **/
+#define REQUIRE_FAILED(msg) \
+ vespalib::handle_require_failure(msg, __FILE__, __LINE__)
+
} // namespace
diff --git a/zookeeper-client-common/src/main/java/com/yahoo/vespa/zookeeper/client/ZkClientConfigBuilder.java b/zookeeper-client-common/src/main/java/com/yahoo/vespa/zookeeper/client/ZkClientConfigBuilder.java
index 8236fdedc71..6ae7fe26cd7 100644
--- a/zookeeper-client-common/src/main/java/com/yahoo/vespa/zookeeper/client/ZkClientConfigBuilder.java
+++ b/zookeeper-client-common/src/main/java/com/yahoo/vespa/zookeeper/client/ZkClientConfigBuilder.java
@@ -39,7 +39,7 @@ public class ZkClientConfigBuilder {
this(defaultTlsContext);
}
- public ZkClientConfigBuilder(TlsContext tlsContext) {
+ ZkClientConfigBuilder(TlsContext tlsContext) {
this.tlsContext = tlsContext;
}
@@ -82,10 +82,6 @@ public class ZkClientConfigBuilder {
}
private static Optional<TlsContext> getTlsContext() {
- // TODO(bjorncs) Remove handling of temporary feature flag
- boolean temporaryFeatureFlag = Optional.ofNullable(System.getenv("VESPA_USE_TLS_FOR_ZOOKEEPER_CLIENT")).map(Boolean::parseBoolean).orElse(false);
- if (!temporaryFeatureFlag) return Optional.empty();
-
if (TransportSecurityUtils.getInsecureMixedMode() == MixedMode.PLAINTEXT_CLIENT_MIXED_SERVER) return Optional.empty();
return TransportSecurityUtils.getSystemTlsContext();
}
diff --git a/zookeeper-server/CMakeLists.txt b/zookeeper-server/CMakeLists.txt
index 401ac3ffd9d..9dee07390b5 100644
--- a/zookeeper-server/CMakeLists.txt
+++ b/zookeeper-server/CMakeLists.txt
@@ -1,3 +1,4 @@
# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
add_subdirectory(zookeeper-server-common)
add_subdirectory(zookeeper-server-3.6.2)
+add_subdirectory(zookeeper-server-3.6.3)
diff --git a/zookeeper-server/pom.xml b/zookeeper-server/pom.xml
index 56b66903098..919e2b99848 100644
--- a/zookeeper-server/pom.xml
+++ b/zookeeper-server/pom.xml
@@ -14,6 +14,7 @@
<modules>
<module>zookeeper-server-common</module>
<module>zookeeper-server-3.6.2</module>
+ <module>zookeeper-server-3.6.3</module>
</modules>
<dependencies>
<dependency>
diff --git a/zookeeper-server/zookeeper-server-3.6.3/CMakeLists.txt b/zookeeper-server/zookeeper-server-3.6.3/CMakeLists.txt
new file mode 100644
index 00000000000..ddccb1f0dbc
--- /dev/null
+++ b/zookeeper-server/zookeeper-server-3.6.3/CMakeLists.txt
@@ -0,0 +1,4 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(zookeeper-server-3.6.3)
+# Enable this when 3.6.3 is the wanted default version (and remove the symlinks for other versions)
+#install_symlink(lib/jars/zookeeper-server-3.6.3-jar-with-dependencies.jar lib/jars/zookeeper-server-jar-with-dependencies.jar)
diff --git a/simplemetrics/pom.xml b/zookeeper-server/zookeeper-server-3.6.3/pom.xml
index 6ca02febefd..a4568d3585f 100644
--- a/simplemetrics/pom.xml
+++ b/zookeeper-server/zookeeper-server-3.6.3/pom.xml
@@ -1,73 +1,72 @@
<?xml version="1.0"?>
-<!-- Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
+<!-- Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>com.yahoo.vespa</groupId>
- <artifactId>parent</artifactId>
+ <artifactId>zookeeper-server</artifactId>
<version>7-SNAPSHOT</version>
- <relativePath>../parent/pom.xml</relativePath>
+ <relativePath>../pom.xml</relativePath>
</parent>
- <artifactId>simplemetrics</artifactId>
+ <artifactId>zookeeper-server-3.6.3</artifactId>
<packaging>container-plugin</packaging>
<version>7-SNAPSHOT</version>
- <description> </description>
<dependencies>
<dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.hdrhistogram</groupId>
- <artifactId>HdrHistogram</artifactId>
- <scope>compile</scope>
- </dependency>
- <dependency>
<groupId>com.yahoo.vespa</groupId>
- <artifactId>component</artifactId>
+ <artifactId>zookeeper-server-common</artifactId>
<version>${project.version}</version>
- <scope>provided</scope>
</dependency>
<dependency>
<groupId>com.yahoo.vespa</groupId>
- <artifactId>vespajlib</artifactId>
+ <artifactId>zookeeper-client-common</artifactId>
<version>${project.version}</version>
- <scope>provided</scope>
+ <exclusions>
+ <exclusion>
+ <!-- Don't use ZK version from zookeeper-client-common -->
+ <groupId>org.apache.zookeeper</groupId>
+ <artifactId>zookeeper</artifactId>
+ </exclusion>
+ </exclusions>
</dependency>
<dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>config-bundle</artifactId>
- <version>${project.version}</version>
- <scope>provided</scope>
+ <groupId>org.apache.zookeeper</groupId>
+ <artifactId>zookeeper</artifactId>
+ <version>3.6.3</version>
</dependency>
<dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>jdisc_core</artifactId>
- <version>${project.version}</version>
- <scope>provided</scope>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-jdk14</artifactId>
</dependency>
<dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>container-di</artifactId>
- <version>${project.version}</version>
- <scope>provided</scope>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-log4j12</artifactId>
+ <version>${slf4j.version}</version>
</dependency>
+ <!-- snappy-java and metrics-core are included here
+ to be able to work with ZooKeeper 3.6.3 due to
+ class loading issues -->
<dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>container-core</artifactId>
- <version>${project.version}</version>
- <scope>provided</scope>
+ <groupId>io.dropwizard.metrics</groupId>
+ <artifactId>metrics-core</artifactId>
+ <scope>compile</scope>
+ <version>3.2.5</version>
+ </dependency>
+ <dependency>
+ <groupId>org.xerial.snappy</groupId>
+ <artifactId>snappy-java</artifactId>
+ <scope>compile</scope>
+ <version>1.1.7</version>
+ </dependency>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>bundle-plugin</artifactId>
- <extensions>true</extensions>
- </plugin>
- <plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
@@ -82,6 +81,7 @@
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<redirectTestOutputToFile>${test.hide}</redirectTestOutputToFile>
+ <forkMode>once</forkMode>
</configuration>
</plugin>
<plugin>
@@ -92,15 +92,13 @@
</configuration>
</plugin>
<plugin>
- <!-- Remove when v2.1 is the default
- - it is required by maven-project-info-reports-plugin v2.2 -->
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-site-plugin</artifactId>
- <version>2.1</version>
- </plugin>
- <plugin>
<groupId>com.yahoo.vespa</groupId>
- <artifactId>abi-check-plugin</artifactId>
+ <artifactId>bundle-plugin</artifactId>
+ <extensions>true</extensions>
+ <configuration>
+ <importPackage>com.sun.management</importPackage>
+ <bundleSymbolicName>zookeeper-server</bundleSymbolicName>
+ </configuration>
</plugin>
</plugins>
</build>
diff --git a/zookeeper-server/zookeeper-server-3.6.3/src/main/java/com/yahoo/vespa/zookeeper/ReconfigurableVespaZooKeeperServer.java b/zookeeper-server/zookeeper-server-3.6.3/src/main/java/com/yahoo/vespa/zookeeper/ReconfigurableVespaZooKeeperServer.java
new file mode 100644
index 00000000000..0b08966e241
--- /dev/null
+++ b/zookeeper-server/zookeeper-server-3.6.3/src/main/java/com/yahoo/vespa/zookeeper/ReconfigurableVespaZooKeeperServer.java
@@ -0,0 +1,43 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.zookeeper;
+
+import com.google.inject.Inject;
+import com.yahoo.cloud.config.ZookeeperServerConfig;
+import com.yahoo.component.AbstractComponent;
+
+import java.nio.file.Path;
+import java.time.Duration;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * Starts or reconfigures the ZooKeeper cluster.
+ * The QuorumPeer conditionally created here is owned by the Reconfigurer;
+ * if the Reconfigurer already has a peer, that peer is reused here when a start or shutdown is required.
+ *
+ * @author hmusum
+ */
+public class ReconfigurableVespaZooKeeperServer extends AbstractComponent implements VespaZooKeeperServer {
+
+ private final AtomicReference<QuorumPeer> peer = new AtomicReference<>();
+
+ @Inject
+ public ReconfigurableVespaZooKeeperServer(Reconfigurer reconfigurer, ZookeeperServerConfig zookeeperServerConfig) {
+ reconfigurer.startOrReconfigure(zookeeperServerConfig, this, VespaQuorumPeer::new, peer::set);
+ }
+
+ @Override
+ public void shutdown() {
+ peer.get().shutdown(Duration.ofMinutes(1));
+ }
+
+ @Override
+ public void start(Path configFilePath) {
+ peer.get().start(configFilePath);
+ }
+
+ @Override
+ public boolean reconfigurable() {
+ return true;
+ }
+
+}
diff --git a/zookeeper-server/zookeeper-server-3.6.3/src/main/java/com/yahoo/vespa/zookeeper/VespaMtlsAuthenticationProvider.java b/zookeeper-server/zookeeper-server-3.6.3/src/main/java/com/yahoo/vespa/zookeeper/VespaMtlsAuthenticationProvider.java
new file mode 100644
index 00000000000..7a0efbb6c24
--- /dev/null
+++ b/zookeeper-server/zookeeper-server-3.6.3/src/main/java/com/yahoo/vespa/zookeeper/VespaMtlsAuthenticationProvider.java
@@ -0,0 +1,41 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.zookeeper;
+
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.common.X509Exception;
+import org.apache.zookeeper.data.Id;
+import org.apache.zookeeper.server.ServerCnxn;
+import org.apache.zookeeper.server.auth.AuthenticationProvider;
+import org.apache.zookeeper.server.auth.X509AuthenticationProvider;
+
+import java.security.cert.X509Certificate;
+import java.util.logging.Logger;
+
+/**
+ * An {@link AuthenticationProvider} to be used in combination with Vespa mTLS.
+ *
+ * @author bjorncs
+ */
+public class VespaMtlsAuthenticationProvider extends X509AuthenticationProvider {
+
+ private static final Logger log = Logger.getLogger(VespaMtlsAuthenticationProvider.class.getName());
+
+ public VespaMtlsAuthenticationProvider() throws X509Exception { super(null, null);}
+
+ @Override
+ public KeeperException.Code handleAuthentication(ServerCnxn cnxn, byte[] authData) {
+ // Vespa's mTLS peer authorization rules are enforced by the underlying trust manager implementation.
+ // The client is authorized once the SSL handshake has completed.
+ X509Certificate[] certificateChain = (X509Certificate[]) cnxn.getClientCertificateChain();
+ if (certificateChain == null || certificateChain.length == 0) {
+ log.warning("Client not authenticated - should not be possible with clientAuth=NEED");
+ return KeeperException.Code.AUTHFAILED;
+ }
+ X509Certificate certificate = certificateChain[0];
+ cnxn.addAuthInfo(new Id(getScheme(), certificate.getSubjectX500Principal().getName()));
+ return KeeperException.Code.OK;
+ }
+
+ @Override public String getScheme() { return "x509"; }
+
+}
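Illustrative sketch, not part of the change set: the provider above only maps an already-authenticated client certificate to a ZooKeeper identity; authorization itself is enforced by the TLS trust manager. Once the provider is registered under the x509 scheme (the Configurator later in this diff does so via the zookeeper.authProvider.x509 system property), server-side code can refer to such clients through that scheme. The subject DN below is a placeholder, not a value used anywhere in this change:

    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.data.ACL;
    import org.apache.zookeeper.data.Id;

    // The id value is the client certificate's subject DN, as attached to the
    // connection by VespaMtlsAuthenticationProvider.handleAuthentication().
    ACL acl = new ACL(ZooDefs.Perms.ALL, new Id("x509", "CN=vespa-node-1"));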
diff --git a/zookeeper-server/zookeeper-server-3.6.3/src/main/java/com/yahoo/vespa/zookeeper/VespaQuorumPeer.java b/zookeeper-server/zookeeper-server-3.6.3/src/main/java/com/yahoo/vespa/zookeeper/VespaQuorumPeer.java
new file mode 100644
index 00000000000..113669b2e76
--- /dev/null
+++ b/zookeeper-server/zookeeper-server-3.6.3/src/main/java/com/yahoo/vespa/zookeeper/VespaQuorumPeer.java
@@ -0,0 +1,60 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.zookeeper;
+
+import com.yahoo.protect.Process;
+import org.apache.zookeeper.server.admin.AdminServer;
+import org.apache.zookeeper.server.quorum.QuorumPeerConfig;
+import org.apache.zookeeper.server.quorum.QuorumPeerMain;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.time.Duration;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+/**
+ * Starts/stops a ZooKeeper server. Extends QuorumPeerMain to be able to call initializeAndRun() and wraps
+ * exceptions so it can be used by code that does not depend on ZooKeeper.
+ *
+ * @author hmusum
+ */
+class VespaQuorumPeer extends QuorumPeerMain implements QuorumPeer {
+
+ private static final Logger log = java.util.logging.Logger.getLogger(VespaQuorumPeer.class.getName());
+
+ @Override
+ public void start(Path path) {
+ initializeAndRun(new String[]{ path.toFile().getAbsolutePath()});
+ }
+
+ @Override
+ public void shutdown(Duration timeout) {
+ if (quorumPeer != null) {
+ log.log(Level.INFO, "Shutting down ZooKeeper server");
+ try {
+ quorumPeer.shutdown();
+ quorumPeer.join(timeout.toMillis()); // Wait for shutdown to complete
+ if (quorumPeer.isAlive())
+ throw new IllegalStateException("Peer still alive after " + timeout);
+ } catch (RuntimeException | InterruptedException e) {
+ // If shutdown fails, we have no other option than forcing the JVM to stop and letting it be restarted.
+ //
+ // When a VespaZooKeeperServer component receives a new config, the container will try to start a new
+ // server with the new config; this will fail until the old server is deconstructed. If the old server
+ // fails to deconstruct/shut down, the new one will never start, and in that case forcing a restart is
+ // the better option.
+ Process.logAndDie("Failed to shut down ZooKeeper properly, forcing shutdown", e);
+ }
+ }
+ }
+
+ @Override
+ protected void initializeAndRun(String[] args) {
+ try {
+ super.initializeAndRun(args);
+ } catch (QuorumPeerConfig.ConfigException | IOException | AdminServer.AdminServerException e) {
+ throw new RuntimeException("Exception when initializing or running ZooKeeper server", e);
+ }
+ }
+
+}
diff --git a/zookeeper-server/zookeeper-server-3.6.3/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperAdminImpl.java b/zookeeper-server/zookeeper-server-3.6.3/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperAdminImpl.java
new file mode 100644
index 00000000000..d92527fb5fd
--- /dev/null
+++ b/zookeeper-server/zookeeper-server-3.6.3/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperAdminImpl.java
@@ -0,0 +1,59 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.zookeeper;
+
+import com.yahoo.vespa.zookeeper.client.ZkClientConfigBuilder;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.admin.ZooKeeperAdmin;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+/**
+ * @author hmusum
+ */
+@SuppressWarnings("unused") // Created by injection
+public class VespaZooKeeperAdminImpl implements VespaZooKeeperAdmin {
+
+ private static final Logger log = java.util.logging.Logger.getLogger(VespaZooKeeperAdminImpl.class.getName());
+
+ @Override
+ public void reconfigure(String connectionSpec, String joiningServers, String leavingServers) throws ReconfigException {
+ ZooKeeperAdmin zooKeeperAdmin = null;
+ try {
+ zooKeeperAdmin = createAdmin(connectionSpec);
+ long fromConfig = -1;
+ // Using string parameters because the List variant of reconfigure fails to join empty lists (observed on 3.5.6, fixed in 3.7.0)
+ byte[] appliedConfig = zooKeeperAdmin.reconfigure(joiningServers, leavingServers, null, fromConfig, null);
+ log.log(Level.INFO, "Applied ZooKeeper config: " + new String(appliedConfig, StandardCharsets.UTF_8));
+ } catch (KeeperException e) {
+ if (retryOn(e))
+ throw new ReconfigException(e);
+ else
+ throw new RuntimeException(e);
+ } catch (IOException | InterruptedException e) {
+ throw new RuntimeException(e);
+ } finally {
+ if (zooKeeperAdmin != null) {
+ try {
+ zooKeeperAdmin.close();
+ } catch (InterruptedException e) {
+ // Ignored; the admin client is being discarded anyway
+ }
+ }
+ }
+ }
+
+ private ZooKeeperAdmin createAdmin(String connectionSpec) throws IOException {
+ return new ZooKeeperAdmin(connectionSpec, (int) sessionTimeout().toMillis(),
+ (event) -> log.log(Level.INFO, event.toString()), new ZkClientConfigBuilder().toConfig());
+ }
+
+ private static boolean retryOn(KeeperException e) {
+ return e instanceof KeeperException.ReconfigInProgress ||
+ e instanceof KeeperException.ConnectionLossException ||
+ e instanceof KeeperException.NewConfigNoQuorum;
+ }
+
+}
+
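For illustration only, with placeholder host names, ports and server ids (none of these values come from the change above): the string variant of reconfigure takes dynamic-configuration entries in the same server.N=host:quorumPort:electionPort;clientPort form that the Configurator writes, while leaving servers are typically given by id. A rough usage sketch:

    VespaZooKeeperAdmin admin = new VespaZooKeeperAdminImpl();
    try {
        admin.reconfigure("node0:2181,node1:2181",          // connection spec
                          "server.2=node2:2888:3888;2181",  // joining server(s)
                          "1");                             // leaving server id(s)
    } catch (ReconfigException e) {
        // transient condition (e.g. a reconfiguration already in progress); the caller may retry
    }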
diff --git a/zookeeper-server/zookeeper-server-3.6.3/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperServerImpl.java b/zookeeper-server/zookeeper-server-3.6.3/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperServerImpl.java
new file mode 100644
index 00000000000..430aab802c2
--- /dev/null
+++ b/zookeeper-server/zookeeper-server-3.6.3/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperServerImpl.java
@@ -0,0 +1,47 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.zookeeper;
+
+import com.google.inject.Inject;
+import com.yahoo.cloud.config.ZookeeperServerConfig;
+import com.yahoo.component.AbstractComponent;
+
+import java.nio.file.Path;
+import java.time.Duration;
+
+/**
+ * @author Ulf Lilleengen
+ * @author Harald Musum
+ */
+public class VespaZooKeeperServerImpl extends AbstractComponent implements VespaZooKeeperServer {
+
+ private final VespaQuorumPeer peer;
+ private final ZooKeeperRunner runner;
+
+ @Inject
+ public VespaZooKeeperServerImpl(ZookeeperServerConfig zookeeperServerConfig) {
+ this.peer = new VespaQuorumPeer();
+ this.runner = new ZooKeeperRunner(zookeeperServerConfig, this);
+ }
+
+ @Override
+ public void deconstruct() {
+ runner.shutdown();
+ super.deconstruct();
+ }
+
+ @Override
+ public void shutdown() {
+ peer.shutdown(Duration.ofMinutes(1));
+ }
+
+ @Override
+ public void start(Path configFilePath) {
+ peer.start(configFilePath);
+ }
+
+ @Override
+ public boolean reconfigurable() {
+ return false;
+ }
+
+}
diff --git a/zookeeper-server/zookeeper-server-3.6.3/src/main/java/org/apache/zookeeper/common/NetUtils.java b/zookeeper-server/zookeeper-server-3.6.3/src/main/java/org/apache/zookeeper/common/NetUtils.java
new file mode 100644
index 00000000000..79d063ba70a
--- /dev/null
+++ b/zookeeper-server/zookeeper-server-3.6.3/src/main/java/org/apache/zookeeper/common/NetUtils.java
@@ -0,0 +1,94 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.zookeeper.common;
+
+import java.net.Inet6Address;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+
+/**
+ * This class contains common network utilities, such as printing IPv6 literals correctly.
+ */
+public class NetUtils {
+
+ // Note: Changed from the original to use the hostname from InetSocketAddress if one exists
+ public static String formatInetAddr(InetSocketAddress addr) {
+ String hostName = addr.getHostName();
+ if (hostName != null) {
+ return String.format("%s:%s", hostName, addr.getPort());
+ }
+
+ InetAddress ia = addr.getAddress();
+
+ if (ia == null) {
+ return String.format("%s:%s", addr.getHostString(), addr.getPort());
+ }
+ if (ia instanceof Inet6Address) {
+ return String.format("[%s]:%s", ia.getHostAddress(), addr.getPort());
+ } else {
+ return String.format("%s:%s", ia.getHostAddress(), addr.getPort());
+ }
+ }
+
+ /**
+ * Separates host and port from given host port string if host port string is enclosed
+ * within square bracket.
+ *
+ * @param hostPort host port string
+ * @return String[]{host, port} if host port string is host:port
+ * or String[] {host, port:port} if host port string is host:port:port
+ * or String[] {host} if host port string is host
+ * or String[]{} if not an IPv6 host port string.
+ */
+ public static String[] getIPV6HostAndPort(String hostPort) {
+ if (hostPort.startsWith("[")) {
+ int i = hostPort.lastIndexOf(']');
+ if (i < 0) {
+ throw new IllegalArgumentException(
+ hostPort + " starts with '[' but has no matching ']'");
+ }
+ String host = hostPort.substring(1, i);
+ if (host.isEmpty()) {
+ throw new IllegalArgumentException(host + " is empty.");
+ }
+ if (hostPort.length() > i + 1) {
+ return getHostPort(hostPort, i, host);
+ }
+ return new String[] { host };
+ } else {
+ //Not an IPV6 host port string
+ return new String[] {};
+ }
+ }
+
+ private static String[] getHostPort(String hostPort, int indexOfClosingBracket, String host) {
+ // [127::1]:2181 , check that the ':' separator exists
+ if (hostPort.charAt(indexOfClosingBracket + 1) != ':') {
+ throw new IllegalArgumentException(hostPort + " does not have : after ]");
+ }
+ // [127::1]: scenario
+ if (indexOfClosingBracket + 2 == hostPort.length()) {
+ throw new IllegalArgumentException(hostPort + " doesn't have a port after colon.");
+ }
+ // do not include the ']' and ':' characters in the returned port
+ String port = hostPort.substring(indexOfClosingBracket + 2);
+ return new String[] { host, port };
+ }
+}
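To make the parsing contract above concrete, a small illustrative sketch (the results follow directly from the code; the inputs themselves are made up):

    // IPv6 literal with port
    NetUtils.getIPV6HostAndPort("[127::1]:2181");   // -> { "127::1", "2181" }
    // IPv6 literal without port
    NetUtils.getIPV6HostAndPort("[127::1]");         // -> { "127::1" }
    // not an IPv6 host:port string
    NetUtils.getIPV6HostAndPort("myhost:2181");      // -> { }

    // formatInetAddr prefers the hostname when one exists (the Vespa-specific change)
    // and falls back to a bracketed IPv6 literal otherwise.
    NetUtils.formatInetAddr(new java.net.InetSocketAddress("myhost", 2181)); // -> "myhost:2181"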
diff --git a/zookeeper-server/zookeeper-server-3.6.3/src/main/java/org/apache/zookeeper/server/VespaNettyServerCnxnFactory.java b/zookeeper-server/zookeeper-server-3.6.3/src/main/java/org/apache/zookeeper/server/VespaNettyServerCnxnFactory.java
new file mode 100644
index 00000000000..7efec454667
--- /dev/null
+++ b/zookeeper-server/zookeeper-server-3.6.3/src/main/java/org/apache/zookeeper/server/VespaNettyServerCnxnFactory.java
@@ -0,0 +1,37 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package org.apache.zookeeper.server;
+
+import com.yahoo.vespa.zookeeper.Configurator;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.logging.Logger;
+
+/**
+ * Overrides secure setting with value from {@link Configurator}.
+ * Workaround for incorrect handling of clientSecurePort in combination with ZooKeeper Dynamic Reconfiguration in 3.6.2
+ * See https://issues.apache.org/jira/browse/ZOOKEEPER-3577.
+ *
+ * Using package {@link org.apache.zookeeper.server} as {@link NettyServerCnxnFactory#NettyServerCnxnFactory()} is package-private.
+ *
+ * @author bjorncs
+ */
+public class VespaNettyServerCnxnFactory extends NettyServerCnxnFactory {
+
+ private static final Logger log = Logger.getLogger(VespaNettyServerCnxnFactory.class.getName());
+
+ private final boolean isSecure;
+
+ public VespaNettyServerCnxnFactory() {
+ super();
+ this.isSecure = Configurator.VespaNettyServerCnxnFactory_isSecure;
+ boolean portUnificationEnabled = Boolean.getBoolean(NettyServerCnxnFactory.PORT_UNIFICATION_KEY);
+ log.info(String.format("For %h: isSecure=%b, portUnification=%b", this, isSecure, portUnificationEnabled));
+ }
+
+ @Override
+ public void configure(InetSocketAddress addr, int maxClientCnxns, int backlog, boolean secure) throws IOException {
+ log.info(String.format("For %h: configure() invoked with parameter 'secure'=%b, overridden to %b", this, secure, isSecure));
+ super.configure(addr, maxClientCnxns, backlog, isSecure);
+ }
+}
diff --git a/zookeeper-server/zookeeper-server-common/src/main/java/com/yahoo/vespa/zookeeper/Configurator.java b/zookeeper-server/zookeeper-server-common/src/main/java/com/yahoo/vespa/zookeeper/Configurator.java
index d662bab8463..f302798589c 100644
--- a/zookeeper-server/zookeeper-server-common/src/main/java/com/yahoo/vespa/zookeeper/Configurator.java
+++ b/zookeeper-server/zookeeper-server-common/src/main/java/com/yahoo/vespa/zookeeper/Configurator.java
@@ -3,7 +3,9 @@
package com.yahoo.vespa.zookeeper;
import com.yahoo.cloud.config.ZookeeperServerConfig;
+import com.yahoo.security.tls.MixedMode;
import com.yahoo.security.tls.TlsContext;
+import com.yahoo.security.tls.TransportSecurityUtils;
import com.yahoo.vespa.defaults.Defaults;
import java.io.FileWriter;
@@ -40,11 +42,14 @@ public class Configurator {
System.setProperty("zookeeper.authProvider.x509", "com.yahoo.vespa.zookeeper.VespaMtlsAuthenticationProvider");
}
- void writeConfigToDisk(Optional<TlsContext> tlsContext) {
+ void writeConfigToDisk() { writeConfigToDisk(VespaTlsConfig.fromSystem()); }
+
+ // override of Vespa TLS config for unit testing
+ void writeConfigToDisk(VespaTlsConfig vespaTlsConfig) {
configFilePath.toFile().getParentFile().mkdirs();
try {
- writeZooKeeperConfigFile(zookeeperServerConfig, tlsContext);
+ writeZooKeeperConfigFile(zookeeperServerConfig, vespaTlsConfig);
writeMyIdFile(zookeeperServerConfig);
} catch (IOException e) {
throw new RuntimeException("Error writing zookeeper config", e);
@@ -52,13 +57,13 @@ public class Configurator {
}
private void writeZooKeeperConfigFile(ZookeeperServerConfig config,
- Optional<TlsContext> tlsContext) throws IOException {
+ VespaTlsConfig vespaTlsConfig) throws IOException {
try (FileWriter writer = new FileWriter(configFilePath.toFile())) {
- writer.write(transformConfigToString(config, tlsContext));
+ writer.write(transformConfigToString(config, vespaTlsConfig));
}
}
- private String transformConfigToString(ZookeeperServerConfig config, Optional<TlsContext> tlsContext) {
+ private String transformConfigToString(ZookeeperServerConfig config, VespaTlsConfig vespaTlsConfig) {
StringBuilder sb = new StringBuilder();
sb.append("tickTime=").append(config.tickTime()).append("\n");
sb.append("initLimit=").append(config.initLimit()).append("\n");
@@ -80,8 +85,8 @@ public class Configurator {
sb.append("skipACL=yes").append("\n");
ensureThisServerIsRepresented(config.myid(), config.server());
config.server().forEach(server -> addServerToCfg(sb, server, config.clientPort()));
- sb.append(new TlsQuorumConfig().createConfig(config, tlsContext));
- sb.append(new TlsClientServerConfig().createConfig(config, tlsContext));
+ sb.append(new TlsQuorumConfig().createConfig(vespaTlsConfig));
+ sb.append(new TlsClientServerConfig().createConfig(vespaTlsConfig));
return sb.toString();
}
@@ -143,22 +148,10 @@ public class Configurator {
}
private interface TlsConfig {
- String createConfig(ZookeeperServerConfig config, Optional<TlsContext> tlsContext);
-
- default Optional<String> getEnvironmentVariable(String variableName) {
- return Optional.ofNullable(System.getenv().get(variableName))
- .filter(var -> !var.isEmpty());
- }
-
- default void validateOptions(Optional<TlsContext> tlsContext, String tlsSetting) {
- if (tlsContext.isEmpty() && !tlsSetting.equals("OFF"))
- throw new RuntimeException("Could not retrieve transport security options");
- }
-
String configFieldPrefix();
- default void appendSharedTlsConfig(StringBuilder builder, Optional<TlsContext> tlsContext) {
- tlsContext.ifPresent(ctx -> {
+ default void appendSharedTlsConfig(StringBuilder builder, VespaTlsConfig vespaTlsConfig) {
+ vespaTlsConfig.context().ifPresent(ctx -> {
builder.append(configFieldPrefix()).append(".context.supplier.class=").append(VespaSslContextProvider.class.getName()).append("\n");
String enabledCiphers = Arrays.stream(ctx.parameters().getCipherSuites()).sorted().collect(Collectors.joining(","));
builder.append(configFieldPrefix()).append(".ciphersuites=").append(enabledCiphers).append("\n");
@@ -167,39 +160,23 @@ public class Configurator {
builder.append(configFieldPrefix()).append(".clientAuth=NEED\n");
});
}
+
+ default boolean enablePortUnification(VespaTlsConfig config) {
+ return config.tlsEnabled()
+ && (config.mixedMode() == MixedMode.TLS_CLIENT_MIXED_SERVER || config.mixedMode() == MixedMode.PLAINTEXT_CLIENT_MIXED_SERVER);
+ }
}
static class TlsClientServerConfig implements TlsConfig {
- @Override
- public String createConfig(ZookeeperServerConfig config, Optional<TlsContext> tlsContext) {
- String tlsSetting = getEnvironmentVariable("VESPA_TLS_FOR_ZOOKEEPER_CLIENT_SERVER_COMMUNICATION")
- .orElse(config.tlsForClientServerCommunication().name());
- validateOptions(tlsContext, tlsSetting);
-
- StringBuilder sb = new StringBuilder();
- boolean portUnification;
- boolean secureClientPort;
- switch (tlsSetting) {
- case "OFF":
- secureClientPort = false; portUnification = false;
- break;
- case "TLS_ONLY":
- secureClientPort = true; portUnification = false;
- break;
- case "PORT_UNIFICATION":
- case "TLS_WITH_PORT_UNIFICATION":
- secureClientPort = false; portUnification = true;
- break;
- default:
- throw new IllegalArgumentException("Unknown value of config setting tlsForClientServerCommunication: " + tlsSetting);
- }
- sb.append("client.portUnification=").append(portUnification).append("\n");
+ public String createConfig(VespaTlsConfig vespaTlsConfig) {
+ StringBuilder sb = new StringBuilder()
+ .append("client.portUnification=").append(enablePortUnification(vespaTlsConfig)).append("\n");
// ZooKeeper Dynamic Reconfiguration requires the "non-secure" client port to exist
// This is a hack to override the secure parameter through our connection factory wrapper
// https://issues.apache.org/jira/browse/ZOOKEEPER-3577
- VespaNettyServerCnxnFactory_isSecure = secureClientPort;
- appendSharedTlsConfig(sb, tlsContext);
+ VespaNettyServerCnxnFactory_isSecure = vespaTlsConfig.tlsEnabled() && vespaTlsConfig.mixedMode() == MixedMode.DISABLED;
+ appendSharedTlsConfig(sb, vespaTlsConfig);
return sb.toString();
}
@@ -212,38 +189,11 @@ public class Configurator {
static class TlsQuorumConfig implements TlsConfig {
- @Override
- public String createConfig(ZookeeperServerConfig config, Optional<TlsContext> tlsContext) {
- String tlsSetting = getEnvironmentVariable("VESPA_TLS_FOR_ZOOKEEPER_QUORUM_COMMUNICATION")
- .orElse(config.tlsForQuorumCommunication().name());
- validateOptions(tlsContext, tlsSetting);
-
- StringBuilder sb = new StringBuilder();
- boolean sslQuorum;
- boolean portUnification;
- switch (tlsSetting) {
- case "OFF":
- sslQuorum = false;
- portUnification = false;
- break;
- case "PORT_UNIFICATION":
- sslQuorum = false;
- portUnification = true;
- break;
- case "TLS_WITH_PORT_UNIFICATION":
- sslQuorum = true;
- portUnification = true;
- break;
- case "TLS_ONLY":
- sslQuorum = true;
- portUnification = false;
- break;
- default: throw new IllegalArgumentException("Unknown value of config setting tlsForQuorumCommunication: " + tlsSetting);
- }
- sb.append("sslQuorum=").append(sslQuorum).append("\n");
- sb.append("portUnification=").append(portUnification).append("\n");
- appendSharedTlsConfig(sb, tlsContext);
-
+ public String createConfig(VespaTlsConfig vespaTlsConfig) {
+ StringBuilder sb = new StringBuilder()
+ .append("sslQuorum=").append(vespaTlsConfig.tlsEnabled()).append("\n")
+ .append("portUnification=").append(enablePortUnification(vespaTlsConfig)).append("\n");
+ appendSharedTlsConfig(sb, vespaTlsConfig);
return sb.toString();
}
@@ -253,4 +203,26 @@ public class Configurator {
}
}
+ static class VespaTlsConfig {
+ private final TlsContext context;
+ private final MixedMode mixedMode;
+
+ VespaTlsConfig(TlsContext context, MixedMode mixedMode) {
+ this.context = context;
+ this.mixedMode = mixedMode;
+ }
+
+ static VespaTlsConfig fromSystem() {
+ return new VespaTlsConfig(
+ TransportSecurityUtils.getSystemTlsContext().orElse(null),
+ TransportSecurityUtils.getInsecureMixedMode());
+ }
+
+ static VespaTlsConfig tlsDisabled() { return new VespaTlsConfig(null, MixedMode.defaultValue()); }
+
+ boolean tlsEnabled() { return context != null; }
+ Optional<TlsContext> context() { return Optional.ofNullable(context); }
+ MixedMode mixedMode() { return mixedMode; }
+ }
+
}
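To summarize the new mapping (derived from the createConfig methods above and the test expectations below; not an additional change): sslQuorum simply follows tlsEnabled(), port unification is only switched on while a mixed mode is active, and the secure-client override handed to VespaNettyServerCnxnFactory is true only for TLS without mixed mode. The generated config therefore contains roughly the following lines, plus the shared ssl settings whenever a TlsContext is present:

    # VespaTlsConfig(context, MixedMode.TLS_CLIENT_MIXED_SERVER)
    sslQuorum=true
    portUnification=true
    client.portUnification=true
    (VespaNettyServerCnxnFactory_isSecure = false)

    # VespaTlsConfig(context, MixedMode.DISABLED)
    sslQuorum=true
    portUnification=false
    client.portUnification=false
    (VespaNettyServerCnxnFactory_isSecure = true)

    # VespaTlsConfig.tlsDisabled()
    sslQuorum=false
    portUnification=false
    client.portUnification=false
    (VespaNettyServerCnxnFactory_isSecure = false)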
diff --git a/zookeeper-server/zookeeper-server-common/src/main/java/com/yahoo/vespa/zookeeper/ZooKeeperRunner.java b/zookeeper-server/zookeeper-server-common/src/main/java/com/yahoo/vespa/zookeeper/ZooKeeperRunner.java
index adbc7a369b3..8c748250503 100644
--- a/zookeeper-server/zookeeper-server-common/src/main/java/com/yahoo/vespa/zookeeper/ZooKeeperRunner.java
+++ b/zookeeper-server/zookeeper-server-common/src/main/java/com/yahoo/vespa/zookeeper/ZooKeeperRunner.java
@@ -4,7 +4,6 @@ package com.yahoo.vespa.zookeeper;
import com.yahoo.cloud.config.ZookeeperServerConfig;
import com.yahoo.concurrent.DaemonThreadFactory;
import com.yahoo.protect.Process;
-import com.yahoo.security.tls.TransportSecurityUtils;
import java.nio.file.Path;
import java.nio.file.Paths;
@@ -39,7 +38,7 @@ public class ZooKeeperRunner implements Runnable {
public ZooKeeperRunner(ZookeeperServerConfig zookeeperServerConfig, VespaZooKeeperServer server) {
this.zookeeperServerConfig = zookeeperServerConfig;
this.server = server;
- new Configurator(zookeeperServerConfig).writeConfigToDisk(TransportSecurityUtils.getSystemTlsContext());
+ new Configurator(zookeeperServerConfig).writeConfigToDisk();
executorService = Executors.newSingleThreadExecutor(new DaemonThreadFactory("zookeeper server"));
executorService.submit(this);
}
diff --git a/zookeeper-server/zookeeper-server-common/src/test/java/com/yahoo/vespa/zookeeper/ConfiguratorTest.java b/zookeeper-server/zookeeper-server-common/src/test/java/com/yahoo/vespa/zookeeper/ConfiguratorTest.java
index 47fed6fceac..c40b7cb7b52 100644
--- a/zookeeper-server/zookeeper-server-common/src/test/java/com/yahoo/vespa/zookeeper/ConfiguratorTest.java
+++ b/zookeeper-server/zookeeper-server-common/src/test/java/com/yahoo/vespa/zookeeper/ConfiguratorTest.java
@@ -7,6 +7,7 @@ import com.yahoo.security.X509CertificateBuilder;
import com.yahoo.security.tls.AuthorizationMode;
import com.yahoo.security.tls.DefaultTlsContext;
import com.yahoo.security.tls.HostnameVerification;
+import com.yahoo.security.tls.MixedMode;
import com.yahoo.security.tls.PeerAuthentication;
import com.yahoo.security.tls.TlsContext;
import com.yahoo.security.tls.policy.AuthorizedPeers;
@@ -24,14 +25,12 @@ import java.nio.file.Files;
import java.security.KeyPair;
import java.security.cert.X509Certificate;
import java.util.List;
-import java.util.Optional;
import java.util.Set;
-import static com.yahoo.cloud.config.ZookeeperServerConfig.TlsForClientServerCommunication;
-import static com.yahoo.cloud.config.ZookeeperServerConfig.TlsForQuorumCommunication;
import static com.yahoo.security.KeyAlgorithm.EC;
import static com.yahoo.security.SignatureAlgorithm.SHA256_WITH_ECDSA;
import static com.yahoo.vespa.defaults.Defaults.getDefaults;
+import static com.yahoo.vespa.zookeeper.Configurator.VespaTlsConfig;
import static com.yahoo.vespa.zookeeper.Configurator.ZOOKEEPER_JUTE_MAX_BUFFER;
import static java.time.Instant.EPOCH;
import static java.time.temporal.ChronoUnit.DAYS;
@@ -57,7 +56,7 @@ public class ConfiguratorTest {
@Test
public void config_is_written_correctly_when_one_server() {
ZookeeperServerConfig.Builder builder = createConfigBuilderForSingleHost(cfgFile, idFile);
- new Configurator(builder.build()).writeConfigToDisk(Optional.empty());
+ new Configurator(builder.build()).writeConfigToDisk(VespaTlsConfig.tlsDisabled());
validateConfigFileSingleHost(cfgFile);
validateIdFile(idFile, "0\n");
}
@@ -71,39 +70,25 @@ public class ConfiguratorTest {
builder.server(newServer(2, "baz", 345, 543, true));
builder.myidFile(idFile.getAbsolutePath());
builder.myid(1);
- new Configurator(builder.build()).writeConfigToDisk(Optional.empty());
+ new Configurator(builder.build()).writeConfigToDisk(VespaTlsConfig.tlsDisabled());
validateConfigFileMultipleHosts(cfgFile);
validateIdFile(idFile, "1\n");
}
@Test
- public void config_is_written_correctly_with_tls_for_quorum_communication_port_unification() {
+ public void config_is_written_correctly_with_tls_for_quorum_communication_tls_with_mixed_mode() {
ZookeeperServerConfig.Builder builder = createConfigBuilderForSingleHost(cfgFile, idFile);
- builder.tlsForQuorumCommunication(TlsForQuorumCommunication.PORT_UNIFICATION);
- builder.tlsForClientServerCommunication(TlsForClientServerCommunication.PORT_UNIFICATION);
TlsContext tlsContext = createTlsContext();
- new Configurator(builder.build()).writeConfigToDisk(Optional.of(tlsContext));
- validateConfigFilePortUnification(cfgFile);
+ new Configurator(builder.build()).writeConfigToDisk(new VespaTlsConfig(tlsContext, MixedMode.TLS_CLIENT_MIXED_SERVER));
+ validateConfigFileTlsWithMixedMode(cfgFile);
}
@Test
- public void config_is_written_correctly_with_tls_for_quorum_communication_tls_with_port_unification() {
+ public void config_is_written_correctly_with_tls_for_quorum_communication_tls_without_mixed_mode() {
ZookeeperServerConfig.Builder builder = createConfigBuilderForSingleHost(cfgFile, idFile);
- builder.tlsForQuorumCommunication(TlsForQuorumCommunication.TLS_WITH_PORT_UNIFICATION);
- builder.tlsForClientServerCommunication(TlsForClientServerCommunication.TLS_WITH_PORT_UNIFICATION);
TlsContext tlsContext = createTlsContext();
- new Configurator(builder.build()).writeConfigToDisk(Optional.of(tlsContext));
- validateConfigFileTlsWithPortUnification(cfgFile);
- }
-
- @Test
- public void config_is_written_correctly_with_tls_for_quorum_communication_tls_only() {
- ZookeeperServerConfig.Builder builder = createConfigBuilderForSingleHost(cfgFile, idFile);
- builder.tlsForQuorumCommunication(TlsForQuorumCommunication.TLS_ONLY);
- builder.tlsForClientServerCommunication(TlsForClientServerCommunication.TLS_ONLY);
- TlsContext tlsContext = createTlsContext();
- new Configurator(builder.build()).writeConfigToDisk(Optional.of(tlsContext));
- validateConfigFileTlsOnly(cfgFile);
+ new Configurator(builder.build()).writeConfigToDisk(new VespaTlsConfig(tlsContext, MixedMode.DISABLED));
+ validateConfigFileTlsWithoutMixedMode(cfgFile);
}
@Test(expected = RuntimeException.class)
@@ -113,7 +98,7 @@ public class ConfiguratorTest {
builder.server(newServer(1, "bar", 234, 432, false));
builder.server(newServer(2, "baz", 345, 543, false));
builder.myid(0);
- new Configurator(builder.build()).writeConfigToDisk(Optional.empty());
+ new Configurator(builder.build()).writeConfigToDisk(VespaTlsConfig.tlsDisabled());
}
@Test
@@ -127,12 +112,12 @@ public class ConfiguratorTest {
builder.zooKeeperConfigFile(cfgFile.getAbsolutePath());
builder.myidFile(idFile.getAbsolutePath());
- new Configurator(builder.build()).writeConfigToDisk(Optional.empty());
+ new Configurator(builder.build()).writeConfigToDisk(VespaTlsConfig.tlsDisabled());
assertEquals("" + new ZookeeperServerConfig(builder).juteMaxBuffer(), System.getProperty(ZOOKEEPER_JUTE_MAX_BUFFER));
final int max_buffer = 1;
builder.juteMaxBuffer(max_buffer);
- new Configurator(builder.build()).writeConfigToDisk(Optional.empty());
+ new Configurator(builder.build()).writeConfigToDisk(VespaTlsConfig.tlsDisabled());
assertEquals("" + max_buffer, System.getProperty(ZOOKEEPER_JUTE_MAX_BUFFER));
}
@@ -216,19 +201,8 @@ public class ConfiguratorTest {
validateConfigFile(cfgFile, expected);
}
- private void validateConfigFilePortUnification(File cfgFile) {
- String expected =
- commonConfig() +
- "server.0=foo:321:123;2181\n" +
- "sslQuorum=false\n" +
- "portUnification=true\n" +
- tlsQuorumConfig() +
- "client.portUnification=true\n" +
- tlsClientServerConfig();
- validateConfigFile(cfgFile, expected);
- }
- private void validateConfigFileTlsWithPortUnification(File cfgFile) {
+ private void validateConfigFileTlsWithMixedMode(File cfgFile) {
String expected =
commonConfig() +
"server.0=foo:321:123;2181\n" +
@@ -240,7 +214,7 @@ public class ConfiguratorTest {
validateConfigFile(cfgFile, expected);
}
- private void validateConfigFileTlsOnly(File cfgFile) {
+ private void validateConfigFileTlsWithoutMixedMode(File cfgFile) {
String expected =
commonConfig() +
"server.0=foo:321:123;2181\n" +