summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.travis.yml27
-rw-r--r--CMakeLists.txt4
-rw-r--r--CONTRIBUTING.md6
-rw-r--r--application/pom.xml7
-rw-r--r--build_settings.cmake2
-rw-r--r--client/src/main/java/ai/vespa/client/dsl/Field.java29
-rw-r--r--client/src/test/groovy/ai/vespa/client/dsl/QTest.groovy20
-rw-r--r--cloud-tenant-base-dependencies-enforcer/pom.xml10
-rw-r--r--clustercontroller-core/pom.xml7
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/SlobrokClient.java1
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/model/api/ConfigChangeReindexAction.java2
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/model/api/EndpointCertificateMetadata.java15
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java11
-rw-r--r--config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java31
-rw-r--r--config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java26
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/MapEvaluationTypeContext.java8
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/OnnxModels.java6
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/RankProfile.java2
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/Search.java12
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/derived/RankProfileList.java6
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/derived/SummaryClass.java6
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/derived/SummaryClassField.java25
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/ConstantTensorTransformer.java13
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/processing/ExactMatch.java6
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/HostResource.java1
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java21
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/clustercontroller/ClusterControllerContainerCluster.java6
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java9
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java32
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/VespaReindexAction.java2
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidator.java11
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminBuilderBase.java4
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/chains/search/DomFederationSearcherBuilder.java5
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/chains/search/SearchChainsBuilder.java3
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java7
-rwxr-xr-xconfig-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java1
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/ContainerModelEvaluation.java12
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/docproc/DocprocChain.java5
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/http/JettyHttpServer.java2
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/search/searchchain/FederationSearcher.java123
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/search/searchchain/Provider.java6
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java15
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/StorageGroup.java10
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/search/IndexingDocprocChain.java2
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/search/TransactionLogServer.java16
-rw-r--r--config-model/src/main/javacc/SDParser.jj8
-rw-r--r--config-model/src/main/resources/schema/admin.rnc1
-rw-r--r--config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java121
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/SummaryTestCase.java255
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/derived/SummaryTestCase.java31
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionTypeResolverTestCase.java50
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionWithTensorTestCase.java20
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/admin/ClusterControllerTestCase.java41
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/admin/DedicatedAdminV4Test.java2
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/application/validation/QuotaValidatorTest.java19
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidatorTest.java16
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/ContentBuilderTest.java45
-rwxr-xr-xconfig-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java2
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java16
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/ml/ModelEvaluationTest.java7
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java36
-rw-r--r--config/src/apps/vespa-configproxy-cmd/main.cpp2
-rw-r--r--config/src/apps/vespa-get-config/getconfig.cpp2
-rw-r--r--config/src/apps/vespa-ping-configproxy/pingproxy.cpp2
-rw-r--r--config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigRequester.java65
-rw-r--r--config/src/main/java/com/yahoo/vespa/config/ConfigPayload.java17
-rw-r--r--config/src/main/java/com/yahoo/vespa/config/LZ4PayloadCompressor.java10
-rw-r--r--config/src/main/java/com/yahoo/vespa/config/TimingValues.java51
-rw-r--r--config/src/main/java/com/yahoo/vespa/config/protocol/ConfigResponse.java4
-rw-r--r--config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequestV3.java11
-rw-r--r--config/src/main/java/com/yahoo/vespa/config/protocol/Payload.java15
-rw-r--r--config/src/main/java/com/yahoo/vespa/config/protocol/SlimeConfigResponse.java14
-rw-r--r--config/src/main/java/com/yahoo/vespa/config/util/ConfigUtils.java13
-rw-r--r--config/src/test/java/com/yahoo/config/subscription/impl/JRTConfigRequesterTest.java149
-rw-r--r--config/src/test/java/com/yahoo/vespa/config/protocol/ConfigResponseTest.java17
-rw-r--r--configserver-client/src/main/java/ai/vespa/hosted/client/AbstractConfigServerClient.java15
-rw-r--r--configserver-client/src/main/java/ai/vespa/hosted/client/ConfigServerClient.java30
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java12
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java84
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelController.java4
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/application/Application.java50
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java21
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/ApplicationFileManager.java5
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java16
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpErrorResponse.java2
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/http/SessionHandler.java6
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationApiHandler.java47
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/DeploymentMetricsResponse.java9
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/SessionCreateHandler.java18
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/TenantHandler.java113
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java46
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java70
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactory.java5
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/rpc/LZ4ConfigResponseFactory.java7
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/rpc/UncompressedConfigResponseFactory.java6
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java76
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java3
-rw-r--r--configserver/src/main/resources/configserver-app/services.xml4
-rwxr-xr-xconfigserver/src/main/sh/start-configserver1
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/ConfigServerBootstrapTest.java19
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/SessionCreateHandlerTest.java34
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/TenantHandlerTest.java108
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java23
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactoryTest.java4
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java113
-rw-r--r--configserver/src/test/resources/metrics/clustercontroller_metrics.json33
-rw-r--r--configutil/src/apps/configstatus/main.cpp2
-rw-r--r--configutil/src/apps/modelinspect/main.cpp2
-rw-r--r--container-core/abi-spec.json37
-rw-r--r--container-core/src/main/java/com/yahoo/container/handler/Timing.java6
-rw-r--r--container-core/src/main/java/com/yahoo/container/jdisc/HttpRequest.java2
-rw-r--r--container-core/src/main/java/com/yahoo/container/jdisc/LoggingRequestHandler.java5
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/CookieHelper.java38
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/HttpResponse.java19
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/filter/JdiscFilterResponse.java31
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/filter/SecurityResponseFilterChain.java22
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectionMetricAggregator.java65
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactory.java21
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpResponseStatisticsCollector.java73
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscContext.java6
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscServerConnector.java5
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/Janitor.java46
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyConnectionLogger.java64
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyHttpServer.java22
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/MetricDefinitions.java2
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/RequestUtils.java5
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServerMetricReporter.java1
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletOutputStreamWriter.java12
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletRequestReader.java11
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletResponseController.java5
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/SimpleConcurrentIdentityHashMap.java52
-rw-r--r--container-core/src/main/java/com/yahoo/restapi/RedirectResponse.java27
-rw-r--r--container-core/src/main/java/com/yahoo/restapi/RestApiException.java4
-rw-r--r--container-core/src/main/java/com/yahoo/restapi/RestApiImpl.java5
-rw-r--r--container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactoryTest.java4
-rw-r--r--container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerConformanceTest.java16
-rw-r--r--container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java14
-rw-r--r--container-dependencies-enforcer/pom.xml2
-rw-r--r--container-dependency-versions/pom.xml18
-rw-r--r--container-disc/pom.xml2
-rw-r--r--container-messagebus/pom.xml7
-rw-r--r--container-search/abi-spec.json22
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/fastsearch/Base64DataField.java25
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/fastsearch/DataField.java2
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/fastsearch/DocsumField.java1
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/hitfield/RawBase64.java18
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/query/parser/AbstractParser.java3
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/query/parser/SpecialTokenRegistry.java137
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/query/parser/SpecialTokens.java167
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/query/parser/Tokenizer.java34
-rw-r--r--container-search/src/main/java/com/yahoo/search/Query.java5
-rw-r--r--container-search/src/main/java/com/yahoo/search/dispatch/InvokerFactory.java2
-rw-r--r--container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java26
-rw-r--r--container-search/src/main/java/com/yahoo/search/federation/FederationSearcher.java3
-rw-r--r--container-search/src/main/java/com/yahoo/search/federation/sourceref/SearchChainInvocationSpec.java2
-rw-r--r--container-search/src/main/java/com/yahoo/search/federation/sourceref/SearchChainResolver.java2
-rw-r--r--container-search/src/main/java/com/yahoo/search/federation/sourceref/SingleTarget.java2
-rw-r--r--container-search/src/main/java/com/yahoo/search/federation/sourceref/Target.java2
-rw-r--r--container-search/src/main/java/com/yahoo/search/handler/SearchResponse.java6
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/Presentation.java2
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/Select.java6
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/SelectParser.java1
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/parser/ParserEnvironment.java4
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/profile/QueryProfileProperties.java2
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/profile/types/QueryProfileType.java10
-rw-r--r--container-search/src/main/java/com/yahoo/search/searchchain/Execution.java6
-rw-r--r--container-search/src/main/java/com/yahoo/search/searchchain/ExecutionFactory.java2
-rw-r--r--container-search/src/main/java/com/yahoo/search/searchchain/model/VespaSearchers.java5
-rw-r--r--container-search/src/main/java/com/yahoo/search/searchchain/model/federation/FederationSearcherModel.java28
-rw-r--r--container-search/src/main/java/com/yahoo/search/searchers/ValidateNearestNeighborSearcher.java1
-rw-r--r--container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcher.java6
-rw-r--r--container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParseTestCase.java6
-rw-r--r--container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParsingTester.java26
-rw-r--r--container-search/src/test/java/com/yahoo/prelude/query/parser/test/TokenizerTestCase.java108
-rw-r--r--container-search/src/test/java/com/yahoo/prelude/query/parser/test/replacingtokens.cfg12
-rw-r--r--container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterCoverageTest.java18
-rw-r--r--container-search/src/test/java/com/yahoo/search/query/profile/types/test/QueryProfileTypeTestCase.java73
-rw-r--r--container-search/src/test/java/com/yahoo/search/query/rewrite/RewriterFeaturesTestCase.java2
-rw-r--r--container-test/pom.xml12
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java21
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/archive/ArchiveBucketDb.java16
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/archive/package-info.java2
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/aws/MockRoleService.java21
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/Invoice.java7
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/MockBillingController.java16
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Node.java5
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/ChangeRequestClient.java2
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/MockChangeRequestClient.java4
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/VCMRReport.java149
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/VespaChangeRequest.java1
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/zone/ZoneRegistry.java3
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java6
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/Policy.java6
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java13
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java10
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java11
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Instance.java11
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java30
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Endpoint.java23
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/archive/CuratorArchiveBucketDb.java31
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/archive/package-info.java5
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Badges.java59
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java11
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java4
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java26
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveAccessMaintainer.java4
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdater.java4
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeRequestMaintainer.java15
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java8
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java42
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporter.java20
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeScheduler.java2
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/TenantRoleMaintainer.java17
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/VCMRMaintainer.java76
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/metric/ConfigServerMetrics.java64
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/metric/DeploymentMetrics.java47
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notification.java37
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationSource.java3
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java84
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ChangeRequestSerializer.java4
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java7
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializer.java32
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java21
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandler.java26
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandlerV2.java352
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/billing/CsvResponse.java33
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandler.java84
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/deployment/BadgeApiHandler.java41
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/deployment/Badges.java305
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java99
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicy.java47
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java39
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java46
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/EndpointTest.java49
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/archive/CuratorArchiveBucketDbTest.java16
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java9
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/BadgesTest.java61
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java1
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java22
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ServiceRegistryMock.java5
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneRegistryMock.java37
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveAccessMaintainerTest.java18
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdaterTest.java13
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeRequestMaintainerTest.java23
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java17
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporterTest.java22
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeSchedulerTest.java6
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TenantRoleMaintainerTest.java61
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/VCMRMaintainerTest.java49
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/metric/ConfigServerMetricsTest.java75
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java103
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializerTest.java12
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiCloudTest.java15
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java13
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-cloud.json48
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-with-routing-policy-legacy.json71
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-with-routing-policy.json6
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-without-shared-endpoints.json3
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment.json7
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-us-east-1.json4
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/notifications-tenant1-app2.json2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/notifications-tenant1.json4
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/prod-us-central-1.json7
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandlerTest.java7
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandlerV2Test.java138
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandlerTest.java36
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/responses/patched-vcmr.json31
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/responses/vcmr.json36
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/BadgeApiTest.java40
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/responses/history.svg177
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/responses/overview.svg118
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java4
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java122
-rw-r--r--default_build_settings.cmake12
-rw-r--r--dist/vespa.spec230
-rw-r--r--document/src/main/java/com/yahoo/document/json/readers/TensorReader.java13
-rw-r--r--document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java21
-rw-r--r--document/src/test/java/com/yahoo/document/serialization/SerializationTestUtils.java7
-rw-r--r--document/src/test/resources/.gitattributes2
-rw-r--r--document/src/test/resources/reference/reference_with_id__cppbin84 -> 83 bytes
-rw-r--r--document/src/tests/serialization/vespadocumentserializer_test.cpp13
-rw-r--r--document/src/vespa/document/base/idstring.cpp7
-rw-r--r--document/src/vespa/document/util/bytebuffer.cpp2
-rw-r--r--documentapi/abi-spec.json3
-rw-r--r--documentapi/pom.xml7
-rw-r--r--documentapi/src/main/java/com/yahoo/documentapi/DocumentOperationParameters.java25
-rw-r--r--documentapi/src/main/java/com/yahoo/documentapi/Response.java3
-rw-r--r--documentapi/src/main/java/com/yahoo/documentapi/Result.java2
-rw-r--r--documentapi/src/main/java/com/yahoo/documentapi/messagebus/MessageBusAsyncSession.java8
-rwxr-xr-xdocumentapi/src/main/java/com/yahoo/documentapi/messagebus/MessageBusSyncSession.java5
-rwxr-xr-xdocumentapi/src/main/java/com/yahoo/documentapi/messagebus/MessageBusVisitorSession.java9
-rw-r--r--documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/ContentPolicy.java34
-rwxr-xr-xdocumentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/ExternPolicy.java1
-rw-r--r--documentapi/src/test/java/com/yahoo/documentapi/messagebus/test/MessageBusDocumentApiTestCase.java37
-rw-r--r--eval/src/apps/tensor_conformance/generate.cpp30
-rw-r--r--eval/src/apps/tensor_conformance/generate.h14
-rw-r--r--eval/src/apps/tensor_conformance/tensor_conformance.cpp39
-rw-r--r--eval/src/tests/eval/function/function_test.cpp95
-rw-r--r--eval/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp14
-rw-r--r--eval/src/vespa/eval/eval/basic_nodes.cpp33
-rw-r--r--eval/src/vespa/eval/eval/basic_nodes.h28
-rw-r--r--eval/src/vespa/eval/eval/call_nodes.h10
-rw-r--r--eval/src/vespa/eval/eval/fast_forest.cpp12
-rw-r--r--eval/src/vespa/eval/eval/key_gen.cpp2
-rw-r--r--eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp8
-rw-r--r--eval/src/vespa/eval/eval/make_tensor_function.cpp10
-rw-r--r--eval/src/vespa/eval/eval/operator_nodes.cpp2
-rw-r--r--eval/src/vespa/eval/eval/operator_nodes.h6
-rw-r--r--eval/src/vespa/eval/eval/test/reference_operations.cpp2
-rw-r--r--eval/src/vespa/eval/eval/vm_forest.cpp14
-rw-r--r--eval/src/vespa/eval/instruction/dense_lambda_peek_optimizer.cpp2
-rw-r--r--eval/src/vespa/eval/instruction/dense_tensor_peek_function.cpp2
-rw-r--r--eval/src/vespa/eval/onnx/onnx_wrapper.h2
-rw-r--r--fbench/src/fbench/fbench.cpp2
-rw-r--r--fbench/src/filterfile/filterfile.cpp2
-rw-r--r--fbench/src/splitfile/splitfile.cpp2
-rw-r--r--filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java1
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/Flags.java88
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java8
-rw-r--r--fnet/src/tests/info/info.cpp2
-rw-r--r--fnet/src/vespa/fnet/frt/invokable.h19
-rw-r--r--fsa/src/alltest/conceptnet_test.cpp2
-rw-r--r--fsa/src/apps/fsadump/fsadump.cpp2
-rw-r--r--fsa/src/apps/fsainfo/fsainfo.cpp2
-rw-r--r--fsa/src/apps/makefsa/makefsa.cpp2
-rw-r--r--functions.cmake20
-rw-r--r--indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/Expression.java5
-rw-r--r--indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/InputExpression.java2
-rw-r--r--indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/LiteralBoolExpression.java53
-rw-r--r--indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/NowExpression.java6
-rw-r--r--indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/SplitExpression.java1
-rw-r--r--indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/ToBoolExpression.java59
-rw-r--r--indexinglanguage/src/main/javacc/IndexingParser.jj20
-rw-r--r--indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/ScriptTestCase.java14
-rw-r--r--indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/HostNameTestCase.java1
-rw-r--r--indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/LiteralBoolExpressionTestCase.java63
-rw-r--r--indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/ToBoolTestCase.java76
-rw-r--r--indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/parser/ExpressionTestCase.java1
-rw-r--r--indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/parser/IdentifierTestCase.java3
-rw-r--r--jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/cors/CorsLogic.java19
-rw-r--r--jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/cors/CorsRequestFilterBase.java41
-rw-r--r--jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsPreflightRequestFilterTest.java11
-rw-r--r--jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsRequestFilterBaseTest.java60
-rw-r--r--jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsResponseFilterTest.java2
-rw-r--r--jdisc_core/src/test/java/com/yahoo/jdisc/application/BindingSetTestCase.java4
-rw-r--r--jrt/pom.xml7
-rw-r--r--jrt/src/com/yahoo/jrt/DataValue.java2
-rw-r--r--jrt/src/com/yahoo/jrt/slobrok/api/Mirror.java1
-rw-r--r--juniper/src/test/auxTest.cpp2
-rw-r--r--juniper/src/test/testenv.cpp2
-rw-r--r--juniper/src/vespa/juniper/SummaryConfig.h2
-rw-r--r--linguistics/abi-spec.json51
-rw-r--r--linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpTokenizer.java9
-rw-r--r--linguistics/src/main/java/com/yahoo/language/process/SpecialTokenRegistry.java72
-rw-r--r--linguistics/src/main/java/com/yahoo/language/process/SpecialTokens.java141
-rw-r--r--linguistics/src/main/java/com/yahoo/language/process/TokenType.java2
-rw-r--r--linguistics/src/main/java/com/yahoo/language/simple/SimpleLinguistics.java7
-rw-r--r--linguistics/src/main/java/com/yahoo/language/simple/SimpleTokenizer.java27
-rw-r--r--linguistics/src/test/java/com/yahoo/language/process/SpecialTokensTestCase.java40
-rw-r--r--messagebus/pom.xml7
-rw-r--r--messagebus/src/main/java/com/yahoo/messagebus/Message.java2
-rw-r--r--model-evaluation/abi-spec.json5
-rw-r--r--model-evaluation/pom.xml6
-rw-r--r--model-evaluation/src/main/java/ai/vespa/models/evaluation/FunctionEvaluator.java36
-rw-r--r--model-evaluation/src/main/java/ai/vespa/models/evaluation/LazyArrayContext.java102
-rw-r--r--model-evaluation/src/main/java/ai/vespa/models/evaluation/Model.java18
-rw-r--r--model-evaluation/src/main/java/ai/vespa/models/evaluation/ModelsEvaluator.java4
-rw-r--r--model-evaluation/src/main/java/ai/vespa/models/evaluation/OnnxModel.java57
-rw-r--r--model-evaluation/src/main/java/ai/vespa/models/evaluation/RankProfilesConfigImporter.java36
-rw-r--r--model-evaluation/src/test/java/ai/vespa/models/evaluation/ModelTester.java5
-rw-r--r--model-evaluation/src/test/java/ai/vespa/models/evaluation/ModelsEvaluatorTest.java5
-rw-r--r--model-evaluation/src/test/java/ai/vespa/models/evaluation/OnnxEvaluatorTest.java69
-rw-r--r--model-evaluation/src/test/java/ai/vespa/models/handler/HandlerTester.java76
-rw-r--r--model-evaluation/src/test/java/ai/vespa/models/handler/ModelsEvaluationHandlerTest.java93
-rw-r--r--model-evaluation/src/test/java/ai/vespa/models/handler/OnnxEvaluationHandlerTest.java137
-rw-r--r--model-evaluation/src/test/resources/config/models/onnx-models.cfg0
-rw-r--r--model-evaluation/src/test/resources/config/onnx/models/add_mul.onnx24
-rwxr-xr-xmodel-evaluation/src/test/resources/config/onnx/models/add_mul.py30
-rw-r--r--model-evaluation/src/test/resources/config/onnx/models/one_layer.onnxbin0 -> 299 bytes
-rwxr-xr-xmodel-evaluation/src/test/resources/config/onnx/models/pytorch_one_layer.py38
-rw-r--r--model-evaluation/src/test/resources/config/onnx/onnx-models.cfg16
-rw-r--r--model-evaluation/src/test/resources/config/onnx/rank-profiles.cfg17
-rw-r--r--model-evaluation/src/test/resources/config/onnx/ranking-constants.cfg0
-rw-r--r--model-evaluation/src/test/resources/config/rankexpression/onnx-models.cfg0
-rw-r--r--model-evaluation/src/test/resources/config/smallconstant/onnx-models.cfg0
-rw-r--r--model-integration/pom.xml4
-rw-r--r--model-integration/src/main/java/ai/vespa/modelintegration/evaluator/OnnxEvaluator.java79
-rw-r--r--model-integration/src/main/java/ai/vespa/modelintegration/evaluator/TensorConverter.java181
-rw-r--r--model-integration/src/main/java/ai/vespa/modelintegration/evaluator/package-info.java5
-rw-r--r--model-integration/src/test/java/ai/vespa/modelintegration/evaluator/OnnxEvaluatorTest.java93
-rw-r--r--model-integration/src/test/models/onnx/add_double.onnx16
-rwxr-xr-xmodel-integration/src/test/models/onnx/add_double.py27
-rw-r--r--model-integration/src/test/models/onnx/add_float.onnx16
-rwxr-xr-xmodel-integration/src/test/models/onnx/add_float.py27
-rw-r--r--model-integration/src/test/models/onnx/add_int64.onnx16
-rwxr-xr-xmodel-integration/src/test/models/onnx/add_int64.py27
-rw-r--r--model-integration/src/test/models/onnx/cast_bfloat16_float.onnx12
-rwxr-xr-xmodel-integration/src/test/models/onnx/cast_bfloat16_float.py24
-rw-r--r--model-integration/src/test/models/onnx/cast_float_int8.onnx12
-rwxr-xr-xmodel-integration/src/test/models/onnx/cast_float_int8.py24
-rw-r--r--model-integration/src/test/models/onnx/cast_int8_float.onnx12
-rwxr-xr-xmodel-integration/src/test/models/onnx/cast_int8_float.py24
-rw-r--r--model-integration/src/test/models/onnx/pytorch/one_layer.onnxbin0 -> 299 bytes
-rwxr-xr-xmodel-integration/src/test/models/onnx/pytorch/pytorch_one_layer.py38
-rw-r--r--model-integration/src/test/models/onnx/simple/matmul.onnx16
-rwxr-xr-xmodel-integration/src/test/models/onnx/simple/matmul.py27
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java4
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java11
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandler.java14
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java55
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java6
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixPath.java9
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/yum/Yum.java42
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/yum/YumCommand.java75
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/yum/YumTester.java52
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java44
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java2
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/yum/YumTest.java25
-rw-r--r--node-repository/src/main/config/node-repository.xml1
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java3
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java5
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/DelegatingOsUpgrader.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsVersions.java8
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RebuildingOsUpgrader.java122
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RetiringOsUpgrader.java15
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java7
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/LoadBalancersV1ApiHandler.java42
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/ContainerConfig.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java12
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java179
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java1
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/LoadBalancersV1ApiTest.java1
-rw-r--r--orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorImpl.java6
-rw-r--r--orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApi.java2
-rw-r--r--orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApiImpl.java10
-rw-r--r--orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ClusterApi.java1
-rw-r--r--orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicy.java13
-rw-r--r--orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicy.java6
-rw-r--r--orchestrator/src/test/java/com/yahoo/vespa/orchestrator/OrchestratorImplTest.java17
-rw-r--r--orchestrator/src/test/java/com/yahoo/vespa/orchestrator/OrchestratorTest.java5
-rw-r--r--orchestrator/src/test/java/com/yahoo/vespa/orchestrator/model/ApplicationApiImplTest.java44
-rw-r--r--orchestrator/src/test/java/com/yahoo/vespa/orchestrator/model/ClusterApiImplTest.java15
-rw-r--r--orchestrator/src/test/java/com/yahoo/vespa/orchestrator/model/ModelTestUtils.java5
-rw-r--r--orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicyTest.java16
-rw-r--r--orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicyTest.java4
-rw-r--r--orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/ApplicationSuspensionRequestHandlerTest.java4
-rw-r--r--parent/pom.xml22
-rw-r--r--pom.xml1
-rw-r--r--screwdriver.yaml11
-rwxr-xr-xscrewdriver/build-vespa.sh (renamed from travis/travis-build.sh)2
-rwxr-xr-xscrewdriver/detect-what-to-build.sh (renamed from travis/detect-what-to-build.sh)8
-rw-r--r--searchcommon/src/vespa/searchcommon/common/compaction_strategy.h2
-rw-r--r--searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp15
-rw-r--r--searchcore/src/tests/proton/documentdb/documentbucketmover/CMakeLists.txt12
-rw-r--r--searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_test.cpp780
-rw-r--r--searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_v2_test.cpp682
-rw-r--r--searchcore/src/tests/proton/documentdb/job_tracked_maintenance_job/job_tracked_maintenance_job_test.cpp18
-rw-r--r--searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.cpp7
-rw-r--r--searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.h9
-rw-r--r--searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_compaction_test.cpp80
-rw-r--r--searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.cpp23
-rw-r--r--searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.h4
-rw-r--r--searchcore/src/tests/proton/documentdb/maintenancecontroller/CMakeLists.txt17
-rw-r--r--searchcore/src/tests/proton/documentdb/maintenancecontroller/frozenbucketsmap_test.cpp85
-rw-r--r--searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp246
-rw-r--r--searchcore/src/tests/proton/documentdb/move_operation_limiter/move_operation_limiter_test.cpp6
-rw-r--r--searchcore/src/tests/proton/persistenceengine/persistence_handler_map/persistence_handler_map_test.cpp1
-rw-r--r--searchcore/src/tests/proton/persistenceengine/persistenceengine_test.cpp31
-rw-r--r--searchcore/src/vespa/searchcore/config/CMakeLists.txt2
-rw-r--r--searchcore/src/vespa/searchcore/config/proton.def13
-rw-r--r--searchcore/src/vespa/searchcore/config/ranking-expressions.def5
-rw-r--r--searchcore/src/vespa/searchcore/proton/feedoperation/pruneremoveddocumentsoperation.h26
-rw-r--r--searchcore/src/vespa/searchcore/proton/persistenceengine/bucket_guard.h35
-rw-r--r--searchcore/src/vespa/searchcore/proton/persistenceengine/ipersistencehandler.h2
-rw-r--r--searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.cpp3
-rw-r--r--searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.h4
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/CMakeLists.txt4
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/bucketmovejob.cpp526
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/bucketmovejob.h166
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/bucketmovejobv2.cpp470
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/bucketmovejobv2.h149
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/document_db_maintenance_config.cpp32
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/document_db_maintenance_config.h17
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentbucketmover.cpp2
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentdb.cpp6
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentdb.h4
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp10
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/forcecommitcontext.cpp4
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/forcecommitcontext.h2
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/frozenbuckets.cpp142
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/frozenbuckets.h100
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/i_maintenance_job.h15
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/ifrozenbuckethandler.h31
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/job_tracked_maintenance_job.h4
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.cpp317
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.h101
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_base.cpp177
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_base.h71
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_take2.cpp167
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_take2.h73
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.cpp63
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.h1
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.cpp24
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.h12
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/maintenancejobrunner.cpp2
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/operationdonecontext.cpp14
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/operationdonecontext.h12
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/persistencehandlerproxy.cpp6
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/persistencehandlerproxy.h1
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/pruneremoveddocumentsjob.cpp127
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/pruneremoveddocumentsjob.h56
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/putdonecontext.cpp5
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/putdonecontext.h5
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/removedonecontext.cpp6
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/removedonecontext.h8
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/removedonetask.cpp7
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/removedonetask.h8
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.cpp114
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.h6
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/updatedonecontext.cpp2
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/updatedonecontext.h2
-rw-r--r--searchlib/src/apps/vespa-attribute-inspect/vespa-attribute-inspect.cpp2
-rw-r--r--searchlib/src/apps/vespa-fileheader-inspect/vespa-fileheader-inspect.cpp2
-rw-r--r--searchlib/src/apps/vespa-ranking-expression-analyzer/vespa-ranking-expression-analyzer.cpp10
-rw-r--r--searchlib/src/tests/aggregator/perdocexpr.cpp2
-rw-r--r--searchlib/src/tests/attribute/benchmark/attributebenchmark.cpp2
-rw-r--r--searchlib/src/tests/bitvector/bitvectorbenchmark.cpp2
-rw-r--r--searchlib/src/tests/diskindex/fieldwriter/fieldwriter_test.cpp2
-rw-r--r--searchlib/src/tests/features/constant/constant_test.cpp46
-rw-r--r--searchlib/src/tests/features/featurebenchmark.cpp2
-rw-r--r--searchlib/src/tests/groupingengine/groupingengine_test.cpp2
-rw-r--r--searchlib/src/tests/postinglistbm/postinglistbm.cpp2
-rw-r--r--searchlib/src/tests/tensor/distance_functions/distance_functions_test.cpp13
-rw-r--r--searchlib/src/tests/transactionlogstress/translogstress.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enumattribute.h11
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enumattribute.hpp13
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enumstore.cpp6
-rw-r--r--searchlib/src/vespa/searchlib/attribute/multienumattribute.h8
-rw-r--r--searchlib/src/vespa/searchlib/attribute/multienumattribute.hpp4
-rw-r--r--searchlib/src/vespa/searchlib/attribute/multinumericenumattribute.h3
-rw-r--r--searchlib/src/vespa/searchlib/attribute/multinumericpostattribute.h1
-rw-r--r--searchlib/src/vespa/searchlib/attribute/multistringattribute.h3
-rw-r--r--searchlib/src/vespa/searchlib/attribute/multistringpostattribute.h1
-rw-r--r--searchlib/src/vespa/searchlib/attribute/numericbase.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/numericbase.h1
-rw-r--r--searchlib/src/vespa/searchlib/attribute/postinglistattribute.h2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/singleenumattribute.h12
-rw-r--r--searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp14
-rw-r--r--searchlib/src/vespa/searchlib/attribute/singlenumericenumattribute.h6
-rw-r--r--searchlib/src/vespa/searchlib/attribute/singlenumericenumattribute.hpp10
-rw-r--r--searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.h1
-rw-r--r--searchlib/src/vespa/searchlib/attribute/singlestringattribute.h4
-rw-r--r--searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.h1
-rw-r--r--searchlib/src/vespa/searchlib/attribute/stringbase.cpp3
-rw-r--r--searchlib/src/vespa/searchlib/attribute/stringbase.h1
-rw-r--r--searchlib/src/vespa/searchlib/bitcompression/compression.cpp17
-rw-r--r--searchlib/src/vespa/searchlib/bitcompression/compression.h42
-rw-r--r--searchlib/src/vespa/searchlib/common/bitvectorcache.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/features/constant_feature.cpp27
-rw-r--r--searchlib/src/vespa/searchlib/features/onnx_feature.h2
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/predicate_search.cpp39
-rw-r--r--searchsummary/src/vespa/searchsummary/docsummary/resultconfig.cpp2
-rw-r--r--service-monitor/src/main/java/com/yahoo/vespa/service/slobrok/SlobrokMonitor.java13
-rw-r--r--service-monitor/src/main/java/com/yahoo/vespa/service/slobrok/SlobrokMonitorManagerImpl.java29
-rw-r--r--service-monitor/src/test/java/com/yahoo/vespa/service/slobrok/SlobrokMonitorManagerImplTest.java3
-rw-r--r--service-monitor/src/test/java/com/yahoo/vespa/service/slobrok/SlobrokMonitorTest.java2
-rw-r--r--slobrok/src/apps/slobrok/slobrok.cpp2
-rw-r--r--slobrok/src/tests/startsome/tstdst.cpp2
-rw-r--r--slobrok/src/vespa/slobrok/server/sbenv.cpp2
-rw-r--r--staging_vespalib/src/vespa/vespalib/util/document_runnable.cpp2
-rw-r--r--storage/src/tests/common/teststorageapp.cpp2
-rw-r--r--storage/src/tests/common/teststorageapp.h4
-rw-r--r--storage/src/tests/distributor/CMakeLists.txt2
-rw-r--r--storage/src/tests/distributor/bucketdbupdatertest.cpp14
-rw-r--r--storage/src/tests/distributor/distributor_message_sender_stub.h2
-rw-r--r--storage/src/tests/distributor/distributor_stripe_pool_test.cpp92
-rw-r--r--storage/src/tests/distributor/distributortest.cpp93
-rw-r--r--storage/src/tests/distributor/distributortestutil.cpp55
-rw-r--r--storage/src/tests/distributor/distributortestutil.h30
-rw-r--r--storage/src/tests/distributor/getoperationtest.cpp2
-rw-r--r--storage/src/tests/distributor/idealstatemanagertest.cpp19
-rw-r--r--storage/src/tests/distributor/maintenancemocks.h6
-rw-r--r--storage/src/tests/distributor/mock_tickable_stripe.h42
-rw-r--r--storage/src/tests/distributor/multi_thread_stripe_access_guard_test.cpp59
-rw-r--r--storage/src/tests/distributor/putoperationtest.cpp6
-rw-r--r--storage/src/tests/distributor/read_for_write_visitor_operation_test.cpp12
-rw-r--r--storage/src/tests/distributor/removelocationtest.cpp6
-rw-r--r--storage/src/tests/distributor/removeoperationtest.cpp4
-rw-r--r--storage/src/tests/distributor/statecheckerstest.cpp38
-rw-r--r--storage/src/tests/distributor/statoperationtest.cpp2
-rw-r--r--storage/src/tests/distributor/twophaseupdateoperationtest.cpp4
-rw-r--r--storage/src/tests/distributor/updateoperationtest.cpp3
-rw-r--r--storage/src/tests/distributor/visitoroperationtest.cpp6
-rw-r--r--storage/src/vespa/storage/common/distributorcomponent.h6
-rw-r--r--storage/src/vespa/storage/distributor/CMakeLists.txt6
-rw-r--r--storage/src/vespa/storage/distributor/bucket_space_state_map.cpp67
-rw-r--r--storage/src/vespa/storage/distributor/bucket_space_state_map.h74
-rw-r--r--storage/src/vespa/storage/distributor/bucketdbupdater.cpp82
-rw-r--r--storage/src/vespa/storage/distributor/bucketdbupdater.h21
-rw-r--r--storage/src/vespa/storage/distributor/distributor.cpp395
-rw-r--r--storage/src/vespa/storage/distributor/distributor.h108
-rw-r--r--storage/src/vespa/storage/distributor/distributor_bucket_space.cpp4
-rw-r--r--storage/src/vespa/storage/distributor/distributor_bucket_space.h5
-rw-r--r--storage/src/vespa/storage/distributor/distributor_bucket_space_repo.cpp6
-rw-r--r--storage/src/vespa/storage/distributor/distributor_bucket_space_repo.h3
-rw-r--r--storage/src/vespa/storage/distributor/distributor_component.cpp27
-rw-r--r--storage/src/vespa/storage/distributor/distributor_component.h67
-rw-r--r--storage/src/vespa/storage/distributor/distributor_interface.h22
-rw-r--r--storage/src/vespa/storage/distributor/distributor_operation_context.h30
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe.cpp169
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe.h53
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe_component.cpp6
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe_component.h4
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe_interface.h3
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe_operation_context.h19
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe_pool.cpp109
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe_pool.h89
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe_thread.cpp105
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe_thread.h84
-rw-r--r--storage/src/vespa/storage/distributor/distributormessagesender.h6
-rw-r--r--storage/src/vespa/storage/distributor/externaloperationhandler.cpp2
-rw-r--r--storage/src/vespa/storage/distributor/idealstatemanager.cpp19
-rw-r--r--storage/src/vespa/storage/distributor/idealstatemanager.h13
-rw-r--r--storage/src/vespa/storage/distributor/legacy_single_stripe_accessor.cpp92
-rw-r--r--storage/src/vespa/storage/distributor/legacy_single_stripe_accessor.h69
-rw-r--r--storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.cpp174
-rw-r--r--storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.h94
-rw-r--r--storage/src/vespa/storage/distributor/operationowner.h12
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/getoperation.cpp12
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/getoperation.h14
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/putoperation.cpp12
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/putoperation.h8
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/read_for_write_visitor_operation.cpp6
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/read_for_write_visitor_operation.h6
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp14
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/removelocationoperation.h18
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp8
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/removeoperation.h10
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/statbucketlistoperation.cpp2
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/statbucketlistoperation.h6
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/statbucketoperation.cpp6
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/statbucketoperation.h6
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp50
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.h46
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp8
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/updateoperation.h10
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/visitoroperation.cpp24
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/visitoroperation.h26
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.cpp4
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.h4
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.h2
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp4
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.h4
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp6
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.h6
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/removebucketoperation.cpp6
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/removebucketoperation.h6
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/setbucketstateoperation.cpp8
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/setbucketstateoperation.h8
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp4
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.h4
-rw-r--r--storage/src/vespa/storage/distributor/operations/operation.cpp2
-rw-r--r--storage/src/vespa/storage/distributor/operations/operation.h10
-rw-r--r--storage/src/vespa/storage/distributor/persistencemessagetracker.cpp2
-rw-r--r--storage/src/vespa/storage/distributor/persistencemessagetracker.h2
-rw-r--r--storage/src/vespa/storage/distributor/potential_data_loss_report.h7
-rw-r--r--storage/src/vespa/storage/distributor/statechecker.cpp14
-rw-r--r--storage/src/vespa/storage/distributor/statechecker.h9
-rw-r--r--storage/src/vespa/storage/distributor/statecheckers.cpp18
-rw-r--r--storage/src/vespa/storage/distributor/storage_node_up_states.h14
-rw-r--r--storage/src/vespa/storage/distributor/stripe_access_guard.h20
-rw-r--r--storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp51
-rw-r--r--storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h16
-rw-r--r--storage/src/vespa/storage/distributor/stripe_host_info_notifier.h24
-rw-r--r--storage/src/vespa/storage/distributor/throttlingoperationstarter.h10
-rw-r--r--storage/src/vespa/storage/distributor/tickable_stripe.h68
-rw-r--r--storage/src/vespa/storage/storageserver/distributornode.cpp45
-rw-r--r--storage/src/vespa/storage/storageserver/distributornode.h18
-rwxr-xr-xtravis/travis.sh19
-rw-r--r--vespa-feed-client/CMakeLists.txt2
-rw-r--r--vespa-feed-client/OWNERS2
-rw-r--r--vespa-feed-client/README.md2
-rw-r--r--vespa-feed-client/pom.xml107
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/CliArguments.java199
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/CliClient.java92
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/DocumentId.java96
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java33
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java93
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java8
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java197
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java156
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonStreamFeeder.java100
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParameters.java70
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/RequestStrategy.java20
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/Result.java35
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/SslContextBuilder.java126
-rwxr-xr-xvespa-feed-client/src/main/sh/vespa-version-generator.sh24
-rw-r--r--vespa-feed-client/src/test/java/.gitignore0
-rw-r--r--vespa-feed-client/src/test/java/ai/vespa/feed/client/CliArgumentsTest.java56
-rw-r--r--vespa-feed-client/src/test/resources/help.txt12
-rw-r--r--vespa_jersey2/pom.xml12
-rw-r--r--vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java38
-rw-r--r--vespaclient-container-plugin/src/main/resources/configdefinitions/document-operation-executor.def10
-rw-r--r--vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java14
-rw-r--r--vespajlib/abi-spec.json5
-rw-r--r--vespajlib/src/main/java/com/yahoo/compress/Compressor.java12
-rw-r--r--vespajlib/src/main/java/com/yahoo/io/ByteWriter.java2
-rw-r--r--vespajlib/src/main/java/com/yahoo/io/Utf8ByteWriter.java47
-rw-r--r--vespajlib/src/main/java/com/yahoo/slime/JsonDecoder.java6
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/IndexedTensor.java23
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/MappedTensor.java5
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/MixedTensor.java15
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/Tensor.java7
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/functions/Join.java14
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/functions/Merge.java11
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/functions/Slice.java3
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/serialization/JsonFormat.java103
-rw-r--r--vespajlib/src/test/java/com/yahoo/tensor/serialization/JsonFormatTestCase.java125
-rw-r--r--vespalib/src/vespa/vespalib/io/fileutil.cpp102
-rw-r--r--vespamalloc/src/tests/stacktrace/CMakeLists.txt1
-rw-r--r--vespamalloc/src/tests/stacktrace/backtrace.c84
-rw-r--r--vespamalloc/src/tests/stacktrace/backtrace.h17
-rw-r--r--vsm/src/tests/textutil/textutil.cpp10
-rw-r--r--vsm/src/vespa/vsm/searcher/CMakeLists.txt9
-rw-r--r--vsm/src/vespa/vsm/searcher/futf8strchrfieldsearcher.cpp44
-rw-r--r--zookeeper-server/zookeeper-server-common/src/main/java/com/yahoo/vespa/zookeeper/Configurator.java2
731 files changed, 14291 insertions, 8391 deletions
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index c1f970abad4..00000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-sudo: required
-
-services:
- - docker
-
-cache:
- timeout: 900
- directories:
- - $HOME/.m2
- - $HOME/.ccache
-
-branches:
- only:
- - master
-
-before_cache:
- - sudo rm -rf $HOME/.m2/repository/com/yahoo/vespa
- - sudo rm -rf $HOME/.m2/repository/repository.xml
- - du --summarize --human-readable $HOME/.m2/repository
- - du --summarize --human-readable $HOME/.ccache
-
-install: true
-
-language: minimal
-
-script: ./travis/travis.sh
diff --git a/CMakeLists.txt b/CMakeLists.txt
index d42c24ed49b..c98994cf993 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -131,6 +131,7 @@ add_subdirectory(vbench)
add_subdirectory(vdslib)
add_subdirectory(vdstestlib)
add_subdirectory(vespa-athenz)
+add_subdirectory(vespa-feed-client)
add_subdirectory(vespa-http-client)
add_subdirectory(vespa-osgi-testrunner)
add_subdirectory(vespa-testrunner-components)
@@ -145,7 +146,8 @@ add_subdirectory(vespajlib)
add_subdirectory(vespalib)
add_subdirectory(vespalog)
if(NOT CMAKE_HOST_SYSTEM_NAME STREQUAL "Darwin" AND
- NOT DEFINED VESPA_USE_SANITIZER)
+ NOT DEFINED VESPA_USE_SANITIZER AND
+ CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
add_subdirectory(vespamalloc)
endif()
add_subdirectory(vsm)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index d5c611680dd..3a86f99d17c 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -10,7 +10,7 @@ This documents tells you what you need to know to contribute.
All work on Vespa happens directly on Github,
using the [Github flow model](https://guides.github.com/introduction/flow/).
We release the master branch a few times a week and you should expect it to almost always work.
-In addition to the [public Travis build](https://travis-ci.org/vespa-engine/vespa)
+In addition to the [public Screwdriver build](https://cd.screwdriver.cd/pipelines/6386)
we have a large acceptance and performance test suite which
is also run continuously. We plan to add this to the open source code base later.
@@ -27,7 +27,7 @@ Please follow [best practices](https://github.com/trein/dev-best-practices/wiki/
When your code is ready to be submitted, [submit a pull request](https://help.github.com/articles/creating-a-pull-request/) to begin the code review process.
-We only seek to accept code that you are authorized to contribute to the project. We have added a pull request template on our projects so that your contributions are made with the following confirmation:
+We only seek to accept code that you are authorized to contribute to the project. We have added a pull request template on our projects so that your contributions are made with the following confirmation:
> I confirm that this contribution is made under the terms of the license found in the root directory of this repository's source tree and that I have the authority necessary to make this contribution on behalf of its copyright owner.
@@ -49,7 +49,7 @@ There is also a [ToDo list](TODO.md) for larger things which nobody are working
If you have questions, want to share your experience or help others, please join our community on [Stack Overflow](http://stackoverflow.com/questions/tagged/vespa).
### Getting started
-See [README](README.md) for how to build and test Vespa.
+See [README](README.md) for how to build and test Vespa.
For an overview of the modules, see [Code-map.md](Code-map.md).
More details are in the READMEs of each module.
diff --git a/application/pom.xml b/application/pom.xml
index 25e17aca3f9..c0bef61f57d 100644
--- a/application/pom.xml
+++ b/application/pom.xml
@@ -207,6 +207,13 @@
</executions>
</plugin>
<plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <configuration>
+ <forkCount>2</forkCount>
+ </configuration>
+ </plugin>
+ <plugin>
<groupId>com.yahoo.vespa</groupId>
<artifactId>abi-check-plugin</artifactId>
</plugin>
diff --git a/build_settings.cmake b/build_settings.cmake
index 414d73b2013..10f4c7ff926 100644
--- a/build_settings.cmake
+++ b/build_settings.cmake
@@ -178,7 +178,7 @@ if(CMAKE_HOST_SYSTEM_NAME STREQUAL "Darwin")
else()
if(NOT VESPA_USE_SANITIZER OR NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
# Don't allow unresolved symbols in shared libraries
- set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--no-undefined")
+ set(VESPA_DISALLOW_UNRESOLVED_SYMBOLS_IN_SHARED_LIBRARIES "-Wl,--no-undefined")
endif()
# Don't allow unresolved symbols in executables
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-undefined")
diff --git a/client/src/main/java/ai/vespa/client/dsl/Field.java b/client/src/main/java/ai/vespa/client/dsl/Field.java
index c540e844c7a..cc30b8aaded 100644
--- a/client/src/main/java/ai/vespa/client/dsl/Field.java
+++ b/client/src/main/java/ai/vespa/client/dsl/Field.java
@@ -550,6 +550,29 @@ public class Field extends QueryChain {
return common("=", annotation, false);
}
+ /**
+ * Nearest neighbor query.
+ * https://docs.vespa.ai/en/reference/query-language-reference.html#nearestneighbor
+ *
+ * @param rankFeature the rankfeature.
+ * @return the query
+ */
+ public Query nearestNeighbor(String rankFeature) {
+ return common("nearestNeighbor", annotation, (Object) rankFeature);
+ }
+
+ /**
+ * Nearest neighbor query.
+ * https://docs.vespa.ai/en/reference/query-language-reference.html#nearestneighbor
+ *
+ * @param annotation the annotation
+ * @param rankFeature the rankfeature.
+ * @return the query
+ */
+ public Query nearestNeighbor(Annotation annotation, String rankFeature) {
+ return common("nearestNeighbor", annotation, (Object) rankFeature);
+ }
+
private Query common(String relation, Annotation annotation, Object value) {
return common(relation, annotation, value, values.toArray());
}
@@ -604,6 +627,12 @@ public class Field extends QueryChain {
case "sameElement":
return String.format("%s contains %s(%s)", fieldName, relation,
((Query) values.get(0)).toCommaSeparatedAndQueries());
+ case "nearestNeighbor":
+ valuesStr = values.stream().map(i -> (String) i).collect(Collectors.joining(", "));
+
+ return hasAnnotation
+ ? String.format("([%s]nearestNeighbor(%s, %s))", annotation, fieldName, valuesStr)
+ : String.format("nearestNeighbor(%s, %s)", fieldName, valuesStr);
default:
Object value = values.get(0);
valuesStr = value instanceof Long ? value.toString() + "L" : value.toString();
diff --git a/client/src/test/groovy/ai/vespa/client/dsl/QTest.groovy b/client/src/test/groovy/ai/vespa/client/dsl/QTest.groovy
index 671405a9c73..d1560937fef 100644
--- a/client/src/test/groovy/ai/vespa/client/dsl/QTest.groovy
+++ b/client/src/test/groovy/ai/vespa/client/dsl/QTest.groovy
@@ -424,6 +424,26 @@ class QTest extends Specification {
q == """yql=select * from sources * where f1 contains ([{"key":"value"}]uri("https://test.uri"));"""
}
+ def "nearestNeighbor"() {
+ given:
+ def q = Q.p("f1").nearestNeighbor("query_vector")
+ .semicolon()
+ .build()
+
+ expect:
+ q == """yql=select * from sources * where nearestNeighbor(f1, query_vector);"""
+ }
+
+ def "nearestNeighbor with annotation"() {
+ given:
+ def q = Q.p("f1").nearestNeighbor(A.a("targetHits", 10), "query_vector")
+ .semicolon()
+ .build()
+
+ expect:
+ q == """yql=select * from sources * where ([{"targetHits":10}]nearestNeighbor(f1, query_vector));"""
+ }
+
def "use contains instead of contains equiv when input size is 1"() {
def q = Q.p("f1").containsEquiv(["p1"])
.semicolon()
diff --git a/cloud-tenant-base-dependencies-enforcer/pom.xml b/cloud-tenant-base-dependencies-enforcer/pom.xml
index 8d1f257fca8..16045d5dc75 100644
--- a/cloud-tenant-base-dependencies-enforcer/pom.xml
+++ b/cloud-tenant-base-dependencies-enforcer/pom.xml
@@ -20,8 +20,8 @@
Copied here because vz-tenant-base does not have a parent. -->
<properties>
<aopalliance.version>1.0</aopalliance.version>
- <athenz.version>1.10.11</athenz.version>
- <bouncycastle.version>1.65</bouncycastle.version>
+ <athenz.version>1.10.14</athenz.version>
+ <bouncycastle.version>1.68</bouncycastle.version>
<felix.version>6.0.3</felix.version>
<felix.log.version>1.0.1</felix.log.version>
<findbugs.version>1.3.9</findbugs.version>
@@ -30,10 +30,11 @@
<javax.inject.version>1</javax.inject.version>
<javax.servlet-api.version>3.1.0</javax.servlet-api.version>
<jaxb.version>2.3.0</jaxb.version>
- <jetty.version>9.4.40.v20210413</jetty.version>
+ <jetty.version>9.4.41.v20210516</jetty.version>
<jetty-alpn.version>1.1.3.v20160715</jetty-alpn.version>
<junit5.version>5.7.0</junit5.version>
<junit5.platform.version>1.7.0</junit5.platform.version>
+ <onnxruntime.version>1.7.0</onnxruntime.version>
<org.lz4.version>1.7.1</org.lz4.version>
<org.json.version>20090211</org.json.version><!-- TODO Vespa 8: remove as provided dependency -->
<slf4j.version>1.7.30</slf4j.version>
@@ -96,8 +97,6 @@
<include>com.sun.xml.bind:jaxb-core:[${jaxb.version}]:jar:provided</include>
<include>com.sun.xml.bind:jaxb-impl:[${jaxb.version}]:jar:provided</include>
<include>commons-logging:commons-logging:[1.2]:jar:provided</include>
- <include>jakarta.activation:jakarta.activation-api:[1.2.1]:jar:provided</include>
- <include>jakarta.xml.bind:jakarta.xml.bind-api:[2.3.2]:jar:provided</include>
<include>javax.annotation:javax.annotation-api:[${javax.annotation-api.version}]:jar:provided</include>
<include>javax.inject:javax.inject:[${javax.inject.version}]:jar:provided</include>
<include>javax.servlet:javax.servlet-api:[${javax.servlet-api.version}]:jar:provided</include>
@@ -221,6 +220,7 @@
<include>com.google.protobuf:protobuf-java:3.7.0:jar:test</include>
<include>com.ibm.icu:icu4j:57.1:jar:test</include>
<include>com.intellij:annotations:12.0:jar:test</include>
+ <include>com.microsoft.onnxruntime:onnxruntime:[${onnxruntime.version}]:jar:test</include>
<include>com.optimaize.languagedetector:language-detector:0.6:jar:test</include>
<include>com.thaiopensource:jing:20091111:jar:test</include>
<include>com.yahoo.athenz:athenz-auth-core:[${athenz.version}]:jar:test</include>
diff --git a/clustercontroller-core/pom.xml b/clustercontroller-core/pom.xml
index 5623606e9ea..47afe3b7df3 100644
--- a/clustercontroller-core/pom.xml
+++ b/clustercontroller-core/pom.xml
@@ -120,6 +120,13 @@
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
</plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <configuration>
+ <forkCount>4</forkCount>
+ </configuration>
+ </plugin>
</plugins>
</build>
<profiles>
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/SlobrokClient.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/SlobrokClient.java
index 8649e7cc11a..e180016f286 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/SlobrokClient.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/SlobrokClient.java
@@ -53,6 +53,7 @@ public class SlobrokClient implements NodeLookup {
this.connectionSpecs = slobrokConnectionSpecs;
shutdown();
supervisor = new Supervisor(new Transport("slobrok-client"));
+ supervisor.useSmallBuffers();
SlobrokList slist = new SlobrokList();
slist.setup(slobrokConnectionSpecs);
mirror = new Mirror(supervisor, slist);
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ConfigChangeReindexAction.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ConfigChangeReindexAction.java
index bb714a55f94..261a09b4a0d 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/ConfigChangeReindexAction.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ConfigChangeReindexAction.java
@@ -13,7 +13,7 @@ public interface ConfigChangeReindexAction extends ConfigChangeAction {
@Override default Type getType() { return Type.REINDEX; }
/** @return name identifying this kind of change, used to identify names which should be allowed */
- default String name() { return validationId().orElseThrow().value(); }
+ default String name() { return validationId().map(ValidationId::value).orElse("reindexing"); }
/** @return name of the document type that must be re-indexed */
String getDocumentType();
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/EndpointCertificateMetadata.java b/config-model-api/src/main/java/com/yahoo/config/model/api/EndpointCertificateMetadata.java
index 20fc911241b..07aca1f7275 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/EndpointCertificateMetadata.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/EndpointCertificateMetadata.java
@@ -1,6 +1,8 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.model.api;
+import java.util.Objects;
+
public class EndpointCertificateMetadata {
private final String keyName;
@@ -33,4 +35,17 @@ public class EndpointCertificateMetadata {
", version=" + version +
'}';
}
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ EndpointCertificateMetadata that = (EndpointCertificateMetadata) o;
+ return version == that.version && Objects.equals(keyName, that.keyName) && Objects.equals(certName, that.certName);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(keyName, certName, version);
+ }
}
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
index 3a2f71e2b8c..5d28088d108 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
@@ -73,18 +73,19 @@ public interface ModelContext {
@ModelFeatureFlag(owners = {"baldersheim"}, comment = "Select sequencer type use while feeding") default String feedSequencerType() { throw new UnsupportedOperationException("TODO specify default value"); }
@ModelFeatureFlag(owners = {"baldersheim"}) default String responseSequencerType() { throw new UnsupportedOperationException("TODO specify default value"); }
@ModelFeatureFlag(owners = {"baldersheim"}) default int defaultNumResponseThreads() { return 2; }
- @ModelFeatureFlag(owners = {"baldersheim"}) default int maxPendingMoveOps() { throw new UnsupportedOperationException("TODO specify default value"); }
+ @ModelFeatureFlag(owners = {"baldersheim"}) default int maxPendingMoveOps() { return 100; }
@ModelFeatureFlag(owners = {"baldersheim"}) default boolean skipCommunicationManagerThread() { throw new UnsupportedOperationException("TODO specify default value"); }
@ModelFeatureFlag(owners = {"baldersheim"}) default boolean skipMbusRequestThread() { throw new UnsupportedOperationException("TODO specify default value"); }
@ModelFeatureFlag(owners = {"baldersheim"}) default boolean skipMbusReplyThread() { throw new UnsupportedOperationException("TODO specify default value"); }
@ModelFeatureFlag(owners = {"tokle"}) default boolean useAccessControlTlsHandshakeClientAuth() { return true; }
@ModelFeatureFlag(owners = {"baldersheim"}) default boolean useAsyncMessageHandlingOnSchedule() { throw new UnsupportedOperationException("TODO specify default value"); }
@ModelFeatureFlag(owners = {"baldersheim"}) default double feedConcurrency() { throw new UnsupportedOperationException("TODO specify default value"); }
- @ModelFeatureFlag(owners = {"baldersheim"}) default boolean useBucketExecutorForLidSpaceCompact() { throw new UnsupportedOperationException("TODO specify default value"); }
- @ModelFeatureFlag(owners = {"baldersheim"}) default boolean useBucketExecutorForBucketMove() { throw new UnsupportedOperationException("TODO specify default value"); }
+ @ModelFeatureFlag(owners = {"baldersheim"}) default boolean useBucketExecutorForLidSpaceCompact() { return true; }
+ @ModelFeatureFlag(owners = {"baldersheim"}) default boolean useBucketExecutorForBucketMove() { return true; }
+ @ModelFeatureFlag(owners = {"baldersheim"}) default boolean useBucketExecutorForPruneRemoved() { throw new UnsupportedOperationException("TODO specify default value"); }
@ModelFeatureFlag(owners = {"geirst"}) default boolean enableFeedBlockInDistributor() { return true; }
- @ModelFeatureFlag(owners = {"baldersheim", "geirst", "toregge"}) default double maxDeadBytesRatio() { return 0.2; }
- @ModelFeatureFlag(owners = {"hmusum"}) default int clusterControllerMaxHeapSizeInMb() { return 128; }
+ @ModelFeatureFlag(owners = {"baldersheim", "geirst", "toregge"}) default double maxDeadBytesRatio() { return 0.05; }
+ @ModelFeatureFlag(owners = {"hmusum"}, removeAfter = "7.406") default int clusterControllerMaxHeapSizeInMb() { return 128; }
@ModelFeatureFlag(owners = {"hmusum"}) default int metricsProxyMaxHeapSizeInMb(ClusterSpec.Type type) { return 256; }
@ModelFeatureFlag(owners = {"bjorncs", "tokle"}) default List<String> allowedAthenzProxyIdentities() { return List.of(); }
@ModelFeatureFlag(owners = {"tokle"}) default boolean tenantIamRole() { return false; }
diff --git a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
index 75a1a167446..e6bf3a835c6 100644
--- a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
+++ b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
@@ -43,7 +43,6 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
private String sequencerType = "LATENCY";
private String responseSequencerType = "ADAPTIVE";
private int responseNumThreads = 2;
- private int maxPendingMoveOps = 10;
private Optional<EndpointCertificateSecrets> endpointCertificateSecrets = Optional.empty();
private AthenzDomain athenzDomain;
private ApplicationRoles applicationRoles;
@@ -51,11 +50,9 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
private boolean useAccessControlTlsHandshakeClientAuth;
private boolean useAsyncMessageHandlingOnSchedule = false;
private double feedConcurrency = 0.5;
- private boolean useBucketExecutorForLidSpaceCompact;
- private boolean useBucketExecutorForBucketMove;
+ private boolean useBucketExecutorForPruneRemoved;
private boolean enableFeedBlockInDistributor = true;
- private double maxDeadBytesRatio = 0.2;
- private int clusterControllerMaxHeapSizeInMb = 256;
+ private int clusterControllerMaxHeapSizeInMb = 128;
private int metricsProxyMaxHeapSizeInMb = 256;
private int maxActivationInhibitedOutOfSyncGroups = 0;
private List<TenantSecretStore> tenantSecretStores = Collections.emptyList();
@@ -85,7 +82,6 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
@Override public Optional<ApplicationRoles> applicationRoles() { return Optional.ofNullable(applicationRoles); }
@Override public String responseSequencerType() { return responseSequencerType; }
@Override public int defaultNumResponseThreads() { return responseNumThreads; }
- @Override public int maxPendingMoveOps() { return maxPendingMoveOps; }
@Override public boolean skipCommunicationManagerThread() { return false; }
@Override public boolean skipMbusRequestThread() { return false; }
@Override public boolean skipMbusReplyThread() { return false; }
@@ -93,10 +89,8 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
@Override public boolean useAccessControlTlsHandshakeClientAuth() { return useAccessControlTlsHandshakeClientAuth; }
@Override public boolean useAsyncMessageHandlingOnSchedule() { return useAsyncMessageHandlingOnSchedule; }
@Override public double feedConcurrency() { return feedConcurrency; }
- @Override public boolean useBucketExecutorForLidSpaceCompact() { return useBucketExecutorForLidSpaceCompact; }
- @Override public boolean useBucketExecutorForBucketMove() { return useBucketExecutorForBucketMove; }
+ @Override public boolean useBucketExecutorForPruneRemoved() { return useBucketExecutorForPruneRemoved; }
@Override public boolean enableFeedBlockInDistributor() { return enableFeedBlockInDistributor; }
- @Override public double maxDeadBytesRatio() { return maxDeadBytesRatio; }
@Override public int clusterControllerMaxHeapSizeInMb() { return clusterControllerMaxHeapSizeInMb; }
@Override public int metricsProxyMaxHeapSizeInMb(ClusterSpec.Type type) { return metricsProxyMaxHeapSizeInMb; }
@Override public int maxActivationInhibitedOutOfSyncGroups() { return maxActivationInhibitedOutOfSyncGroups; }
@@ -131,10 +125,7 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
responseNumThreads = numThreads;
return this;
}
- public TestProperties setMaxPendingMoveOps(int moveOps) {
- maxPendingMoveOps = moveOps;
- return this;
- }
+
public TestProperties setDefaultTermwiseLimit(double limit) {
defaultTermwiseLimit = limit;
return this;
@@ -200,13 +191,8 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
return this;
}
- public TestProperties useBucketExecutorForLidSpaceCompact(boolean enabled) {
- useBucketExecutorForLidSpaceCompact = enabled;
- return this;
- }
-
- public TestProperties useBucketExecutorForBucketMove(boolean enabled) {
- useBucketExecutorForBucketMove = enabled;
+ public TestProperties useBucketExecutorForPruneRemoved(boolean enabled) {
+ useBucketExecutorForPruneRemoved = enabled;
return this;
}
@@ -215,11 +201,6 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
return this;
}
- public TestProperties maxDeadBytesRatio(double ratio) {
- maxDeadBytesRatio = ratio;
- return this;
- }
-
public TestProperties clusterControllerMaxHeapSizeInMb(int heapSize) {
clusterControllerMaxHeapSizeInMb = heapSize;
return this;
diff --git a/config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java b/config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java
index 188504edd18..e41e7309986 100644
--- a/config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java
+++ b/config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java
@@ -63,6 +63,8 @@ public class InMemoryProvisioner implements HostProvisioner {
private final boolean useMaxResources;
+ private final boolean alwaysReturnOneNode;
+
private Provisioned provisioned = new Provisioned();
/** Creates this with a number of nodes with resources 1, 3, 9, 1 */
@@ -72,37 +74,39 @@ public class InMemoryProvisioner implements HostProvisioner {
/** Creates this with a number of nodes with given resources */
public InMemoryProvisioner(int nodeCount, NodeResources resources, boolean sharedHosts) {
- this(Map.of(resources, createHostInstances(nodeCount)), true, false, sharedHosts, 0);
+ this(Map.of(resources, createHostInstances(nodeCount)), true, false, false, sharedHosts, 0);
}
/** Creates this with a set of host names of the flavor 'default' */
public InMemoryProvisioner(boolean failOnOutOfCapacity, boolean sharedHosts, String... hosts) {
- this(Map.of(defaultResources, toHostInstances(hosts)), failOnOutOfCapacity, false, sharedHosts, 0);
+ this(Map.of(defaultResources, toHostInstances(hosts)), failOnOutOfCapacity, false, false, sharedHosts, 0);
}
/** Creates this with a set of host names of the flavor 'default' */
public InMemoryProvisioner(boolean failOnOutOfCapacity, boolean sharedHosts, List<String> hosts) {
- this(Map.of(defaultResources, toHostInstances(hosts.toArray(new String[0]))), failOnOutOfCapacity, false, sharedHosts, 0);
+ this(Map.of(defaultResources, toHostInstances(hosts.toArray(new String[0]))), failOnOutOfCapacity, false, false, sharedHosts, 0);
}
/** Creates this with a set of hosts of the flavor 'default' */
public InMemoryProvisioner(Hosts hosts, boolean failOnOutOfCapacity, boolean sharedHosts, String ... retiredHostNames) {
- this(Map.of(defaultResources, hosts.asCollection()), failOnOutOfCapacity, false, sharedHosts, 0, retiredHostNames);
+ this(Map.of(defaultResources, hosts.asCollection()), failOnOutOfCapacity, false, false, sharedHosts, 0, retiredHostNames);
}
/** Creates this with a set of hosts of the flavor 'default' */
public InMemoryProvisioner(Hosts hosts, boolean failOnOutOfCapacity, boolean sharedHosts, int startIndexForClusters, String ... retiredHostNames) {
- this(Map.of(defaultResources, hosts.asCollection()), failOnOutOfCapacity, false, sharedHosts, startIndexForClusters, retiredHostNames);
+ this(Map.of(defaultResources, hosts.asCollection()), failOnOutOfCapacity, false, false, sharedHosts, startIndexForClusters, retiredHostNames);
}
public InMemoryProvisioner(Map<NodeResources, Collection<Host>> hosts,
boolean failOnOutOfCapacity,
boolean useMaxResources,
+ boolean alwaysReturnOneNode,
boolean sharedHosts,
int startIndexForClusters,
String ... retiredHostNames) {
this.failOnOutOfCapacity = failOnOutOfCapacity;
this.useMaxResources = useMaxResources;
+ this.alwaysReturnOneNode = alwaysReturnOneNode;
for (Map.Entry<NodeResources, Collection<Host>> hostsWithResources : hosts.entrySet())
for (Host host : hostsWithResources.getValue())
freeNodes.put(hostsWithResources.getKey(), host);
@@ -142,16 +146,20 @@ public class InMemoryProvisioner implements HostProvisioner {
public List<HostSpec> prepare(ClusterSpec cluster, ClusterResources requested, boolean required, boolean canFail) {
if (cluster.group().isPresent() && requested.groups() > 1)
throw new IllegalArgumentException("Cannot both be specifying a group and ask for groups to be created");
- int capacity = failOnOutOfCapacity || required
+
+ int nodes = failOnOutOfCapacity || required
? requested.nodes()
: Math.min(requested.nodes(), freeNodes.get(defaultResources).size() + totalAllocatedTo(cluster));
- int groups = requested.groups() > capacity ? capacity : requested.groups();
+ if (alwaysReturnOneNode)
+ nodes = 1;
+
+ int groups = requested.groups() > nodes ? nodes : requested.groups();
List<HostSpec> allocation = new ArrayList<>();
if (groups == 1) {
allocation.addAll(allocateHostGroup(cluster.with(Optional.of(ClusterSpec.Group.from(0))),
requested.nodeResources(),
- capacity,
+ nodes,
startIndexForClusters,
canFail));
}
@@ -159,7 +167,7 @@ public class InMemoryProvisioner implements HostProvisioner {
for (int i = 0; i < groups; i++) {
allocation.addAll(allocateHostGroup(cluster.with(Optional.of(ClusterSpec.Group.from(i))),
requested.nodeResources(),
- capacity / groups,
+ nodes / groups,
allocation.size(),
canFail));
}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/MapEvaluationTypeContext.java b/config-model/src/main/java/com/yahoo/searchdefinition/MapEvaluationTypeContext.java
index e8ee5e9ed57..41cb40da4d6 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/MapEvaluationTypeContext.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/MapEvaluationTypeContext.java
@@ -206,6 +206,14 @@ public class MapEvaluationTypeContext extends FunctionReferenceContext implement
return featureTypes.get(reference);
}
+ // the name of a constant feature?
+ if (reference.isIdentifier()) {
+ Reference asConst = FeatureNames.asConstantFeature(reference.name());
+ if (featureTypes.containsKey(asConst)) {
+ return featureTypes.get(asConst);
+ }
+ }
+
// We do not know what this is - since we do not have complete knowledge about the match features
// in Java we must assume this is a match feature and return the double type - which is the type of
// all match features
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/OnnxModels.java b/config-model/src/main/java/com/yahoo/searchdefinition/OnnxModels.java
index 1cc33664e8c..60733a4f5ba 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/OnnxModels.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/OnnxModels.java
@@ -9,7 +9,7 @@ import java.util.HashMap;
import java.util.Map;
/**
- * ONNX models tied to a search definition.
+ * ONNX models tied to a search definition or global.
*
* @author lesters
*/
@@ -23,6 +23,10 @@ public class OnnxModels {
models.put(name, model);
}
+ public void add(Map<String, OnnxModel> models) {
+ models.values().forEach(this::add);
+ }
+
public OnnxModel get(String name) {
return models.get(name);
}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/RankProfile.java b/config-model/src/main/java/com/yahoo/searchdefinition/RankProfile.java
index b460752d7bd..be246a143b2 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/RankProfile.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/RankProfile.java
@@ -161,7 +161,7 @@ public class RankProfile implements Cloneable {
return search != null ? search.rankingConstants() : model.rankingConstants();
}
- private Map<String, OnnxModel> onnxModels() {
+ public Map<String, OnnxModel> onnxModels() {
return search != null ? search.onnxModels().asMap() : Collections.emptyMap();
}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/Search.java b/config-model/src/main/java/com/yahoo/searchdefinition/Search.java
index 64c5590b689..9b7434dccab 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/Search.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/Search.java
@@ -61,6 +61,8 @@ public class Search implements ImmutableSearch {
/** True if this doesn't define a search, just a document type */
private boolean documentsOnly = false;
+ private boolean rawAsBase64 = false;
+
/** The stemming setting of this search definition. Default is BEST. */
private Stemming stemming = Stemming.BEST;
@@ -125,6 +127,16 @@ public class Search implements ImmutableSearch {
}
/**
+ * Returns true if 'raw' fields shall be presented as base64 in summary
+ * Note that tis is temporary and will disappear on Vespa 8 as it will become default, and only option.
+ *
+ * @return true if raw shall be encoded as base64 in summary
+ */
+ public boolean isRawAsBase64() { return rawAsBase64; }
+
+ public void enableRawAsBase64() { rawAsBase64 = true; }
+
+ /**
* Sets the stemming default of fields. Default is ALL
*
* @param stemming set default stemming for this searchdefinition
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/derived/RankProfileList.java b/config-model/src/main/java/com/yahoo/searchdefinition/derived/RankProfileList.java
index 22a32c8fd65..5337d58fb82 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/derived/RankProfileList.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/derived/RankProfileList.java
@@ -57,8 +57,8 @@ public class RankProfileList extends Derived implements RankProfilesConfig.Produ
ModelContext.Properties deployProperties) {
setName(search == null ? "default" : search.getName());
this.rankingConstants = rankingConstants;
- deriveRankProfiles(rankProfileRegistry, queryProfiles, importedModels, search, attributeFields, deployProperties);
this.onnxModels = search == null ? new OnnxModels() : search.onnxModels(); // as ONNX models come from parsing rank expressions
+ deriveRankProfiles(rankProfileRegistry, queryProfiles, importedModels, search, attributeFields, deployProperties);
}
private void deriveRankProfiles(RankProfileRegistry rankProfileRegistry,
@@ -94,6 +94,10 @@ public class RankProfileList extends Derived implements RankProfilesConfig.Produ
rankingConstants.sendTo(services);
}
+ public void sendOnnxModelsTo(Collection<? extends AbstractService> services) {
+ onnxModels.sendTo(services);
+ }
+
@Override
public String getDerivedName() { return "rank-profiles"; }
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/derived/SummaryClass.java b/config-model/src/main/java/com/yahoo/searchdefinition/derived/SummaryClass.java
index b08e9948ecd..af2168545dc 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/derived/SummaryClass.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/derived/SummaryClass.java
@@ -27,6 +27,7 @@ public class SummaryClass extends Derived {
/** True if this summary class needs to access summary information on disk */
private boolean accessingDiskSummary = false;
+ private final boolean rawAsBase64;
/** The summary fields of this indexed by name */
private Map<String,SummaryClassField> fields = new java.util.LinkedHashMap<>();
@@ -42,6 +43,7 @@ public class SummaryClass extends Derived {
*/
public SummaryClass(Search search, DocumentSummary summary, DeployLogger deployLogger) {
this.deployLogger = deployLogger;
+ this.rawAsBase64 = search.isRawAsBase64();
deriveName(summary);
deriveFields(search,summary);
deriveImplicitFields(summary);
@@ -74,12 +76,12 @@ public class SummaryClass extends Derived {
private void addField(String name, DataType type, SummaryTransform transform) {
if (fields.containsKey(name)) {
SummaryClassField sf = fields.get(name);
- if (!SummaryClassField.convertDataType(type, transform).equals(sf.getType())) {
+ if (!SummaryClassField.convertDataType(type, transform, rawAsBase64).equals(sf.getType())) {
deployLogger.logApplicationPackage(Level.WARNING, "Conflicting definition of field " + name + ". " +
"Declared as type " + sf.getType() + " and " + type);
}
} else {
- fields.put(name, new SummaryClassField(name, type, transform));
+ fields.put(name, new SummaryClassField(name, type, transform, rawAsBase64));
}
}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/derived/SummaryClassField.java b/config-model/src/main/java/com/yahoo/searchdefinition/derived/SummaryClassField.java
index 4375b446e98..3c29971a74c 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/derived/SummaryClassField.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/derived/SummaryClassField.java
@@ -5,7 +5,19 @@ import com.yahoo.document.CollectionDataType;
import com.yahoo.document.DataType;
import com.yahoo.document.MapDataType;
import com.yahoo.document.ReferenceDataType;
-import com.yahoo.document.datatypes.*;
+import com.yahoo.document.datatypes.BoolFieldValue;
+import com.yahoo.document.datatypes.ByteFieldValue;
+import com.yahoo.document.datatypes.DoubleFieldValue;
+import com.yahoo.document.datatypes.FieldValue;
+import com.yahoo.document.datatypes.Float16FieldValue;
+import com.yahoo.document.datatypes.FloatFieldValue;
+import com.yahoo.document.datatypes.IntegerFieldValue;
+import com.yahoo.document.datatypes.LongFieldValue;
+import com.yahoo.document.datatypes.PredicateFieldValue;
+import com.yahoo.document.datatypes.Raw;
+import com.yahoo.document.datatypes.StringFieldValue;
+import com.yahoo.document.datatypes.Struct;
+import com.yahoo.document.datatypes.TensorFieldValue;
import com.yahoo.vespa.documentmodel.SummaryTransform;
/**
@@ -32,6 +44,7 @@ public class SummaryClassField {
DOUBLE("double"),
STRING("string"),
DATA("data"),
+ RAW("raw"),
LONGSTRING("longstring"),
LONGDATA("longdata"),
XMLSTRING("xmlstring"),
@@ -39,7 +52,7 @@ public class SummaryClassField {
JSONSTRING("jsonstring"),
TENSOR("tensor");
- private String name;
+ private final String name;
Type(String name) {
this.name = name;
@@ -55,9 +68,9 @@ public class SummaryClassField {
}
}
- public SummaryClassField(String name, DataType type, SummaryTransform transform) {
+ public SummaryClassField(String name, DataType type, SummaryTransform transform, boolean rawAsBase64) {
this.name = name;
- this.type = convertDataType(type, transform);
+ this.type = convertDataType(type, transform, rawAsBase64);
}
public String getName() { return name; }
@@ -65,7 +78,7 @@ public class SummaryClassField {
public Type getType() { return type; }
/** Converts to the right summary field type from a field datatype and a transform*/
- public static Type convertDataType(DataType fieldType, SummaryTransform transform) {
+ public static Type convertDataType(DataType fieldType, SummaryTransform transform, boolean rawAsBase64) {
FieldValue fval = fieldType.createFieldValue();
if (fval instanceof StringFieldValue) {
if (transform != null && transform.equals(SummaryTransform.RANKFEATURES)) {
@@ -90,7 +103,7 @@ public class SummaryClassField {
} else if (fval instanceof ByteFieldValue) {
return Type.BYTE;
} else if (fval instanceof Raw) {
- return Type.DATA;
+ return rawAsBase64 ? Type.RAW : Type.DATA;
} else if (fval instanceof Struct) {
return Type.JSONSTRING;
} else if (fval instanceof PredicateFieldValue) {
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/ConstantTensorTransformer.java b/config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/ConstantTensorTransformer.java
index 6991e2b978b..31025b0511d 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/ConstantTensorTransformer.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/ConstantTensorTransformer.java
@@ -49,11 +49,16 @@ public class ConstantTensorTransformer extends ExpressionTransformer<RankProfile
}
private ExpressionNode transformConstantReference(ReferenceNode node, RankProfileTransformContext context) {
+ String constantName = node.getName();
Reference constantReference = node.reference();
- if ( ! FeatureNames.isConstantFeature(constantReference) && constantReference.isIdentifier())
- constantReference = FeatureNames.asConstantFeature(node.getName());
-
- Value value = context.constants().get(node.getName());
+ if (FeatureNames.isConstantFeature(constantReference)) {
+ constantName = constantReference.simpleArgument().orElse(null);
+ } else if (constantReference.isIdentifier()) {
+ constantReference = FeatureNames.asConstantFeature(constantName);
+ } else {
+ return node;
+ }
+ Value value = context.constants().get(constantName);
if (value == null || value.type().rank() == 0) return node;
TensorValue tensorValue = (TensorValue)value;
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/ExactMatch.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/ExactMatch.java
index d5b22988e36..d03dfba2863 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/processing/ExactMatch.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/ExactMatch.java
@@ -62,9 +62,9 @@ public class ExactMatch extends Processor {
&& ! field.getMatching().getExactMatchTerminator().equals("")) {
exactTerminator = field.getMatching().getExactMatchTerminator();
} else {
- warn(search, field,
- "With 'exact' matching, an exact-terminator is needed " +
- "(using '" + exactTerminator +"' as terminator)");
+ info(search, field,
+ "With 'exact' matching, an exact-terminator is needed," +
+ " using default value '" + exactTerminator +"' as terminator");
}
field.addQueryCommand("exact " + exactTerminator);
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/HostResource.java b/config-model/src/main/java/com/yahoo/vespa/model/HostResource.java
index 5aeedb7ceb0..ef041d06978 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/HostResource.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/HostResource.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.model;
import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.config.model.api.HostInfo;
+import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.NodeResources;
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java
index 0b0f466342b..e080ce43730 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java
@@ -9,6 +9,7 @@ import com.yahoo.config.model.ConfigModelContext.ApplicationType;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.model.producer.AbstractConfigProducer;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.model.AbstractService;
import com.yahoo.vespa.model.ConfigProxy;
@@ -59,11 +60,13 @@ public class Admin extends AbstractConfigProducer<Admin> implements Serializable
private Logserver logserver;
private LogForwarder.Config logForwarderConfig = null;
+ private boolean logForwarderIncludeAdmin = false;
private ApplicationType applicationType = ApplicationType.DEFAULT;
- public void setLogForwarderConfig(LogForwarder.Config cfg) {
+ public void setLogForwarderConfig(LogForwarder.Config cfg, boolean includeAdmin) {
this.logForwarderConfig = cfg;
+ this.logForwarderIncludeAdmin = includeAdmin;
}
/**
@@ -216,7 +219,8 @@ public class Admin extends AbstractConfigProducer<Admin> implements Serializable
if (slobroks.isEmpty()) // TODO: Move to caller
slobroks.addAll(createDefaultSlobrokSetup(deployState.getDeployLogger()));
- addMetricsProxyCluster(hosts, deployState);
+ if (! deployState.isHosted() || ! deployState.getProperties().applicationId().instance().isTester())
+ addMetricsProxyCluster(hosts, deployState);
for (HostResource host : hosts) {
if (!host.getHost().runsConfigServer()) {
@@ -243,7 +247,18 @@ public class Admin extends AbstractConfigProducer<Admin> implements Serializable
addConfigProxy(deployState.getDeployLogger(), host);
addFileDistribution(host);
if (logForwarderConfig != null) {
- addLogForwarder(deployState.getDeployLogger(), host);
+ boolean actuallyAdd = true;
+ var membership = host.spec().membership();
+ if (membership.isPresent()) {
+ var clustertype = membership.get().cluster().type();
+ // XXX should skip only if this.isHostedVespa is true?
+ if (clustertype == ClusterSpec.Type.admin) {
+ actuallyAdd = logForwarderIncludeAdmin;
+ }
+ }
+ if (actuallyAdd) {
+ addLogForwarder(deployState.getDeployLogger(), host);
+ }
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/clustercontroller/ClusterControllerContainerCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/clustercontroller/ClusterControllerContainerCluster.java
index 89993780869..dbff597b251 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/clustercontroller/ClusterControllerContainerCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/clustercontroller/ClusterControllerContainerCluster.java
@@ -1,7 +1,6 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.admin.clustercontroller;
-import com.yahoo.config.model.api.ModelContext;
import com.yahoo.config.model.api.Reindexing;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.model.producer.AbstractConfigProducer;
@@ -19,14 +18,12 @@ import java.util.Optional;
*/
public class ClusterControllerContainerCluster extends ContainerCluster<ClusterControllerContainer> {
- private final ModelContext.FeatureFlags featureFlags;
private final ReindexingContext reindexingContext;
public ClusterControllerContainerCluster(
AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) {
super(parent, subId, name, deployState, false);
addDefaultHandlersWithVip();
- this.featureFlags = deployState.featureFlags();
this.reindexingContext = createReindexingContext(deployState);
setJvmGCOptions(deployState.getProperties().jvmGCOptions(Optional.of(ClusterSpec.Type.admin)));
}
@@ -40,8 +37,7 @@ public class ClusterControllerContainerCluster extends ContainerCluster<ClusterC
public void getConfig(QrStartConfig.Builder builder) {
super.getConfig(builder);
- builder.jvm
- .heapsize(featureFlags.clusterControllerMaxHeapSizeInMb());
+ builder.jvm.heapsize(128);
}
public ReindexingContext reindexingContext() { return reindexingContext; }
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
index 7929dc1e93f..034bf772ffc 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
@@ -183,6 +183,7 @@ public class VespaMetricSet {
metrics.add(new Metric("athenz-tenant-cert.expiry.seconds.last"));
metrics.add(new Metric("jdisc.http.request.prematurely_closed.rate"));
+ addMetric(metrics, "jdisc.http.request.requests_per_connection", List.of("sum", "count", "min", "max", "average"));
metrics.add(new Metric("http.status.1xx.rate"));
metrics.add(new Metric("http.status.2xx.rate"));
@@ -239,6 +240,14 @@ public class VespaMetricSet {
metrics.add(new Metric("cluster-controller.stopping.count.last"));
metrics.add(new Metric("cluster-controller.up.count.last"));
metrics.add(new Metric("cluster-controller.cluster-state-change.count"));
+ metrics.add(new Metric("cluster-controller.busy-tick-time-ms.last"));
+ metrics.add(new Metric("cluster-controller.busy-tick-time-ms.max"));
+ metrics.add(new Metric("cluster-controller.busy-tick-time-ms.sum"));
+ metrics.add(new Metric("cluster-controller.busy-tick-time-ms.count"));
+ metrics.add(new Metric("cluster-controller.idle-tick-time-ms.last"));
+ metrics.add(new Metric("cluster-controller.idle-tick-time-ms.max"));
+ metrics.add(new Metric("cluster-controller.idle-tick-time-ms.sum"));
+ metrics.add(new Metric("cluster-controller.idle-tick-time-ms.count"));
metrics.add(new Metric("cluster-controller.is-master.last"));
metrics.add(new Metric("cluster-controller.remote-task-queue.size.last"));
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java
index d2b465e9d02..d22affaf5a3 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java
@@ -3,15 +3,17 @@ package com.yahoo.vespa.model.application.validation;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.provision.Capacity;
+import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
-import com.yahoo.config.provision.HostSpec;
+import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.SystemName;
import com.yahoo.vespa.model.VespaModel;
+import org.jetbrains.annotations.NotNull;
import java.math.BigDecimal;
import java.util.Locale;
import java.util.Map;
-import java.util.Objects;
+import java.util.Set;
import java.util.logging.Logger;
import java.util.stream.Collectors;
@@ -23,6 +25,7 @@ import java.util.stream.Collectors;
public class QuotaValidator extends Validator {
private static final Logger log = Logger.getLogger(QuotaValidator.class.getName());
+ private static final Capacity zeroCapacity = Capacity.from(new ClusterResources(0, 0, NodeResources.zero()));
@Override
public void validate(VespaModel model, DeployState deployState) {
@@ -32,18 +35,35 @@ public class QuotaValidator extends Validator {
}
private void validateBudget(BigDecimal budget, VespaModel model, SystemName systemName) {
- var spend = model.allocatedHosts().getHosts().stream()
+
+ var maxSpend = model.allClusters().stream()
+ .filter(id -> !adminClusterIds(model).contains(id))
+ .map(id -> model.provisioned().all().getOrDefault(id, zeroCapacity))
+ .mapToDouble(c -> c.maxResources().cost())
+ .sum();
+
+ var actualSpend = model.allocatedHosts().getHosts().stream()
.filter(hostSpec -> hostSpec.membership().get().cluster().type() != ClusterSpec.Type.admin)
.mapToDouble(hostSpec -> hostSpec.advertisedResources().cost())
.sum();
- if (Math.abs(spend) < 0.01) {
+ if (Math.abs(actualSpend) < 0.01) {
log.warning("Deploying application " + model.applicationPackage().getApplicationId() + " with zero budget use. This is suspicious, but not blocked");
return;
}
- throwIfBudgetNegative(spend, budget, systemName);
- throwIfBudgetExceeded(spend, budget, systemName);
+ throwIfBudgetNegative(actualSpend, budget, systemName);
+ throwIfBudgetExceeded(actualSpend, budget, systemName);
+ throwIfBudgetExceeded(maxSpend, budget, systemName);
+ }
+
+ @NotNull
+ private Set<ClusterSpec.Id> adminClusterIds(VespaModel model) {
+ return model.allocatedHosts().getHosts().stream()
+ .map(hostSpec -> hostSpec.membership().orElseThrow().cluster())
+ .filter(cluster -> cluster.type() == ClusterSpec.Type.admin)
+ .map(ClusterSpec::id)
+ .collect(Collectors.toUnmodifiableSet());
}
/** Check that all clusters in the application do not exceed the quota max cluster size. */
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/VespaReindexAction.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/VespaReindexAction.java
index 8b4060e7d19..c2d23844687 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/VespaReindexAction.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/VespaReindexAction.java
@@ -46,7 +46,7 @@ public class VespaReindexAction extends VespaConfigChangeAction implements Confi
return new VespaReindexAction(clusterId(), validationId, newMessage, newServices, documentType);
}
- @Override public Optional<ValidationId> validationId() { return Optional.of(validationId); }
+ @Override public Optional<ValidationId> validationId() { return Optional.ofNullable(validationId); }
@Override public String getDocumentType() { return documentType; }
@Override public boolean ignoreForInternalRedeploy() { return false; }
@Override public String toString() { return super.toString() + ", documentType='" + documentType + "'"; }
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidator.java
index 91e370211f1..20872bcf326 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidator.java
@@ -2,7 +2,6 @@
package com.yahoo.vespa.model.application.validation.change.search;
import com.yahoo.config.application.api.ValidationId;
-import com.yahoo.config.application.api.ValidationOverrides;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.searchdefinition.Search;
import com.yahoo.searchdefinition.document.ImmutableSDField;
@@ -13,8 +12,8 @@ import com.yahoo.vespa.indexinglanguage.expressions.ScriptExpression;
import com.yahoo.vespa.model.application.validation.change.VespaConfigChangeAction;
import com.yahoo.vespa.model.application.validation.change.VespaReindexAction;
-import java.time.Instant;
import java.util.ArrayList;
+import java.util.LinkedHashSet;
import java.util.List;
import java.util.Optional;
@@ -37,12 +36,18 @@ public class IndexingScriptChangeValidator {
public List<VespaConfigChangeAction> validate() {
List<VespaConfigChangeAction> result = new ArrayList<>();
- for (ImmutableSDField nextField : nextSearch.allConcreteFields()) {
+ for (ImmutableSDField nextField : new LinkedHashSet<>(nextSearch.allConcreteFields())) {
String fieldName = nextField.getName();
ImmutableSDField currentField = currentSearch.getConcreteField(fieldName);
if (currentField != null) {
validateScripts(currentField, nextField).ifPresent(r -> result.add(r));
}
+ else if (nextField.isExtraField()) {
+ result.add(VespaReindexAction.of(id,
+ null,
+ "Non-document field '" + nextField.getName() +
+ "' added; this may be populated by reindexing"));
+ }
}
return result;
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminBuilderBase.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminBuilderBase.java
index c5edfb9bbf7..963d2dde7fc 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminBuilderBase.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminBuilderBase.java
@@ -114,14 +114,14 @@ public abstract class DomAdminBuilderBase extends VespaDomBuilder.DomConfigProdu
void addLogForwarders(ModelElement logForwardingElement, Admin admin) {
if (logForwardingElement == null) return;
-
+ boolean alsoForAdminCluster = logForwardingElement.booleanAttribute("include-admin");
for (ModelElement e : logForwardingElement.children("splunk")) {
LogForwarder.Config cfg = LogForwarder.cfg()
.withSplunkHome(e.stringAttribute("splunk-home"))
.withDeploymentServer(e.stringAttribute("deployment-server"))
.withClientName(e.stringAttribute("client-name"))
.withPhoneHomeInterval(e.integerAttribute("phone-home-interval"));
- admin.setLogForwarderConfig(cfg);
+ admin.setLogForwarderConfig(cfg, alsoForAdminCluster);
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/chains/search/DomFederationSearcherBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/chains/search/DomFederationSearcherBuilder.java
index 3a4e8a70613..36bf792ee82 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/chains/search/DomFederationSearcherBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/chains/search/DomFederationSearcherBuilder.java
@@ -43,7 +43,6 @@ public class DomFederationSearcherBuilder extends VespaDomBuilder.DomConfigProdu
return XML.getChild(searcherSpec, "source-set") != null;
}
-
private List<FederationSearcherModel.TargetSpec> readSources(Element searcherSpec) {
List<FederationSearcherModel.TargetSpec> sources = new ArrayList<>();
for (Element source : XML.getChildren(searcherSpec, "source")) {
@@ -76,14 +75,14 @@ public class DomFederationSearcherBuilder extends VespaDomBuilder.DomConfigProdu
}
@Override
- protected FederationSearcher doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element searcherElement) {
+ protected FederationSearcher doBuild(DeployState deployState, AbstractConfigProducer<?> ancestor, Element searcherElement) {
FederationSearcherModel model = new FederationSearcherModelBuilder(searcherElement).build();
Optional<Component> targetSelector = buildTargetSelector(deployState, ancestor, searcherElement, model.getComponentId());
return new FederationSearcher(model, targetSelector);
}
- private Optional<Component> buildTargetSelector(DeployState deployState, AbstractConfigProducer ancestor, Element searcherElement, ComponentId namespace) {
+ private Optional<Component> buildTargetSelector(DeployState deployState, AbstractConfigProducer<?> ancestor, Element searcherElement, ComponentId namespace) {
Element targetSelectorElement = XML.getChild(searcherElement, "target-selector");
if (targetSelectorElement == null)
return Optional.empty();
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/chains/search/SearchChainsBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/chains/search/SearchChainsBuilder.java
index 0106123666d..9fb19efbf75 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/chains/search/SearchChainsBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/chains/search/SearchChainsBuilder.java
@@ -16,9 +16,10 @@ import java.util.List;
import java.util.Map;
/**
+ * Creates top level search chains (searchchain, provider) from XML.
+ *
* @author Tony Vaagenes
* @author gjoranv
- * Creates top level search chains(searchchain, provider) from xml.
*/
public class SearchChainsBuilder extends ChainsBuilder<Searcher<?>, SearchChain> {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java
index f0c62664988..4e78f44d0fe 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java
@@ -23,6 +23,7 @@ import com.yahoo.jdisc.http.ServletPathsConfig;
import com.yahoo.osgi.provider.model.ComponentModel;
import com.yahoo.search.config.QrStartConfig;
import com.yahoo.vespa.config.search.RankProfilesConfig;
+import com.yahoo.vespa.config.search.core.OnnxModelsConfig;
import com.yahoo.vespa.config.search.core.RankingConstantsConfig;
import com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyContainer;
import com.yahoo.vespa.model.container.component.BindingPattern;
@@ -56,6 +57,7 @@ public final class ApplicationContainerCluster extends ContainerCluster<Applicat
QrStartConfig.Producer,
RankProfilesConfig.Producer,
RankingConstantsConfig.Producer,
+ OnnxModelsConfig.Producer,
ServletPathsConfig.Producer,
ContainerMbusConfig.Producer,
MetricsProxyApiConfig.Producer,
@@ -227,6 +229,11 @@ public final class ApplicationContainerCluster extends ContainerCluster<Applicat
}
@Override
+ public void getConfig(OnnxModelsConfig.Builder builder) {
+ if (modelEvaluation != null) modelEvaluation.getConfig(builder);
+ }
+
+ @Override
public void getConfig(ContainerMbusConfig.Builder builder) {
if (mbusParams != null) {
if (mbusParams.maxConcurrentFactor != null)
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java
index 8868c55becb..d130b08d083 100755
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java
@@ -188,6 +188,7 @@ public abstract class ContainerCluster<CONTAINER extends Container>
addSimpleComponent("com.yahoo.container.handler.VipStatus");
addSimpleComponent(com.yahoo.container.handler.ClustersStatus.class.getName());
addSimpleComponent("com.yahoo.container.jdisc.DisabledConnectionLogProvider");
+ addSimpleComponent(com.yahoo.jdisc.http.server.jetty.Janitor.class);
addJaxProviders();
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerModelEvaluation.java b/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerModelEvaluation.java
index 72f1921e6a2..510d2fe3d99 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerModelEvaluation.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerModelEvaluation.java
@@ -5,6 +5,7 @@ import ai.vespa.models.evaluation.ModelsEvaluator;
import com.yahoo.osgi.provider.model.ComponentModel;
import com.yahoo.searchdefinition.derived.RankProfileList;
import com.yahoo.vespa.config.search.RankProfilesConfig;
+import com.yahoo.vespa.config.search.core.OnnxModelsConfig;
import com.yahoo.vespa.config.search.core.RankingConstantsConfig;
import com.yahoo.vespa.model.container.component.Handler;
import com.yahoo.vespa.model.container.component.SystemBindingPattern;
@@ -17,7 +18,10 @@ import java.util.Objects;
*
* @author bratseth
*/
-public class ContainerModelEvaluation implements RankProfilesConfig.Producer, RankingConstantsConfig.Producer {
+public class ContainerModelEvaluation implements RankProfilesConfig.Producer,
+ RankingConstantsConfig.Producer,
+ OnnxModelsConfig.Producer
+{
private final static String BUNDLE_NAME = "model-evaluation";
private final static String EVALUATOR_NAME = ModelsEvaluator.class.getName();
@@ -35,6 +39,7 @@ public class ContainerModelEvaluation implements RankProfilesConfig.Producer, Ra
public void prepare(List<ApplicationContainer> containers) {
rankProfileList.sendConstantsTo(containers);
+ rankProfileList.sendOnnxModelsTo(containers);
}
@Override
@@ -47,6 +52,11 @@ public class ContainerModelEvaluation implements RankProfilesConfig.Producer, Ra
rankProfileList.getConfig(builder);
}
+ @Override
+ public void getConfig(OnnxModelsConfig.Builder builder) {
+ rankProfileList.getConfig(builder);
+ }
+
public static Handler<?> getHandler() {
Handler<?> handler = new Handler<>(new ComponentModel(REST_HANDLER_NAME, null, BUNDLE_NAME));
handler.addServerBindings(
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/docproc/DocprocChain.java b/config-model/src/main/java/com/yahoo/vespa/model/container/docproc/DocprocChain.java
index ee246b5e485..2b2b17c76c3 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/docproc/DocprocChain.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/docproc/DocprocChain.java
@@ -23,8 +23,9 @@ public class DocprocChain extends Chain<DocumentProcessor> {
}
/**
- * The field name schema map that applies to this whole chain
- * @return doctype,from → to
+ * The field name schema map that applies to this whole chain.
+ *
+ * @return doctype, from → to
*/
public Map<Pair<String,String>,String> fieldNameSchemaMap() {
return fieldNameSchemaMap;
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/http/JettyHttpServer.java b/config-model/src/main/java/com/yahoo/vespa/model/container/http/JettyHttpServer.java
index ce79a124e81..d13709114bf 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/http/JettyHttpServer.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/http/JettyHttpServer.java
@@ -14,8 +14,6 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
-import static com.yahoo.component.ComponentSpecification.fromString;
-
/**
* @author Einar M R Rosenvinge
* @author bjorncs
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/search/searchchain/FederationSearcher.java b/config-model/src/main/java/com/yahoo/vespa/model/container/search/searchchain/FederationSearcher.java
index ceb48732116..6b4cb003cda 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/search/searchchain/FederationSearcher.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/search/searchchain/FederationSearcher.java
@@ -11,7 +11,12 @@ import com.yahoo.search.federation.FederationConfig;
import com.yahoo.search.searchchain.model.federation.FederationSearcherModel.TargetSpec;
import com.yahoo.vespa.model.container.component.Component;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
/**
* Config producer for the FederationSearcher.
@@ -26,8 +31,8 @@ public class FederationSearcher extends Searcher<FederationSearcherModel> implem
* Generates config for a single search chain contained in a target.
*/
private static final class SearchChainConfig {
+
private final SearchChain searchChain;
- //Zero if not applicable
final ComponentId providerId;
final FederationOptions targetOptions;
final List<String> documentTypes;
@@ -61,6 +66,7 @@ public class FederationSearcher extends Searcher<FederationSearcherModel> implem
* which can be federated to as a single entity.
*/
private static abstract class Target {
+
final ComponentId id;
final FederationOptions targetOptions;
@@ -79,41 +85,36 @@ public class FederationSearcher extends Searcher<FederationSearcherModel> implem
}
protected abstract void getSearchChainsConfig(FederationConfig.Target.Builder tb);
+
}
private static class SearchChainTarget extends Target {
+
private final SearchChainConfig searchChainConfig;
- public SearchChainTarget(SearchChain searchChain,
- FederationOptions targetOptions) {
+ public SearchChainTarget(SearchChain searchChain, FederationOptions targetOptions) {
super(searchChain.getComponentId(), targetOptions);
- searchChainConfig = new SearchChainConfig(
- searchChain,
- null,
- targetOptions,
- searchChain.getDocumentTypes());
+ searchChainConfig = new SearchChainConfig(searchChain, null, targetOptions, searchChain.getDocumentTypes());
}
@Override
protected void getSearchChainsConfig(FederationConfig.Target.Builder tB) {
tB.searchChain(searchChainConfig.getSearchChainConfig());
}
+
}
private static class SourceGroupTarget extends Target {
+
private final SearchChainConfig leaderConfig;
- private final List<SearchChainConfig> participantsConfig =
- new ArrayList<>();
+ private final List<SearchChainConfig> participantsConfig = new ArrayList<>();
- public SourceGroupTarget(SourceGroup group,
- FederationOptions targetOptions) {
+ public SourceGroupTarget(SourceGroup group, FederationOptions targetOptions) {
super(group.getComponentId(), applyDefaultSourceGroupOptions(targetOptions));
leaderConfig = createConfig(group.leader(), targetOptions);
- for (Source participant : group.participants()) {
- participantsConfig.add(
- createConfig(participant, targetOptions));
- }
+ for (Source participant : group.participants())
+ participantsConfig.add(createConfig(participant, targetOptions));
}
private static FederationOptions applyDefaultSourceGroupOptions(FederationOptions targetOptions) {
@@ -121,64 +122,49 @@ public class FederationSearcher extends Searcher<FederationSearcherModel> implem
return targetOptions.inherit(defaultSourceGroupOption);
}
- private SearchChainConfig createConfig(Source source,
- FederationOptions targetOptions) {
- return new SearchChainConfig(
- source,
- source.getParentProvider().getComponentId(),
- targetOptions,
- source.getDocumentTypes());
+ private SearchChainConfig createConfig(Source source, FederationOptions targetOptions) {
+ return new SearchChainConfig(source,
+ source.getParentProvider().getComponentId(),
+ targetOptions,
+ source.getDocumentTypes());
}
@Override
protected void getSearchChainsConfig(FederationConfig.Target.Builder tB) {
tB.searchChain(leaderConfig.getSearchChainConfig());
- for (SearchChainConfig participant : participantsConfig) {
+ for (SearchChainConfig participant : participantsConfig)
tB.searchChain(participant.getSearchChainConfig());
- }
}
}
private static class TargetResolver {
+
final ComponentRegistry<SearchChain> searchChainRegistry;
final SourceGroupRegistry sourceGroupRegistry;
- /**
- * @return true if searchChain.id newer than sourceGroup.id
- */
- private boolean newerVersion(SearchChain searchChain,
- SourceGroup sourceGroup) {
- if (searchChain == null || sourceGroup == null) {
- return false;
- } else {
- return newerVersion(searchChain.getComponentId(), sourceGroup.getComponentId());
- }
+    /** Returns true if searchChain.id is newer than sourceGroup.id */
+ private boolean newerVersion(SearchChain searchChain, SourceGroup sourceGroup) {
+ if (searchChain == null || sourceGroup == null) return false;
+ return newerVersion(searchChain.getComponentId(), sourceGroup.getComponentId());
}
- /**
- * @return true if a newer than b
- */
+    /** Returns true if a is newer than b */
private boolean newerVersion(ComponentId a, ComponentId b) {
return a.compareTo(b) > 0;
}
-
- TargetResolver(ComponentRegistry<SearchChain> searchChainRegistry,
- SourceGroupRegistry sourceGroupRegistry) {
+ TargetResolver(ComponentRegistry<SearchChain> searchChainRegistry, SourceGroupRegistry sourceGroupRegistry) {
this.searchChainRegistry = searchChainRegistry;
this.sourceGroupRegistry = sourceGroupRegistry;
}
Target resolve(FederationSearcherModel.TargetSpec specification) {
- SearchChain searchChain = searchChainRegistry.getComponent(
- specification.sourceSpec);
- SourceGroup sourceGroup = sourceGroupRegistry.getComponent(
- specification.sourceSpec);
+ SearchChain searchChain = searchChainRegistry.getComponent(specification.sourceSpec);
+ SourceGroup sourceGroup = sourceGroupRegistry.getComponent(specification.sourceSpec);
if (searchChain == null && sourceGroup == null) {
return null;
- } else if (sourceGroup == null ||
- newerVersion(searchChain, sourceGroup)) {
+ } else if (sourceGroup == null || newerVersion(searchChain, sourceGroup)) {
return new SearchChainTarget(searchChain, specification.federationOptions);
} else {
return new SourceGroupTarget(sourceGroup, specification.federationOptions);
@@ -186,26 +172,21 @@ public class FederationSearcher extends Searcher<FederationSearcherModel> implem
}
}
- private final Map<ComponentId, Target> resolvedTargets =
- new LinkedHashMap<>();
+ private final Map<ComponentId, Target> resolvedTargets = new LinkedHashMap<>();
public FederationSearcher(FederationSearcherModel searcherModel, Optional<Component> targetSelector) {
super(searcherModel);
this.targetSelector = targetSelector;
- if (targetSelector.isPresent())
- addChild(targetSelector.get());
+ targetSelector.ifPresent(selector -> addChild(selector));
}
@Override
public void getConfig(FederationConfig.Builder builder) {
- for (Target target : resolvedTargets.values()) {
+ for (Target target : resolvedTargets.values())
builder.target(target.getTargetConfig());
- }
- if (targetSelector.isPresent()) {
- builder.targetSelector(targetSelector.get().getGlobalComponentId().stringValue());
- }
+ targetSelector.ifPresent(selector -> builder.targetSelector(selector.getGlobalComponentId().stringValue()));
}
@Override
@@ -213,10 +194,8 @@ public class FederationSearcher extends Searcher<FederationSearcherModel> implem
initialize(getSearchChains().allChains(), getSearchChains().allSourceGroups());
}
- void initialize(ComponentRegistry<SearchChain> searchChainRegistry,
- SourceGroupRegistry sourceGroupRegistry) {
- TargetResolver targetResolver = new TargetResolver(
- searchChainRegistry, sourceGroupRegistry);
+ void initialize(ComponentRegistry<SearchChain> searchChainRegistry, SourceGroupRegistry sourceGroupRegistry) {
+ TargetResolver targetResolver = new TargetResolver(searchChainRegistry, sourceGroupRegistry);
addSourceTargets(targetResolver, model.targets);
@@ -229,16 +208,14 @@ public class FederationSearcher extends Searcher<FederationSearcherModel> implem
Target target = targetResolver.resolve(targetSpec);
if (target == null) {
- throw new RuntimeException("Can't find source " +
- targetSpec.sourceSpec +
- " used as a source for federation '" +
- getComponentId() + "'");
+ throw new RuntimeException("Can't find source " + targetSpec.sourceSpec +
+ " used as a source for federation '" + getComponentId() + "'");
}
Target duplicate = resolvedTargets.put(target.id, target);
if (duplicate != null && !duplicate.targetOptions.equals(target.targetOptions)) {
- throw new RuntimeException("Search chain " + target.id + " added twice with different federation options"
- + " to the federation searcher " + getComponentId());
+ throw new RuntimeException("Search chain " + target.id + " added twice with different federation options" +
+ " to the federation searcher " + getComponentId());
}
}
}
@@ -248,23 +225,21 @@ public class FederationSearcher extends Searcher<FederationSearcherModel> implem
for (GenericTarget genericTarget : defaultTargets(searchChainRegistry.allComponents())) {
ComponentSpecification specification = genericTarget.getComponentId().toSpecification();
- //Can't use genericTarget directly, as it might be part of a source group.
+ // Can't use genericTarget directly, as it might be part of a source group.
Target federationTarget = targetResolver.resolve(new TargetSpec(specification, new FederationOptions()));
- //Do not replace manually added sources, as they might have manually configured federation options
+ // Do not replace manually added sources, as they might have manually configured federation options
if (!resolvedTargets.containsKey(federationTarget.id))
resolvedTargets.put(federationTarget.id, federationTarget);
}
}
-
private static List<GenericTarget> defaultTargets(Collection<SearchChain> allSearchChains) {
- Collection<Provider> providers =
- CollectionUtil.filter(allSearchChains, Provider.class);
+ Collection<Provider> providers = CollectionUtil.filter(allSearchChains, Provider.class);
List<GenericTarget> defaultTargets = new ArrayList<>();
- for (Provider provider : providers) {
+ for (Provider provider : providers)
defaultTargets.addAll(provider.defaultFederationTargets());
- }
return defaultTargets;
}
+
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/search/searchchain/Provider.java b/config-model/src/main/java/com/yahoo/vespa/model/container/search/searchchain/Provider.java
index ee4edf3fd8c..10e0f3e55da 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/search/searchchain/Provider.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/search/searchchain/Provider.java
@@ -7,6 +7,7 @@ import com.yahoo.vespa.model.container.component.ConfigProducerGroup;
import java.util.Arrays;
import java.util.Collection;
+import java.util.List;
/**
* Base config producer for search chains that communicate with backends.
@@ -15,7 +16,7 @@ import java.util.Collection;
*/
public class Provider extends GenericTarget {
- private ConfigProducerGroup<Source> sources;
+ private final ConfigProducerGroup<Source> sources;
public Provider(ChainSpecification specWithoutInnerSearchers, FederationOptions federationOptions) {
super(specWithoutInnerSearchers, federationOptions);
@@ -37,9 +38,10 @@ public class Provider extends GenericTarget {
public Collection<? extends GenericTarget> defaultFederationTargets() {
if (sources.getComponents().isEmpty()) {
- return Arrays.asList(this);
+ return List.of(this);
} else {
return sources.getComponents();
}
}
+
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
index 1dd5074aedb..18580249ddc 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
@@ -64,11 +64,8 @@ public class ContentSearchCluster extends AbstractConfigProducer<SearchCluster>
private final Map<StorageGroup, NodeSpec> groupToSpecMap = new LinkedHashMap<>();
private Optional<ResourceLimits> resourceLimits = Optional.empty();
private final ProtonConfig.Indexing.Optimize.Enum feedSequencerType;
- private final int maxPendingMoveOps;
private final double defaultFeedConcurrency;
- private final boolean useBucketExecutorForLidSpaceCompact;
- private final boolean useBucketExecutorForBucketMove;
- private final double defaultMaxDeadBytesRatio;
+ private final boolean useBucketExecutorForPruneRemoved;
/** Whether the nodes of this cluster also hosts a container cluster in a hosted system */
private final boolean combined;
@@ -211,12 +208,9 @@ public class ContentSearchCluster extends AbstractConfigProducer<SearchCluster>
this.syncTransactionLog = syncTransactionLog;
this.combined = combined;
- maxPendingMoveOps = featureFlags.maxPendingMoveOps();
feedSequencerType = convertFeedSequencerType(featureFlags.feedSequencerType());
defaultFeedConcurrency = featureFlags.feedConcurrency();
- useBucketExecutorForLidSpaceCompact = featureFlags.useBucketExecutorForLidSpaceCompact();
- useBucketExecutorForBucketMove = featureFlags.useBucketExecutorForBucketMove();
- defaultMaxDeadBytesRatio = featureFlags.maxDeadBytesRatio();
+ useBucketExecutorForPruneRemoved = featureFlags.useBucketExecutorForPruneRemoved();
}
public void setVisibilityDelay(double delay) {
@@ -383,7 +377,6 @@ public class ContentSearchCluster extends AbstractConfigProducer<SearchCluster>
.configid(getConfigId())
.visibilitydelay(visibilityDelay)
.global(globalDocType);
- ddbB.allocation.max_dead_bytes_ratio(defaultMaxDeadBytesRatio);
if (hasIndexingModeStreaming(type)) {
hasAnyNonIndexedCluster = true;
@@ -434,9 +427,7 @@ public class ContentSearchCluster extends AbstractConfigProducer<SearchCluster>
} else {
builder.indexing.optimize(feedSequencerType);
}
- builder.maintenancejobs.maxoutstandingmoveops(maxPendingMoveOps);
- builder.lidspacecompaction.usebucketexecutor(useBucketExecutorForLidSpaceCompact);
- builder.bucketmove.usebucketexecutor(useBucketExecutorForBucketMove);
+ builder.pruneremoveddocuments.usebucketexecutor(useBucketExecutorForPruneRemoved);
}
private boolean isGloballyDistributed(NewDocumentType docType) {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/StorageGroup.java b/config-model/src/main/java/com/yahoo/vespa/model/content/StorageGroup.java
index f106b1f7bd5..c764b5ab449 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/StorageGroup.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/StorageGroup.java
@@ -54,7 +54,7 @@ public class StorageGroup {
*
* @param isHosted true if this is in a hosted setup
* @param name the name of this group
- * @param index the distribution-key index og this group
+ * @param index the distribution-key index of this group
* @param partitions the distribution strategy to use to distribute content to subgroups or empty
* (meaning that the "*" distribution will be used) only if this is a leaf group
* (having nodes, not subgroups as children).
@@ -162,10 +162,10 @@ public class StorageGroup {
}
/** Returns the total number of nodes below this group */
- public int countNodes() {
- int nodeCount = nodes.size();
+ public int countNodes(boolean includeRetired) {
+ int nodeCount = (int)nodes.stream().filter(node -> includeRetired || ! node.isRetired()).count();
for (StorageGroup group : subgroups)
- nodeCount += group.countNodes();
+ nodeCount += group.countNodes(includeRetired);
return nodeCount;
}
@@ -220,7 +220,7 @@ public class StorageGroup {
? groupBuilder.buildHosted(deployState, owner, Optional.empty())
: groupBuilder.buildNonHosted(deployState, owner, Optional.empty());
Redundancy redundancy = redundancyBuilder.build(owner.getName(), owner.isHosted(), storageGroup.subgroups.size(),
- storageGroup.getNumberOfLeafGroups(), storageGroup.countNodes(),
+ storageGroup.getNumberOfLeafGroups(), storageGroup.countNodes(false),
maxRedundancy);
owner.setRedundancy(redundancy);
if (storageGroup.partitions.isEmpty() && (redundancy.groups() > 1)) {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/search/IndexingDocprocChain.java b/config-model/src/main/java/com/yahoo/vespa/model/search/IndexingDocprocChain.java
index 2c1d979e2c4..8fe6b51f2b4 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/search/IndexingDocprocChain.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/search/IndexingDocprocChain.java
@@ -2,14 +2,12 @@
package com.yahoo.vespa.model.search;
import com.yahoo.component.ComponentId;
-import com.yahoo.component.ComponentSpecification;
import com.yahoo.component.chain.Phase;
import com.yahoo.component.chain.model.ChainSpecification;
import com.yahoo.vespa.configdefinition.SpecialtokensConfig;
import com.yahoo.vespa.model.container.docproc.DocprocChain;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Set;
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/search/TransactionLogServer.java b/config-model/src/main/java/com/yahoo/vespa/model/search/TransactionLogServer.java
index ed12a161805..82477b811d5 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/search/TransactionLogServer.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/search/TransactionLogServer.java
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.search;
-import com.yahoo.config.model.api.ModelContext;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.searchlib.TranslogserverConfig;
import com.yahoo.config.model.producer.AbstractConfigProducer;
@@ -15,18 +14,9 @@ import org.w3c.dom.Element;
*/
public class TransactionLogServer extends AbstractService {
- private static final long serialVersionUID = 1L;
-
- private static TranslogserverConfig.Compression.Type.Enum convertCompressionType(String type) {
- try {
- return TranslogserverConfig.Compression.Type.Enum.valueOf(type);
- } catch (Throwable t) {
- return TranslogserverConfig.Compression.Type.NONE;
- }
- }
-
private final Boolean useFsync;
- public TransactionLogServer(AbstractConfigProducer searchNode, String clusterName, Boolean useFsync) {
+
+ public TransactionLogServer(AbstractConfigProducer<?> searchNode, String clusterName, Boolean useFsync) {
super(searchNode, "transactionlogserver");
portsMeta.on(0).tag("tls");
this.useFsync = useFsync;
@@ -44,7 +34,7 @@ public class TransactionLogServer extends AbstractService {
}
@Override
- protected TransactionLogServer doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element producerSpec) {
+ protected TransactionLogServer doBuild(DeployState deployState, AbstractConfigProducer<?> ancestor, Element producerSpec) {
return new TransactionLogServer(ancestor, clusterName, useFsync);
}
diff --git a/config-model/src/main/javacc/SDParser.jj b/config-model/src/main/javacc/SDParser.jj
index 88c45a18e4e..8f99c388fb1 100644
--- a/config-model/src/main/javacc/SDParser.jj
+++ b/config-model/src/main/javacc/SDParser.jj
@@ -237,6 +237,7 @@ TOKEN :
| < RANKPROFILE: "rank-profile" >
| < RANKDEGRADATIONFREQ: "rank-degradation-frequency" >
| < RANKDEGRADATION: "rank-degradation" >
+| < RAW_AS_BASE64_IN_SUMMARY: "raw-as-base64-in-summary" >
| < RPBINSIZE: "doc-frequency" >
| < RPBINLOW: "min-fullrank-docs">
| < RPPOSBINSIZE: "occurrences-per-doc" >
@@ -451,6 +452,7 @@ Search rootSchema(String dir) :
Object rootSchemaItem(Search search) : { }
{
( document(search)
+ | rawAsBase64(search)
| documentSummary(search)
| field(null, search)
| index(search, null)
@@ -561,6 +563,12 @@ Object documentBody(SDDocumentType document, Search search) :
{ return null; }
}
+void rawAsBase64(Search search) :
+{}
+{
+ <RAW_AS_BASE64_IN_SUMMARY> { search.enableRawAsBase64(); }
+}
+
/**
* Consumes a document head block.
*
diff --git a/config-model/src/main/resources/schema/admin.rnc b/config-model/src/main/resources/schema/admin.rnc
index 784fb82d319..a75b51a567a 100644
--- a/config-model/src/main/resources/schema/admin.rnc
+++ b/config-model/src/main/resources/schema/admin.rnc
@@ -105,6 +105,7 @@ ClusterControllers = element cluster-controllers {
}
LogForwarding = element logforwarding {
+ attribute include-admin { xsd:boolean }? &
element splunk {
attribute splunk-home { xsd:string }? &
attribute deployment-server { xsd:string } &
diff --git a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
index 96e228ca1f7..79e3e869b52 100644
--- a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
+++ b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
@@ -35,7 +35,6 @@ import com.yahoo.vespa.model.test.VespaModelTester;
import com.yahoo.vespa.model.test.utils.ApplicationPackageUtils;
import com.yahoo.vespa.model.test.utils.VespaModelCreatorWithMockPkg;
import com.yahoo.yolean.Exceptions;
-import org.junit.Ignore;
import org.junit.Test;
import java.io.StringReader;
@@ -896,6 +895,89 @@ public class ModelProvisioningTest {
}
@Test
+ public void testLogForwarderNotInAdminCluster() {
+ String services =
+ "<?xml version='1.0' encoding='utf-8' ?>\n" +
+ "<services>" +
+ " <admin version='4.0'>" +
+ " <logservers>" +
+ " <nodes count='1' dedicated='true'/>" +
+ " </logservers>" +
+ " <logforwarding>" +
+ " <splunk deployment-server='bardeplserv:123' client-name='barclinam' phone-home-interval='987' />" +
+ " </logforwarding>" +
+ " </admin>" +
+ " <container version='1.0' id='foo'>" +
+ " <nodes count='1'/>" +
+ " </container>" +
+ "</services>";
+
+ int numberOfHosts = 2;
+ VespaModelTester tester = new VespaModelTester();
+ tester.addHosts(numberOfHosts+1);
+
+ VespaModel model = tester.createModel(Zone.defaultZone(), services, true);
+ assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts));
+
+ Admin admin = model.getAdmin();
+ Logserver logserver = admin.getLogserver();
+ HostResource hostResource = logserver.getHostResource();
+
+ assertNotNull(hostResource.getService("logserver"));
+ assertNull(hostResource.getService("container"));
+ assertNull(hostResource.getService("logforwarder"));
+
+ var clist = model.getContainerClusters().get("foo").getContainers();
+ assertThat(clist.size(), is(1));
+ hostResource = clist.get(0).getHostResource();
+ assertNull(hostResource.getService("logserver"));
+ assertNotNull(hostResource.getService("container"));
+ assertNotNull(hostResource.getService("logforwarder"));
+ }
+
+
+ @Test
+ public void testLogForwarderInAdminCluster() {
+ String services =
+ "<?xml version='1.0' encoding='utf-8' ?>\n" +
+ "<services>" +
+ " <admin version='4.0'>" +
+ " <logservers>" +
+ " <nodes count='1' dedicated='true'/>" +
+ " </logservers>" +
+ " <logforwarding include-admin='true'>" +
+ " <splunk deployment-server='bardeplserv:123' client-name='barclinam' phone-home-interval='987' />" +
+ " </logforwarding>" +
+ " </admin>" +
+ " <container version='1.0' id='foo'>" +
+ " <nodes count='1'/>" +
+ " </container>" +
+ "</services>";
+
+ int numberOfHosts = 2;
+ VespaModelTester tester = new VespaModelTester();
+ tester.addHosts(numberOfHosts+1);
+
+ VespaModel model = tester.createModel(Zone.defaultZone(), services, true);
+ assertThat(model.getRoot().hostSystem().getHosts().size(), is(numberOfHosts));
+
+ Admin admin = model.getAdmin();
+ Logserver logserver = admin.getLogserver();
+ HostResource hostResource = logserver.getHostResource();
+
+ assertNotNull(hostResource.getService("logserver"));
+ assertNull(hostResource.getService("container"));
+ assertNotNull(hostResource.getService("logforwarder"));
+
+ var clist = model.getContainerClusters().get("foo").getContainers();
+ assertThat(clist.size(), is(1));
+ hostResource = clist.get(0).getHostResource();
+ assertNull(hostResource.getService("logserver"));
+ assertNotNull(hostResource.getService("container"));
+ assertNotNull(hostResource.getService("logforwarder"));
+ }
+
+ @Test
public void testImplicitLogserverContainer() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
@@ -1018,6 +1100,35 @@ public class ModelProvisioningTest {
}
@Test
+ public void testRedundancy2DownscaledToOneNodeButOneRetired() {
+ String services =
+ "<?xml version='1.0' encoding='utf-8' ?>" +
+ "<services>" +
+ " <content version='1.0' id='bar'>" +
+ " <redundancy>2</redundancy>" +
+ " <documents>" +
+ " <document type='type1' mode='index'/>" +
+ " </documents>" +
+ " <nodes count='2'/>" +
+ " </content>" +
+ "</services>";
+
+ int numberOfHosts = 3;
+ VespaModelTester tester = new VespaModelTester();
+ tester.addHosts(numberOfHosts);
+ VespaModel model = tester.createModel(services, false, false, true, "node-1-3-10-03");
+ assertEquals(numberOfHosts, model.getRoot().hostSystem().getHosts().size());
+
+ ContentCluster cluster = model.getContentClusters().get("bar");
+ assertEquals(2, cluster.getStorageNodes().getChildren().size());
+ assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
+ assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
+ assertEquals(1, cluster.redundancy().effectiveReadyCopies());
+ assertEquals(2, cluster.getRootGroup().getNodes().size());
+ assertEquals(0, cluster.getRootGroup().getSubgroups().size());
+ }
+
+ @Test
public void testUsingNodesCountAttributesAndGettingTooFewNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
@@ -1483,7 +1594,7 @@ public class ModelProvisioningTest {
assertEquals("We get 1 node per cluster and no admin node apart from the dedicated cluster controller", 3, model.getHosts().size());
assertEquals(1, model.getContainerClusters().size());
assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
- assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes());
+ assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes(true));
assertEquals(1, model.getAdmin().getClusterControllers().getContainers().size());
}
@@ -1536,7 +1647,7 @@ public class ModelProvisioningTest {
assertEquals(6, model.getRoot().hostSystem().getHosts().size());
assertEquals(3, model.getAdmin().getSlobroks().size());
assertEquals(2, model.getContainerClusters().get("foo").getContainers().size());
- assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes());
+ assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes(true));
}
@Test
@@ -1606,7 +1717,7 @@ public class ModelProvisioningTest {
assertEquals(1, model.getRoot().hostSystem().getHosts().size());
assertEquals(1, model.getAdmin().getSlobroks().size());
assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
- assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes());
+ assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes(true));
}
/** Recreate the combination used in some factory tests */
@@ -1889,7 +2000,7 @@ public class ModelProvisioningTest {
assertTrue("Initial servers are not joining", config.build().server().stream().noneMatch(ZookeeperServerConfig.Server::joining));
}
{
- VespaModel nextModel = tester.createModel(Zone.defaultZone(), servicesXml.apply(5), true, false, 0, Optional.of(model), new DeployState.Builder());
+ VespaModel nextModel = tester.createModel(Zone.defaultZone(), servicesXml.apply(5), true, false, false, 0, Optional.of(model), new DeployState.Builder());
ApplicationContainerCluster cluster = nextModel.getContainerClusters().get("zk");
ZookeeperServerConfig.Builder config = new ZookeeperServerConfig.Builder();
cluster.getContainers().forEach(c -> c.getConfig(config));
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/SummaryTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/SummaryTestCase.java
index 91599e6f607..f8d03d3574b 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/SummaryTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/SummaryTestCase.java
@@ -6,6 +6,7 @@ import com.yahoo.vespa.documentmodel.DocumentSummary;
import com.yahoo.vespa.model.test.utils.DeployLoggerStub;
import com.yahoo.vespa.objects.FieldBase;
import org.junit.Test;
+import static com.yahoo.config.model.test.TestUtil.joinLines;
import java.util.Collection;
import java.util.List;
@@ -25,21 +26,17 @@ public class SummaryTestCase {
@Test
public void testMemorySummary() throws ParseException {
- String sd =
- "search memorysummary {\n" +
- "\n" +
- " document memorysummary {\n" +
- "\n" +
- " field inmemory type string {\n" +
- " indexing: attribute | summary\n" +
- " }\n" +
- " field ondisk type string {\n" +
- " indexing: index # no summary, so ignored\n" +
- " }\n" +
- "\n" +
- " }\n" +
- "\n" +
- "}";
+ String sd = joinLines(
+ "search memorysummary {",
+ " document memorysummary {",
+ " field inmemory type string {",
+ " indexing: attribute | summary",
+ " }",
+ " field ondisk type string {",
+ " indexing: index # no summary, so ignored",
+ " }",
+ " }",
+ "}");
DeployLoggerStub logger = new DeployLoggerStub();
SearchBuilder.createFromString(sd, logger);
assertTrue(logger.entries.isEmpty());
@@ -47,25 +44,21 @@ public class SummaryTestCase {
@Test
public void testDiskSummary() throws ParseException {
- String sd =
- "search disksummary {\n" +
- "\n" +
- " document-summary foobar {\n" +
- " summary foo1 type string { source: inmemory }\n" +
- " summary foo2 type string { source: ondisk }\n" +
- " }\n" +
- " document disksummary {\n" +
- "\n" +
- " field inmemory type string {\n" +
- " indexing: attribute | summary\n" +
- " }\n" +
- " field ondisk type string {\n" +
- " indexing: index | summary\n" +
- " }\n" +
- "\n" +
- " }\n" +
- "\n" +
- "}";
+ String sd = joinLines(
+ "search disksummary {",
+ " document-summary foobar {",
+ " summary foo1 type string { source: inmemory }",
+ " summary foo2 type string { source: ondisk }",
+ " }",
+ " document disksummary {",
+ " field inmemory type string {",
+ " indexing: attribute | summary",
+ " }",
+ " field ondisk type string {",
+ " indexing: index | summary",
+ " }",
+ " }",
+ "}");
DeployLoggerStub logger = new DeployLoggerStub();
SearchBuilder.createFromString(sd, logger);
assertEquals(1, logger.entries.size());
@@ -78,27 +71,22 @@ public class SummaryTestCase {
@Test
public void testDiskSummaryExplicit() throws ParseException {
- String sd =
- "search disksummary {\n" +
- "\n" +
- " document disksummary {\n" +
- "\n" +
- " field inmemory type string {\n" +
- " indexing: attribute | summary\n" +
- " }\n" +
- " field ondisk type string {\n" +
- " indexing: index | summary\n" +
- " }\n" +
- "\n" +
- " }\n" +
- "\n" +
- " document-summary foobar {\n" +
- " summary foo1 type string { source: inmemory }\n" +
- " summary foo2 type string { source: ondisk }\n" +
- " from-disk\n" +
- " }\n" +
- "\n" +
- "}";
+ String sd = joinLines(
+ "search disksummary {",
+ " document disksummary {",
+ " field inmemory type string {",
+ " indexing: attribute | summary",
+ " }",
+ " field ondisk type string {",
+ " indexing: index | summary",
+ " }",
+ " }",
+ " document-summary foobar {",
+ " summary foo1 type string { source: inmemory }",
+ " summary foo2 type string { source: ondisk }",
+ " from-disk",
+ " }",
+ "}");
DeployLoggerStub logger = new DeployLoggerStub();
SearchBuilder.createFromString(sd, logger);
assertTrue(logger.entries.isEmpty());
@@ -106,31 +94,30 @@ public class SummaryTestCase {
@Test
public void testStructMemorySummary() throws ParseException {
- String sd =
- "search structmemorysummary {\n" +
- " document structmemorysummary {\n" +
- " struct elem {\n" +
- " field name type string {}\n" +
- " field weight type int {}\n" +
- " }\n" +
- " field elem_array type array<elem> {\n" +
- " indexing: summary\n" +
- " struct-field name {\n" +
- " indexing: attribute\n" +
- " }\n" +
- " struct-field weight {\n" +
- " indexing: attribute\n" +
- " }\n" +
- " }\n" +
- " }\n" +
- " document-summary filtered {\n" +
- " summary elem_array_filtered type array<elem> {\n" +
- " source: elem_array\n" +
- " matched-elements-only\n" +
- " }\n" +
- " }\n" +
- "\n" +
- "}";
+ String sd = joinLines(
+ "search structmemorysummary {",
+ " document structmemorysummary {",
+ " struct elem {",
+ " field name type string {}",
+ " field weight type int {}",
+ " }",
+ " field elem_array type array<elem> {",
+ " indexing: summary",
+ " struct-field name {",
+ " indexing: attribute",
+ " }",
+ " struct-field weight {",
+ " indexing: attribute",
+ " }",
+ " }",
+ " }",
+ " document-summary filtered {",
+ " summary elem_array_filtered type array<elem> {",
+ " source: elem_array",
+ " matched-elements-only",
+ " }",
+ " }",
+ "}");
DeployLoggerStub logger = new DeployLoggerStub();
SearchBuilder.createFromString(sd, logger);
assertTrue(logger.entries.isEmpty());
@@ -138,40 +125,35 @@ public class SummaryTestCase {
@Test
public void testInheritance() throws Exception {
- String sd =
- "search music {\n" +
- "\n" +
- " document music {\n" +
- " field title type string {\n" +
- " indexing: summary | attribute | index\n" +
- " }\n" +
- " \n" +
- " field artist type string {\n" +
- " indexing: summary | attribute | index\n" +
- " }\n" +
- " \n" +
- " field album type string {\n" +
- " indexing: summary | attribute | index\n" +
- " }\n" +
- " }\n" +
- " \n" +
- " document-summary title {\n" +
- " summary title type string {\n" +
- " source: title\n" +
- " }\n" +
- " }\n" +
- " document-summary title_artist inherits title {\n" +
- " summary artist type string {\n" +
- " source: artist\n" +
- " }\n" +
- " }\n" +
- " document-summary everything inherits title_artist {\n" +
- " summary album type string {\n" +
- " source: album\n" +
- " }\n" +
- " }\n" +
- "\n" +
- "}";
+ String sd = joinLines(
+ "search music {",
+ " document music {",
+ " field title type string {",
+ " indexing: summary | attribute | index",
+ " }",
+ " field artist type string {",
+ " indexing: summary | attribute | index",
+ " }",
+ " field album type string {",
+ " indexing: summary | attribute | index",
+ " }",
+ " }",
+ " document-summary title {",
+ " summary title type string {",
+ " source: title",
+ " }",
+ " }",
+ " document-summary title_artist inherits title {",
+ " summary artist type string {",
+ " source: artist",
+ " }",
+ " }",
+ " document-summary everything inherits title_artist {",
+ " summary album type string {",
+ " source: album",
+ " }",
+ " }",
+ "}");
var logger = new DeployLoggerStub();
var search = SearchBuilder.createFromString(sd, logger).getSearch();
assertEquals(List.of(), logger.entries);
@@ -202,30 +184,27 @@ public class SummaryTestCase {
@Test
public void testRedeclaringInheritedFieldFails() throws Exception {
- String sd =
- "search music {\n" +
- "\n" +
- " document music {\n" +
- " field title type string {\n" +
- " indexing: summary | attribute | index\n" +
- " }\n" +
- " field title_short type string {\n" +
- " indexing: summary | attribute | index\n" +
- " }\n" +
- " }\n" +
- " \n" +
- " document-summary title {\n" +
- " summary title type string {\n" +
- " source: title\n" +
- " }\n" +
- " }\n" +
- " document-summary title2 inherits title {\n" +
- " summary title type string {\n" +
- " source: title_short\n" +
- " }\n" +
- " }\n" +
- " \n" +
- "}";
+ String sd = joinLines(
+ "search music {",
+ " document music {",
+ " field title type string {",
+ " indexing: summary | attribute | index",
+ " }",
+ " field title_short type string {",
+ " indexing: summary | attribute | index",
+ " }",
+ " }",
+ " document-summary title {",
+ " summary title type string {",
+ " source: title",
+ " }",
+ " }",
+ " document-summary title2 inherits title {",
+ " summary title type string {",
+ " source: title_short",
+ " }",
+ " }",
+ "}");
var logger = new DeployLoggerStub();
try {
SearchBuilder.createFromString(sd, logger);
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/derived/SummaryTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/derived/SummaryTestCase.java
index afbc9f52f6b..bfc738a4f87 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/derived/SummaryTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/derived/SummaryTestCase.java
@@ -23,6 +23,37 @@ import static org.junit.Assert.assertNull;
public class SummaryTestCase extends SchemaTestCase {
@Test
+ public void deriveRawAsBase64() throws ParseException {
+ String sd = joinLines(
+ "schema s {",
+ " raw-as-base64-in-summary",
+ " document s {",
+ " field raw_field type raw {",
+ " indexing: summary",
+ " }",
+ " }",
+ "}");
+ Search search = SearchBuilder.createFromString(sd).getSearch();
+ SummaryClass summary = new SummaryClass(search, search.getSummary("default"), new BaseDeployLogger());
+ assertEquals(SummaryClassField.Type.RAW, summary.getField("raw_field").getType());
+ }
+
+ @Test
+ public void deriveRawAsLegacy() throws ParseException {
+ String sd = joinLines(
+ "schema s {",
+ " document s {",
+ " field raw_field type raw {",
+ " indexing: summary",
+ " }",
+ " }",
+ "}");
+ Search search = SearchBuilder.createFromString(sd).getSearch();
+ SummaryClass summary = new SummaryClass(search, search.getSummary("default"), new BaseDeployLogger());
+ assertEquals(SummaryClassField.Type.DATA, summary.getField("raw_field").getType());
+ }
+
+ @Test
public void testDeriving() throws IOException, ParseException {
Search search = SearchBuilder.buildFromFile("src/test/examples/simple.sd");
SummaryClass summary = new SummaryClass(search, search.getSummary("default"), new BaseDeployLogger());
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionTypeResolverTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionTypeResolverTestCase.java
index b149dafab95..12fe7e151c0 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionTypeResolverTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionTypeResolverTestCase.java
@@ -57,6 +57,56 @@ public class RankingExpressionTypeResolverTestCase {
}
}
+
+ @Test
+ public void tensorFirstPhaseFromConstantMustProduceDouble() throws Exception {
+ try {
+ SearchBuilder builder = new SearchBuilder();
+ builder.importString(joinLines(
+ "search test {",
+ " document test { ",
+ " field a type tensor(d0[3]) {",
+ " indexing: attribute",
+ " }",
+ " }",
+ " rank-profile my_rank_profile {",
+ " function my_func() {",
+ " expression: x_tensor*2.0",
+ " }",
+ " function inline other_func() {",
+ " expression: z_tensor+3.0",
+ " }",
+ " first-phase {",
+ " expression: reduce(attribute(a),sum,d0)+y_tensor+my_func+other_func",
+ " }",
+ " constants {",
+ " x_tensor {",
+ " type: tensor(x{})",
+ " value: { {x:bar}:17 }",
+ " }",
+ " y_tensor {",
+ " type: tensor(y{})",
+ " value: { {y:foo}:42 }",
+ " }",
+ " z_tensor {",
+ " type: tensor(z{})",
+ " value: { {z:qux}:666 }",
+ " }",
+ " }",
+ " }",
+ "}"
+ ));
+ builder.build();
+ fail("Expected exception");
+ }
+ catch (IllegalArgumentException expected) {
+ assertEquals("In search definition 'test', rank profile 'my_rank_profile': The first-phase expression must produce a double (a tensor with no dimensions), but produces tensor(x{},y{},z{})",
+ Exceptions.toMessageString(expected));
+ }
+ }
+
+
+
@Test
public void tensorSecondPhaseMustProduceDouble() throws Exception {
try {
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionWithTensorTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionWithTensorTestCase.java
index a1231a1418b..10ba6eff169 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionWithTensorTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionWithTensorTestCase.java
@@ -33,6 +33,26 @@ public class RankingExpressionWithTensorTestCase {
}
@Test
+ public void requireConstantTensorCanBeReferredViaConstantFeature() throws ParseException {
+ RankProfileSearchFixture f = new RankProfileSearchFixture(
+ " rank-profile my_profile {\n" +
+ " first-phase {\n" +
+ " expression: sum(constant(my_tensor))\n" +
+ " }\n" +
+ " constants {\n" +
+ " my_tensor {\n" +
+ " value: { {x:1,y:2}:1, {x:2,y:1}:2 }\n" +
+ " type: tensor(x{},y{})\n" +
+ " }\n" +
+ " }\n" +
+ " }");
+ f.compileRankProfile("my_profile");
+ f.assertFirstPhaseExpression("reduce(constant(my_tensor), sum)", "my_profile");
+ f.assertRankProperty("tensor(x{},y{}):{{x:1,y:2}:1.0,{x:2,y:1}:2.0}", "constant(my_tensor).value", "my_profile");
+ f.assertRankProperty("tensor(x{},y{})", "constant(my_tensor).type", "my_profile");
+ }
+
+ @Test
public void requireThatMultiLineConstantTensorAndTypeCanBeParsed() throws ParseException {
RankProfileSearchFixture f = new RankProfileSearchFixture(
" rank-profile my_profile {\n" +
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/admin/ClusterControllerTestCase.java b/config-model/src/test/java/com/yahoo/vespa/model/admin/ClusterControllerTestCase.java
index 989ae87913d..cf142dae2c7 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/admin/ClusterControllerTestCase.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/admin/ClusterControllerTestCase.java
@@ -13,7 +13,6 @@ import com.yahoo.config.model.api.Reindexing;
import com.yahoo.config.model.application.provider.SimpleApplicationValidator;
import com.yahoo.config.model.builder.xml.test.DomBuilderTest;
import com.yahoo.config.model.deploy.DeployState;
-import com.yahoo.config.model.deploy.TestProperties;
import com.yahoo.config.model.test.MockApplicationPackage;
import com.yahoo.config.model.test.TestDriver;
import com.yahoo.config.model.test.TestRoot;
@@ -393,7 +392,7 @@ public class ClusterControllerTestCase extends DomBuilderTest {
model.getConfig(qrBuilder, "admin/cluster-controllers/0/components/clustercontroller-bar-configurer");
QrStartConfig qrStartConfig = new QrStartConfig(qrBuilder);
assertEquals(32, qrStartConfig.jvm().minHeapsize());
- assertEquals(256, qrStartConfig.jvm().heapsize());
+ assertEquals(128, qrStartConfig.jvm().heapsize());
assertEquals(0, qrStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory());
assertEquals(2, qrStartConfig.jvm().availableProcessors());
assertFalse(qrStartConfig.jvm().verbosegc());
@@ -407,44 +406,6 @@ public class ClusterControllerTestCase extends DomBuilderTest {
}
@Test
- public void testQrStartConfigWithFeatureFlagForMaxHeap() throws Exception {
- String xml = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
- "<services>\n" +
- "\n" +
- " <admin version=\"2.0\">\n" +
- " <adminserver hostalias=\"configserver\" />\n" +
- " <logserver hostalias=\"logserver\" />\n" +
- " <slobroks>\n" +
- " <slobrok hostalias=\"configserver\" />\n" +
- " <slobrok hostalias=\"logserver\" />\n" +
- " </slobroks>\n" +
- " </admin>\n" +
- " <content version='1.0' id='bar'>" +
- " <redundancy>1</redundancy>\n" +
- " <documents>" +
- " <document type=\"type1\" mode=\"store-only\"/>\n" +
- " </documents>\n" +
- " <group>" +
- " <node hostalias='node0' distribution-key='0' />" +
- " </group>" +
- " </content>" +
- "\n" +
- "</services>";
-
- VespaModel model = createVespaModel(xml, new DeployState.Builder().properties(new TestProperties().clusterControllerMaxHeapSizeInMb(256)));
- assertTrue(model.getService("admin/cluster-controllers/0").isPresent());
-
- QrStartConfig.Builder qrBuilder = new QrStartConfig.Builder();
- model.getConfig(qrBuilder, "admin/cluster-controllers/0/components/clustercontroller-bar-configurer");
- QrStartConfig qrStartConfig = new QrStartConfig(qrBuilder);
- // Taken from ContainerCluster
- assertEquals(32, qrStartConfig.jvm().minHeapsize());
- // Overridden values from ClusterControllerContainerCluster
- assertEquals(256, qrStartConfig.jvm().heapsize());
- assertFalse(qrStartConfig.jvm().verbosegc());
- }
-
- @Test
public void testUnconfiguredNoContent() throws Exception {
String xml = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
"<services>\n" +
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/admin/DedicatedAdminV4Test.java b/config-model/src/test/java/com/yahoo/vespa/model/admin/DedicatedAdminV4Test.java
index f1f794c5057..60672c7df07 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/admin/DedicatedAdminV4Test.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/admin/DedicatedAdminV4Test.java
@@ -147,7 +147,7 @@ public class DedicatedAdminV4Test {
" <admin version='4.0'>" +
" <slobroks><nodes count='2' dedicated='true'/></slobroks>" +
" <logservers><nodes count='1' dedicated='true'/></logservers>" +
- " <logforwarding>" +
+ " <logforwarding include-admin='true'>" +
" <splunk deployment-server='foo:123' client-name='foocli' phone-home-interval='900'/>" +
" </logforwarding>" +
" </admin>" +
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/QuotaValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/QuotaValidatorTest.java
index d92ace2939a..e99a92b530a 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/QuotaValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/QuotaValidatorTest.java
@@ -26,7 +26,7 @@ public class QuotaValidatorTest {
@Test
public void test_deploy_under_quota() {
var tester = new ValidationTester(8, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(publicZone));
- tester.deploy(null, getServices("testCluster", 5), Environment.prod, null);
+ tester.deploy(null, getServices("testCluster", 4), Environment.prod, null);
}
@Test
@@ -54,7 +54,7 @@ public class QuotaValidatorTest {
@Test
public void test_deploy_above_quota_budget_in_publiccd() {
- var tester = new ValidationTester(13, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(publicCdZone));
+ var tester = new ValidationTester(13, false, new TestProperties().setHostedVespa(true).setQuota(quota.withBudget(BigDecimal.ONE)).setZone(publicCdZone));
try {
tester.deploy(null, getServices("testCluster", 10), Environment.prod, null);
fail();
@@ -65,6 +65,19 @@ public class QuotaValidatorTest {
}
@Test
+ public void test_deploy_max_resources_above_quota() {
+ var tester = new ValidationTester(13, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(publicCdZone));
+ try {
+ tester.deploy(null, getServices("testCluster", 10), Environment.prod, null);
+ fail();
+ } catch (RuntimeException e) {
+ assertEquals("publiccd: Please free up some capacity! This deployment's quota use ($-.--) exceeds reserved quota ($-.--)!",
+ ValidationTester.censorNumbers(e.getMessage()));
+
+ }
+ }
+
+ @Test
public void test_deploy_with_negative_budget() {
var quota = Quota.unlimited().withBudget(BigDecimal.valueOf(-1));
var tester = new ValidationTester(13, false, new TestProperties().setHostedVespa(true).setQuota(quota).setZone(publicZone));
@@ -88,7 +101,7 @@ public class QuotaValidatorTest {
" <document type='music' mode='index'/>" +
" </documents>" +
" <nodes count='" + nodeCount + "'>" +
- " <resources vcpu=\"[0.5, 1]\" memory=\"[1Gb, 3Gb]\" disk=\"[1Gb, 9Gb]\"/>\n" +
+ " <resources vcpu=\"[0.5, 2]\" memory=\"[1Gb, 6Gb]\" disk=\"[1Gb, 18Gb]\"/>\n" +
" </nodes>" +
" </content>" +
"</services>";
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidatorTest.java
index 20f5a9c841c..b1fda081b64 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidatorTest.java
@@ -2,14 +2,12 @@
package com.yahoo.vespa.model.application.validation.change.search;
import com.yahoo.config.application.api.ValidationId;
-import com.yahoo.config.application.api.ValidationOverrides;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.vespa.indexinglanguage.expressions.ScriptExpression;
import com.yahoo.vespa.model.application.validation.change.VespaConfigChangeAction;
import com.yahoo.vespa.model.application.validation.change.VespaReindexAction;
import org.junit.Test;
-import java.time.Instant;
import java.util.Arrays;
import java.util.List;
@@ -130,7 +128,7 @@ public class IndexingScriptChangeValidatorTest {
}
@Test
- public void requireThatAddingIndexFieldIsOk() throws Exception {
+ public void requireThatAddingDocumentIndexFieldIsOk() throws Exception {
new Fixture("", "field f1 type string { indexing: index | summary }").
assertValidation();
}
@@ -142,12 +140,22 @@ public class IndexingScriptChangeValidatorTest {
}
@Test
- public void requireThatAddingFieldIsOk() throws Exception {
+ public void requireThatAddingDocumentFieldIsOk() throws Exception {
new Fixture("", FIELD + " { indexing: attribute | summary }").
assertValidation();
}
@Test
+ public void requireThatAddingExtraFieldRequiresReindexing() throws Exception {
+ new Fixture(" field f1 type string { indexing: index }",
+ " field f1 type string { indexing: index } } " +
+ " field f2 type string { indexing: input f1 | summary ")
+ .assertValidation(VespaReindexAction.of(ClusterSpec.Id.from("test"),
+ null,
+ "Non-document field 'f2' added; this may be populated by reindexing"));
+ }
+
+ @Test
public void requireThatAddingSummaryAspectIsOk() throws Exception {
new Fixture(FIELD + " { indexing: attribute }",
FIELD + " { indexing: attribute | summary }").
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/ContentBuilderTest.java b/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/ContentBuilderTest.java
index 3a3dde0cf87..ad4603e5c6b 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/ContentBuilderTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/ContentBuilderTest.java
@@ -824,55 +824,22 @@ public class ContentBuilderTest extends DomBuilderTest {
verifyThatFeatureFlagControlsVisibilityDelayDefault(0.6, 0.6);
}
- private void verifyThatFeatureFlagControlsUseBucketExecutorForLidSpaceCompact(boolean flag) {
- DeployState.Builder deployStateBuilder = new DeployState.Builder().properties(new TestProperties().useBucketExecutorForLidSpaceCompact(flag));
+ private void verifyThatFeatureFlagControlsUseBucketExecutorForPruneRemoved(boolean flag) {
+ DeployState.Builder deployStateBuilder = new DeployState.Builder().properties(new TestProperties().useBucketExecutorForPruneRemoved(flag));
VespaModel model = new VespaModelCreatorWithMockPkg(new MockApplicationPackage.Builder()
.withServices(singleNodeContentXml())
.withSearchDefinition(MockApplicationPackage.MUSIC_SEARCHDEFINITION)
.build())
.create(deployStateBuilder);
ProtonConfig config = getProtonConfig(model.getContentClusters().values().iterator().next());
- assertEquals(flag, config.lidspacecompaction().usebucketexecutor());
+ assertEquals(flag, config.pruneremoveddocuments().usebucketexecutor());
}
- private void verifyThatFeatureFlagControlsUseBucketExecutorForBucketMove(boolean flag) {
- DeployState.Builder deployStateBuilder = new DeployState.Builder().properties(new TestProperties().useBucketExecutorForBucketMove(flag));
- VespaModel model = new VespaModelCreatorWithMockPkg(new MockApplicationPackage.Builder()
- .withServices(singleNodeContentXml())
- .withSearchDefinition(MockApplicationPackage.MUSIC_SEARCHDEFINITION)
- .build())
- .create(deployStateBuilder);
- ProtonConfig config = getProtonConfig(model.getContentClusters().values().iterator().next());
- assertEquals(flag, config.bucketmove().usebucketexecutor());
- }
-
- private void verifyThatFeatureFlagControlsMaxpendingMoveOps(int moveOps) {
- DeployState.Builder deployStateBuilder = new DeployState.Builder().properties(new TestProperties().setMaxPendingMoveOps(moveOps));
- VespaModel model = new VespaModelCreatorWithMockPkg(new MockApplicationPackage.Builder()
- .withServices(singleNodeContentXml())
- .withSearchDefinition(MockApplicationPackage.MUSIC_SEARCHDEFINITION)
- .build())
- .create(deployStateBuilder);
- ProtonConfig config = getProtonConfig(model.getContentClusters().values().iterator().next());
- assertEquals(moveOps, config.maintenancejobs().maxoutstandingmoveops());
- }
-
- @Test
- public void verifyMaxPendingMoveOps() {
- verifyThatFeatureFlagControlsMaxpendingMoveOps(13);
- verifyThatFeatureFlagControlsMaxpendingMoveOps(107);
- }
-
- @Test
- public void verifyUseBucketExecutorForLidSpaceCompact() {
- verifyThatFeatureFlagControlsUseBucketExecutorForLidSpaceCompact(true);
- verifyThatFeatureFlagControlsUseBucketExecutorForLidSpaceCompact(false);
- }
@Test
- public void verifyUseBucketExecutorForBucketMove() {
- verifyThatFeatureFlagControlsUseBucketExecutorForBucketMove(true);
- verifyThatFeatureFlagControlsUseBucketExecutorForBucketMove(false);
+ public void verifyUseBucketExecutorForPruneRemoved() {
+ verifyThatFeatureFlagControlsUseBucketExecutorForPruneRemoved(true);
+ verifyThatFeatureFlagControlsUseBucketExecutorForPruneRemoved(false);
}
@Test
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java
index 46026a5528c..ee9c9ccb681 100755
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java
@@ -175,7 +175,7 @@ public class ContainerClusterTest {
cluster.getConfig(qrBuilder);
QrStartConfig qrStartConfig = new QrStartConfig(qrBuilder);
assertEquals(32, qrStartConfig.jvm().minHeapsize());
- assertEquals(256, qrStartConfig.jvm().heapsize());
+ assertEquals(128, qrStartConfig.jvm().heapsize());
assertEquals(32, qrStartConfig.jvm().compressedClassSpaceSize());
assertEquals(0, qrStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory());
root.freezeModelTopology();
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java
index 4aadc0e3f05..953c42243a6 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java
@@ -1005,22 +1005,6 @@ public class ContentClusterTest extends ContentBaseTest {
assertTrue(resolveThreePhaseUpdateConfigWithFeatureFlag(true));
}
- private double resolveMaxDeadBytesRatio(double maxDeadBytesRatio) {
- VespaModel model = createEnd2EndOneNode(new TestProperties().maxDeadBytesRatio(maxDeadBytesRatio));
- ContentCluster cc = model.getContentClusters().get("storage");
- ProtonConfig.Builder protonBuilder = new ProtonConfig.Builder();
- cc.getSearch().getConfig(protonBuilder);
- ProtonConfig protonConfig = new ProtonConfig(protonBuilder);
- assertEquals(1, protonConfig.documentdb().size());
- return protonConfig.documentdb(0).allocation().max_dead_bytes_ratio();
- }
-
- @Test
- public void default_max_dead_bytes_ratio_config_controlled_by_properties() {
- assertEquals(0.2, resolveMaxDeadBytesRatio(0.2), 1e-5);
- assertEquals(0.1, resolveMaxDeadBytesRatio(0.1), 1e-5);
- }
-
void assertZookeeperServerImplementation(String expectedClassName,
ClusterControllerContainerCluster clusterControllerCluster) {
for (ClusterControllerContainer c : clusterControllerCluster.getContainers()) {
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/ml/ModelEvaluationTest.java b/config-model/src/test/java/com/yahoo/vespa/model/ml/ModelEvaluationTest.java
index fc6a4ee2783..d0196ace766 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/ml/ModelEvaluationTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/ml/ModelEvaluationTest.java
@@ -15,6 +15,7 @@ import com.yahoo.path.Path;
import com.yahoo.tensor.Tensor;
import com.yahoo.tensor.TensorType;
import com.yahoo.vespa.config.search.RankProfilesConfig;
+import com.yahoo.vespa.config.search.core.OnnxModelsConfig;
import com.yahoo.vespa.config.search.core.RankingConstantsConfig;
import com.yahoo.vespa.model.VespaModel;
import com.yahoo.vespa.model.container.ApplicationContainerCluster;
@@ -95,6 +96,10 @@ public class ModelEvaluationTest {
cluster.getConfig(cb);
RankingConstantsConfig constantsConfig = new RankingConstantsConfig(cb);
+ OnnxModelsConfig.Builder ob = new OnnxModelsConfig.Builder();
+ cluster.getConfig(ob);
+ OnnxModelsConfig onnxModelsConfig = new OnnxModelsConfig(ob);
+
assertEquals(4, config.rankprofile().size());
Set<String> modelNames = config.rankprofile().stream().map(v -> v.name()).collect(Collectors.toSet());
assertTrue(modelNames.contains("xgboost_2_2"));
@@ -109,7 +114,7 @@ public class ModelEvaluationTest {
assertEquals(profile, sb.toString());
ModelsEvaluator evaluator = new ModelsEvaluator(new ToleratingMissingConstantFilesRankProfilesConfigImporter(MockFileAcquirer.returnFile(null))
- .importFrom(config, constantsConfig));
+ .importFrom(config, constantsConfig, onnxModelsConfig));
assertEquals(4, evaluator.models().size());
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java b/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java
index b72ae088484..ba975e52d1a 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java
@@ -52,6 +52,7 @@ public class VespaModelTester {
private final Map<NodeResources, Collection<Host>> hostsByResources = new HashMap<>();
private ApplicationId applicationId = ApplicationId.defaultId();
private boolean useDedicatedNodeForLogserver = false;
+ private HostProvisioner provisioner;
public VespaModelTester() {
this(new NullConfigModelRegistry());
@@ -61,6 +62,12 @@ public class VespaModelTester {
this.configModelRegistry = configModelRegistry;
}
+ public HostProvisioner provisioner() {
+ if (provisioner instanceof ProvisionerAdapter)
+ return ((ProvisionerAdapter)provisioner).provisioner();
+ return provisioner;
+ }
+
/** Adds some nodes with resources 1, 3, 10 */
public Hosts addHosts(int count) { return addHosts(InMemoryProvisioner.defaultResources, count); }
@@ -108,37 +115,43 @@ public class VespaModelTester {
/** Creates a model which uses 0 as start index */
public VespaModel createModel(String services, boolean failOnOutOfCapacity, String ... retiredHostNames) {
- return createModel(Zone.defaultZone(), services, failOnOutOfCapacity, false, 0,
+ return createModel(Zone.defaultZone(), services, failOnOutOfCapacity, false, false, 0,
Optional.empty(), new DeployState.Builder(), retiredHostNames);
}
/** Creates a model which uses 0 as start index */
public VespaModel createModel(String services, boolean failOnOutOfCapacity, DeployState.Builder builder) {
- return createModel(Zone.defaultZone(), services, failOnOutOfCapacity, false, 0, Optional.empty(), builder);
+ return createModel(Zone.defaultZone(), services, failOnOutOfCapacity, false, false, 0, Optional.empty(), builder);
}
/** Creates a model which uses 0 as start index */
public VespaModel createModel(String services, boolean failOnOutOfCapacity, boolean useMaxResources, String ... retiredHostNames) {
- return createModel(Zone.defaultZone(), services, failOnOutOfCapacity, useMaxResources, 0,
+ return createModel(Zone.defaultZone(), services, failOnOutOfCapacity, useMaxResources, false, 0,
+ Optional.empty(), new DeployState.Builder(), retiredHostNames);
+ }
+
+ /** Creates a model which uses 0 as start index */
+ public VespaModel createModel(String services, boolean failOnOutOfCapacity, boolean useMaxResources, boolean alwaysReturnOneNode, String ... retiredHostNames) {
+ return createModel(Zone.defaultZone(), services, failOnOutOfCapacity, useMaxResources, alwaysReturnOneNode, 0,
Optional.empty(), new DeployState.Builder(), retiredHostNames);
}
/** Creates a model which uses 0 as start index */
public VespaModel createModel(String services, boolean failOnOutOfCapacity, int startIndexForClusters, String ... retiredHostNames) {
- return createModel(Zone.defaultZone(), services, failOnOutOfCapacity, false, startIndexForClusters,
+ return createModel(Zone.defaultZone(), services, failOnOutOfCapacity, false, false, startIndexForClusters,
Optional.empty(), new DeployState.Builder(), retiredHostNames);
}
/** Creates a model which uses 0 as start index */
public VespaModel createModel(Zone zone, String services, boolean failOnOutOfCapacity, String ... retiredHostNames) {
- return createModel(zone, services, failOnOutOfCapacity, false, 0,
+ return createModel(zone, services, failOnOutOfCapacity, false, false, 0,
Optional.empty(), new DeployState.Builder(), retiredHostNames);
}
/** Creates a model which uses 0 as start index */
public VespaModel createModel(Zone zone, String services, boolean failOnOutOfCapacity,
DeployState.Builder deployStateBuilder, String ... retiredHostNames) {
- return createModel(zone, services, failOnOutOfCapacity, false, 0,
+ return createModel(zone, services, failOnOutOfCapacity, false, false, 0,
Optional.empty(), deployStateBuilder, retiredHostNames);
}
@@ -152,15 +165,16 @@ public class VespaModelTester {
* @return the resulting model
*/
public VespaModel createModel(Zone zone, String services, boolean failOnOutOfCapacity, boolean useMaxResources,
+ boolean alwaysReturnOneNode,
int startIndexForClusters, Optional<VespaModel> previousModel,
DeployState.Builder deployStatebuilder, String ... retiredHostNames) {
VespaModelCreatorWithMockPkg modelCreatorWithMockPkg = new VespaModelCreatorWithMockPkg(null, services, ApplicationPackageUtils.generateSearchDefinition("type1"));
ApplicationPackage appPkg = modelCreatorWithMockPkg.appPkg;
- HostProvisioner provisioner = hosted ?
- new ProvisionerAdapter(new InMemoryProvisioner(hostsByResources,
+ provisioner = hosted ? new ProvisionerAdapter(new InMemoryProvisioner(hostsByResources,
failOnOutOfCapacity,
useMaxResources,
+ alwaysReturnOneNode,
false,
startIndexForClusters,
retiredHostNames)) :
@@ -184,12 +198,14 @@ public class VespaModelTester {
/** To verify that we don't call allocateHost(alias) in hosted environments */
private static class ProvisionerAdapter implements HostProvisioner {
- private final HostProvisioner provisioner;
+ private final InMemoryProvisioner provisioner;
- public ProvisionerAdapter(HostProvisioner provisioner) {
+ public ProvisionerAdapter(InMemoryProvisioner provisioner) {
this.provisioner = provisioner;
}
+ public InMemoryProvisioner provisioner() { return provisioner; }
+
@Override
public HostSpec allocateHost(String alias) {
throw new UnsupportedOperationException("Allocating hosts using <node> tags is not supported in hosted environments, " +
diff --git a/config/src/apps/vespa-configproxy-cmd/main.cpp b/config/src/apps/vespa-configproxy-cmd/main.cpp
index bb908e7268b..967bfee4ba9 100644
--- a/config/src/apps/vespa-configproxy-cmd/main.cpp
+++ b/config/src/apps/vespa-configproxy-cmd/main.cpp
@@ -19,7 +19,7 @@ public:
bool
Application::parseOpts()
{
- char c = '?';
+ int c = '?';
const char *optArg = NULL;
int optInd = 0;
while ((c = GetOpt("m:s:p:h", optArg, optInd)) != -1) {
diff --git a/config/src/apps/vespa-get-config/getconfig.cpp b/config/src/apps/vespa-get-config/getconfig.cpp
index e8ef1765473..273a3abd1cd 100644
--- a/config/src/apps/vespa-get-config/getconfig.cpp
+++ b/config/src/apps/vespa-get-config/getconfig.cpp
@@ -91,7 +91,7 @@ int
GetConfig::Main()
{
bool debugging = false;
- char c = -1;
+ int c = -1;
std::vector<vespalib::string> defSchema;
const char *schema = nullptr;
diff --git a/config/src/apps/vespa-ping-configproxy/pingproxy.cpp b/config/src/apps/vespa-ping-configproxy/pingproxy.cpp
index a47fd25f9af..787681cc670 100644
--- a/config/src/apps/vespa-ping-configproxy/pingproxy.cpp
+++ b/config/src/apps/vespa-ping-configproxy/pingproxy.cpp
@@ -70,7 +70,7 @@ PingProxy::Main()
{
int retval = 0;
bool debugging = false;
- char c = -1;
+ int c = -1;
const char *serverHost = "localhost";
int clientTimeout = 5;
diff --git a/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigRequester.java b/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigRequester.java
index efb93c6aed2..d3562a47ea1 100644
--- a/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigRequester.java
+++ b/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigRequester.java
@@ -44,8 +44,7 @@ public class JRTConfigRequester implements RequestWaiter {
private static final JRTManagedConnectionPools managedPool = new JRTManagedConnectionPools();
private static final int TRACELEVEL = 6;
private final TimingValues timingValues;
- private int fatalFailures = 0; // independent of transientFailures
- private int transientFailures = 0; // independent of fatalFailures
+ private boolean fatalFailures = false;
private final ScheduledThreadPoolExecutor scheduler;
private Instant noApplicationWarningLogged = Instant.MIN;
private static final Duration delayBetweenWarnings = Duration.ofSeconds(60);
@@ -121,14 +120,15 @@ public class JRTConfigRequester implements RequestWaiter {
}
private void doHandle(JRTConfigSubscription<ConfigInstance> sub, JRTClientConfigRequest jrtReq, Connection connection) {
+ if (subscriptionIsClosed(sub)) return; // Avoid error messages etc. after closing
+
boolean validResponse = jrtReq.validateResponse();
log.log(FINE, () -> "Request callback " + (validResponse ? "valid" : "invalid") + ". Req: " + jrtReq + "\nSpec: " + connection);
- if (sub.getState() == ConfigSubscription.State.CLOSED) return; // Avoid error messages etc. after closing
Trace trace = jrtReq.getResponseTrace();
trace.trace(TRACELEVEL, "JRTConfigRequester.doHandle()");
log.log(FINEST, () -> trace.toString());
if (validResponse) {
- handleOKRequest(jrtReq, sub, connection);
+ handleOKRequest(jrtReq, sub);
} else {
logWhenErrorResponse(jrtReq, connection);
handleFailedRequest(jrtReq, sub, connection);
@@ -165,7 +165,7 @@ public class JRTConfigRequester implements RequestWaiter {
}
ErrorType errorType = ErrorType.getErrorType(jrtReq.errorCode());
connectionPool.setError(connection, jrtReq.errorCode());
- long delay = calculateFailedRequestDelay(errorType, transientFailures, fatalFailures, timingValues, configured);
+ long delay = calculateFailedRequestDelay(errorType, fatalFailures, timingValues, configured);
if (errorType == ErrorType.TRANSIENT) {
handleTransientlyFailed(jrtReq, sub, delay, connection);
} else {
@@ -173,19 +173,22 @@ public class JRTConfigRequester implements RequestWaiter {
}
}
- static long calculateFailedRequestDelay(ErrorType errorCode, int transientFailures, int fatalFailures,
- TimingValues timingValues, boolean configured) {
- long delay;
- if (configured)
- delay = timingValues.getConfiguredErrorDelay();
- else
- delay = timingValues.getUnconfiguredDelay();
+ static long calculateFailedRequestDelay(ErrorType errorType,
+ boolean fatalFailures,
+ TimingValues timingValues,
+ boolean configured) {
+ long delay = (configured ? timingValues.getConfiguredErrorDelay(): timingValues.getUnconfiguredDelay());
- if (errorCode == ErrorType.TRANSIENT) {
- delay = delay * Math.min((transientFailures + 1), timingValues.getMaxDelayMultiplier());
- } else {
- delay = timingValues.getFixedDelay() + (delay * Math.min(fatalFailures, timingValues.getMaxDelayMultiplier()));
- delay = timingValues.getPlusMinusFractionRandom(delay, randomFraction);
+ switch (errorType) {
+ case TRANSIENT:
+ delay = timingValues.getRandomTransientDelay(delay);
+ break;
+ case FATAL:
+ delay = timingValues.getFixedDelay() + (fatalFailures ? delay : 0);
+ delay = timingValues.getPlusMinusFractionRandom(delay, randomFraction);
+ break;
+ default:
+ throw new IllegalArgumentException("Unknown error type " + errorType);
}
return delay;
}
@@ -194,10 +197,9 @@ public class JRTConfigRequester implements RequestWaiter {
JRTConfigSubscription<ConfigInstance> sub,
long delay,
Connection connection) {
- transientFailures++;
+ fatalFailures = false;
log.log(INFO, "Connection to " + connection.getAddress() +
" failed or timed out, clients will keep existing config, will keep trying.");
- if (sub.getState() != ConfigSubscription.State.OPEN) return;
scheduleNextRequest(jrtReq, sub, delay, calculateErrorTimeout());
}
@@ -214,10 +216,8 @@ public class JRTConfigRequester implements RequestWaiter {
* @param sub a config subscription
* @param delay delay before sending a new request
*/
- private void handleFatallyFailed(JRTClientConfigRequest jrtReq,
- JRTConfigSubscription<ConfigInstance> sub, long delay) {
- if (sub.getState() != ConfigSubscription.State.OPEN) return;
- fatalFailures++;
+ private void handleFatallyFailed(JRTClientConfigRequest jrtReq, JRTConfigSubscription<ConfigInstance> sub, long delay) {
+ fatalFailures = true;
// The logging depends on whether we are configured or not.
Level logLevel = sub.getConfigState().getConfig() == null ? Level.FINE : Level.INFO;
String logMessage = "Request for config " + jrtReq.getShortDescription() + "' failed with error code " +
@@ -227,12 +227,8 @@ public class JRTConfigRequester implements RequestWaiter {
scheduleNextRequest(jrtReq, sub, delay, calculateErrorTimeout());
}
- private void handleOKRequest(JRTClientConfigRequest jrtReq,
- JRTConfigSubscription<ConfigInstance> sub,
- Connection connection) {
- // Reset counters pertaining to error handling here
- fatalFailures = 0;
- transientFailures = 0;
+ private void handleOKRequest(JRTClientConfigRequest jrtReq, JRTConfigSubscription<ConfigInstance> sub) {
+ fatalFailures = false;
noApplicationWarningLogged = Instant.MIN;
sub.setLastCallBackOKTS(Instant.now());
log.log(FINE, () -> "OK response received in handleOkRequest: " + jrtReq);
@@ -244,10 +240,13 @@ public class JRTConfigRequester implements RequestWaiter {
sub.setException(new ConfigurationRuntimeException("Could not put returned request on queue of subscription " + sub));
}
}
- if (sub.getState() != ConfigSubscription.State.OPEN) return;
scheduleNextRequest(jrtReq, sub, calculateSuccessDelay(), calculateSuccessTimeout());
}
+ private boolean subscriptionIsClosed(JRTConfigSubscription<ConfigInstance> sub) {
+ return sub.getState() == ConfigSubscription.State.CLOSED;
+ }
+
private long calculateSuccessTimeout() {
return timingValues.getPlusMinusFractionRandom(timingValues.getSuccessTimeout(), randomFraction);
}
@@ -302,11 +301,7 @@ public class JRTConfigRequester implements RequestWaiter {
}
}
- int getTransientFailures() {
- return transientFailures;
- }
-
- int getFatalFailures() {
+ boolean getFatalFailures() {
return fatalFailures;
}
diff --git a/config/src/main/java/com/yahoo/vespa/config/ConfigPayload.java b/config/src/main/java/com/yahoo/vespa/config/ConfigPayload.java
index 8153179e49c..94f07a52641 100644
--- a/config/src/main/java/com/yahoo/vespa/config/ConfigPayload.java
+++ b/config/src/main/java/com/yahoo/vespa/config/ConfigPayload.java
@@ -5,16 +5,18 @@ import com.yahoo.config.ConfigInstance;
import com.yahoo.config.codegen.InnerCNode;
import com.yahoo.config.subscription.ConfigInstanceSerializer;
import com.yahoo.config.subscription.ConfigInstanceUtil;
+import com.yahoo.io.Utf8ByteWriter;
import com.yahoo.slime.JsonDecoder;
import com.yahoo.slime.JsonFormat;
import com.yahoo.slime.Slime;
import com.yahoo.slime.SlimeFormat;
-import com.yahoo.text.Utf8Array;
+import com.yahoo.text.AbstractUtf8Array;
+import com.yahoo.text.Utf8PartialArray;
import com.yahoo.text.Utf8String;
-import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
+import java.nio.ByteBuffer;
/**
* A config payload.
@@ -79,19 +81,20 @@ public class ConfigPayload {
return !slime.get().valid() || slime.get().children() == 0;
}
- public Utf8Array toUtf8Array(boolean compact) {
- ByteArrayOutputStream os = new ByteArrayOutputStream(10000);
+ public AbstractUtf8Array toUtf8Array(boolean compact) {
+ Utf8ByteWriter os = new Utf8ByteWriter(8192);
try {
new JsonFormat(compact).encode(os, slime);
os.close();
} catch (IOException e) {
e.printStackTrace();
}
- return new Utf8Array(os.toByteArray());
+ ByteBuffer buf = os.getBuf();
+ return new Utf8PartialArray(buf.array(), buf.arrayOffset() + buf.position(), buf.remaining());
}
- public static ConfigPayload fromUtf8Array(Utf8Array payload) {
- return new ConfigPayload(new JsonDecoder().decode(new Slime(), payload.getBytes()));
+ public static ConfigPayload fromUtf8Array(AbstractUtf8Array payload) {
+ return new ConfigPayload(new JsonDecoder().decode(new Slime(), payload.wrap()));
}
public <ConfigType extends ConfigInstance> ConfigType toInstance(Class<ConfigType> clazz, String configId) {
diff --git a/config/src/main/java/com/yahoo/vespa/config/LZ4PayloadCompressor.java b/config/src/main/java/com/yahoo/vespa/config/LZ4PayloadCompressor.java
index 688d6d54888..03e08e039ae 100644
--- a/config/src/main/java/com/yahoo/vespa/config/LZ4PayloadCompressor.java
+++ b/config/src/main/java/com/yahoo/vespa/config/LZ4PayloadCompressor.java
@@ -5,6 +5,8 @@ import com.yahoo.compress.CompressionType;
import com.yahoo.compress.Compressor;
import com.yahoo.vespa.config.util.ConfigUtils;
+import java.nio.ByteBuffer;
+
/**
* Wrapper for LZ4 compression that selects compression level based on properties.
*
@@ -24,9 +26,17 @@ public class LZ4PayloadCompressor {
public byte[] compress(byte[] input) {
return compressor.compressUnconditionally(input);
}
+ public byte[] compress(ByteBuffer input) {
+ return compressor.compressUnconditionally(input);
+ }
public byte [] decompress(byte[] input, int uncompressedLen) {
return compressor.decompressUnconditionally(input, 0, uncompressedLen);
}
+ public byte [] decompress(ByteBuffer input, int uncompressedLen) {
+ ByteBuffer uncompressed = ByteBuffer.allocate(uncompressedLen);
+ compressor.decompressUnconditionally(input, uncompressed);
+ return uncompressed.array();
+ }
}
diff --git a/config/src/main/java/com/yahoo/vespa/config/TimingValues.java b/config/src/main/java/com/yahoo/vespa/config/TimingValues.java
index 5d5967e56c4..235928a7d0b 100644
--- a/config/src/main/java/com/yahoo/vespa/config/TimingValues.java
+++ b/config/src/main/java/com/yahoo/vespa/config/TimingValues.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config;
import java.util.Random;
@@ -21,7 +21,6 @@ public class TimingValues {
private long fixedDelay = 5000;
private long unconfiguredDelay = 1000;
private long configuredErrorDelay = 15000;
- private int maxDelayMultiplier = 10;
private final Random rand;
public TimingValues() {
@@ -38,8 +37,7 @@ public class TimingValues {
long subscribeTimeout,
long unconfiguredDelay,
long configuredErrorDelay,
- long fixedDelay,
- int maxDelayMultiplier) {
+ long fixedDelay) {
this.successTimeout = successTimeout;
this.errorTimeout = errorTimeout;
this.initialTimeout = initialTimeout;
@@ -47,7 +45,6 @@ public class TimingValues {
this.unconfiguredDelay = unconfiguredDelay;
this.configuredErrorDelay = configuredErrorDelay;
this.fixedDelay = fixedDelay;
- this.maxDelayMultiplier = maxDelayMultiplier;
this.rand = new Random(System.currentTimeMillis());
}
@@ -58,7 +55,6 @@ public class TimingValues {
long unconfiguredDelay,
long configuredErrorDelay,
long fixedDelay,
- int maxDelayMultiplier,
Random rand) {
this.successTimeout = successTimeout;
this.errorTimeout = errorTimeout;
@@ -67,7 +63,6 @@ public class TimingValues {
this.unconfiguredDelay = unconfiguredDelay;
this.configuredErrorDelay = configuredErrorDelay;
this.fixedDelay = fixedDelay;
- this.maxDelayMultiplier = maxDelayMultiplier;
this.rand = rand;
}
@@ -79,7 +74,6 @@ public class TimingValues {
tv.unconfiguredDelay,
tv.configuredErrorDelay,
tv.fixedDelay,
- tv.maxDelayMultiplier,
random);
}
@@ -154,16 +148,6 @@ public class TimingValues {
}
/**
- * Returns maximum multiplier to use when calculating delay (the delay is multiplied by the number of
- * failed requests, unless that number is this maximum multiplier).
- *
- * @return timeout in milliseconds.
- */
- public int getMaxDelayMultiplier() {
- return maxDelayMultiplier;
- }
-
- /**
* Returns fixed delay that is used when retrying getting config no matter if it was a success or an error
* and independent of number of retries.
*
@@ -176,24 +160,35 @@ public class TimingValues {
/**
* Returns a number +/- a random component
*
- * @param val input
+ * @param value input
* @param fraction for instance 0.1 for +/- 10%
* @return a number
*/
- public long getPlusMinusFractionRandom(long val, float fraction) {
- return Math.round(val - (val * fraction) + (rand.nextFloat() * 2L * val * fraction));
+ public long getPlusMinusFractionRandom(long value, float fraction) {
+ return Math.round(value - (value * fraction) + (rand.nextFloat() * 2L * value * fraction));
+ }
+
+ /**
+ * Returns a number between 0 and maxValue
+ *
+ * @param maxValue the maximum value that may be returned
+ * @return a number
+ */
+ public long getRandomTransientDelay(long maxValue) {
+ return Math.round(rand.nextFloat() * maxValue);
}
@Override
public String toString() {
return "TimingValues [successTimeout=" + successTimeout
- + ", errorTimeout=" + errorTimeout + ", initialTimeout="
- + initialTimeout + ", subscribeTimeout=" + subscribeTimeout
- + ", configuredErrorTimeout=" + configuredErrorTimeout
- + ", fixedDelay=" + fixedDelay + ", unconfiguredDelay="
- + unconfiguredDelay + ", configuredErrorDelay="
- + configuredErrorDelay + ", maxDelayMultiplier="
- + maxDelayMultiplier + ", rand=" + rand + "]";
+ + ", errorTimeout=" + errorTimeout
+ + ", initialTimeout=" + initialTimeout
+ + ", subscribeTimeout=" + subscribeTimeout
+ + ", configuredErrorTimeout=" + configuredErrorTimeout
+ + ", fixedDelay=" + fixedDelay
+ + ", unconfiguredDelay=" + unconfiguredDelay
+ + ", configuredErrorDelay=" + configuredErrorDelay
+ + ", rand=" + rand + "]";
}
diff --git a/config/src/main/java/com/yahoo/vespa/config/protocol/ConfigResponse.java b/config/src/main/java/com/yahoo/vespa/config/protocol/ConfigResponse.java
index 31e280e708c..f6fce56c227 100644
--- a/config/src/main/java/com/yahoo/vespa/config/protocol/ConfigResponse.java
+++ b/config/src/main/java/com/yahoo/vespa/config/protocol/ConfigResponse.java
@@ -1,7 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.protocol;
-import com.yahoo.text.Utf8Array;
+import com.yahoo.text.AbstractUtf8Array;
import java.io.IOException;
import java.io.OutputStream;
@@ -16,7 +16,7 @@ import java.io.OutputStream;
*/
public interface ConfigResponse {
- Utf8Array getPayload();
+ AbstractUtf8Array getPayload();
long getGeneration();
diff --git a/config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequestV3.java b/config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequestV3.java
index 53a2f4019f9..4575368cc6a 100644
--- a/config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequestV3.java
+++ b/config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequestV3.java
@@ -14,6 +14,8 @@ import com.yahoo.vespa.config.util.ConfigUtils;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
import java.util.Optional;
import java.util.logging.Logger;
@@ -99,7 +101,14 @@ public class JRTServerConfigRequestV3 implements JRTServerConfigRequest {
}
request.returnValues().add(createResponseValue(byteArrayOutputStream));
if (changedConfigAndNewGeneration) {
- request.returnValues().add(new DataValue(responsePayload.getData().getBytes()));
+ ByteBuffer buf = responsePayload.getData().wrap();
+ if (buf.hasArray() && buf.remaining() == buf.array().length) {
+ request.returnValues().add(new DataValue(buf.array()));
+ } else {
+ byte [] dst = new byte[buf.remaining()];
+ buf.get(dst);
+ request.returnValues().add(new DataValue(dst));
+ }
} else {
request.returnValues().add(new DataValue(new byte[0]));
}
diff --git a/config/src/main/java/com/yahoo/vespa/config/protocol/Payload.java b/config/src/main/java/com/yahoo/vespa/config/protocol/Payload.java
index 13f9602f70c..f0b8640c3f6 100644
--- a/config/src/main/java/com/yahoo/vespa/config/protocol/Payload.java
+++ b/config/src/main/java/com/yahoo/vespa/config/protocol/Payload.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.protocol;
+import com.yahoo.text.AbstractUtf8Array;
import com.yahoo.text.Utf8Array;
import com.yahoo.text.Utf8String;
import com.yahoo.vespa.config.ConfigPayload;
@@ -16,7 +17,7 @@ import java.util.Objects;
*/
public class Payload {
- private final Utf8Array data;
+ private final AbstractUtf8Array data;
private final CompressionInfo compressionInfo;
private final static LZ4PayloadCompressor compressor = new LZ4PayloadCompressor();
@@ -25,7 +26,7 @@ public class Payload {
this.compressionInfo = CompressionInfo.create(CompressionType.UNCOMPRESSED, data.getByteLength());
}
- private Payload(Utf8Array payload, CompressionInfo compressionInfo) {
+ private Payload(AbstractUtf8Array payload, CompressionInfo compressionInfo) {
Objects.requireNonNull(payload, "Payload");
Objects.requireNonNull(compressionInfo, "CompressionInfo");
this.data = payload;
@@ -46,26 +47,26 @@ public class Payload {
}
/** Creates an uncompressed payload from an Utf8Array */
- public static Payload from(Utf8Array payload) {
+ public static Payload from(AbstractUtf8Array payload) {
return new Payload(payload, CompressionInfo.uncompressed());
}
- public static Payload from(Utf8Array payload, CompressionInfo compressionInfo) {
+ public static Payload from(AbstractUtf8Array payload, CompressionInfo compressionInfo) {
return new Payload(payload, compressionInfo);
}
- public Utf8Array getData() { return data; }
+ public AbstractUtf8Array getData() { return data; }
/** Returns a copy of this payload where the data is compressed using the given compression */
public Payload withCompression(CompressionType requestedCompression) {
CompressionType responseCompression = compressionInfo.getCompressionType();
if (requestedCompression == CompressionType.UNCOMPRESSED && responseCompression == CompressionType.LZ4) {
- byte[] buffer = compressor.decompress(data.getBytes(), compressionInfo.getUncompressedSize());
+ byte[] buffer = compressor.decompress(data.wrap(), compressionInfo.getUncompressedSize());
Utf8Array data = new Utf8Array(buffer);
CompressionInfo info = CompressionInfo.create(CompressionType.UNCOMPRESSED, compressionInfo.getUncompressedSize());
return Payload.from(data, info);
} else if (requestedCompression == CompressionType.LZ4 && responseCompression == CompressionType.UNCOMPRESSED) {
- Utf8Array data = new Utf8Array(compressor.compress(this.data.getBytes()));
+ Utf8Array data = new Utf8Array(compressor.compress(this.data.wrap()));
CompressionInfo info = CompressionInfo.create(CompressionType.LZ4, this.data.getByteLength());
return Payload.from(data, info);
} else {
diff --git a/config/src/main/java/com/yahoo/vespa/config/protocol/SlimeConfigResponse.java b/config/src/main/java/com/yahoo/vespa/config/protocol/SlimeConfigResponse.java
index 1fec7e17d06..1ccf6e367fc 100644
--- a/config/src/main/java/com/yahoo/vespa/config/protocol/SlimeConfigResponse.java
+++ b/config/src/main/java/com/yahoo/vespa/config/protocol/SlimeConfigResponse.java
@@ -1,11 +1,12 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.protocol;
-import com.yahoo.text.Utf8Array;
+import com.yahoo.text.AbstractUtf8Array;
import com.yahoo.vespa.config.ConfigPayload;
import java.io.IOException;
import java.io.OutputStream;
+import java.nio.ByteBuffer;
/**
* Class for serializing config responses based on {@link com.yahoo.slime.Slime} implementing the {@link ConfigResponse} interface.
@@ -14,7 +15,7 @@ import java.io.OutputStream;
*/
public class SlimeConfigResponse implements ConfigResponse {
- private final Utf8Array payload;
+ private final AbstractUtf8Array payload;
private final CompressionInfo compressionInfo;
private final long generation;
private final boolean applyOnRestart;
@@ -22,13 +23,13 @@ public class SlimeConfigResponse implements ConfigResponse {
public static SlimeConfigResponse fromConfigPayload(ConfigPayload payload, long generation,
boolean applyOnRestart, String configMd5) {
- Utf8Array data = payload.toUtf8Array(true);
+ AbstractUtf8Array data = payload.toUtf8Array(true);
return new SlimeConfigResponse(data, generation, applyOnRestart,
configMd5,
CompressionInfo.create(CompressionType.UNCOMPRESSED, data.getByteLength()));
}
- public SlimeConfigResponse(Utf8Array payload,
+ public SlimeConfigResponse(AbstractUtf8Array payload,
long generation,
boolean applyOnRestart,
String configMd5,
@@ -41,7 +42,7 @@ public class SlimeConfigResponse implements ConfigResponse {
}
@Override
- public Utf8Array getPayload() {
+ public AbstractUtf8Array getPayload() {
return payload;
}
@@ -60,7 +61,8 @@ public class SlimeConfigResponse implements ConfigResponse {
@Override
public void serialize(OutputStream os, CompressionType type) throws IOException {
- os.write(Payload.from(payload, compressionInfo).withCompression(type).getData().getBytes());
+ ByteBuffer buf = Payload.from(payload, compressionInfo).withCompression(type).getData().wrap();
+ os.write(buf.array(), buf.arrayOffset()+buf.position(), buf.remaining());
}
@Override
diff --git a/config/src/main/java/com/yahoo/vespa/config/util/ConfigUtils.java b/config/src/main/java/com/yahoo/vespa/config/util/ConfigUtils.java
index 8f856ff4771..a7fc8afcad9 100644
--- a/config/src/main/java/com/yahoo/vespa/config/util/ConfigUtils.java
+++ b/config/src/main/java/com/yahoo/vespa/config/util/ConfigUtils.java
@@ -6,8 +6,8 @@ import com.yahoo.io.HexDump;
import com.yahoo.io.IOUtils;
import com.yahoo.net.HostName;
import com.yahoo.slime.JsonFormat;
+import com.yahoo.text.AbstractUtf8Array;
import com.yahoo.text.Utf8;
-import com.yahoo.text.Utf8Array;
import com.yahoo.vespa.config.ConfigDefinitionKey;
import com.yahoo.vespa.config.ConfigPayload;
@@ -17,6 +17,7 @@ import java.io.IOException;
import java.io.LineNumberReader;
import java.io.Reader;
import java.io.StringReader;
+import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
@@ -69,8 +70,8 @@ public class ConfigUtils {
return getMd5(input.getBytes(StandardCharsets.UTF_8));
}
- public static String getMd5(Utf8Array input) {
- return getMd5(input.getBytes());
+ public static String getMd5(AbstractUtf8Array input) {
+ return getMd5(input.wrap());
}
public static String getMd5(byte[] input) {
@@ -79,6 +80,12 @@ public class ConfigUtils {
return HexDump.toHexString(md5.digest()).toLowerCase();
}
+ public static String getMd5(ByteBuffer input) {
+ MessageDigest md5 = getMd5Instance();
+ md5.update(input);
+ return HexDump.toHexString(md5.digest()).toLowerCase();
+ }
+
private static MessageDigest getMd5Instance() {
try {
return MessageDigest.getInstance("MD5");
diff --git a/config/src/test/java/com/yahoo/config/subscription/impl/JRTConfigRequesterTest.java b/config/src/test/java/com/yahoo/config/subscription/impl/JRTConfigRequesterTest.java
index 1b56e9290b2..bf516bee8a9 100644
--- a/config/src/test/java/com/yahoo/config/subscription/impl/JRTConfigRequesterTest.java
+++ b/config/src/test/java/com/yahoo/config/subscription/impl/JRTConfigRequesterTest.java
@@ -1,9 +1,9 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.subscription.impl;
import com.yahoo.config.subscription.ConfigSourceSet;
-import com.yahoo.foo.SimpletypesConfig;
import com.yahoo.config.subscription.ConfigSubscriber;
+import com.yahoo.foo.SimpletypesConfig;
import com.yahoo.jrt.Request;
import com.yahoo.vespa.config.ConfigKey;
import com.yahoo.vespa.config.ConnectionPool;
@@ -17,11 +17,12 @@ import java.util.Arrays;
import java.util.List;
import java.util.Random;
-import static org.hamcrest.CoreMatchers.is;
+import static com.yahoo.config.subscription.impl.JRTConfigRequester.calculateFailedRequestDelay;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
/**
@@ -36,99 +37,55 @@ public class JRTConfigRequesterTest {
TimingValues timingValues = new TimingValues(defaultTimingValues, random);
// transientFailures and fatalFailures are not set until after delay has been calculated,
- // so 0 is the case for the first failure
- int transientFailures = 0;
- int fatalFailures = 0;
+ // so false is the case for the first failure
+ boolean fatalFailures = false;
boolean configured = false;
// First time failure, not configured
- long delay = JRTConfigRequester.calculateFailedRequestDelay(ErrorType.TRANSIENT,
- transientFailures, fatalFailures, timingValues, configured);
- assertThat(delay, is(timingValues.getUnconfiguredDelay()));
- transientFailures = 5;
- delay = JRTConfigRequester.calculateFailedRequestDelay(ErrorType.TRANSIENT,
- transientFailures, fatalFailures, timingValues, configured);
- assertThat(delay, is((transientFailures + 1) * timingValues.getUnconfiguredDelay()));
- transientFailures = 0;
-
-
- delay = JRTConfigRequester.calculateFailedRequestDelay(ErrorType.FATAL,
- transientFailures, fatalFailures, timingValues, configured);
- assertTrue(delay > (1 - JRTConfigRequester.randomFraction) * timingValues.getFixedDelay());
- assertTrue(delay < (1 + JRTConfigRequester.randomFraction) * timingValues.getFixedDelay());
- assertThat(delay, is(5462L));
+ long delay = calculateFailedRequestDelay(ErrorType.TRANSIENT, fatalFailures, timingValues, configured);
+ assertTransientDelay(timingValues.getUnconfiguredDelay(), delay);
+ delay = calculateFailedRequestDelay(ErrorType.TRANSIENT, fatalFailures, timingValues, configured);
+ assertTransientDelay(timingValues.getUnconfiguredDelay(), delay);
+
+
+ delay = calculateFailedRequestDelay(ErrorType.FATAL, fatalFailures, timingValues, configured);
+ assertTrue("delay=" + delay, delay > (1 - JRTConfigRequester.randomFraction) * timingValues.getFixedDelay());
+ assertTrue("delay=" + delay,delay < (1 + JRTConfigRequester.randomFraction) * timingValues.getFixedDelay());
+ assertEquals(4481, delay);
// First time failure, configured
configured = true;
+ delay = calculateFailedRequestDelay(ErrorType.TRANSIENT, fatalFailures, timingValues, configured);
+ assertTransientDelay(timingValues.getConfiguredErrorDelay(), delay);
- delay = JRTConfigRequester.calculateFailedRequestDelay(ErrorType.TRANSIENT,
- transientFailures, fatalFailures, timingValues, configured);
- assertThat(delay, is(timingValues.getConfiguredErrorDelay()));
-
- delay = JRTConfigRequester.calculateFailedRequestDelay(ErrorType.FATAL,
- transientFailures, fatalFailures, timingValues, configured);
+ delay = calculateFailedRequestDelay(ErrorType.FATAL, fatalFailures, timingValues, configured);
assertTrue(delay > (1 - JRTConfigRequester.randomFraction) * timingValues.getFixedDelay());
assertTrue(delay < (1 + JRTConfigRequester.randomFraction) * timingValues.getFixedDelay());
- assertThat(delay, is(5663L));
+ assertEquals(5275, delay);
// nth time failure, not configured
- fatalFailures = 1;
+ fatalFailures = true;
configured = false;
- delay = JRTConfigRequester.calculateFailedRequestDelay(ErrorType.TRANSIENT,
- transientFailures, fatalFailures, timingValues, configured);
- assertThat(delay, is(timingValues.getUnconfiguredDelay()));
- delay = JRTConfigRequester.calculateFailedRequestDelay(ErrorType.FATAL,
- transientFailures, fatalFailures, timingValues, configured);
+ delay = calculateFailedRequestDelay(ErrorType.TRANSIENT, fatalFailures, timingValues, configured);
+ assertTransientDelay(timingValues.getUnconfiguredDelay(), delay);
+ delay = calculateFailedRequestDelay(ErrorType.FATAL, fatalFailures, timingValues, configured);
final long l = timingValues.getFixedDelay() + timingValues.getUnconfiguredDelay();
assertTrue(delay > (1 - JRTConfigRequester.randomFraction) * l);
assertTrue(delay < (1 + JRTConfigRequester.randomFraction) * l);
- assertThat(delay, is(5377L));
+ assertEquals(6121, delay);
// nth time failure, configured
- fatalFailures = 1;
+ fatalFailures = true;
configured = true;
- delay = JRTConfigRequester.calculateFailedRequestDelay(ErrorType.TRANSIENT,
- transientFailures, fatalFailures, timingValues, configured);
- assertThat(delay, is(timingValues.getConfiguredErrorDelay()));
- delay = JRTConfigRequester.calculateFailedRequestDelay(ErrorType.FATAL,
- transientFailures, fatalFailures, timingValues, configured);
+ delay = calculateFailedRequestDelay(ErrorType.TRANSIENT, fatalFailures, timingValues, configured);
+ assertTransientDelay(timingValues.getConfiguredErrorDelay(), delay);
+ delay = calculateFailedRequestDelay(ErrorType.FATAL, fatalFailures, timingValues, configured);
final long l1 = timingValues.getFixedDelay() + timingValues.getConfiguredErrorDelay();
assertTrue(delay > (1 - JRTConfigRequester.randomFraction) * l1);
assertTrue(delay < (1 + JRTConfigRequester.randomFraction) * l1);
- assertThat(delay, is(20851L));
-
-
- // 1 more than max delay multiplier time failure, configured
- fatalFailures = timingValues.getMaxDelayMultiplier() + 1;
- configured = true;
- delay = JRTConfigRequester.calculateFailedRequestDelay(ErrorType.TRANSIENT,
- transientFailures, fatalFailures, timingValues, configured);
- assertThat(delay, is(timingValues.getConfiguredErrorDelay()));
- assertTrue(delay < timingValues.getMaxDelayMultiplier() * timingValues.getConfiguredErrorDelay());
- delay = JRTConfigRequester.calculateFailedRequestDelay(ErrorType.FATAL,
- transientFailures, fatalFailures, timingValues, configured);
- final long l2 = timingValues.getFixedDelay() + timingValues.getMaxDelayMultiplier() * timingValues.getConfiguredErrorDelay();
- assertTrue(delay > (1 - JRTConfigRequester.randomFraction) * l2);
- assertTrue(delay < (1 + JRTConfigRequester.randomFraction) * l2);
- assertThat(delay, is(163520L));
- }
-
- @Test
- public void testDelay() {
- TimingValues timingValues = new TimingValues();
-
- // transientFailures and fatalFailures are not set until after delay has been calculated,
- // so 0 is the case for the first failure
- int transientFailures = 0;
- int fatalFailures = 0;
-
- // First time failure, configured
- long delay = JRTConfigRequester.calculateFailedRequestDelay(ErrorType.TRANSIENT,
- transientFailures, fatalFailures, timingValues, true);
- assertThat(delay, is(timingValues.getConfiguredErrorDelay()));
- assertThat(delay, is((transientFailures + 1) * timingValues.getConfiguredErrorDelay()));
+ assertEquals(20780, delay);
}
@Test
@@ -139,10 +96,10 @@ public class JRTConfigRequesterTest {
ErrorCode.ILLEGAL_DEF_MD5, ErrorCode.ILLEGAL_CONFIG_MD5, ErrorCode.ILLEGAL_TIMEOUT, ErrorCode.INTERNAL_ERROR,
9999); // unknown should also be fatal
for (Integer i : transientErrors) {
- assertThat(ErrorType.getErrorType(i), is(ErrorType.TRANSIENT));
+ assertEquals(ErrorType.TRANSIENT, ErrorType.getErrorType(i));
}
for (Integer i : fatalErrors) {
- assertThat(ErrorType.getErrorType(i), is(ErrorType.FATAL));
+ assertEquals(ErrorType.FATAL, ErrorType.getErrorType(i));
}
}
@@ -154,16 +111,15 @@ public class JRTConfigRequesterTest {
final MockConnection connection = new MockConnection();
JRTConfigRequester requester = new JRTConfigRequester(connection, timingValues);
- assertThat(requester.getConnectionPool(), is(connection));
+ assertEquals(requester.getConnectionPool(), connection);
requester.request(sub);
final Request request = connection.getRequest();
assertNotNull(request);
- assertThat(connection.getNumberOfRequests(), is(1));
+ assertEquals(1, connection.getNumberOfRequests());
JRTServerConfigRequestV3 receivedRequest = JRTServerConfigRequestV3.createFromRequest(request);
assertTrue(receivedRequest.validateParameters());
- assertThat(receivedRequest.getTimeout(), is(timingValues.getSubscribeTimeout()));
- assertThat(requester.getFatalFailures(), is(0));
- assertThat(requester.getTransientFailures(), is(0));
+ assertEquals(timingValues.getSubscribeTimeout(), receivedRequest.getTimeout());
+ assertFalse(requester.getFatalFailures());
}
@Test
@@ -175,8 +131,7 @@ public class JRTConfigRequesterTest {
JRTConfigRequester requester = new JRTConfigRequester(connection, timingValues);
requester.request(createSubscription(subscriber, timingValues));
waitUntilResponse(connection);
- assertThat(requester.getFatalFailures(), is(1));
- assertThat(requester.getTransientFailures(), is(0));
+ assertTrue(requester.getFatalFailures());
}
@Test
@@ -190,8 +145,7 @@ public class JRTConfigRequesterTest {
JRTConfigRequester requester = new JRTConfigRequester(connection, timingValues);
requester.request(sub);
waitUntilResponse(connection);
- assertThat(requester.getFatalFailures(), is(1));
- assertThat(requester.getTransientFailures(), is(0));
+ assertTrue(requester.getFatalFailures());
}
@Test
@@ -203,8 +157,7 @@ public class JRTConfigRequesterTest {
JRTConfigRequester requester = new JRTConfigRequester(connection, timingValues);
requester.request(createSubscription(subscriber, timingValues));
waitUntilResponse(connection);
- assertThat(requester.getFatalFailures(), is(0));
- assertThat(requester.getTransientFailures(), is(1));
+ assertFalse(requester.getFatalFailures());
}
@Test
@@ -218,8 +171,7 @@ public class JRTConfigRequesterTest {
JRTConfigRequester requester = new JRTConfigRequester(connection, timingValues);
requester.request(sub);
waitUntilResponse(connection);
- assertThat(requester.getFatalFailures(), is(0));
- assertThat(requester.getTransientFailures(), is(1));
+ assertFalse(requester.getFatalFailures());
}
@Test
@@ -231,12 +183,10 @@ public class JRTConfigRequesterTest {
final MockConnection connection = new MockConnection(new ErrorResponseHandler(ErrorCode.UNKNOWN_DEFINITION));
JRTConfigRequester requester = new JRTConfigRequester(connection, timingValues);
- assertThat(requester.getConnectionPool(), is(connection));
+ assertEquals(requester.getConnectionPool(), connection);
requester.request(sub);
waitUntilResponse(connection);
- assertThat(requester.getFatalFailures(), is(1));
- assertThat(requester.getTransientFailures(), is(0));
- // TODO Check that no further request was sent?
+ assertTrue(requester.getFatalFailures());
}
@Test
@@ -249,14 +199,14 @@ public class JRTConfigRequesterTest {
final MockConnection connection = new MockConnection(new MockConnection.OKResponseHandler());
JRTConfigRequester requester = new JRTConfigRequester(connection, timingValues);
requester.request(sub);
- assertThat(connection.getNumberOfRequests(), is(1));
+ assertEquals(1, connection.getNumberOfRequests());
// Check that no further request was sent?
try {
Thread.sleep(timingValues.getFixedDelay()*2);
} catch (InterruptedException e) {
e.printStackTrace();
}
- assertThat(connection.getNumberOfRequests(), is(1));
+ assertEquals(1, connection.getNumberOfRequests());
}
@Test
@@ -308,8 +258,7 @@ public class JRTConfigRequesterTest {
2000, // subscribeTimeout
250, // unconfiguredDelay
500, // configuredErrorDelay
- 250, // fixedDelay
- 5); // maxDelayMultiplier
+ 250); // fixedDelay
}
private static class ErrorResponseHandler extends MockConnection.OKResponseHandler {
@@ -371,4 +320,10 @@ public class JRTConfigRequesterTest {
requester2.close();
}
+ private void assertTransientDelay(long maxDelay, long delay) {
+ long minDelay = 0;
+ assertTrue("delay=" + delay + ", minDelay=" + minDelay + ",maxDelay=" + maxDelay,
+ delay >= minDelay && delay <= maxDelay);
+ }
+
}
diff --git a/config/src/test/java/com/yahoo/vespa/config/protocol/ConfigResponseTest.java b/config/src/test/java/com/yahoo/vespa/config/protocol/ConfigResponseTest.java
index a56c7ef2daa..c53a6b5c73d 100644
--- a/config/src/test/java/com/yahoo/vespa/config/protocol/ConfigResponseTest.java
+++ b/config/src/test/java/com/yahoo/vespa/config/protocol/ConfigResponseTest.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.config.protocol;
import com.yahoo.foo.SimpletypesConfig;
+import com.yahoo.text.AbstractUtf8Array;
import com.yahoo.text.Utf8Array;
import com.yahoo.vespa.config.ConfigPayload;
import com.yahoo.vespa.config.LZ4PayloadCompressor;
@@ -11,10 +12,8 @@ import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
-import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertThat;
/**
* @author Ulf Lilleengen
@@ -29,20 +28,20 @@ public class ConfigResponseTest {
response.serialize(baos, CompressionType.UNCOMPRESSED);
String payload = baos.toString(StandardCharsets.UTF_8);
assertNotNull(payload);
- assertEquals("{\"boolval\":false,\"doubleval\":0.0,\"enumval\":\"VAL1\",\"intval\":0,\"longval\":0,\"stringval\":\"s\"}", payload);
- assertThat(response.getGeneration(), is(3L));
- assertThat(response.getConfigMd5(), is("mymd5"));
+ assertEquals("{\"boolval\":false,\"doubleval\":0.0,\"enumval\":\"VAL1\",\"intval\":0,\"longval\":0,\"stringval\":\"s\"}", payload.toString());
+ assertEquals(response.getGeneration(), 3L);
+ assertEquals(response.getConfigMd5(), "mymd5");
baos = new ByteArrayOutputStream();
response.serialize(baos, CompressionType.UNCOMPRESSED);
- assertThat(baos.toString(), is("{\"boolval\":false,\"doubleval\":0.0,\"enumval\":\"VAL1\",\"intval\":0,\"longval\":0,\"stringval\":\"s\"}"));
+ assertEquals(baos.toString(),"{\"boolval\":false,\"doubleval\":0.0,\"enumval\":\"VAL1\",\"intval\":0,\"longval\":0,\"stringval\":\"s\"}");
}
@Test
public void require_that_slime_response_decompresses_on_serialize() throws IOException {
ConfigPayload configPayload = ConfigPayload.fromInstance(new SimpletypesConfig(new SimpletypesConfig.Builder()));
- Utf8Array data = configPayload.toUtf8Array(true);
- Utf8Array bytes = new Utf8Array(new LZ4PayloadCompressor().compress(data.getBytes()));
+ AbstractUtf8Array data = configPayload.toUtf8Array(true);
+ Utf8Array bytes = new Utf8Array(new LZ4PayloadCompressor().compress(data.wrap()));
ConfigResponse response = new SlimeConfigResponse(bytes, 3, false, "mymd5", CompressionInfo.create(CompressionType.LZ4, data.getByteLength()));
ByteArrayOutputStream baos = new ByteArrayOutputStream();
response.serialize(baos, CompressionType.UNCOMPRESSED);
@@ -51,7 +50,7 @@ public class ConfigResponseTest {
baos = new ByteArrayOutputStream();
response.serialize(baos, CompressionType.UNCOMPRESSED);
- assertThat(baos.toString(), is("{\"boolval\":false,\"doubleval\":0.0,\"enumval\":\"VAL1\",\"intval\":0,\"longval\":0,\"stringval\":\"s\"}"));
+ assertEquals(baos.toString(), "{\"boolval\":false,\"doubleval\":0.0,\"enumval\":\"VAL1\",\"intval\":0,\"longval\":0,\"stringval\":\"s\"}");
}
}
diff --git a/configserver-client/src/main/java/ai/vespa/hosted/client/AbstractConfigServerClient.java b/configserver-client/src/main/java/ai/vespa/hosted/client/AbstractConfigServerClient.java
index 81970d47bc7..608e9fe18b4 100644
--- a/configserver-client/src/main/java/ai/vespa/hosted/client/AbstractConfigServerClient.java
+++ b/configserver-client/src/main/java/ai/vespa/hosted/client/AbstractConfigServerClient.java
@@ -23,7 +23,6 @@ import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.function.BiFunction;
-import java.util.function.Consumer;
import java.util.function.Function;
import java.util.logging.Logger;
@@ -44,7 +43,7 @@ public abstract class AbstractConfigServerClient implements ConfigServerClient {
/** Executes the given request with response/error handling and retries. */
private <T> T execute(RequestBuilder builder,
BiFunction<ClassicHttpResponse, ClassicHttpRequest, T> handler,
- Consumer<IOException> catcher) {
+ ExceptionHandler catcher) {
HttpClientContext context = HttpClientContext.create();
context.setRequestConfig(builder.config);
@@ -57,8 +56,8 @@ public abstract class AbstractConfigServerClient implements ConfigServerClient {
return handler.apply(execute(request, context), request);
}
catch (IOException e) {
- catcher.accept(e);
- throw new UncheckedIOException(e); // Throw unchecked if catcher doesn't throw.
+ catcher.handle(e, request);
+ throw RetryException.wrap(e, request);
}
}
catch (RetryException e) {
@@ -118,7 +117,7 @@ public abstract class AbstractConfigServerClient implements ConfigServerClient {
private HttpEntity entity;
private RequestConfig config = ConfigServerClient.defaultRequestConfig;
private ResponseVerifier verifier = ConfigServerClient.throwOnError;
- private Consumer<IOException> catcher = ConfigServerClient.retryAll;
+ private ExceptionHandler catcher = ConfigServerClient.retryAll;
private RequestBuilder(HostStrategy hosts, Method method) {
if ( ! hosts.iterator().hasNext())
@@ -181,7 +180,7 @@ public abstract class AbstractConfigServerClient implements ConfigServerClient {
}
@Override
- public RequestBuilder catching(Consumer<IOException> catcher) {
+ public RequestBuilder catching(ExceptionHandler catcher) {
this.catcher = requireNonNull(catcher);
return this;
}
@@ -244,8 +243,8 @@ public abstract class AbstractConfigServerClient implements ConfigServerClient {
e.addSuppressed(f);
}
if (e instanceof IOException) {
- catcher.accept((IOException) e);
- throw new UncheckedIOException((IOException) e);
+ catcher.handle((IOException) e, request);
+ throw RetryException.wrap((IOException) e, request);
}
else
sneakyThrow(e); // e is a runtime exception or an error, so this is fine.
diff --git a/configserver-client/src/main/java/ai/vespa/hosted/client/ConfigServerClient.java b/configserver-client/src/main/java/ai/vespa/hosted/client/ConfigServerClient.java
index c92acd7cd0b..d5a4153fb8d 100644
--- a/configserver-client/src/main/java/ai/vespa/hosted/client/ConfigServerClient.java
+++ b/configserver-client/src/main/java/ai/vespa/hosted/client/ConfigServerClient.java
@@ -20,7 +20,6 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
-import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.IntStream;
@@ -39,10 +38,8 @@ public interface ConfigServerClient extends Closeable {
.setRedirectsEnabled(false)
.build();
- /** Wraps with a {@link RetryException} and rethrows. */
- Consumer<IOException> retryAll = (e) -> {
- throw new RetryException(e);
- };
+ /** Does nothing, letting the client wrap with a {@link RetryException} and re-throw. */
+ ExceptionHandler retryAll = (exception, request) -> { };
/** Throws a a {@link RetryException} if {@code statusCode == 503}, or a {@link ResponseException} unless {@code 200 <= statusCode < 300}. */
ResponseVerifier throwOnError = new DefaultResponseVerifier() { };
@@ -101,15 +98,15 @@ public interface ConfigServerClient extends Closeable {
* Sets the catch clause for {@link IOException}s during execution of this.
* The default is to wrap the IOException in a {@link RetryException} and rethrow this;
* this makes the client retry the request, as long as there are remaining entries in the {@link HostStrategy}.
- * If the catcher returns normally, the {@link IOException} is unchecked and thrown instead.
+ * If the catcher returns normally, the exception is wrapped and retried, as per the default.
*/
- RequestBuilder catching(Consumer<IOException> catcher);
+ RequestBuilder catching(ExceptionHandler catcher);
/**
* Sets the (error) response handler for this request. The default is {@link #throwOnError}.
* When the handler returns normally, the response is treated as a success, and passed on to a response mapper.
*/
- RequestBuilder throwing(ResponseVerifier handler);
+ RequestBuilder throwing(ResponseVerifier handler);
/** Reads the response as a {@link String}, or throws if unsuccessful. */
String read();
@@ -203,6 +200,19 @@ public interface ConfigServerClient extends Closeable {
}
+ @FunctionalInterface
+ interface ExceptionHandler {
+
+ /**
+ * Called with any IO exception that might occur when attempting to send the request.
+ * To retry, wrap the exception with a {@link RetryException} and re-throw, or exit normally.
+ * Any other thrown exception will propagate out to the caller.
+ */
+ void handle(IOException exception, ClassicHttpRequest request);
+
+ }
+
+
/** What host(s) to try for a request, in what order. A host may be specified multiple times, for retries. */
@FunctionalInterface
interface HostStrategy extends Iterable<URI> {
@@ -240,6 +250,10 @@ public interface ConfigServerClient extends Closeable {
super(requireNonNull(cause));
}
+ static RetryException wrap(IOException exception, ClassicHttpRequest request) {
+ return new RetryException(new UncheckedIOException(request + " failed (" + exception.getClass().getSimpleName() + ")", exception));
+ }
+
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
index 9c6786886ef..d1cf011d33a 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
@@ -1082,15 +1082,15 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye
}
RefeedActions refeedActions = actions.getRefeedActions();
if ( ! refeedActions.isEmpty()) {
- logger.log(Level.WARNING,
- "Change(s) between active and new application that may require re-feed:\n" +
- refeedActions.format());
+ logger.logApplicationPackage(Level.WARNING,
+ "Change(s) between active and new application that may require re-feed:\n" +
+ refeedActions.format());
}
ReindexActions reindexActions = actions.getReindexActions();
if ( ! reindexActions.isEmpty()) {
- logger.log(Level.WARNING,
- "Change(s) between active and new application that may require re-index:\n" +
- reindexActions.format());
+ logger.logApplicationPackage(Level.WARNING,
+ "Change(s) between active and new application that may require re-index:\n" +
+ reindexActions.format());
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java b/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java
index 62f3e40cb50..772c2bf5125 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java
@@ -16,22 +16,25 @@ import com.yahoo.yolean.Exceptions;
import java.time.Duration;
import java.time.Instant;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
+import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
-import java.util.Optional;
+import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
import java.util.logging.Level;
import java.util.logging.Logger;
-import java.util.stream.Collectors;
import static com.yahoo.vespa.config.server.ConfigServerBootstrap.Mode.BOOTSTRAP_IN_CONSTRUCTOR;
+import static com.yahoo.vespa.config.server.ConfigServerBootstrap.Mode.FOR_TESTING_NO_BOOTSTRAP_OF_APPS;
import static com.yahoo.vespa.config.server.ConfigServerBootstrap.RedeployingApplicationsFails.CONTINUE;
import static com.yahoo.vespa.config.server.ConfigServerBootstrap.RedeployingApplicationsFails.EXIT_JVM;
@@ -51,8 +54,7 @@ public class ConfigServerBootstrap extends AbstractComponent implements Runnable
private static final Logger log = Logger.getLogger(ConfigServerBootstrap.class.getName());
- // INITIALIZE_ONLY is for testing only
- enum Mode { BOOTSTRAP_IN_CONSTRUCTOR, BOOTSTRAP_IN_SEPARATE_THREAD, INITIALIZE_ONLY }
+ enum Mode { BOOTSTRAP_IN_CONSTRUCTOR, FOR_TESTING_NO_BOOTSTRAP_OF_APPS}
enum RedeployingApplicationsFails { EXIT_JVM, CONTINUE }
enum VipStatusMode { VIP_STATUS_FILE, VIP_STATUS_PROGRAMMATICALLY }
@@ -66,7 +68,6 @@ public class ConfigServerBootstrap extends AbstractComponent implements Runnable
private final Duration sleepTimeWhenRedeployingFails;
private final RedeployingApplicationsFails exitIfRedeployingApplicationsFails;
private final ExecutorService rpcServerExecutor;
- private final Optional<ExecutorService> bootstrapExecutor;
@Inject
public ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server,
@@ -79,8 +80,9 @@ public class ConfigServerBootstrap extends AbstractComponent implements Runnable
// For testing only
ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server, VersionState versionState,
- StateMonitor stateMonitor, VipStatus vipStatus, Mode mode, VipStatusMode vipStatusMode) {
- this(applicationRepository, server, versionState, stateMonitor, vipStatus, mode, CONTINUE, vipStatusMode);
+ StateMonitor stateMonitor, VipStatus vipStatus, VipStatusMode vipStatusMode) {
+ this(applicationRepository, server, versionState, stateMonitor, vipStatus,
+ FOR_TESTING_NO_BOOTSTRAP_OF_APPS, CONTINUE, vipStatusMode);
}
private ConfigServerBootstrap(ApplicationRepository applicationRepository, RpcServer server,
@@ -102,16 +104,10 @@ public class ConfigServerBootstrap extends AbstractComponent implements Runnable
initializing(vipStatusMode);
switch (mode) {
- case BOOTSTRAP_IN_SEPARATE_THREAD:
- bootstrapExecutor = Optional.of(Executors.newSingleThreadExecutor(new DaemonThreadFactory("config server bootstrap")));
- bootstrapExecutor.get().execute(this);
- break;
case BOOTSTRAP_IN_CONSTRUCTOR:
- bootstrapExecutor = Optional.empty();
start();
break;
- case INITIALIZE_ONLY:
- bootstrapExecutor = Optional.empty();
+ case FOR_TESTING_NO_BOOTSTRAP_OF_APPS:
break;
default:
throw new IllegalArgumentException("Unknown bootstrap mode " + mode + ", legal values: " + Arrays.toString(Mode.values()));
@@ -125,7 +121,6 @@ public class ConfigServerBootstrap extends AbstractComponent implements Runnable
server.stop();
log.log(Level.FINE, "RPC server stopped");
rpcServerExecutor.shutdown();
- bootstrapExecutor.ifPresent(ExecutorService::shutdownNow);
}
@Override
@@ -230,9 +225,9 @@ public class ConfigServerBootstrap extends AbstractComponent implements Runnable
private List<ApplicationId> redeployApplications(List<ApplicationId> applicationIds) throws InterruptedException {
ExecutorService executor = Executors.newFixedThreadPool(configserverConfig.numRedeploymentThreads(),
new DaemonThreadFactory("redeploy-apps-"));
- // Keep track of deployment per application
+ // Keep track of deployment status per application
Map<ApplicationId, Future<?>> deployments = new HashMap<>();
- log.log(Level.INFO, () -> "Redeploying " + applicationIds);
+ log.log(Level.INFO, () -> "Redeploying " + applicationIds.size() + " apps: " + applicationIds);
applicationIds.forEach(appId -> deployments.put(appId, executor.submit(() -> {
log.log(Level.INFO, () -> "Starting redeployment of " + appId);
applicationRepository.deployFromLocalActive(appId, true /* bootstrap */)
@@ -240,32 +235,65 @@ public class ConfigServerBootstrap extends AbstractComponent implements Runnable
log.log(Level.INFO, () -> appId + " redeployed");
})));
- List<ApplicationId> failedDeployments =
- deployments.entrySet().stream()
- .map(entry -> checkDeployment(entry.getKey(), entry.getValue()))
- .filter(Optional::isPresent)
- .map(Optional::get)
- .collect(Collectors.toList());
+ List<ApplicationId> failedDeployments = checkDeployments(deployments);
executor.shutdown();
executor.awaitTermination(365, TimeUnit.DAYS); // Timeout should never happen
+
return failedDeployments;
}
- // Returns an application id if deployment failed
- private Optional<ApplicationId> checkDeployment(ApplicationId applicationId, Future<?> future) {
+ private enum DeploymentStatus { inProgress, done, failed};
+
+ private List<ApplicationId> checkDeployments(Map<ApplicationId, Future<?>> deployments) {
+ int applicationCount = deployments.size();
+ Set<ApplicationId> failedDeployments = new LinkedHashSet<>();
+ Set<ApplicationId> finishedDeployments = new LinkedHashSet<>();
+ Instant lastLogged = Instant.EPOCH;
+
+ do {
+ deployments.forEach((applicationId, future) -> {
+ if (finishedDeployments.contains(applicationId) || failedDeployments.contains(applicationId)) return;
+
+ DeploymentStatus status = getDeploymentStatus(applicationId, future);
+ switch (status) {
+ case done:
+ finishedDeployments.add(applicationId);
+ break;
+ case inProgress:
+ break;
+ case failed:
+ failedDeployments.add(applicationId);
+ break;
+ default:
+ throw new IllegalArgumentException("Unknown deployment status " + status);
+ }
+ });
+ if ( ! Duration.between(lastLogged, Instant.now()).minus(Duration.ofSeconds(10)).isNegative()) {
+ log.log(Level.INFO, () -> finishedDeployments.size() + " of " + applicationCount + " apps redeployed " +
+ "(" + failedDeployments.size() + " failed)");
+ lastLogged = Instant.now();
+ }
+ } while (failedDeployments.size() + finishedDeployments.size() < applicationCount);
+
+ return new ArrayList<>(failedDeployments);
+ }
+
+ private DeploymentStatus getDeploymentStatus(ApplicationId applicationId, Future<?> future) {
try {
- future.get();
+ future.get(1, TimeUnit.MILLISECONDS);
+ return DeploymentStatus.done;
} catch (ExecutionException | InterruptedException e) {
if (e.getCause() instanceof TransientException) {
log.log(Level.INFO, "Redeploying " + applicationId +
" failed with transient error, will retry after bootstrap: " + Exceptions.toMessageString(e));
} else {
log.log(Level.WARNING, "Redeploying " + applicationId + " failed, will retry", e);
- return Optional.of(applicationId);
}
+ return DeploymentStatus.failed;
+ } catch (TimeoutException e) {
+ return DeploymentStatus.inProgress;
}
- return Optional.empty();
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelController.java b/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelController.java
index 0bddd8d0637..00d010e75c8 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelController.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelController.java
@@ -5,7 +5,6 @@ import com.yahoo.config.codegen.DefParser;
import com.yahoo.config.model.api.ConfigDefinitionRepo;
import com.yahoo.vespa.config.ConfigDefinitionKey;
import com.yahoo.vespa.config.ConfigKey;
-import com.yahoo.vespa.config.ConfigPayload;
import com.yahoo.vespa.config.GetConfigRequest;
import com.yahoo.vespa.config.buildergen.ConfigDefinition;
import com.yahoo.vespa.config.protocol.ConfigResponse;
@@ -45,8 +44,7 @@ public class SuperModelController {
public ConfigResponse resolveConfig(GetConfigRequest request) {
ConfigKey<?> configKey = request.getConfigKey();
validateConfigDefinition(request.getConfigKey(), request.getDefContent());
- ConfigPayload payload = model.getConfig(configKey);
- return responseFactory.createResponse(payload, generation, false);
+ return responseFactory.createResponse(model.getConfig(configKey).toUtf8Array(true), generation, false);
}
private void validateConfigDefinition(ConfigKey<?> configKey, DefContent defContent) {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/application/Application.java b/configserver/src/main/java/com/yahoo/vespa/config/server/application/Application.java
index baded957475..db3761a97f7 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/application/Application.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/application/Application.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.server.application;
+import com.yahoo.collections.Pair;
import com.yahoo.component.Version;
import com.yahoo.config.ConfigInstance;
import com.yahoo.config.ConfigurationRuntimeException;
@@ -8,6 +9,8 @@ import com.yahoo.config.model.api.ApplicationInfo;
import com.yahoo.config.model.api.Model;
import com.yahoo.config.provision.ApplicationId;
import java.util.logging.Level;
+
+import com.yahoo.text.AbstractUtf8Array;
import com.yahoo.vespa.config.ConfigCacheKey;
import com.yahoo.vespa.config.ConfigDefinitionKey;
import com.yahoo.vespa.config.ConfigKey;
@@ -123,45 +126,52 @@ public class Application implements ModelResult {
}
log.log(Level.FINE, () -> TenantRepository.logPre(getId()) + ("Resolving " + configKey + " with config definition " + def));
- ConfigInstance.Builder builder;
- ConfigPayload payload;
- boolean applyOnRestart = false;
+
+
+ var payload = createPayload(configKey, def);
+ ConfigResponse configResponse = responseFactory.createResponse(payload.getFirst(),
+ applicationGeneration,
+ payload.getSecond());
+ metricUpdater.incrementProcTime(System.currentTimeMillis() - start);
+ if (useCache(req)) {
+ cache.put(cacheKey, configResponse, configResponse.getConfigMd5());
+ metricUpdater.setCacheConfigElems(cache.configElems());
+ metricUpdater.setCacheChecksumElems(cache.checkSumElems());
+ }
+ return configResponse;
+ }
+
+ private Pair<AbstractUtf8Array, Boolean> createPayload(ConfigKey<?> configKey, ConfigDefinition def) {
try {
- builder = model.getConfigInstance(configKey, def);
+ ConfigInstance.Builder builder = model.getConfigInstance(configKey, def);
+ boolean tempApplyOnRestart = builder.getApplyOnRestart();
if (builder instanceof GenericConfig.GenericConfigBuilder) {
- payload = ((GenericConfig.GenericConfigBuilder) builder).getPayload();
- applyOnRestart = builder.getApplyOnRestart();
+ return new Pair<>(((GenericConfig.GenericConfigBuilder) builder).getPayload().toUtf8Array(true),
+ tempApplyOnRestart);
}
else {
+ String cacheBuilderClassNameForErrorReport = builder.getClass().getName();
+ ConfigPayload payload;
+ boolean applyOnRestart = false;
try {
ConfigInstance instance = ConfigInstanceBuilder.buildInstance(builder, def.getCNode());
payload = ConfigPayload.fromInstance(instance);
- applyOnRestart = builder.getApplyOnRestart();
+ applyOnRestart = tempApplyOnRestart;
} catch (ConfigurationRuntimeException e) {
// This can happen in cases where services ask for config that no longer exist before they have been able
// to reconfigure themselves
log.log(Level.INFO, TenantRepository.logPre(getId()) +
- ": Error resolving instance for builder '" + builder.getClass().getName() +
- "', returning empty config: " + Exceptions.toMessageString(e));
+ ": Error resolving instance for builder '" + cacheBuilderClassNameForErrorReport +
+ "', returning empty config: " + Exceptions.toMessageString(e));
payload = ConfigPayload.fromBuilder(new ConfigPayloadBuilder());
}
if (def.getCNode() != null)
payload.applyDefaultsFromDef(def.getCNode());
+ return new Pair<>(payload.toUtf8Array(true), applyOnRestart);
}
} catch (Exception e) {
throw new ConfigurationRuntimeException("Unable to get config for " + app, e);
}
-
- ConfigResponse configResponse = responseFactory.createResponse(payload,
- applicationGeneration,
- applyOnRestart);
- metricUpdater.incrementProcTime(System.currentTimeMillis() - start);
- if (useCache(req)) {
- cache.put(cacheKey, configResponse, configResponse.getConfigMd5());
- metricUpdater.setCacheConfigElems(cache.configElems());
- metricUpdater.setCacheChecksumElems(cache.checkSumElems());
- }
- return configResponse;
}
private boolean useCache(GetConfigRequest request) {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
index 8c2be6a5b07..16bcca2ea9c 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
@@ -28,7 +28,6 @@ import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.Zone;
import com.yahoo.container.jdisc.secretstore.SecretStore;
import com.yahoo.vespa.config.server.tenant.SecretStoreExternalIdRetriever;
-import com.yahoo.vespa.flags.BooleanFlag;
import com.yahoo.vespa.flags.FetchVector;
import com.yahoo.vespa.flags.FlagSource;
import com.yahoo.vespa.flags.Flags;
@@ -163,17 +162,13 @@ public class ModelContextImpl implements ModelContext {
private final String feedSequencer;
private final String responseSequencer;
private final int numResponseThreads;
- private final int maxPendingMoveOps;
private final boolean skipCommunicationManagerThread;
private final boolean skipMbusRequestThread;
private final boolean skipMbusReplyThread;
private final boolean useAsyncMessageHandlingOnSchedule;
private final double feedConcurrency;
- private final boolean useBucketExecutorForLidSpaceCompact;
- private final boolean useBucketExecutorForBucketMove;
+ private final boolean useBucketExecutorForPruneRemoved;
private final boolean enableFeedBlockInDistributor;
- private final double maxDeadBytesRatio;
- private final int clusterControllerMaxHeapSizeInMb;
private final ToIntFunction<ClusterSpec.Type> metricsProxyMaxHeapSizeInMb;
private final List<String> allowedAthenzProxyIdentities;
private final boolean tenantIamRole;
@@ -190,17 +185,13 @@ public class ModelContextImpl implements ModelContext {
this.feedSequencer = flagValue(source, appId, Flags.FEED_SEQUENCER_TYPE);
this.responseSequencer = flagValue(source, appId, Flags.RESPONSE_SEQUENCER_TYPE);
this.numResponseThreads = flagValue(source, appId, Flags.RESPONSE_NUM_THREADS);
- this.maxPendingMoveOps = flagValue(source, appId, Flags.MAX_PENDING_MOVE_OPS);
this.skipCommunicationManagerThread = flagValue(source, appId, Flags.SKIP_COMMUNICATIONMANAGER_THREAD);
this.skipMbusRequestThread = flagValue(source, appId, Flags.SKIP_MBUS_REQUEST_THREAD);
this.skipMbusReplyThread = flagValue(source, appId, Flags.SKIP_MBUS_REPLY_THREAD);
this.useAsyncMessageHandlingOnSchedule = flagValue(source, appId, Flags.USE_ASYNC_MESSAGE_HANDLING_ON_SCHEDULE);
this.feedConcurrency = flagValue(source, appId, Flags.FEED_CONCURRENCY);
- this.useBucketExecutorForLidSpaceCompact = flagValue(source, appId, Flags.USE_BUCKET_EXECUTOR_FOR_LID_SPACE_COMPACT);
- this.useBucketExecutorForBucketMove = flagValue(source, appId, Flags.USE_BUCKET_EXECUTOR_FOR_BUCKET_MOVE);
+ this.useBucketExecutorForPruneRemoved = flagValue(source, appId, Flags.USE_BUCKET_EXECUTOR_FOR_PRUNE_REMOVED);
this.enableFeedBlockInDistributor = flagValue(source, appId, Flags.ENABLE_FEED_BLOCK_IN_DISTRIBUTOR);
- this.maxDeadBytesRatio = flagValue(source, appId, Flags.MAX_DEAD_BYTES_RATIO);
- this.clusterControllerMaxHeapSizeInMb = flagValue(source, appId, Flags.CLUSTER_CONTROLLER_MAX_HEAP_SIZE_IN_MB);
this.metricsProxyMaxHeapSizeInMb = type -> Flags.METRICS_PROXY_MAX_HEAP_SIZE_IN_MB.bindTo(source).with(CLUSTER_TYPE, type.name()).value();
this.allowedAthenzProxyIdentities = flagValue(source, appId, Flags.ALLOWED_ATHENZ_PROXY_IDENTITIES);
this.tenantIamRole = flagValue(source, appId.tenant(), Flags.TENANT_IAM_ROLE);
@@ -217,17 +208,13 @@ public class ModelContextImpl implements ModelContext {
@Override public String feedSequencerType() { return feedSequencer; }
@Override public String responseSequencerType() { return responseSequencer; }
@Override public int defaultNumResponseThreads() { return numResponseThreads; }
- @Override public int maxPendingMoveOps() { return maxPendingMoveOps; }
@Override public boolean skipCommunicationManagerThread() { return skipCommunicationManagerThread; }
@Override public boolean skipMbusRequestThread() { return skipMbusRequestThread; }
@Override public boolean skipMbusReplyThread() { return skipMbusReplyThread; }
@Override public boolean useAsyncMessageHandlingOnSchedule() { return useAsyncMessageHandlingOnSchedule; }
@Override public double feedConcurrency() { return feedConcurrency; }
- @Override public boolean useBucketExecutorForLidSpaceCompact() { return useBucketExecutorForLidSpaceCompact; }
- @Override public boolean useBucketExecutorForBucketMove() { return useBucketExecutorForBucketMove; }
+ @Override public boolean useBucketExecutorForPruneRemoved() { return useBucketExecutorForPruneRemoved; }
@Override public boolean enableFeedBlockInDistributor() { return enableFeedBlockInDistributor; }
- @Override public double maxDeadBytesRatio() { return maxDeadBytesRatio; }
- @Override public int clusterControllerMaxHeapSizeInMb() { return clusterControllerMaxHeapSizeInMb; }
@Override public int metricsProxyMaxHeapSizeInMb(ClusterSpec.Type type) { return metricsProxyMaxHeapSizeInMb.applyAsInt(type); }
@Override public List<String> allowedAthenzProxyIdentities() { return allowedAthenzProxyIdentities; }
@Override public boolean tenantIamRole() { return tenantIamRole; }
@@ -417,7 +404,7 @@ public class ModelContextImpl implements ModelContext {
return new NodeResources(Double.parseDouble(parts[0]),
Double.parseDouble(parts[1]),
Double.parseDouble(parts[2]),
- 0.3,
+ 0.1,
NodeResources.DiskSpeed.any,
NodeResources.StorageType.any);
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/ApplicationFileManager.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/ApplicationFileManager.java
index 2cb94b4bb9e..4152c92c289 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/ApplicationFileManager.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/ApplicationFileManager.java
@@ -1,10 +1,11 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.server.filedistribution;
import com.yahoo.config.FileReference;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
+import java.net.SocketTimeoutException;
import java.net.URL;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;
@@ -44,6 +45,8 @@ public class ApplicationFileManager implements AddFileInterface {
rbc = Channels.newChannel(website.openStream());
fos = new FileOutputStream(file.getAbsolutePath());
fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
+ } catch (SocketTimeoutException e) {
+ throw new IllegalArgumentException("Failed connecting to or reading from " + uri, e);
} catch (IOException e) {
throw new IllegalArgumentException("Failed creating directory " + file.getParent(), e);
} finally {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java
index c123a0c058f..93fabd8d6c0 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java
@@ -34,8 +34,7 @@ public class FileServer {
private static final Logger log = Logger.getLogger(FileServer.class.getName());
private final FileDirectory root;
- private final ExecutorService pushExecutor;
- private final ExecutorService pullExecutor;
+ private final ExecutorService executor;
private final FileDownloader downloader;
private enum FileApiErrorCodes {
@@ -82,10 +81,8 @@ public class FileServer {
public FileServer(File rootDir, FileDownloader fileDownloader) {
this.downloader = fileDownloader;
this.root = new FileDirectory(rootDir);
- this.pushExecutor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()),
- new DaemonThreadFactory("file server push"));
- this.pullExecutor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()),
- new DaemonThreadFactory("file server pull"));
+ this.executor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()),
+ new DaemonThreadFactory("file server push"));
}
boolean hasFile(String fileReference) {
@@ -110,7 +107,7 @@ public class FileServer {
File file = root.getFile(reference);
if (file.exists()) {
- pushExecutor.execute(() -> serveFile(reference, target));
+ serveFile(reference, target);
}
}
@@ -151,7 +148,7 @@ public class FileServer {
}
public void serveFile(String fileReference, boolean downloadFromOtherSourceIfNotFound, Request request, Receiver receiver) {
- pullExecutor.execute(() -> serveFileInternal(fileReference, downloadFromOtherSourceIfNotFound, request, receiver));
+ executor.execute(() -> serveFileInternal(fileReference, downloadFromOtherSourceIfNotFound, request, receiver));
}
private void serveFileInternal(String fileReference, boolean downloadFromOtherSourceIfNotFound, Request request, Receiver receiver) {
@@ -200,8 +197,7 @@ public class FileServer {
public void close() {
downloader.close();
- pullExecutor.shutdown();
- pushExecutor.shutdown();
+ executor.shutdown();
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpErrorResponse.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpErrorResponse.java
index 3961fe12357..bd4231972e7 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpErrorResponse.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpErrorResponse.java
@@ -110,7 +110,7 @@ public class HttpErrorResponse extends HttpResponse {
new JsonFormat(true).encode(stream, slime);
}
- //@Override
+ @Override
public String getContentType() {
return HttpConfigResponse.JSON_CONTENT_TYPE;
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/SessionHandler.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/SessionHandler.java
index fcac023eec3..540a6545383 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/SessionHandler.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/SessionHandler.java
@@ -61,7 +61,11 @@ public class SessionHandler extends HttpHandler {
}
public static TimeoutBudget getTimeoutBudget(HttpRequest request, Duration defaultTimeout) {
- return new TimeoutBudget(Clock.systemUTC(), getRequestTimeout(request, defaultTimeout));
+ return getTimeoutBudget(getRequestTimeout(request, defaultTimeout));
+ }
+
+ public static TimeoutBudget getTimeoutBudget(Duration requestTimeout) {
+ return new TimeoutBudget(Clock.systemUTC(), requestTimeout);
}
/**
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationApiHandler.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationApiHandler.java
index 9ea96b97af3..5b520b10fcf 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationApiHandler.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationApiHandler.java
@@ -8,14 +8,26 @@ import com.yahoo.config.provision.Zone;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.jdisc.application.BindingMatch;
+import com.yahoo.jdisc.http.HttpHeaders;
import com.yahoo.vespa.config.server.ApplicationRepository;
import com.yahoo.vespa.config.server.application.CompressedApplicationInputStream;
+import com.yahoo.vespa.config.server.http.BadRequestException;
import com.yahoo.vespa.config.server.http.SessionHandler;
import com.yahoo.vespa.config.server.http.Utils;
import com.yahoo.vespa.config.server.session.PrepareParams;
import com.yahoo.vespa.config.server.tenant.TenantRepository;
+import org.apache.hc.core5.http.ContentType;
+import org.eclipse.jetty.http.MultiPartFormInputStream;
+import javax.servlet.http.Part;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
import java.time.Duration;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.logging.Level;
+import java.util.stream.Collectors;
import static com.yahoo.vespa.config.server.application.CompressedApplicationInputStream.createFromCompressedStream;
import static com.yahoo.vespa.config.server.http.Utils.checkThatTenantExists;
@@ -32,6 +45,9 @@ public class ApplicationApiHandler extends SessionHandler {
public final static String APPLICATION_X_GZIP = "application/x-gzip";
public final static String APPLICATION_ZIP = "application/zip";
+ public final static String MULTIPART_FORM_DATA = "multipart/form-data";
+ public final static String MULTIPART_PARAMS = "prepareParams";
+ public final static String MULTIPART_APPLICATION_PACKAGE = "applicationPackage";
public final static String contentTypeHeader = "Content-Type";
private final TenantRepository tenantRepository;
private final Duration zookeeperBarrierTimeout;
@@ -50,10 +66,35 @@ public class ApplicationApiHandler extends SessionHandler {
@Override
protected HttpResponse handlePOST(HttpRequest request) {
- validateDataAndHeader(request);
+ validateDataAndHeader(request, List.of(APPLICATION_X_GZIP, APPLICATION_ZIP, MULTIPART_FORM_DATA));
TenantName tenantName = validateTenant(request);
- PrepareParams prepareParams = PrepareParams.fromHttpRequest(request, tenantName, zookeeperBarrierTimeout);
- CompressedApplicationInputStream compressedStream = createFromCompressedStream(request.getData(), request.getHeader(contentTypeHeader));
+
+ PrepareParams prepareParams;
+ CompressedApplicationInputStream compressedStream;
+ boolean multipartRequest = Optional.ofNullable(request.getHeader(HttpHeaders.Names.CONTENT_TYPE))
+ .map(ContentType::parse)
+ .map(contentType -> contentType.getMimeType().equalsIgnoreCase(MULTIPART_FORM_DATA))
+ .orElse(false);
+ if(multipartRequest) {
+ try {
+ MultiPartFormInputStream multiPartFormInputStream = new MultiPartFormInputStream(request.getData(), request.getHeader(CONTENT_TYPE), /* config */null, /* contextTmpDir */null);
+ Map<String, Part> parts = multiPartFormInputStream.getParts().stream()
+ .collect(Collectors.toMap(Part::getName, p -> p));
+
+ byte[] params = parts.get(MULTIPART_PARAMS).getInputStream().readAllBytes();
+ log.log(Level.FINE, "Deploy parameters: [{0}]", new String(params, StandardCharsets.UTF_8));
+ prepareParams = PrepareParams.fromJson(params, tenantName, zookeeperBarrierTimeout);
+ Part appPackagePart = parts.get(MULTIPART_APPLICATION_PACKAGE);
+ compressedStream = createFromCompressedStream(appPackagePart.getInputStream(), appPackagePart.getContentType());
+ } catch (IOException e) {
+ log.log(Level.WARNING, "Unable to parse multipart in deploy", e);
+ throw new BadRequestException("Request contains invalid data");
+ }
+ } else {
+ prepareParams = PrepareParams.fromHttpRequest(request, tenantName, zookeeperBarrierTimeout);
+ compressedStream = createFromCompressedStream(request.getData(), request.getHeader(contentTypeHeader));
+ }
+
PrepareResult result = applicationRepository.deploy(compressedStream, prepareParams);
return new SessionPrepareAndActivateResponse(result, request, prepareParams.getApplicationId(), zone);
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/DeploymentMetricsResponse.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/DeploymentMetricsResponse.java
index 8244a486f1c..062a21b1f80 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/DeploymentMetricsResponse.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/DeploymentMetricsResponse.java
@@ -32,7 +32,14 @@ public class DeploymentMetricsResponse extends SlimeJsonResponse {
aggregator.aggregateDocumentCount().ifPresent(documentCount -> metrics.setDouble("documentCount", documentCount));
aggregator.aggregateQueryLatency().ifPresent(queryLatency -> metrics.setDouble("queryLatency",queryLatency));
aggregator.aggregateFeedLatency().ifPresent(feedLatency -> metrics.setDouble("feedLatency", feedLatency));
- aggregator.feedingBlocked().ifPresent(feedingBlocked -> metrics.setDouble("feedingBlocked", feedingBlocked));
+ aggregator.memoryUsage().ifPresent(memory -> {
+ metrics.setDouble("memoryUtil", memory.util());
+ metrics.setDouble("memoryFeedBlockLimit", memory.feedBlockLimit());
+ });
+ aggregator.diskUsage().ifPresent(disk -> {
+ metrics.setDouble("diskUtil", disk.util());
+ metrics.setDouble("diskFeedBlockLimit", disk.feedBlockLimit());
+ });
}
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/SessionCreateHandler.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/SessionCreateHandler.java
index 732bad80e00..61f099fb8ea 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/SessionCreateHandler.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/SessionCreateHandler.java
@@ -9,6 +9,7 @@ import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.TenantName;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
+import com.yahoo.jdisc.HeaderFields;
import com.yahoo.jdisc.application.UriPattern;
import com.yahoo.vespa.config.server.ApplicationRepository;
import com.yahoo.vespa.config.server.deploy.DeployHandlerLogger;
@@ -16,9 +17,12 @@ import com.yahoo.vespa.config.server.TimeoutBudget;
import com.yahoo.vespa.config.server.http.BadRequestException;
import com.yahoo.vespa.config.server.http.SessionHandler;
import com.yahoo.vespa.config.server.http.Utils;
+import org.apache.hc.core5.http.ContentType;
import java.net.URI;
import java.time.Duration;
+import java.util.List;
+import java.util.stream.Collectors;
/**
* A handler that is able to create a session from an application package,
@@ -55,7 +60,7 @@ public class SessionCreateHandler extends SessionHandler {
logger = DeployHandlerLogger.forApplication(applicationId, verbose);
sessionId = applicationRepository.createSessionFromExisting(applicationId, false, timeoutBudget);
} else {
- validateDataAndHeader(request);
+ validateDataAndHeader(request, List.of(ApplicationApiHandler.APPLICATION_ZIP, ApplicationApiHandler.APPLICATION_X_GZIP));
logger = DeployHandlerLogger.forTenant(tenantName, verbose);
// TODO: Avoid using application id here at all
ApplicationId applicationId = ApplicationId.from(tenantName, ApplicationName.defaultName(), InstanceName.defaultName());
@@ -84,16 +89,19 @@ public class SessionCreateHandler extends SessionHandler {
.instanceName(match.group(6)).build();
}
- static void validateDataAndHeader(HttpRequest request) {
+ static void validateDataAndHeader(HttpRequest request, List<String> supportedContentTypes) {
if (request.getData() == null) {
throw new BadRequestException("Request contains no data");
}
String header = request.getHeader(ApplicationApiHandler.contentTypeHeader);
if (header == null) {
throw new BadRequestException("Request contains no " + ApplicationApiHandler.contentTypeHeader + " header");
- } else if (!(header.equals(ApplicationApiHandler.APPLICATION_X_GZIP) || header.equals(ApplicationApiHandler.APPLICATION_ZIP))) {
- throw new BadRequestException("Request contains invalid " + ApplicationApiHandler.contentTypeHeader + " header, only '" +
- ApplicationApiHandler.APPLICATION_X_GZIP + "' and '" + ApplicationApiHandler.APPLICATION_ZIP + "' are supported");
+ } else {
+ ContentType contentType = ContentType.parse(header);
+ if (!supportedContentTypes.contains(contentType.getMimeType())) {
+ throw new BadRequestException("Request contains invalid " + ApplicationApiHandler.contentTypeHeader + " header (" + contentType.getMimeType() + "), only '["
+ + String.join(", ", supportedContentTypes) + "]' are supported");
+ }
}
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/TenantHandler.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/TenantHandler.java
index 6aff3b8a361..bb94f8d442a 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/TenantHandler.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/TenantHandler.java
@@ -3,112 +3,77 @@ package com.yahoo.vespa.config.server.http.v2;
import com.google.common.collect.ImmutableSet;
import com.google.inject.Inject;
-
import com.yahoo.config.provision.TenantName;
-import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
-import com.yahoo.jdisc.application.BindingMatch;
+import com.yahoo.restapi.ErrorResponse;
+import com.yahoo.restapi.RestApi;
+import com.yahoo.restapi.RestApiException;
+import com.yahoo.restapi.RestApiRequestHandler;
import com.yahoo.vespa.config.server.ApplicationRepository;
import com.yahoo.vespa.config.server.tenant.TenantRepository;
import com.yahoo.yolean.Exceptions;
-import com.yahoo.vespa.config.server.http.BadRequestException;
-import com.yahoo.vespa.config.server.http.HttpHandler;
-import com.yahoo.vespa.config.server.http.InternalServerException;
-import com.yahoo.vespa.config.server.http.Utils;
/**
* Handler to create, get and delete a tenant, and listing of tenants.
*
- * @author Vegard Havdal
+ * @author jonmv
*/
-public class TenantHandler extends HttpHandler {
+public class TenantHandler extends RestApiRequestHandler<TenantHandler> {
private static final String TENANT_NAME_REGEXP = "[\\w-]+";
private final TenantRepository tenantRepository;
private final ApplicationRepository applicationRepository;
- // instantiated by dependency injection
@Inject
public TenantHandler(Context ctx, ApplicationRepository applicationRepository) {
- super(ctx);
+ super(ctx, TenantHandler::defineApi);
this.tenantRepository = applicationRepository.tenantRepository();
this.applicationRepository = applicationRepository;
}
- @Override
- protected HttpResponse handlePUT(HttpRequest request) {
- TenantName tenantName = getAndValidateTenantFromRequest(request);
- try {
- tenantRepository.addTenant(tenantName);
- } catch (Exception e) {
- throw new InternalServerException(Exceptions.toMessageString(e));
- }
- return new TenantCreateResponse(tenantName);
- }
-
- @Override
- protected HttpResponse handleGET(HttpRequest request) {
- if (isGetTenantRequest(request)) {
- final TenantName tenantName = getTenantNameFromRequest(request);
- Utils.checkThatTenantExists(tenantRepository, tenantName);
- return new TenantGetResponse(tenantName);
- } else if (isListTenantsRequest(request)) {
- return new ListTenantsResponse(ImmutableSet.copyOf(tenantRepository.getAllTenantNames()));
- } else {
- throw new BadRequestException(request.getUri().toString());
- }
- }
-
- @Override
- protected HttpResponse handleDELETE(HttpRequest request) {
- final TenantName tenantName = getTenantNameFromRequest(request);
- Utils.checkThatTenantExists(tenantRepository, tenantName);
- applicationRepository.deleteTenant(tenantName);
- return new TenantDeleteResponse(tenantName);
+ private RestApi defineApi() {
+ return RestApi.builder()
+ .addRoute(RestApi.route("/application/v2/tenant")
+ .get(this::getTenants))
+ .addRoute(RestApi.route("/application/v2/tenant/{tenant}")
+ .get(this::getTenant)
+ .put(this::putTenant)
+ .delete(this::deleteTenant))
+ .addExceptionMapper(IllegalArgumentException.class, (c, e) -> ErrorResponse.badRequest(Exceptions.toMessageString(e)))
+ .addExceptionMapper(RuntimeException.class, (c, e) -> ErrorResponse.internalServerError(Exceptions.toMessageString(e)))
+ .build();
}
- /**
- * Gets the tenant name from the request, throws if it exists already and validates its name
- *
- * @param request an {@link com.yahoo.container.jdisc.HttpRequest}
- * @return tenant name
- */
- private TenantName getAndValidateTenantFromRequest(HttpRequest request) {
- final TenantName tenantName = getTenantNameFromRequest(request);
- checkThatTenantDoesNotExist(tenantName);
- validateTenantName(tenantName);
- return tenantName;
+ private HttpResponse getTenants(RestApi.RequestContext context) {
+ return new ListTenantsResponse(ImmutableSet.copyOf(tenantRepository.getAllTenantNames()));
}
- private void validateTenantName(TenantName tenant) {
- if (!tenant.value().matches(TENANT_NAME_REGEXP)) {
- throw new BadRequestException("Illegal tenant name: " + tenant);
- }
- }
+ private HttpResponse getTenant(RestApi.RequestContext context) {
+ TenantName name = TenantName.from(context.pathParameters().getStringOrThrow("tenant"));
+ if ( ! tenantRepository.checkThatTenantExists(name))
+ throw new RestApiException.NotFound("Tenant '" + name + "' was not found.");
- private void checkThatTenantDoesNotExist(TenantName tenantName) {
- if (tenantRepository.checkThatTenantExists(tenantName))
- throw new BadRequestException("There already exists a tenant '" + tenantName + "'");
+ return new TenantGetResponse(name);
}
- private static BindingMatch<?> getBindingMatch(HttpRequest request) {
- return HttpConfigRequests.getBindingMatch(request,
- "http://*/application/v2/tenant/",
- "http://*/application/v2/tenant/*");
- }
+ private HttpResponse putTenant(RestApi.RequestContext context) {
+ TenantName name = TenantName.from(context.pathParameters().getStringOrThrow("tenant"));
+ if (tenantRepository.checkThatTenantExists(name))
+ throw new RestApiException.BadRequest("There already exists a tenant '" + name + "'");
+ if ( ! name.value().matches(TENANT_NAME_REGEXP))
+ throw new RestApiException.BadRequest("Illegal tenant name: " + name);
- private static boolean isGetTenantRequest(HttpRequest request) {
- return getBindingMatch(request).groupCount() == 3;
+ tenantRepository.addTenant(name);
+ return new TenantCreateResponse(name);
}
- private static boolean isListTenantsRequest(HttpRequest request) {
- return getBindingMatch(request).groupCount() == 2 &&
- request.getUri().getPath().endsWith("/tenant/");
- }
+ private HttpResponse deleteTenant(RestApi.RequestContext context) {
+ TenantName name = TenantName.from(context.pathParameters().getStringOrThrow("tenant"));
+ if ( ! tenantRepository.checkThatTenantExists(name))
+ throw new RestApiException.NotFound("Tenant '" + name + "' was not found.");
- private static TenantName getTenantNameFromRequest(HttpRequest request) {
- BindingMatch<?> bm = getBindingMatch(request);
- return TenantName.from(bm.group(2));
+ applicationRepository.deleteTenant(name);
+ return new TenantDeleteResponse(name);
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java
index 2903f0fadcc..e1135063f97 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java
@@ -22,11 +22,13 @@ import java.time.Duration;
import java.util.Collection;
import java.util.List;
import java.util.Map;
+import java.util.OptionalDouble;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
@@ -46,8 +48,8 @@ public class ClusterDeploymentMetricsRetriever {
private static final String VESPA_CONTAINER = "vespa.container";
private static final String VESPA_QRSERVER = "vespa.qrserver";
private static final String VESPA_DISTRIBUTOR = "vespa.distributor";
- private static final String VESPA_SEARCHNODE = "vespa.searchnode";
- private static final List<String> WANTED_METRIC_SERVICES = List.of(VESPA_CONTAINER, VESPA_QRSERVER, VESPA_DISTRIBUTOR, VESPA_SEARCHNODE);
+ private static final String VESPA_CONTAINER_CLUSTERCONTROLLER = "vespa.container-clustercontroller";
+ private static final List<String> WANTED_METRIC_SERVICES = List.of(VESPA_CONTAINER, VESPA_QRSERVER, VESPA_DISTRIBUTOR, VESPA_CONTAINER_CLUSTERCONTROLLER);
private static final ExecutorService executor = Executors.newFixedThreadPool(10, new DaemonThreadFactory("cluster-deployment-metrics-retriever-"));
@@ -110,36 +112,40 @@ public class ClusterDeploymentMetricsRetriever {
private static void parseService(Inspector service, Map<ClusterInfo, DeploymentMetricsAggregator> clusterMetricsMap) {
String serviceName = service.field("name").asString();
+ if (!WANTED_METRIC_SERVICES.contains(serviceName)) return;
service.field("metrics").traverse((ArrayTraverser) (i, metric) ->
- addMetricsToAggeregator(serviceName, metric, clusterMetricsMap)
+ addMetricsToAggregator(serviceName, metric, clusterMetricsMap)
);
}
- private static void addMetricsToAggeregator(String serviceName, Inspector metric, Map<ClusterInfo, DeploymentMetricsAggregator> clusterMetricsMap) {
- if (!WANTED_METRIC_SERVICES.contains(serviceName)) return;
+ private static void addMetricsToAggregator(String serviceName, Inspector metric, Map<ClusterInfo, DeploymentMetricsAggregator> clusterMetricsMap) {
Inspector values = metric.field("values");
ClusterInfo clusterInfo = getClusterInfoFromDimensions(metric.field("dimensions"));
- DeploymentMetricsAggregator deploymentMetricsAggregator = clusterMetricsMap.computeIfAbsent(clusterInfo, c -> new DeploymentMetricsAggregator());
+ Supplier<DeploymentMetricsAggregator> aggregator = () -> clusterMetricsMap.computeIfAbsent(clusterInfo, c -> new DeploymentMetricsAggregator());
switch (serviceName) {
case VESPA_CONTAINER:
- deploymentMetricsAggregator.addContainerLatency(
- values.field("query_latency.sum").asDouble(),
- values.field("query_latency.count").asDouble());
- deploymentMetricsAggregator.addFeedLatency(
- values.field("feed.latency.sum").asDouble(),
- values.field("feed.latency.count").asDouble());
+ optionalDouble(values.field("query_latency.sum")).ifPresent(qlSum ->
+ aggregator.get()
+ .addContainerLatency(qlSum, values.field("query_latency.count").asDouble())
+ .addFeedLatency(values.field("feed.latency.sum").asDouble(), values.field("feed.latency.count").asDouble()));
break;
case VESPA_QRSERVER:
- deploymentMetricsAggregator.addQrLatency(
- values.field("query_latency.sum").asDouble(),
- values.field("query_latency.count").asDouble());
+ optionalDouble(values.field("query_latency.sum")).ifPresent(qlSum ->
+ aggregator.get()
+ .addQrLatency(qlSum, values.field("query_latency.count").asDouble()));
break;
case VESPA_DISTRIBUTOR:
- deploymentMetricsAggregator.addDocumentCount(values.field("vds.distributor.docsstored.average").asDouble());
+ optionalDouble(values.field("vds.distributor.docsstored.average"))
+ .ifPresent(docCount -> aggregator.get().addDocumentCount(docCount));
break;
- case VESPA_SEARCHNODE:
- deploymentMetricsAggregator.addFeedingBlocked((int) values.field("content.proton.resource_usage.feeding_blocked.last").asLong());
+ case VESPA_CONTAINER_CLUSTERCONTROLLER:
+ optionalDouble(values.field("cluster-controller.resource_usage.max_memory_utilization.last")).ifPresent(memoryUtil ->
+ aggregator.get()
+ .addMemoryUsage(memoryUtil,
+ values.field("cluster-controller.resource_usage.memory_limit.last").asDouble())
+ .addDiskUsage(values.field("cluster-controller.resource_usage.max_disk_utilization.last").asDouble(),
+ values.field("cluster-controller.resource_usage.disk_limit.last").asDouble()));
break;
}
}
@@ -158,4 +164,8 @@ public class ClusterDeploymentMetricsRetriever {
return new Slime();
}
}
+
+ private static OptionalDouble optionalDouble(Inspector field) {
+ return field.valid() ? OptionalDouble.of(field.asDouble()) : OptionalDouble.empty();
+ }
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java
index 916f5ff5613..f27cf942dd8 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java
@@ -13,7 +13,8 @@ public class DeploymentMetricsAggregator {
private LatencyMetrics qr;
private LatencyMetrics container;
private Double documentCount;
- private Integer feedingBlocked;
+ private ResourceUsage memoryUsage;
+ private ResourceUsage diskUsage;
public synchronized DeploymentMetricsAggregator addFeedLatency(double sum, double count) {
this.feed = combineLatency(this.feed, sum, count);
@@ -35,50 +36,87 @@ public class DeploymentMetricsAggregator {
return this;
}
- public synchronized DeploymentMetricsAggregator addFeedingBlocked(int feedingBlocked) {
- this.feedingBlocked = Math.max(Optional.ofNullable(this.feedingBlocked).orElse(0), feedingBlocked);
+ public synchronized DeploymentMetricsAggregator addDiskUsage(double feedBlockUtil, double feedBlockLimit) {
+ this.diskUsage = combineResourceUtil(this.diskUsage, feedBlockUtil, feedBlockLimit);
+ return this;
+ }
+
+ public synchronized DeploymentMetricsAggregator addMemoryUsage(double feedBlockUtil, double feedBlockLimit) {
+ this.memoryUsage = combineResourceUtil(this.memoryUsage, feedBlockUtil, feedBlockLimit);
return this;
}
public Optional<Double> aggregateFeedLatency() {
- return Optional.ofNullable(feed).map(m -> m.latencySum / m.latencyCount).filter(num -> !num.isNaN());
+ return Optional.ofNullable(feed).map(m -> m.sum / m.count).filter(num -> !num.isNaN());
}
public Optional<Double> aggregateFeedRate() {
- return Optional.ofNullable(feed).map(m -> m.latencyCount / 60);
+ return Optional.ofNullable(feed).map(m -> m.count / 60);
}
public Optional<Double> aggregateQueryLatency() {
if (container == null && qr == null) return Optional.empty();
var c = Optional.ofNullable(container).orElseGet(LatencyMetrics::new);
var q = Optional.ofNullable(qr).orElseGet(LatencyMetrics::new);
- return Optional.of((c.latencySum + q.latencySum) / (c.latencyCount + q.latencyCount)).filter(num -> !num.isNaN());
+ return Optional.of((c.sum + q.sum) / (c.count + q.count)).filter(num -> !num.isNaN());
}
public Optional<Double> aggregateQueryRate() {
if (container == null && qr == null) return Optional.empty();
var c = Optional.ofNullable(container).orElseGet(LatencyMetrics::new);
var q = Optional.ofNullable(qr).orElseGet(LatencyMetrics::new);
- return Optional.of((c.latencyCount + q.latencyCount) / 60);
+ return Optional.of((c.count + q.count) / 60);
}
public Optional<Double> aggregateDocumentCount() {
return Optional.ofNullable(documentCount);
}
- public Optional<Integer> feedingBlocked() {
- return Optional.ofNullable(feedingBlocked);
+ public Optional<ResourceUsage> memoryUsage() {
+ return Optional.ofNullable(memoryUsage);
}
- private LatencyMetrics combineLatency(LatencyMetrics metricsOrNull, double sum, double count) {
- var metrics = Optional.ofNullable(metricsOrNull).orElseGet(LatencyMetrics::new);
- metrics.latencyCount += count;
- metrics.latencySum += sum;
- return metrics;
+ public Optional<ResourceUsage> diskUsage() {
+ return Optional.ofNullable(diskUsage);
+ }
+
+
+ private static LatencyMetrics combineLatency(LatencyMetrics metricsOrNull, double sum, double count) {
+ return Optional.ofNullable(metricsOrNull).orElseGet(LatencyMetrics::new).combine(sum, count);
+ }
+
+ private static ResourceUsage combineResourceUtil(ResourceUsage resourceUsageOrNull, double util, double limit) {
+ return Optional.ofNullable(resourceUsageOrNull).orElseGet(ResourceUsage::new).combine(util, limit);
}
private static class LatencyMetrics {
- double latencySum;
- double latencyCount;
+ private double sum;
+ private double count;
+
+ private LatencyMetrics combine(double sum, double count) {
+ this.sum += sum;
+ this.count += count;
+ return this;
+ }
+ }
+
+ public static class ResourceUsage {
+ /**
+ * Current resource utilization relative to feed block limit, i.e. value of >= 1 means utilization at or above
+ * feed block limit.
+ */
+ private double feedBlockUtil;
+
+ /** Resource utilization limit at which further external feed is blocked */
+ private double feedBlockLimit;
+
+ private ResourceUsage combine(double feedBlockUtil, double feedBlockLimit) {
+ if (feedBlockUtil > this.feedBlockUtil) this.feedBlockUtil = feedBlockUtil;
+ if (feedBlockLimit > this.feedBlockLimit) this.feedBlockLimit = feedBlockLimit;
+ return this;
+ }
+
+ public double util() { return feedBlockUtil * feedBlockLimit; }
+ public double feedBlockLimit() { return feedBlockLimit; }
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactory.java b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactory.java
index 8003b2a2be9..7afeebdd3cf 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactory.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactory.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.config.server.rpc;
import com.yahoo.cloud.config.ConfigserverConfig;
+import com.yahoo.text.AbstractUtf8Array;
import com.yahoo.vespa.config.ConfigPayload;
import com.yahoo.vespa.config.protocol.ConfigResponse;
@@ -27,12 +28,12 @@ public interface ConfigResponseFactory {
/**
* Creates a {@link ConfigResponse} for a given payload and generation.
*
- * @param payload the {@link ConfigPayload} to put in the response
+ * @param rawPayload the {@link ConfigPayload} to put in the response
* @param generation the payload generation
* @param applyOnRestart true if this config change should only be applied on restart,
* false if it should be applied immediately
* @return a {@link ConfigResponse} that can be sent to the client
*/
- ConfigResponse createResponse(ConfigPayload payload, long generation, boolean applyOnRestart);
+ ConfigResponse createResponse(AbstractUtf8Array rawPayload, long generation, boolean applyOnRestart);
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/LZ4ConfigResponseFactory.java b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/LZ4ConfigResponseFactory.java
index 619e6c0a2a2..f309b30cf8d 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/LZ4ConfigResponseFactory.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/LZ4ConfigResponseFactory.java
@@ -1,8 +1,8 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.server.rpc;
+import com.yahoo.text.AbstractUtf8Array;
import com.yahoo.text.Utf8Array;
-import com.yahoo.vespa.config.ConfigPayload;
import com.yahoo.vespa.config.LZ4PayloadCompressor;
import com.yahoo.vespa.config.protocol.CompressionInfo;
import com.yahoo.vespa.config.protocol.CompressionType;
@@ -20,13 +20,12 @@ public class LZ4ConfigResponseFactory implements ConfigResponseFactory {
private static final LZ4PayloadCompressor compressor = new LZ4PayloadCompressor();
@Override
- public ConfigResponse createResponse(ConfigPayload payload,
+ public ConfigResponse createResponse(AbstractUtf8Array rawPayload,
long generation,
boolean applyOnRestart) {
- Utf8Array rawPayload = payload.toUtf8Array(true);
String configMd5 = ConfigUtils.getMd5(rawPayload);
CompressionInfo info = CompressionInfo.create(CompressionType.LZ4, rawPayload.getByteLength());
- Utf8Array compressed = new Utf8Array(compressor.compress(rawPayload.getBytes()));
+ Utf8Array compressed = new Utf8Array(compressor.compress(rawPayload.wrap()));
return new SlimeConfigResponse(compressed, generation, applyOnRestart, configMd5, info);
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/UncompressedConfigResponseFactory.java b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/UncompressedConfigResponseFactory.java
index 8d852ebd8c9..889548196aa 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/UncompressedConfigResponseFactory.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/UncompressedConfigResponseFactory.java
@@ -1,8 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.server.rpc;
-import com.yahoo.text.Utf8Array;
-import com.yahoo.vespa.config.ConfigPayload;
+import com.yahoo.text.AbstractUtf8Array;
import com.yahoo.vespa.config.protocol.CompressionInfo;
import com.yahoo.vespa.config.protocol.CompressionType;
import com.yahoo.vespa.config.protocol.ConfigResponse;
@@ -17,10 +16,9 @@ import com.yahoo.vespa.config.util.ConfigUtils;
public class UncompressedConfigResponseFactory implements ConfigResponseFactory {
@Override
- public ConfigResponse createResponse(ConfigPayload payload,
+ public ConfigResponse createResponse(AbstractUtf8Array rawPayload,
long generation,
boolean applyOnRestart) {
- Utf8Array rawPayload = payload.toUtf8Array(true);
String configMd5 = ConfigUtils.getMd5(rawPayload);
CompressionInfo info = CompressionInfo.create(CompressionType.UNCOMPRESSED, rawPayload.getByteLength());
return new SlimeConfigResponse(rawPayload, generation, applyOnRestart, configMd5, info);
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java
index 5a3e0311db9..ea2a525b440 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java
@@ -11,6 +11,8 @@ import com.yahoo.config.provision.AthenzDomain;
import com.yahoo.config.provision.DockerImage;
import com.yahoo.config.provision.TenantName;
import com.yahoo.container.jdisc.HttpRequest;
+import com.yahoo.slime.Inspector;
+import com.yahoo.slime.Slime;
import com.yahoo.slime.SlimeUtils;
import com.yahoo.vespa.config.server.TimeoutBudget;
import com.yahoo.config.model.api.TenantSecretStore;
@@ -21,9 +23,11 @@ import com.yahoo.vespa.config.server.tenant.TenantSecretStoreSerializer;
import java.time.Clock;
import java.time.Duration;
+import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
+import java.util.function.Function;
/**
* Parameters for preparing an application. Immutable.
@@ -163,6 +167,16 @@ public final class PrepareParams {
return this;
}
+ public Builder containerEndpointList(List<ContainerEndpoint> endpoints) {
+ this.containerEndpoints = endpoints;
+ return this;
+ }
+
+ public Builder endpointCertificateMetadata(EndpointCertificateMetadata endpointCertificateMetadata) {
+ this.endpointCertificateMetadata = Optional.ofNullable(endpointCertificateMetadata);
+ return this;
+ }
+
public Builder endpointCertificateMetadata(String serialized) {
this.endpointCertificateMetadata = (serialized == null)
? Optional.empty()
@@ -197,6 +211,11 @@ public final class PrepareParams {
return this;
}
+ public Builder quota(Quota quota) {
+ this.quota = Optional.ofNullable(quota);
+ return this;
+ }
+
public Builder quota(String serialized) {
this.quota = (serialized == null)
? Optional.empty()
@@ -253,6 +272,61 @@ public final class PrepareParams {
.build();
}
+ public static PrepareParams fromJson(byte[] json, TenantName tenant, Duration barrierTimeout) {
+ Slime slime = SlimeUtils.jsonToSlimeOrThrow(json);
+ Inspector params = slime.get();
+
+ return new Builder()
+ .ignoreValidationErrors(booleanValue(params, IGNORE_VALIDATION_PARAM_NAME))
+ .dryRun(booleanValue(params, DRY_RUN_PARAM_NAME))
+ .verbose(booleanValue(params, VERBOSE_PARAM_NAME))
+ .timeoutBudget(SessionHandler.getTimeoutBudget(getTimeout(params, barrierTimeout)))
+ .applicationId(createApplicationId(params, tenant))
+ .vespaVersion(SlimeUtils.optionalString(params.field(VESPA_VERSION_PARAM_NAME)).orElse(null))
+ .containerEndpointList(deserialize(params.field(CONTAINER_ENDPOINTS_PARAM_NAME), ContainerEndpointSerializer::endpointListFromSlime, Collections.emptyList()))
+ .endpointCertificateMetadata(deserialize(params.field(ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME), EndpointCertificateMetadataSerializer::fromSlime))
+ .dockerImageRepository(SlimeUtils.optionalString(params.field(DOCKER_IMAGE_REPOSITORY)).orElse(null))
+ .athenzDomain(SlimeUtils.optionalString(params.field(ATHENZ_DOMAIN)).orElse(null))
+ .applicationRoles(ApplicationRoles.fromString(SlimeUtils.optionalString(params.field(APPLICATION_HOST_ROLE)).orElse(null), SlimeUtils.optionalString(params.field(APPLICATION_CONTAINER_ROLE)).orElse(null)))
+ .quota(deserialize(params.field(QUOTA_PARAM_NAME), Quota::fromSlime))
+ .tenantSecretStores(SlimeUtils.optionalString(params.field(TENANT_SECRET_STORES_PARAM_NAME)).orElse(null))
+ .force(booleanValue(params, FORCE_PARAM_NAME))
+ .waitForResourcesInPrepare(booleanValue(params, WAIT_FOR_RESOURCES_IN_PREPARE))
+ .build();
+ }
+
+ private static <T> T deserialize(Inspector field, Function<Inspector, T> mapper) {
+ return deserialize(field, mapper, null);
+ }
+ private static <T> T deserialize(Inspector field, Function<Inspector, T> mapper, T defaultValue) {
+ return field.valid()
+ ? mapper.apply(field)
+ : defaultValue;
+ }
+
+ private static boolean booleanValue(Inspector inspector, String fieldName) {
+ Inspector field = inspector.field(fieldName);
+ return field.valid()
+ ? field.asBool()
+ : false;
+ }
+
+ private static Duration getTimeout(Inspector params, Duration defaultTimeout) {
+ if(params.field("timeout").valid()) {
+ return Duration.ofSeconds(params.field("timeout").asLong());
+ } else {
+ return defaultTimeout;
+ }
+ }
+
+ private static ApplicationId createApplicationId(Inspector params, TenantName tenant) {
+ return new ApplicationId.Builder()
+ .tenant(tenant)
+ .applicationName(SlimeUtils.optionalString(params.field(APPLICATION_NAME_PARAM_NAME)).orElse("default"))
+ .instanceName(SlimeUtils.optionalString(params.field(INSTANCE_PARAM_NAME)).orElse("default"))
+ .build();
+ }
+
private static ApplicationId createApplicationId(HttpRequest request, TenantName tenant) {
return new ApplicationId.Builder()
.tenant(tenant)
@@ -268,7 +342,7 @@ public final class PrepareParams {
private static Optional<String> getProperty(HttpRequest request, String propertyName) {
return Optional.ofNullable(request.getProperty(propertyName));
}
-
+
public String getApplicationName() {
return applicationId.application().value();
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java
index 6ec0ba693f5..15fe04932dd 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java
@@ -54,6 +54,9 @@ public class ContainerEndpointSerializer {
public static List<ContainerEndpoint> endpointListFromSlime(Slime slime) {
final var inspector = slime.get();
+ return endpointListFromSlime(inspector);
+ }
+ public static List<ContainerEndpoint> endpointListFromSlime(Inspector inspector) {
final var endpoints = new ArrayList<ContainerEndpoint>();
inspector.traverse((ArrayTraverser) (idx, endpointInspector) -> {
diff --git a/configserver/src/main/resources/configserver-app/services.xml b/configserver/src/main/resources/configserver-app/services.xml
index fd8bda8f305..d0e366b11a1 100644
--- a/configserver/src/main/resources/configserver-app/services.xml
+++ b/configserver/src/main/resources/configserver-app/services.xml
@@ -46,7 +46,6 @@
</components>
<preprocess:include file='config-models.xml' required='false' />
- <preprocess:include file='node-repository.xml' required='false' />
<preprocess:include file='routing-status.xml' required='false' />
<preprocess:include file='model-integration.xml' required='true' />
@@ -102,8 +101,7 @@
<binding>http://*/status</binding>
</handler>
<handler id='com.yahoo.vespa.config.server.http.v2.TenantHandler' bundle='configserver'>
- <binding>http://*/application/v2/tenant/</binding>
- <binding>http://*/application/v2/tenant/*</binding>
+ <binding>http://*/application/v2/tenant*</binding>
</handler>
<handler id='com.yahoo.vespa.config.server.http.v2.SessionCreateHandler' bundle='configserver'>
<binding>http://*/application/v2/tenant/*/session</binding>
diff --git a/configserver/src/main/sh/start-configserver b/configserver/src/main/sh/start-configserver
index efe91a5cea2..6bb43fd73c1 100755
--- a/configserver/src/main/sh/start-configserver
+++ b/configserver/src/main/sh/start-configserver
@@ -169,6 +169,7 @@ vespa-run-as-vespa-user vespa-runserver -s configserver -r 30 -p $pidfile -- \
-XX:ErrorFile="${VESPA_HOME}/var/crash/hs_err_pid%p.log" \
-XX:+ExitOnOutOfMemoryError \
-XX:-OmitStackTraceInFastThrow \
+ -XX:MaxJavaStackTraceDepth=1000000 \
$jvmargs \
--illegal-access=warn \
--add-opens=java.base/java.io=ALL-UNNAMED \
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/ConfigServerBootstrapTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/ConfigServerBootstrapTest.java
index 9a08375887c..0df80c4d5c0 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/ConfigServerBootstrapTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/ConfigServerBootstrapTest.java
@@ -38,8 +38,6 @@ import java.util.List;
import java.util.Optional;
import java.util.function.BooleanSupplier;
-import static com.yahoo.vespa.config.server.ConfigServerBootstrap.Mode.BOOTSTRAP_IN_SEPARATE_THREAD;
-import static com.yahoo.vespa.config.server.ConfigServerBootstrap.Mode.INITIALIZE_ONLY;
import static com.yahoo.vespa.config.server.ConfigServerBootstrap.VipStatusMode.VIP_STATUS_FILE;
import static com.yahoo.vespa.config.server.ConfigServerBootstrap.VipStatusMode.VIP_STATUS_PROGRAMMATICALLY;
import static com.yahoo.vespa.config.server.deploy.DeployTester.createHostedModelFactory;
@@ -74,9 +72,8 @@ public class ConfigServerBootstrapTest {
provisioner.allocations().values().iterator().next().remove(0);
StateMonitor stateMonitor = StateMonitor.createForTesting();
VipStatus vipStatus = createVipStatus(stateMonitor);
- ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer,
- versionState, stateMonitor,
- vipStatus, INITIALIZE_ONLY, VIP_STATUS_PROGRAMMATICALLY);
+ ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState,
+ stateMonitor, vipStatus, VIP_STATUS_PROGRAMMATICALLY);
assertFalse(vipStatus.isInRotation());
bootstrap.start();
waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running");
@@ -105,9 +102,8 @@ public class ConfigServerBootstrapTest {
RpcServer rpcServer = createRpcServer(configserverConfig);
StateMonitor stateMonitor = StateMonitor.createForTesting();
VipStatus vipStatus = createVipStatus(stateMonitor);
- ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer,
- versionState, stateMonitor,
- vipStatus, INITIALIZE_ONLY, VIP_STATUS_FILE);
+ ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState,
+ stateMonitor, vipStatus, VIP_STATUS_FILE);
assertTrue(vipStatus.isInRotation()); // default is in rotation when using status file
bootstrap.start();
@@ -137,8 +133,7 @@ public class ConfigServerBootstrapTest {
StateMonitor stateMonitor = StateMonitor.createForTesting();
VipStatus vipStatus = createVipStatus(stateMonitor);
ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState,
- stateMonitor,
- vipStatus, INITIALIZE_ONLY, VIP_STATUS_PROGRAMMATICALLY);
+ stateMonitor, vipStatus, VIP_STATUS_PROGRAMMATICALLY);
assertFalse(vipStatus.isInRotation());
// Call method directly, to be sure that it is finished redeploying all applications and we can check status
bootstrap.start();
@@ -181,8 +176,8 @@ public class ConfigServerBootstrapTest {
StateMonitor stateMonitor = StateMonitor.createForTesting();
VipStatus vipStatus = createVipStatus(stateMonitor);
ConfigServerBootstrap bootstrap = new ConfigServerBootstrap(tester.applicationRepository(), rpcServer, versionState,
- stateMonitor, vipStatus,
- BOOTSTRAP_IN_SEPARATE_THREAD, VIP_STATUS_PROGRAMMATICALLY);
+ stateMonitor, vipStatus, VIP_STATUS_PROGRAMMATICALLY);
+ bootstrap.start();
waitUntil(rpcServer::isRunning, "failed waiting for Rpc server running");
waitUntil(() -> bootstrap.status() == StateMonitor.Status.up, "failed waiting for status 'up'");
waitUntil(vipStatus::isInRotation, "failed waiting for server to be in rotation");
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/SessionCreateHandlerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/SessionCreateHandlerTest.java
index 83d6ac5b288..5d7322070e7 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/SessionCreateHandlerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/SessionCreateHandlerTest.java
@@ -16,19 +16,24 @@ import com.yahoo.vespa.config.server.http.SessionHandlerTest;
import com.yahoo.vespa.config.server.session.Session;
import com.yahoo.vespa.config.server.tenant.TenantRepository;
import com.yahoo.vespa.config.server.tenant.TestTenantRepository;
+import org.apache.hc.core5.http.ContentType;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
+import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.IOException;
+import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
import static com.yahoo.jdisc.Response.Status.BAD_REQUEST;
@@ -43,6 +48,7 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
/**
* @author hmusum
@@ -171,6 +177,22 @@ public class SessionCreateHandlerTest extends SessionHandlerTest {
assertIllegalFromParameter("http://host:4013/application/v2/tenant/" + tenant + "/application/foo/environment/prod/region/baz/instance");
}
+ @Test
+ public void require_that_content_type_is_parsed_correctly() throws FileNotFoundException {
+ HttpRequest request = post(new ByteArrayInputStream("foo".getBytes(StandardCharsets.UTF_8)),
+ Map.of("Content-Type", "multipart/form-data; charset=ISO-8859-1; boundary=g5gJAzUWl_t6"),
+ Collections.emptyMap());
+
+ // Valid header should validate ok
+ SessionCreateHandler.validateDataAndHeader(request, List.of(ContentType.MULTIPART_FORM_DATA.getMimeType()));
+
+ // Accepting only application/json should fail:
+ try {
+ SessionCreateHandler.validateDataAndHeader(request, List.of(ContentType.APPLICATION_JSON.getMimeType()));
+ fail("Request contained invalid content type, but validated ok");
+ } catch (Exception expected) {}
+ }
+
private SessionCreateHandler createHandler() {
return new SessionCreateHandler(SessionCreateHandler.testOnlyContext(),
applicationRepository,
@@ -178,7 +200,7 @@ public class SessionCreateHandlerTest extends SessionHandlerTest {
}
private HttpRequest post() throws FileNotFoundException {
- return post(null, postHeaders, new HashMap<>());
+ return post((InputStream) null, postHeaders, new HashMap<>());
}
private HttpRequest post(File file) throws FileNotFoundException {
@@ -186,10 +208,12 @@ public class SessionCreateHandlerTest extends SessionHandlerTest {
}
private HttpRequest post(File file, Map<String, String> headers, Map<String, String> parameters) throws FileNotFoundException {
+ return post(file == null ? null : new FileInputStream(file), headers, parameters);
+ }
+
+ private HttpRequest post(InputStream data, Map <String, String > headers, Map < String, String > parameters) throws FileNotFoundException {
HttpRequest request = HttpRequest.createTestRequest("http://" + hostname + ":" + port + "/application/v2/tenant/" + tenant + "/session",
- POST,
- file == null ? null : new FileInputStream(file),
- parameters);
+ POST, data, parameters);
for (Map.Entry<String, String> entry : headers.entrySet()) {
request.getJDiscRequest().headers().put(entry.getKey(), entry.getValue());
}
@@ -197,6 +221,6 @@ public class SessionCreateHandlerTest extends SessionHandlerTest {
}
private HttpRequest post(Map<String, String> parameters) throws FileNotFoundException {
- return post(null, new HashMap<>(), parameters);
+ return post((InputStream) null, new HashMap<>(), parameters);
}
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/TenantHandlerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/TenantHandlerTest.java
index b0b01ea24b4..a6cf6d24c88 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/TenantHandlerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/TenantHandlerTest.java
@@ -6,16 +6,13 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ApplicationName;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.TenantName;
-import com.yahoo.container.jdisc.HttpRequest;
-import com.yahoo.container.jdisc.HttpResponse;
+import com.yahoo.container.jdisc.HttpRequestBuilder;
import com.yahoo.jdisc.http.HttpRequest.Method;
+import com.yahoo.restapi.RestApiTestDriver;
import com.yahoo.vespa.config.server.ApplicationRepository;
import com.yahoo.vespa.config.server.MockProvisioner;
import com.yahoo.vespa.config.server.application.OrchestratorMock;
-import com.yahoo.vespa.config.server.http.BadRequestException;
-import com.yahoo.vespa.config.server.http.NotFoundException;
import com.yahoo.vespa.config.server.session.PrepareParams;
-import com.yahoo.vespa.config.server.tenant.Tenant;
import com.yahoo.vespa.config.server.tenant.TenantRepository;
import com.yahoo.vespa.config.server.tenant.TestTenantRepository;
import org.junit.After;
@@ -29,11 +26,11 @@ import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
-import static org.hamcrest.CoreMatchers.is;
+import static com.yahoo.jdisc.http.HttpRequest.Method.DELETE;
+import static com.yahoo.jdisc.http.HttpRequest.Method.GET;
+import static com.yahoo.jdisc.http.HttpRequest.Method.PUT;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.fail;
public class TenantHandlerTest {
@@ -44,6 +41,8 @@ public class TenantHandlerTest {
private TenantHandler handler;
private final TenantName a = TenantName.from("a");
+ private RestApiTestDriver testDriver;
+
@Rule
public TemporaryFolder temporaryFolder = new TemporaryFolder();
@@ -63,7 +62,8 @@ public class TenantHandlerTest {
.withOrchestrator(new OrchestratorMock())
.withConfigserverConfig(configserverConfig)
.build();
- handler = new TenantHandler(TenantHandler.testOnlyContext(), applicationRepository);
+ handler = new TenantHandler(RestApiTestDriver.createHandlerTestContext(), applicationRepository);
+ testDriver = RestApiTestDriver.newBuilder(handler).build();
}
@After
@@ -74,91 +74,81 @@ public class TenantHandlerTest {
@Test
public void testTenantCreate() throws Exception {
assertNull(tenantRepository.getTenant(a));
- TenantCreateResponse response = putSync(
- HttpRequest.createTestRequest("http://deploy.example.yahoo.com:80/application/v2/tenant/a", Method.PUT));
- assertResponseEquals(response, "{\"message\":\"Tenant a created.\"}");
+ assertResponse(PUT, "/application/v2/tenant/a",
+ "{\"message\":\"Tenant a created.\"}");
+ assertEquals(a, tenantRepository.getTenant(a).getName());
}
@Test
public void testTenantCreateWithAllPossibleCharactersInName() throws Exception {
TenantName tenantName = TenantName.from("aB-9999_foo");
assertNull(tenantRepository.getTenant(tenantName));
- TenantCreateResponse response = putSync(
- HttpRequest.createTestRequest("http://deploy.example.yahoo.com:80/application/v2/tenant/" + tenantName, Method.PUT));
- assertResponseEquals(response, "{\"message\":\"Tenant " + tenantName + " created.\"}");
+ assertResponse(PUT, "/application/v2/tenant/aB-9999_foo",
+ "{\"message\":\"Tenant " + tenantName + " created.\"}");
}
- @Test(expected=NotFoundException.class)
- public void testGetNonExisting() {
- handler.handleGET(HttpRequest.createTestRequest("http://deploy.example.yahoo.com:80/application/v2/tenant/x", Method.GET));
+ @Test
+ public void testGetNonExisting() throws IOException {
+ assertResponse(GET, "/application/v2/tenant/x",
+ "{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'x' was not found.\"}");
}
-
+
@Test
public void testGetAndList() throws Exception {
tenantRepository.addTenant(a);
- assertResponseEquals((TenantGetResponse) handler.handleGET(
- HttpRequest.createTestRequest("http://deploy.example.yahoo.com:80/application/v2/tenant/a", Method.GET)),
- "{\"message\":\"Tenant 'a' exists.\"}");
- assertResponseEquals((ListTenantsResponse) handler.handleGET(
- HttpRequest.createTestRequest("http://deploy.example.yahoo.com:80/application/v2/tenant/", Method.GET)),
- "{\"tenants\":[\"default\",\"a\"]}");
+ assertResponse(GET, "/application/v2/tenant/a",
+ "{\"message\":\"Tenant 'a' exists.\"}");
+ assertResponse(GET, "/application/v2/tenant/",
+ "{\"tenants\":[\"default\",\"a\"]}");
+ assertResponse(GET, "/application/v2/tenant",
+ "{\"tenants\":[\"default\",\"a\"]}");
}
- @Test(expected=BadRequestException.class)
+ @Test
public void testCreateExisting() throws Exception {
assertNull(tenantRepository.getTenant(a));
- TenantCreateResponse response = putSync(HttpRequest.createTestRequest(
- "http://deploy.example.yahoo.com:80/application/v2/tenant/a", Method.PUT));
- assertResponseEquals(response, "{\"message\":\"Tenant a created.\"}");
+ assertResponse(PUT, "/application/v2/tenant/a",
+ "{\"message\":\"Tenant a created.\"}");
assertEquals(tenantRepository.getTenant(a).getName(), a);
- handler.handlePUT(HttpRequest.createTestRequest("http://deploy.example.yahoo.com:80/application/v2/tenant/a", Method.PUT));
+ assertResponse(PUT, "/application/v2/tenant/a",
+ "{\"error-code\":\"BAD_REQUEST\",\"message\":\"There already exists a tenant 'a'\"}");
}
@Test
public void testDelete() throws IOException {
- putSync(HttpRequest.createTestRequest("http://deploy.example.yahoo.com:80/application/v2/tenant/a", Method.PUT));
- assertEquals(tenantRepository.getTenant(a).getName(), a);
- TenantDeleteResponse delResp = (TenantDeleteResponse) handler.handleDELETE(
- HttpRequest.createTestRequest("http://deploy.example.yahoo.com:80/application/v2/tenant/a", Method.DELETE));
- assertResponseEquals(delResp, "{\"message\":\"Tenant a deleted.\"}");
+ tenantRepository.addTenant(a);
+ assertResponse(DELETE, "/application/v2/tenant/a",
+ "{\"message\":\"Tenant a deleted.\"}");
assertNull(tenantRepository.getTenant(a));
}
@Test
- public void testDeleteTenantWithActiveApplications() {
- putSync(HttpRequest.createTestRequest("http://deploy.example.yahoo.com:80/application/v2/tenant/" + a, Method.PUT));
- Tenant tenant = tenantRepository.getTenant(a);
- assertEquals(a, tenant.getName());
-
+ public void testDeleteTenantWithActiveApplications() throws IOException {
+ tenantRepository.addTenant(a);
ApplicationId applicationId = ApplicationId.from(a, ApplicationName.from("foo"), InstanceName.defaultName());
applicationRepository.deploy(testApp, new PrepareParams.Builder().applicationId(applicationId).build());
- try {
- handler.handleDELETE(HttpRequest.createTestRequest("http://deploy.example.yahoo.com:80/application/v2/tenant/" + a, Method.DELETE));
- fail();
- } catch (IllegalArgumentException e) {
- assertThat(e.getMessage(), is("Cannot delete tenant 'a', it has active applications: [a.foo]"));
- }
+ assertResponse(DELETE, "/application/v2/tenant/a",
+ "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Cannot delete tenant 'a', it has active applications: [a.foo]\"}");
}
- @Test(expected=NotFoundException.class)
- public void testDeleteNonExisting() {
- handler.handleDELETE(HttpRequest.createTestRequest("http://deploy.example.yahoo.com:80/application/v2/tenant/x", Method.DELETE));
- }
-
- @Test(expected=BadRequestException.class)
- public void testIllegalNameSlashes() {
- putSync(HttpRequest.createTestRequest("http://deploy.example.yahoo.com:80/application/v2/tenant/a/b", Method.PUT));
+ @Test
+ public void testDeleteNonExisting() throws IOException {
+ assertResponse(DELETE, "/application/v2/tenant/a",
+ "{\"error-code\":\"NOT_FOUND\",\"message\":\"Tenant 'a' was not found.\"}");
}
- private TenantCreateResponse putSync(HttpRequest testRequest) {
- return (TenantCreateResponse) handler.handlePUT(testRequest);
+ @Test
+ public void testIllegalNameSlashes() throws IOException {
+ assertResponse(PUT, "/application/v2/tenant/a/b",
+ "{\"error-code\":\"NOT_FOUND\",\"message\":\"Nothing at '/application/v2/tenant/a/b'\"}");
}
- private void assertResponseEquals(HttpResponse response, String payload) throws IOException {
+ private void assertResponse(Method method, String path, String payload) throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
- response.render(baos);
- assertEquals(baos.toString(StandardCharsets.UTF_8), payload);
+ testDriver.executeRequest(HttpRequestBuilder.create(method, path).build())
+ .render(baos);
+ assertEquals(payload, baos.toString(StandardCharsets.UTF_8));
}
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java
index 5aa3e196222..7fdfbcdbf03 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java
@@ -13,6 +13,7 @@ import java.nio.file.Path;
import java.util.List;
import java.util.Map;
import java.util.Optional;
+import java.util.Set;
import java.util.function.BiConsumer;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@@ -35,7 +36,7 @@ public class ClusterDeploymentMetricsRetrieverTest {
@Test
public void testMetricAggregation() throws IOException {
- List<URI> hosts = Stream.of(1, 2, 3)
+ List<URI> hosts = Stream.of(1, 2, 3, 4)
.map(item -> URI.create("http://localhost:" + wireMock.port() + "/" + item))
.collect(Collectors.toList());
@@ -54,13 +55,22 @@ public class ClusterDeploymentMetricsRetrieverTest {
.withStatus(200)
.withBody(containerMetrics())));
+ stubFor(get(urlEqualTo("/4"))
+ .willReturn(aResponse()
+ .withStatus(200)
+ .withBody(clustercontrollerMetrics())));
+
ClusterInfo expectedContentCluster = new ClusterInfo("content_cluster_id", "content");
ClusterInfo expectedContainerCluster = new ClusterInfo("container_cluster_id", "container");
Map<ClusterInfo, DeploymentMetricsAggregator> aggregatorMap = new ClusterDeploymentMetricsRetriever().requestMetricsGroupedByCluster(hosts);
+ assertEquals(Set.of(expectedContainerCluster, expectedContentCluster), aggregatorMap.keySet());
compareAggregators(
- new DeploymentMetricsAggregator().addDocumentCount(6000.0).addFeedingBlocked(0),
+ new DeploymentMetricsAggregator()
+ .addDocumentCount(6000.0)
+ .addMemoryUsage(0.89074, 0.8)
+ .addDiskUsage(0.83517, 0.75),
aggregatorMap.get(expectedContentCluster)
);
@@ -84,6 +94,10 @@ public class ClusterDeploymentMetricsRetrieverTest {
return Files.readString(Path.of("src/test/resources/metrics/content_metrics.json"));
}
+ private String clustercontrollerMetrics() throws IOException {
+ return Files.readString(Path.of("src/test/resources/metrics/clustercontroller_metrics.json"));
+ }
+
// Same tolerance value as used internally in MetricsAggregator.isZero
private static final double metricsTolerance = 0.001;
@@ -95,7 +109,10 @@ public class ClusterDeploymentMetricsRetrieverTest {
compareOptionals(expected.aggregateFeedRate(), actual.aggregateFeedRate(), assertDoubles);
compareOptionals(expected.aggregateQueryLatency(), actual.aggregateQueryLatency(), assertDoubles);
compareOptionals(expected.aggregateFeedLatency(), actual.aggregateFeedLatency(), assertDoubles);
- assertEquals(expected.feedingBlocked(), actual.feedingBlocked());
+ compareOptionals(expected.diskUsage(), actual.diskUsage(), (a, b) -> assertDoubles.accept(a.util(), b.util()));
+ compareOptionals(expected.diskUsage(), actual.diskUsage(), (a, b) -> assertDoubles.accept(a.feedBlockLimit(), b.feedBlockLimit()));
+ compareOptionals(expected.memoryUsage(), actual.memoryUsage(), (a, b) -> assertDoubles.accept(a.util(), b.util()));
+ compareOptionals(expected.memoryUsage(), actual.memoryUsage(), (a, b) -> assertDoubles.accept(a.feedBlockLimit(), b.feedBlockLimit()));
}
@SuppressWarnings("OptionalUsedAsFieldOrParameterType")
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactoryTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactoryTest.java
index 5d8b8c92472..747a0ad3241 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactoryTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactoryTest.java
@@ -16,7 +16,7 @@ public class ConfigResponseFactoryTest {
@Test
public void testUncompressedFactory() {
UncompressedConfigResponseFactory responseFactory = new UncompressedConfigResponseFactory();
- ConfigResponse response = responseFactory.createResponse(ConfigPayload.empty(), 3, false);
+ ConfigResponse response = responseFactory.createResponse(ConfigPayload.empty().toUtf8Array(true), 3, false);
assertEquals(CompressionType.UNCOMPRESSED, response.getCompressionInfo().getCompressionType());
assertEquals(3L,response.getGeneration());
assertEquals(2, response.getPayload().getByteLength());
@@ -25,7 +25,7 @@ public class ConfigResponseFactoryTest {
@Test
public void testLZ4CompressedFactory() {
LZ4ConfigResponseFactory responseFactory = new LZ4ConfigResponseFactory();
- ConfigResponse response = responseFactory.createResponse(ConfigPayload.empty(), 3, false);
+ ConfigResponse response = responseFactory.createResponse(ConfigPayload.empty().toUtf8Array(true), 3, false);
assertEquals(CompressionType.LZ4, response.getCompressionInfo().getCompressionType());
assertEquals(3L, response.getGeneration());
assertEquals(3, response.getPayload().getByteLength());
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java
index 941f2726b0e..f50238f2b85 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java
@@ -3,16 +3,32 @@ package com.yahoo.vespa.config.server.session;
import com.yahoo.config.model.api.ApplicationRoles;
import com.yahoo.config.model.api.ContainerEndpoint;
+import com.yahoo.config.model.api.EndpointCertificateMetadata;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.TenantName;
import com.yahoo.container.jdisc.HttpRequest;
+import com.yahoo.slime.ArrayInserter;
+import com.yahoo.slime.Cursor;
+import com.yahoo.slime.Injector;
+import com.yahoo.slime.Inspector;
+import com.yahoo.slime.ObjectInserter;
+import com.yahoo.slime.ObjectSymbolInserter;
+import com.yahoo.slime.Slime;
+import com.yahoo.slime.SlimeInserter;
+import com.yahoo.slime.SlimeUtils;
+import com.yahoo.vespa.config.server.tenant.ContainerEndpointSerializer;
+import com.yahoo.vespa.config.server.tenant.EndpointCertificateMetadataSerializer;
import org.junit.Test;
+import java.io.IOException;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.List;
+import java.util.Map;
+import java.util.Objects;
import java.util.Optional;
+import java.util.stream.Collectors;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertEquals;
@@ -26,13 +42,22 @@ import static org.junit.Assert.assertTrue;
public class PrepareParamsTest {
private static final String vespaVersion = "6.37.49";
- private static final String request = "http://foo:19071/application/v2/tenant/foo/application/bar?" +
+ private static final String baseRequest = "http://foo:19071/application/v2/tenant/foo/application/bar";
+ private static final String request = baseRequest + "?" +
PrepareParams.DRY_RUN_PARAM_NAME + "=true&" +
PrepareParams.VERBOSE_PARAM_NAME+ "=true&" +
PrepareParams.IGNORE_VALIDATION_PARAM_NAME + "=false&" +
PrepareParams.APPLICATION_NAME_PARAM_NAME + "=baz&" +
PrepareParams.VESPA_VERSION_PARAM_NAME + "=" + vespaVersion;
+ private static final String json = "{\n" +
+ "\"" + PrepareParams.DRY_RUN_PARAM_NAME + "\": true,\n" +
+ "\"" + PrepareParams.VERBOSE_PARAM_NAME+ "\": true,\n" +
+ "\"" + PrepareParams.IGNORE_VALIDATION_PARAM_NAME + "\": false,\n" +
+ "\"" + PrepareParams.APPLICATION_NAME_PARAM_NAME + "\":\"baz\",\n" +
+ "\"" + PrepareParams.VESPA_VERSION_PARAM_NAME + "\":\"" + vespaVersion + "\"\n" +
+ "}";
+
@Test
public void testCorrectParsing() {
PrepareParams prepareParams = createParams("http://foo:19071/application/v2/", TenantName.defaultName());
@@ -47,7 +72,7 @@ public class PrepareParamsTest {
}
@Test
- public void testCorrectParsingWithContainerEndpoints() {
+ public void testCorrectParsingWithContainerEndpoints() throws IOException {
var endpoints = List.of(new ContainerEndpoint("qrs1",
List.of("c1.example.com",
"c2.example.com")),
@@ -69,10 +94,16 @@ public class PrepareParamsTest {
var prepareParams = createParams(request + "&" + PrepareParams.CONTAINER_ENDPOINTS_PARAM_NAME +
"=" + encoded, TenantName.from("foo"));
assertEquals(endpoints, prepareParams.containerEndpoints());
+
+ // Verify using json object
+ var slime = SlimeUtils.jsonToSlime(json);
+ new Injector().inject(ContainerEndpointSerializer.endpointListToSlime(endpoints).get(), new ObjectInserter(slime.get(), PrepareParams.CONTAINER_ENDPOINTS_PARAM_NAME));
+ PrepareParams prepareParamsJson = PrepareParams.fromJson(SlimeUtils.toJsonBytes(slime), TenantName.from("foo"), Duration.ofSeconds(60));
+ assertPrepareParamsEqual(prepareParams, prepareParamsJson);
}
@Test
- public void testCorrectParsingWithApplicationRoles() {
+ public void testCorrectParsingWithApplicationRoles() throws IOException {
String req = request + "&" +
PrepareParams.APPLICATION_HOST_ROLE + "=hostRole&" +
PrepareParams.APPLICATION_CONTAINER_ROLE + "=containerRole";
@@ -82,15 +113,89 @@ public class PrepareParamsTest {
assertTrue(applicationRoles.isPresent());
assertEquals("hostRole", applicationRoles.get().applicationHostRole());
assertEquals("containerRole", applicationRoles.get().applicationContainerRole());
+
+ // Verify using json object
+ var slime = SlimeUtils.jsonToSlime(json);
+ var cursor = slime.get();
+ cursor.setString(PrepareParams.APPLICATION_HOST_ROLE, "hostRole");
+ cursor.setString(PrepareParams.APPLICATION_CONTAINER_ROLE, "containerRole");
+
+ PrepareParams prepareParamsJson = PrepareParams.fromJson(SlimeUtils.toJsonBytes(slime), TenantName.from("foo"), Duration.ofSeconds(60));
+ assertPrepareParamsEqual(prepareParams, prepareParamsJson);
}
@Test
- public void testQuotaParsing() {
+ public void testQuotaParsing() throws IOException {
var quotaParam = "{\"clusterSize\": 23, \"budget\": 23232323}";
var quotaEncoded = URLEncoder.encode(quotaParam, StandardCharsets.UTF_8);
var prepareParams = createParams(request + "&" + PrepareParams.QUOTA_PARAM_NAME + "=" + quotaEncoded, TenantName.from("foo"));
assertEquals(23, (int) prepareParams.quota().get().maxClusterSize().get());
assertEquals(23232323, (int) prepareParams.quota().get().budget().get());
+
+ // Verify using json object
+ var slime = SlimeUtils.jsonToSlime(json);
+ new Injector().inject(SlimeUtils.jsonToSlime(quotaParam).get(), new ObjectInserter(slime.get(), PrepareParams.QUOTA_PARAM_NAME));
+ PrepareParams prepareParamsJson = PrepareParams.fromJson(SlimeUtils.toJsonBytes(slime), TenantName.from("foo"), Duration.ofSeconds(60));
+ assertPrepareParamsEqual(prepareParams, prepareParamsJson);
+ }
+
+ @Test
+ public void testEndpointCertificateParsing() throws IOException {
+ var certMeta = new EndpointCertificateMetadata("key", "cert", 3);
+ var slime = new Slime();
+ EndpointCertificateMetadataSerializer.toSlime(certMeta, slime.setObject());
+ String encoded = URLEncoder.encode(new String(SlimeUtils.toJsonBytes(slime), StandardCharsets.UTF_8), StandardCharsets.UTF_8);
+ var prepareParams = createParams(request + "&" + PrepareParams.ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME + "=" + encoded, TenantName.from("foo"));
+ assertTrue(prepareParams.endpointCertificateMetadata().isPresent());
+ assertEquals("key", prepareParams.endpointCertificateMetadata().get().keyName());
+ assertEquals("cert", prepareParams.endpointCertificateMetadata().get().certName());
+ assertEquals(3, prepareParams.endpointCertificateMetadata().get().version());
+
+ // Verify using json object
+ var root = SlimeUtils.jsonToSlime(json);
+ new Injector().inject(slime.get(), new ObjectInserter(root.get(), PrepareParams.ENDPOINT_CERTIFICATE_METADATA_PARAM_NAME));
+ PrepareParams prepareParamsJson = PrepareParams.fromJson(SlimeUtils.toJsonBytes(root), TenantName.from("foo"), Duration.ofSeconds(60));
+ assertPrepareParamsEqual(prepareParams, prepareParamsJson);
+ }
+
+ @Test
+ public void compareEmptyUrlparamsVsJson() {
+ TenantName tenantName = TenantName.from("foo");
+ Duration barrierTimeout = Duration.ofSeconds(60);
+ HttpRequest httpRequest = HttpRequest.createTestRequest(baseRequest, com.yahoo.jdisc.http.HttpRequest.Method.POST);
+ PrepareParams urlPrepareParams = PrepareParams.fromHttpRequest(httpRequest, tenantName, barrierTimeout);
+ PrepareParams jsonPrepareParams = PrepareParams.fromJson("{}".getBytes(StandardCharsets.UTF_8), tenantName, barrierTimeout);
+
+ assertPrepareParamsEqual(urlPrepareParams, jsonPrepareParams);
+ }
+
+ @Test
+ public void compareStandardUrlparamsVsJson() {
+ TenantName tenantName = TenantName.from("foo");
+ Duration barrierTimeout = Duration.ofSeconds(60);
+ HttpRequest httpRequest = HttpRequest.createTestRequest(request, com.yahoo.jdisc.http.HttpRequest.Method.POST);
+ PrepareParams urlPrepareParams = PrepareParams.fromHttpRequest(httpRequest, tenantName, barrierTimeout);
+ PrepareParams jsonPrepareParams = PrepareParams.fromJson(json.getBytes(StandardCharsets.UTF_8), tenantName, barrierTimeout);
+ assertPrepareParamsEqual(urlPrepareParams, jsonPrepareParams);
+ }
+
+ private void assertPrepareParamsEqual(PrepareParams urlParams, PrepareParams jsonParams) {
+ assertEquals(urlParams.ignoreValidationErrors(), jsonParams.ignoreValidationErrors());
+ assertEquals(urlParams.isDryRun(), jsonParams.isDryRun());
+ assertEquals(urlParams.isVerbose(), jsonParams.isVerbose());
+ assertEquals(urlParams.isBootstrap(), jsonParams.isBootstrap());
+ assertEquals(urlParams.force(), jsonParams.force());
+ assertEquals(urlParams.waitForResourcesInPrepare(), jsonParams.waitForResourcesInPrepare());
+ assertEquals(urlParams.getApplicationId(), jsonParams.getApplicationId());
+ assertEquals(urlParams.getTimeoutBudget().timeout(), jsonParams.getTimeoutBudget().timeout());
+ assertEquals(urlParams.vespaVersion(), jsonParams.vespaVersion());
+ assertEquals(urlParams.containerEndpoints(), jsonParams.containerEndpoints());
+ assertEquals(urlParams.endpointCertificateMetadata(), jsonParams.endpointCertificateMetadata());
+ assertEquals(urlParams.dockerImageRepository(), jsonParams.dockerImageRepository());
+ assertEquals(urlParams.athenzDomain(), jsonParams.athenzDomain());
+ assertEquals(urlParams.applicationRoles(), jsonParams.applicationRoles());
+ assertEquals(urlParams.quota(), jsonParams.quota());
+ assertEquals(urlParams.tenantSecretStores(), jsonParams.tenantSecretStores());
}
// Create PrepareParams from a request (based on uri and tenant name)
diff --git a/configserver/src/test/resources/metrics/clustercontroller_metrics.json b/configserver/src/test/resources/metrics/clustercontroller_metrics.json
new file mode 100644
index 00000000000..9afcb34d77d
--- /dev/null
+++ b/configserver/src/test/resources/metrics/clustercontroller_metrics.json
@@ -0,0 +1,33 @@
+{
+ "services": [
+ {
+ "name": "vespa.container-clustercontroller",
+ "timestamp": 1619529109,
+ "metrics": [
+ {
+ "values": {
+ "cluster-controller.resource_usage.disk_limit.last": 0.75,
+ "cluster-controller.resource_usage.nodes_above_limit.last": 0,
+ "cluster-controller.resource_usage.max_memory_utilization.last": 0.8907474348626,
+ "cluster-controller.resource_usage.max_disk_utilization.last": 0.8351705494609,
+ "cluster-controller.cluster-state-change.count": 2,
+ "cluster-controller.resource_usage.memory_limit.last": 0.8
+ },
+ "dimensions": {
+ "clustertype": "content",
+ "clusterid": "content_cluster_id"
+ }
+ },
+ {
+ "values": {
+ "some.other.metrics": 1
+ },
+ "dimensions": {
+ "clustertype": "admin",
+ "clusterid": "cluster-controllers"
+ }
+ }
+ ]
+ }
+ ]
+}
diff --git a/configutil/src/apps/configstatus/main.cpp b/configutil/src/apps/configstatus/main.cpp
index befd0e649f6..218844a2a90 100644
--- a/configutil/src/apps/configstatus/main.cpp
+++ b/configutil/src/apps/configstatus/main.cpp
@@ -33,7 +33,7 @@ Application::Application()
Application::~Application() { }
int Application::parseOpts() {
- char c = '?';
+ int c = '?';
const char *optArg = NULL;
int optInd = 0;
while ((c = GetOpt("c:s:vC:f:", optArg, optInd)) != -1) {
diff --git a/configutil/src/apps/modelinspect/main.cpp b/configutil/src/apps/modelinspect/main.cpp
index c43294be8de..9125cbff379 100644
--- a/configutil/src/apps/modelinspect/main.cpp
+++ b/configutil/src/apps/modelinspect/main.cpp
@@ -32,7 +32,7 @@ Application::~Application() { }
int
Application::parseOpts()
{
- char c = '?';
+ int c = '?';
const char *optArg = NULL;
int optInd = 0;
while ((c = GetOpt("hvut:c:C:", optArg, optInd)) != -1) {
diff --git a/container-core/abi-spec.json b/container-core/abi-spec.json
index 5f9185ebef6..efe6701342f 100644
--- a/container-core/abi-spec.json
+++ b/container-core/abi-spec.json
@@ -388,11 +388,7 @@
"public long getQueryStartTime()",
"public long getTimeout()"
],
- "fields": [
- "protected long summaryStartTime",
- "protected long queryStartTime",
- "protected long timeout"
- ]
+ "fields": []
},
"com.yahoo.container.handler.VipStatus": {
"superClass": "java.lang.Object",
@@ -1430,6 +1426,18 @@
],
"fields": []
},
+ "com.yahoo.jdisc.http.CookieHelper": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public static java.util.List decodeSetCookieHeader(com.yahoo.jdisc.HeaderFields)",
+ "public static void encodeSetCookieHeader(com.yahoo.jdisc.HeaderFields, java.util.List)"
+ ],
+ "fields": []
+ },
"com.yahoo.jdisc.http.HttpHeaders$Names": {
"superClass": "java.lang.Object",
"interfaces": [],
@@ -2294,25 +2302,6 @@
],
"fields": []
},
- "com.yahoo.jdisc.http.filter.JdiscFilterResponse": {
- "superClass": "com.yahoo.jdisc.http.filter.DiscFilterResponse",
- "interfaces": [],
- "attributes": [
- "public"
- ],
- "methods": [
- "public void <init>(com.yahoo.jdisc.http.HttpResponse)",
- "public void setStatus(int)",
- "public void setHeader(java.lang.String, java.lang.String)",
- "public void removeHeaders(java.lang.String)",
- "public void setHeaders(java.lang.String, java.lang.String)",
- "public void setHeaders(java.lang.String, java.util.List)",
- "public void addHeader(java.lang.String, java.lang.String)",
- "public java.lang.String getHeader(java.lang.String)",
- "public void setCookies(java.util.List)"
- ],
- "fields": []
- },
"com.yahoo.jdisc.http.filter.RequestFilter": {
"superClass": "java.lang.Object",
"interfaces": [
diff --git a/container-core/src/main/java/com/yahoo/container/handler/Timing.java b/container-core/src/main/java/com/yahoo/container/handler/Timing.java
index 0026854ce61..a6322289074 100644
--- a/container-core/src/main/java/com/yahoo/container/handler/Timing.java
+++ b/container-core/src/main/java/com/yahoo/container/handler/Timing.java
@@ -17,11 +17,11 @@ package com.yahoo.container.handler;
*/
public class Timing {
- protected long summaryStartTime;
+ private final long summaryStartTime;
- protected long queryStartTime;
+ private final long queryStartTime;
- protected long timeout;
+ private final long timeout;
/**
* Do consider using
diff --git a/container-core/src/main/java/com/yahoo/container/jdisc/HttpRequest.java b/container-core/src/main/java/com/yahoo/container/jdisc/HttpRequest.java
index e202442479f..96a7902a076 100644
--- a/container-core/src/main/java/com/yahoo/container/jdisc/HttpRequest.java
+++ b/container-core/src/main/java/com/yahoo/container/jdisc/HttpRequest.java
@@ -573,7 +573,7 @@ public class HttpRequest {
@Override
public long currentTimeMillis() {
- return 0;
+ return System.currentTimeMillis();
}
};
}
diff --git a/container-core/src/main/java/com/yahoo/container/jdisc/LoggingRequestHandler.java b/container-core/src/main/java/com/yahoo/container/jdisc/LoggingRequestHandler.java
index 0d20fc05586..222df8fb266 100644
--- a/container-core/src/main/java/com/yahoo/container/jdisc/LoggingRequestHandler.java
+++ b/container-core/src/main/java/com/yahoo/container/jdisc/LoggingRequestHandler.java
@@ -134,7 +134,10 @@ public abstract class LoggingRequestHandler extends ThreadedHttpRequestHandler {
long summaryStartTime = 0;
if (t != null) {
timeoutInterval = t.getTimeout();
- requestOverhead = t.getQueryStartTime() - startTime;
+ long queryStartTime = t.getQueryStartTime();
+ if (queryStartTime > 0) {
+ requestOverhead = queryStartTime - startTime;
+ }
summaryStartTime = t.getSummaryStartTime();
}
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/CookieHelper.java b/container-core/src/main/java/com/yahoo/jdisc/http/CookieHelper.java
new file mode 100644
index 00000000000..897c18d1129
--- /dev/null
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/CookieHelper.java
@@ -0,0 +1,38 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.jdisc.http;
+
+import com.yahoo.jdisc.HeaderFields;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * Helper for encoding/decoding cookies on request/response.
+ *
+ * @author bjorncs
+ */
+public class CookieHelper {
+
+ private CookieHelper() {}
+
+ public static List<Cookie> decodeSetCookieHeader(HeaderFields headers) {
+ List<String> cookies = headers.get(HttpHeaders.Names.SET_COOKIE);
+ if (cookies == null) {
+ return Collections.emptyList();
+ }
+ List<Cookie> ret = new LinkedList<>();
+ for (String cookie : cookies) {
+ ret.add(Cookie.fromSetCookieHeader(cookie));
+ }
+ return ret;
+ }
+
+ public static void encodeSetCookieHeader(HeaderFields headers, List<Cookie> cookies) {
+ headers.remove(HttpHeaders.Names.SET_COOKIE);
+ for (Cookie cookie : cookies) {
+ headers.add(HttpHeaders.Names.SET_COOKIE, Cookie.toSetCookieHeaders(Arrays.asList(cookie)));
+ }
+ }
+}
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/HttpResponse.java b/container-core/src/main/java/com/yahoo/jdisc/http/HttpResponse.java
index f7138ba0e2b..2e2553e421a 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/HttpResponse.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/HttpResponse.java
@@ -9,9 +9,6 @@ import com.yahoo.jdisc.handler.ContentChannel;
import com.yahoo.jdisc.handler.ResponseHandler;
import com.yahoo.jdisc.http.servlet.ServletOrJdiscHttpResponse;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.LinkedList;
import java.util.List;
/**
@@ -62,23 +59,13 @@ public class HttpResponse extends Response implements ServletOrJdiscHttpResponse
target.addAll(headers());
}
+ @Override
public List<Cookie> decodeSetCookieHeader() {
- List<String> cookies = headers().get(HttpHeaders.Names.SET_COOKIE);
- if (cookies == null) {
- return Collections.emptyList();
- }
- List<Cookie> ret = new LinkedList<>();
- for (String cookie : cookies) {
- ret.add(Cookie.fromSetCookieHeader(cookie));
- }
- return ret;
+ return CookieHelper.decodeSetCookieHeader(headers());
}
public void encodeSetCookieHeader(List<Cookie> cookies) {
- headers().remove(HttpHeaders.Names.SET_COOKIE);
- for (Cookie cookie : cookies) {
- headers().add(HttpHeaders.Names.SET_COOKIE, Cookie.toSetCookieHeaders(Arrays.asList(cookie)));
- }
+ CookieHelper.encodeSetCookieHeader(headers(), cookies);
}
/**
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/filter/JdiscFilterResponse.java b/container-core/src/main/java/com/yahoo/jdisc/http/filter/JdiscFilterResponse.java
index ff81359f93c..d723e2d4e5c 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/filter/JdiscFilterResponse.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/filter/JdiscFilterResponse.java
@@ -1,22 +1,26 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.jdisc.http.filter;
+import com.yahoo.jdisc.HeaderFields;
+import com.yahoo.jdisc.Response;
import com.yahoo.jdisc.http.Cookie;
+import com.yahoo.jdisc.http.CookieHelper;
import com.yahoo.jdisc.http.HttpResponse;
+import com.yahoo.jdisc.http.servlet.ServletOrJdiscHttpResponse;
import java.util.List;
+import java.util.Map;
/**
* JDisc implementation of a filter request.
- *
- * @since 5.27
*/
-public class JdiscFilterResponse extends DiscFilterResponse {
+class JdiscFilterResponse extends DiscFilterResponse {
- private final HttpResponse parent;
+ private final Response parent;
- public JdiscFilterResponse(HttpResponse parent) {
- super(parent);
+ JdiscFilterResponse(Response parent) {
+ // A separate adapter is required as DiscFilterResponse will invoke methods from ServletOrJdiscHttpResponse parameter in its constructor
+ super(parent instanceof HttpResponse ? (HttpResponse)parent : new Adapter(parent));
this.parent = parent;
}
@@ -61,7 +65,20 @@ public class JdiscFilterResponse extends DiscFilterResponse {
@Override
public void setCookies(List<Cookie> cookies) {
- parent.encodeSetCookieHeader(cookies);
+ CookieHelper.encodeSetCookieHeader(parent.headers(), cookies);
+ }
+
+ private static class Adapter implements ServletOrJdiscHttpResponse {
+ private final Response response;
+
+ Adapter(Response response) {
+ this.response = response;
+ }
+
+ @Override public void copyHeaders(HeaderFields target) { target.addAll(response.headers()); }
+ @Override public int getStatus() { return response.getStatus(); }
+ @Override public Map<String, Object> context() { return response.context(); }
+ @Override public List<Cookie> decodeSetCookieHeader() { return CookieHelper.decodeSetCookieHeader(response.headers()); }
}
}
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/filter/SecurityResponseFilterChain.java b/container-core/src/main/java/com/yahoo/jdisc/http/filter/SecurityResponseFilterChain.java
index d45b406a375..ad0fb75ebff 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/filter/SecurityResponseFilterChain.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/filter/SecurityResponseFilterChain.java
@@ -1,6 +1,11 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.jdisc.http.filter;
+import com.yahoo.jdisc.AbstractResource;
+import com.yahoo.jdisc.Request;
+import com.yahoo.jdisc.Response;
+import com.yahoo.jdisc.http.HttpRequest;
+
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
@@ -8,16 +13,10 @@ import java.util.Collections;
import java.util.List;
import java.util.Optional;
-import com.yahoo.jdisc.AbstractResource;
-import com.yahoo.jdisc.Request;
-import com.yahoo.jdisc.Response;
-import com.yahoo.jdisc.http.HttpRequest;
-import com.yahoo.jdisc.http.HttpResponse;
-
/**
* Implementation of TypedFilterChain for DiscFilterResponse
- * @author tejalk
*
+ * @author Tejal Knot
*/
public class SecurityResponseFilterChain extends AbstractResource implements ResponseFilter {
@@ -31,12 +30,9 @@ public class SecurityResponseFilterChain extends AbstractResource implements Res
@Override
public void filter(Response response, Request request) {
- if(response instanceof HttpResponse) {
- DiscFilterResponse discFilterResponse = new JdiscFilterResponse((HttpResponse)response);
- RequestView requestView = new RequestViewImpl(request);
- filter(requestView, discFilterResponse);
- }
-
+ DiscFilterResponse discFilterResponse = new JdiscFilterResponse(response);
+ RequestView requestView = new RequestViewImpl(request);
+ filter(requestView, discFilterResponse);
}
public void filter(RequestView requestView, DiscFilterResponse response) {
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectionMetricAggregator.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectionMetricAggregator.java
new file mode 100644
index 00000000000..a92cbf264a4
--- /dev/null
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectionMetricAggregator.java
@@ -0,0 +1,65 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.jdisc.http.server.jetty;
+
+import com.yahoo.jdisc.Metric;
+import com.yahoo.jdisc.http.ServerConfig;
+import org.eclipse.jetty.io.Connection;
+import org.eclipse.jetty.server.HttpChannel;
+import org.eclipse.jetty.server.Request;
+import org.eclipse.jetty.util.component.AbstractLifeCycle;
+
+import java.util.Collection;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static com.yahoo.jdisc.http.server.jetty.RequestUtils.getConnector;
+import static com.yahoo.jdisc.http.server.jetty.RequestUtils.isHttpServerConnection;
+
+/**
+ * @author bjorncs
+ */
+class ConnectionMetricAggregator extends AbstractLifeCycle implements Connection.Listener, HttpChannel.Listener {
+
+ private final SimpleConcurrentIdentityHashMap<Connection, ConnectionMetrics> connectionsMetrics = new SimpleConcurrentIdentityHashMap<>();
+
+ private final Metric metricAggregator;
+ private final Collection<String> monitoringHandlerPaths;
+
+ ConnectionMetricAggregator(ServerConfig serverConfig, Metric metricAggregator) {
+ this.monitoringHandlerPaths = serverConfig.metric().monitoringHandlerPaths();
+ this.metricAggregator = metricAggregator;
+ }
+
+ @Override public void onOpened(Connection connection) {}
+
+ @Override
+ public void onClosed(Connection connection) {
+ if (isHttpServerConnection(connection)) {
+ connectionsMetrics.remove(connection).ifPresent(metrics ->
+ metricAggregator.set(MetricDefinitions.REQUESTS_PER_CONNECTION, metrics.requests.get(), metrics.metricContext));
+ }
+ }
+
+ @Override
+ public void onRequestBegin(Request request) {
+ if (monitoringHandlerPaths.stream()
+ .anyMatch(pathPrefix -> request.getRequestURI().startsWith(pathPrefix))) {
+ return;
+ }
+ Connection connection = request.getHttpChannel().getConnection();
+ if (isHttpServerConnection(connection)) {
+ ConnectionMetrics metrics = this.connectionsMetrics.computeIfAbsent(
+ connection,
+ () -> new ConnectionMetrics(getConnector(request).getConnectorMetricContext()));
+ metrics.requests.incrementAndGet();
+ }
+ }
+
+ private static class ConnectionMetrics {
+ final AtomicLong requests = new AtomicLong();
+ final Metric.Context metricContext;
+
+ ConnectionMetrics(Metric.Context metricContext) {
+ this.metricContext = metricContext;
+ }
+ }
+}
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactory.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactory.java
index 71c0b3a0225..0d3ab1ce32d 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactory.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactory.java
@@ -20,7 +20,6 @@ import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.server.SslConnectionFactory;
import org.eclipse.jetty.util.ssl.SslContextFactory;
-import java.util.Collection;
import java.util.List;
/**
@@ -67,9 +66,11 @@ public class ConnectorFactory {
return connectorConfig;
}
- public ServerConnector createConnector(final Metric metric, final Server server, JettyConnectionLogger connectionLogger) {
+ public ServerConnector createConnector(final Metric metric, final Server server, JettyConnectionLogger connectionLogger,
+ ConnectionMetricAggregator connectionMetricAggregator) {
ServerConnector connector = new JDiscServerConnector(
- connectorConfig, metric, server, connectionLogger, createConnectionFactories(metric).toArray(ConnectionFactory[]::new));
+ connectorConfig, metric, server, connectionLogger, connectionMetricAggregator,
+ createConnectionFactories(metric).toArray(ConnectionFactory[]::new));
connector.setPort(connectorConfig.listenPort());
connector.setName(connectorConfig.name());
connector.setAcceptQueueSize(connectorConfig.acceptQueueSize());
@@ -103,7 +104,7 @@ public class ConnectorFactory {
HttpConnectionFactory http1Factory = newHttp1ConnectionFactory();
if (connectorConfig.http2Enabled()) {
HTTP2ServerConnectionFactory http2Factory = newHttp2ConnectionFactory();
- ALPNServerConnectionFactory alpnFactory = newAlpnConnectionFactory(List.of(http1Factory, http2Factory), http1Factory);
+ ALPNServerConnectionFactory alpnFactory = newAlpnConnectionFactory();
SslConnectionFactory sslFactory = newSslConnectionFactory(metric, alpnFactory);
if (proxyProtocolConfig.enabled()) {
ProxyConnectionFactory proxyProtocolFactory = newProxyProtocolConnectionFactory(sslFactory);
@@ -160,7 +161,9 @@ public class ConnectorFactory {
}
private HTTP2ServerConnectionFactory newHttp2ConnectionFactory() {
- return new HTTP2ServerConnectionFactory(newHttpConfiguration());
+ HTTP2ServerConnectionFactory factory = new HTTP2ServerConnectionFactory(newHttpConfiguration());
+ factory.setMaxConcurrentStreams(4096);
+ return factory;
}
private SslConnectionFactory newSslConnectionFactory(Metric metric, ConnectionFactory wrappedFactory) {
@@ -170,11 +173,9 @@ public class ConnectorFactory {
return connectionFactory;
}
- private ALPNServerConnectionFactory newAlpnConnectionFactory(Collection<ConnectionFactory> alternatives,
- ConnectionFactory defaultFactory) {
- String[] protocols = alternatives.stream().map(ConnectionFactory::getProtocol).toArray(String[]::new);
- ALPNServerConnectionFactory factory = new ALPNServerConnectionFactory(protocols);
- factory.setDefaultProtocol(defaultFactory.getProtocol());
+ private ALPNServerConnectionFactory newAlpnConnectionFactory() {
+ ALPNServerConnectionFactory factory = new ALPNServerConnectionFactory("h2", "http/1.1");
+ factory.setDefaultProtocol("http/1.1");
return factory;
}
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpResponseStatisticsCollector.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpResponseStatisticsCollector.java
index 82c445c7ca9..ac4e743784b 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpResponseStatisticsCollector.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpResponseStatisticsCollector.java
@@ -49,6 +49,10 @@ public class HttpResponseStatisticsCollector extends HandlerWrapper implements G
HTTP, HTTPS, OTHER
}
+ public enum HttpProtocol {
+ HTTP1, HTTP2, OTHER
+ }
+
private static final String[] HTTP_RESPONSE_GROUPS = {
MetricDefinitions.RESPONSES_1XX,
MetricDefinitions.RESPONSES_2XX,
@@ -60,19 +64,21 @@ public class HttpResponseStatisticsCollector extends HandlerWrapper implements G
};
private final AtomicLong inFlight = new AtomicLong();
- private final LongAdder[][][][] statistics;
+ private final LongAdder[][][][][] statistics; // TODO Rewrite me to a smarter data structure
public HttpResponseStatisticsCollector(List<String> monitoringHandlerPaths, List<String> searchHandlerPaths) {
this.monitoringHandlerPaths = monitoringHandlerPaths;
this.searchHandlerPaths = searchHandlerPaths;
- statistics = new LongAdder[HttpScheme.values().length][HttpMethod.values().length][][];
- for (int scheme = 0; scheme < HttpScheme.values().length; ++scheme) {
- for (int method = 0; method < HttpMethod.values().length; method++) {
- statistics[scheme][method] = new LongAdder[HTTP_RESPONSE_GROUPS.length][];
- for (int group = 0; group < HTTP_RESPONSE_GROUPS.length; group++) {
- statistics[scheme][method][group] = new LongAdder[HttpRequest.RequestType.values().length];
- for (int requestType = 0; requestType < HttpRequest.RequestType.values().length; requestType++) {
- statistics[scheme][method][group][requestType] = new LongAdder();
+ statistics = new LongAdder[HttpProtocol.values().length][HttpScheme.values().length][HttpMethod.values().length][][];
+ for (int protocol = 0; protocol < HttpProtocol.values().length; protocol++) {
+ for (int scheme = 0; scheme < HttpScheme.values().length; ++scheme) {
+ for (int method = 0; method < HttpMethod.values().length; method++) {
+ statistics[protocol][scheme][method] = new LongAdder[HTTP_RESPONSE_GROUPS.length][];
+ for (int group = 0; group < HTTP_RESPONSE_GROUPS.length; group++) {
+ statistics[protocol][scheme][method][group] = new LongAdder[HttpRequest.RequestType.values().length];
+ for (int requestType = 0; requestType < HttpRequest.RequestType.values().length; requestType++) {
+ statistics[protocol][scheme][method][group][requestType] = new LongAdder();
+ }
}
}
}
@@ -129,13 +135,14 @@ public class HttpResponseStatisticsCollector extends HandlerWrapper implements G
private void observeEndOfRequest(Request request, HttpServletResponse flushableResponse) throws IOException {
int group = groupIndex(request);
if (group >= 0) {
+ HttpProtocol protocol = getProtocol(request);
HttpScheme scheme = getScheme(request);
HttpMethod method = getMethod(request);
HttpRequest.RequestType requestType = getRequestType(request);
- statistics[scheme.ordinal()][method.ordinal()][group][requestType.ordinal()].increment();
+ statistics[protocol.ordinal()][scheme.ordinal()][method.ordinal()][group][requestType.ordinal()].increment();
if (group == 5 || group == 6) { // if 401/403, also increment 4xx
- statistics[scheme.ordinal()][method.ordinal()][3][requestType.ordinal()].increment();
+ statistics[protocol.ordinal()][scheme.ordinal()][method.ordinal()][3][requestType.ordinal()].increment();
}
}
@@ -161,7 +168,7 @@ public class HttpResponseStatisticsCollector extends HandlerWrapper implements G
}
index = index / 100 - 1; // 1xx = 0, 2xx = 1 etc.
- if (index < 0 || index >= statistics[0].length) {
+ if (index < 0 || index >= statistics[0][0].length) {
return -1;
} else {
return index;
@@ -200,6 +207,20 @@ public class HttpResponseStatisticsCollector extends HandlerWrapper implements G
}
}
+ private HttpProtocol getProtocol(Request request) {
+ switch (request.getProtocol()) {
+ case "HTTP/1":
+ case "HTTP/1.0":
+ case "HTTP/1.1":
+ return HttpProtocol.HTTP1;
+ case "HTTP/2":
+ case "HTTP/2.0":
+ return HttpProtocol.HTTP2;
+ default:
+ return HttpProtocol.OTHER;
+ }
+ }
+
private HttpRequest.RequestType getRequestType(Request request) {
HttpRequest.RequestType requestType = (HttpRequest.RequestType)request.getAttribute(requestTypeAttribute);
if (requestType != null) return requestType;
@@ -221,15 +242,18 @@ public class HttpResponseStatisticsCollector extends HandlerWrapper implements G
public List<StatisticsEntry> takeStatistics() {
var ret = new ArrayList<StatisticsEntry>();
- for (HttpScheme scheme : HttpScheme.values()) {
- int schemeIndex = scheme.ordinal();
- for (HttpMethod method : HttpMethod.values()) {
- int methodIndex = method.ordinal();
- for (int group = 0; group < HTTP_RESPONSE_GROUPS.length; group++) {
- for (HttpRequest.RequestType type : HttpRequest.RequestType.values()) {
- long value = statistics[schemeIndex][methodIndex][group][type.ordinal()].sumThenReset();
- if (value > 0) {
- ret.add(new StatisticsEntry(scheme.name().toLowerCase(), method.name(), HTTP_RESPONSE_GROUPS[group], type.name().toLowerCase(), value));
+ for (HttpProtocol protocol : HttpProtocol.values()) {
+ int protocolIndex = protocol.ordinal();
+ for (HttpScheme scheme : HttpScheme.values()) {
+ int schemeIndex = scheme.ordinal();
+ for (HttpMethod method : HttpMethod.values()) {
+ int methodIndex = method.ordinal();
+ for (int group = 0; group < HTTP_RESPONSE_GROUPS.length; group++) {
+ for (HttpRequest.RequestType type : HttpRequest.RequestType.values()) {
+ long value = statistics[protocolIndex][schemeIndex][methodIndex][group][type.ordinal()].sumThenReset();
+ if (value > 0) {
+ ret.add(new StatisticsEntry(protocol.name().toLowerCase(), scheme.name().toLowerCase(), method.name(), HTTP_RESPONSE_GROUPS[group], type.name().toLowerCase(), value));
+ }
}
}
}
@@ -272,13 +296,15 @@ public class HttpResponseStatisticsCollector extends HandlerWrapper implements G
public static class StatisticsEntry {
+ public final String protocol;
public final String scheme;
public final String method;
public final String name;
public final String requestType;
public final long value;
- public StatisticsEntry(String scheme, String method, String name, String requestType, long value) {
+ public StatisticsEntry(String protocol, String scheme, String method, String name, String requestType, long value) {
+ this.protocol = protocol;
this.scheme = scheme;
this.method = method;
this.name = name;
@@ -288,7 +314,8 @@ public class HttpResponseStatisticsCollector extends HandlerWrapper implements G
@Override
public String toString() {
- return "scheme: " + scheme +
+ return "protocol: " + protocol +
+ ", scheme: " + scheme +
", method: " + method +
", name: " + name +
", requestType: " + requestType +
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscContext.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscContext.java
index b37a7352dc6..48c70095918 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscContext.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscContext.java
@@ -5,18 +5,16 @@ import com.yahoo.jdisc.Metric;
import com.yahoo.jdisc.http.ServerConfig;
import com.yahoo.jdisc.service.CurrentContainer;
-import java.util.concurrent.Executor;
-
public class JDiscContext {
final FilterResolver filterResolver;
final CurrentContainer container;
- final Executor janitor;
+ final Janitor janitor;
final Metric metric;
final ServerConfig serverConfig;
public JDiscContext(FilterBindings filterBindings,
CurrentContainer container,
- Executor janitor,
+ Janitor janitor,
Metric metric,
ServerConfig serverConfig) {
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscServerConnector.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscServerConnector.java
index 99d0c5c8d8c..ae475ca4517 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscServerConnector.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JDiscServerConnector.java
@@ -31,7 +31,8 @@ class JDiscServerConnector extends ServerConnector {
private final String connectorName;
private final int listenPort;
- JDiscServerConnector(ConnectorConfig config, Metric metric, Server server, JettyConnectionLogger connectionLogger, ConnectionFactory... factories) {
+ JDiscServerConnector(ConnectorConfig config, Metric metric, Server server, JettyConnectionLogger connectionLogger,
+ ConnectionMetricAggregator connectionMetricAggregator, ConnectionFactory... factories) {
super(server, factories);
this.config = config;
this.tcpKeepAlive = config.tcpKeepAliveEnabled();
@@ -48,6 +49,7 @@ class JDiscServerConnector extends ServerConnector {
new ConnectionThrottler(this, throttlingConfig).registerWithConnector();
}
addBean(connectionLogger);
+ addBean(connectionMetricAggregator);
}
@Override
@@ -76,6 +78,7 @@ class JDiscServerConnector extends ServerConnector {
dimensions.put(MetricDefinitions.METHOD_DIMENSION, method);
dimensions.put(MetricDefinitions.SCHEME_DIMENSION, scheme);
dimensions.put(MetricDefinitions.CLIENT_AUTHENTICATED_DIMENSION, Boolean.toString(clientAuthenticated));
+ dimensions.put(MetricDefinitions.PROTOCOL_DIMENSION, request.getProtocol());
String serverName = Optional.ofNullable(request.getServerName()).orElse("unknown");
dimensions.put(MetricDefinitions.REQUEST_SERVER_NAME_DIMENSION, serverName);
dimensions.putAll(extraDimensions);
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/Janitor.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/Janitor.java
new file mode 100644
index 00000000000..cd2b9ca23c0
--- /dev/null
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/Janitor.java
@@ -0,0 +1,46 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.jdisc.http.server.jetty;
+
+import com.google.inject.Inject;
+import com.yahoo.component.AbstractComponent;
+import com.yahoo.concurrent.DaemonThreadFactory;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.logging.Logger;
+
+/**
+ * Separate janitor threadpool for tasks that cannot be executed on the jdisc default threadpool due to risk of deadlock.
+ * Modelled as a separate component as the underlying executor must be available across {@link JettyHttpServer} instances.
+ *
+ * @author bjorncs
+ */
+public class Janitor extends AbstractComponent {
+
+ private static final Logger log = Logger.getLogger(Janitor.class.getName());
+
+ private final ExecutorService executor;
+
+ @Inject
+ public Janitor() {
+ int threadPoolSize = Math.max(2, Runtime.getRuntime().availableProcessors()/4);
+ log.info("Creating janitor executor with " + threadPoolSize + " threads");
+ this.executor = Executors.newFixedThreadPool(threadPoolSize, new DaemonThreadFactory("jdisc-janitor-"));
+ }
+
+ public void scheduleTask(Runnable task) { executor.execute(task); }
+
+ @Override
+ public void deconstruct() {
+ try {
+ executor.shutdown();
+ if (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
+ log.warning("Failed to shutdown janitor in time");
+ }
+ } catch (InterruptedException e) {
+ log.warning("Interrupted while shutting down janitor");
+ Thread.currentThread().interrupt();
+ }
+ }
+}
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyConnectionLogger.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyConnectionLogger.java
index bbeccb61c8a..d337131b313 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyConnectionLogger.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyConnectionLogger.java
@@ -6,6 +6,7 @@ import com.yahoo.container.logging.ConnectionLogEntry;
import com.yahoo.container.logging.ConnectionLogEntry.SslHandshakeFailure.ExceptionEntry;
import com.yahoo.io.HexDump;
import com.yahoo.jdisc.http.ServerConfig;
+import org.eclipse.jetty.alpn.server.ALPNServerConnection;
import org.eclipse.jetty.http2.server.HTTP2ServerConnection;
import org.eclipse.jetty.io.Connection;
import org.eclipse.jetty.io.EndPoint;
@@ -32,10 +33,7 @@ import java.time.Instant;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
-import java.util.Objects;
import java.util.UUID;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -50,8 +48,10 @@ class JettyConnectionLogger extends AbstractLifeCycle implements Connection.List
private static final Logger log = Logger.getLogger(JettyConnectionLogger.class.getName());
- private final ConcurrentMap<IdentityKey<SocketChannelEndPoint>, ConnectionInfo> connectionInfo = new ConcurrentHashMap<>();
- private final ConcurrentMap<IdentityKey<SSLEngine>, ConnectionInfo> sslToConnectionInfo = new ConcurrentHashMap<>();
+ private final SimpleConcurrentIdentityHashMap<SocketChannelEndPoint, ConnectionInfo> connectionInfos = new SimpleConcurrentIdentityHashMap<>();
+ private final SimpleConcurrentIdentityHashMap<SocketChannelEndPoint, SSLEngine> sslEngines = new SimpleConcurrentIdentityHashMap<>();
+ // Extra mapping as callbacks in SslHandshakeListener only provides SSLEngine (no connection reference) as argument
+ private final SimpleConcurrentIdentityHashMap<SSLEngine, ConnectionInfo> sslToConnectionInfo = new SimpleConcurrentIdentityHashMap<>();
private final boolean enabled;
private final ConnectionLog connectionLog;
@@ -89,16 +89,14 @@ class JettyConnectionLogger extends AbstractLifeCycle implements Connection.List
public void onOpened(Connection connection) {
handleListenerInvocation("Connection.Listener", "onOpened", "%h", List.of(connection), () -> {
SocketChannelEndPoint endpoint = findUnderlyingSocketEndpoint(connection.getEndPoint());
- var endpointKey = IdentityKey.of(endpoint);
- ConnectionInfo info = connectionInfo.get(endpointKey);
- if (info == null) {
- info = ConnectionInfo.from(endpoint);
- connectionInfo.put(IdentityKey.of(endpoint), info);
- }
+ ConnectionInfo info = connectionInfos.computeIfAbsent(endpoint, ConnectionInfo::from);
String connectionClassName = connection.getClass().getSimpleName(); // For hidden implementations of Connection
if (connection instanceof SslConnection) {
SSLEngine sslEngine = ((SslConnection) connection).getSSLEngine();
- sslToConnectionInfo.put(IdentityKey.of(sslEngine), info);
+ addReferenceToSslEngine(endpoint, info, sslEngine);
+ } else if (connection instanceof ALPNServerConnection) {
+ SSLEngine sslEngine = ((ALPNServerConnection) connection).getSSLEngine();
+ addReferenceToSslEngine(endpoint, info, sslEngine);
} else if (connection instanceof HttpConnection) {
info.setHttpProtocol("HTTP/1.1");
} else if (connection instanceof HTTP2ServerConnection) {
@@ -115,12 +113,19 @@ class JettyConnectionLogger extends AbstractLifeCycle implements Connection.List
});
}
+ private void addReferenceToSslEngine(SocketChannelEndPoint endpoint, ConnectionInfo info, SSLEngine sslEngine) {
+ if (sslEngine != null) {
+ sslEngines.put(endpoint, sslEngine)
+ .ifPresent(sslToConnectionInfo::remove);
+ sslToConnectionInfo.put(sslEngine, info);
+ }
+ }
+
@Override
public void onClosed(Connection connection) {
handleListenerInvocation("Connection.Listener", "onClosed", "%h", List.of(connection), () -> {
SocketChannelEndPoint endpoint = findUnderlyingSocketEndpoint(connection.getEndPoint());
- var endpointKey = IdentityKey.of(endpoint);
- ConnectionInfo info = connectionInfo.get(endpointKey);
+ ConnectionInfo info = connectionInfos.get(endpoint).orElse(null);
if (info == null) return; // Closed connection already handled
if (connection instanceof HttpConnection) {
info.setHttpBytes(connection.getBytesIn(), connection.getBytesOut());
@@ -128,7 +133,9 @@ class JettyConnectionLogger extends AbstractLifeCycle implements Connection.List
if (!endpoint.isOpen()) {
info.setClosedAt(System.currentTimeMillis());
connectionLog.log(info.toLogEntry());
- connectionInfo.remove(endpointKey);
+ connectionInfos.remove(endpoint);
+ sslEngines.remove(endpoint)
+ .ifPresent(sslToConnectionInfo::remove);
}
});
}
@@ -143,7 +150,7 @@ class JettyConnectionLogger extends AbstractLifeCycle implements Connection.List
public void onRequestBegin(Request request) {
handleListenerInvocation("HttpChannel.Listener", "onRequestBegin", "%h", List.of(request), () -> {
SocketChannelEndPoint endpoint = findUnderlyingSocketEndpoint(request.getHttpChannel().getEndPoint());
- ConnectionInfo info = Objects.requireNonNull(connectionInfo.get(IdentityKey.of(endpoint)));
+ ConnectionInfo info = connectionInfos.get(endpoint).get();
info.incrementRequests();
request.setAttribute(CONNECTION_ID_REQUEST_ATTRIBUTE, info.uuid());
});
@@ -153,7 +160,7 @@ class JettyConnectionLogger extends AbstractLifeCycle implements Connection.List
public void onResponseBegin(Request request) {
handleListenerInvocation("HttpChannel.Listener", "onResponseBegin", "%h", List.of(request), () -> {
SocketChannelEndPoint endpoint = findUnderlyingSocketEndpoint(request.getHttpChannel().getEndPoint());
- ConnectionInfo info = connectionInfo.get(IdentityKey.of(endpoint));
+ ConnectionInfo info = connectionInfos.get(endpoint).orElse(null);
if (info == null) return; // Connection closed before response started - observed during Jetty server shutdown
info.incrementResponses();
});
@@ -169,7 +176,8 @@ class JettyConnectionLogger extends AbstractLifeCycle implements Connection.List
public void handshakeSucceeded(Event event) {
SSLEngine sslEngine = event.getSSLEngine();
handleListenerInvocation("SslHandshakeListener", "handshakeSucceeded", "sslEngine=%h", List.of(sslEngine), () -> {
- ConnectionInfo info = sslToConnectionInfo.remove(IdentityKey.of(sslEngine));
+ ConnectionInfo info = sslToConnectionInfo.get(sslEngine).orElse(null);
+ if (info == null) return;
info.setSslSessionDetails(sslEngine.getSession());
});
}
@@ -179,7 +187,8 @@ class JettyConnectionLogger extends AbstractLifeCycle implements Connection.List
SSLEngine sslEngine = event.getSSLEngine();
handleListenerInvocation("SslHandshakeListener", "handshakeFailed", "sslEngine=%h,failure=%s", List.of(sslEngine, failure), () -> {
log.log(Level.FINE, failure, failure::toString);
- ConnectionInfo info = sslToConnectionInfo.remove(IdentityKey.of(sslEngine));
+ ConnectionInfo info = sslToConnectionInfo.get(sslEngine).orElse(null);
+ if (info == null) return;
info.setSslHandshakeFailure((SSLHandshakeException)failure);
});
}
@@ -376,21 +385,4 @@ class JettyConnectionLogger extends AbstractLifeCycle implements Connection.List
}
- private static class IdentityKey<T> {
- final T instance;
-
- IdentityKey(T instance) { this.instance = instance; }
-
- static <T> IdentityKey<T> of(T instance) { return new IdentityKey<>(instance); }
-
- @Override public int hashCode() { return System.identityHashCode(instance); }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) return true;
- if (!(obj instanceof IdentityKey<?>)) return false;
- IdentityKey<?> other = (IdentityKey<?>) obj;
- return this.instance == other.instance;
- }
- }
}
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyHttpServer.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyHttpServer.java
index 510c561c10f..70f173b74e5 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyHttpServer.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyHttpServer.java
@@ -4,8 +4,6 @@ package com.yahoo.jdisc.http.server.jetty;
import com.google.inject.Inject;
import com.yahoo.component.ComponentId;
import com.yahoo.component.provider.ComponentRegistry;
-import com.yahoo.concurrent.DaemonThreadFactory;
-import com.yahoo.container.logging.AccessLog;
import com.yahoo.container.logging.ConnectionLog;
import com.yahoo.container.logging.RequestLog;
import com.yahoo.jdisc.Metric;
@@ -43,8 +41,6 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
@@ -59,8 +55,6 @@ public class JettyHttpServer extends AbstractServerProvider {
private final static Logger log = Logger.getLogger(JettyHttpServer.class.getName());
- private final ExecutorService janitor;
-
private final Server server;
private final List<Integer> listenedPorts = new ArrayList<>();
private final ServerMetricReporter metricsReporter;
@@ -71,6 +65,7 @@ public class JettyHttpServer extends AbstractServerProvider {
ServerConfig serverConfig,
ServletPathsConfig servletPathsConfig,
FilterBindings filterBindings,
+ Janitor janitor,
ComponentRegistry<ConnectorFactory> connectorFactories,
ComponentRegistry<ServletHolder> servletHolders,
FilterInvoker filterInvoker,
@@ -88,15 +83,14 @@ public class JettyHttpServer extends AbstractServerProvider {
setupJmx(server, serverConfig);
configureJettyThreadpool(server, serverConfig);
JettyConnectionLogger connectionLogger = new JettyConnectionLogger(serverConfig.connectionLog(), connectionLog);
+ ConnectionMetricAggregator connectionMetricAggregator = new ConnectionMetricAggregator(serverConfig, metric);
for (ConnectorFactory connectorFactory : connectorFactories.allComponents()) {
ConnectorConfig connectorConfig = connectorFactory.getConnectorConfig();
- server.addConnector(connectorFactory.createConnector(metric, server, connectionLogger));
+ server.addConnector(connectorFactory.createConnector(metric, server, connectionLogger, connectionMetricAggregator));
listenedPorts.add(connectorConfig.listenPort());
}
- janitor = newJanitor();
-
JDiscContext jDiscContext = new JDiscContext(filterBindings,
container,
janitor,
@@ -208,15 +202,6 @@ public class JettyHttpServer extends AbstractServerProvider {
return ports.stream().map(Object::toString).collect(Collectors.joining(":"));
}
- // Separate threadpool for tasks that cannot be executed on the jdisc default threadpool due to risk of deadlock
- private static ExecutorService newJanitor() {
- int threadPoolSize = Math.max(1, Runtime.getRuntime().availableProcessors()/8);
- log.info("Creating janitor executor with " + threadPoolSize + " threads");
- return Executors.newFixedThreadPool(
- threadPoolSize,
- new DaemonThreadFactory(JettyHttpServer.class.getName() + "-Janitor-"));
- }
-
@Override
public void start() {
try {
@@ -258,7 +243,6 @@ public class JettyHttpServer extends AbstractServerProvider {
}
metricsReporter.shutdown();
- janitor.shutdown();
}
private boolean isGracefulShutdownEnabled() {
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/MetricDefinitions.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/MetricDefinitions.java
index 5e953179b53..172e6483de2 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/MetricDefinitions.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/MetricDefinitions.java
@@ -16,6 +16,7 @@ class MetricDefinitions {
static final String CLIENT_AUTHENTICATED_DIMENSION = "clientAuthenticated";
static final String REQUEST_SERVER_NAME_DIMENSION = "requestServerName";
static final String FILTER_CHAIN_ID_DIMENSION = "chainId";
+ static final String PROTOCOL_DIMENSION = "protocol";
static final String NUM_OPEN_CONNECTIONS = "serverNumOpenConnections";
static final String NUM_CONNECTIONS_OPEN_MAX = "serverConnectionsOpenMax";
@@ -23,6 +24,7 @@ class MetricDefinitions {
static final String CONNECTION_DURATION_MEAN = "serverConnectionDurationMean";
static final String CONNECTION_DURATION_STD_DEV = "serverConnectionDurationStdDev";
static final String NUM_PREMATURELY_CLOSED_CONNECTIONS = "jdisc.http.request.prematurely_closed";
+ static final String REQUESTS_PER_CONNECTION = "jdisc.http.request.requests_per_connection";
static final String NUM_BYTES_RECEIVED = "serverBytesReceived";
static final String NUM_BYTES_SENT = "serverBytesSent";
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/RequestUtils.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/RequestUtils.java
index 5fca7a8d778..b248f55a3df 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/RequestUtils.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/RequestUtils.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.jdisc.http.server.jetty;
+import org.eclipse.jetty.http2.server.HTTP2ServerConnection;
import org.eclipse.jetty.io.Connection;
import org.eclipse.jetty.server.HttpConnection;
import org.eclipse.jetty.server.Request;
@@ -28,6 +29,10 @@ public class RequestUtils {
return (JDiscServerConnector) request.getHttpChannel().getConnector();
}
+ static boolean isHttpServerConnection(Connection connection) {
+ return connection instanceof HttpConnection || connection instanceof HTTP2ServerConnection;
+ }
+
/**
* Note: {@link HttpServletRequest#getLocalPort()} may return the local port of the load balancer / reverse proxy if proxy-protocol is enabled.
* @return the actual local port of the underlying Jetty connector
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServerMetricReporter.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServerMetricReporter.java
index ba3694ffc2f..ea263350d0a 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServerMetricReporter.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServerMetricReporter.java
@@ -86,6 +86,7 @@ class ServerMetricReporter {
dimensions.put(MetricDefinitions.METHOD_DIMENSION, metricEntry.method);
dimensions.put(MetricDefinitions.SCHEME_DIMENSION, metricEntry.scheme);
dimensions.put(MetricDefinitions.REQUEST_TYPE_DIMENSION, metricEntry.requestType);
+ dimensions.put(MetricDefinitions.PROTOCOL_DIMENSION, metricEntry.protocol);
metric.add(metricEntry.name, metricEntry.value, metric.createContext(dimensions));
}
}
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletOutputStreamWriter.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletOutputStreamWriter.java
index b4d03385c3b..696fd2d51ad 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletOutputStreamWriter.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletOutputStreamWriter.java
@@ -12,7 +12,6 @@ import java.util.ArrayList;
import java.util.Deque;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.Executor;
import java.util.function.Consumer;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -54,7 +53,7 @@ public class ServletOutputStreamWriter {
// GuardedBy("state")
private final ServletOutputStream outputStream;
- private final Executor executor;
+ private final Janitor janitor;
// GuardedBy("monitor")
private final Deque<ResponseContentPart> responseContentQueue = new ArrayDeque<>();
@@ -70,9 +69,9 @@ public class ServletOutputStreamWriter {
final CompletableFuture<Void> finishedFuture = new CompletableFuture<>();
- public ServletOutputStreamWriter(ServletOutputStream outputStream, Executor executor, RequestMetricReporter metricReporter) {
+ public ServletOutputStreamWriter(ServletOutputStream outputStream, Janitor janitor, RequestMetricReporter metricReporter) {
this.outputStream = outputStream;
- this.executor = executor;
+ this.janitor = janitor;
this.metricReporter = metricReporter;
}
@@ -96,7 +95,7 @@ public class ServletOutputStreamWriter {
synchronized (monitor) {
if (state == State.FINISHED_OR_ERROR) {
- executor.execute(() -> handler.failed(new IllegalStateException("ContentChannel already closed.")));
+ janitor.scheduleTask(() -> handler.failed(new IllegalStateException("ContentChannel already closed.")));
return;
}
responseContentQueue.addLast(new ResponseContentPart(buf, handler));
@@ -207,8 +206,7 @@ public class ServletOutputStreamWriter {
runCompletionHandler_logOnExceptions(
() -> responseContentPart.handler.failed(failReason));
- executor.execute(
- () -> failedParts.forEach(failCompletionHandler));
+ janitor.scheduleTask(() -> failedParts.forEach(failCompletionHandler));
}
private void writeBufferToOutputStream(ResponseContentPart contentPart) throws Throwable {
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletRequestReader.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletRequestReader.java
index 1882448757a..26d74bdccb3 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletRequestReader.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletRequestReader.java
@@ -10,7 +10,6 @@ import javax.servlet.ServletInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.Executor;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -42,7 +41,7 @@ class ServletRequestReader implements ReadListener {
private final ServletInputStream servletInputStream;
private final ContentChannel requestContentChannel;
- private final Executor executor;
+ private final Janitor janitor;
private final RequestMetricReporter metricReporter;
private int bytesRead;
@@ -93,17 +92,17 @@ class ServletRequestReader implements ReadListener {
public ServletRequestReader(
ServletInputStream servletInputStream,
ContentChannel requestContentChannel,
- Executor executor,
+ Janitor janitor,
RequestMetricReporter metricReporter) {
Preconditions.checkNotNull(servletInputStream);
Preconditions.checkNotNull(requestContentChannel);
- Preconditions.checkNotNull(executor);
+ Preconditions.checkNotNull(janitor);
Preconditions.checkNotNull(metricReporter);
this.servletInputStream = servletInputStream;
this.requestContentChannel = requestContentChannel;
- this.executor = executor;
+ this.janitor = janitor;
this.metricReporter = metricReporter;
}
@@ -163,7 +162,7 @@ class ServletRequestReader implements ReadListener {
}
if (shouldCloseRequestContentChannel) {
- executor.execute(this::closeCompletionHandler_noThrow);
+ janitor.scheduleTask(this::closeCompletionHandler_noThrow);
}
}
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletResponseController.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletResponseController.java
index 60b7878156f..31fa9e9ebaa 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletResponseController.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletResponseController.java
@@ -20,7 +20,6 @@ import java.nio.ByteBuffer;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.Executor;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -57,7 +56,7 @@ public class ServletResponseController {
public ServletResponseController(
HttpServletRequest servletRequest,
HttpServletResponse servletResponse,
- Executor executor,
+ Janitor janitor,
RequestMetricReporter metricReporter,
boolean developerMode) throws IOException {
@@ -65,7 +64,7 @@ public class ServletResponseController {
this.servletResponse = servletResponse;
this.developerMode = developerMode;
this.servletOutputStreamWriter =
- new ServletOutputStreamWriter(servletResponse.getOutputStream(), executor, metricReporter);
+ new ServletOutputStreamWriter(servletResponse.getOutputStream(), janitor, metricReporter);
}
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/SimpleConcurrentIdentityHashMap.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/SimpleConcurrentIdentityHashMap.java
new file mode 100644
index 00000000000..59d606c640f
--- /dev/null
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/SimpleConcurrentIdentityHashMap.java
@@ -0,0 +1,52 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.jdisc.http.server.jetty;
+
+import java.util.Optional;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.function.Function;
+import java.util.function.Supplier;
+
+/**
+ * A simplified {@link ConcurrentMap} using reference-equality to compare keys (similarly to {@link java.util.IdentityHashMap})
+ *
+ * @author bjorncs
+ */
+class SimpleConcurrentIdentityHashMap<K, V> {
+
+ private final ConcurrentMap<IdentityKey<K>, V> wrappedMap = new ConcurrentHashMap<>();
+
+ Optional<V> get(K key) { return Optional.ofNullable(wrappedMap.get(identityKey(key))); }
+
+ Optional<V> remove(K key) { return Optional.ofNullable(wrappedMap.remove(identityKey(key))); }
+
+ Optional<V> put(K key, V value) { return Optional.ofNullable(wrappedMap.put(identityKey(key), value)); }
+
+ V computeIfAbsent(K key, Supplier<V> supplier) {
+ return wrappedMap.computeIfAbsent(identityKey(key), ignored -> supplier.get());
+ }
+
+ V computeIfAbsent(K key, Function<K, V> factory) {
+ return wrappedMap.computeIfAbsent(identityKey(key), k -> factory.apply(k.instance));
+ }
+
+ private static <K> IdentityKey<K> identityKey(K key) { return IdentityKey.of(key); }
+
+ private static class IdentityKey<K> {
+ final K instance;
+
+ IdentityKey(K instance) { this.instance = instance; }
+
+ static <K> IdentityKey<K> of(K instance) { return new IdentityKey<>(instance); }
+
+ @Override public int hashCode() { return System.identityHashCode(instance); }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) return true;
+ if (!(obj instanceof IdentityKey<?>)) return false;
+ IdentityKey<?> other = (IdentityKey<?>) obj;
+ return this.instance == other.instance;
+ }
+ }
+}
diff --git a/container-core/src/main/java/com/yahoo/restapi/RedirectResponse.java b/container-core/src/main/java/com/yahoo/restapi/RedirectResponse.java
new file mode 100644
index 00000000000..23c6a238f95
--- /dev/null
+++ b/container-core/src/main/java/com/yahoo/restapi/RedirectResponse.java
@@ -0,0 +1,27 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.restapi;
+
+import com.yahoo.jdisc.Response;
+
+import java.net.URI;
+
+/**
+ * An HTTP redirect response
+ *
+ * @author bjorncs
+ */
+public class RedirectResponse extends MessageResponse {
+
+ private RedirectResponse(int statusCode, URI location) {
+ super(statusCode, "Moved to " + location.toString());
+ headers().add("Location", location.toString());
+ }
+
+ public static RedirectResponse found(URI location) {
+ return new RedirectResponse(Response.Status.FOUND, location);
+ }
+
+ public static RedirectResponse movedPermanently(URI location) {
+ return new RedirectResponse(Response.Status.MOVED_PERMANENTLY, location);
+ }
+}
diff --git a/container-core/src/main/java/com/yahoo/restapi/RestApiException.java b/container-core/src/main/java/com/yahoo/restapi/RestApiException.java
index d9da320499f..68e46a3a9b8 100644
--- a/container-core/src/main/java/com/yahoo/restapi/RestApiException.java
+++ b/container-core/src/main/java/com/yahoo/restapi/RestApiException.java
@@ -42,6 +42,7 @@ public class RestApiException extends RuntimeException {
public static class NotFound extends RestApiException {
public NotFound() { this(null, null); }
+ public NotFound(HttpRequest request) { this("Nothing at '" + request.getUri().getRawPath() + "'", null); }
public NotFound(Throwable cause) { this(cause.getMessage(), cause); }
public NotFound(String message) { this(message, null); }
public NotFound(String message, Throwable cause) { super(ErrorResponse::notFoundError, message, cause); }
@@ -50,7 +51,8 @@ public class RestApiException extends RuntimeException {
public static class MethodNotAllowed extends RestApiException {
public MethodNotAllowed() { super(ErrorResponse::methodNotAllowed, "Method not allowed", null); }
public MethodNotAllowed(HttpRequest request) {
- super(ErrorResponse::methodNotAllowed, "Method '" + request.getMethod().name() + "' is not allowed", null);
+ super(ErrorResponse::methodNotAllowed, "Method '" + request.getMethod().name() + "' is not allowed at '" +
+ request.getUri().getRawPath() + "'", null);
}
}
diff --git a/container-core/src/main/java/com/yahoo/restapi/RestApiImpl.java b/container-core/src/main/java/com/yahoo/restapi/RestApiImpl.java
index 8ba94f9aca9..d63add5ed1d 100644
--- a/container-core/src/main/java/com/yahoo/restapi/RestApiImpl.java
+++ b/container-core/src/main/java/com/yahoo/restapi/RestApiImpl.java
@@ -14,6 +14,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.util.ArrayList;
+import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.ListIterator;
@@ -144,7 +145,7 @@ class RestApiImpl implements RestApi {
private static Route createDefaultRoute() {
RouteBuilder routeBuilder = new RouteBuilderImpl("{*}")
.defaultHandler(context -> {
- throw new RestApiException.NotFound();
+ throw new RestApiException.NotFound(context.request());
});
return ((RouteBuilderImpl)routeBuilder).build();
}
@@ -155,6 +156,8 @@ class RestApiImpl implements RestApi {
if (!disableDefaultMappers){
exceptionMappers.add(new ExceptionMapperHolder<>(RestApiException.class, (context, exception) -> exception.response()));
}
+ // Topologically sort children before superclasses, so the most specific match is found by iterating through mappers in order.
+ exceptionMappers.sort((a, b) -> (a.type.isAssignableFrom(b.type) ? 1 : 0) + (b.type.isAssignableFrom(a.type) ? -1 : 0));
return exceptionMappers;
}
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactoryTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactoryTest.java
index df794c7ecb8..93261a2401f 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactoryTest.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactoryTest.java
@@ -31,8 +31,10 @@ public class ConnectorFactoryTest {
JettyConnectionLogger connectionLogger = new JettyConnectionLogger(
new ServerConfig.ConnectionLog.Builder().enabled(false).build(),
new VoidConnectionLog());
+ DummyMetric metric = new DummyMetric();
+ var connectionMetricAggregator = new ConnectionMetricAggregator(new ServerConfig(new ServerConfig.Builder()), metric);
JDiscServerConnector connector =
- (JDiscServerConnector)factory.createConnector(new DummyMetric(), server, connectionLogger);
+ (JDiscServerConnector)factory.createConnector(metric, server, connectionLogger, connectionMetricAggregator);
server.addConnector(connector);
server.setHandler(new HelloWorldHandler());
server.start();
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerConformanceTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerConformanceTest.java
index 825e3eba110..2183098da2b 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerConformanceTest.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerConformanceTest.java
@@ -169,7 +169,7 @@ public class HttpServerConformanceTest extends ServerProviderConformanceTest {
@Override
@Test
public void testRequestNondeterministicExceptionWithSyncHandleResponse() throws Throwable {
- new TestRunner().expect(anyOf(success(), serverError()))
+ new TestRunner().expect(anyOf(success(), successNoContent(), serverError()))
.execute();
}
@@ -232,7 +232,7 @@ public class HttpServerConformanceTest extends ServerProviderConformanceTest {
@Override
@Test
public void testRequestContentWriteWithNondeterministicSyncFailure() throws Throwable {
- new TestRunner().expect(anyOf(success(), serverError()))
+ new TestRunner().expect(anyOf(success(), successNoContent(), serverError()))
.execute();
}
@@ -253,7 +253,7 @@ public class HttpServerConformanceTest extends ServerProviderConformanceTest {
@Override
@Test
public void testRequestContentWriteWithNondeterministicAsyncFailure() throws Throwable {
- new TestRunner().expect(anyOf(success(), serverError()))
+ new TestRunner().expect(anyOf(success(), successNoContent(), serverError()))
.execute();
}
@@ -281,7 +281,7 @@ public class HttpServerConformanceTest extends ServerProviderConformanceTest {
@Override
@Test
public void testRequestContentWriteNondeterministicException() throws Throwable {
- new TestRunner().expect(anyOf(success(), serverError(), successNoContent()))
+ new TestRunner().expect(anyOf(success(), successNoContent(), serverError(), successNoContent()))
.execute();
}
@@ -309,7 +309,7 @@ public class HttpServerConformanceTest extends ServerProviderConformanceTest {
@Override
@Test
public void testRequestContentWriteNondeterministicExceptionWithSyncCompletion() throws Throwable {
- new TestRunner().expect(anyOf(success(), serverError()))
+ new TestRunner().expect(anyOf(success(), successNoContent(), serverError()))
.execute();
}
@@ -394,7 +394,7 @@ public class HttpServerConformanceTest extends ServerProviderConformanceTest {
@Override
@Test
public void testRequestContentWriteExceptionWithNondeterministicAsyncFailure() throws Throwable {
- new TestRunner().expect(anyOf(success(), serverError()))
+ new TestRunner().expect(anyOf(success(), successNoContent(), serverError()))
.execute();
}
@@ -520,7 +520,7 @@ public class HttpServerConformanceTest extends ServerProviderConformanceTest {
@Override
@Test
public void testRequestContentCloseNondeterministicExceptionWithSyncCompletion() throws Throwable {
- new TestRunner().expect(anyOf(success(), serverError(), successNoContent()))
+ new TestRunner().expect(anyOf(success(), successNoContent(), serverError(), successNoContent()))
.execute();
}
@@ -548,7 +548,7 @@ public class HttpServerConformanceTest extends ServerProviderConformanceTest {
@Override
@Test
public void testRequestContentCloseNondeterministicExceptionWithAsyncCompletion() throws Throwable {
- new TestRunner().expect(anyOf(success(), serverError(), successNoContent()))
+ new TestRunner().expect(anyOf(success(), successNoContent(), serverError(), successNoContent()))
.execute();
}
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java
index a5804dc9b86..5056cf91d79 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java
@@ -917,6 +917,20 @@ public class HttpServerTest {
assertThat(driver.close(), is(true));
}
+ @Test
+ public void requireThatRequestsPerConnectionMetricIsAggregated() throws IOException {
+ Path privateKeyFile = tmpFolder.newFile().toPath();
+ Path certificateFile = tmpFolder.newFile().toPath();
+ generatePrivateKeyAndCertificate(privateKeyFile, certificateFile);
+ var metricConsumer = new MetricConsumerMock();
+ InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
+ JettyTestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
+ driver.client().get("/").expectStatusCode(is(OK));
+ assertThat(driver.close(), is(true));
+ verify(metricConsumer.mockitoMock(), atLeast(1))
+ .set(MetricDefinitions.REQUESTS_PER_CONNECTION, 1L, MetricConsumerMock.STATIC_CONTEXT);
+ }
+
private ContentResponse sendJettyClientRequest(JettyTestDriver testDriver, Path certificateFile, Object tag)
throws Exception {
HttpClient client = createJettyHttpClient(certificateFile);
diff --git a/container-dependencies-enforcer/pom.xml b/container-dependencies-enforcer/pom.xml
index ab2cfdda1a1..d692adac3c3 100644
--- a/container-dependencies-enforcer/pom.xml
+++ b/container-dependencies-enforcer/pom.xml
@@ -90,8 +90,6 @@
<include>com.sun.xml.bind:jaxb-core:[${jaxb.version}]:jar:provided</include>
<include>com.sun.xml.bind:jaxb-impl:[${jaxb.version}]:jar:provided</include>
<include>commons-logging:commons-logging:[1.2]:jar:provided</include>
- <include>jakarta.activation:jakarta.activation-api:[1.2.1]:jar:provided</include>
- <include>jakarta.xml.bind:jakarta.xml.bind-api:[2.3.2]:jar:provided</include>
<include>javax.annotation:javax.annotation-api:[${javax.annotation-api.version}]:jar:provided</include>
<include>javax.inject:javax.inject:[${javax.inject.version}]:jar:provided</include>
<include>javax.servlet:javax.servlet-api:[${javax.servlet-api.version}]:jar:provided</include>
diff --git a/container-dependency-versions/pom.xml b/container-dependency-versions/pom.xml
index 5e4a45074ae..29e9f85256d 100644
--- a/container-dependency-versions/pom.xml
+++ b/container-dependency-versions/pom.xml
@@ -165,18 +165,6 @@
<artifactId>javax.activation</artifactId>
<version>1.2.0</version>
</dependency>
- <dependency>
- <!-- Needed by jackson-module-jaxb-annotations -->
- <groupId>jakarta.xml.bind</groupId>
- <artifactId>jakarta.xml.bind-api</artifactId>
- <version>${jakarta-xml-bind-api.version}</version>
- </dependency>
- <dependency>
- <!-- Needed by jackson-module-jaxb-annotations -->
- <groupId>jakarta.activation</groupId>
- <artifactId>jakarta.activation-api</artifactId>
- <version>${jakarta-activation-api.version}</version>
- </dependency>
<!-- jaxb end -->
<dependency>
@@ -383,7 +371,7 @@
<properties>
<aopalliance.version>1.0</aopalliance.version>
- <bouncycastle.version>1.65</bouncycastle.version>
+ <bouncycastle.version>1.68</bouncycastle.version>
<felix.version>6.0.3</felix.version>
<felix.log.version>1.0.1</felix.log.version>
<findbugs.version>1.3.9</findbugs.version>
@@ -392,7 +380,7 @@
<javax.inject.version>1</javax.inject.version>
<javax.servlet-api.version>3.1.0</javax.servlet-api.version>
<jaxb.version>2.3.0</jaxb.version>
- <jetty.version>9.4.40.v20210413</jetty.version>
+ <jetty.version>9.4.41.v20210516</jetty.version>
<jetty-alpn.version>1.1.3.v20160715</jetty-alpn.version>
<org.lz4.version>1.7.1</org.lz4.version>
<org.json.version>20090211</org.json.version>
@@ -407,8 +395,6 @@
<hk2.osgi-resource-locator.version>1.0.1</hk2.osgi-resource-locator.version>
<jackson2.version>2.12.1</jackson2.version>
<jackson-databind.version>${jackson2.version}</jackson-databind.version>
- <jakarta-activation-api.version>1.2.1</jakarta-activation-api.version>
- <jakarta-xml-bind-api.version>2.3.2</jakarta-xml-bind-api.version>
<javassist.version>3.20.0-GA</javassist.version>
<javax.annotation-api.version>1.2</javax.annotation-api.version>
<javax.validation-api.version>1.1.0.Final</javax.validation-api.version>
diff --git a/container-disc/pom.xml b/container-disc/pom.xml
index e537b8f1c35..ae2e460094b 100644
--- a/container-disc/pom.xml
+++ b/container-disc/pom.xml
@@ -220,8 +220,6 @@
jackson-jaxrs-base-${jackson2.version}.jar,
jackson-jaxrs-json-provider-${jackson2.version}.jar,
jackson-module-jaxb-annotations-${jackson2.version}.jar,
- jakarta.activation-api-${jakarta-activation-api.version}.jar,
- jakarta.xml.bind-api-${jakarta-xml-bind-api.version}.jar,
javassist-${javassist.version}.jar,
javax.ws.rs-api-${javax.ws.rs-api.version}.jar,
jersey-client-${jersey2.version}.jar,
diff --git a/container-messagebus/pom.xml b/container-messagebus/pom.xml
index adaf64a0a03..3ec177e7bde 100644
--- a/container-messagebus/pom.xml
+++ b/container-messagebus/pom.xml
@@ -92,6 +92,13 @@
</execution>
</executions>
</plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <configuration>
+ <forkCount>4</forkCount>
+ </configuration>
+ </plugin>
</plugins>
</build>
</project>
diff --git a/container-search/abi-spec.json b/container-search/abi-spec.json
index b5933936adf..b577660c1b9 100644
--- a/container-search/abi-spec.json
+++ b/container-search/abi-spec.json
@@ -201,6 +201,18 @@
],
"fields": []
},
+ "com.yahoo.prelude.hitfield.RawBase64": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(byte[])",
+ "public java.lang.String toString()"
+ ],
+ "fields": []
+ },
"com.yahoo.prelude.hitfield.RawData": {
"superClass": "java.lang.Object",
"interfaces": [],
@@ -5574,8 +5586,8 @@
"public com.yahoo.search.query.parser.ParserEnvironment setIndexFacts(com.yahoo.prelude.IndexFacts)",
"public com.yahoo.language.Linguistics getLinguistics()",
"public com.yahoo.search.query.parser.ParserEnvironment setLinguistics(com.yahoo.language.Linguistics)",
- "public com.yahoo.prelude.query.parser.SpecialTokens getSpecialTokens()",
- "public com.yahoo.search.query.parser.ParserEnvironment setSpecialTokens(com.yahoo.prelude.query.parser.SpecialTokens)",
+ "public com.yahoo.language.process.SpecialTokens getSpecialTokens()",
+ "public com.yahoo.search.query.parser.ParserEnvironment setSpecialTokens(com.yahoo.language.process.SpecialTokens)",
"public static com.yahoo.search.query.parser.ParserEnvironment fromExecutionContext(com.yahoo.search.searchchain.Execution$Context)",
"public static com.yahoo.search.query.parser.ParserEnvironment fromParserEnvironment(com.yahoo.search.query.parser.ParserEnvironment)"
],
@@ -7765,7 +7777,7 @@
"final"
],
"methods": [
- "public void <init>(com.yahoo.search.searchchain.SearchChainRegistry, com.yahoo.prelude.IndexFacts, com.yahoo.prelude.query.parser.SpecialTokenRegistry, com.yahoo.search.rendering.RendererRegistry, com.yahoo.language.Linguistics)",
+ "public void <init>(com.yahoo.search.searchchain.SearchChainRegistry, com.yahoo.prelude.IndexFacts, com.yahoo.language.process.SpecialTokenRegistry, com.yahoo.search.rendering.RendererRegistry, com.yahoo.language.Linguistics)",
"public static com.yahoo.search.searchchain.Execution$Context createContextStub()",
"public static com.yahoo.search.searchchain.Execution$Context createContextStub(com.yahoo.prelude.IndexFacts)",
"public static com.yahoo.search.searchchain.Execution$Context createContextStub(com.yahoo.search.searchchain.SearchChainRegistry, com.yahoo.prelude.IndexFacts)",
@@ -7779,8 +7791,8 @@
"public void setIndexFacts(com.yahoo.prelude.IndexFacts)",
"public com.yahoo.search.searchchain.SearchChainRegistry searchChainRegistry()",
"public com.yahoo.search.rendering.RendererRegistry rendererRegistry()",
- "public com.yahoo.prelude.query.parser.SpecialTokenRegistry getTokenRegistry()",
- "public void setTokenRegistry(com.yahoo.prelude.query.parser.SpecialTokenRegistry)",
+ "public com.yahoo.language.process.SpecialTokenRegistry getTokenRegistry()",
+ "public void setTokenRegistry(com.yahoo.language.process.SpecialTokenRegistry)",
"public void setDetailedDiagnostics(boolean)",
"public boolean getDetailedDiagnostics()",
"public boolean getBreakdown()",
diff --git a/container-search/src/main/java/com/yahoo/prelude/fastsearch/Base64DataField.java b/container-search/src/main/java/com/yahoo/prelude/fastsearch/Base64DataField.java
new file mode 100644
index 00000000000..d51bdc0fad1
--- /dev/null
+++ b/container-search/src/main/java/com/yahoo/prelude/fastsearch/Base64DataField.java
@@ -0,0 +1,25 @@
+package com.yahoo.prelude.fastsearch;
+
+import com.yahoo.data.access.Inspector;
+import com.yahoo.data.access.simple.Value;
+import com.yahoo.prelude.hitfield.RawBase64;
+
+/**
+ * Represents a binary field that is presented as base64
+ * @author baldersheim
+ */
+public class Base64DataField extends DocsumField {
+ public Base64DataField(String name) {
+ super(name);
+ }
+
+ @Override
+ public String toString() {
+ return "field " + getName() + " type raw";
+ }
+
+ @Override
+ public Object convert(Inspector value) {
+ return new RawBase64(value.asData(Value.empty().asData()));
+ }
+}
diff --git a/container-search/src/main/java/com/yahoo/prelude/fastsearch/DataField.java b/container-search/src/main/java/com/yahoo/prelude/fastsearch/DataField.java
index de07839e3e3..af7d98311f6 100644
--- a/container-search/src/main/java/com/yahoo/prelude/fastsearch/DataField.java
+++ b/container-search/src/main/java/com/yahoo/prelude/fastsearch/DataField.java
@@ -23,7 +23,7 @@ public class DataField extends DocsumField {
super(name);
}
- private Object convert(byte[] value) {
+ private RawData convert(byte[] value) {
return new RawData(value);
}
diff --git a/container-search/src/main/java/com/yahoo/prelude/fastsearch/DocsumField.java b/container-search/src/main/java/com/yahoo/prelude/fastsearch/DocsumField.java
index ef892585d21..70ffc71495a 100644
--- a/container-search/src/main/java/com/yahoo/prelude/fastsearch/DocsumField.java
+++ b/container-search/src/main/java/com/yahoo/prelude/fastsearch/DocsumField.java
@@ -51,6 +51,7 @@ public abstract class DocsumField {
fieldFactory.put("double", DoubleField.class);
fieldFactory.put("string", StringField.class);
fieldFactory.put("data", DataField.class);
+ fieldFactory.put("raw", Base64DataField.class);
fieldFactory.put("longstring", LongstringField.class);
fieldFactory.put("longdata", LongdataField.class);
fieldFactory.put("jsonstring", StructDataField.class);
diff --git a/container-search/src/main/java/com/yahoo/prelude/hitfield/RawBase64.java b/container-search/src/main/java/com/yahoo/prelude/hitfield/RawBase64.java
new file mode 100644
index 00000000000..134d0bc902a
--- /dev/null
+++ b/container-search/src/main/java/com/yahoo/prelude/hitfield/RawBase64.java
@@ -0,0 +1,18 @@
+package com.yahoo.prelude.hitfield;
+
+import java.util.Base64;
+
+/**
+ * @author baldersheim
+ */
+public class RawBase64 {
+ private final byte[] content;
+ public RawBase64(byte[] content) {
+ this.content = content;
+ }
+
+ @Override
+ public String toString() {
+ return Base64.getEncoder().encodeToString(content);
+ }
+}
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/parser/AbstractParser.java b/container-search/src/main/java/com/yahoo/prelude/query/parser/AbstractParser.java
index 902be7e15dd..732466748eb 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/parser/AbstractParser.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/parser/AbstractParser.java
@@ -19,7 +19,6 @@ import java.util.*;
* @author bratseth
* @author Steinar Knutsen
*/
-@SuppressWarnings("deprecation")
public abstract class AbstractParser implements CustomParser {
/** The current submodes of this parser */
@@ -48,7 +47,7 @@ public abstract class AbstractParser implements CustomParser {
* of these may be active at the same time. SubModes are activated or
* deactivated by specifying special indexes in the query.
*/
- final class Submodes {
+ static final class Submodes {
/**
* Url mode allows "_" and "-" as word characters. Default is false
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/parser/SpecialTokenRegistry.java b/container-search/src/main/java/com/yahoo/prelude/query/parser/SpecialTokenRegistry.java
deleted file mode 100644
index be2d9f9f68b..00000000000
--- a/container-search/src/main/java/com/yahoo/prelude/query/parser/SpecialTokenRegistry.java
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.prelude.query.parser;
-
-import com.yahoo.config.subscription.ConfigGetter;
-import com.yahoo.config.subscription.ConfigSubscriber;
-import com.yahoo.vespa.configdefinition.SpecialtokensConfig;
-import com.yahoo.vespa.configdefinition.SpecialtokensConfig.Tokenlist;
-import com.yahoo.vespa.configdefinition.SpecialtokensConfig.Tokenlist.Tokens;
-
-import java.util.*;
-import java.util.logging.Logger;
-
-
-/**
- * A <i>registry</i> which is responsible for knowing the current
- * set of special tokens. The default registry returns empty token lists
- * for all names. Usage of this registry is multithread safe.
- *
- * @author bratseth
- */
-public class SpecialTokenRegistry {
-
- /** The log of this */
- private static final Logger log = Logger.getLogger(SpecialTokenRegistry.class.getName());
-
- private static final SpecialTokens nullSpecialTokens = new SpecialTokens();
-
- /**
- * The current authorative special token lists, indexed on name.
- * These lists are unmodifiable and used directly by clients of this
- */
- private Map<String,SpecialTokens> specialTokenMap = new HashMap<>();
-
- private boolean frozen = false;
-
- /**
- * Creates an empty special token registry which
- * does not subscribe to any configuration
- */
- public SpecialTokenRegistry() {}
-
- /**
- * Create a special token registry which subscribes to the specialtokens
- * configuration. Only used for testing.
- */
- public SpecialTokenRegistry(String configId) {
- try {
- build(new ConfigGetter<>(SpecialtokensConfig.class).getConfig(configId));
- } catch (Exception e) {
- log.config(
- "No special tokens are configured (" + e.getMessage() + ")");
- }
- }
-
- /**
- * Create a special token registry from a configuration object. This is the production code path.
- */
- public SpecialTokenRegistry(SpecialtokensConfig config) {
- if (config != null) {
- build(config);
- }
- freeze();
- }
-
- private void freeze() {
- frozen = true;
- }
-
- private void build(SpecialtokensConfig config) {
- List<SpecialTokens> list = new ArrayList<>();
- for (Iterator<Tokenlist> i = config.tokenlist().iterator(); i.hasNext();) {
- Tokenlist tokenList = i.next();
- SpecialTokens tokens = new SpecialTokens(tokenList.name());
-
- for (Iterator<Tokens> j = tokenList.tokens().iterator(); j.hasNext();) {
- Tokens token = j.next();
- tokens.addSpecialToken(token.token(), token.replace());
- }
- tokens.freeze();
- list.add(tokens);
- }
- addSpecialTokens(list);
- }
-
- /**
- * Adds a SpecialTokens instance to the registry. That is, add the
- * tokens contained for the name of the SpecialTokens instance
- * given.
- *
- * @param specialTokens the SpecialTokens object to add
- */
- public void addSpecialTokens(SpecialTokens specialTokens) {
- ensureNotFrozen();
- List<SpecialTokens> list = new ArrayList<>();
- list.add(specialTokens);
- addSpecialTokens(list);
-
- }
-
- private void ensureNotFrozen() {
- if (frozen) {
- throw new IllegalStateException("Tried to modify a frozen SpecialTokenRegistry instance.");
- }
- }
-
- private void addSpecialTokens(List<SpecialTokens> list) {
- HashMap<String,SpecialTokens> tokens = new HashMap<>(specialTokenMap);
- for(SpecialTokens t: list) {
- tokens.put(t.getName(),t);
- }
- specialTokenMap = tokens;
- }
-
-
- /**
- * Returns the currently authorative list of special tokens for
- * a given name.
- *
- * @param name the name of the special tokens to return
- * null, the empth string or the string "default" returns
- * the default ones
- * @return a read-only list of SpecialToken instances, an empty list if this name
- * has no special tokens
- */
- public SpecialTokens getSpecialTokens(String name) {
- if (name == null || name.trim().equals("")) {
- name = "default";
- }
- SpecialTokens specialTokens = specialTokenMap.get(name);
-
- if (specialTokens == null) {
- return nullSpecialTokens;
- }
- return specialTokens;
- }
-
-}
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/parser/SpecialTokens.java b/container-search/src/main/java/com/yahoo/prelude/query/parser/SpecialTokens.java
deleted file mode 100644
index f45ecefefa6..00000000000
--- a/container-search/src/main/java/com/yahoo/prelude/query/parser/SpecialTokens.java
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.prelude.query.parser;
-
-import java.util.logging.Level;
-import com.yahoo.prelude.query.Substring;
-
-import java.util.*;
-import java.util.logging.Logger;
-
-import static com.yahoo.language.LinguisticsCase.toLowerCase;
-
-/**
- * A list of special tokens - string that should be treated as word
- * no matter what they contain. Special tokens are case insensitive.
- *
- * @author bratseth
- */
-public class SpecialTokens {
-
- private static final Logger log = Logger.getLogger(SpecialTokens.class.getName());
-
- private final String name;
-
- private final List<SpecialToken> specialTokens = new ArrayList<>();
-
- private boolean frozen = false;
-
- private int currentMaximumLength = 0;
-
- /** Creates a null list of special tokens */
- public SpecialTokens() {
- this.name = "(null)";
- }
-
- public SpecialTokens(String name) {
- this.name = name;
- }
-
- /** Returns the name of this special tokens list */
- public String getName() {
- return name;
- }
-
- /**
- * Adds a special token to this
- *
- * @param token the special token string to add
- * @param replace the token to replace instances of the special token with, or null to keep the token
- */
- public void addSpecialToken(String token, String replace) {
- ensureNotFrozen();
- if (!caseIndependentLength(token)) {
- return;
- }
- // TODO are special tokens correctly unicode normalized in reagards to query parsing?
- final SpecialToken specialTokenToAdd = new SpecialToken(token, replace);
- currentMaximumLength = Math.max(currentMaximumLength, specialTokenToAdd.token.length());
- specialTokens.add(specialTokenToAdd);
- Collections.sort(specialTokens);
- }
-
- private boolean caseIndependentLength(String token) {
- // XXX not fool proof length test, should test codepoint by codepoint for mixed case user input? not even that will necessarily be 100% robust...
- String asLow = toLowerCase(token);
- // TODO put along with the global toLowerCase
- String asHigh = token.toUpperCase(Locale.ENGLISH);
- if (asLow.length() != token.length() || asHigh.length() != token.length()) {
- log.log(Level.SEVERE, "Special token '" + token + "' has case sensitive length. Ignoring the token."
- + " Please report this message in a bug to the Vespa team.");
- return false;
- } else {
- return true;
- }
- }
-
- /**
- * Returns the special token starting at the start of the given string, or null if no
- * special token starts at this string
- *
- * @param string the string to search for a special token at the start position
- * @param substring true to allow the special token to be followed by a character which does not
- * mark the end of a token
- */
- public SpecialToken tokenize(String string, boolean substring) {
- // XXX detonator pattern token.length may be != the length of the
- // matching data in string, ref caseIndependentLength(String)
- final String input = toLowerCase(string.substring(0, Math.min(string.length(), currentMaximumLength)));
- for (Iterator<SpecialToken> i = specialTokens.iterator(); i.hasNext();) {
- SpecialTokens.SpecialToken special = i.next();
-
- if (input.startsWith(special.token())) {
- if (string.length() == special.token().length() || substring || tokenEndsAt(special.token().length(), string))
- return special;
- }
- }
- return null;
- }
-
- private boolean tokenEndsAt(int position,String string) {
- return !Character.isLetterOrDigit(string.charAt(position));
- }
-
- /** Returns the number of special tokens in this */
- public int size() {
- return specialTokens.size();
- }
-
- private void ensureNotFrozen() {
- if (frozen) {
- throw new IllegalStateException("Tried to modify a frozen SpecialTokens instance.");
- }
- }
-
- public void freeze() {
- frozen = true;
- }
-
- /** An immutable special token */
- public final static class SpecialToken implements Comparable<SpecialToken> {
-
- private String token;
-
- private String replace;
-
- public SpecialToken(String token, String replace) {
- this.token = toLowerCase(token);
- if (replace == null || replace.trim().equals("")) {
- this.replace = this.token;
- } else {
- this.replace = toLowerCase(replace);
- }
- }
-
- /** Returns the special token */
- public String token() {
- return token;
- }
-
- /** Returns the right replace value, never null or an empty string */
- public String replace() {
- return replace;
- }
-
- @Override
- public int compareTo(SpecialToken other) {
- if (this.token().length() < other.token().length()) return 1;
- if (this.token().length() == other.token().length()) return 0;
- return -1;
- }
-
- @Override
- public boolean equals(Object other) {
- if (other == this) return true;
- if ( ! (other instanceof SpecialToken)) return false;
- return Objects.equals(this.token, ((SpecialToken)other).token);
- }
-
- @Override
- public int hashCode() { return token.hashCode(); }
-
- public Token toToken(int start, String rawSource) {
- return new Token(Token.Kind.WORD, replace(), true, new Substring(start, start + token.length(), rawSource)); // XXX: Unsafe?
- }
-
- }
-
-}
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/parser/Tokenizer.java b/container-search/src/main/java/com/yahoo/prelude/query/parser/Tokenizer.java
index 2dc2254df68..b71bd57539f 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/parser/Tokenizer.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/parser/Tokenizer.java
@@ -3,6 +3,7 @@ package com.yahoo.prelude.query.parser;
import com.yahoo.language.Linguistics;
import com.yahoo.language.process.CharacterClasses;
+import com.yahoo.language.process.SpecialTokens;
import com.yahoo.prelude.Index;
import com.yahoo.prelude.IndexFacts;
import com.yahoo.prelude.query.Substring;
@@ -200,7 +201,7 @@ public final class Tokenizer {
}
StringBuilder tmp = new StringBuilder();
for (int i = 0; i < tokencnt; i++) {
- Token useToken = tokens.get(backtrack+i);
+ Token useToken = tokens.get(backtrack + i);
tmp.append(useToken.image);
}
String indexName = tmp.toString();
@@ -216,20 +217,20 @@ public final class Tokenizer {
}
private int consumeSpecialToken(int start) {
- SpecialTokens.SpecialToken specialToken=getSpecialToken(start);
- if (specialToken==null) return start;
- tokens.add(specialToken.toToken(start,source));
- return start + specialToken.token().length();
+ SpecialTokens.Token token = getSpecialToken(start);
+ if (token == null) return start;
+ tokens.add(toToken(token, start, source));
+ return start + token.token().length();
}
- private SpecialTokens.SpecialToken getSpecialToken(int start) {
+ private SpecialTokens.Token getSpecialToken(int start) {
if (specialTokens == null) return null;
return specialTokens.tokenize(source.substring(start), substringSpecialTokens);
}
private int consumeExact(int start,Index index) {
if (index.getExactTerminator() == null) return consumeHeuristicExact(start);
- return consumeToTerminator(start,index.getExactTerminator());
+ return consumeToTerminator(start, index.getExactTerminator());
}
private boolean looksLikeExactEnd(int end) {
@@ -467,7 +468,7 @@ public final class Tokenizer {
/** Consumes a word or number <i>and/or possibly</i> a special token starting within this word or number */
private int consumeWordOrNumber(int start, Index currentIndex) {
int tokenEnd = start;
- SpecialTokens.SpecialToken substringSpecialToken = null;
+ SpecialTokens.Token substringToken = null;
boolean digitsOnly = true;
// int underscores = 0;
// boolean underscoresOnly = true;
@@ -475,8 +476,8 @@ public final class Tokenizer {
while (tokenEnd < source.length()) {
if (substringSpecialTokens) {
- substringSpecialToken = getSpecialToken(tokenEnd);
- if (substringSpecialToken != null) break;
+ substringToken = getSpecialToken(tokenEnd);
+ if (substringToken != null) break;
}
int c = source.codePointAt(tokenEnd);
@@ -524,11 +525,11 @@ public final class Tokenizer {
}
}
- if (substringSpecialToken == null)
+ if (substringToken == null)
return --tokenEnd;
// TODO: test the logic around tokenEnd with friends
- addToken(substringSpecialToken.toToken(tokenEnd, source));
- return --tokenEnd + substringSpecialToken.token().length();
+ addToken(toToken(substringToken, tokenEnd, source));
+ return --tokenEnd + substringToken.token().length();
}
private void addToken(Token.Kind kind, String word, int start, int end) {
@@ -539,4 +540,11 @@ public final class Tokenizer {
tokens.add(token);
}
+ public Token toToken(SpecialTokens.Token specialToken, int start, String rawSource) {
+ return new Token(Token.Kind.WORD,
+ specialToken.replacement(),
+ true,
+ new Substring(start, start + specialToken.token().length(), rawSource)); // XXX: Unsafe?
+ }
+
}
diff --git a/container-search/src/main/java/com/yahoo/search/Query.java b/container-search/src/main/java/com/yahoo/search/Query.java
index ce31b9a3ba3..4ecede819de 100644
--- a/container-search/src/main/java/com/yahoo/search/Query.java
+++ b/container-search/src/main/java/com/yahoo/search/Query.java
@@ -7,7 +7,6 @@ import com.yahoo.collections.Tuple2;
import com.yahoo.component.Version;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.fs4.MapEncoder;
-import java.util.logging.Level;
import com.yahoo.prelude.fastsearch.DocumentDatabase;
import com.yahoo.prelude.query.Highlight;
import com.yahoo.prelude.query.textualrepresentation.TextualQueryRepresentation;
@@ -56,6 +55,8 @@ import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.logging.Level;
import java.util.logging.Logger;
/**
@@ -336,7 +337,7 @@ public class Query extends com.yahoo.processing.Request implements Cloneable {
}
private void init(Map<String, String> requestMap, CompiledQueryProfile queryProfile) {
- startTime = System.currentTimeMillis();
+ startTime = httpRequest.getJDiscRequest().creationTime(TimeUnit.MILLISECONDS);
if (queryProfile != null) {
// Move all request parameters to the query profile just to validate that the parameter settings are legal
Properties queryProfileProperties = new QueryProfileProperties(queryProfile);
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/InvokerFactory.java b/container-search/src/main/java/com/yahoo/search/dispatch/InvokerFactory.java
index 1bcb640e3a5..1de274ce6cf 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/InvokerFactory.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/InvokerFactory.java
@@ -82,7 +82,7 @@ public abstract class InvokerFactory {
if ( ! searchCluster.isPartialGroupCoverageSufficient(success) && !acceptIncompleteCoverage) {
return Optional.empty();
}
- if (invokers.size() == 0) {
+ if (invokers.isEmpty()) {
return Optional.of(createCoverageErrorInvoker(nodes, failed));
}
}
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java
index ce834b108db..159a42676ec 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java
@@ -311,19 +311,17 @@ public class SearchCluster implements NodeManager<Node> {
// With just one group sufficient coverage may not be the same as full coverage, as the
// group will always be marked sufficient for use.
updateSufficientCoverage(group, true);
- boolean sufficientCoverage = isGroupCoverageSufficient(group.workingNodes(),
- group.getActiveDocuments(),
+ boolean sufficientCoverage = isGroupCoverageSufficient(group.getActiveDocuments(),
group.getActiveDocuments());
trackGroupCoverageChanges(group, sufficientCoverage, group.getActiveDocuments());
}
private void pingIterationCompletedMultipleGroups() {
- aggregateNodeValues();
+ orderedGroups().forEach(Group::aggregateNodeValues);
long medianDocuments = medianDocumentsPerGroup();
boolean anyGroupsSufficientCoverage = false;
for (Group group : orderedGroups()) {
- boolean sufficientCoverage = isGroupCoverageSufficient(group.workingNodes(),
- group.getActiveDocuments(),
+ boolean sufficientCoverage = isGroupCoverageSufficient(group.getActiveDocuments(),
medianDocuments);
anyGroupsSufficientCoverage = anyGroupsSufficientCoverage || sufficientCoverage;
updateSufficientCoverage(group, sufficientCoverage);
@@ -331,10 +329,6 @@ public class SearchCluster implements NodeManager<Node> {
}
}
- private void aggregateNodeValues() {
- orderedGroups().forEach(Group::aggregateNodeValues);
- }
-
private long medianDocumentsPerGroup() {
if (orderedGroups().isEmpty()) return 0;
var activeDocuments = orderedGroups().stream().map(Group::getActiveDocuments).collect(Collectors.toList());
@@ -356,23 +350,13 @@ public class SearchCluster implements NodeManager<Node> {
}
}
- private boolean isGroupCoverageSufficient(int workingNodesInGroup, long activeDocuments, long medianDocuments) {
+ private boolean isGroupCoverageSufficient(long activeDocuments, long medianDocuments) {
double documentCoverage = 100.0 * (double) activeDocuments / medianDocuments;
if (medianDocuments > 0 && documentCoverage < dispatchConfig.minActivedocsPercentage())
return false;
-
- if ( ! isGroupNodeCoverageSufficient(workingNodesInGroup))
- return false;
-
return true;
}
- private boolean isGroupNodeCoverageSufficient(int workingNodesInGroup) {
- int nodesAllowedDown = dispatchConfig.maxNodesDownPerGroup()
- + (int) (((double) wantedGroupSize() * (100.0 - dispatchConfig.minGroupCoverage())) / 100.0);
- return workingNodesInGroup + nodesAllowedDown >= wantedGroupSize();
- }
-
public boolean isGroupWellBalanced(OptionalInt groupId) {
if (groupId.isEmpty()) return false;
Group group = groups().get(groupId.getAsInt());
@@ -386,7 +370,7 @@ public class SearchCluster implements NodeManager<Node> {
if (orderedGroups().size() == 1)
return nodes.size() >= wantedGroupSize() - dispatchConfig.maxNodesDownPerGroup();
long activeDocuments = nodes.stream().mapToLong(Node::getActiveDocuments).sum();
- return isGroupCoverageSufficient(nodes.size(), activeDocuments, medianDocumentsPerGroup());
+ return isGroupCoverageSufficient(activeDocuments, medianDocumentsPerGroup());
}
private void trackGroupCoverageChanges(Group group, boolean fullCoverage, long medianDocuments) {
diff --git a/container-search/src/main/java/com/yahoo/search/federation/FederationSearcher.java b/container-search/src/main/java/com/yahoo/search/federation/FederationSearcher.java
index f6bf91f5f85..8d0e4944ab8 100644
--- a/container-search/src/main/java/com/yahoo/search/federation/FederationSearcher.java
+++ b/container-search/src/main/java/com/yahoo/search/federation/FederationSearcher.java
@@ -338,9 +338,8 @@ public class FederationSearcher extends ForkingSearcher {
private List<String> allSourceRefDescriptions() {
List<String> descriptions = new ArrayList<>();
- for (com.yahoo.search.federation.sourceref.Target target : searchChainResolver.allTopLevelTargets()) {
+ for (com.yahoo.search.federation.sourceref.Target target : searchChainResolver.allTopLevelTargets())
descriptions.add(target.searchRefDescription());
- }
return descriptions;
}
diff --git a/container-search/src/main/java/com/yahoo/search/federation/sourceref/SearchChainInvocationSpec.java b/container-search/src/main/java/com/yahoo/search/federation/sourceref/SearchChainInvocationSpec.java
index 6cb8d2ef174..59b4e521a56 100644
--- a/container-search/src/main/java/com/yahoo/search/federation/sourceref/SearchChainInvocationSpec.java
+++ b/container-search/src/main/java/com/yahoo/search/federation/sourceref/SearchChainInvocationSpec.java
@@ -9,7 +9,7 @@ import java.util.List;
import java.util.Objects;
/**
- * Specifices which search chain should be run and how it should be run.
+ * Specifies which search chain should be run and how it should be run.
* This is a value object.
*
* @author Tony Vaagenes
diff --git a/container-search/src/main/java/com/yahoo/search/federation/sourceref/SearchChainResolver.java b/container-search/src/main/java/com/yahoo/search/federation/sourceref/SearchChainResolver.java
index 36f6fe424a0..6626c1b3cc4 100644
--- a/container-search/src/main/java/com/yahoo/search/federation/sourceref/SearchChainResolver.java
+++ b/container-search/src/main/java/com/yahoo/search/federation/sourceref/SearchChainResolver.java
@@ -92,7 +92,6 @@ public class SearchChainResolver {
public Builder addSourceForProvider(ComponentId sourceId, ComponentId providerId, ComponentId searchChainId,
boolean isDefaultProviderForSource, FederationOptions federationOptions,
List<String> documentTypes) {
-
SearchChainInvocationSpec searchChainInvocationSpec =
new SearchChainInvocationSpec(searchChainId, sourceId, providerId, federationOptions, documentTypes);
@@ -133,7 +132,6 @@ public class SearchChainResolver {
this.defaultTargets = Collections.unmodifiableSortedSet(defaultTargets);
}
-
public SearchChainInvocationSpec resolve(ComponentSpecification sourceRef, Properties sourceToProviderMap)
throws UnresolvedSearchChainException {
diff --git a/container-search/src/main/java/com/yahoo/search/federation/sourceref/SingleTarget.java b/container-search/src/main/java/com/yahoo/search/federation/sourceref/SingleTarget.java
index 9c7e1024518..4613c73c4b4 100644
--- a/container-search/src/main/java/com/yahoo/search/federation/sourceref/SingleTarget.java
+++ b/container-search/src/main/java/com/yahoo/search/federation/sourceref/SingleTarget.java
@@ -5,8 +5,6 @@ import com.yahoo.component.ComponentId;
import com.yahoo.processing.request.Properties;
/**
- * TODO: What is this?
- *
* @author Tony Vaagenes
*/
public class SingleTarget extends Target {
diff --git a/container-search/src/main/java/com/yahoo/search/federation/sourceref/Target.java b/container-search/src/main/java/com/yahoo/search/federation/sourceref/Target.java
index f23e24525bb..1b11e588f11 100644
--- a/container-search/src/main/java/com/yahoo/search/federation/sourceref/Target.java
+++ b/container-search/src/main/java/com/yahoo/search/federation/sourceref/Target.java
@@ -6,8 +6,6 @@ import com.yahoo.component.ComponentId;
import com.yahoo.processing.request.Properties;
/**
- * TODO: What's this?
- *
* @author Tony Vaagenes
*/
public abstract class Target extends AbstractComponent {
diff --git a/container-search/src/main/java/com/yahoo/search/handler/SearchResponse.java b/container-search/src/main/java/com/yahoo/search/handler/SearchResponse.java
index b4a469c569a..5b5ff0770c4 100644
--- a/container-search/src/main/java/com/yahoo/search/handler/SearchResponse.java
+++ b/container-search/src/main/java/com/yahoo/search/handler/SearchResponse.java
@@ -49,9 +49,11 @@ public class SearchResponse {
}
public static Timing createTiming(Query query, Result result) {
- return new Timing(result.getElapsedTime().firstFill(),
+ long summaryStartTime = result.getElapsedTime().firstFill();
+ long queryStartTime = result.getElapsedTime().first();
+ return new Timing(summaryStartTime,
0,
- result.getElapsedTime().first(),
+ queryStartTime == Long.MAX_VALUE ? 0 : queryStartTime,
query.getTimeout());
}
diff --git a/container-search/src/main/java/com/yahoo/search/query/Presentation.java b/container-search/src/main/java/com/yahoo/search/query/Presentation.java
index e147b14071a..b10e8442a5f 100644
--- a/container-search/src/main/java/com/yahoo/search/query/Presentation.java
+++ b/container-search/src/main/java/com/yahoo/search/query/Presentation.java
@@ -128,7 +128,7 @@ public class Presentation implements Cloneable {
return clone;
}
catch (CloneNotSupportedException e) {
- throw new RuntimeException("Someone inserted a noncloneable superclass",e);
+ throw new RuntimeException("Someone inserted a noncloneable superclass", e);
}
}
diff --git a/container-search/src/main/java/com/yahoo/search/query/Select.java b/container-search/src/main/java/com/yahoo/search/query/Select.java
index d90550084eb..a7e491f5269 100644
--- a/container-search/src/main/java/com/yahoo/search/query/Select.java
+++ b/container-search/src/main/java/com/yahoo/search/query/Select.java
@@ -1,7 +1,6 @@
// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.search.query;
-import com.yahoo.processing.request.CompoundName;
import com.yahoo.search.Query;
import com.yahoo.search.grouping.GroupingRequest;
import com.yahoo.search.query.parser.ParserEnvironment;
@@ -12,13 +11,12 @@ import com.yahoo.search.yql.VespaGroupingStep;
import java.util.ArrayList;
import java.util.Collections;
-import java.util.LinkedHashSet;
import java.util.List;
import java.util.Objects;
/**
- * The parameters defining the where-clause and groping of a query
+ * The parameters defining the where-clause and grouping of a query
*
* @author henrhoi
*/
@@ -26,7 +24,6 @@ public class Select implements Cloneable {
/** The type representing the property arguments consumed by this */
private static final QueryProfileType argumentType;
- private static final CompoundName argumentTypeName;
public static final String SELECT = "select";
public static final String WHERE = "where";
@@ -46,7 +43,6 @@ public class Select implements Cloneable {
argumentType.addField(new FieldDescription(WHERE, "string"));
argumentType.addField(new FieldDescription(GROUPING, "string"));
argumentType.freeze();
- argumentTypeName = new CompoundName(argumentType.getId().getName());
}
public static QueryProfileType getArgumentType() { return argumentType; }
diff --git a/container-search/src/main/java/com/yahoo/search/query/SelectParser.java b/container-search/src/main/java/com/yahoo/search/query/SelectParser.java
index d212c2d6ec4..f94c6300e83 100644
--- a/container-search/src/main/java/com/yahoo/search/query/SelectParser.java
+++ b/container-search/src/main/java/com/yahoo/search/query/SelectParser.java
@@ -467,7 +467,6 @@ public class SelectParser implements Parser {
}
private Item buildNearestNeighbor(String key, Inspector value) {
-
HashMap<Integer, Inspector> children = childMap(value);
Preconditions.checkArgument(children.size() == 2, "Expected 2 arguments, got %s.", children.size());
String field = children.get(0).asString();
diff --git a/container-search/src/main/java/com/yahoo/search/query/parser/ParserEnvironment.java b/container-search/src/main/java/com/yahoo/search/query/parser/ParserEnvironment.java
index 94b9bf6ce65..df96d314455 100644
--- a/container-search/src/main/java/com/yahoo/search/query/parser/ParserEnvironment.java
+++ b/container-search/src/main/java/com/yahoo/search/query/parser/ParserEnvironment.java
@@ -4,7 +4,7 @@ package com.yahoo.search.query.parser;
import com.yahoo.language.Linguistics;
import com.yahoo.language.simple.SimpleLinguistics;
import com.yahoo.prelude.IndexFacts;
-import com.yahoo.prelude.query.parser.SpecialTokens;
+import com.yahoo.language.process.SpecialTokens;
import com.yahoo.search.Searcher;
import com.yahoo.search.searchchain.Execution;
@@ -18,7 +18,7 @@ public final class ParserEnvironment {
private IndexFacts indexFacts = new IndexFacts();
private Linguistics linguistics = new SimpleLinguistics();
- private SpecialTokens specialTokens = new SpecialTokens();
+ private SpecialTokens specialTokens = SpecialTokens.empty();
public IndexFacts getIndexFacts() {
return indexFacts;
diff --git a/container-search/src/main/java/com/yahoo/search/query/profile/QueryProfileProperties.java b/container-search/src/main/java/com/yahoo/search/query/profile/QueryProfileProperties.java
index 41272d695ac..34fe376150d 100644
--- a/container-search/src/main/java/com/yahoo/search/query/profile/QueryProfileProperties.java
+++ b/container-search/src/main/java/com/yahoo/search/query/profile/QueryProfileProperties.java
@@ -117,7 +117,7 @@ public class QueryProfileProperties extends Properties {
value = fieldDescription.getType().convertFrom(value, profile.getRegistry());
if (value == null)
throw new IllegalInputException("'" + value + "' is not a " +
- fieldDescription.getType().toInstanceDescription());
+ fieldDescription.getType().toInstanceDescription());
}
else if (fieldDescription.getType() instanceof QueryProfileFieldType) {
// If a type is specified, use that instead of the type implied by the name
diff --git a/container-search/src/main/java/com/yahoo/search/query/profile/types/QueryProfileType.java b/container-search/src/main/java/com/yahoo/search/query/profile/types/QueryProfileType.java
index c02aada2062..e4396894595 100644
--- a/container-search/src/main/java/com/yahoo/search/query/profile/types/QueryProfileType.java
+++ b/container-search/src/main/java/com/yahoo/search/query/profile/types/QueryProfileType.java
@@ -177,11 +177,12 @@ public class QueryProfileType extends FreezableSimpleComponent {
public void freeze() {
if (isFrozen()) return;
- // Flatten the inheritance hierarchy into this to facilitate faster lookup
+ // Flatten for faster lookup
for (QueryProfileType inheritedType : inherited) {
for (FieldDescription field : inheritedType.fields().values())
- if ( ! fields.containsKey(field.getName()))
- fields.put(field.getName(),field);
+ if ( ! fields.containsKey(field.getName())) {
+ fields.put(field.getName(), field);
+ }
}
fields = ImmutableMap.copyOf(fields);
inherited = ImmutableList.copyOf(inherited);
@@ -354,9 +355,10 @@ public class QueryProfileType extends FreezableSimpleComponent {
if (inherited().size() == 0) return Collections.unmodifiableMap(fields);
// Collapse inherited
- Map<String, FieldDescription> allFields = new HashMap<>(fields);
+ Map<String, FieldDescription> allFields = new HashMap<>();
for (QueryProfileType inheritedType : inherited)
allFields.putAll(inheritedType.fields());
+ allFields.putAll(fields);
return Collections.unmodifiableMap(allFields);
}
diff --git a/container-search/src/main/java/com/yahoo/search/searchchain/Execution.java b/container-search/src/main/java/com/yahoo/search/searchchain/Execution.java
index 84fe88d0292..0574fc660c3 100644
--- a/container-search/src/main/java/com/yahoo/search/searchchain/Execution.java
+++ b/container-search/src/main/java/com/yahoo/search/searchchain/Execution.java
@@ -6,7 +6,7 @@ import com.yahoo.language.Linguistics;
import com.yahoo.prelude.IndexFacts;
import com.yahoo.prelude.Ping;
import com.yahoo.prelude.Pong;
-import com.yahoo.prelude.query.parser.SpecialTokenRegistry;
+import com.yahoo.language.process.SpecialTokenRegistry;
import com.yahoo.processing.Processor;
import com.yahoo.processing.Request;
import com.yahoo.processing.Response;
@@ -17,8 +17,6 @@ import com.yahoo.search.cluster.PingableSearcher;
import com.yahoo.search.rendering.RendererRegistry;
import com.yahoo.search.statistics.TimeTracker;
-import java.util.logging.Logger;
-
/**
* <p>An execution of a search chain. This keeps track of the call state for an execution (in the calling thread)
* of the searchers of a search chain.</p>
@@ -111,7 +109,7 @@ public class Execution extends com.yahoo.processing.execution.Execution {
public Context(SearchChainRegistry searchChainRegistry, IndexFacts indexFacts,
SpecialTokenRegistry tokenRegistry, RendererRegistry rendererRegistry, Linguistics linguistics)
{
- owner=null;
+ owner = null;
// The next time something is added here, compose into wrapper objects. Many arguments...
// Four methods need to be updated when adding something:
diff --git a/container-search/src/main/java/com/yahoo/search/searchchain/ExecutionFactory.java b/container-search/src/main/java/com/yahoo/search/searchchain/ExecutionFactory.java
index 31b6d06f78e..a813229c984 100644
--- a/container-search/src/main/java/com/yahoo/search/searchchain/ExecutionFactory.java
+++ b/container-search/src/main/java/com/yahoo/search/searchchain/ExecutionFactory.java
@@ -13,7 +13,7 @@ import com.yahoo.language.Linguistics;
import com.yahoo.language.simple.SimpleLinguistics;
import com.yahoo.prelude.IndexFacts;
import com.yahoo.prelude.IndexModel;
-import com.yahoo.prelude.query.parser.SpecialTokenRegistry;
+import com.yahoo.language.process.SpecialTokenRegistry;
import com.yahoo.processing.rendering.Renderer;
import com.yahoo.search.Searcher;
import com.yahoo.search.config.IndexInfoConfig;
diff --git a/container-search/src/main/java/com/yahoo/search/searchchain/model/VespaSearchers.java b/container-search/src/main/java/com/yahoo/search/searchchain/model/VespaSearchers.java
index c264425cb9c..2f680a8f3bd 100644
--- a/container-search/src/main/java/com/yahoo/search/searchchain/model/VespaSearchers.java
+++ b/container-search/src/main/java/com/yahoo/search/searchchain/model/VespaSearchers.java
@@ -60,8 +60,9 @@ public class VespaSearchers {
private static FederationSearcherModel federationSearcherModel() {
return new FederationSearcherModel(new ComponentSpecification("federation"),
- Dependencies.emptyDependencies(),
- Collections.emptyList(), true);
+ Dependencies.emptyDependencies(),
+ Collections.emptyList(),
+ true);
}
private static boolean allAdded(Collection<ChainedComponentModel> searcherModels, Set<ComponentId> componentIds) {
diff --git a/container-search/src/main/java/com/yahoo/search/searchchain/model/federation/FederationSearcherModel.java b/container-search/src/main/java/com/yahoo/search/searchchain/model/federation/FederationSearcherModel.java
index 2778f7e97db..01dccee5c7f 100644
--- a/container-search/src/main/java/com/yahoo/search/searchchain/model/federation/FederationSearcherModel.java
+++ b/container-search/src/main/java/com/yahoo/search/searchchain/model/federation/FederationSearcherModel.java
@@ -5,7 +5,6 @@ import java.util.List;
import com.google.common.collect.ImmutableList;
import com.yahoo.container.bundle.BundleInstantiationSpecification;
-import net.jcip.annotations.Immutable;
import com.yahoo.component.ComponentSpecification;
import com.yahoo.component.chain.dependencies.Dependencies;
@@ -17,23 +16,8 @@ import com.yahoo.search.federation.FederationSearcher;
*
* @author Tony Vaagenes
*/
-@Immutable
public class FederationSearcherModel extends ChainedComponentModel {
- /**
- * Specifies one or more search chains that can be addressed
- * as a single source.
- */
- public static class TargetSpec {
- public final ComponentSpecification sourceSpec;
- public final FederationOptions federationOptions;
-
- public TargetSpec(ComponentSpecification sourceSpec, FederationOptions federationOptions) {
- this.sourceSpec = sourceSpec;
- this.federationOptions = federationOptions;
- }
- }
-
private static final ComponentSpecification federationSearcherComponentSpecification =
new ComponentSpecification(FederationSearcher.class.getName());
@@ -48,4 +32,16 @@ public class FederationSearcherModel extends ChainedComponentModel {
this.targets = ImmutableList.copyOf(targets);
}
+ /** Specifies one or more search chains that can be addressed as a single source. */
+ public static class TargetSpec {
+
+ public final ComponentSpecification sourceSpec;
+ public final FederationOptions federationOptions;
+
+ public TargetSpec(ComponentSpecification sourceSpec, FederationOptions federationOptions) {
+ this.sourceSpec = sourceSpec;
+ this.federationOptions = federationOptions;
+ }
+ }
+
}
diff --git a/container-search/src/main/java/com/yahoo/search/searchers/ValidateNearestNeighborSearcher.java b/container-search/src/main/java/com/yahoo/search/searchers/ValidateNearestNeighborSearcher.java
index 65ca4a93cc1..ca9d17cb656 100644
--- a/container-search/src/main/java/com/yahoo/search/searchers/ValidateNearestNeighborSearcher.java
+++ b/container-search/src/main/java/com/yahoo/search/searchers/ValidateNearestNeighborSearcher.java
@@ -29,7 +29,6 @@ import java.util.Optional;
*
* @author arnej
*/
-@Beta
@Before(GroupingExecutor.COMPONENT_NAME) // Must happen before query.prepare()
public class ValidateNearestNeighborSearcher extends Searcher {
diff --git a/container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcher.java b/container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcher.java
index d06c4cd6f01..3528da17dfe 100644
--- a/container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcher.java
+++ b/container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcher.java
@@ -5,7 +5,6 @@ import com.yahoo.document.DocumentId;
import com.yahoo.document.select.parser.ParseException;
import com.yahoo.document.select.parser.TokenMgrException;
import com.yahoo.fs4.DocsumPacket;
-import java.util.logging.Level;
import com.yahoo.messagebus.routing.Route;
import com.yahoo.prelude.Ping;
import com.yahoo.prelude.Pong;
@@ -30,6 +29,7 @@ import java.math.BigInteger;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
+import java.util.logging.Level;
import java.util.logging.Logger;
/**
@@ -136,6 +136,10 @@ public class VdsStreamingSearcher extends VespaBackEndSearcher {
@Override
public Result doSearch2(Query query, Execution execution) {
+ if (query.getTimeLeft() <= 0) {
+ return new Result(query, ErrorMessage.createTimeout(String.format("No time left for searching (timeout=%d)", query.getTimeout())));
+ }
+
initializeMissingQueryFields(query);
if (documentSelectionQueryParameterCount(query) != 1) {
return new Result(query, ErrorMessage.createBackendCommunicationError("Streaming search needs one and " +
diff --git a/container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParseTestCase.java b/container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParseTestCase.java
index 6afea895f3a..cef8ae1751c 100644
--- a/container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParseTestCase.java
+++ b/container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParseTestCase.java
@@ -18,16 +18,14 @@ import com.yahoo.prelude.query.PhraseSegmentItem;
import com.yahoo.prelude.query.PrefixItem;
import com.yahoo.prelude.query.RankItem;
import com.yahoo.prelude.query.SubstringItem;
-import com.yahoo.prelude.query.SubstringItem;
import com.yahoo.prelude.query.SuffixItem;
import com.yahoo.prelude.query.TaggableItem;
import com.yahoo.prelude.query.WordItem;
-import com.yahoo.prelude.query.parser.SpecialTokens;
+import com.yahoo.language.process.SpecialTokens;
import com.yahoo.prelude.query.parser.TestLinguistics;
import com.yahoo.search.Query;
import org.junit.Test;
-import java.util.Collections;
import java.util.Iterator;
import static org.junit.Assert.assertEquals;
@@ -1639,7 +1637,7 @@ public class ParseTestCase {
@Test
public void testNonSpecialTokenParsing() {
- ParsingTester customTester = new ParsingTester(new SpecialTokens("default"));
+ ParsingTester customTester = new ParsingTester(SpecialTokens.empty());
customTester.assertParsed("OR c or c with (AND tcp ip)", "c# or c++ with tcp/ip", Query.Type.ANY);
}
diff --git a/container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParsingTester.java b/container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParsingTester.java
index 17155fff5de..fd7e4cbe0e6 100644
--- a/container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParsingTester.java
+++ b/container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParsingTester.java
@@ -11,8 +11,8 @@ import com.yahoo.prelude.IndexFacts;
import com.yahoo.prelude.IndexModel;
import com.yahoo.prelude.query.Item;
import com.yahoo.prelude.query.NullItem;
-import com.yahoo.prelude.query.parser.SpecialTokenRegistry;
-import com.yahoo.prelude.query.parser.SpecialTokens;
+import com.yahoo.language.process.SpecialTokenRegistry;
+import com.yahoo.language.process.SpecialTokens;
import com.yahoo.search.Query;
import com.yahoo.search.config.IndexInfoConfig;
import com.yahoo.search.query.parser.Parsable;
@@ -20,6 +20,9 @@ import com.yahoo.search.query.parser.Parser;
import com.yahoo.search.query.parser.ParserEnvironment;
import com.yahoo.search.query.parser.ParserFactory;
+import java.util.ArrayList;
+import java.util.List;
+
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
@@ -32,7 +35,7 @@ import static org.junit.Assert.assertTrue;
public class ParsingTester {
private static final Linguistics linguistics = new SimpleLinguistics();
- private IndexFacts indexFacts;
+ private final IndexFacts indexFacts;
private SpecialTokenRegistry tokenRegistry;
public ParsingTester() {
@@ -49,11 +52,10 @@ public class ParsingTester {
public ParsingTester(IndexFacts indexFacts, SpecialTokens specialTokens) {
indexFacts.freeze();
- specialTokens.freeze();
this.indexFacts = indexFacts;
tokenRegistry = new SpecialTokenRegistry();
- tokenRegistry.addSpecialTokens(specialTokens);
+ tokenRegistry = new SpecialTokenRegistry(List.of(specialTokens));
}
/**
@@ -72,13 +74,13 @@ public class ParsingTester {
* This can be used to add new tokens and passing the resulting special tokens to the constructor of this.
*/
public static SpecialTokens createSpecialTokens() {
- SpecialTokens tokens = new SpecialTokens("default");
- tokens.addSpecialToken("c++", null);
- tokens.addSpecialToken(".net", "dotnet");
- tokens.addSpecialToken("tcp/ip", null);
- tokens.addSpecialToken("c#", null);
- tokens.addSpecialToken("special-token-fs","firstsecond");
- return tokens;
+ List<SpecialTokens.Token> tokens = new ArrayList<>();
+ tokens.add(new SpecialTokens.Token("c++"));
+ tokens.add(new SpecialTokens.Token(".net", "dotnet"));
+ tokens.add(new SpecialTokens.Token("tcp/ip"));
+ tokens.add(new SpecialTokens.Token("c#"));
+ tokens.add(new SpecialTokens.Token("special-token-fs", "firstsecond"));
+ return new SpecialTokens("default", tokens);
}
/**
diff --git a/container-search/src/test/java/com/yahoo/prelude/query/parser/test/TokenizerTestCase.java b/container-search/src/test/java/com/yahoo/prelude/query/parser/test/TokenizerTestCase.java
index aa2e9dbcf75..ab727a10cdd 100644
--- a/container-search/src/test/java/com/yahoo/prelude/query/parser/test/TokenizerTestCase.java
+++ b/container-search/src/test/java/com/yahoo/prelude/query/parser/test/TokenizerTestCase.java
@@ -6,12 +6,13 @@ import com.yahoo.prelude.Index;
import com.yahoo.prelude.IndexFacts;
import com.yahoo.prelude.IndexModel;
import com.yahoo.prelude.SearchDefinition;
-import com.yahoo.prelude.query.parser.SpecialTokenRegistry;
-import com.yahoo.prelude.query.parser.SpecialTokens;
+import com.yahoo.language.process.SpecialTokenRegistry;
+import com.yahoo.language.process.SpecialTokens;
import com.yahoo.prelude.query.parser.Token;
import com.yahoo.prelude.query.parser.Tokenizer;
import org.junit.Test;
+import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
@@ -35,17 +36,15 @@ import static org.junit.Assert.assertTrue;
/**
* Tests the tokenizer
*
- * @author bratseth
+ * @author bratseth
*/
public class TokenizerTestCase {
- private SpecialTokenRegistry defaultRegistry = new SpecialTokenRegistry("file:src/test/java/com/yahoo/prelude/query/parser/test/replacingtokens.cfg");
-
@Test
public void testPlainTokenization() {
Tokenizer tokenizer = new Tokenizer(new SimpleLinguistics());
- tokenizer.setSpecialTokens(createSpecialTokens());
+ tokenizer.setSpecialTokens(createSpecialTokens().getSpecialTokens("default"));
List<?> tokens = tokenizer.tokenize("drive (to hwy88, 88) +or language:en ugcapi_1 & &a");
assertEquals(new Token(WORD, "drive"), tokens.get(0));
@@ -87,7 +86,7 @@ public class TokenizerTestCase {
public void testOneSpecialToken() {
Tokenizer tokenizer = new Tokenizer(new SimpleLinguistics());
- tokenizer.setSpecialTokens(createSpecialTokens());
+ tokenizer.setSpecialTokens(createSpecialTokens().getSpecialTokens("default"));
List<?> tokens = tokenizer.tokenize("c++ lovers, please apply");
assertEquals(new Token(WORD, "c++"), tokens.get(0));
@@ -97,7 +96,7 @@ public class TokenizerTestCase {
public void testSpecialTokenCombination() {
Tokenizer tokenizer = new Tokenizer(new SimpleLinguistics());
- tokenizer.setSpecialTokens(createSpecialTokens());
+ tokenizer.setSpecialTokens(createSpecialTokens().getSpecialTokens("default"));
List<?> tokens = tokenizer.tokenize("c#, c++ or .net know, not tcp/ip");
assertEquals(new Token(WORD, "c#"), tokens.get(0));
@@ -123,10 +122,9 @@ public class TokenizerTestCase {
*/
@Test
public void testSpecialTokenCJK() {
- assertEquals("Special tokens configured", 6, defaultRegistry.getSpecialTokens("default").size());
Tokenizer tokenizer = new Tokenizer(new SimpleLinguistics());
tokenizer.setSubstringSpecialTokens(true);
- tokenizer.setSpecialTokens(defaultRegistry.getSpecialTokens("default"));
+ tokenizer.setSpecialTokens(createSpecialTokens().getSpecialTokens("replacing"));
List<?> tokens = tokenizer.tokenize("fooc#bar,c++with spacebarknowknowknow,knowknownot know");
assertEquals(new Token(WORD, "foo"), tokens.get(0));
@@ -151,7 +149,7 @@ public class TokenizerTestCase {
public void testSpecialTokenCaseInsensitive() {
Tokenizer tokenizer = new Tokenizer(new SimpleLinguistics());
- tokenizer.setSpecialTokens(createSpecialTokens());
+ tokenizer.setSpecialTokens(createSpecialTokens().getSpecialTokens("default"));
List<?> tokens = tokenizer.tokenize("The AS/400 is great");
assertEquals(new Token(WORD, "The"), tokens.get(0));
@@ -167,7 +165,7 @@ public class TokenizerTestCase {
public void testSpecialTokenNonMatch() {
Tokenizer tokenizer = new Tokenizer(new SimpleLinguistics());
- tokenizer.setSpecialTokens(createSpecialTokens());
+ tokenizer.setSpecialTokens(createSpecialTokens().getSpecialTokens("default"));
List<?> tokens = tokenizer.tokenize("c++ c+ aS/400 i/o .net i/ooo ap.net");
assertEquals(new Token(WORD, "c++"), tokens.get(0));
@@ -190,18 +188,9 @@ public class TokenizerTestCase {
@Test
public void testSpecialTokenConfigurationDefault() {
- String tokenFile = "file:src/test/java/com/yahoo/prelude/query/parser/test/specialtokens.cfg";
-
- SpecialTokenRegistry r = new SpecialTokenRegistry(tokenFile);
- assertEquals("Special tokens configured", 6,
- r.getSpecialTokens("default").size());
- assertEquals("Special tokens configured", 4,
- r.getSpecialTokens("other").size());
-
Tokenizer tokenizer = new Tokenizer(new SimpleLinguistics());
- tokenizer.setSpecialTokens(
- r.getSpecialTokens("default"));
+ tokenizer.setSpecialTokens(createSpecialTokens().getSpecialTokens("default"));
List<?> tokens = tokenizer.tokenize(
"with space, c++ or .... know, not b.s.d.");
@@ -224,18 +213,9 @@ public class TokenizerTestCase {
@Test
public void testSpecialTokenConfigurationOther() {
- String tokenFile = "file:src/test/java/com/yahoo/prelude/query/parser/test/specialtokens.cfg";
-
- SpecialTokenRegistry r = new SpecialTokenRegistry(tokenFile);
- assertEquals("Special tokens configured", 6,
- r.getSpecialTokens("default").size());
- assertEquals("Special tokens configured", 4,
- r.getSpecialTokens("other").size());
-
Tokenizer tokenizer = new Tokenizer(new SimpleLinguistics());
- tokenizer.setSpecialTokens(
- r.getSpecialTokens("other"));
+ tokenizer.setSpecialTokens(createSpecialTokens().getSpecialTokens("other"));
List<?> tokens = tokenizer.tokenize(
"with space,!!!*** [huh] or ------ " + "know, &&&%%% b.s.d.");
@@ -267,26 +247,9 @@ public class TokenizerTestCase {
}
@Test
- public void testSpecialTokenConfigurationMissing() {
- String tokenFile = "file:source/bogus/specialtokens.cfg";
-
- SpecialTokenRegistry r = new SpecialTokenRegistry(tokenFile);
-
- Tokenizer tokenizer = new Tokenizer(new SimpleLinguistics());
-
- tokenizer.setSpecialTokens(r.getSpecialTokens("other"));
- List<?> tokens = tokenizer.tokenize("c++");
-
- assertEquals(new Token(WORD, "c"), tokens.get(0));
- assertEquals(new Token(PLUS, "+"), tokens.get(1));
- assertEquals(new Token(PLUS, "+"), tokens.get(2));
- }
-
- @Test
public void testTokenReplacing() {
- assertEquals("Special tokens configured", 6, defaultRegistry.getSpecialTokens("default").size());
Tokenizer tokenizer = new Tokenizer(new SimpleLinguistics());
- tokenizer.setSpecialTokens(defaultRegistry.getSpecialTokens("default"));
+ tokenizer.setSpecialTokens(createSpecialTokens().getSpecialTokens("replacing"));
List<?> tokens = tokenizer.tokenize("with space, c++ or .... know, not b.s.d.");
assertEquals(new Token(WORD, "with-space"), tokens.get(0));
@@ -745,7 +708,7 @@ public class TokenizerTestCase {
public void testSingleQuoteAsWordCharacter() {
Tokenizer tokenizer = new Tokenizer(new SimpleLinguistics());
- tokenizer.setSpecialTokens(createSpecialTokens());
+ tokenizer.setSpecialTokens(createSpecialTokens().getSpecialTokens("default"));
List<?> tokens = tokenizer.tokenize("drive (to hwy88, 88) +or language:en nalle:a'a ugcapi_1 'a' 'a a'");
assertEquals(new Token(WORD, "drive"), tokens.get(0));
@@ -781,17 +744,38 @@ public class TokenizerTestCase {
assertEquals(new Token(WORD, "a'"), tokens.get(30));
}
- private SpecialTokens createSpecialTokens() {
- SpecialTokens tokens = new SpecialTokens("default");
-
- tokens.addSpecialToken("c+", null);
- tokens.addSpecialToken("c++", null);
- tokens.addSpecialToken(".net", null);
- tokens.addSpecialToken("tcp/ip", null);
- tokens.addSpecialToken("i/o", null);
- tokens.addSpecialToken("c#", null);
- tokens.addSpecialToken("AS/400", null);
- return tokens;
+ private SpecialTokenRegistry createSpecialTokens() {
+ List<SpecialTokens.Token> tokens = new ArrayList<>();
+ tokens.add(new SpecialTokens.Token("c+"));
+ tokens.add(new SpecialTokens.Token("c++"));
+ tokens.add(new SpecialTokens.Token(".net"));
+ tokens.add(new SpecialTokens.Token("tcp/ip"));
+ tokens.add(new SpecialTokens.Token("i/o"));
+ tokens.add(new SpecialTokens.Token("c#"));
+ tokens.add(new SpecialTokens.Token("AS/400"));
+ tokens.add(new SpecialTokens.Token("...."));
+ tokens.add(new SpecialTokens.Token("b.s.d."));
+ tokens.add(new SpecialTokens.Token("with space"));
+ tokens.add(new SpecialTokens.Token("dvd\\xB1r"));
+ SpecialTokens defaultTokens = new SpecialTokens("default", tokens);
+
+ tokens = new ArrayList<>();
+ tokens.add(new SpecialTokens.Token("[huh]"));
+ tokens.add(new SpecialTokens.Token("&&&%%%"));
+ tokens.add(new SpecialTokens.Token("------"));
+ tokens.add(new SpecialTokens.Token("!!!***"));
+ SpecialTokens otherTokens = new SpecialTokens("other", tokens);
+
+ tokens = new ArrayList<>();
+ tokens.add(new SpecialTokens.Token("...."));
+ tokens.add(new SpecialTokens.Token("c++", "cpp"));
+ tokens.add(new SpecialTokens.Token("b.s.d."));
+ tokens.add(new SpecialTokens.Token("with space", "with-space"));
+ tokens.add(new SpecialTokens.Token("c#"));
+ tokens.add(new SpecialTokens.Token("know", "knuwww"));
+ SpecialTokens replacingTokens = new SpecialTokens("replacing", tokens);
+
+ return new SpecialTokenRegistry(List.of(defaultTokens, otherTokens, replacingTokens));
}
}
diff --git a/container-search/src/test/java/com/yahoo/prelude/query/parser/test/replacingtokens.cfg b/container-search/src/test/java/com/yahoo/prelude/query/parser/test/replacingtokens.cfg
deleted file mode 100644
index 6a189de0164..00000000000
--- a/container-search/src/test/java/com/yahoo/prelude/query/parser/test/replacingtokens.cfg
+++ /dev/null
@@ -1,12 +0,0 @@
-tokenlist[1]
-tokenlist[0].name default
-tokenlist[0].tokens[6]
-tokenlist[0].tokens[0].token ....
-tokenlist[0].tokens[1].token c++
-tokenlist[0].tokens[1].replace cpp
-tokenlist[0].tokens[2].token b.s.d.
-tokenlist[0].tokens[3].token with space
-tokenlist[0].tokens[3].replace with-space
-tokenlist[0].tokens[4].token c#
-tokenlist[0].tokens[5].token know
-tokenlist[0].tokens[5].replace knuwww
diff --git a/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterCoverageTest.java b/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterCoverageTest.java
index 6338107d4b6..65e7173c4ee 100644
--- a/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterCoverageTest.java
+++ b/container-search/src/test/java/com/yahoo/search/dispatch/searchcluster/SearchClusterCoverageTest.java
@@ -74,10 +74,10 @@ public class SearchClusterCoverageTest {
@Test
public void three_groups_one_has_a_node_down() {
- var tester = new SearchClusterTester(3, 3);
+ var tester = new SearchClusterTester(3, 3);
tester.setDocsPerNode(100, 0);
- tester.setDocsPerNode(150, 1);
+ tester.setDocsPerNode(100, 1);
tester.setDocsPerNode(100, 2);
tester.setWorking(1, 1, false);
tester.pingIterationCompleted();
@@ -86,4 +86,18 @@ public class SearchClusterCoverageTest {
assertTrue(tester.group(2).hasSufficientCoverage());
}
+ @Test
+ public void three_groups_one_has_a_node_down_but_remaining_has_enough_docs() {
+ var tester = new SearchClusterTester(3, 3);
+
+ tester.setDocsPerNode(100, 0);
+ tester.setDocsPerNode(150, 1);
+ tester.setDocsPerNode(100, 2);
+ tester.setWorking(1, 1, false);
+ tester.pingIterationCompleted();
+ assertTrue(tester.group(0).hasSufficientCoverage());
+ assertTrue("Sufficient documents on remaining two nodes", tester.group(1).hasSufficientCoverage());
+ assertTrue(tester.group(2).hasSufficientCoverage());
+ }
+
}
diff --git a/container-search/src/test/java/com/yahoo/search/query/profile/types/test/QueryProfileTypeTestCase.java b/container-search/src/test/java/com/yahoo/search/query/profile/types/test/QueryProfileTypeTestCase.java
index a0bf0972281..39ba607b741 100644
--- a/container-search/src/test/java/com/yahoo/search/query/profile/types/test/QueryProfileTypeTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/query/profile/types/test/QueryProfileTypeTestCase.java
@@ -38,29 +38,32 @@ public class QueryProfileTypeTestCase {
private QueryProfileRegistry registry;
- private QueryProfileType type, typeStrict, user, userStrict;
+ private QueryProfileType testtype, emptyInheritingTesttype, testtypeStrict, user, userStrict;
@Before
public void setUp() {
registry = new QueryProfileRegistry();
- type = new QueryProfileType(new ComponentId("testtype"));
- type.inherited().add(registry.getTypeRegistry().getComponent(new ComponentId("native")));
- typeStrict = new QueryProfileType(new ComponentId("testtypeStrict"));
- typeStrict.setStrict(true);
+ testtype = new QueryProfileType(new ComponentId("testtype"));
+ testtype.inherited().add(registry.getTypeRegistry().getComponent(new ComponentId("native")));
+ emptyInheritingTesttype = new QueryProfileType(new ComponentId("emptyInheritingTesttype"));
+ emptyInheritingTesttype.inherited().add(testtype);
+ testtypeStrict = new QueryProfileType(new ComponentId("testtypeStrict"));
+ testtypeStrict.setStrict(true);
user = new QueryProfileType(new ComponentId("user"));
userStrict = new QueryProfileType(new ComponentId("userStrict"));
userStrict.setStrict(true);
- registry.getTypeRegistry().register(type);
- registry.getTypeRegistry().register(typeStrict);
+ registry.getTypeRegistry().register(testtype);
+ registry.getTypeRegistry().register(emptyInheritingTesttype);
+ registry.getTypeRegistry().register(testtypeStrict);
registry.getTypeRegistry().register(user);
registry.getTypeRegistry().register(userStrict);
- addTypeFields(type, registry.getTypeRegistry());
- type.addField(new FieldDescription("myUserQueryProfile", FieldType.fromString("query-profile:user", registry.getTypeRegistry())));
- addTypeFields(typeStrict, registry.getTypeRegistry());
- typeStrict.addField(new FieldDescription("myUserQueryProfile", FieldType.fromString("query-profile:userStrict", registry.getTypeRegistry())));
+ addTypeFields(testtype, registry.getTypeRegistry());
+ testtype.addField(new FieldDescription("myUserQueryProfile", FieldType.fromString("query-profile:user", registry.getTypeRegistry())));
+ addTypeFields(testtypeStrict, registry.getTypeRegistry());
+ testtypeStrict.addField(new FieldDescription("myUserQueryProfile", FieldType.fromString("query-profile:userStrict", registry.getTypeRegistry())));
addUserFields(user, registry.getTypeRegistry());
addUserFields(userStrict, registry.getTypeRegistry());
@@ -89,7 +92,7 @@ public class QueryProfileTypeTestCase {
@Test
public void testTypedOfPrimitivesAssignmentNonStrict() {
QueryProfile profile=new QueryProfile("test");
- profile.setType(type);
+ profile.setType(testtype);
registry.register(profile);
profile.set("myString","anyValue", registry);
@@ -99,7 +102,7 @@ public class QueryProfileTypeTestCase {
profile.set("myInteger", 3, registry);
assertWrongType(profile,"long","myLong","notLong");
assertWrongType(profile, "long", "myLong", "1.5");
- profile.set("myLong", 4000000000000l, registry);
+ profile.set("myLong", 4000000000000L, registry);
assertWrongType(profile, "float", "myFloat", "notFloat");
profile.set("myFloat", 3.14f, registry);
assertWrongType(profile, "double", "myDouble", "notDouble");
@@ -156,7 +159,7 @@ public class QueryProfileTypeTestCase {
@Test
public void testTypedOfPrimitivesAssignmentStrict() {
QueryProfile profile=new QueryProfile("test");
- profile.setType(typeStrict);
+ profile.setType(testtypeStrict);
profile.set("myString", "anyValue", registry);
assertNotPermitted(profile, "nontypedString", "anyValueToo"); // Illegal because this is strict
@@ -198,7 +201,7 @@ public class QueryProfileTypeTestCase {
@Test
public void testTypedAssignmentOfQueryProfilesNonStrict() {
QueryProfile profile=new QueryProfile("test");
- profile.setType(type);
+ profile.setType(testtype);
QueryProfile map1=new QueryProfile("myMap1");
map1.set("key1","value1", registry);
@@ -229,7 +232,7 @@ public class QueryProfileTypeTestCase {
@Test
public void testTypedAssignmentOfQueryProfilesStrict() {
QueryProfile profile=new QueryProfile("test");
- profile.setType(typeStrict);
+ profile.setType(testtypeStrict);
QueryProfile map1=new QueryProfile("myMap1");
map1.set("key1","value1", registry);
@@ -260,7 +263,7 @@ public class QueryProfileTypeTestCase {
@Test
public void testTypedAssignmentOfQueryProfileReferencesNonStrict() {
QueryProfile profile = new QueryProfile("test");
- profile.setType(type);
+ profile.setType(testtype);
QueryProfile map1 = new QueryProfile("myMap1");
map1.set("key1","value1", registry);
@@ -304,7 +307,7 @@ public class QueryProfileTypeTestCase {
@Test
public void testTypedOverridingOfQueryProfileReferencesNonStrictThroughQuery() {
QueryProfile profile=new QueryProfile("test");
- profile.setType(type);
+ profile.setType(testtype);
QueryProfile myUser=new QueryProfile("myUser");
myUser.setType(user);
@@ -338,7 +341,7 @@ public class QueryProfileTypeTestCase {
@Test
public void testTypedAssignmentOfQueryProfileReferencesNonStrictThroughQuery() {
QueryProfile profile = new QueryProfile("test");
- profile.setType(type);
+ profile.setType(testtype);
QueryProfile newUser = new QueryProfile("newUser");
newUser.setType(user);
@@ -367,7 +370,7 @@ public class QueryProfileTypeTestCase {
@Test
public void testTypedAssignmentOfQueryProfileReferencesStrictThroughQuery() {
QueryProfile profile = new QueryProfile("test");
- profile.setType(typeStrict);
+ profile.setType(testtypeStrict);
QueryProfile newUser = new QueryProfile("newUser");
newUser.setType(userStrict);
@@ -399,7 +402,25 @@ public class QueryProfileTypeTestCase {
@Test
public void testTensorRankFeatureInRequest() throws UnsupportedEncodingException {
QueryProfile profile = new QueryProfile("test");
- profile.setType(type);
+ profile.setType(testtype);
+ registry.register(profile);
+
+ CompiledQueryProfileRegistry cRegistry = registry.compile();
+ String tensorString = "{{a:a1, b:b1}:1.0, {a:a2, b:b1}:2.0}}";
+ Query query = new Query(HttpRequest.createTestRequest("?" + encode("ranking.features.query(myTensor1)") +
+ "=" + encode(tensorString),
+ com.yahoo.jdisc.http.HttpRequest.Method.GET),
+ cRegistry.getComponent("test"));
+ assertEquals(0, query.errors().size());
+ assertEquals(Tensor.from(tensorString), query.properties().get("ranking.features.query(myTensor1)"));
+ assertEquals(Tensor.from(tensorString), query.getRanking().getFeatures().getTensor("query(myTensor1)").get());
+ }
+
+ // Expected to work exactly as testTensorRankFeatureInRequest
+ @Test
+ public void testTensorRankFeatureInRequestWithInheritedQueryProfileType() throws UnsupportedEncodingException {
+ QueryProfile profile = new QueryProfile("test");
+ profile.setType(emptyInheritingTesttype);
registry.register(profile);
CompiledQueryProfileRegistry cRegistry = registry.compile();
@@ -420,7 +441,7 @@ public class QueryProfileTypeTestCase {
@Test
public void testIllegalStrictAssignmentFromRequest() {
QueryProfile profile = new QueryProfile("test");
- profile.setType(typeStrict);
+ profile.setType(testtypeStrict);
QueryProfile newUser = new QueryProfile("newUser");
newUser.setType(userStrict);
@@ -452,7 +473,7 @@ public class QueryProfileTypeTestCase {
topMap.set("subMap", subMap, registry);
QueryProfile test = new QueryProfile("test");
- test.setType(type);
+ test.setType(testtype);
subMap.set("typeProfile", test, registry);
QueryProfile myUser = new QueryProfile("myUser");
@@ -494,7 +515,7 @@ public class QueryProfileTypeTestCase {
topMap.set("subMap", subMap, registry);
QueryProfile test = new QueryProfile("test");
- test.setType(type);
+ test.setType(testtype);
subMap.set("typeProfile", test, registry);
QueryProfile myUser = new QueryProfile("myUser");
@@ -533,7 +554,7 @@ public class QueryProfileTypeTestCase {
topMap.set("subMap", subMap, registry);
QueryProfile test = new QueryProfile("test");
- test.setType(typeStrict);
+ test.setType(testtypeStrict);
subMap.set("typeProfile", test, registry);
registry.register(topMap);
@@ -567,7 +588,7 @@ public class QueryProfileTypeTestCase {
topMap.set("subMap",subMap, registry);
QueryProfile test = new QueryProfile("test");
- test.setType(type);
+ test.setType(testtype);
subMap.set("typeProfile",test, registry);
QueryProfile newUser = new QueryProfile("newUser");
diff --git a/container-search/src/test/java/com/yahoo/search/query/rewrite/RewriterFeaturesTestCase.java b/container-search/src/test/java/com/yahoo/search/query/rewrite/RewriterFeaturesTestCase.java
index 5508c2a73a7..08146bbe069 100644
--- a/container-search/src/test/java/com/yahoo/search/query/rewrite/RewriterFeaturesTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/query/rewrite/RewriterFeaturesTestCase.java
@@ -8,7 +8,7 @@ import org.junit.Test;
import com.yahoo.prelude.query.AndItem;
import com.yahoo.prelude.query.CompositeItem;
import com.yahoo.prelude.query.Item;
-import com.yahoo.prelude.query.parser.SpecialTokenRegistry;
+import com.yahoo.language.process.SpecialTokenRegistry;
import com.yahoo.search.Query;
import com.yahoo.search.searchchain.Execution;
import com.yahoo.search.searchchain.Execution.Context;
diff --git a/container-test/pom.xml b/container-test/pom.xml
index 3eebd64cc82..efc2c273a02 100644
--- a/container-test/pom.xml
+++ b/container-test/pom.xml
@@ -39,6 +39,18 @@
<dependency>
<groupId>com.fasterxml.jackson.dataformat</groupId>
<artifactId>jackson-dataformat-xml</artifactId>
+ <exclusions>
+ <exclusion>
+ <!-- Conflicts with javax.activation:javax.activation-api:1.2.0, which is "exported" via jdisc_core. -->
+ <groupId>jakarta.activation</groupId>
+ <artifactId>jakarta.activation-api</artifactId>
+ </exclusion>
+ <exclusion>
+ <!-- Conflicts with javax.xml.bind:jaxb-api:2.3, which is "exported" via jdisc_core.-->
+ <groupId>jakarta.xml.bind</groupId>
+ <artifactId>jakarta.xml.bind-api</artifactId>
+ </exclusion>
+ </exclusions>
</dependency>
<dependency>
<groupId>io.airlift</groupId>
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java
index cb29d5854a0..0e11bcdccaf 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java
@@ -15,7 +15,10 @@ public class ClusterMetrics {
public static final String DOCUMENT_COUNT = "documentCount";
public static final String FEED_LATENCY = "feedLatency";
public static final String QUERY_LATENCY = "queryLatency";
- public static final String FEEDING_BLOCKED = "feedingBlocked";
+ public static final String MEMORY_UTIL = "memoryUtil";
+ public static final String MEMORY_FEED_BLOCK_LIMIT = "memoryFeedBlockLimit";
+ public static final String DISK_UTIL = "diskUtil";
+ public static final String DISK_FEED_BLOCK_LIMIT = "diskFeedBlockLimit";
private final String clusterId;
private final String clusterType;
@@ -55,8 +58,20 @@ public class ClusterMetrics {
return Optional.ofNullable(metrics.get(QUERY_LATENCY));
}
- public Optional<Double> feedingBlocked() {
- return Optional.ofNullable(metrics.get(FEEDING_BLOCKED));
+ public Optional<Double> memoryUtil() {
+ return Optional.ofNullable(metrics.get(MEMORY_UTIL));
+ }
+
+ public Optional<Double> memoryFeedBlockLimit() {
+ return Optional.ofNullable(metrics.get(MEMORY_FEED_BLOCK_LIMIT));
+ }
+
+ public Optional<Double> diskUtil() {
+ return Optional.ofNullable(metrics.get(DISK_UTIL));
+ }
+
+ public Optional<Double> diskFeedBlockLimit() {
+ return Optional.ofNullable(metrics.get(DISK_FEED_BLOCK_LIMIT));
}
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/archive/ArchiveBucketDb.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/archive/ArchiveBucketDb.java
deleted file mode 100644
index 91c7cd68ea2..00000000000
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/archive/ArchiveBucketDb.java
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2021 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.api.integration.archive;
-
-import com.yahoo.config.provision.TenantName;
-import com.yahoo.config.provision.zone.ZoneId;
-
-import java.net.URI;
-import java.util.Optional;
-import java.util.Set;
-
-public interface ArchiveBucketDb {
-
- Optional<URI> archiveUriFor(ZoneId zoneId, TenantName tenant);
-
- Set<ArchiveBucket> buckets(ZoneId zoneId);
-}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/archive/package-info.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/archive/package-info.java
index 067d05cd14e..2fa68f1f39d 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/archive/package-info.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/archive/package-info.java
@@ -5,4 +5,4 @@
@ExportPackage
package com.yahoo.vespa.hosted.controller.api.integration.archive;
-import com.yahoo.osgi.annotation.ExportPackage; \ No newline at end of file
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/aws/MockRoleService.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/aws/MockRoleService.java
new file mode 100644
index 00000000000..f7c51f0bbc2
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/aws/MockRoleService.java
@@ -0,0 +1,21 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.hosted.controller.api.integration.aws;
+
+import com.yahoo.config.provision.TenantName;
+
+import java.util.List;
+
+public class MockRoleService extends NoopRoleService {
+
+ private List<TenantName> maintainedTenants;
+
+ @Override
+ public void maintainRoles(List<TenantName> tenants) {
+ maintainedTenants = List.copyOf(tenants);
+ }
+
+ public List<TenantName> maintainedTenants() {
+ return maintainedTenants;
+ }
+}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/Invoice.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/Invoice.java
index 39d974378b4..f8ef2958f63 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/Invoice.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/Invoice.java
@@ -6,6 +6,8 @@ import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.zone.ZoneId;
import java.math.BigDecimal;
+import java.time.Clock;
+import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.util.List;
import java.util.Map;
@@ -325,9 +327,10 @@ public class Invoice {
this.history = history;
}
- public static StatusHistory open() {
+ public static StatusHistory open(Clock clock) {
+ var now = clock.instant().atZone(ZoneOffset.UTC);
return new StatusHistory(
- new TreeMap<>(Map.of(ZonedDateTime.now(), "OPEN"))
+ new TreeMap<>(Map.of(now, "OPEN"))
);
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/MockBillingController.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/MockBillingController.java
index b24d532d4a3..535f344d352 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/MockBillingController.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/MockBillingController.java
@@ -5,7 +5,9 @@ import com.yahoo.config.provision.TenantName;
import com.yahoo.vespa.hosted.controller.api.integration.user.User;
import java.math.BigDecimal;
+import java.time.Clock;
import java.time.LocalDate;
+import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.util.ArrayList;
import java.util.Collection;
@@ -21,6 +23,7 @@ import java.util.stream.Collectors;
*/
public class MockBillingController implements BillingController {
+ private final Clock clock;
Map<TenantName, PlanId> plans = new HashMap<>();
Map<TenantName, PaymentInstrument> activeInstruments = new HashMap<>();
Map<TenantName, List<Invoice>> committedInvoices = new HashMap<>();
@@ -28,6 +31,10 @@ public class MockBillingController implements BillingController {
Map<TenantName, List<Invoice.LineItem>> unusedLineItems = new HashMap<>();
Map<TenantName, CollectionMethod> collectionMethod = new HashMap<>();
+ public MockBillingController(Clock clock) {
+ this.clock = clock;
+ }
+
@Override
public PlanId getPlan(TenantName tenant) {
return plans.getOrDefault(tenant, PlanId.from("trial"));
@@ -63,7 +70,7 @@ public class MockBillingController implements BillingController {
.add(new Invoice(
invoiceId,
tenant,
- Invoice.StatusHistory.open(),
+ Invoice.StatusHistory.open(clock),
List.of(),
startTime,
endTime
@@ -104,10 +111,11 @@ public class MockBillingController implements BillingController {
@Override
public void updateInvoiceStatus(Invoice.Id invoiceId, String agent, String status) {
+ var now = clock.instant().atZone(ZoneOffset.UTC);
committedInvoices.values().stream()
.flatMap(List::stream)
.filter(invoice -> invoiceId.equals(invoice.id()))
- .forEach(invoice -> invoice.statusHistory().history.put(ZonedDateTime.now(), status));
+ .forEach(invoice -> invoice.statusHistory().history.put(now, status));
}
@Override
@@ -192,6 +200,8 @@ public class MockBillingController implements BillingController {
}
private Invoice emptyInvoice() {
- return new Invoice(Invoice.Id.of("empty"), TenantName.defaultName(), Invoice.StatusHistory.open(), List.of(), ZonedDateTime.now(), ZonedDateTime.now());
+ var start = clock.instant().atZone(ZoneOffset.UTC);
+ var end = clock.instant().atZone(ZoneOffset.UTC);
+ return new Invoice(Invoice.Id.of("empty"), TenantName.defaultName(), Invoice.StatusHistory.open(clock), List.of(), start, end);
}
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Node.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Node.java
index 0f9e12d8cf2..5f46b949844 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Node.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Node.java
@@ -548,6 +548,11 @@ public class Node {
return this;
}
+ public Builder reports(Map<String, JsonNode> reports) {
+ this.reports = reports;
+ return this;
+ }
+
public Node build() {
return new Node(hostname, parentHostname, state, type, resources, owner, currentVersion, wantedVersion,
currentOsVersion, wantedOsVersion, currentFirmwareCheck, wantedFirmwareCheck, serviceState,
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/ChangeRequestClient.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/ChangeRequestClient.java
index f8f54567bea..4fa195f1b05 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/ChangeRequestClient.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/ChangeRequestClient.java
@@ -11,6 +11,6 @@ public interface ChangeRequestClient {
/** Get upcoming change requests and updated status of previously stored requests */
List<ChangeRequest> getChangeRequests(List<ChangeRequest> changeRequests);
- void approveChangeRequests(List<ChangeRequest> changeRequests);
+ void approveChangeRequest(ChangeRequest changeRequest);
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/MockChangeRequestClient.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/MockChangeRequestClient.java
index 10175f36991..e64b2ee3368 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/MockChangeRequestClient.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/MockChangeRequestClient.java
@@ -18,8 +18,8 @@ public class MockChangeRequestClient implements ChangeRequestClient {
}
@Override
- public void approveChangeRequests(List<ChangeRequest> changeRequests) {
- approvedChangeRequests.addAll(changeRequests);
+ public void approveChangeRequest(ChangeRequest changeRequest) {
+ approvedChangeRequests.add(changeRequest);
}
public void setUpcomingChangeRequests(List<ChangeRequest> changeRequests) {
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/VCMRReport.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/VCMRReport.java
new file mode 100644
index 00000000000..33d10083b63
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/VCMRReport.java
@@ -0,0 +1,149 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.api.integration.vcmr;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
+import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
+
+import java.time.ZonedDateTime;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+
+import static com.yahoo.yolean.Exceptions.uncheck;
+
+/**
+ * @author olaa
+ *
+ * Node repository report containing list of upcoming VCMRs impacting a node
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class VCMRReport {
+
+ private static final String REPORT_ID = "vcmr";
+ private static final ObjectMapper objectMapper = new ObjectMapper()
+ .registerModule(new JavaTimeModule());
+
+ private Set<VCMR> vcmrs;
+
+ public VCMRReport() {
+ this(new HashSet<>());
+ }
+
+ public VCMRReport(Set<VCMR> vcmrs) {
+ this.vcmrs = vcmrs;
+ }
+
+ public Set<VCMR> getVcmrs() {
+ return vcmrs;
+ }
+
+ /**
+ * @return true if list of VCMRs is changed
+ */
+ public boolean addVcmr(String id, ZonedDateTime plannedStartTime, ZonedDateTime plannedEndTime) {
+ var vcmr = new VCMR(id, plannedStartTime, plannedEndTime);
+ if (vcmrs.contains(vcmr))
+ return false;
+
+ // Remove to catch any changes in start/end time
+ removeVcmr(id);
+ return vcmrs.add(vcmr);
+ }
+
+ public boolean removeVcmr(String id) {
+ return vcmrs.removeIf(vcmr -> id.equals(vcmr.getId()));
+ }
+
+ public static String getReportId() {
+ return REPORT_ID;
+ }
+
+ /**
+ * Serialization functions - mapped to {@link Node#reports()}
+ */
+ public static VCMRReport fromReports(Map<String, JsonNode> reports) {
+ var serialized = reports.get(REPORT_ID);
+ if (serialized == null)
+ return new VCMRReport();
+
+ var typeRef = new TypeReference<Set<VCMR>>() {};
+ var vcmrs = uncheck(() -> objectMapper.readValue(objectMapper.treeAsTokens(serialized), typeRef));
+ return new VCMRReport(vcmrs);
+ }
+
+ /**
+ * Set report to 'null' if list is empty - clearing the report
+ * See NodePatcher in node-repository
+ */
+ public Map<String, JsonNode> toNodeReports() {
+ Map<String, JsonNode> reports = new HashMap<>();
+ JsonNode jsonNode = vcmrs.isEmpty() ?
+ null : uncheck(() -> objectMapper.valueToTree(vcmrs));
+ reports.put(REPORT_ID, jsonNode);
+ return reports;
+ }
+
+ @Override
+ public String toString() {
+ return "VCMRReport{" + vcmrs + "}";
+ }
+
+ public static class VCMR {
+
+ private String id;
+ private ZonedDateTime plannedStartTime;
+ private ZonedDateTime plannedEndTime;
+
+ VCMR(@JsonProperty("id") String id,
+ @JsonProperty("plannedStartTime") ZonedDateTime plannedStartTime,
+ @JsonProperty("plannedEndTime") ZonedDateTime plannedEndTime) {
+ this.id = id;
+ this.plannedStartTime = plannedStartTime;
+ this.plannedEndTime = plannedEndTime;
+ }
+
+ public String getId() {
+ return id;
+ }
+
+ public ZonedDateTime getPlannedStartTime() {
+ return plannedStartTime;
+ }
+
+ public ZonedDateTime getPlannedEndTime() {
+ return plannedEndTime;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ VCMR vcmr = (VCMR) o;
+ return Objects.equals(id, vcmr.id) &&
+ Objects.equals(plannedStartTime, vcmr.plannedStartTime) &&
+ Objects.equals(plannedEndTime, vcmr.plannedEndTime);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(id, plannedStartTime, plannedEndTime);
+ }
+
+ @Override
+ public String toString() {
+ return "VCMR{" +
+ "id='" + id + '\'' +
+ ", plannedStartTime=" + plannedStartTime +
+ ", plannedEndTime=" + plannedEndTime +
+ '}';
+ }
+ }
+
+}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/VespaChangeRequest.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/VespaChangeRequest.java
index a8be4a77c71..915f43dc369 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/VespaChangeRequest.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/vcmr/VespaChangeRequest.java
@@ -89,6 +89,7 @@ public class VespaChangeRequest extends ChangeRequest {
public enum Status {
COMPLETED,
+ READY,
IN_PROGRESS,
PENDING_ACTION,
PENDING_ASSESSMENT,
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/zone/ZoneRegistry.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/zone/ZoneRegistry.java
index 76637d10a6e..7539ef3c63a 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/zone/ZoneRegistry.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/zone/ZoneRegistry.java
@@ -86,9 +86,6 @@ public interface ZoneRegistry {
/** Returns a URL used to request support from the Vespa team. */
URI supportUrl();
- /** Returns a URL used to generate flashy badges from strings. */
- URI badgeUrl();
-
/** Returns a URL to the controller's api endpoint */
URI apiUrl();
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java
index d052a000860..9f6c0a79455 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java
@@ -78,6 +78,11 @@ enum PathGroup {
billingList(Matcher.tenant,
"/billing/v1/tenant/{tenant}/billing/{*}"),
+ billing(Matcher.tenant,
+ "/billing/v2/tenant/{tenant}/{*}"),
+
+ accountant("/billing/v2/accountant/{*}"),
+
applicationKeys(Matcher.tenant,
Matcher.application,
"/application/v4/tenant/{tenant}/application/{application}/key/"),
@@ -283,6 +288,7 @@ enum PathGroup {
PathGroup.billingInstrument,
PathGroup.billingList,
PathGroup.billingPlan,
+ PathGroup.billing,
PathGroup.hostedAccountant
);
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/Policy.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/Policy.java
index b48e786c178..ee5f1d806ab 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/Policy.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/Policy.java
@@ -174,7 +174,7 @@ enum Policy {
/** Ability to update tenant payment instrument */
planUpdate(Privilege.grant(Action.update)
- .on(PathGroup.billingPlan)
+ .on(PathGroup.billingPlan, PathGroup.billing)
.in(SystemName.PublicCd, SystemName.Public)),
/** Ability to update tenant collection method */
@@ -185,12 +185,12 @@ enum Policy {
/** Read the generated bills */
billingInformationRead(Privilege.grant(Action.read)
- .on(PathGroup.billingList)
+ .on(PathGroup.billingList, PathGroup.billing)
.in(SystemName.PublicCd, SystemName.Public)),
/** Invoice management */
hostedAccountant(Privilege.grant(Action.all())
- .on(PathGroup.hostedAccountant)
+ .on(PathGroup.hostedAccountant, PathGroup.accountant)
.in(SystemName.PublicCd, SystemName.Public)),
/** Listing endpoint certificate request info */
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java
index 159e0bb1f0f..f8624b40737 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java
@@ -1,7 +1,6 @@
// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller;
-import com.google.common.collect.ImmutableSortedMap;
import com.yahoo.component.Version;
import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.application.api.ValidationOverrides;
@@ -21,6 +20,7 @@ import com.yahoo.vespa.hosted.controller.tenant.Tenant;
import java.security.PublicKey;
import java.time.Instant;
import java.util.Collection;
+import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
@@ -29,6 +29,7 @@ import java.util.Optional;
import java.util.OptionalInt;
import java.util.OptionalLong;
import java.util.Set;
+import java.util.TreeMap;
import java.util.function.Function;
import java.util.stream.Collectors;
@@ -79,7 +80,15 @@ public class Application {
this.deployKeys = Objects.requireNonNull(deployKeys, "deployKeys cannot be null");
this.projectId = Objects.requireNonNull(projectId, "projectId cannot be null");
this.latestVersion = requireNotUnknown(latestVersion);
- this.instances = ImmutableSortedMap.copyOf(instances.stream().collect(Collectors.toMap(Instance::name, Function.identity())));
+ this.instances = instances.stream().collect(
+ Collectors.collectingAndThen(Collectors.toMap(Instance::name,
+ Function.identity(),
+ (i1, i2) -> {
+ throw new IllegalArgumentException("Duplicate key " + i1);
+ },
+ TreeMap::new),
+ Collections::unmodifiableMap)
+ );
}
public TenantAndApplicationId id() { return id; }
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
index 32063bf9ba5..e6c96134ca7 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
@@ -394,10 +394,10 @@ public class ApplicationController {
// Record the quota usage for this application
var quotaUsage = deploymentQuotaUsage(zone, job.application());
- // For direct deployments use the full application ID, but otherwise use just the tenant and application as
+ // For direct deployments use the full deployment ID, but otherwise use just the tenant and application as
// the source since it's the same application, so it should have the same warnings
NotificationSource source = zone.environment().isManuallyDeployed() ?
- NotificationSource.from(job.application()) : NotificationSource.from(applicationId);
+ NotificationSource.from(new DeploymentId(job.application(), zone)) : NotificationSource.from(applicationId);
List<String> warnings = Optional.ofNullable(result.prepareResponse().log)
.map(logs -> logs.stream()
.filter(log -> log.applicationPackage)
@@ -407,8 +407,8 @@ public class ApplicationController {
.distinct()
.collect(Collectors.toList()))
.orElseGet(List::of);
- if (warnings.isEmpty()) controller.notificationsDb().removeNotification(source, Notification.Type.APPLICATION_PACKAGE_WARNING);
- else controller.notificationsDb().setNotification(source, Notification.Type.APPLICATION_PACKAGE_WARNING, warnings);
+ if (warnings.isEmpty()) controller.notificationsDb().removeNotification(source, Notification.Type.applicationPackage);
+ else controller.notificationsDb().setNotification(source, Notification.Type.applicationPackage, Notification.Level.warning, warnings);
lockApplicationOrThrow(applicationId, application ->
store(application.with(job.application().instance(),
@@ -702,6 +702,8 @@ public class ApplicationController {
controller.routing().policies().refresh(application.get().id().instance(instanceName), application.get().deploymentSpec(), zone);
if (zone.environment().isManuallyDeployed())
applicationStore.putMetaTombstone(id, clock.instant());
+ if (!zone.environment().isTest())
+ controller.notificationsDb().removeNotifications(NotificationSource.from(id));
}
return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone));
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java
index 2de8fa6457a..e61a376730e 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java
@@ -22,7 +22,6 @@ import com.yahoo.vespa.hosted.controller.auditlog.AuditLogger;
import com.yahoo.vespa.hosted.controller.config.ControllerConfig;
import com.yahoo.vespa.hosted.controller.deployment.JobController;
import com.yahoo.vespa.hosted.controller.dns.NameServiceForwarder;
-import com.yahoo.vespa.hosted.controller.metric.ConfigServerMetrics;
import com.yahoo.vespa.hosted.controller.notification.NotificationsDb;
import com.yahoo.vespa.hosted.controller.persistence.CuratorDb;
import com.yahoo.vespa.hosted.controller.persistence.JobControlFlags;
@@ -73,7 +72,6 @@ public class Controller extends AbstractComponent {
private final Clock clock;
private final ZoneRegistry zoneRegistry;
private final ServiceRegistry serviceRegistry;
- private final ConfigServerMetrics metrics;
private final AuditLogger auditLogger;
private final FlagSource flagSource;
private final NameServiceForwarder nameServiceForwarder;
@@ -111,7 +109,6 @@ public class Controller extends AbstractComponent {
this.mavenRepository = Objects.requireNonNull(mavenRepository, "MavenRepository cannot be null");
this.metric = Objects.requireNonNull(metric, "Metric cannot be null");
- metrics = new ConfigServerMetrics(serviceRegistry.configServer());
nameServiceForwarder = new NameServiceForwarder(curator);
jobController = new JobController(this);
applicationController = new ApplicationController(this, curator, accessControl, clock, flagSource, serviceRegistry.billingController());
@@ -224,8 +221,7 @@ public class Controller extends AbstractComponent {
if (version.isEmpty()) {
throw new IllegalArgumentException("Invalid version '" + version.toFullString() + "'");
}
- Set<CloudName> clouds = clouds();
- if (!clouds.contains(cloudName)) {
+ if (!clouds().contains(cloudName)) {
throw new IllegalArgumentException("Cloud '" + cloudName + "' does not exist in this system");
}
try (Lock lock = curator.lockOsVersions()) {
@@ -268,10 +264,6 @@ public class Controller extends AbstractComponent {
return HostName.from(hostnameSupplier.get());
}
- public ConfigServerMetrics metrics() {
- return metrics;
- }
-
public SystemName system() {
return zoneRegistry.system();
}
@@ -313,4 +305,5 @@ public class Controller extends AbstractComponent {
public NotificationsDb notificationsDb() {
return notificationsDb;
}
+
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Instance.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Instance.java
index 025b785a693..0fcb3cd9be4 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Instance.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Instance.java
@@ -1,7 +1,6 @@
// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller;
-import com.google.common.collect.ImmutableMap;
import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Environment;
@@ -55,8 +54,8 @@ public class Instance {
public Instance(ApplicationId id, Collection<Deployment> deployments, Map<JobType, Instant> jobPauses,
List<AssignedRotation> rotations, RotationStatus rotationStatus, Change change) {
this.id = Objects.requireNonNull(id, "id cannot be null");
- this.deployments = ImmutableMap.copyOf(Objects.requireNonNull(deployments, "deployments cannot be null").stream()
- .collect(Collectors.toMap(Deployment::zone, Function.identity())));
+ this.deployments = Objects.requireNonNull(deployments, "deployments cannot be null").stream()
+ .collect(Collectors.toUnmodifiableMap(Deployment::zone, Function.identity()));
this.jobPauses = Map.copyOf(Objects.requireNonNull(jobPauses, "deploymentJobs cannot be null"));
this.rotations = List.copyOf(Objects.requireNonNull(rotations, "rotations cannot be null"));
this.rotationStatus = Objects.requireNonNull(rotationStatus, "rotationStatus cannot be null");
@@ -140,9 +139,9 @@ public class Instance {
* (deployments also includes manually deployed environments)
*/
public Map<ZoneId, Deployment> productionDeployments() {
- return ImmutableMap.copyOf(deployments.values().stream()
- .filter(deployment -> deployment.zone().environment() == Environment.prod)
- .collect(Collectors.toMap(Deployment::zone, Function.identity())));
+ return deployments.values().stream()
+ .filter(deployment -> deployment.zone().environment() == Environment.prod)
+ .collect(Collectors.toUnmodifiableMap(Deployment::zone, Function.identity()));
}
/** Returns the instant until which the given job is paused, or empty. */
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
index c7cc0f361bc..433b2b340d5 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
@@ -95,8 +95,8 @@ public class RoutingController {
if (!policy.status().isActive()) continue;
for (var routingMethod : controller.zoneRegistry().routingMethods(policy.id().zone())) {
if (routingMethod.isDirect() && !isSystemApplication && !canRouteDirectlyTo(deployment, application.get())) continue;
- endpoints.add(policy.endpointIn(controller.system(), routingMethod, controller.zoneRegistry()));
- endpoints.add(policy.regionEndpointIn(controller.system(), routingMethod));
+ endpoints.addAll(policy.endpointsIn(controller.system(), routingMethod, controller.zoneRegistry()));
+ endpoints.addAll(policy.regionEndpointsIn(controller.system(), routingMethod));
}
}
return EndpointList.copyOf(endpoints);
@@ -140,7 +140,7 @@ public class RoutingController {
public Map<ZoneId, List<Endpoint>> zoneEndpointsOf(Collection<DeploymentId> deployments) {
var endpoints = new TreeMap<ZoneId, List<Endpoint>>(Comparator.comparing(ZoneId::value));
for (var deployment : deployments) {
- EndpointList zoneEndpoints = endpointsOf(deployment).scope(Endpoint.Scope.zone);
+ EndpointList zoneEndpoints = endpointsOf(deployment).scope(Endpoint.Scope.zone).not().legacy();
zoneEndpoints = directEndpoints(zoneEndpoints, deployment.applicationId());
if ( ! zoneEndpoints.isEmpty()) {
endpoints.put(deployment.zoneId(), zoneEndpoints.asList());
@@ -189,9 +189,7 @@ public class RoutingController {
/** Returns the global endpoints for given deployment as container endpoints */
public Set<ContainerEndpoint> containerEndpointsOf(Application application, InstanceName instanceName, ZoneId zone) {
Instance instance = application.require(instanceName);
- boolean registerLegacyNames = application.deploymentSpec().instance(instanceName)
- .flatMap(DeploymentInstanceSpec::globalServiceId)
- .isPresent();
+ boolean registerLegacyNames = legacyNamesAvailable(application, instanceName);
Set<ContainerEndpoint> containerEndpoints = new HashSet<>();
EndpointList endpoints = endpointsOf(application, instanceName);
// Add endpoints backed by a rotation, and register them in DNS if necessary
@@ -305,6 +303,7 @@ public class RoutingController {
var directMethods = 0;
var zones = deployments.stream().map(DeploymentId::zoneId).collect(Collectors.toList());
var availableRoutingMethods = routingMethodsOfAll(deployments, application);
+ boolean legacyNamesAvailable = legacyNamesAvailable(application, routingId.application().instance());
for (var method : availableRoutingMethods) {
if (method.isDirect() && ++directMethods > 1) {
@@ -316,8 +315,16 @@ public class RoutingController {
.on(Port.fromRoutingMethod(method))
.routingMethod(method)
.in(controller.system()));
- // TODO(mpolden): Remove this once all applications have migrated away from legacy endpoints
- if (method == RoutingMethod.shared) {
+ if (controller.system().isPublic()) {
+ endpoints.add(Endpoint.of(routingId.application())
+ .target(routingId.endpointId(), cluster, zones)
+ .on(Port.fromRoutingMethod(method))
+ .routingMethod(method)
+ .legacy()
+ .in(controller.system()));
+ }
+ // Add legacy endpoints
+ if (legacyNamesAvailable && method == RoutingMethod.shared) {
endpoints.add(Endpoint.of(routingId.application())
.target(routingId.endpointId(), cluster, zones)
.on(Port.plain(4080))
@@ -335,6 +342,13 @@ public class RoutingController {
return endpoints;
}
+ /** Whether legacy global DNS names should be available for given application */
+ private static boolean legacyNamesAvailable(Application application, InstanceName instanceName) {
+ return application.deploymentSpec().instance(instanceName)
+ .flatMap(DeploymentInstanceSpec::globalServiceId)
+ .isPresent();
+ }
+
/** Returns direct routing endpoints if any exist and feature flag is set for given application */
// TODO: Remove this when feature flag is removed, and in-line .direct() filter where relevant
public EndpointList directEndpoints(EndpointList endpoints, ApplicationId application) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Endpoint.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Endpoint.java
index cc1a0a455c4..3f079a5fb9b 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Endpoint.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Endpoint.java
@@ -28,6 +28,10 @@ public class Endpoint {
private static final String OATH_DNS_SUFFIX = ".vespa.oath.cloud";
private static final String PUBLIC_DNS_SUFFIX = ".public.vespa.oath.cloud";
private static final String PUBLIC_CD_DNS_SUFFIX = ".public-cd.vespa.oath.cloud";
+ // TODO(mpolden): New domain is considered "legacy" for the time being, until it's ready for use. Once it's ready
+ // we'll make the vespa.oath.cloud variant legacy and this non-legacy.
+ private static final String PUBLIC_DNS_LEGACY_SUFFIX = ".vespa-app.cloud";
+ private static final String PUBLIC_CD_LEGACY_DNS_SUFFIX = ".cd.vespa-app.cloud";
private final EndpointId id;
private final ClusterSpec.Id cluster;
@@ -173,13 +177,13 @@ public class Endpoint {
String portPart = port.isDefault() ? "" : ":" + port.port;
return URI.create(scheme + "://" +
sanitize(namePart(name, separator)) +
- systemPart(system, separator) +
+ systemPart(system, separator, legacy) +
sanitize(instancePart(application, separator)) +
sanitize(application.application().value()) +
separator +
sanitize(application.tenant().value()) +
"." +
- scopePart(scope, zones, legacy) +
+ scopePart(scope, zones, legacy, system) +
dnsSuffix(system, legacy) +
portPart +
"/");
@@ -201,7 +205,15 @@ public class Endpoint {
return name + separator;
}
- private static String scopePart(Scope scope, List<ZoneId> zones, boolean legacy) {
+ private static String scopePart(Scope scope, List<ZoneId> zones, boolean legacy, SystemName system) {
+ if (system.isPublic() && legacy) {
+ if (scope == Scope.global) return "g";
+ var zone = zones.get(0);
+ var region = zone.region().value();
+ char scopeSymbol = scope == Scope.region ? 'r' : 'z';
+ String environment = zone.environment().isProduction() ? "" : "." + zone.environment().value();
+ return region + environment + "." + scopeSymbol;
+ }
if (scope == Scope.global) return "global";
var zone = zones.get(0);
var region = zone.region().value();
@@ -215,8 +227,9 @@ public class Endpoint {
return application.instance().value() + separator;
}
- private static String systemPart(SystemName system, String separator) {
+ private static String systemPart(SystemName system, String separator, boolean legacy) {
if (!system.isCd()) return "";
+ if (system.isPublic() && legacy) return "";
return system.value() + separator;
}
@@ -227,8 +240,10 @@ public class Endpoint {
if (legacy) return YAHOO_DNS_SUFFIX;
return OATH_DNS_SUFFIX;
case Public:
+ if (legacy) return PUBLIC_DNS_LEGACY_SUFFIX;
return PUBLIC_DNS_SUFFIX;
case PublicCd:
+ if (legacy) return PUBLIC_CD_LEGACY_DNS_SUFFIX;
return PUBLIC_CD_DNS_SUFFIX;
default: throw new IllegalArgumentException("No DNS suffix declared for system " + system);
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/archive/CuratorArchiveBucketDb.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/archive/CuratorArchiveBucketDb.java
index 8bc2271825b..bb38dd612db 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/archive/CuratorArchiveBucketDb.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/archive/CuratorArchiveBucketDb.java
@@ -1,18 +1,12 @@
// Copyright 2021 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.archive;
-import com.google.inject.Inject;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.zone.ZoneId;
-import com.yahoo.vespa.flags.FetchVector;
-import com.yahoo.vespa.flags.Flags;
-import com.yahoo.vespa.flags.StringFlag;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.api.integration.archive.ArchiveBucket;
-import com.yahoo.vespa.hosted.controller.api.integration.archive.ArchiveBucketDb;
import com.yahoo.vespa.hosted.controller.api.integration.archive.ArchiveService;
import com.yahoo.vespa.hosted.controller.persistence.CuratorDb;
-import org.jetbrains.annotations.NotNull;
import java.net.URI;
import java.util.HashSet;
@@ -27,7 +21,7 @@ import java.util.stream.Collectors;
*
* @author andreer
*/
-public class CuratorArchiveBucketDb implements ArchiveBucketDb {
+public class CuratorArchiveBucketDb {
/**
* Due to policy limits, we can't put data for more than this many tenants in a bucket.
@@ -45,27 +39,20 @@ public class CuratorArchiveBucketDb implements ArchiveBucketDb {
private final ArchiveService archiveService;
private final CuratorDb curatorDb;
- private final StringFlag bucketNameFlag;
+ private final boolean enabled;
- @Inject
public CuratorArchiveBucketDb(Controller controller) {
this.archiveService = controller.serviceRegistry().archiveService();
this.curatorDb = controller.curator();
- this.bucketNameFlag = Flags.SYNC_HOST_LOGS_TO_S3_BUCKET.bindTo(controller.flagSource());
+ this.enabled = controller.zoneRegistry().system().isPublic();
}
- @Override
public Optional<URI> archiveUriFor(ZoneId zoneId, TenantName tenant) {
- String bucketName = bucketNameFlag
- .with(FetchVector.Dimension.ZONE_ID, zoneId.value())
- .with(FetchVector.Dimension.TENANT_ID, tenant.value())
- .value();
-
- if (bucketName.isBlank()) return Optional.empty();
-
- if ("auto".equals(bucketName)) bucketName = findOrAssignBucket(zoneId, tenant);
-
- return Optional.of(URI.create(String.format("s3://%s/%s/", bucketName, tenant.value())));
+ if (enabled) {
+ return Optional.of(URI.create(String.format("s3://%s/%s/", findOrAssignBucket(zoneId, tenant), tenant.value())));
+ } else {
+ return Optional.empty();
+ }
}
private String findOrAssignBucket(ZoneId zoneId, TenantName tenant) {
@@ -106,12 +93,10 @@ public class CuratorArchiveBucketDb implements ArchiveBucketDb {
}
}
- @Override
public Set<ArchiveBucket> buckets(ZoneId zoneId) {
return curatorDb.readArchiveBuckets(zoneId);
}
- @NotNull
private Optional<String> findAndUpdateArchiveUriCache(ZoneId zoneId, TenantName tenant, Set<ArchiveBucket> zoneBuckets) {
Optional<String> bucketName = zoneBuckets.stream()
.filter(bucket -> bucket.tenants().contains(tenant))
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/archive/package-info.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/archive/package-info.java
deleted file mode 100644
index c93eb56d294..00000000000
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/archive/package-info.java
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2021 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-@ExportPackage
-package com.yahoo.vespa.hosted.controller.archive;
-
-import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Badges.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Badges.java
deleted file mode 100644
index f5369406f97..00000000000
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Badges.java
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.deployment;
-
-import com.yahoo.config.provision.ApplicationId;
-
-import java.net.URI;
-import java.util.List;
-import java.util.Optional;
-
-/**
- * URLs for deployment job badges using <a href="https://github.com/yahoo/badge-up">badge-up</a>.
- *
- * @author jonmv
- */
-class Badges {
-
- static final String dark = "555555",
- blue = "4477DD",
- red = "DD4444",
- purple = "AA11CC",
- yellow = "DDAA11",
- white = "FFFFFF";
-
- private final URI badgeApi;
-
- Badges(URI badgeApi) {
- this.badgeApi = badgeApi;
- }
-
- /** Returns a URI which gives a history badge for the given runs. */
- URI historic(ApplicationId id, Optional<Run> lastCompleted, List<Run> runs) {
- StringBuilder path = new StringBuilder(id + ";" + dark);
-
- lastCompleted.ifPresent(last -> path.append("/").append(last.id().type().jobName()).append(";").append(colorOf(last)));
- for (Run run : runs)
- path.append("/%20;").append(colorOf(run)).append(";s%7B").append(white).append("%7D");
-
- return badgeApi.resolve(path.toString());
- }
-
- /** Returns a URI which gives an overview badge for the given runs. */
- URI overview(ApplicationId id, List<Run> runs) {
- StringBuilder path = new StringBuilder(id + ";" + dark);
- for (Run run : runs)
- path.append("/").append(run.id().type().jobName()).append(";").append(colorOf(run));
-
- return badgeApi.resolve(path.toString());
- }
-
- private static String colorOf(Run run) {
- switch (run.status()) {
- case success: return blue;
- case running: return purple;
- case aborted: return yellow;
- default: return red;
- }
- }
-
-}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java
index 3f1e8831e83..5b873f11618 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java
@@ -22,6 +22,7 @@ import java.time.Duration;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
@@ -92,7 +93,7 @@ public class DeploymentStatus {
this.now = requireNonNull(now);
List<StepStatus> allSteps = new ArrayList<>();
this.jobSteps = jobDependencies(application.deploymentSpec(), allSteps);
- this.allSteps = List.copyOf(allSteps);
+ this.allSteps = Collections.unmodifiableList(allSteps);
}
/** The application this deployment status concerns. */
@@ -146,7 +147,7 @@ public class DeploymentStatus {
Map.Entry::getValue,
DeploymentStatus::union,
LinkedHashMap::new),
- ImmutableMap::copyOf));
+ Collections::unmodifiableMap));
}
private Map<JobId, List<Versions>> jobsToRun(Map<InstanceName, Change> changes, boolean eagerTests) {
@@ -173,7 +174,7 @@ public class DeploymentStatus {
if (step.completedAt(change, firstProductionJobWithDeployment).isEmpty())
jobs.merge(job, List.of(versions), DeploymentStatus::union);
});
- return ImmutableMap.copyOf(jobs);
+ return Collections.unmodifiableMap(jobs);
}
/** The set of jobs that need to run for the given changes to be considered complete. */
@@ -281,7 +282,7 @@ public class DeploymentStatus {
testJobs.merge(firstDeclaredOrElseImplicitTest(testType), List.of(versions), DeploymentStatus::union);
});
}
- return ImmutableMap.copyOf(testJobs);
+ return Collections.unmodifiableMap(testJobs);
}
private JobId firstDeclaredOrElseImplicitTest(JobType testJob) {
@@ -306,7 +307,7 @@ public class DeploymentStatus {
for (DeploymentSpec.Step step : spec.steps())
previous = fillStep(dependencies, allSteps, step, previous, null);
- return ImmutableMap.copyOf(dependencies);
+ return Collections.unmodifiableMap(dependencies);
}
/** Adds the primitive steps contained in the given step, which depend on the given previous primitives, to the dependency graph. */
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
index 0458a64c5a9..5bd43dfd695 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
@@ -712,12 +712,12 @@ public class InternalStepRunner implements StepRunner {
private void updateConsoleNotification(Run run) {
NotificationSource source = NotificationSource.from(run.id());
- Consumer<String> updater = msg -> controller.notificationsDb().setNotification(source, Notification.Type.DEPLOYMENT_FAILURE, msg);
+ Consumer<String> updater = msg -> controller.notificationsDb().setNotification(source, Notification.Type.deployment, Notification.Level.error, msg);
switch (run.status()) {
case aborted: return; // wait and see how the next run goes.
case running:
case success:
- controller.notificationsDb().removeNotification(source, Notification.Type.DEPLOYMENT_FAILURE);
+ controller.notificationsDb().removeNotification(source, Notification.Type.deployment);
return;
case outOfCapacity:
if ( ! run.id().type().environment().isTest()) updater.accept("lack of capacity. Please contact the Vespa team to request more!");
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java
index 3dc88d5d6d2..25bc21a0076 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java
@@ -82,7 +82,6 @@ public class JobController {
private final CuratorDb curator;
private final BufferedLogStore logs;
private final TesterCloud cloud;
- private final Badges badges;
private final JobMetrics metric;
private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { });
@@ -92,7 +91,6 @@ public class JobController {
this.curator = controller.curator();
this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore());
this.cloud = controller.serviceRegistry().testerCloud();
- this.badges = new Badges(controller.zoneRegistry().badgeUrl());
this.metric = new JobMetrics(controller.metric(), controller::system);
}
@@ -539,30 +537,6 @@ public class JobController {
}
}
- /** Returns a URI which points at a badge showing historic status of given length for the given job type for the given application. */
- public URI historicBadge(ApplicationId id, JobType type, int historyLength) {
- List<Run> runs = new ArrayList<>(runs(id, type).values());
- Run lastCompleted = null;
- if (runs.size() > 0)
- lastCompleted = runs.get(runs.size() - 1);
- if (runs.size() > 1 && ! lastCompleted.hasEnded())
- lastCompleted = runs.get(runs.size() - 2);
-
- return badges.historic(id, Optional.ofNullable(lastCompleted), runs.subList(Math.max(0, runs.size() - historyLength), runs.size()));
- }
-
- /** Returns a URI which points at a badge showing current status for all jobs for the given application. */
- public URI overviewBadge(ApplicationId id) {
- DeploymentSteps steps = new DeploymentSteps(controller.applications().requireApplication(TenantAndApplicationId.from(id))
- .deploymentSpec().requireInstance(id.instance()),
- controller::system);
- return badges.overview(id,
- steps.jobs().stream()
- .map(type -> last(id, type))
- .flatMap(Optional::stream)
- .collect(toList()));
- }
-
private void prunePackages(TenantAndApplicationId id) {
controller.applications().lockApplicationIfPresent(id, application -> {
application.get().productionDeployments().values().stream()
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveAccessMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveAccessMaintainer.java
index 97385da468b..1a9889284e1 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveAccessMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveAccessMaintainer.java
@@ -4,9 +4,9 @@ package com.yahoo.vespa.hosted.controller.maintenance;
import com.google.common.collect.Maps;
import com.yahoo.jdisc.Metric;
import com.yahoo.vespa.hosted.controller.Controller;
-import com.yahoo.vespa.hosted.controller.api.integration.archive.ArchiveBucketDb;
import com.yahoo.vespa.hosted.controller.api.integration.archive.ArchiveService;
import com.yahoo.vespa.hosted.controller.api.integration.zone.ZoneRegistry;
+import com.yahoo.vespa.hosted.controller.archive.CuratorArchiveBucketDb;
import com.yahoo.vespa.hosted.controller.tenant.CloudTenant;
import com.yahoo.vespa.hosted.controller.tenant.Tenant;
@@ -23,7 +23,7 @@ public class ArchiveAccessMaintainer extends ControllerMaintainer {
private static final String bucketCountMetricName = "archive.bucketCount";
- private final ArchiveBucketDb archiveBucketDb;
+ private final CuratorArchiveBucketDb archiveBucketDb;
private final ArchiveService archiveService;
private final ZoneRegistry zoneRegistry;
private final Metric metric;
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdater.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdater.java
index faa4813e6b0..d2141b097b3 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdater.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdater.java
@@ -6,9 +6,9 @@ import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.ApplicationController;
import com.yahoo.vespa.hosted.controller.Controller;
-import com.yahoo.vespa.hosted.controller.api.integration.archive.ArchiveBucketDb;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.NodeRepository;
import com.yahoo.vespa.hosted.controller.application.SystemApplication;
+import com.yahoo.vespa.hosted.controller.archive.CuratorArchiveBucketDb;
import java.net.URI;
import java.time.Duration;
@@ -28,7 +28,7 @@ public class ArchiveUriUpdater extends ControllerMaintainer {
private final ApplicationController applications;
private final NodeRepository nodeRepository;
- private final ArchiveBucketDb archiveBucketDb;
+ private final CuratorArchiveBucketDb archiveBucketDb;
public ArchiveUriUpdater(Controller controller, Duration duration) {
super(controller, duration, ArchiveUriUpdater.class.getSimpleName(), SystemName.all());
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeRequestMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeRequestMaintainer.java
index 0ebf4cbc2d2..1f360c477b9 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeRequestMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeRequestMaintainer.java
@@ -31,14 +31,12 @@ public class ChangeRequestMaintainer extends ControllerMaintainer {
private final Logger logger = Logger.getLogger(ChangeRequestMaintainer.class.getName());
private final ChangeRequestClient changeRequestClient;
- private final SystemName system;
private final CuratorDb curator;
private final NodeRepository nodeRepository;
public ChangeRequestMaintainer(Controller controller, Duration interval) {
super(controller, interval, null, SystemName.allOf(Predicate.not(SystemName::isPublic)));
this.changeRequestClient = controller.serviceRegistry().changeRequestClient();
- this.system = controller.system();
this.curator = controller.curator();
this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
}
@@ -51,23 +49,10 @@ public class ChangeRequestMaintainer extends ControllerMaintainer {
logger.fine(() -> "Found requests: " + changeRequests);
storeChangeRequests(changeRequests);
- if (system.equals(SystemName.main)) {
- approveChanges(changeRequests);
- }
return true;
}
- private void approveChanges(List<ChangeRequest> changeRequests) {
- var unapprovedRequests = changeRequests
- .stream()
- .filter(changeRequest -> changeRequest.getApproval() == ChangeRequest.Approval.REQUESTED)
- .collect(Collectors.toList());
-
- logger.fine(() -> "Approving " + unapprovedRequests);
- changeRequestClient.approveChangeRequests(unapprovedRequests);
- }
-
private void storeChangeRequests(List<ChangeRequest> changeRequests) {
var existingChangeRequests = curator.readChangeRequests()
.stream()
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java
index 015da1faae8..5a7ef12b246 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java
@@ -61,7 +61,7 @@ public class ControllerMaintenance extends AbstractComponent {
maintainers.add(new SystemRoutingPolicyMaintainer(controller, intervals.systemRoutingPolicyMaintainer));
maintainers.add(new ApplicationMetaDataGarbageCollector(controller, intervals.applicationMetaDataGarbageCollector));
maintainers.add(new ContainerImageExpirer(controller, intervals.containerImageExpirer));
- maintainers.add(new HostInfoUpdater(controller, intervals.hostSwitchUpdater));
+ maintainers.add(new HostInfoUpdater(controller, intervals.hostInfoUpdater));
maintainers.add(new ReindexingTriggerer(controller, intervals.reindexingTriggerer));
maintainers.add(new EndpointCertificateMaintainer(controller, intervals.endpointCertificateMaintainer));
maintainers.add(new TrafficShareUpdater(controller, intervals.trafficFractionUpdater));
@@ -116,7 +116,7 @@ public class ControllerMaintenance extends AbstractComponent {
private final Duration systemRoutingPolicyMaintainer;
private final Duration applicationMetaDataGarbageCollector;
private final Duration containerImageExpirer;
- private final Duration hostSwitchUpdater;
+ private final Duration hostInfoUpdater;
private final Duration reindexingTriggerer;
private final Duration endpointCertificateMaintainer;
private final Duration trafficFractionUpdater;
@@ -135,7 +135,7 @@ public class ControllerMaintenance extends AbstractComponent {
this.deploymentMetricsMaintainer = duration(10, MINUTES);
this.applicationOwnershipConfirmer = duration(12, HOURS);
this.systemUpgrader = duration(2, MINUTES);
- this.jobRunner = duration(90, SECONDS);
+ this.jobRunner = duration(system.isCd() ? 45 : 90, SECONDS);
this.osVersionStatusUpdater = duration(2, MINUTES);
this.osUpgrader = duration(1, MINUTES);
this.osUpgradeScheduler = duration(3, HOURS);
@@ -148,7 +148,7 @@ public class ControllerMaintenance extends AbstractComponent {
this.systemRoutingPolicyMaintainer = duration(10, MINUTES);
this.applicationMetaDataGarbageCollector = duration(12, HOURS);
this.containerImageExpirer = duration(12, HOURS);
- this.hostSwitchUpdater = duration(12, HOURS);
+ this.hostInfoUpdater = duration(12, HOURS);
this.reindexingTriggerer = duration(1, HOURS);
this.endpointCertificateMaintainer = duration(12, HOURS);
this.trafficFractionUpdater = duration(5, MINUTES);
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java
index c607cdb2090..a79d705963b 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java
@@ -5,15 +5,21 @@ import com.yahoo.config.provision.SystemName;
import com.yahoo.vespa.hosted.controller.ApplicationController;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.Instance;
+import com.yahoo.vespa.hosted.controller.api.application.v4.model.ClusterMetrics;
+import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics;
import com.yahoo.yolean.Exceptions;
import java.time.Duration;
+import java.time.Instant;
+import java.util.List;
+import java.util.Optional;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Function;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -51,22 +57,18 @@ public class DeploymentMetricsMaintainer extends ControllerMaintainer {
for (Deployment deployment : instance.deployments().values()) {
attempts.incrementAndGet();
try {
- var collectedMetrics = controller().metrics().getDeploymentMetrics(instance.id(), deployment.zone());
- var now = controller().clock().instant();
+ DeploymentId deploymentId = new DeploymentId(instance.id(), deployment.zone());
+ List<ClusterMetrics> clusterMetrics = controller().serviceRegistry().configServer().getDeploymentMetrics(deploymentId);
+ Instant now = controller().clock().instant();
applications.lockApplicationIfPresent(application.id(), locked -> {
Deployment existingDeployment = locked.get().require(instance.name()).deployments().get(deployment.zone());
if (existingDeployment == null) return; // Deployment removed since we started collecting metrics
- DeploymentMetrics newMetrics = existingDeployment.metrics()
- .withQueriesPerSecond(collectedMetrics.queriesPerSecond())
- .withWritesPerSecond(collectedMetrics.writesPerSecond())
- .withDocumentCount(collectedMetrics.documentCount())
- .withQueryLatencyMillis(collectedMetrics.queryLatencyMillis())
- .withWriteLatencyMillis(collectedMetrics.writeLatencyMillis())
- .at(now);
+ DeploymentMetrics newMetrics = updateDeploymentMetrics(existingDeployment.metrics(), clusterMetrics).at(now);
applications.store(locked.with(instance.name(),
lockedInstance -> lockedInstance.with(existingDeployment.zone(), newMetrics)
.recordActivityAt(now, existingDeployment.zone())));
+ controller().notificationsDb().setDeploymentFeedingBlockedNotifications(deploymentId, clusterMetrics);
});
} catch (Exception e) {
failures.incrementAndGet();
@@ -92,4 +94,26 @@ public class DeploymentMetricsMaintainer extends ControllerMaintainer {
return lastException.get() == null;
}
+ static DeploymentMetrics updateDeploymentMetrics(DeploymentMetrics current, List<ClusterMetrics> metrics) {
+ return current
+ .withQueriesPerSecond(metrics.stream().flatMap(m -> m.queriesPerSecond().stream()).mapToDouble(Double::doubleValue).sum())
+ .withWritesPerSecond(metrics.stream().flatMap(m -> m.feedPerSecond().stream()).mapToDouble(Double::doubleValue).sum())
+ .withDocumentCount(metrics.stream().flatMap(m -> m.documentCount().stream()).mapToLong(Double::longValue).sum())
+ .withQueryLatencyMillis(weightedAverageLatency(metrics, ClusterMetrics::queriesPerSecond, ClusterMetrics::queryLatency))
+ .withWriteLatencyMillis(weightedAverageLatency(metrics, ClusterMetrics::feedPerSecond, ClusterMetrics::feedLatency));
+ }
+
+ private static double weightedAverageLatency(List<ClusterMetrics> metrics,
+ Function<ClusterMetrics, Optional<Double>> rateExtractor,
+ Function<ClusterMetrics, Optional<Double>> latencyExtractor) {
+ double rateSum = metrics.stream().flatMap(m -> rateExtractor.apply(m).stream()).mapToDouble(Double::longValue).sum();
+ if (rateSum == 0) return 0.0;
+
+ double weightedLatency = metrics.stream()
+ .flatMap(m -> latencyExtractor.apply(m).flatMap(l -> rateExtractor.apply(m).map(r -> l * r)).stream())
+ .mapToDouble(Double::doubleValue)
+ .sum();
+
+ return weightedLatency / rateSum;
+ }
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporter.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporter.java
index dbc2f04ee2e..b26b94f0b28 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporter.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporter.java
@@ -9,6 +9,7 @@ import com.yahoo.jdisc.Metric;
import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.Instance;
+import com.yahoo.vespa.hosted.controller.api.integration.billing.PlanId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
import com.yahoo.vespa.hosted.controller.application.ApplicationList;
import com.yahoo.vespa.hosted.controller.application.Deployment;
@@ -31,6 +32,7 @@ import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
+import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;
import java.util.stream.Collectors;
@@ -43,6 +45,7 @@ import java.util.stream.Collectors;
*/
public class MetricsReporter extends ControllerMaintainer {
+ public static final String TENANT_METRIC = "billing.tenants";
public static final String DEPLOYMENT_FAIL_METRIC = "deployment.failurePercentage";
public static final String DEPLOYMENT_AVERAGE_DURATION = "deployment.averageDuration";
public static final String DEPLOYMENT_FAILING_UPGRADES = "deployment.failingUpgrades";
@@ -78,6 +81,7 @@ public class MetricsReporter extends ControllerMaintainer {
reportInfrastructureUpgradeMetrics(versionStatus);
reportAuditLog();
reportBrokenSystemVersion(versionStatus);
+ reportTenantMetrics();
return true;
}
@@ -193,6 +197,22 @@ public class MetricsReporter extends ControllerMaintainer {
});
}
+ private void reportTenantMetrics() {
+ if (! controller().system().isPublic()) return;
+
+ var planCounter = new TreeMap<String, Integer>();
+
+ controller().tenants().asList().forEach(tenant -> {
+ var planId = controller().serviceRegistry().billingController().getPlan(tenant.name());
+ planCounter.merge(planId.value(), 1, Integer::sum);
+ });
+
+ planCounter.forEach((planId, count) -> {
+ var context = metric.createContext(Map.of("plan", planId));
+ metric.set(TENANT_METRIC, count, context);
+ });
+ }
+
private Map<NodeVersion, Duration> platformChangeDurations(VersionStatus versionStatus) {
return changeDurations(versionStatus.versions(), VespaVersion::nodeVersions);
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeScheduler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeScheduler.java
index 04b06b3e1f6..e1618f05a7d 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeScheduler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeScheduler.java
@@ -27,7 +27,7 @@ import java.util.stream.Collectors;
public class OsUpgradeScheduler extends ControllerMaintainer {
/** Trigger a new upgrade when the current target version reaches this age */
- private static final Duration MAX_VERSION_AGE = Duration.ofDays(30);
+ private static final Duration MAX_VERSION_AGE = Duration.ofDays(45);
/**
* The interval at which new versions become available. We use this to avoid scheduling upgrades to a version that
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/TenantRoleMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/TenantRoleMaintainer.java
index e8b50a6b604..1265d687850 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/TenantRoleMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/TenantRoleMaintainer.java
@@ -2,22 +2,24 @@
package com.yahoo.vespa.hosted.controller.maintenance;
+import com.yahoo.config.provision.TenantName;
import com.yahoo.vespa.flags.BooleanFlag;
import com.yahoo.vespa.flags.FetchVector;
import com.yahoo.vespa.flags.Flags;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.tenant.Tenant;
import java.time.Duration;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Predicate;
import java.util.stream.Collectors;
public class TenantRoleMaintainer extends ControllerMaintainer {
- private final BooleanFlag provisionTenantRoles;
-
public TenantRoleMaintainer(Controller controller, Duration tenantRoleMaintainer) {
super(controller, tenantRoleMaintainer);
- provisionTenantRoles = Flags.PROVISION_TENANT_ROLES.bindTo(controller.flagSource());
}
@Override
@@ -26,10 +28,15 @@ public class TenantRoleMaintainer extends ControllerMaintainer {
var tenants = controller().tenants().asList();
var tenantsWithRoles = tenants.stream()
.map(Tenant::name)
- // Only maintain a subset of the tenants
- .filter(name -> provisionTenantRoles.with(FetchVector.Dimension.TENANT_ID, name.value()).value())
+ .filter(this::hasProductionDeployment)
.collect(Collectors.toList());
roleService.maintainRoles(tenantsWithRoles);
return true;
}
+
+ private boolean hasProductionDeployment(TenantName tenant) {
+ return controller().applications().asList(tenant).stream()
+ .map(Application::productionInstances)
+ .anyMatch(Predicate.not(Map::isEmpty));
+ }
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/VCMRMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/VCMRMaintainer.java
index a8de70a56a2..fedf3d90760 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/VCMRMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/VCMRMaintainer.java
@@ -10,9 +10,12 @@ import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.NodeRepository;
import com.yahoo.vespa.hosted.controller.api.integration.noderepository.NodeRepositoryNode;
import com.yahoo.vespa.hosted.controller.api.integration.noderepository.NodeState;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequest;
import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequest.Impact;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequestClient;
import com.yahoo.vespa.hosted.controller.api.integration.vcmr.HostAction;
import com.yahoo.vespa.hosted.controller.api.integration.vcmr.HostAction.State;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.VCMRReport;
import com.yahoo.vespa.hosted.controller.api.integration.vcmr.VespaChangeRequest;
import com.yahoo.vespa.hosted.controller.api.integration.vcmr.VespaChangeRequest.Status;
import com.yahoo.vespa.hosted.controller.persistence.CuratorDb;
@@ -24,6 +27,7 @@ import java.time.ZonedDateTime;
import java.util.List;
import java.util.Map;
import java.util.Optional;
+import java.util.Set;
import java.util.function.Predicate;
import java.util.logging.Logger;
import java.util.stream.Collectors;
@@ -41,11 +45,15 @@ public class VCMRMaintainer extends ControllerMaintainer {
private final Duration ALLOWED_POSTPONEMENT_TIME = Duration.ofDays(7);
private final CuratorDb curator;
private final NodeRepository nodeRepository;
+ private final ChangeRequestClient changeRequestClient;
+ private final SystemName system;
public VCMRMaintainer(Controller controller, Duration interval) {
super(controller, interval, null, SystemName.allOf(Predicate.not(SystemName::isPublic)));
this.curator = controller.curator();
this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
+ this.changeRequestClient = controller.serviceRegistry().changeRequestClient();
+ this.system = controller.system();
}
@Override
@@ -65,11 +73,14 @@ public class VCMRMaintainer extends ControllerMaintainer {
try (var lock = curator.lockChangeRequests()) {
// Read the vcmr again, in case the source status has been updated
curator.readChangeRequest(changeRequest.getId())
- .ifPresent(vcmr -> curator.writeChangeRequest(vcmr.withActionPlan(nextActions)
- .withStatus(status)));
+ .ifPresent(vcmr -> {
+ var updatedVcmr = vcmr.withActionPlan(nextActions)
+ .withStatus(status);
+ curator.writeChangeRequest(updatedVcmr);
+ approveChangeRequest(updatedVcmr);
+ });
}
});
-
return true;
}
@@ -77,7 +88,7 @@ public class VCMRMaintainer extends ControllerMaintainer {
* Status is based on:
* 1. Whether the source has reportedly closed the request
* 2. Whether any host requires operator action
- * 3. Whether any host has started/finished retiring
+ * 3. Whether any host is pending/started/finished retirement
*/
private Status getStatus(List<HostAction> nextActions, VespaChangeRequest changeRequest) {
if (changeRequest.getChangeRequestSource().isClosed()) {
@@ -90,10 +101,14 @@ public class VCMRMaintainer extends ControllerMaintainer {
return Status.REQUIRES_OPERATOR_ACTION;
}
- if (byActionState.getOrDefault(State.RETIRING, 0L) + byActionState.getOrDefault(State.RETIRED, 0L) > 0) {
+ if (byActionState.getOrDefault(State.RETIRING, 0L) > 0) {
return Status.IN_PROGRESS;
}
+ if (Set.of(State.RETIRED, State.NONE).containsAll(byActionState.keySet())) {
+ return Status.READY;
+ }
+
if (byActionState.getOrDefault(State.PENDING_RETIREMENT, 0L) > 0) {
return Status.PENDING_ACTION;
}
@@ -130,9 +145,15 @@ public class VCMRMaintainer extends ControllerMaintainer {
if (changeRequest.getChangeRequestSource().isClosed()) {
logger.fine(() -> changeRequest.getChangeRequestSource().getId() + " is closed, recycling " + node.hostname());
recycleNode(changeRequest.getZoneId(), node, hostAction);
+ removeReport(changeRequest, node);
return hostAction.withState(State.COMPLETE);
}
+ if (isLowImpact(changeRequest))
+ return hostAction;
+
+ addReport(changeRequest, node);
+
if (isPostponed(changeRequest, hostAction)) {
logger.fine(() -> changeRequest.getChangeRequestSource().getId() + " is postponed, recycling " + node.hostname());
recycleNode(changeRequest.getZoneId(), node, hostAction);
@@ -233,9 +254,12 @@ public class VCMRMaintainer extends ControllerMaintainer {
.orElse(false);
}
private Predicate<VespaChangeRequest> shouldUpdate() {
- return changeRequest -> changeRequest.getStatus() != Status.COMPLETED &&
- List.of(Impact.HIGH, Impact.VERY_HIGH)
- .contains(changeRequest.getImpact());
+ return changeRequest -> changeRequest.getStatus() != Status.COMPLETED;
+ }
+
+ private boolean isLowImpact(VespaChangeRequest changeRequest) {
+ return !List.of(Impact.HIGH, Impact.VERY_HIGH)
+ .contains(changeRequest.getImpact());
}
private boolean hasSpareCapacity(ZoneId zoneId, List<Node> nodes) {
@@ -253,4 +277,40 @@ public class VCMRMaintainer extends ControllerMaintainer {
newNode.setWantToRetire(wantToRetire);
nodeRepository.patchNode(zoneId, node.hostname().value(), newNode);
}
+
+ private void approveChangeRequest(VespaChangeRequest changeRequest) {
+ if (!system.equals(SystemName.main))
+ return;
+ if (changeRequest.getStatus() == Status.REQUIRES_OPERATOR_ACTION)
+ return;
+ if (changeRequest.getApproval() != ChangeRequest.Approval.REQUESTED)
+ return;
+
+ logger.info("Approving " + changeRequest.getChangeRequestSource().getId());
+ changeRequestClient.approveChangeRequest(changeRequest);
+ }
+
+ private void removeReport(VespaChangeRequest changeRequest, Node node) {
+ var report = VCMRReport.fromReports(node.reports());
+
+ if (report.removeVcmr(changeRequest.getChangeRequestSource().getId())) {
+ updateReport(changeRequest.getZoneId(), node, report);
+ }
+ }
+
+ private void addReport(VespaChangeRequest changeRequest, Node node) {
+ var report = VCMRReport.fromReports(node.reports());
+
+ var source = changeRequest.getChangeRequestSource();
+ if (report.addVcmr(source.getId(), source.getPlannedStartTime(), source.getPlannedEndTime())) {
+ updateReport(changeRequest.getZoneId(), node, report);
+ }
+ }
+
+ private void updateReport(ZoneId zoneId, Node node, VCMRReport report) {
+ logger.info(String.format("Updating report for %s: %s", node.hostname(), report));
+ var newNode = new NodeRepositoryNode();
+ newNode.setReports(report.toNodeReports());
+ nodeRepository.patchNode(zoneId, node.hostname().value(), newNode);
+ }
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/metric/ConfigServerMetrics.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/metric/ConfigServerMetrics.java
deleted file mode 100644
index 266af5e35fe..00000000000
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/metric/ConfigServerMetrics.java
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.metric;
-
-import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.zone.ZoneId;
-import com.yahoo.vespa.hosted.controller.api.application.v4.model.ClusterMetrics;
-import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
-import com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServer;
-
-import java.util.List;
-import java.util.Optional;
-import java.util.function.Function;
-
-/**
- * Retrieves metrics from the configuration server.
- *
- * @author ogronnesby
- */
-public class ConfigServerMetrics {
-
- private final ConfigServer configServer;
-
- public ConfigServerMetrics(ConfigServer configServer) {
- this.configServer = configServer;
- }
-
- public ApplicationMetrics getApplicationMetrics(ApplicationId application) {
- // TODO(ogronnesby): How to produce these values in Public context?
- return new ApplicationMetrics(0.0, 0.0);
- }
-
- public DeploymentMetrics getDeploymentMetrics(ApplicationId application, ZoneId zone) {
- var deploymentId = new DeploymentId(application, zone);
- var metrics = configServer.getDeploymentMetrics(deploymentId);
-
- // The field names here come from the MetricsResponse class.
- return new DeploymentMetrics(
- metrics.stream().flatMap(m -> m.queriesPerSecond().stream()).mapToDouble(Double::doubleValue).sum(),
- metrics.stream().flatMap(m -> m.feedPerSecond().stream()).mapToDouble(Double::doubleValue).sum(),
- metrics.stream().flatMap(m -> m.documentCount().stream()).mapToLong(Double::longValue).sum(),
- weightedAverageLatency(metrics, ClusterMetrics::queriesPerSecond, ClusterMetrics::queryLatency),
- weightedAverageLatency(metrics, ClusterMetrics::feedPerSecond, ClusterMetrics::feedLatency)
- );
- }
-
- private double weightedAverageLatency(List<ClusterMetrics> metrics,
- Function<ClusterMetrics, Optional<Double>> rateExtractor,
- Function<ClusterMetrics, Optional<Double>> latencyExtractor)
- {
- var rateSum = metrics.stream().flatMap(m -> rateExtractor.apply(m).stream()).mapToDouble(Double::longValue).sum();
- if (rateSum == 0) {
- return 0.0;
- }
-
- var weightedLatency = metrics.stream()
- .flatMap(m -> {
- return latencyExtractor.apply(m).flatMap(l -> rateExtractor.apply(m).map(r -> l * r)).stream();
- })
- .mapToDouble(Double::doubleValue)
- .sum();
-
- return weightedLatency / rateSum;
- }
-}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/metric/DeploymentMetrics.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/metric/DeploymentMetrics.java
deleted file mode 100644
index 33a3ce957ed..00000000000
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/metric/DeploymentMetrics.java
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.metric;
-
-/**
- * Metrics for a single deployment of an application.
- *
- * @author bratseth
- */
-public class DeploymentMetrics {
-
- private final double queriesPerSecond;
- private final double writesPerSecond;
- private final long documentCount;
- private final double queryLatencyMillis;
- private final double writeLatencyMillis;
-
- public DeploymentMetrics(double queriesPerSecond, double writesPerSecond,
- long documentCount,
- double queryLatencyMillis, double writeLatencyMillis) {
- this.queriesPerSecond = queriesPerSecond;
- this.writesPerSecond = writesPerSecond;
- this.documentCount = documentCount;
- this.queryLatencyMillis = queryLatencyMillis;
- this.writeLatencyMillis = writeLatencyMillis;
- }
-
- public double queriesPerSecond() {
- return queriesPerSecond;
- }
-
- public double writesPerSecond() {
- return writesPerSecond;
- }
-
- public long documentCount() {
- return documentCount;
- }
-
- public double queryLatencyMillis() {
- return queryLatencyMillis;
- }
-
- public double writeLatencyMillis() {
- return writeLatencyMillis;
- }
-
-}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notification.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notification.java
index 299ef3ef50d..ea0422ea9fc 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notification.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notification.java
@@ -6,17 +6,24 @@ import java.util.List;
import java.util.Objects;
/**
+ * Represents an event that we want to notify the tenant about. The message(s) should be short
+ * and only describe event details: the final presentation will prefix the message with general
+ * information from other metadata in this notification (e.g. links to relevant console views
+ * and/or relevant documentation).
+ *
* @author freva
*/
public class Notification {
private final Instant at;
private final Type type;
+ private final Level level;
private final NotificationSource source;
private final List<String> messages;
- public Notification(Instant at, Type type, NotificationSource source, List<String> messages) {
+ public Notification(Instant at, Type type, Level level, NotificationSource source, List<String> messages) {
this.at = Objects.requireNonNull(at, "at cannot be null");
this.type = Objects.requireNonNull(type, "type cannot be null");
+ this.level = Objects.requireNonNull(level, "level cannot be null");
this.source = Objects.requireNonNull(source, "source cannot be null");
this.messages = List.copyOf(Objects.requireNonNull(messages, "messages cannot be null"));
if (messages.size() < 1) throw new IllegalArgumentException("messages cannot be empty");
@@ -24,6 +31,7 @@ public class Notification {
public Instant at() { return at; }
public Type type() { return type; }
+ public Level level() { return level; }
public NotificationSource source() { return source; }
public List<String> messages() { return messages; }
@@ -32,12 +40,13 @@ public class Notification {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Notification that = (Notification) o;
- return at.equals(that.at) && type == that.type && source.equals(that.source) && messages.equals(that.messages);
+ return at.equals(that.at) && type == that.type && level == that.level &&
+ source.equals(that.source) && messages.equals(that.messages);
}
@Override
public int hashCode() {
- return Objects.hash(at, type, source, messages);
+ return Objects.hash(at, type, level, source, messages);
}
@Override
@@ -45,30 +54,26 @@ public class Notification {
return "Notification{" +
"at=" + at +
", type=" + type +
+ ", level=" + level +
", source=" + source +
", messages=" + messages +
'}';
}
public enum Level {
- warning, error;
+ // Must be ordered in order of importance
+ warning, error
}
public enum Type {
- /** Warnings about usage of deprecated features in application package */
- APPLICATION_PACKAGE_WARNING(Level.warning),
+ /** Related to contents of application package, e.g. usage of deprecated features/syntax */
+ applicationPackage,
- /** Failure to deploy application package */
- DEPLOYMENT_FAILURE(Level.error);
+ /** Related to deployment of application, e.g. system test failure, out of capacity, internal errors, etc. */
+ deployment,
- private final Level level;
- Type(Level level) {
- this.level = level;
- }
-
- public Level level() {
- return level;
- }
+ /** Application cluster is (near) external feed blocked */
+ feedBlock;
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationSource.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationSource.java
index 827b5a71eb1..fa89c6459f1 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationSource.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationSource.java
@@ -84,11 +84,10 @@ public class NotificationSource {
* staging zone), or if this is at tenant or application level
*/
public boolean isProduction() {
- if (instance.isEmpty()) return true;
return ! zoneId.map(ZoneId::environment)
.or(() -> jobType.map(JobType::environment))
.map(Environment::isManuallyDeployed)
- .orElse(true); // Assume that notification with full application ID concern dev deployments
+ .orElse(false);
}
@Override
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java
index 950dddfc056..21df0c01f0f 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java
@@ -1,14 +1,26 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.notification;
+import com.yahoo.collections.Pair;
+import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.vespa.curator.Lock;
import com.yahoo.vespa.hosted.controller.Controller;
+import com.yahoo.vespa.hosted.controller.api.application.v4.model.ClusterMetrics;
+import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.persistence.CuratorDb;
import java.time.Clock;
+import java.time.Instant;
import java.util.ArrayList;
+import java.util.Comparator;
import java.util.List;
+import java.util.Locale;
+import java.util.Optional;
import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static com.yahoo.vespa.hosted.controller.notification.Notification.Level;
+import static com.yahoo.vespa.hosted.controller.notification.Notification.Type;
/**
* Adds, updates and removes tenant notifications in ZK
@@ -35,26 +47,26 @@ public class NotificationsDb {
.collect(Collectors.toUnmodifiableList());
}
- public void setNotification(NotificationSource source, Notification.Type type, String message) {
- setNotification(source, type, List.of(message));
+ public void setNotification(NotificationSource source, Type type, Level level, String message) {
+ setNotification(source, type, level, List.of(message));
}
/**
* Add a notification with given source and type. If a notification with same source and type
* already exists, it'll be replaced by this one instead
*/
- public void setNotification(NotificationSource source, Notification.Type type, List<String> messages) {
+ public void setNotification(NotificationSource source, Type type, Level level, List<String> messages) {
try (Lock lock = curatorDb.lockNotifications(source.tenant())) {
List<Notification> notifications = curatorDb.readNotifications(source.tenant()).stream()
.filter(notification -> !source.equals(notification.source()) || type != notification.type())
.collect(Collectors.toCollection(ArrayList::new));
- notifications.add(new Notification(clock.instant(), type, source, messages));
+ notifications.add(new Notification(clock.instant(), type, level, source, messages));
curatorDb.writeNotifications(source.tenant(), notifications);
}
}
/** Remove the notification with the given source and type */
- public void removeNotification(NotificationSource source, Notification.Type type) {
+ public void removeNotification(NotificationSource source, Type type) {
try (Lock lock = curatorDb.lockNotifications(source.tenant())) {
List<Notification> initial = curatorDb.readNotifications(source.tenant());
List<Notification> filtered = initial.stream()
@@ -81,4 +93,66 @@ public class NotificationsDb {
curatorDb.writeNotifications(source.tenant(), filtered);
}
}
+
+ /**
+ * Updates feeding blocked notifications for the given deployment based on current cluster metrics.
+ * Will clear notifications of any cluster not reporting the metrics or whose metrics indicate feed is not blocked,
+ * while setting notifications for clusters that are (Level.error) or are nearly (Level.warning) feed blocked.
+ */
+ public void setDeploymentFeedingBlockedNotifications(DeploymentId deploymentId, List<ClusterMetrics> clusterMetrics) {
+ Instant now = clock.instant();
+ List<Notification> feedBlockNotifications = clusterMetrics.stream()
+ .flatMap(metric -> {
+ Optional<Pair<Level, String>> memoryStatus =
+ resourceUtilToFeedBlockStatus("memory", metric.memoryUtil(), metric.memoryFeedBlockLimit());
+ Optional<Pair<Level, String>> diskStatus =
+ resourceUtilToFeedBlockStatus("disk", metric.diskUtil(), metric.diskFeedBlockLimit());
+ if (memoryStatus.isEmpty() && diskStatus.isEmpty()) return Stream.empty();
+
+ // Find the max among levels
+ Level level = Stream.of(memoryStatus, diskStatus)
+ .flatMap(status -> status.stream().map(Pair::getFirst))
+ .max(Comparator.comparing(Enum::ordinal)).get();
+ List<String> messages = Stream.concat(memoryStatus.stream(), diskStatus.stream())
+ .filter(status -> status.getFirst() == level) // Do not mix message from different levels
+ .map(Pair::getSecond)
+ .collect(Collectors.toUnmodifiableList());
+ NotificationSource source = NotificationSource.from(deploymentId, ClusterSpec.Id.from(metric.getClusterId()));
+ return Stream.of(new Notification(now, Type.feedBlock, level, source, messages));
+ })
+ .collect(Collectors.toUnmodifiableList());
+
+ NotificationSource deploymentSource = NotificationSource.from(deploymentId);
+ try (Lock lock = curatorDb.lockNotifications(deploymentSource.tenant())) {
+ List<Notification> initial = curatorDb.readNotifications(deploymentSource.tenant());
+ List<Notification> updated = Stream.concat(
+ initial.stream()
+ .filter(notification ->
+ // Filter out old feed block notifications for this deployment
+ notification.type() != Type.feedBlock || !deploymentSource.contains(notification.source())),
+ // ... and add the new notifications for this deployment
+ feedBlockNotifications.stream())
+ .collect(Collectors.toUnmodifiableList());
+
+ if (!initial.equals(updated))
+ curatorDb.writeNotifications(deploymentSource.tenant(), updated);
+ }
+ }
+
+ /**
+ * Returns a feed block summary for the given resource: the notification level and
+ * notification message for the given resource utilization wrt. given resource limit.
+ * If utilization is well below the limit, Optional.empty() is returned.
+ */
+ private static Optional<Pair<Level, String>> resourceUtilToFeedBlockStatus(
+ String resource, Optional<Double> util, Optional<Double> feedBlockLimit) {
+ if (util.isEmpty() || feedBlockLimit.isEmpty()) return Optional.empty();
+ double utilRelativeToLimit = util.get() / feedBlockLimit.get();
+ if (utilRelativeToLimit < 0.9) return Optional.empty();
+
+ String message = String.format(Locale.US, "%s (usage: %.1f%%, feed block limit: %.1f%%)",
+ resource, 100 * util.get(), 100 * feedBlockLimit.get());
+ if (utilRelativeToLimit < 1) return Optional.of(new Pair<>(Level.warning, message));
+ return Optional.of(new Pair<>(Level.error, message));
+ }
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ChangeRequestSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ChangeRequestSerializer.java
index 407eb5ad5ab..49da8d7a2a2 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ChangeRequestSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ChangeRequestSerializer.java
@@ -119,7 +119,7 @@ public class ChangeRequestSerializer {
cursor.setString(STATUS_FIELD, source.getStatus().name());
}
- private static ChangeRequestSource readChangeRequestSource(Inspector inspector) {
+ public static ChangeRequestSource readChangeRequestSource(Inspector inspector) {
return new ChangeRequestSource(
inspector.field(SOURCE_SYSTEM_FIELD).asString(),
inspector.field(ID_FIELD).asString(),
@@ -130,7 +130,7 @@ public class ChangeRequestSerializer {
);
}
- private static List<HostAction> readHostActionPlan(Inspector inspector) {
+ public static List<HostAction> readHostActionPlan(Inspector inspector) {
if (!inspector.valid())
return List.of();
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
index 3d6cb45aeb1..fb004938572 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
@@ -602,6 +602,13 @@ public class CuratorDb {
.map(slime -> NotificationsSerializer.fromSlime(tenantName, slime)).orElseGet(List::of);
}
+
+ public List<TenantName> listNotifications() {
+ return curator.getChildren(notificationsRoot).stream()
+ .map(TenantName::from)
+ .collect(Collectors.toUnmodifiableList());
+ }
+
public void writeNotifications(TenantName tenantName, List<Notification> notifications) {
curator.set(notificationsPath(tenantName), asJson(NotificationsSerializer.toSlime(notifications)));
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializer.java
index dcb485b9016..54dc102d573 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializer.java
@@ -15,7 +15,6 @@ import com.yahoo.vespa.hosted.controller.notification.Notification;
import com.yahoo.vespa.hosted.controller.notification.NotificationSource;
import java.util.List;
-import java.util.function.Function;
import java.util.stream.Collectors;
/**
@@ -35,6 +34,7 @@ public class NotificationsSerializer {
private static final String notificationsFieldName = "notifications";
private static final String atFieldName = "at";
private static final String typeField = "type";
+ private static final String levelField = "level";
private static final String messagesField = "messages";
private static final String applicationField = "application";
private static final String instanceField = "instance";
@@ -51,6 +51,7 @@ public class NotificationsSerializer {
Cursor notificationObject = notificationsArray.addObject();
notificationObject.setLong(atFieldName, notification.at().toEpochMilli());
notificationObject.setString(typeField, asString(notification.type()));
+ notificationObject.setString(levelField, asString(notification.level()));
Cursor messagesArray = notificationObject.setArray(messagesField);
notification.messages().forEach(messagesArray::addString);
@@ -72,9 +73,10 @@ public class NotificationsSerializer {
}
private static Notification fromInspector(TenantName tenantName, Inspector inspector) {
- return new Notification(
+ return new Notification(
Serializers.instant(inspector.field(atFieldName)),
typeFrom(inspector.field(typeField)),
+ levelFrom(inspector.field(levelField)),
new NotificationSource(
tenantName,
Serializers.optionalString(inspector.field(applicationField)).map(ApplicationName::from),
@@ -88,17 +90,35 @@ public class NotificationsSerializer {
private static String asString(Notification.Type type) {
switch (type) {
- case APPLICATION_PACKAGE_WARNING: return "APPLICATION_PACKAGE_WARNING";
- case DEPLOYMENT_FAILURE: return "DEPLOYMENT_FAILURE";
+ case applicationPackage: return "applicationPackage";
+ case deployment: return "deployment";
+ case feedBlock: return "feedBlock";
default: throw new IllegalArgumentException("No serialization defined for notification type " + type);
}
}
private static Notification.Type typeFrom(Inspector field) {
switch (field.asString()) {
- case "APPLICATION_PACKAGE_WARNING": return Notification.Type.APPLICATION_PACKAGE_WARNING;
- case "DEPLOYMENT_FAILURE": return Notification.Type.DEPLOYMENT_FAILURE;
+ case "applicationPackage": return Notification.Type.applicationPackage;
+ case "deployment": return Notification.Type.deployment;
+ case "feedBlock": return Notification.Type.feedBlock;
default: throw new IllegalArgumentException("Unknown serialized notification type value '" + field.asString() + "'");
}
}
+
+ private static String asString(Notification.Level level) {
+ switch (level) {
+ case warning: return "warning";
+ case error: return "error";
+ default: throw new IllegalArgumentException("No serialization defined for notification level " + level);
+ }
+ }
+
+ private static Notification.Level levelFrom(Inspector field) {
+ switch (field.asString()) {
+ case "warning": return Notification.Level.warning;
+ case "error": return Notification.Level.error;
+ default: throw new IllegalArgumentException("Unknown serialized notification level value '" + field.asString() + "'");
+ }
+ }
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
index 994dc877182..ae4d891069c 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
@@ -497,7 +497,7 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
private static void toSlime(Cursor cursor, Notification notification) {
cursor.setLong("at", notification.at().toEpochMilli());
- cursor.setString("level", notificatioLevelAsString(notification.type().level()));
+ cursor.setString("level", notificationLevelAsString(notification.level()));
cursor.setString("type", notificationTypeAsString(notification.type()));
Cursor messagesArray = cursor.setArray("messages");
notification.messages().forEach(messagesArray::addString);
@@ -515,13 +515,14 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
private static String notificationTypeAsString(Notification.Type type) {
switch (type) {
- case APPLICATION_PACKAGE_WARNING: return "APPLICATION_PACKAGE_WARNING";
- case DEPLOYMENT_FAILURE: return "DEPLOYMENT_FAILURE";
+ case applicationPackage: return "applicationPackage";
+ case deployment: return "deployment";
+ case feedBlock: return "feedBlock";
default: throw new IllegalArgumentException("No serialization defined for notification type " + type);
}
}
- private static String notificatioLevelAsString(Notification.Level level) {
+ private static String notificationLevelAsString(Notification.Level level) {
switch (level) {
case warning: return "warning";
case error: return "error";
@@ -1307,6 +1308,7 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
object.setString("url", endpoint.url().toString());
object.setString("scope", endpointScopeString(endpoint.scope()));
object.setString("routingMethod", routingMethodString(endpoint.routingMethod()));
+ object.setBool("legacy", endpoint.legacy());
}
private void toSlime(Cursor response, DeploymentId deploymentId, Deployment deployment, HttpRequest request) {
@@ -1318,17 +1320,22 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
var application = controller.applications().requireApplication(TenantAndApplicationId.from(deploymentId.applicationId()));
// Add zone endpoints
+ boolean legacyEndpoints = request.getBooleanProperty("includeLegacyEndpoints");
var endpointArray = response.setArray("endpoints");
EndpointList zoneEndpoints = controller.routing().endpointsOf(deploymentId)
- .scope(Endpoint.Scope.zone)
- .not().legacy();
+ .scope(Endpoint.Scope.zone);
+ if (!legacyEndpoints) {
+ zoneEndpoints = zoneEndpoints.not().legacy();
+ }
for (var endpoint : controller.routing().directEndpoints(zoneEndpoints, deploymentId.applicationId())) {
toSlime(endpoint, endpointArray.addObject());
}
// Add global endpoints
EndpointList globalEndpoints = controller.routing().endpointsOf(application, deploymentId.applicationId().instance())
- .not().legacy()
.targets(deploymentId.zoneId());
+ if (!legacyEndpoints) {
+ globalEndpoints = globalEndpoints.not().legacy();
+ }
for (var endpoint : controller.routing().directEndpoints(globalEndpoints, deploymentId.applicationId())) {
toSlime(endpoint, endpointArray.addObject());
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandler.java
index c56c2e93f65..daa84f4700c 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandler.java
@@ -26,14 +26,11 @@ import com.yahoo.vespa.hosted.controller.api.integration.billing.BillingControll
import com.yahoo.vespa.hosted.controller.api.integration.billing.PlanId;
import com.yahoo.vespa.hosted.controller.tenant.Tenant;
import com.yahoo.yolean.Exceptions;
-import org.apache.commons.csv.CSVFormat;
import javax.ws.rs.BadRequestException;
import javax.ws.rs.ForbiddenException;
import javax.ws.rs.NotFoundException;
import java.io.IOException;
-import java.io.OutputStream;
-import java.io.OutputStreamWriter;
import java.math.BigDecimal;
import java.security.Principal;
import java.time.LocalDate;
@@ -482,27 +479,4 @@ public class BillingApiHandler extends LoggingRequestHandler {
.count() > 0;
}
- private static class CsvResponse extends HttpResponse {
- private final String[] header;
- private final List<Object[]> rows;
-
- CsvResponse(String[] header, List<Object[]> rows) {
- super(200);
- this.header = header;
- this.rows = rows;
- }
-
- @Override
- public void render(OutputStream outputStream) throws IOException {
- var writer = new OutputStreamWriter(outputStream);
- var printer = CSVFormat.DEFAULT.withRecordSeparator('\n').withHeader(this.header).print(writer);
- for (var row : this.rows) printer.printRecord(row);
- printer.flush();
- }
-
- @Override
- public String getContentType() {
- return "text/csv; encoding=utf-8";
- }
- }
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandlerV2.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandlerV2.java
new file mode 100644
index 00000000000..bfcefecba0c
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandlerV2.java
@@ -0,0 +1,352 @@
+package com.yahoo.vespa.hosted.controller.restapi.billing;
+
+import com.yahoo.config.provision.TenantName;
+import com.yahoo.container.jdisc.HttpResponse;
+import com.yahoo.container.jdisc.LoggingRequestHandler;
+import com.yahoo.restapi.MessageResponse;
+import com.yahoo.restapi.RestApi;
+import com.yahoo.restapi.RestApiException;
+import com.yahoo.restapi.RestApiRequestHandler;
+import com.yahoo.restapi.SlimeJsonResponse;
+import com.yahoo.slime.Cursor;
+import com.yahoo.slime.Inspector;
+import com.yahoo.slime.Slime;
+import com.yahoo.slime.SlimeUtils;
+import com.yahoo.slime.Type;
+import com.yahoo.vespa.hosted.controller.ApplicationController;
+import com.yahoo.vespa.hosted.controller.Controller;
+import com.yahoo.vespa.hosted.controller.TenantController;
+import com.yahoo.vespa.hosted.controller.api.integration.billing.BillingController;
+import com.yahoo.vespa.hosted.controller.api.integration.billing.CollectionMethod;
+import com.yahoo.vespa.hosted.controller.api.integration.billing.Invoice;
+import com.yahoo.vespa.hosted.controller.api.integration.billing.PlanId;
+import com.yahoo.vespa.hosted.controller.api.role.Role;
+import com.yahoo.vespa.hosted.controller.api.role.SecurityContext;
+import com.yahoo.vespa.hosted.controller.tenant.CloudTenant;
+import com.yahoo.vespa.hosted.controller.tenant.Tenant;
+
+import javax.ws.rs.BadRequestException;
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.time.Clock;
+import java.time.Instant;
+import java.time.LocalDate;
+import java.time.ZoneOffset;
+import java.time.format.DateTimeFormatter;
+import java.util.Comparator;
+import java.util.Optional;
+import java.util.List;
+
+/**
+ * @author ogronnesby
+ */
+public class BillingApiHandlerV2 extends RestApiRequestHandler<BillingApiHandlerV2> {
+ private static final String[] CSV_INVOICE_HEADER = new String[]{ "ID", "Tenant", "From", "To", "CpuHours", "MemoryHours", "DiskHours", "Cpu", "Memory", "Disk", "Additional" };
+
+ private final ApplicationController applications;
+ private final TenantController tenants;
+ private final BillingController billing;
+ private final Clock clock;
+
+ public BillingApiHandlerV2(LoggingRequestHandler.Context context, Controller controller) {
+ super(context, BillingApiHandlerV2::createRestApi);
+ this.applications = controller.applications();
+ this.tenants = controller.tenants();
+ this.billing = controller.serviceRegistry().billingController();
+ this.clock = controller.serviceRegistry().clock();
+ }
+
+ private static RestApi createRestApi(BillingApiHandlerV2 self) {
+ return RestApi.builder()
+ /*
+ * This is the API that is available to tenants to view their status
+ */
+ .addRoute(RestApi.route("/billing/v2/tenant/{tenant}")
+ .get(self::tenant)
+ .patch(Slime.class, self::patchTenant))
+ .addRoute(RestApi.route("/billing/v2/tenant/{tenant}/usage")
+ .get(self::tenantUsage))
+ .addRoute(RestApi.route("/billing/v2/tenant/{tenant}/bill")
+ .get(self::tenantInvoiceList))
+ .addRoute(RestApi.route("/billing/v2/tenant/{tenant}/bill/{invoice}")
+ .get(self::tenantInvoice))
+ /*
+ * This is the API that is created for accountant role in Vespa Cloud
+ */
+ .addRoute(RestApi.route("/billing/v2/accountant")
+ .get(self::accountant))
+ .addRoute(RestApi.route("/billing/v2/accountant/preview/tenant/{tenant}")
+ .get(self::previewBill)
+ .post(Slime.class, self::createBill))
+ /*
+ * Utility - map Slime.class => SlimeJsonResponse
+ */
+ .addRequestMapper(Slime.class, BillingApiHandlerV2::slimeRequestMapper)
+ .addResponseMapper(Slime.class, BillingApiHandlerV2::slimeResponseMapper)
+ .build();
+ }
+
+ // ---------- TENANT API ----------
+
+ private Slime tenant(RestApi.RequestContext requestContext) {
+ var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
+ var tenant = tenants.require(tenantName, CloudTenant.class);
+
+ var plan = billing.getPlan(tenant.name());
+ var collectionMethod = billing.getCollectionMethod(tenant.name());
+
+ var response = new Slime();
+ var cursor = response.setObject();
+ cursor.setString("tenant", tenant.name().value());
+ cursor.setString("plan", plan.value());
+ cursor.setString("collection", collectionMethod.name());
+ return response;
+ }
+
+ private Slime patchTenant(RestApi.RequestContext requestContext, Slime body) {
+ var security = requestContext.attributes().get(SecurityContext.ATTRIBUTE_NAME)
+ .map(SecurityContext.class::cast)
+ .orElseThrow(() -> new RestApiException.Forbidden("Must be logged in"));
+
+ var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
+ var tenant = tenants.require(tenantName, CloudTenant.class);
+
+ var newPlan = body.get().field("plan");
+ var newCollection = body.get().field("collection");
+
+ if (newPlan.valid() && newPlan.type() == Type.STRING) {
+ var planId = PlanId.from(newPlan.asString());
+ var hasDeployments = tenantHasDeployments(tenant.name());
+ var result = billing.setPlan(tenant.name(), planId, hasDeployments);
+ if (! result.isSuccess()) {
+ throw new RestApiException.Forbidden(result.getErrorMessage().get());
+ }
+ }
+
+ if (newCollection.valid() && newCollection.type() == Type.STRING) {
+ if (security.roles().contains(Role.hostedAccountant())) {
+ var collection = CollectionMethod.valueOf(newCollection.asString());
+ billing.setCollectionMethod(tenant.name(), collection);
+ } else {
+ throw new RestApiException.Forbidden("Only accountant can change billing method");
+ }
+ }
+
+ var response = new Slime();
+ var cursor = response.setObject();
+ cursor.setString("tenant", tenant.name().value());
+ cursor.setString("plan", billing.getPlan(tenant.name()).value());
+ cursor.setString("collection", billing.getCollectionMethod(tenant.name()).name());
+ return response;
+ }
+
+ private Slime tenantInvoiceList(RestApi.RequestContext requestContext) {
+ var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
+ var tenant = tenants.require(tenantName, CloudTenant.class);
+
+ var slime = new Slime();
+ invoicesSummaryToSlime(slime.setObject().setArray("invoices"), billing.getInvoicesForTenant(tenant.name()));
+ return slime;
+ }
+
+ private HttpResponse tenantInvoice(RestApi.RequestContext requestContext) {
+ var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
+ var tenant = tenants.require(tenantName, CloudTenant.class);
+ var invoiceId = requestContext.pathParameters().getStringOrThrow("invoice");
+ var format = requestContext.queryParameters().getString("format").orElse("json");
+
+ var invoice = billing.getInvoicesForTenant(tenant.name()).stream()
+ .filter(inv -> inv.id().value().equals(invoiceId))
+ .findAny()
+ .orElseThrow(RestApiException.NotFound::new);
+
+ if (format.equals("json")) {
+ var slime = new Slime();
+ toSlime(slime.setObject(), invoice);
+ return new SlimeJsonResponse(slime);
+ }
+
+ if (format.equals("csv")) {
+ var csv = toCsv(invoice);
+ return new CsvResponse(CSV_INVOICE_HEADER, csv);
+ }
+
+ throw new RestApiException.BadRequest("Unknown format: " + format);
+ }
+
+ private boolean tenantHasDeployments(TenantName tenant) {
+ return applications.asList(tenant).stream()
+ .flatMap(app -> app.instances().values().stream())
+ .mapToLong(instance -> instance.deployments().size())
+ .sum() > 0;
+ }
+
+ private Slime tenantUsage(RestApi.RequestContext requestContext) {
+ var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
+ var tenant = tenants.require(tenantName, CloudTenant.class);
+ var untilAt = untilParameter(requestContext);
+ var usage = billing.createUncommittedInvoice(tenant.name(), untilAt.atZone(ZoneOffset.UTC).toLocalDate());
+
+ var slime = new Slime();
+ usageToSlime(slime.setObject(), usage);
+ return slime;
+ }
+
+ // --------- ACCOUNTANT API ----------
+
+ private Slime accountant(RestApi.RequestContext requestContext) {
+ var untilAt = untilParameter(requestContext);
+ var usagePerTenant = billing.createUncommittedInvoices(untilAt.atZone(ZoneOffset.UTC).toLocalDate());
+
+ var response = new Slime();
+ var tenantsResponse = response.setObject().setArray("tenants");
+ tenants.asList().stream().sorted(Comparator.comparing(Tenant::name)).forEach(tenant -> {
+ var usage = Optional.ofNullable(usagePerTenant.get(tenant.name()));
+ var tenantResponse = tenantsResponse.addObject();
+ tenantResponse.setString("tenant", tenant.name().value());
+ tenantResponse.setString("plan", billing.getPlan(tenant.name()).value());
+ tenantResponse.setString("collection", billing.getCollectionMethod(tenant.name()).name());
+ tenantResponse.setString("lastBill", usage.map(Invoice::getStartTime).map(DateTimeFormatter.ISO_DATE::format).orElse(null));
+ tenantResponse.setString("unbilled", usage.map(Invoice::sum).map(BigDecimal::toPlainString).orElse("0.00"));
+ });
+
+ return response;
+ }
+
+ private Slime previewBill(RestApi.RequestContext requestContext) {
+ var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
+ var tenant = tenants.require(tenantName, CloudTenant.class);
+ var untilAt = untilParameter(requestContext);
+
+ var usage = billing.createUncommittedInvoice(tenant.name(), untilAt.atZone(ZoneOffset.UTC).toLocalDate());
+
+ var slime = new Slime();
+ toSlime(slime.setObject(), usage);
+ return slime;
+ }
+
+ private HttpResponse createBill(RestApi.RequestContext requestContext, Slime slime) {
+ var body = slime.get();
+ var security = requestContext.attributes().get(SecurityContext.ATTRIBUTE_NAME)
+ .map(SecurityContext.class::cast)
+ .orElseThrow(() -> new RestApiException.Forbidden("Must be logged in"));
+
+ var tenantName = TenantName.from(requestContext.pathParameters().getStringOrThrow("tenant"));
+ var tenant = tenants.require(tenantName, CloudTenant.class);
+
+ var startAt = LocalDate.parse(getInspectorFieldOrThrow(body, "from")).atStartOfDay(ZoneOffset.UTC);
+ var endAt = LocalDate.parse(getInspectorFieldOrThrow(body, "to")).atStartOfDay(ZoneOffset.UTC);
+
+ var invoiceId = billing.createInvoiceForPeriod(tenant.name(), startAt, endAt, security.principal().getName());
+
+ // TODO: Make a redirect to the bill itself
+ return new MessageResponse("Created bill " + invoiceId.value());
+ }
+
+
+ // --------- INVOICE RENDERING ----------
+
+ private void invoicesSummaryToSlime(Cursor slime, List<Invoice> invoices) {
+ invoices.forEach(invoice -> invoiceSummaryToSlime(slime.addObject(), invoice));
+ }
+
+ private void invoiceSummaryToSlime(Cursor slime, Invoice invoice) {
+ slime.setString("id", invoice.id().value());
+ slime.setString("from", invoice.getStartTime().format(DateTimeFormatter.ISO_LOCAL_DATE));
+ slime.setString("to", invoice.getEndTime().format(DateTimeFormatter.ISO_LOCAL_DATE));
+ slime.setString("total", invoice.sum().toString());
+ slime.setString("status", invoice.status());
+ }
+
+ private void usageToSlime(Cursor slime, Invoice invoice) {
+ slime.setString("from", invoice.getStartTime().format(DateTimeFormatter.ISO_LOCAL_DATE));
+ slime.setString("to", invoice.getEndTime().format(DateTimeFormatter.ISO_LOCAL_DATE));
+ slime.setString("total", invoice.sum().toString());
+ toSlime(slime.setArray("items"), invoice.lineItems());
+ }
+
+ private void toSlime(Cursor slime, Invoice invoice) {
+ slime.setString("id", invoice.id().value());
+ slime.setString("from", invoice.getStartTime().format(DateTimeFormatter.ISO_LOCAL_DATE));
+ slime.setString("to", invoice.getEndTime().format(DateTimeFormatter.ISO_LOCAL_DATE));
+ slime.setString("total", invoice.sum().toString());
+ slime.setString("status", invoice.status());
+ toSlime(slime.setArray("statusHistory"), invoice.statusHistory());
+ toSlime(slime.setArray("items"), invoice.lineItems());
+ }
+
+ private void toSlime(Cursor slime, Invoice.StatusHistory history) {
+ history.getHistory().forEach((key, value) -> {
+ var c = slime.addObject();
+ c.setString("at", key.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME));
+ c.setString("status", value);
+ });
+ }
+
+ private void toSlime(Cursor slime, List<Invoice.LineItem> items) {
+ items.forEach(item -> toSlime(slime.addObject(), item));
+ }
+
+ private void toSlime(Cursor slime, Invoice.LineItem item) {
+ slime.setString("id", item.id());
+ slime.setString("description", item.description());
+ slime.setString("amount", item.amount().toString());
+ slime.setString("plan", item.plan());
+ slime.setString("planName", billing.getPlanDisplayName(PlanId.from(item.plan())));
+
+ item.applicationId().ifPresent(appId -> {
+ slime.setString("application", appId.application().value());
+ slime.setString("instance", appId.instance().value());
+ });
+
+ item.zoneId().ifPresent(z -> slime.setString("zone", z.value()));
+
+ toSlime(slime.setObject("cpu"), item.getCpuHours(), item.getCpuCost());
+ toSlime(slime.setObject("memory"), item.getMemoryHours(), item.getMemoryCost());
+ toSlime(slime.setObject("disk"), item.getDiskHours(), item.getDiskCost());
+ }
+
+ private void toSlime(Cursor slime, Optional<BigDecimal> hours, Optional<BigDecimal> cost) {
+ hours.ifPresent(h -> slime.setString("hours", h.toString()));
+ cost.ifPresent(c -> slime.setString("cost", c.toString()));
+ }
+
+ private List<Object[]> toCsv(Invoice invoice) {
+ return List.<Object[]>of(new Object[]{
+ invoice.id().value(), invoice.tenant().value(),
+ invoice.getStartTime().format(DateTimeFormatter.ISO_DATE),
+ invoice.getEndTime().format(DateTimeFormatter.ISO_DATE),
+ invoice.sumCpuHours(), invoice.sumMemoryHours(), invoice.sumDiskHours(),
+ invoice.sumCpuCost(), invoice.sumMemoryCost(), invoice.sumDiskCost(),
+ invoice.sumAdditionalCost()
+ });
+ }
+
+ // ---------- END INVOICE RENDERING ----------
+
+ private Instant untilParameter(RestApi.RequestContext ctx) {
+ return ctx.queryParameters().getString("until")
+ .map(LocalDate::parse)
+ .map(date -> date.plusDays(1).atStartOfDay(ZoneOffset.UTC).toInstant())
+ .orElseGet(clock::instant);
+ }
+
+ private static String getInspectorFieldOrThrow(Inspector inspector, String field) {
+ if (!inspector.field(field).valid())
+ throw new BadRequestException("Field " + field + " cannot be null");
+ return inspector.field(field).asString();
+ }
+
+ private static Optional<Slime> slimeRequestMapper(RestApi.RequestContext requestContext) {
+ try {
+ return Optional.of(SlimeUtils.jsonToSlime(requestContext.requestContentOrThrow().content().readAllBytes()));
+ } catch (IOException e) {
+ throw new IllegalArgumentException("Could not parse JSON input", e);
+ }
+ }
+
+ private static HttpResponse slimeResponseMapper(RestApi.RequestContext ctx, Slime slime) {
+ return new SlimeJsonResponse(slime);
+ }
+
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/billing/CsvResponse.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/billing/CsvResponse.java
new file mode 100644
index 00000000000..5aa993f2727
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/billing/CsvResponse.java
@@ -0,0 +1,33 @@
+package com.yahoo.vespa.hosted.controller.restapi.billing;
+
+import com.yahoo.container.jdisc.HttpResponse;
+import org.apache.commons.csv.CSVFormat;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.util.List;
+
+class CsvResponse extends HttpResponse {
+ private final String[] header;
+ private final List<Object[]> rows;
+
+ CsvResponse(String[] header, List<Object[]> rows) {
+ super(200);
+ this.header = header;
+ this.rows = rows;
+ }
+
+ @Override
+ public void render(OutputStream outputStream) throws IOException {
+ var writer = new OutputStreamWriter(outputStream);
+ var printer = CSVFormat.DEFAULT.withRecordSeparator('\n').withHeader(this.header).print(writer);
+ for (var row : this.rows) printer.printRecord(row);
+ printer.flush();
+ }
+
+ @Override
+ public String getContentType() {
+ return "text/csv; charset=utf-8";
+ }
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandler.java
index 5973cc3fcf3..ac9612a56c5 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandler.java
@@ -18,6 +18,7 @@ import com.yahoo.slime.SlimeUtils;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequest;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.VespaChangeRequest;
import com.yahoo.vespa.hosted.controller.auditlog.AuditLoggingRequestHandler;
import com.yahoo.vespa.hosted.controller.maintenance.ChangeManagementAssessor;
import com.yahoo.vespa.hosted.controller.persistence.ChangeRequestSerializer;
@@ -50,6 +51,10 @@ public class ChangeManagementApiHandler extends AuditLoggingRequestHandler {
return get(request);
case POST:
return post(request);
+ case PATCH:
+ return patch(request);
+ case DELETE:
+ return delete(request);
default:
return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is unsupported");
}
@@ -65,6 +70,7 @@ public class ChangeManagementApiHandler extends AuditLoggingRequestHandler {
Path path = new Path(request.getUri());
if (path.matches("/changemanagement/v1/assessment/{changeRequestId}")) return changeRequestAssessment(path.get("changeRequestId"));
if (path.matches("/changemanagement/v1/vcmr")) return getVCMRs();
+ if (path.matches("/changemanagement/v1/vcmr/{vcmrId}")) return getVCMR(path.get("vcmrId"));
return ErrorResponse.notFoundError("Nothing at " + path);
}
@@ -74,6 +80,18 @@ public class ChangeManagementApiHandler extends AuditLoggingRequestHandler {
return ErrorResponse.notFoundError("Nothing at " + path);
}
+ private HttpResponse patch(HttpRequest request) {
+ Path path = new Path(request.getUri());
+ if (path.matches("/changemanagement/v1/vcmr/{vcmrId}")) return patchVCMR(request, path.get("vcmrId"));
+ return ErrorResponse.notFoundError("Nothing at " + path);
+ }
+
+ private HttpResponse delete(HttpRequest request) {
+ Path path = new Path(request.getUri());
+ if (path.matches("/changemanagement/v1/vcmr/{vcmrId}")) return deleteVCMR(path.get("vcmrId"));
+ return ErrorResponse.notFoundError("Nothing at " + path);
+ }
+
private Inspector inspectorOrThrow(HttpRequest request) {
try {
return SlimeUtils.jsonToSlime(request.getData().readAllBytes()).get();
@@ -183,6 +201,72 @@ public class ChangeManagementApiHandler extends AuditLoggingRequestHandler {
return new SlimeJsonResponse(slime);
}
+ private HttpResponse getVCMR(String vcmrId) {
+ var changeRequest = controller.curator().readChangeRequest(vcmrId);
+
+ if (changeRequest.isEmpty()) {
+ return ErrorResponse.notFoundError("No VCMR with id: " + vcmrId);
+ }
+
+ var slime = new Slime();
+ var cursor = slime.setObject();
+
+ ChangeRequestSerializer.writeChangeRequest(cursor, changeRequest.get());
+ return new SlimeJsonResponse(slime);
+ }
+
+ private HttpResponse patchVCMR(HttpRequest request, String vcmrId) {
+ var optionalChangeRequest = controller.curator().readChangeRequest(vcmrId);
+
+ if (optionalChangeRequest.isEmpty()) {
+ return ErrorResponse.notFoundError("No VCMR with id: " + vcmrId);
+ }
+
+ var changeRequest = optionalChangeRequest.get();
+ var inspector = inspectorOrThrow(request);
+
+ if (inspector.field("approval").valid()) {
+ var approval = ChangeRequest.Approval.valueOf(inspector.field("approval").asString());
+ changeRequest = changeRequest.withApproval(approval);
+ }
+
+ if (inspector.field("actionPlan").valid()) {
+ var actionPlan = ChangeRequestSerializer.readHostActionPlan(inspector.field("actionPlan"));
+ changeRequest = changeRequest.withActionPlan(actionPlan);
+ }
+
+ if (inspector.field("status").valid()) {
+ var status = VespaChangeRequest.Status.valueOf(inspector.field("status").asString());
+ changeRequest = changeRequest.withStatus(status);
+ }
+
+ try (var lock = controller.curator().lockChangeRequests()) {
+ controller.curator().writeChangeRequest(changeRequest);
+ }
+
+ var slime = new Slime();
+ var cursor = slime.setObject();
+ ChangeRequestSerializer.writeChangeRequest(cursor, changeRequest);
+ return new SlimeJsonResponse(slime);
+ }
+
+ private HttpResponse deleteVCMR(String vcmrId) {
+ var changeRequest = controller.curator().readChangeRequest(vcmrId);
+
+ if (changeRequest.isEmpty()) {
+ return ErrorResponse.notFoundError("No VCMR with id: " + vcmrId);
+ }
+
+ try (var lock = controller.curator().lockChangeRequests()) {
+ controller.curator().deleteChangeRequest(changeRequest.get());
+ }
+
+ var slime = new Slime();
+ var cursor = slime.setObject();
+ ChangeRequestSerializer.writeChangeRequest(cursor, changeRequest.get());
+ return new SlimeJsonResponse(slime);
+ }
+
private Optional<ZoneId> affectedZone(List<String> hosts) {
var affectedHosts = hosts.stream()
.map(HostName::from)
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/deployment/BadgeApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/deployment/BadgeApiHandler.java
index befec42e84e..ff24253128b 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/deployment/BadgeApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/deployment/BadgeApiHandler.java
@@ -5,19 +5,25 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.container.jdisc.LoggingRequestHandler;
-import com.yahoo.jdisc.Response;
import com.yahoo.jdisc.http.HttpRequest.Method;
+import com.yahoo.restapi.ErrorResponse;
import com.yahoo.restapi.Path;
import com.yahoo.vespa.hosted.controller.Controller;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
-import com.yahoo.restapi.ErrorResponse;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
+import com.yahoo.vespa.hosted.controller.deployment.DeploymentStatus;
+import com.yahoo.vespa.hosted.controller.deployment.JobStatus;
import com.yahoo.yolean.Exceptions;
+import java.io.IOException;
import java.io.OutputStream;
-import java.net.URI;
+import java.util.function.Predicate;
import java.util.logging.Level;
import java.util.logging.Logger;
+import static java.nio.charset.StandardCharsets.UTF_8;
+
/**
* This API serves redirects to a badge server.
*
@@ -62,24 +68,31 @@ public class BadgeApiHandler extends LoggingRequestHandler {
/** Returns a URI which points to an overview badge for the given application. */
private HttpResponse badge(String tenant, String application, String instance) {
- URI location = controller.jobController().overviewBadge(ApplicationId.from(tenant, application, instance));
- return redirect(location);
+ ApplicationId id = ApplicationId.from(tenant, application, instance);
+ DeploymentStatus status = controller.jobController().deploymentStatus(controller.applications().requireApplication(TenantAndApplicationId.from(id)));
+ Predicate<JobStatus> isDeclaredJob = job -> status.jobSteps().get(job.id()) != null && status.jobSteps().get(job.id()).isDeclared();
+ return svgResponse(Badges.overviewBadge(id,
+ status.jobs().instance(id.instance()).matching(isDeclaredJob),
+ controller.system()));
}
/** Returns a URI which points to a history badge for the given application and job type. */
private HttpResponse badge(String tenant, String application, String instance, String jobName, String historyLength) {
- URI location = controller.jobController().historicBadge(ApplicationId.from(tenant, application, instance),
- JobType.fromJobName(jobName),
- historyLength == null ? 5 : Math.min(32, Math.max(0, Integer.parseInt(historyLength))));
- return redirect(location);
+ ApplicationId id = ApplicationId.from(tenant, application, instance);
+ return svgResponse(Badges.historyBadge(id,
+ controller.jobController().jobStatus(new JobId(id, JobType.fromJobName(jobName))),
+ historyLength == null ? 5 : Math.min(32, Math.max(0, Integer.parseInt(historyLength)))));
}
- private static HttpResponse redirect(URI location) {
- HttpResponse httpResponse = new HttpResponse(Response.Status.FOUND) {
- @Override public void render(OutputStream outputStream) { }
+ private static HttpResponse svgResponse(String svg) {
+ return new HttpResponse(200) {
+ @Override public void render(OutputStream outputStream) throws IOException {
+ outputStream.write(svg.getBytes(UTF_8));
+ }
+ @Override public String getContentType() {
+ return "image/svg+xml; charset=UTF-8";
+ }
};
- httpResponse.headers().add("Location", location.toString());
- return httpResponse;
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/deployment/Badges.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/deployment/Badges.java
new file mode 100644
index 00000000000..2b3f954c2ef
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/deployment/Badges.java
@@ -0,0 +1,305 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.restapi.deployment;
+
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.SystemName;
+import com.yahoo.slime.ArrayTraverser;
+import com.yahoo.slime.SlimeUtils;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
+import com.yahoo.vespa.hosted.controller.deployment.JobList;
+import com.yahoo.vespa.hosted.controller.deployment.JobStatus;
+import com.yahoo.vespa.hosted.controller.deployment.Run;
+import com.yahoo.vespa.hosted.controller.deployment.RunStatus;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+import static java.util.stream.Collectors.toList;
+
+public class Badges {
+
+ // https://chrishewett.com/blog/calculating-text-width-programmatically/ thank you!
+ private static final String characterWidths = "[[\" \",35.156],[\"!\",39.355],[\"\\\"\",45.898],[\"#\",81.836],[\"$\",63.574],[\"%\",107.617],[\"&\",72.656],[\"'\",26.855],[\"(\",45.41],[\")\",45.41],[\"*\",63.574],[\"+\",81.836],[\",\",36.377],[\"-\",45.41],[\".\",36.377],[\"/\",45.41],[\"0\",63.574],[\"1\",63.574],[\"2\",63.574],[\"3\",63.574],[\"4\",63.574],[\"5\",63.574],[\"6\",63.574],[\"7\",63.574],[\"8\",63.574],[\"9\",63.574],[\":\",45.41],[\";\",45.41],[\"<\",81.836],[\"=\",81.836],[\">\",81.836],[\"?\",54.541],[\"@\",100],[\"A\",68.359],[\"B\",68.555],[\"C\",69.824],[\"D\",77.051],[\"E\",63.232],[\"F\",57.471],[\"G\",77.539],[\"H\",75.146],[\"I\",42.09],[\"J\",45.459],[\"K\",69.287],[\"L\",55.664],[\"M\",84.277],[\"N\",74.805],[\"O\",78.711],[\"P\",60.303],[\"Q\",78.711],[\"R\",69.531],[\"S\",68.359],[\"T\",61.621],[\"U\",73.193],[\"V\",68.359],[\"W\",98.877],[\"X\",68.506],[\"Y\",61.523],[\"Z\",68.506],[\"[\",45.41],[\"\\\\\",45.41],[\"]\",45.41],[\"^\",81.836],[\"_\",63.574],[\"`\",63.574],[\"a\",60.059],[\"b\",62.305],[\"c\",52.1],[\"d\",62.305],[\"e\",59.57],[\"f\",35.156],[\"g\",62.305],[\"h\",63.281],[\"i\",27.441],[\"j\",34.424],[\"k\",59.18],[\"l\",27.441],[\"m\",97.266],[\"n\",63.281],[\"o\",60.693],[\"p\",62.305],[\"q\",62.305],[\"r\",42.676],[\"s\",52.1],[\"t\",39.404],[\"u\",63.281],[\"v\",59.18],[\"w\",81.836],[\"x\",59.18],[\"y\",59.18],[\"z\",52.539],[\"{\",63.477],[\"|\",45.41],[\"}\",63.477],[\"~\",81.836],[\"_median\",63.281]]";
+ private static final double[] widths = new double[128]; // 0-94 hold widths for corresponding chars (+32); 95 holds the fallback width.
+
+ static {
+ SlimeUtils.jsonToSlimeOrThrow(characterWidths).get()
+ .traverse((ArrayTraverser) (i, pair) -> {
+ if (i < 95)
+ assert Arrays.equals(new byte[]{(byte) (i + 32)}, pair.entry(0).asUtf8()) : i + ": " + pair.entry(0).asString();
+ else
+ assert "_median".equals(pair.entry(0).asString());
+
+ widths[i] = pair.entry(1).asDouble();
+ });
+ }
+
+ /** Character pixel width of a 100px size Verdana font rendering of the given code point, for code points in the range [32, 126]. */
+ public static double widthOf(int codePoint) {
+ return 32 <= codePoint && codePoint <= 126 ? widths[codePoint - 32] : widths[95];
+ }
+
+ /** Computes an approximate pixel width of the given size Verdana font rendering of the given string, ignoring kerning. */
+ public static double widthOf(String text, int size) {
+ return text.codePoints().mapToDouble(Badges::widthOf).sum() * (size - 0.5) / 100;
+ }
+
+ /** Computes an approximate pixel width of a 11px size Verdana font rendering of the given string, ignoring kerning. */
+ public static double widthOf(String text) {
+ return widthOf(text, 11);
+ }
+
+ static String colorOf(Run run, Boolean wasOk) {
+ switch (run.status()) {
+ case running:
+ return wasOk ? "url(#run-on-success)" : "url(#run-on-failure)";
+ case success:
+ return success;
+ default:
+ return failure;
+ }
+ }
+
+ static String nameOf(JobType type) {
+ return type.isTest() ? type.isProduction() ? "test"
+ : type.jobName()
+ : type.jobName().replace("production-", "");
+ }
+
+ static final double xPad = 6;
+ static final double logoSize = 16;
+ static final String dark = "#404040";
+ static final String success = "#00f244";
+ static final String running = "#ab83ff";
+ static final String failure = "#bf103c";
+
+ static void addText(List<String> texts, String text, double x, double width) {
+ addText(texts, text, x, width, 11);
+ }
+
+ static void addText(List<String> texts, String text, double x, double width, int size) {
+ texts.add(" <text font-size='" + size + "' x='" + (x + 0.5) + "' y='" + (15) + "' fill='#000' fill-opacity='.4' textLength='" + width + "'>" + text + "</text>\n");
+ texts.add(" <text font-size='" + size + "' x='" + x + "' y='" + (14) + "' fill='#fff' textLength='" + width + "'>" + text + "</text>\n");
+ }
+
+ static void addShade(List<String> sections, double x, double width) {
+ sections.add(" <rect x='" + (x - 6) + "' rx='3' width='" + (width + 6) + "' height='20' fill='url(#shade)'/>\n");
+ }
+
+ static void addShadow(List<String> sections, double x) {
+ sections.add(" <rect x='" + (x - 6) + "' rx='3' width='" + 8 + "' height='20' fill='url(#shadow)'/>\n");
+ }
+
+ static String historyBadge(ApplicationId id, JobStatus status, int length) {
+ List<String> sections = new ArrayList<>();
+ List<String> texts = new ArrayList<>();
+
+ double x = 0;
+ String text = id.toFullString();
+ double textWidth = widthOf(text);
+ double dx = xPad + logoSize + xPad + textWidth + xPad;
+
+ addShade(sections, x, dx);
+ sections.add(" <rect width='" + dx + "' height='20' fill='" + dark + "'/>\n");
+ addText(texts, text, x + (xPad + logoSize + dx) / 2, textWidth);
+ x += dx;
+
+ if (status.lastTriggered().isEmpty())
+ return badge(sections, texts, x);
+
+ Run lastTriggered = status.lastTriggered().get();
+ List<Run> runs = status.runs().descendingMap().values().stream()
+ .filter(Run::hasEnded)
+ .limit(length + (lastTriggered.hasEnded() ? 0 : 1))
+ .collect(toList());
+
+ boolean isOk = runs.isEmpty() || runs.get(0).status() == RunStatus.success;
+ if ( ! lastTriggered.hasEnded())
+ runs.remove(0);
+
+ text = lastTriggered.id().type().jobName();
+ textWidth = widthOf(text);
+ dx = xPad + textWidth + xPad;
+ addShade(sections, x, dx);
+ sections.add(" <rect x='" + (x - 6) + "' rx='3' width='" + (dx + 6) + "' height='20' fill='" + colorOf(lastTriggered, isOk) + "'/>\n");
+ addShadow(sections, x + dx);
+ addText(texts, text, x + dx / 2, textWidth);
+ x += dx;
+
+ dx = xPad * (192.0 / (32 + runs.size())); // Broader sections with shorter history.
+ for (Run run : runs) {
+ addShade(sections, x, dx);
+ sections.add(" <rect x='" + (x - 6) + "' rx='3' width='" + (dx + 6) + "' height='20' fill='" + colorOf(run, null) + "'/>\n");
+ addShadow(sections, x + dx);
+ dx *= Math.pow(0.3, 1.0 / (runs.size() + 8)); // Gradually narrowing sections with age.
+ x += dx;
+ }
+ Collections.reverse(sections);
+
+ return badge(sections, texts, x);
+ }
+
+ static String overviewBadge(ApplicationId id, JobList jobs, SystemName system) {
+ // Put production tests right after their deployments, for a more compact rendering.
+ List<Run> runs = new ArrayList<>(jobs.lastTriggered().asList());
+ boolean anyTest = false;
+ for (int i = 0; i < runs.size(); i++) {
+ Run run = runs.get(i);
+ if (run.id().type().isProduction() && run.id().type().isTest()) {
+ anyTest = true;
+ int j = i;
+ while ( ! runs.get(j - 1).id().type().zone(system).equals(run.id().type().zone(system)))
+ runs.set(j, runs.get(--j));
+ runs.set(j, run);
+ }
+ }
+
+ List<String> sections = new ArrayList<>();
+ List<String> texts = new ArrayList<>();
+
+ double x = 0;
+ String text = id.toFullString();
+ double textWidth = widthOf(text);
+ double dx = xPad + logoSize + xPad + textWidth + xPad;
+ double tdx = xPad + widthOf("test");
+
+ addShade(sections, 0, dx);
+ sections.add(" <rect width='" + dx + "' height='20' fill='" + dark + "'/>\n");
+ addText(texts, text, x + (xPad + logoSize + dx) / 2, textWidth);
+ x += dx;
+
+ for (int i = 0; i < runs.size(); i++) {
+ Run run = runs.get(i);
+ Run test = i + 1 < runs.size() ? runs.get(i + 1) : null;
+ if (test == null || ! test.id().type().isTest() || ! test.id().type().isProduction())
+ test = null;
+
+ boolean isTest = run.id().type().isTest() && run.id().type().isProduction();
+ text = nameOf(run.id().type());
+ textWidth = widthOf(text, isTest ? 9 : 11);
+ dx = xPad + textWidth + (isTest ? 0 : xPad);
+ boolean wasOk = jobs.get(run.id().job()).flatMap(JobStatus::lastStatus).map(RunStatus.success::equals).orElse(true);
+
+ addText(texts, text, x + (dx - (isTest ? xPad : 0)) / 2, textWidth, isTest ? 9 : 11);
+
+ // Add "deploy" when appropriate
+ if ( ! run.id().type().isTest() && anyTest) {
+ String deploy = "deploy";
+ textWidth = widthOf(deploy, 9);
+ addText(texts, deploy, x + dx + textWidth / 2, textWidth, 9);
+ dx += textWidth + xPad;
+ }
+
+ // Add shade across zone section.
+ if ( ! (isTest))
+ addShade(sections, x, dx + (test != null ? tdx : 0));
+
+ // Add colored section for job ...
+ if (test == null)
+ sections.add(" <rect x='" + (x - 16) + "' rx='3' width='" + (dx + 16) + "' height='20' fill='" + colorOf(run, wasOk) + "'/>\n");
+ // ... with a slant if a test is next.
+ else
+ sections.add(" <polygon points='" + (x - 6) + " 0 " + (x - 6) + " 20 " + (x + dx - 7) + " 20 " + (x + dx + 1) + " 0' fill='" + colorOf(run, wasOk) + "'/>\n");
+
+ // Cast a shadow onto the next zone ...
+ if (test == null)
+ addShadow(sections, x + dx);
+
+ x += dx;
+ }
+ Collections.reverse(sections);
+
+ return badge(sections, texts, x);
+ }
+
+ static String badge(List<String> sections, List<String> texts, double width) {
+ return "<svg xmlns='http://www.w3.org/2000/svg' width='" + width + "' height='20' role='img' aria-label='Deployment Status'>\n" +
+ " <title>Deployment Status</title>\n" +
+ // Lighting to give the badge a 3d look--dispersion at the top, shadow at the bottom.
+ " <linearGradient id='light' x2='0' y2='100%'>\n" +
+ " <stop offset='0' stop-color='#fff' stop-opacity='.5'/>\n" +
+ " <stop offset='.1' stop-color='#fff' stop-opacity='.15'/>\n" +
+ " <stop offset='.9' stop-color='#000' stop-opacity='.15'/>\n" +
+ " <stop offset='1' stop-color='#000' stop-opacity='.5'/>\n" +
+ " </linearGradient>\n" +
+ // Dispersed light at the left of the badge.
+ " <linearGradient id='left-light' x2='100%' y2='0'>\n" +
+ " <stop offset='0' stop-color='#fff' stop-opacity='.3'/>\n" +
+ " <stop offset='.5' stop-color='#fff' stop-opacity='.1'/>\n" +
+ " <stop offset='1' stop-color='#fff' stop-opacity='.0'/>\n" +
+ " </linearGradient>\n" +
+ // Shadow at the right of the badge.
+ " <linearGradient id='right-shadow' x2='100%' y2='0'>\n" +
+ " <stop offset='0' stop-color='#000' stop-opacity='.0'/>\n" +
+ " <stop offset='.5' stop-color='#000' stop-opacity='.1'/>\n" +
+ " <stop offset='1' stop-color='#000' stop-opacity='.3'/>\n" +
+ " </linearGradient>\n" +
+ // Shadow to highlight the border between sections, without using a heavy separator.
+ " <linearGradient id='shadow' x2='100%' y2='0'>\n" +
+ " <stop offset='0' stop-color='#222' stop-opacity='.3'/>\n" +
+ " <stop offset='.625' stop-color='#555' stop-opacity='.3'/>\n" +
+ " <stop offset='.9' stop-color='#555' stop-opacity='.05'/>\n" +
+ " <stop offset='1' stop-color='#555' stop-opacity='.0'/>\n" +
+ " </linearGradient>\n" +
+ // Weak shade across each panel to highlight borders further.
+ " <linearGradient id='shade' x2='100%' y2='0'>\n" +
+ " <stop offset='0' stop-color='#000' stop-opacity='.20'/>\n" +
+ " <stop offset='0.05' stop-color='#000' stop-opacity='.10'/>\n" +
+ " <stop offset='1' stop-color='#000' stop-opacity='.0'/>\n" +
+ " </linearGradient>\n" +
+ // Running color sloshing back and forth on top of the failure color.
+ " <linearGradient id='run-on-failure' x1='40%' x2='80%' y2='0%'>\n" +
+ " <stop offset='0' stop-color='" + running + "' />\n" +
+ " <stop offset='1' stop-color='" + failure + "' />\n" +
+ " <animate attributeName='x1' values='-110%;150%;20%;-110%' dur='6s' repeatCount='indefinite' />\n" +
+ " <animate attributeName='x2' values='-10%;250%;120%;-10%' dur='6s' repeatCount='indefinite' />\n" +
+ " </linearGradient>\n" +
+ // Running color sloshing back and forth on top of the success color.
+ " <linearGradient id='run-on-success' x1='40%' x2='80%' y2='0%'>\n" +
+ " <stop offset='0' stop-color='" + running + "' />\n" +
+ " <stop offset='1' stop-color='" + success + "' />\n" +
+ " <animate attributeName='x1' values='-110%;150%;20%;-110%' dur='6s' repeatCount='indefinite' />\n" +
+ " <animate attributeName='x2' values='-10%;250%;120%;-10%' dur='6s' repeatCount='indefinite' />\n" +
+ " </linearGradient>\n" +
+ // Clipping to give the badge rounded corners.
+ " <clipPath id='rounded'>\n" +
+ " <rect width='" + width + "' height='20' rx='3' fill='#fff'/>\n" +
+ " </clipPath>\n" +
+ // Badge section backgrounds with status colors and shades for distinction.
+ " <g clip-path='url(#rounded)'>\n" +
+ String.join("", sections) +
+ " <rect width='" + 2 + "' height='20' fill='url(#left-light)'/>\n" +
+ " <rect x='" + (width - 2) + "' width='" + 2 + "' height='20' fill='url(#right-shadow)'/>\n" +
+ " <rect width='" + width + "' height='20' fill='url(#light)'/>\n" +
+ " </g>\n" +
+ " <g fill='#fff' text-anchor='middle' font-family='Verdana,Geneva,DejaVu Sans,sans-serif' text-rendering='geometricPrecision' font-size='11'>\n" +
+ // The vespa.ai logo (with a slightly coloured shadow)!
+ " <svg x='" + (xPad + 0.5) + "' y='" + ((20 - logoSize) / 2 + 1) + "' width='" + logoSize + "' height='" + logoSize + "' viewBox='0 0 150 150'>\n" +
+ " <polygon fill='#402a14' fill-opacity='0.5' points='84.84 10 34.1 44.46 34.1 103.78 84.84 68.02 135.57 103.78 135.57 44.46 84.84 10'/>\n" +
+ " <polygon fill='#402a14' fill-opacity='0.5' points='84.84 68.02 84.84 10 135.57 44.46 135.57 103.78 84.84 68.02'/>\n" +
+ " <polygon fill='#061a29' fill-opacity='0.5' points='65.07 81.99 14.34 46.22 14.34 105.54 65.07 140 115.81 105.54 115.81 46.22 65.07 81.99'/>\n" +
+ " <polygon fill='#061a29' fill-opacity='0.5' points='65.07 81.99 65.07 140 14.34 105.54 14.34 46.22 65.07 81.99'/>\n" +
+ " </svg>\n" +
+ " <svg x='" + xPad + "' y='" + ((20 - logoSize) / 2) + "' width='" + logoSize + "' height='" + logoSize + "' viewBox='0 0 150 150'>\n" +
+ " <linearGradient id='yellow-shaded' x1='91.17' y1='44.83' x2='136.24' y2='73.4' gradientUnits='userSpaceOnUse'>\n" +
+ " <stop offset='0.01' stop-color='#c6783e'/>\n" +
+ " <stop offset='0.54' stop-color='#ff9750'/>\n" +
+ " </linearGradient>\n" +
+ " <linearGradient id='blue-shaded' x1='60.71' y1='104.56' x2='-15.54' y2='63' gradientUnits='userSpaceOnUse'>\n" +
+ " <stop offset='0' stop-color='#005a8e'/>\n" +
+ " <stop offset='0.54' stop-color='#1a7db6'/>\n" +
+ " </linearGradient>\n" +
+ " <polygon fill='#ff9d4b' points='84.84 10 34.1 44.46 34.1 103.78 84.84 68.02 135.57 103.78 135.57 44.46 84.84 10'/>\n" +
+ " <polygon fill='url(#yellow-shaded)' points='84.84 68.02 84.84 10 135.57 44.46 135.57 103.78 84.84 68.02'/>\n" +
+ " <polygon fill='#1a7db6' points='65.07 81.99 14.34 46.22 14.34 105.54 65.07 140 115.81 105.54 115.81 46.22 65.07 81.99'/>\n" +
+ " <polygon fill='url(#blue-shaded)' points='65.07 81.99 65.07 140 14.34 105.54 14.34 46.22 65.07 81.99'/>\n" +
+ " </svg>\n" +
+ // Application ID and job names.
+ String.join("", texts) +
+ " </g>\n" +
+ "</svg>\n";
+ }
+
+} \ No newline at end of file
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java
index 898b2531460..e2a8be15361 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java
@@ -119,54 +119,63 @@ public class RoutingPolicies {
}
}
- /** Update global DNS record for given policies */
+ /** Update global DNS records for given policies */
private void updateGlobalDnsOf(Collection<RoutingPolicy> routingPolicies, Set<ZoneId> inactiveZones, @SuppressWarnings("unused") Lock lock) {
Map<RoutingId, List<RoutingPolicy>> routingTable = routingTableFrom(routingPolicies);
for (Map.Entry<RoutingId, List<RoutingPolicy>> routeEntry : routingTable.entrySet()) {
- Collection<RegionEndpoint> regionEndpoints = computeRegionEndpoints(routeEntry.getValue(), inactiveZones);
- // Create a weighted ALIAS per region, pointing to all zones within the same region
- regionEndpoints.forEach(regionEndpoint -> {
- controller.nameServiceForwarder().createAlias(RecordName.from(regionEndpoint.target().name().value()),
- Collections.unmodifiableSet(regionEndpoint.zoneTargets()),
- Priority.normal);
- });
-
- // Create global latency-based ALIAS pointing to each per-region weighted ALIAS
- Set<AliasTarget> latencyTargets = new LinkedHashSet<>();
- Set<AliasTarget> inactiveLatencyTargets = new LinkedHashSet<>();
- for (var regionEndpoint : regionEndpoints) {
- if (regionEndpoint.active()) {
- latencyTargets.add(regionEndpoint.target());
- } else {
- inactiveLatencyTargets.add(regionEndpoint.target());
- }
- }
- // If all targets are configured out, all targets are set in. We do this because otherwise removing 100% of
- // the ALIAS records would cause the global endpoint to stop resolving entirely (NXDOMAIN).
- if (latencyTargets.isEmpty() && !inactiveLatencyTargets.isEmpty()) {
- latencyTargets.addAll(inactiveLatencyTargets);
- inactiveLatencyTargets.clear();
+ RoutingId routingId = routeEntry.getKey();
+ controller.routing().endpointsOf(routingId.application())
+ .named(routingId.endpointId())
+ .not().requiresRotation()
+ .forEach(endpoint -> updateGlobalDnsOf(endpoint, inactiveZones, routeEntry.getValue()));
+ }
+ }
+
+ /** Update global DNS records for given global endpoint */
+ private void updateGlobalDnsOf(Endpoint endpoint, Set<ZoneId> inactiveZones, List<RoutingPolicy> policies) {
+ if (endpoint.scope() != Endpoint.Scope.global) throw new IllegalArgumentException("Endpoint " + endpoint + " is not global");
+ // Create a weighted ALIAS per region, pointing to all zones within the same region
+ Collection<RegionEndpoint> regionEndpoints = computeRegionEndpoints(policies, inactiveZones, endpoint.legacy());
+ regionEndpoints.forEach(regionEndpoint -> {
+ controller.nameServiceForwarder().createAlias(RecordName.from(regionEndpoint.target().name().value()),
+ Collections.unmodifiableSet(regionEndpoint.zoneTargets()),
+ Priority.normal);
+ });
+
+ // Create global latency-based ALIAS pointing to each per-region weighted ALIAS
+ Set<AliasTarget> latencyTargets = new LinkedHashSet<>();
+ Set<AliasTarget> inactiveLatencyTargets = new LinkedHashSet<>();
+ for (var regionEndpoint : regionEndpoints) {
+ if (regionEndpoint.active()) {
+ latencyTargets.add(regionEndpoint.target());
+ } else {
+ inactiveLatencyTargets.add(regionEndpoint.target());
}
- var endpoints = controller.routing().endpointsOf(routeEntry.getKey().application())
- .named(routeEntry.getKey().endpointId())
- .not().requiresRotation();
- endpoints.forEach(endpoint -> controller.nameServiceForwarder().createAlias(RecordName.from(endpoint.dnsName()),
- latencyTargets, Priority.normal));
- inactiveLatencyTargets.forEach(t -> controller.nameServiceForwarder()
- .removeRecords(Record.Type.ALIAS,
- RecordData.fqdn(t.name().value()),
- Priority.normal));
}
+
+ // If all targets are configured OUT, all targets are kept IN. We do this because otherwise removing 100% of
+ // the ALIAS records would cause the global endpoint to stop resolving entirely (NXDOMAIN).
+ if (latencyTargets.isEmpty() && !inactiveLatencyTargets.isEmpty()) {
+ latencyTargets.addAll(inactiveLatencyTargets);
+ inactiveLatencyTargets.clear();
+ }
+
+ controller.nameServiceForwarder().createAlias(RecordName.from(endpoint.dnsName()), latencyTargets, Priority.normal);
+ inactiveLatencyTargets.forEach(t -> controller.nameServiceForwarder()
+ .removeRecords(Record.Type.ALIAS,
+ RecordData.fqdn(t.name().value()),
+ Priority.normal));
}
+
/** Compute region endpoints and their targets from given policies */
- private Collection<RegionEndpoint> computeRegionEndpoints(List<RoutingPolicy> policies, Set<ZoneId> inactiveZones) {
+ private Collection<RegionEndpoint> computeRegionEndpoints(List<RoutingPolicy> policies, Set<ZoneId> inactiveZones, boolean legacy) {
Map<Endpoint, RegionEndpoint> endpoints = new LinkedHashMap<>();
RoutingMethod routingMethod = RoutingMethod.exclusive;
for (var policy : policies) {
if (policy.dnsZone().isEmpty()) continue;
if (!controller.zoneRegistry().routingMethods(policy.id().zone()).contains(routingMethod)) continue;
- Endpoint regionEndpoint = policy.regionEndpointIn(controller.system(), routingMethod);
+ Endpoint regionEndpoint = policy.regionEndpointIn(controller.system(), routingMethod, legacy);
var zonePolicy = db.readZoneRoutingPolicy(policy.id().zone());
long weight = 1;
if (isConfiguredOut(policy, zonePolicy, inactiveZones)) {
@@ -206,10 +215,11 @@ public class RoutingPolicies {
/** Update zone DNS record for given policy */
private void updateZoneDnsOf(RoutingPolicy policy) {
- var name = RecordName.from(policy.endpointIn(controller.system(), RoutingMethod.exclusive, controller.zoneRegistry())
- .dnsName());
- var data = RecordData.fqdn(policy.canonicalName().value());
- nameServiceForwarderIn(policy.id().zone()).createCname(name, data, Priority.normal);
+ for (var endpoint : policy.endpointsIn(controller.system(), RoutingMethod.exclusive, controller.zoneRegistry())) {
+ var name = RecordName.from(endpoint.dnsName());
+ var data = RecordData.fqdn(policy.canonicalName().value());
+ nameServiceForwarderIn(policy.id().zone()).createCname(name, data, Priority.normal);
+ }
}
/** Remove policies and zone DNS records unreferenced by given load balancers */
@@ -221,11 +231,12 @@ public class RoutingPolicies {
// Leave active load balancers and irrelevant zones alone
if (activeIds.contains(policy.id()) ||
!policy.id().zone().equals(allocation.deployment.zoneId())) continue;
-
- var dnsName = policy.endpointIn(controller.system(), RoutingMethod.exclusive, controller.zoneRegistry()).dnsName();
- nameServiceForwarderIn(allocation.deployment.zoneId()).removeRecords(Record.Type.CNAME,
- RecordName.from(dnsName),
- Priority.normal);
+ for (var endpoint : policy.endpointsIn(controller.system(), RoutingMethod.exclusive, controller.zoneRegistry())) {
+ var dnsName = endpoint.dnsName();
+ nameServiceForwarderIn(allocation.deployment.zoneId()).removeRecords(Record.Type.CNAME,
+ RecordName.from(dnsName),
+ Priority.normal);
+ }
newPolicies.remove(policy.id());
}
db.writeRoutingPolicies(allocation.deployment.applicationId(), newPolicies);
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicy.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicy.java
index a0fecbdf9e1..ae33d214ecc 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicy.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicy.java
@@ -11,6 +11,9 @@ import com.yahoo.vespa.hosted.controller.application.Endpoint.Port;
import com.yahoo.vespa.hosted.controller.application.EndpointId;
import com.yahoo.vespa.hosted.controller.application.SystemApplication;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
@@ -69,17 +72,49 @@ public class RoutingPolicy {
return new RoutingPolicy(id, canonicalName, dnsZone, endpoints, status);
}
- /** Returns the zone endpoint of this */
- public Endpoint endpointIn(SystemName system, RoutingMethod routingMethod, ZoneRegistry zoneRegistry) {
+ /** Returns the zone endpoints of this */
+ public List<Endpoint> endpointsIn(SystemName system, RoutingMethod routingMethod, ZoneRegistry zoneRegistry) {
Optional<Endpoint> infraEndpoint = SystemApplication.matching(id.owner())
.flatMap(app -> app.endpointIn(id.zone(), zoneRegistry));
- return infraEndpoint.orElseGet(() -> endpoint(routingMethod).target(id.cluster(), id.zone())
- .in(system));
+ if (infraEndpoint.isPresent()) {
+ return List.of(infraEndpoint.get());
+ }
+ List<Endpoint> endpoints = new ArrayList<>(3);
+ endpoints.add(endpoint(routingMethod).target(id.cluster(), id.zone()).in(system));
+ if (system.isPublic()) {
+ endpoints.add(endpoint(routingMethod).target(id.cluster(), id.zone()).legacy().in(system));
+ }
+ // Add legacy endpoints
+ if (routingMethod == RoutingMethod.shared) {
+ endpoints.add(endpoint(routingMethod).target(id.cluster(), id.zone())
+ .on(Port.plain(4080))
+ .legacy()
+ .in(system));
+ endpoints.add(endpoint(routingMethod).target(id.cluster(), id.zone())
+ .on(Port.tls(4443))
+ .legacy()
+ .in(system));
+ }
+ return endpoints;
+ }
+
+ /** Returns all region endpoints of this */
+ public List<Endpoint> regionEndpointsIn(SystemName system, RoutingMethod routingMethod) {
+ List<Endpoint> endpoints = new ArrayList<>(2);
+ endpoints.add(regionEndpointIn(system, routingMethod, false));
+ if (system.isPublic()) {
+ endpoints.add(regionEndpointIn(system, routingMethod, true));
+ }
+ return Collections.unmodifiableList(endpoints);
}
/** Returns the region endpoint of this */
- public Endpoint regionEndpointIn(SystemName system, RoutingMethod routingMethod) {
- return endpoint(routingMethod).targetRegion(id.cluster(), id.zone()).in(system);
+ public Endpoint regionEndpointIn(SystemName system, RoutingMethod routingMethod, boolean legacy) {
+ Endpoint.EndpointBuilder endpoint = endpoint(routingMethod).targetRegion(id.cluster(), id.zone());
+ if (legacy) {
+ endpoint = endpoint.legacy();
+ }
+ return endpoint.in(system);
}
@Override
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
index 0c0d1d80adb..e52d1900a9d 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
@@ -197,7 +197,7 @@ public class ControllerTest {
}
@Test
- public void testGlobalRotations() {
+ public void testGlobalRotationStatus() {
var context = tester.newDeploymentContext();
var zone1 = ZoneId.from("prod", "us-west-1");
var zone2 = ZoneId.from("prod", "us-east-3");
@@ -229,7 +229,7 @@ public class ControllerTest {
}
@Test
- public void testDnsAliasRegistration() {
+ public void testDnsUpdatesForGlobalEndpoint() {
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("default", "foo")
@@ -254,10 +254,15 @@ public class ControllerTest {
assertTrue(record.isPresent());
assertEquals("app1--tenant1.global.vespa.oath.cloud", record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
+
+ List<String> globalDnsNames = tester.controller().routing().endpointsOf(context.instanceId())
+ .scope(Endpoint.Scope.global)
+ .mapToList(Endpoint::dnsName);
+ assertEquals(List.of("app1--tenant1.global.vespa.oath.cloud"), globalDnsNames);
}
@Test
- public void testDnsAliasRegistrationLegacy() {
+ public void testDnsUpdatesForGlobalEndpointLegacySyntax() {
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.globalServiceId("foo")
@@ -293,10 +298,18 @@ public class ControllerTest {
assertTrue(record.isPresent());
assertEquals("app1.tenant1.global.vespa.yahooapis.com", record.get().name().asString());
assertEquals("rotation-fqdn-01.", record.get().data().asString());
+
+ List<String> globalDnsNames = tester.controller().routing().endpointsOf(context.instanceId())
+ .scope(Endpoint.Scope.global)
+ .mapToList(Endpoint::dnsName);
+ assertEquals(List.of("app1--tenant1.global.vespa.oath.cloud",
+ "app1.tenant1.global.vespa.yahooapis.com",
+ "app1--tenant1.global.vespa.yahooapis.com"),
+ globalDnsNames);
}
@Test
- public void testDnsAliasRegistrationWithEndpoints() {
+ public void testDnsUpdatesForMultipleGlobalEndpoints() {
var context = tester.newDeploymentContext("tenant1", "app1", "default");
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.endpoint("foobar", "qrs", "us-west-1", "us-central-1") // Rotation 01
@@ -349,7 +362,7 @@ public class ControllerTest {
}
@Test
- public void testDnsAliasRegistrationWithChangingEndpoints() {
+ public void testDnsUpdatesForGlobalEndpointChanges() {
var context = tester.newDeploymentContext("tenant1", "app1", "default");
var west = ZoneId.from("prod", "us-west-1");
var central = ZoneId.from("prod", "us-central-1");
@@ -491,7 +504,7 @@ public class ControllerTest {
}
@Test
- public void testUpdatesExistingDnsAlias() {
+ public void testDnsUpdatesWithChangeInRotationAssignment() {
// Application 1 is deployed and deleted
{
var context = tester.newDeploymentContext("tenant1", "app1", "default");
@@ -586,7 +599,7 @@ public class ControllerTest {
var context = tester.newDeploymentContext();
ZoneId zone = ZoneId.from("dev", "us-east-1");
tester.controllerTester().zoneRegistry()
- .setRoutingMethod(ZoneApiMock.from(zone), RoutingMethod.shared, RoutingMethod.sharedLayer4);
+ .setRoutingMethod(ZoneApiMock.from(zone), RoutingMethod.shared, RoutingMethod.sharedLayer4);
// Deploy
context.runJob(zone, applicationPackage);
@@ -719,7 +732,7 @@ public class ControllerTest {
}
@Test
- public void testDeployWithCrossCloudEndpoints() {
+ public void testDeployWithGlobalEndpointsInMultipleClouds() {
tester.controllerTester().zoneRegistry().setZones(
ZoneApiMock.fromId("prod.us-west-1"),
ZoneApiMock.newBuilder().with(CloudName.from("aws")).withId("prod.aws-us-east-1").build()
@@ -821,7 +834,7 @@ public class ControllerTest {
@Test
public void testDeploymentDirectRouting() {
// Rotation-less system
- DeploymentTester tester = new DeploymentTester(new ControllerTester(new RotationsConfig.Builder().build()));
+ DeploymentTester tester = new DeploymentTester(new ControllerTester(new RotationsConfig.Builder().build(), main));
var context = tester.newDeploymentContext();
var zone1 = ZoneId.from("prod", "us-west-1");
var zone2 = ZoneId.from("prod", "us-east-3");
@@ -896,6 +909,14 @@ public class ControllerTest {
"application--tenant.global.vespa.oath.cloud"),
tester.configServer().containerEndpoints().get(context.deploymentIdIn(zone)));
}
+ List<String> zoneDnsNames = tester.controller().routing().endpointsOf(context.deploymentIdIn(zone1))
+ .scope(Endpoint.Scope.zone)
+ .mapToList(Endpoint::dnsName);
+ assertEquals(List.of("application--tenant.us-west-1.vespa.oath.cloud",
+ "application.tenant.us-west-1.prod.vespa.yahooapis.com",
+ "application--tenant.us-west-1.prod.vespa.yahooapis.com",
+ "application.tenant.us-west-1.vespa.oath.cloud"),
+ zoneDnsNames);
}
@Test
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java
index 03487163936..006e4e63136 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java
@@ -6,7 +6,9 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.RegionName;
+import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.TenantName;
+import com.yahoo.config.provision.zone.RoutingMethod;
import com.yahoo.config.provision.zone.ZoneApi;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.test.ManualClock;
@@ -16,6 +18,7 @@ import com.yahoo.vespa.athenz.api.AthenzUser;
import com.yahoo.vespa.athenz.api.OktaAccessToken;
import com.yahoo.vespa.athenz.api.OktaIdentityToken;
import com.yahoo.vespa.flags.InMemoryFlagSource;
+import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.hosted.controller.api.identifiers.Property;
import com.yahoo.vespa.hosted.controller.api.identifiers.PropertyId;
import com.yahoo.vespa.hosted.controller.api.integration.athenz.AthenzClientFactoryMock;
@@ -25,6 +28,8 @@ import com.yahoo.vespa.hosted.controller.api.integration.dns.Record;
import com.yahoo.vespa.hosted.controller.api.integration.dns.RecordName;
import com.yahoo.vespa.hosted.controller.api.integration.organization.Contact;
import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockMavenRepository;
+import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockUserManagement;
+import com.yahoo.vespa.hosted.controller.api.role.Role;
import com.yahoo.vespa.hosted.controller.api.role.SimplePrincipal;
import com.yahoo.vespa.hosted.controller.application.SystemApplication;
import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
@@ -34,6 +39,7 @@ import com.yahoo.vespa.hosted.controller.integration.ConfigServerMock;
import com.yahoo.vespa.hosted.controller.integration.MetricsMock;
import com.yahoo.vespa.hosted.controller.integration.SecretStoreMock;
import com.yahoo.vespa.hosted.controller.integration.ServiceRegistryMock;
+import com.yahoo.vespa.hosted.controller.integration.ZoneApiMock;
import com.yahoo.vespa.hosted.controller.integration.ZoneRegistryMock;
import com.yahoo.vespa.hosted.controller.persistence.CuratorDb;
import com.yahoo.vespa.hosted.controller.persistence.MockCuratorDb;
@@ -41,6 +47,7 @@ import com.yahoo.vespa.hosted.controller.restapi.ContainerTester;
import com.yahoo.vespa.hosted.controller.security.AthenzCredentials;
import com.yahoo.vespa.hosted.controller.security.AthenzTenantSpec;
import com.yahoo.vespa.hosted.controller.security.Auth0Credentials;
+import com.yahoo.vespa.hosted.controller.security.CloudAccessControl;
import com.yahoo.vespa.hosted.controller.security.CloudTenantSpec;
import com.yahoo.vespa.hosted.controller.security.Credentials;
import com.yahoo.vespa.hosted.controller.security.TenantSpec;
@@ -54,15 +61,16 @@ import java.time.Duration;
import java.time.Instant;
import java.time.ZoneOffset;
import java.util.Arrays;
-import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.OptionalLong;
+import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;
import java.util.logging.Handler;
import java.util.logging.Logger;
+import java.util.stream.Collectors;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
@@ -101,8 +109,8 @@ public final class ControllerTester {
this(new AthenzDbMock(), new MockCuratorDb(), defaultRotationsConfig(), serviceRegistryMock);
}
- public ControllerTester(RotationsConfig rotationsConfig) {
- this(rotationsConfig, new MockCuratorDb());
+ public ControllerTester(RotationsConfig rotationsConfig, SystemName system) {
+ this(new AthenzDbMock(), new MockCuratorDb(), rotationsConfig, new ServiceRegistryMock(system));
}
public ControllerTester(MockCuratorDb curatorDb) {
@@ -113,6 +121,10 @@ public final class ControllerTester {
this(defaultRotationsConfig(), new MockCuratorDb());
}
+ public ControllerTester(SystemName system) {
+ this(new AthenzDbMock(), new MockCuratorDb(), defaultRotationsConfig(), new ServiceRegistryMock(system));
+ }
+
private ControllerTester(AthenzDbMock athenzDb, boolean inContainer,
CuratorDb curator, RotationsConfig rotationsConfig,
ServiceRegistryMock serviceRegistry, Controller controller) {
@@ -188,6 +200,21 @@ public final class ControllerTester {
return new Version(current.getMajor(), nextMinorVersion.getAndIncrement(), current.getMicro());
}
+ /** Set the zones and system for this and bootstrap infrastructure nodes */
+ public ControllerTester setZones(List<ZoneId> zones, SystemName system) {
+ zoneRegistry().setZones(zones.stream().map(ZoneApiMock::from).collect(Collectors.toList()))
+ .setSystemName(system);
+ configServer().bootstrap(zones, SystemApplication.notController());
+ return this;
+ }
+
+ /** Set the routing method for given zones */
+ public ControllerTester setRoutingMethod(List<ZoneId> zones, RoutingMethod routingMethod) {
+ zoneRegistry().setRoutingMethod(zones.stream().map(ZoneApiMock::from).collect(Collectors.toList()),
+ routingMethod);
+ return this;
+ }
+
/** Create a new controller instance. Useful to verify that controller state is rebuilt from persistence */
public final void createNewController() {
if (inContainer)
@@ -265,8 +292,7 @@ public final class ControllerTester {
}
public TenantName createTenant(String tenantName) {
- return createTenant(tenantName, "domain" + nextDomainId.getAndIncrement(),
- nextPropertyId.getAndIncrement());
+ return createTenant(tenantName, zoneRegistry().system().isPublic() ? Tenant.Type.cloud : Tenant.Type.athenz);
}
public TenantName createTenant(String tenantName, Tenant.Type type) {
@@ -303,7 +329,7 @@ public final class ControllerTester {
private TenantName createCloudTenant(String tenantName) {
TenantName tenant = TenantName.from(tenantName);
TenantSpec spec = new CloudTenantSpec(tenant, "token");
- controller().tenants().create(spec, new Auth0Credentials(new SimplePrincipal("dev"), Collections.emptySet()));
+ controller().tenants().create(spec, new Auth0Credentials(new SimplePrincipal("dev"), Set.of(Role.administrator(tenant))));
return tenant;
}
@@ -348,11 +374,15 @@ public final class ControllerTester {
private static Controller createController(CuratorDb curator, RotationsConfig rotationsConfig,
AthenzDbMock athensDb,
ServiceRegistryMock serviceRegistry) {
+ InMemoryFlagSource flagSource = new InMemoryFlagSource()
+ .withBooleanFlag(PermanentFlags.ENABLE_PUBLIC_SIGNUP_FLOW.id(), true);
Controller controller = new Controller(curator,
rotationsConfig,
- new AthenzFacade(new AthenzClientFactoryMock(athensDb)),
+ serviceRegistry.zoneRegistry().system().isPublic() ?
+ new CloudAccessControl(new MockUserManagement(), flagSource, serviceRegistry) :
+ new AthenzFacade(new AthenzClientFactoryMock(athensDb)),
() -> "test-controller",
- new InMemoryFlagSource(),
+ flagSource,
new MockMavenRepository(),
serviceRegistry,
new MetricsMock(), new SecretStoreMock(),
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/EndpointTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/EndpointTest.java
index 2d81d7304a1..468c92d3539 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/EndpointTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/EndpointTest.java
@@ -69,6 +69,21 @@ public class EndpointTest {
Endpoint.of(app1).target(endpointId).on(Port.tls()).routingMethod(RoutingMethod.exclusive).in(SystemName.Public)
);
tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString()));
+
+ Map<String, Endpoint> tests2 = Map.of(
+ // Default endpoint in public system using new domain
+ "https://a1.t1.g.vespa-app.cloud/",
+ Endpoint.of(app1).target(endpointId).on(Port.tls()).routingMethod(RoutingMethod.exclusive).legacy().in(SystemName.Public),
+
+ // Default endpoint in public CD system using new domain
+ "https://a1.t1.g.cd.vespa-app.cloud/",
+ Endpoint.of(app1).target(endpointId).on(Port.tls()).routingMethod(RoutingMethod.exclusive).legacy().in(SystemName.PublicCd),
+
+ // Custom instance in public system, using new domain
+ "https://i2.a2.t2.g.vespa-app.cloud/",
+ Endpoint.of(app2).target(endpointId).on(Port.tls()).routingMethod(RoutingMethod.exclusive).legacy().in(SystemName.Public)
+ );
+ tests2.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString()));
}
@Test
@@ -117,6 +132,13 @@ public class EndpointTest {
Endpoint.of(app1).target(endpointId).on(Port.tls()).routingMethod(RoutingMethod.exclusive).in(SystemName.Public)
);
tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString()));
+
+ Map<String, Endpoint> tests2 = Map.of(
+ // Custom endpoint and instance in public system, using new domain
+ "https://foo.i2.a2.t2.g.vespa-app.cloud/",
+ Endpoint.of(app2).target(EndpointId.of("foo")).on(Port.tls()).routingMethod(RoutingMethod.exclusive).legacy().in(SystemName.Public)
+ );
+ tests2.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString()));
}
@Test
@@ -167,6 +189,21 @@ public class EndpointTest {
Endpoint.of(app1).target(cluster, prodZone).on(Port.tls()).routingMethod(RoutingMethod.sharedLayer4).in(SystemName.main)
);
tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString()));
+
+ Map<String, Endpoint> tests2 = Map.of(
+ // Custom cluster name in public, using new domain
+ "https://c1.a1.t1.us-north-1.z.vespa-app.cloud/",
+ Endpoint.of(app1).target(ClusterSpec.Id.from("c1"), prodZone).on(Port.tls()).routingMethod(RoutingMethod.exclusive).legacy().in(SystemName.Public),
+
+ // Default cluster name in non-production zone in public, using new domain
+ "https://a1.t1.us-north-2.test.z.vespa-app.cloud/",
+ Endpoint.of(app1).target(ClusterSpec.Id.from("default"), testZone).on(Port.tls()).routingMethod(RoutingMethod.exclusive).legacy().in(SystemName.Public),
+
+ // Default cluster name in public CD, using new domain
+ "https://a1.t1.us-north-1.z.cd.vespa-app.cloud/",
+ Endpoint.of(app1).target(ClusterSpec.Id.from("default"), prodZone).on(Port.tls()).routingMethod(RoutingMethod.exclusive).legacy().in(SystemName.PublicCd)
+ );
+ tests2.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString()));
}
@Test
@@ -229,8 +266,9 @@ public class EndpointTest {
}
@Test
- public void weighted_endpoints() {
+ public void region_endpoints() {
var cluster = ClusterSpec.Id.from("default");
+ var prodZone = ZoneId.from("prod", "us-north-2");
Map<String, Endpoint> tests = Map.of(
"https://a1.t1.us-north-1-w.public.vespa.oath.cloud/",
Endpoint.of(app1)
@@ -240,7 +278,7 @@ public class EndpointTest {
.in(SystemName.Public),
"https://a1.t1.us-north-2-w.public.vespa.oath.cloud/",
Endpoint.of(app1)
- .targetRegion(cluster, ZoneId.from("prod", "us-north-2"))
+ .targetRegion(cluster, prodZone)
.routingMethod(RoutingMethod.exclusive)
.on(Port.tls())
.in(SystemName.Public),
@@ -249,6 +287,13 @@ public class EndpointTest {
.targetRegion(cluster, ZoneId.from("test", "us-north-2"))
.routingMethod(RoutingMethod.exclusive)
.on(Port.tls())
+ .in(SystemName.Public),
+ "https://c1.a1.t1.us-north-2.r.vespa-app.cloud/",
+ Endpoint.of(app1)
+ .targetRegion(ClusterSpec.Id.from("c1"), prodZone)
+ .routingMethod(RoutingMethod.exclusive)
+ .on(Port.tls())
+ .legacy()
.in(SystemName.Public)
);
tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString()));
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/archive/CuratorArchiveBucketDbTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/archive/CuratorArchiveBucketDbTest.java
index 57fa7cc8e44..ab669d6fe14 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/archive/CuratorArchiveBucketDbTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/archive/CuratorArchiveBucketDbTest.java
@@ -1,9 +1,8 @@
package com.yahoo.vespa.hosted.controller.archive;
+import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.zone.ZoneId;
-import com.yahoo.vespa.flags.Flags;
-import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.api.integration.archive.ArchiveBucket;
import org.apache.curator.shaded.com.google.common.collect.Streams;
@@ -22,22 +21,13 @@ public class CuratorArchiveBucketDbTest {
@Test
public void archiveUriFor() {
- ControllerTester tester = new ControllerTester();
- InMemoryFlagSource flagSource = (InMemoryFlagSource) tester.controller().flagSource();
+ ControllerTester tester = new ControllerTester(SystemName.Public);
CuratorArchiveBucketDb bucketDb = new CuratorArchiveBucketDb(tester.controller());
tester.curator().writeArchiveBuckets(ZoneId.defaultId(),
Set.of(new ArchiveBucket("existingBucket", "keyArn").withTenant(TenantName.defaultName())));
- // Nothing when feature flag is not set.
- assertEquals(Optional.empty(), bucketDb.archiveUriFor(ZoneId.defaultId(), TenantName.defaultName()));
-
- // Returns hardcoded name from feature flag
- flagSource.withStringFlag(Flags.SYNC_HOST_LOGS_TO_S3_BUCKET.id(), "hardcoded");
- assertEquals(Optional.of(URI.create("s3://hardcoded/default/")), bucketDb.archiveUriFor(ZoneId.defaultId(), TenantName.defaultName()));
-
- // Finds existing bucket in db when set to "auto"
- flagSource.withStringFlag(Flags.SYNC_HOST_LOGS_TO_S3_BUCKET.id(), "auto");
+ // Finds existing bucket in db
assertEquals(Optional.of(URI.create("s3://existingBucket/default/")), bucketDb.archiveUriFor(ZoneId.defaultId(), TenantName.defaultName()));
// Assigns to existing bucket while there is space
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java
index 9eaa15cdbe3..fc7a99eb2f0 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java
@@ -186,27 +186,28 @@ public class ApplicationPackageBuilder {
return this;
}
+ /** Add a trusted certificate to security/clients.pem */
public ApplicationPackageBuilder trust(X509Certificate certificate) {
this.trustedCertificates.add(certificate);
return this;
}
+ /** Add a default trusted certificate to security/clients.pem */
public ApplicationPackageBuilder trustDefaultCertificate() {
try {
var generator = KeyPairGenerator.getInstance("RSA");
- var builder = X509CertificateBuilder.fromKeypair(
+ var certificate = X509CertificateBuilder.fromKeypair(
generator.generateKeyPair(),
new X500Principal("CN=name"),
Instant.now(),
Instant.now().plusMillis(300_000),
SignatureAlgorithm.SHA256_WITH_RSA,
X509CertificateBuilder.generateRandomSerialNumber()
- );
- this.trustedCertificates.add(builder.build());
+ ).build();
+ return trust(certificate);
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
- return this;
}
private byte[] deploymentSpec() {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/BadgesTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/BadgesTest.java
deleted file mode 100644
index 06d5a42f9c0..00000000000
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/BadgesTest.java
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.deployment;
-
-import com.google.common.collect.ImmutableMap;
-import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
-import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
-import org.junit.Test;
-
-import java.net.URI;
-import java.util.Collections;
-import java.util.List;
-import java.util.Optional;
-
-import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.stagingTest;
-import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.systemTest;
-import static com.yahoo.vespa.hosted.controller.deployment.Step.report;
-import static java.time.Instant.EPOCH;
-import static java.time.Instant.now;
-import static org.junit.Assert.assertEquals;
-
-/**
- * @author jonmv
- */
-public class BadgesTest {
-
- private static final ApplicationId id = ApplicationId.from("tenant", "application", "default");
- private static final Run success = new Run(new RunId(id, systemTest, 3), ImmutableMap.of(report, new StepInfo(report, Step.Status.succeeded, Optional.empty())),
- null, null, Optional.of(now()), RunStatus.success, 0, EPOCH, Optional.empty(), Optional.empty(), Optional.empty());
-
- private static final Run running = new Run(new RunId(id, systemTest, 4), ImmutableMap.of(report, new StepInfo(report, Step.Status.succeeded, Optional.empty())),
- null, null, Optional.empty(), RunStatus.running, 0, EPOCH, Optional.empty(), Optional.empty(), Optional.empty());
-
- private static final Run failure = new Run(new RunId(id, JobType.stagingTest, 2), ImmutableMap.of(report, new StepInfo(report, Step.Status.succeeded, Optional.empty())),
- null, null, Optional.of(now()), RunStatus.testFailure, 0, EPOCH, Optional.empty(), Optional.empty(), Optional.empty());
-
- @Test
- public void test() {
- Badges badges = new Badges(URI.create("https://badges.tld/api/"));
-
- assertEquals(URI.create("https://badges.tld/api/tenant.application;" + Badges.dark),
- badges.historic(id, Optional.empty(), Collections.emptyList()));
-
- assertEquals(URI.create("https://badges.tld/api/tenant.application;" + Badges.dark +
- "/" + systemTest.jobName() + ";" + Badges.blue +
- "/%20;" + Badges.purple + ";s%7B" + Badges.white + "%7D"),
- badges.historic(id, Optional.of(success), Collections.singletonList(running)));
-
- assertEquals(URI.create("https://badges.tld/api/tenant.application;" + Badges.dark +
- "/" + systemTest.jobName() + ";" + Badges.blue +
- "/%20;" + Badges.blue + ";s%7B" + Badges.white + "%7D" +
- "/%20;" + Badges.purple + ";s%7B" + Badges.white + "%7D"),
- badges.historic(id, Optional.of(success), List.of(success, running)));
-
- assertEquals(URI.create("https://badges.tld/api/tenant.application;" + Badges.dark +
- "/" + systemTest.jobName() + ";" + Badges.purple +
- "/" + stagingTest.jobName() + ";" + Badges.red),
- badges.overview(id, List.of(running, failure)));
- }
-
-}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java
index 976cdb5c674..5d6f1965009 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java
@@ -41,7 +41,6 @@ import com.yahoo.vespa.hosted.controller.routing.Status;
import javax.security.auth.x500.X500Principal;
import java.math.BigInteger;
-import java.net.URI;
import java.security.KeyPair;
import java.security.cert.X509Certificate;
import java.time.Duration;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
index 81c9f51278e..c8b4eaa5236 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
@@ -5,15 +5,11 @@ import com.yahoo.component.Version;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.zone.RoutingMethod;
import com.yahoo.config.provision.zone.ZoneId;
-import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Change;
-import com.yahoo.vespa.hosted.controller.application.SystemApplication;
-import com.yahoo.vespa.hosted.controller.integration.ServiceRegistryMock;
-import com.yahoo.vespa.hosted.controller.integration.ZoneApiMock;
import com.yahoo.vespa.hosted.controller.versions.VespaVersion;
import org.junit.Test;
@@ -1061,17 +1057,13 @@ public class DeploymentTriggerTest {
ApplicationPackage cdPackage = new ApplicationPackageBuilder().region("cd-us-central-1")
.region("cd-aws-us-east-1a")
.build();
- ServiceRegistryMock services = new ServiceRegistryMock();
- var zones = List.of(ZoneApiMock.fromId("test.cd-us-central-1"),
- ZoneApiMock.fromId("staging.cd-us-central-1"),
- ZoneApiMock.fromId("prod.cd-us-central-1"),
- ZoneApiMock.fromId("prod.cd-aws-us-east-1a"));
- services.zoneRegistry()
- .setSystemName(SystemName.cd)
- .setZones(zones)
- .setRoutingMethod(zones, RoutingMethod.shared);
- tester = new DeploymentTester(new ControllerTester(services));
- tester.configServer().bootstrap(services.zoneRegistry().zones().all().ids(), SystemApplication.values());
+ var zones = List.of(ZoneId.from("test.cd-us-central-1"),
+ ZoneId.from("staging.cd-us-central-1"),
+ ZoneId.from("prod.cd-us-central-1"),
+ ZoneId.from("prod.cd-aws-us-east-1a"));
+ tester.controllerTester()
+ .setZones(zones, SystemName.cd)
+ .setRoutingMethod(zones, RoutingMethod.shared);
tester.controllerTester().upgradeSystem(Version.fromString("6.1"));
tester.controllerTester().computeVersionStatus();
var app = tester.newDeploymentContext();
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java
index fe241976d13..afb56f10c38 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java
@@ -292,6 +292,8 @@ public class NodeRepositoryMock implements NodeRepository {
newNode.modelName(node.getModelName());
if (node.getWantToRetire() != null)
newNode.wantToRetire(node.getWantToRetire());
+ if (!node.getReports().isEmpty())
+ newNode.reports(node.getReports());
putNodes(zoneId, newNode.build());
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ServiceRegistryMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ServiceRegistryMock.java
index 326928b9463..702ce83d116 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ServiceRegistryMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ServiceRegistryMock.java
@@ -9,6 +9,7 @@ import com.yahoo.test.ManualClock;
import com.yahoo.vespa.hosted.controller.api.integration.ServiceRegistry;
import com.yahoo.vespa.hosted.controller.api.integration.archive.ArchiveService;
import com.yahoo.vespa.hosted.controller.api.integration.archive.MockArchiveService;
+import com.yahoo.vespa.hosted.controller.api.integration.aws.MockRoleService;
import com.yahoo.vespa.hosted.controller.api.integration.aws.RoleService;
import com.yahoo.vespa.hosted.controller.api.integration.aws.MockAwsEventFetcher;
import com.yahoo.vespa.hosted.controller.api.integration.aws.MockResourceTagger;
@@ -66,8 +67,8 @@ public class ServiceRegistryMock extends AbstractComponent implements ServiceReg
private final ApplicationStoreMock applicationStoreMock = new ApplicationStoreMock();
private final MockRunDataStore mockRunDataStore = new MockRunDataStore();
private final MockResourceTagger mockResourceTagger = new MockResourceTagger();
- private final RoleService roleService = new NoopRoleService();
- private final BillingController billingController = new MockBillingController();
+ private final RoleService roleService = new MockRoleService();
+ private final BillingController billingController = new MockBillingController(clock);
private final ContainerRegistryMock containerRegistry = new ContainerRegistryMock();
private final NoopTenantSecretService tenantSecretService = new NoopTenantSecretService();
private final ArchiveService archiveService = new MockArchiveService();
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneRegistryMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneRegistryMock.java
index 1d2c743ffba..bc81924225c 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneRegistryMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneRegistryMock.java
@@ -50,21 +50,25 @@ public class ZoneRegistryMock extends AbstractComponent implements ZoneRegistry
*/
public ZoneRegistryMock(SystemName system) {
this.system = system;
- this.zones = List.of(ZoneApiMock.fromId("test.us-east-1"),
- ZoneApiMock.fromId("staging.us-east-3"),
- ZoneApiMock.fromId("dev.us-east-1"),
- ZoneApiMock.fromId("dev.aws-us-east-2a"),
- ZoneApiMock.fromId("perf.us-east-3"),
- ZoneApiMock.fromId("prod.aws-us-east-1a"),
- ZoneApiMock.fromId("prod.ap-northeast-1"),
- ZoneApiMock.fromId("prod.ap-northeast-2"),
- ZoneApiMock.fromId("prod.ap-southeast-1"),
- ZoneApiMock.fromId("prod.us-east-3"),
- ZoneApiMock.fromId("prod.us-west-1"),
- ZoneApiMock.fromId("prod.us-central-1"),
- ZoneApiMock.fromId("prod.eu-west-1"));
+ this.zones = system.isPublic() ?
+ List.of(ZoneApiMock.fromId("test.aws-us-east-1c"),
+ ZoneApiMock.fromId("staging.aws-us-east-1c"),
+ ZoneApiMock.fromId("prod.aws-us-east-1c")) :
+ List.of(ZoneApiMock.fromId("test.us-east-1"),
+ ZoneApiMock.fromId("staging.us-east-3"),
+ ZoneApiMock.fromId("dev.us-east-1"),
+ ZoneApiMock.fromId("dev.aws-us-east-2a"),
+ ZoneApiMock.fromId("perf.us-east-3"),
+ ZoneApiMock.fromId("prod.aws-us-east-1a"),
+ ZoneApiMock.fromId("prod.ap-northeast-1"),
+ ZoneApiMock.fromId("prod.ap-northeast-2"),
+ ZoneApiMock.fromId("prod.ap-southeast-1"),
+ ZoneApiMock.fromId("prod.us-east-3"),
+ ZoneApiMock.fromId("prod.us-west-1"),
+ ZoneApiMock.fromId("prod.us-central-1"),
+ ZoneApiMock.fromId("prod.eu-west-1"));
// All zones use a shared routing method by default
- setRoutingMethod(this.zones, RoutingMethod.shared);
+ setRoutingMethod(this.zones, system.isPublic() ? RoutingMethod.exclusive : RoutingMethod.shared);
}
public ZoneRegistryMock setDeploymentTimeToLive(ZoneId zone, Duration duration) {
@@ -206,11 +210,6 @@ public class ZoneRegistryMock extends AbstractComponent implements ZoneRegistry
}
@Override
- public URI badgeUrl() {
- return URI.create("https://badges.tld");
- }
-
- @Override
public URI apiUrl() {
return URI.create("https://api.tld:4443/");
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveAccessMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveAccessMaintainerTest.java
index 56247b04ac6..969fb606d62 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveAccessMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveAccessMaintainerTest.java
@@ -1,18 +1,14 @@
// Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.maintenance;
+import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.jdisc.test.MockMetric;
-import com.yahoo.vespa.flags.Flags;
-import com.yahoo.vespa.flags.InMemoryFlagSource;
-import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.LockedTenant;
import com.yahoo.vespa.hosted.controller.api.integration.archive.ArchiveBucket;
import com.yahoo.vespa.hosted.controller.api.integration.archive.MockArchiveService;
-import com.yahoo.vespa.hosted.controller.restapi.ContainerTester;
-import com.yahoo.vespa.hosted.controller.restapi.ControllerContainerCloudTest;
import com.yahoo.vespa.hosted.controller.tenant.Tenant;
import org.junit.Test;
@@ -27,29 +23,25 @@ import static org.junit.Assert.assertNull;
/**
* @author andreer
*/
-public class ArchiveAccessMaintainerTest extends ControllerContainerCloudTest {
+public class ArchiveAccessMaintainerTest {
@Test
public void grantsRoleAccess() {
- var containerTester = new ContainerTester(container, "");
- ((InMemoryFlagSource) containerTester.controller().flagSource())
- .withBooleanFlag(PermanentFlags.ENABLE_PUBLIC_SIGNUP_FLOW.id(), true)
- .withStringFlag(Flags.SYNC_HOST_LOGS_TO_S3_BUCKET.id(), "auto");
- var tester = new ControllerTester(containerTester);
+ var tester = new ControllerTester(SystemName.Public);
String tenant1role = "arn:aws:iam::123456789012:role/my-role";
String tenant2role = "arn:aws:iam::210987654321:role/my-role";
var tenant1 = createTenantWithAccessRole(tester, "tenant1", tenant1role);
createTenantWithAccessRole(tester, "tenant2", tenant2role);
- ZoneId testZone = ZoneId.from("prod.us-east-3");
+ ZoneId testZone = ZoneId.from("prod.aws-us-east-1c");
tester.controller().archiveBucketDb().archiveUriFor(testZone, tenant1);
var testBucket = new ArchiveBucket("bucketName", "keyArn").withTenant(tenant1);
MockArchiveService archiveService = (MockArchiveService) tester.controller().serviceRegistry().archiveService();
assertNull(archiveService.authorizedIamRoles.get(testBucket));
MockMetric metric = new MockMetric();
- new ArchiveAccessMaintainer(containerTester.controller(), metric, Duration.ofMinutes(10)).maintain();
+ new ArchiveAccessMaintainer(tester.controller(), metric, Duration.ofMinutes(10)).maintain();
assertEquals(Map.of(tenant1, tenant1role), archiveService.authorizedIamRoles.get(testBucket));
var expected = Map.of("archive.bucketCount",
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdaterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdaterTest.java
index 505536558ab..d7934f08fee 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdaterTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdaterTest.java
@@ -5,8 +5,7 @@ import com.yahoo.component.Version;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.zone.ZoneId;
-import com.yahoo.vespa.flags.Flags;
-import com.yahoo.vespa.flags.InMemoryFlagSource;
+import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.api.integration.archive.ArchiveBucket;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.NodeRepository;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
@@ -20,7 +19,6 @@ import java.net.URI;
import java.time.Duration;
import java.util.LinkedHashSet;
import java.util.Map;
-import java.util.Set;
import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
@@ -30,20 +28,17 @@ import static org.junit.Assert.assertEquals;
*/
public class ArchiveUriUpdaterTest {
- private final DeploymentTester tester = new DeploymentTester();
+ private final DeploymentTester tester = new DeploymentTester(new ControllerTester(SystemName.Public));
@Test
public void archive_uri_test() {
var updater = new ArchiveUriUpdater(tester.controller(), Duration.ofDays(1));
- ((InMemoryFlagSource) tester.controller().flagSource())
- .withStringFlag(Flags.SYNC_HOST_LOGS_TO_S3_BUCKET.id(), "auto");
-
var tenant1 = TenantName.from("tenant1");
var tenant2 = TenantName.from("tenant2");
var tenantInfra = SystemApplication.TENANT;
var application = tester.newDeploymentContext(tenant1.value(), "app1", "instance1");
- ZoneId zone = ZoneId.from("prod", "ap-northeast-1");
+ ZoneId zone = ZoneId.from("prod", "aws-us-east-1c");
// Initially we should not set any archive URIs as the archive service does not return any
updater.maintain();
@@ -85,6 +80,6 @@ public class ArchiveUriUpdaterTest {
}
private void deploy(DeploymentContext application, ZoneId zone) {
- application.runJob(JobType.from(SystemName.main, zone).orElseThrow(), new ApplicationPackage(new byte[0]), Version.fromString("7.1"));
+ application.runJob(JobType.from(SystemName.Public, zone).orElseThrow(), new ApplicationPackage(new byte[0]), Version.fromString("7.1"));
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeRequestMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeRequestMaintainerTest.java
index 290e08ca47b..15a2cf3063d 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeRequestMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeRequestMaintainerTest.java
@@ -26,29 +26,6 @@ public class ChangeRequestMaintainerTest {
private final ChangeRequestMaintainer changeRequestMaintainer = new ChangeRequestMaintainer(tester.controller(), Duration.ofMinutes(1));
@Test
- public void only_approve_requests_pending_approval() {
- var changeRequest1 = newChangeRequest("id1", ChangeRequest.Approval.APPROVED);
- var changeRequest2 = newChangeRequest("id2", ChangeRequest.Approval.REQUESTED);
- var upcomingChangeRequests = List.of(
- changeRequest1,
- changeRequest2
- );
-
- changeRequestClient.setUpcomingChangeRequests(upcomingChangeRequests);
- changeRequestMaintainer.maintain();
-
- var approvedChangeRequests = changeRequestClient.getApprovedChangeRequests();
-
- assertEquals(1, approvedChangeRequests.size());
- assertEquals("id2", approvedChangeRequests.get(0).getId());
- var writtenChangeRequests = tester.curator().readChangeRequests();
- assertEquals(2, writtenChangeRequests.size());
-
- var expectedChangeRequest = new VespaChangeRequest(changeRequest1, ZoneId.from("prod.us-east-3"));
- assertEquals(expectedChangeRequest, writtenChangeRequests.get(0));
- }
-
- @Test
public void updates_status_time_and_approval() {
var time = ZonedDateTime.now();
var persistedChangeRequest = persistedChangeRequest("some-id", time.minusDays(5), Status.WAITING_FOR_APPROVAL);
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java
index 84f4f3d9b7c..59fb5b596f1 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java
@@ -11,12 +11,14 @@ import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Deployment;
+import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
import org.junit.Test;
import java.time.Duration;
import java.time.Instant;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
import java.util.function.Supplier;
@@ -113,6 +115,21 @@ public class DeploymentMetricsMaintainerTest {
assertEquals(5, deployment.get().activity().lastWritesPerSecond().getAsDouble(), Double.MIN_VALUE);
}
+ @Test
+ public void cluster_metric_aggregation_test() {
+ List<ClusterMetrics> clusterMetrics = List.of(
+ new ClusterMetrics("niceCluster", "container", Map.of("queriesPerSecond", 23.0, "queryLatency", 1337.0)),
+ new ClusterMetrics("alsoNiceCluster", "container", Map.of("queriesPerSecond", 11.0, "queryLatency", 12.0)));
+
+ DeploymentMetrics deploymentMetrics = DeploymentMetricsMaintainer.updateDeploymentMetrics(DeploymentMetrics.none, clusterMetrics);
+
+ assertEquals(23.0 + 11.0, deploymentMetrics.queriesPerSecond(), 0.001);
+ assertEquals(908.323, deploymentMetrics.queryLatencyMillis(), 0.001);
+ assertEquals(0, deploymentMetrics.documentCount(), 0.001);
+ assertEquals(0.0, deploymentMetrics.writeLatencyMillis(), 0.001);
+ assertEquals(0.0, deploymentMetrics.writesPerSecond(), 0.001);
+ }
+
private void setMetrics(ApplicationId application, Map<String, Double> metrics) {
var clusterMetrics = new ClusterMetrics("default", "container", metrics);
tester.controllerTester().serviceRegistry().configServerMock().setMetrics(new DeploymentId(application, ZoneId.from("dev", "us-east-1")), clusterMetrics);
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporterTest.java
index d42342b57fb..29c5573a1f5 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporterTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporterTest.java
@@ -5,11 +5,14 @@ import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.CloudName;
import com.yahoo.config.provision.HostName;
+import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.zone.UpgradePolicy;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.ControllerTester;
+import com.yahoo.vespa.hosted.controller.api.integration.billing.PlanId;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Change;
@@ -18,12 +21,14 @@ import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
import com.yahoo.vespa.hosted.controller.integration.MetricsMock;
import com.yahoo.vespa.hosted.controller.integration.ZoneApiMock;
+import com.yahoo.vespa.hosted.controller.tenant.Tenant;
import com.yahoo.vespa.hosted.controller.versions.VespaVersion;
import org.junit.Test;
import java.time.Duration;
import java.util.Comparator;
import java.util.List;
+import java.util.Map;
import java.util.Set;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;
@@ -472,6 +477,23 @@ public class MetricsReporterTest {
assertEquals(0, metrics.getMetric(MetricsReporter.BROKEN_SYSTEM_VERSION));
}
+ @Test
+ public void tenant_counter() {
+ var tester = new ControllerTester(SystemName.Public);
+ tester.zoneRegistry().setSystemName(SystemName.Public);
+ tester.createTenant("foo", Tenant.Type.cloud);
+ tester.createTenant("bar", Tenant.Type.cloud);
+ tester.createTenant("fix", Tenant.Type.cloud);
+ tester.controller().serviceRegistry().billingController().setPlan(TenantName.from("foo"), PlanId.from("pay-as-you-go"), false);
+ tester.controller().serviceRegistry().billingController().setPlan(TenantName.from("bar"), PlanId.from("pay-as-you-go"), false);
+
+ var reporter = createReporter(tester.controller());
+ reporter.maintain();
+
+ assertEquals(2, metrics.getMetric(d -> "pay-as-you-go".equals(d.get("plan")), MetricsReporter.TENANT_METRIC).get());
+ assertEquals(1, metrics.getMetric(d -> "trial".equals(d.get("plan")), MetricsReporter.TENANT_METRIC).get());
+ }
+
private void assertNodeCount(String metric, int n, Version version) {
long nodeCount = metrics.getMetric((dimensions) -> version.toFullString().equals(dimensions.get("currentVersion")), metric)
.stream()
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeSchedulerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeSchedulerTest.java
index 76781d964a1..7d512ba090c 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeSchedulerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeSchedulerTest.java
@@ -40,14 +40,14 @@ public class OsUpgradeSchedulerTest {
tester.controller().upgradeOsIn(cloud, version0, Duration.ofDays(1), false);
// Target remains unchanged as it hasn't expired yet
- for (var interval : List.of(Duration.ZERO, Duration.ofDays(15))) {
+ for (var interval : List.of(Duration.ZERO, Duration.ofDays(30))) {
tester.clock().advance(interval);
scheduler.maintain();
assertEquals(version0, tester.controller().osVersionTarget(cloud).get().osVersion().version());
}
- // Just over 30 days pass, and a new target replaces the expired one
- Version version1 = Version.fromString("7.0.0.20210215");
+ // Just over 45 days pass, and a new target replaces the expired one
+ Version version1 = Version.fromString("7.0.0.20210302");
tester.clock().advance(Duration.ofDays(15).plus(Duration.ofSeconds(1)));
scheduler.maintain();
assertEquals("New target set", version1, tester.controller().osVersionTarget(cloud).get().osVersion().version());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TenantRoleMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TenantRoleMaintainerTest.java
new file mode 100644
index 00000000000..050610905f3
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TenantRoleMaintainerTest.java
@@ -0,0 +1,61 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.hosted.controller.maintenance;
+
+import com.yahoo.config.provision.TenantName;
+import com.yahoo.vespa.hosted.controller.Instance;
+import com.yahoo.vespa.hosted.controller.api.integration.aws.MockRoleService;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
+import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
+import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
+import org.junit.Test;
+
+import java.time.Duration;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * @author mortent
+ */
+public class TenantRoleMaintainerTest {
+
+ private final DeploymentTester tester = new DeploymentTester();
+
+ @Test
+ public void maintains_iam_roles_for_tenants_in_production() {
+ var devAppTenant1 = tester.newDeploymentContext("tenant1", "app1", "default");
+ var prodAppTenant2 = tester.newDeploymentContext("tenant2", "app2", "default");
+ var devAppTenant2 = tester.newDeploymentContext("tenant2","app3","default");
+ ApplicationPackage appPackage = new ApplicationPackageBuilder()
+ .region("us-west-1")
+ .build();
+
+ // Deploy dev apps
+ devAppTenant1.runJob(JobType.devUsEast1, appPackage);
+ devAppTenant2.runJob(JobType.devUsEast1, appPackage);
+
+ // Deploy prod
+ prodAppTenant2.submit(appPackage).deploy();
+ assertEquals(1, permanentDeployments(devAppTenant1.instance()));
+ assertEquals(1, permanentDeployments(devAppTenant2.instance()));
+ assertEquals(1, permanentDeployments(prodAppTenant2.instance()));
+
+ var maintainer = new TenantRoleMaintainer(tester.controller(), Duration.ofDays(1));
+ maintainer.maintain();
+
+ var roleService = tester.controller().serviceRegistry().roleService();
+ List<TenantName> tenantNames = ((MockRoleService) roleService).maintainedTenants();
+
+ assertEquals(1, tenantNames.size());
+ assertEquals(prodAppTenant2.application().id().tenant(), tenantNames.get(0));
+ }
+
+ private long permanentDeployments(Instance instance) {
+ return tester.controller().applications().requireInstance(instance.id()).deployments().values().stream()
+ .filter(deployment -> !deployment.zone().environment().isTest())
+ .count();
+ }
+
+}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/VCMRMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/VCMRMaintainerTest.java
index d5c35f806f4..16ed6b7ef98 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/VCMRMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/VCMRMaintainerTest.java
@@ -9,9 +9,11 @@ import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequest;
import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequestSource;
import com.yahoo.vespa.hosted.controller.api.integration.vcmr.HostAction;
import com.yahoo.vespa.hosted.controller.api.integration.vcmr.HostAction.State;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.VCMRReport;
import com.yahoo.vespa.hosted.controller.api.integration.vcmr.VespaChangeRequest;
import com.yahoo.vespa.hosted.controller.api.integration.vcmr.VespaChangeRequest.Status;
import com.yahoo.vespa.hosted.controller.integration.NodeRepositoryMock;
+import org.junit.Before;
import org.junit.Test;
import java.time.Duration;
@@ -26,27 +28,44 @@ import static org.junit.Assert.*;
*/
public class VCMRMaintainerTest {
- private final ControllerTester tester = new ControllerTester();
- private final VCMRMaintainer maintainer = new VCMRMaintainer(tester.controller(), Duration.ofMinutes(1));
- private final NodeRepositoryMock nodeRepo = tester.serviceRegistry().configServer().nodeRepository();
+ private ControllerTester tester;
+ private VCMRMaintainer maintainer;
+ private NodeRepositoryMock nodeRepo;
private final ZoneId zoneId = ZoneId.from("prod.us-east-3");
private final HostName host1 = HostName.from("host1");
private final HostName host2 = HostName.from("host2");
private final String changeRequestId = "id123";
+ @Before
+ public void setup() {
+ tester = new ControllerTester();
+ maintainer = new VCMRMaintainer(tester.controller(), Duration.ofMinutes(1));
+ nodeRepo = tester.serviceRegistry().configServer().nodeRepository().allowPatching(true);
+ }
+
@Test
public void recycle_hosts_after_completion() {
+ var vcmrReport = new VCMRReport();
+ vcmrReport.addVcmr("id123", ZonedDateTime.now(), ZonedDateTime.now());
var parkedNode = createNode(host1, NodeType.host, Node.State.parked, true);
var failedNode = createNode(host2, NodeType.host, Node.State.failed, false);
+ parkedNode = new Node.Builder(parkedNode)
+ .reports(vcmrReport.toNodeReports())
+ .build();
+
nodeRepo.putNodes(zoneId, List.of(parkedNode, failedNode));
tester.curator().writeChangeRequest(canceledChangeRequest());
maintainer.maintain();
- // Only the parked node is recycled
+ // Only the parked node is recycled, VCMR report is cleared
var nodeList = nodeRepo.list(zoneId, List.of(host1, host2));
assertEquals(Node.State.dirty, nodeList.get(0).state());
assertEquals(Node.State.failed, nodeList.get(1).state());
+
+ var report = nodeList.get(0).reports();
+ assertNull(report.get(VCMRReport.getReportId()));
+
var writtenChangeRequest = tester.curator().readChangeRequest(changeRequestId).get();
assertEquals(Status.COMPLETED, writtenChangeRequest.getStatus());
}
@@ -74,7 +93,7 @@ public class VCMRMaintainerTest {
var activeNode = createNode(host1, NodeType.host, Node.State.active, false);
var failedNode = createNode(host2, NodeType.host, Node.State.failed, false);
nodeRepo.putNodes(zoneId, List.of(activeNode, failedNode));
- nodeRepo.allowPatching(true).hasSpareCapacity(true);
+ nodeRepo.hasSpareCapacity(true);
tester.curator().writeChangeRequest(startingChangeRequest());
maintainer.maintain();
@@ -88,7 +107,6 @@ public class VCMRMaintainerTest {
activeNode = nodeRepo.list(zoneId, List.of(activeNode.hostname())).get(0);
assertTrue(activeNode.wantToRetire());
-
}
@Test
@@ -107,6 +125,9 @@ public class VCMRMaintainerTest {
assertEquals(State.REQUIRES_OPERATOR_ACTION, parkedNodeAction.getState());
assertEquals(State.REQUIRES_OPERATOR_ACTION, failedNodeAction.getState());
assertEquals(Status.REQUIRES_OPERATOR_ACTION, writtenChangeRequest.getStatus());
+
+ var approvedChangeRequests = tester.serviceRegistry().changeRequestClient().getApprovedChangeRequests();
+ assertTrue(approvedChangeRequests.isEmpty());
}
@Test
@@ -121,7 +142,7 @@ public class VCMRMaintainerTest {
var writtenChangeRequest = tester.curator().readChangeRequest(changeRequestId).orElseThrow();
var parkedNodeAction = writtenChangeRequest.getHostActionPlan().get(0);
assertEquals(State.RETIRED, parkedNodeAction.getState());
- assertEquals(Status.IN_PROGRESS, writtenChangeRequest.getStatus());
+ assertEquals(Status.READY, writtenChangeRequest.getStatus());
}
@Test
@@ -137,6 +158,16 @@ public class VCMRMaintainerTest {
var tenantHostAction = writtenChangeRequest.getHostActionPlan().get(0);
assertEquals(State.PENDING_RETIREMENT, tenantHostAction.getState());
assertEquals(Status.PENDING_ACTION, writtenChangeRequest.getStatus());
+
+ var approvedChangeRequests = tester.serviceRegistry().changeRequestClient().getApprovedChangeRequests();
+ assertEquals(1, approvedChangeRequests.size());
+
+ activeNode = nodeRepo.list(zoneId, List.of(host2)).get(0);
+ var report = VCMRReport.fromReports(activeNode.reports());
+ var reportAdded = report.getVcmrs().stream()
+ .filter(vcmr -> vcmr.getId().equals(changeRequestId))
+ .count() == 1;
+ assertTrue(reportAdded);
}
@Test
@@ -144,7 +175,7 @@ public class VCMRMaintainerTest {
var parkedNode = createNode(host1, NodeType.host, Node.State.parked, false);
var retiringNode = createNode(host2, NodeType.host, Node.State.active, true);
nodeRepo.putNodes(zoneId, List.of(parkedNode, retiringNode));
- nodeRepo.allowPatching(true).hasSpareCapacity(true);
+ nodeRepo.hasSpareCapacity(true);
tester.curator().writeChangeRequest(postponedChangeRequest());
maintainer.maintain();
@@ -195,7 +226,7 @@ public class VCMRMaintainerTest {
source,
List.of("switch1"),
List.of("host1", "host2"),
- ChangeRequest.Approval.APPROVED,
+ ChangeRequest.Approval.REQUESTED,
ChangeRequest.Impact.VERY_HIGH,
VespaChangeRequest.Status.IN_PROGRESS,
actionPlan,
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/metric/ConfigServerMetricsTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/metric/ConfigServerMetricsTest.java
deleted file mode 100644
index 33b043bc93d..00000000000
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/metric/ConfigServerMetricsTest.java
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.metric;
-
-import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.SystemName;
-import com.yahoo.config.provision.zone.ZoneId;
-import com.yahoo.vespa.hosted.controller.api.application.v4.model.ClusterMetrics;
-import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
-import com.yahoo.vespa.hosted.controller.integration.ConfigServerMock;
-import com.yahoo.vespa.hosted.controller.integration.ZoneRegistryMock;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.util.List;
-import java.util.Map;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * @author olaa
- */
-public class ConfigServerMetricsTest {
-
- private final ApplicationId applicationId = new ApplicationId.Builder()
- .tenant("foo")
- .applicationName("bar")
- .instanceName("default")
- .build();
-
- private final ZoneId zoneId = ZoneId.from("prod", "us-west-1");
-
- private ConfigServerMock configServer;
- private ConfigServerMetrics service;
-
- @Before
- public void before() {
- configServer = new ConfigServerMock(new ZoneRegistryMock(SystemName.main));
- service = new ConfigServerMetrics(configServer);
- }
-
- @Test
- public void test_returning_metrics() {
- //
- // Wire up the test
- //
- var deploymentId = new DeploymentId(applicationId, zoneId);
-
- var clusterMetrics1 = new ClusterMetrics("niceCluster", "container", Map.of("queriesPerSecond", 23.0, "queryLatency", 1337.0));
-
- var clusterMetrics2 = new ClusterMetrics("alsoNiceCluster", "container", Map.of("queriesPerSecond", 11.0, "queryLatency", 12.0));
-
- var response = List.of(clusterMetrics1, clusterMetrics2);
-
- configServer.setMetrics(deploymentId, response);
-
- //
- // Now we can actually test stuff :(
- //
- var deploymentMetrics = service.getDeploymentMetrics(applicationId, zoneId);
-
- assertEquals(23.0 + 11.0, deploymentMetrics.queriesPerSecond(), 0.001);
- assertEquals(908.323, deploymentMetrics.queryLatencyMillis(), 0.001);
- assertEquals(0, deploymentMetrics.documentCount());
- assertEquals(0.0, deploymentMetrics.writeLatencyMillis(), 0.001);
- assertEquals(0.0, deploymentMetrics.writesPerSecond(), 0.001);
- }
-
- @Test
- public void test_not_implemented_application_metrics() {
- var applicationMetrics = service.getApplicationMetrics(applicationId);
- assertEquals(0.0, applicationMetrics.queryServiceQuality(), 0.001);
- assertEquals(0.0, applicationMetrics.writeServiceQuality(), 0.001);
- }
-
-}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java
index 90d1ecb2f20..5bd7d1db769 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java
@@ -7,6 +7,7 @@ import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.path.Path;
import com.yahoo.test.ManualClock;
+import com.yahoo.vespa.hosted.controller.api.application.v4.model.ClusterMetrics;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
@@ -18,13 +19,18 @@ import org.junit.Test;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
+import static com.yahoo.vespa.hosted.controller.notification.Notification.Level;
+import static com.yahoo.vespa.hosted.controller.notification.Notification.Type;
+
/**
* @author freva
*/
@@ -32,12 +38,12 @@ public class NotificationsDbTest {
private static final TenantName tenant = TenantName.from("tenant1");
private static final List<Notification> notifications = List.of(
- notification(1001, Notification.Type.DEPLOYMENT_FAILURE, NotificationSource.from(tenant), "tenant msg"),
- notification(1101, Notification.Type.DEPLOYMENT_FAILURE, NotificationSource.from(TenantAndApplicationId.from(tenant.value(), "app1")), "app msg"),
- notification(1201, Notification.Type.DEPLOYMENT_FAILURE, NotificationSource.from(ApplicationId.from(tenant.value(), "app2", "instance2")), "instance msg"),
- notification(1301, Notification.Type.DEPLOYMENT_FAILURE, NotificationSource.from(new DeploymentId(ApplicationId.from(tenant.value(), "app2", "instance2"), ZoneId.from("prod", "us-north-2"))), "deployment msg"),
- notification(1401, Notification.Type.DEPLOYMENT_FAILURE, NotificationSource.from(new DeploymentId(ApplicationId.from(tenant.value(), "app1", "instance1"), ZoneId.from("dev", "us-south-1")), ClusterSpec.Id.from("cluster1")), "cluster msg"),
- notification(1501, Notification.Type.DEPLOYMENT_FAILURE, NotificationSource.from(new RunId(ApplicationId.from(tenant.value(), "app1", "instance1"), JobType.devUsEast1, 4)), "run id msg"));
+ notification(1001, Type.deployment, Level.error, NotificationSource.from(tenant), "tenant msg"),
+ notification(1101, Type.applicationPackage, Level.warning, NotificationSource.from(TenantAndApplicationId.from(tenant.value(), "app1")), "app msg"),
+ notification(1201, Type.deployment, Level.error, NotificationSource.from(ApplicationId.from(tenant.value(), "app2", "instance2")), "instance msg"),
+ notification(1301, Type.deployment, Level.warning, NotificationSource.from(new DeploymentId(ApplicationId.from(tenant.value(), "app2", "instance2"), ZoneId.from("prod", "us-north-2"))), "deployment msg"),
+ notification(1401, Type.feedBlock, Level.error, NotificationSource.from(new DeploymentId(ApplicationId.from(tenant.value(), "app1", "instance1"), ZoneId.from("dev", "us-south-1")), ClusterSpec.Id.from("cluster1")), "cluster msg"),
+ notification(1501, Type.deployment, Level.warning, NotificationSource.from(new RunId(ApplicationId.from(tenant.value(), "app1", "instance1"), JobType.devUsEast1, 4)), "run id msg"));
private final ManualClock clock = new ManualClock(Instant.ofEpochSecond(12345));
private final MockCuratorDb curatorDb = new MockCuratorDb();
@@ -46,7 +52,7 @@ public class NotificationsDbTest {
@Test
public void list_test() {
assertEquals(notifications, notificationsDb.listNotifications(NotificationSource.from(tenant), false));
- assertEquals(notificationIndices(0, 1, 3), notificationsDb.listNotifications(NotificationSource.from(tenant), true));
+ assertEquals(notificationIndices(0, 1, 2, 3), notificationsDb.listNotifications(NotificationSource.from(tenant), true));
assertEquals(notificationIndices(2, 3), notificationsDb.listNotifications(NotificationSource.from(TenantAndApplicationId.from(tenant.value(), "app2")), false));
assertEquals(notificationIndices(4, 5), notificationsDb.listNotifications(NotificationSource.from(ApplicationId.from(tenant.value(), "app1", "instance1")), false));
assertEquals(notificationIndices(5), notificationsDb.listNotifications(NotificationSource.from(new RunId(ApplicationId.from(tenant.value(), "app1", "instance1"), JobType.devUsEast1, 5)), false));
@@ -55,14 +61,14 @@ public class NotificationsDbTest {
@Test
public void add_test() {
- Notification notification1 = notification(12345, Notification.Type.DEPLOYMENT_FAILURE, NotificationSource.from(ApplicationId.from(tenant.value(), "app2", "instance2")), "instance msg #2");
- Notification notification2 = notification(12345, Notification.Type.DEPLOYMENT_FAILURE, NotificationSource.from(ApplicationId.from(tenant.value(), "app3", "instance2")), "instance msg #3");
+ Notification notification1 = notification(12345, Type.deployment, Level.warning, NotificationSource.from(ApplicationId.from(tenant.value(), "app2", "instance2")), "instance msg #2");
+ Notification notification2 = notification(12345, Type.deployment, Level.error, NotificationSource.from(ApplicationId.from(tenant.value(), "app3", "instance2")), "instance msg #3");
// Replace the 3rd notification
- notificationsDb.setNotification(notification1.source(), notification1.type(), notification1.messages());
+ notificationsDb.setNotification(notification1.source(), notification1.type(), notification1.level(), notification1.messages());
// Notification for a new app, add without replacement
- notificationsDb.setNotification(notification2.source(), notification2.type(), notification2.messages());
+ notificationsDb.setNotification(notification2.source(), notification2.type(), notification2.level(), notification2.messages());
List<Notification> expected = notificationIndices(0, 1, 3, 4, 5);
expected.addAll(List.of(notification1, notification2));
@@ -72,10 +78,10 @@ public class NotificationsDbTest {
@Test
public void remove_single_test() {
// Remove the 3rd notification
- notificationsDb.removeNotification(NotificationSource.from(ApplicationId.from(tenant.value(), "app2", "instance2")), Notification.Type.DEPLOYMENT_FAILURE);
+ notificationsDb.removeNotification(NotificationSource.from(ApplicationId.from(tenant.value(), "app2", "instance2")), Type.deployment);
// Removing something that doesn't exist is OK
- notificationsDb.removeNotification(NotificationSource.from(ApplicationId.from(tenant.value(), "app3", "instance2")), Notification.Type.DEPLOYMENT_FAILURE);
+ notificationsDb.removeNotification(NotificationSource.from(ApplicationId.from(tenant.value(), "app3", "instance2")), Type.deployment);
assertEquals(notificationIndices(0, 1, 3, 4, 5), curatorDb.readNotifications(tenant));
}
@@ -92,6 +98,64 @@ public class NotificationsDbTest {
assertFalse(curatorDb.curator().exists(Path.fromString("/controller/v1/notifications/" + tenant.value())));
}
+ @Test
+ public void feed_blocked_single_cluster_test() {
+ DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenant.value(), "app1", "instance1"), ZoneId.from("prod", "us-south-3"));
+ NotificationSource sourceCluster1 = NotificationSource.from(deploymentId, ClusterSpec.Id.from("cluster1"));
+ List<Notification> expected = new ArrayList<>(notifications);
+
+ // No metrics, no new notification
+ notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of());
+ assertEquals(expected, curatorDb.readNotifications(tenant));
+
+ // Metrics that contain none of the feed block metrics do not create a new notification
+ notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(clusterMetrics("cluster1", null, null, null, null)));
+ assertEquals(expected, curatorDb.readNotifications(tenant));
+
+ // Metrics that only contain util or limit (should not be possible) should not cause any issues
+ notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, null, null, 0.5)));
+ assertEquals(expected, curatorDb.readNotifications(tenant));
+
+ // One resource is at warning
+ notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.85, 0.9, 0.3, 0.5)));
+ expected.add(notification(12345, Type.feedBlock, Level.warning, sourceCluster1, "disk (usage: 85.0%, feed block limit: 90.0%)"));
+ assertEquals(expected, curatorDb.readNotifications(tenant));
+
+ // Both resources over the limit
+ notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, 0.9, 0.3, 0.5)));
+ expected.set(6, notification(12345, Type.feedBlock, Level.error, sourceCluster1, "disk (usage: 95.0%, feed block limit: 90.0%)"));
+ assertEquals(expected, curatorDb.readNotifications(tenant));
+
+ // One resource at warning, one at error: Only show error message
+ notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, 0.9, 0.7, 0.5)));
+ expected.set(6, notification(12345, Type.feedBlock, Level.error, sourceCluster1,
+ "memory (usage: 70.0%, feed block limit: 50.0%)", "disk (usage: 95.0%, feed block limit: 90.0%)"));
+ assertEquals(expected, curatorDb.readNotifications(tenant));
+ }
+
+ @Test
+ public void feed_blocked_multiple_cluster_test() {
+ DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenant.value(), "app1", "instance1"), ZoneId.from("prod", "us-south-3"));
+ NotificationSource sourceCluster1 = NotificationSource.from(deploymentId, ClusterSpec.Id.from("cluster1"));
+ NotificationSource sourceCluster2 = NotificationSource.from(deploymentId, ClusterSpec.Id.from("cluster2"));
+ NotificationSource sourceCluster3 = NotificationSource.from(deploymentId, ClusterSpec.Id.from("cluster3"));
+ List<Notification> expected = new ArrayList<>(notifications);
+
+ // Cluster1 and cluster2 are having issues
+ notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(
+ clusterMetrics("cluster1", 0.85, 0.9, 0.3, 0.5), clusterMetrics("cluster2", 0.6, 0.8, 0.9, 0.75), clusterMetrics("cluster3", 0.1, 0.8, 0.2, 0.9)));
+ expected.add(notification(12345, Type.feedBlock, Level.warning, sourceCluster1, "disk (usage: 85.0%, feed block limit: 90.0%)"));
+ expected.add(notification(12345, Type.feedBlock, Level.error, sourceCluster2, "memory (usage: 90.0%, feed block limit: 75.0%)"));
+ assertEquals(expected, curatorDb.readNotifications(tenant));
+
+ // Cluster1 improves, while cluster3 starts having issues
+ notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(
+ clusterMetrics("cluster1", 0.15, 0.9, 0.3, 0.5), clusterMetrics("cluster2", 0.6, 0.8, 0.9, 0.75), clusterMetrics("cluster3", 0.75, 0.8, 0.2, 0.9)));
+ expected.set(6, notification(12345, Type.feedBlock, Level.error, sourceCluster2, "memory (usage: 90.0%, feed block limit: 75.0%)"));
+ expected.set(7, notification(12345, Type.feedBlock, Level.warning, sourceCluster3, "disk (usage: 75.0%, feed block limit: 80.0%)"));
+ assertEquals(expected, curatorDb.readNotifications(tenant));
+ }
+
@Before
public void init() {
curatorDb.writeNotifications(tenant, notifications);
@@ -101,7 +165,16 @@ public class NotificationsDbTest {
return Arrays.stream(indices).mapToObj(notifications::get).collect(Collectors.toCollection(ArrayList::new));
}
- private static Notification notification(long secondsSinceEpoch, Notification.Type type, NotificationSource source, String... messages) {
- return new Notification(Instant.ofEpochSecond(secondsSinceEpoch), type, source, List.of(messages));
+ private static Notification notification(long secondsSinceEpoch, Type type, Level level, NotificationSource source, String... messages) {
+ return new Notification(Instant.ofEpochSecond(secondsSinceEpoch), type, level, source, List.of(messages));
+ }
+
+ private static ClusterMetrics clusterMetrics(String clusterId, Double diskUtil, Double diskLimit, Double memoryUtil, Double memoryLimit) {
+ Map<String, Double> metrics = new HashMap<>();
+ if (diskUtil != null) metrics.put(ClusterMetrics.DISK_UTIL, diskUtil);
+ if (diskLimit != null) metrics.put(ClusterMetrics.DISK_FEED_BLOCK_LIMIT, diskLimit);
+ if (memoryUtil != null) metrics.put(ClusterMetrics.MEMORY_UTIL, memoryUtil);
+ if (memoryLimit != null) metrics.put(ClusterMetrics.MEMORY_FEED_BLOCK_LIMIT, memoryLimit);
+ return new ClusterMetrics(clusterId, "content", metrics);
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializerTest.java
index f3f2d10cfd0..f13f92dee85 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializerTest.java
@@ -28,11 +28,13 @@ public class NotificationsSerializerTest {
TenantName tenantName = TenantName.from("tenant1");
List<Notification> notifications = List.of(
new Notification(Instant.ofEpochSecond(1234),
- Notification.Type.APPLICATION_PACKAGE_WARNING,
+ Notification.Type.applicationPackage,
+ Notification.Level.warning,
NotificationSource.from(TenantAndApplicationId.from(tenantName.value(), "app1")),
List.of("Something something deprecated...")),
new Notification(Instant.ofEpochSecond(2345),
- Notification.Type.DEPLOYMENT_FAILURE,
+ Notification.Type.deployment,
+ Notification.Level.error,
NotificationSource.from(new RunId(ApplicationId.from(tenantName.value(), "app1", "instance1"), JobType.systemTest, 12)),
List.of("Failed to deploy: Out of capacity")));
@@ -40,12 +42,14 @@ public class NotificationsSerializerTest {
assertEquals("{\"notifications\":[" +
"{" +
"\"at\":1234000," +
- "\"type\":\"APPLICATION_PACKAGE_WARNING\"," +
+ "\"type\":\"applicationPackage\"," +
+ "\"level\":\"warning\"," +
"\"messages\":[\"Something something deprecated...\"]," +
"\"application\":\"app1\"" +
"},{" +
"\"at\":2345000," +
- "\"type\":\"DEPLOYMENT_FAILURE\"," +
+ "\"type\":\"deployment\"," +
+ "\"level\":\"error\"," +
"\"messages\":[\"Failed to deploy: Out of capacity\"]," +
"\"application\":\"app1\"," +
"\"instance\":\"instance1\"," +
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiCloudTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiCloudTest.java
index 7074d0d7354..5f76a30bf45 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiCloudTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiCloudTest.java
@@ -3,9 +3,11 @@ package com.yahoo.vespa.hosted.controller.restapi.application;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ApplicationName;
+import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.TenantName;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.flags.PermanentFlags;
+import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.LockedTenant;
import com.yahoo.vespa.hosted.controller.api.integration.billing.PlanId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
@@ -13,6 +15,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.secrets.TenantSecretSto
import com.yahoo.vespa.hosted.controller.api.role.Role;
import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
+import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
import com.yahoo.vespa.hosted.controller.restapi.ContainerTester;
import com.yahoo.vespa.hosted.controller.restapi.ControllerContainerCloudTest;
import com.yahoo.vespa.hosted.controller.security.Auth0Credentials;
@@ -23,6 +26,7 @@ import org.junit.Before;
import org.junit.Test;
import javax.ws.rs.ForbiddenException;
+import java.io.File;
import java.util.Collections;
import java.util.Optional;
import java.util.Set;
@@ -198,6 +202,11 @@ public class ApplicationApiCloudTest extends ControllerContainerCloudTest {
@Test
public void archive_uri_test() {
+ new DeploymentTester(new ControllerTester(tester))
+ .newDeploymentContext(ApplicationId.from(tenantName, applicationName, InstanceName.defaultName()))
+ .submit()
+ .deploy();
+
tester.assertResponse(request("/application/v4/tenant/scoober", GET).roles(Role.reader(tenantName)),
(response) -> assertFalse(response.getBodyAsString().contains("archiveAccessRole")),
200);
@@ -212,6 +221,10 @@ public class ApplicationApiCloudTest extends ControllerContainerCloudTest {
(response) -> assertTrue(response.getBodyAsString().contains("\"archiveAccessRole\":\"arn:aws:iam::123456789012:role/my-role\"")),
200);
+ tester.assertResponse(request("/application/v4/tenant/scoober/application/albums/environment/prod/region/aws-us-east-1c/instance/default", GET)
+ .roles(Role.reader(tenantName)),
+ new File("deployment-cloud.json"));
+
tester.assertResponse(request("/application/v4/tenant/scoober/archive-access", DELETE).roles(Role.administrator(tenantName)),
"{\"message\":\"Archive access role removed for tenant scoober.\"}", 200);
tester.assertResponse(request("/application/v4/tenant/scoober", GET).roles(Role.reader(tenantName)),
@@ -222,7 +235,7 @@ public class ApplicationApiCloudTest extends ControllerContainerCloudTest {
private ApplicationPackageBuilder prodBuilder() {
return new ApplicationPackageBuilder()
.instances("default")
- .region("aws-us-east-1a");
+ .region("aws-us-east-1c");
}
private void setupTenantAndApplication() {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
index 0137ea7eeba..10e398ad133 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
@@ -156,7 +156,6 @@ public class ApplicationApiTest extends ControllerContainerTest {
@Test
public void testApplicationApi() {
createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID); // (Necessary but not provided in this API)
- ((InMemoryFlagSource) tester.controller().flagSource()).withStringFlag(Flags.SYNC_HOST_LOGS_TO_S3_BUCKET.id(), "my-bucket");
// GET API root
tester.assertResponse(request("/application/v4/", GET).userIdentity(USER_ID),
@@ -1473,6 +1472,12 @@ public class ApplicationApiTest extends ControllerContainerTest {
.userIdentity(USER_ID),
new File("deployment-with-routing-policy.json"));
+ // GET deployment including legacy endpoints
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/instance1", GET)
+ .userIdentity(USER_ID)
+ .properties(Map.of("includeLegacyEndpoints", "true")),
+ new File("deployment-with-routing-policy-legacy.json"));
+
// Hide shared endpoints
((InMemoryFlagSource) tester.controller().flagSource()).withBooleanFlag(Flags.HIDE_SHARED_ROUTING_ENDPOINT.id(), true);
@@ -1641,11 +1646,13 @@ public class ApplicationApiTest extends ControllerContainerTest {
private void addNotifications(TenantName tenantName) {
tester.controller().notificationsDb().setNotification(
NotificationSource.from(TenantAndApplicationId.from(tenantName.value(), "app1")),
- Notification.Type.APPLICATION_PACKAGE_WARNING,
+ Notification.Type.applicationPackage,
+ Notification.Level.warning,
"Something something deprecated...");
tester.controller().notificationsDb().setNotification(
NotificationSource.from(new RunId(ApplicationId.from(tenantName.value(), "app2", "instance1"), JobType.systemTest, 12)),
- Notification.Type.DEPLOYMENT_FAILURE,
+ Notification.Type.deployment,
+ Notification.Level.error,
"Failed to deploy: Out of capacity");
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-cloud.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-cloud.json
new file mode 100644
index 00000000000..3353d80204e
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-cloud.json
@@ -0,0 +1,48 @@
+{
+ "tenant": "scoober",
+ "application": "albums",
+ "instance": "default",
+ "environment": "prod",
+ "region": "aws-us-east-1c",
+ "endpoints": [
+ {
+ "cluster": "default",
+ "tls": true,
+ "url": "https://albums.scoober.aws-us-east-1c.public.vespa.oath.cloud/",
+ "scope": "zone",
+ "routingMethod": "exclusive",
+ "legacy": false
+ }
+ ],
+ "clusters": "http://localhost:8080/application/v4/tenant/scoober/application/albums/instance/default/environment/prod/region/aws-us-east-1c/clusters",
+ "nodes": "http://localhost:8080/zone/v2/prod/aws-us-east-1c/nodes/v2/node/?recursive=true&application=scoober.albums.default",
+ "yamasUrl": "http://monitoring-system.test/?environment=prod&region=aws-us-east-1c&application=scoober.albums",
+ "version": "(ignore)",
+ "revision": "1.0.1-commit1",
+ "deployTimeEpochMs": "(ignore)",
+ "screwdriverId": "1000",
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1",
+ "applicationVersion": {
+ "hash": "1.0.1-commit1",
+ "build": 1,
+ "source": {
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1"
+ },
+ "sourceUrl": "repository1/tree/commit1",
+ "commit": "commit1"
+ },
+ "status": "complete",
+ "archiveUri": "s3://bucketName/scoober/",
+ "activity": {},
+ "metrics": {
+ "queriesPerSecond": 0.0,
+ "writesPerSecond": 0.0,
+ "documentCount": 0.0,
+ "queryLatencyMillis": 0.0,
+ "writeLatencyMillis": 0.0
+ }
+}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-with-routing-policy-legacy.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-with-routing-policy-legacy.json
new file mode 100644
index 00000000000..6efcc822264
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-with-routing-policy-legacy.json
@@ -0,0 +1,71 @@
+{
+ "tenant": "tenant1",
+ "application": "application1",
+ "instance": "instance1",
+ "environment": "prod",
+ "region": "us-west-1",
+ "endpoints": [
+ {
+ "cluster": "default",
+ "tls": true,
+ "url": "https://instance1.application1.tenant1.us-west-1.vespa.oath.cloud/",
+ "scope": "zone",
+ "routingMethod": "exclusive",
+ "legacy": false
+ },
+ {
+ "cluster": "default",
+ "tls": true,
+ "url": "https://instance1--application1--tenant1.us-west-1.vespa.oath.cloud:4443/",
+ "scope": "zone",
+ "routingMethod": "shared",
+ "legacy": false
+ },
+ {
+ "cluster": "default",
+ "tls": false,
+ "url": "http://instance1.application1.tenant1.us-west-1.prod.vespa.yahooapis.com:4080/",
+ "scope": "zone",
+ "routingMethod": "shared",
+ "legacy": true
+ },
+ {
+ "cluster": "default",
+ "tls": true,
+ "url": "https://instance1--application1--tenant1.us-west-1.prod.vespa.yahooapis.com:4443/",
+ "scope": "zone",
+ "routingMethod": "shared",
+ "legacy": true
+ }
+ ],
+ "clusters": "http://localhost:8080/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/clusters",
+ "nodes": "http://localhost:8080/zone/v2/prod/us-west-1/nodes/v2/node/?recursive=true&application=tenant1.application1.instance1",
+ "yamasUrl": "http://monitoring-system.test/?environment=prod&region=us-west-1&application=tenant1.application1.instance1",
+ "version": "6.1.0",
+ "revision": "1.0.1-commit1",
+ "deployTimeEpochMs": "(ignore)",
+ "screwdriverId": "1000",
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1",
+ "applicationVersion": {
+ "hash": "1.0.1-commit1",
+ "build": 1,
+ "source": {
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1"
+ },
+ "sourceUrl": "repository1/tree/commit1",
+ "commit": "commit1"
+ },
+ "status": "complete",
+ "activity": {},
+ "metrics": {
+ "queriesPerSecond": 0.0,
+ "writesPerSecond": 0.0,
+ "documentCount": 0.0,
+ "queryLatencyMillis": 0.0,
+ "writeLatencyMillis": 0.0
+ }
+}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-with-routing-policy.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-with-routing-policy.json
index 6b3c316a485..8767c369bc3 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-with-routing-policy.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-with-routing-policy.json
@@ -10,14 +10,16 @@
"tls": true,
"url": "https://instance1.application1.tenant1.us-west-1.vespa.oath.cloud/",
"scope": "zone",
- "routingMethod": "exclusive"
+ "routingMethod": "exclusive",
+ "legacy": false
},
{
"cluster": "default",
"tls": true,
"url": "https://instance1--application1--tenant1.us-west-1.vespa.oath.cloud:4443/",
"scope": "zone",
- "routingMethod": "shared"
+ "routingMethod": "shared",
+ "legacy": false
}
],
"clusters":"http://localhost:8080/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/clusters",
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-without-shared-endpoints.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-without-shared-endpoints.json
index 66fe28a95ad..b59c1d6cf73 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-without-shared-endpoints.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-without-shared-endpoints.json
@@ -10,7 +10,8 @@
"tls": true,
"url": "https://instance1.application1.tenant1.us-west-1.vespa.oath.cloud/",
"scope": "zone",
- "routingMethod": "exclusive"
+ "routingMethod": "exclusive",
+ "legacy": false
}
],
"clusters":"http://localhost:8080/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/clusters",
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment.json
index 443e49a3896..6c00d654008 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment.json
@@ -10,14 +10,16 @@
"tls": true,
"url": "https://instance1--application1--tenant1.us-central-1.vespa.oath.cloud:4443/",
"scope": "zone",
- "routingMethod": "shared"
+ "routingMethod": "shared",
+ "legacy": false
},
{
"cluster": "foo",
"tls": true,
"url": "https://instance1--application1--tenant1.global.vespa.oath.cloud:4443/",
"scope": "global",
- "routingMethod": "shared"
+ "routingMethod": "shared",
+ "legacy": false
}
],
"clusters": "http://localhost:8080/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/clusters",
@@ -51,7 +53,6 @@
"commit": "commit1"
},
"status": "complete",
- "archiveUri":"s3://my-bucket/tenant1/",
"activity": {
"lastQueried": 1527848130000,
"lastWritten": 1527848130000,
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-us-east-1.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-us-east-1.json
index 7181c4ee2be..1084afc9388 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-us-east-1.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-us-east-1.json
@@ -10,7 +10,8 @@
"tls": true,
"url": "https://instance1--application1--tenant1.us-east-1.dev.vespa.oath.cloud:4443/",
"scope": "zone",
- "routingMethod": "shared"
+ "routingMethod": "shared",
+ "legacy": false
}
],
"clusters":"http://localhost:8080/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/dev/region/us-east-1/clusters",
@@ -20,7 +21,6 @@
"revision": "(ignore)",
"deployTimeEpochMs": "(ignore)",
"screwdriverId": "123",
- "archiveUri":"s3://my-bucket/tenant1/",
"activity": {
"lastQueried": 1527848130000,
"lastWritten": 1527848130000,
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/notifications-tenant1-app2.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/notifications-tenant1-app2.json
index ab8262e26bd..0e0ca7405ca 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/notifications-tenant1-app2.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/notifications-tenant1-app2.json
@@ -3,7 +3,7 @@
{
"at": "(ignore)",
"level": "error",
- "type": "DEPLOYMENT_FAILURE",
+ "type": "deployment",
"messages": [
"Failed to deploy: Out of capacity"
],
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/notifications-tenant1.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/notifications-tenant1.json
index 2b2c03bb75a..7d3dd5e672f 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/notifications-tenant1.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/notifications-tenant1.json
@@ -3,7 +3,7 @@
{
"at": "(ignore)",
"level": "warning",
- "type": "APPLICATION_PACKAGE_WARNING",
+ "type": "applicationPackage",
"messages": [
"Something something deprecated..."
],
@@ -12,7 +12,7 @@
{
"at": "(ignore)",
"level": "error",
- "type": "DEPLOYMENT_FAILURE",
+ "type": "deployment",
"messages": [
"Failed to deploy: Out of capacity"
],
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/prod-us-central-1.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/prod-us-central-1.json
index 12bd5a6efbd..9059ea338b1 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/prod-us-central-1.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/prod-us-central-1.json
@@ -13,14 +13,16 @@
"tls": true,
"url": "https://instance1--application1--tenant1.us-central-1.vespa.oath.cloud:4443/",
"scope": "zone",
- "routingMethod": "shared"
+ "routingMethod": "shared",
+ "legacy": false
},
{
"cluster": "foo",
"tls": true,
"url": "https://instance1--application1--tenant1.global.vespa.oath.cloud:4443/",
"scope": "global",
- "routingMethod": "shared"
+ "routingMethod": "shared",
+ "legacy": false
}
],
"clusters":"http://localhost:8080/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-central-1/clusters",
@@ -54,7 +56,6 @@
"commit": "commit1"
},
"status": "complete",
- "archiveUri":"s3://my-bucket/tenant1/",
"activity": {
"lastQueried": 1527848130000,
"lastWritten": 1527848130000,
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandlerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandlerTest.java
index b88715efcc4..88b2b939c48 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandlerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandlerTest.java
@@ -20,6 +20,7 @@ import java.io.File;
import java.math.BigDecimal;
import java.time.LocalDate;
import java.time.ZoneId;
+import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.util.List;
import java.util.Map;
@@ -221,8 +222,8 @@ public class BillingApiHandlerTest extends ControllerContainerCloudTest {
assertEquals(CollectionMethod.INVOICE, billingController.getCollectionMethod(tenant));
}
- private Invoice createInvoice() {
- var start = LocalDate.of(2020, 5, 23).atStartOfDay(ZoneId.systemDefault());
+ static Invoice createInvoice() {
+ var start = LocalDate.of(2020, 5, 23).atStartOfDay(ZoneOffset.UTC);
var end = start.plusDays(5);
var statusHistory = new Invoice.StatusHistory(new TreeMap<>(Map.of(start, "OPEN")));
return new Invoice(
@@ -235,7 +236,7 @@ public class BillingApiHandlerTest extends ControllerContainerCloudTest {
);
}
- private Invoice.LineItem createLineItem(ZonedDateTime addedAt) {
+ static Invoice.LineItem createLineItem(ZonedDateTime addedAt) {
return new Invoice.LineItem(
"some-id",
"description",
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandlerV2Test.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandlerV2Test.java
new file mode 100644
index 00000000000..e733f8e27d6
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/billing/BillingApiHandlerV2Test.java
@@ -0,0 +1,138 @@
+package com.yahoo.vespa.hosted.controller.restapi.billing;
+
+import com.yahoo.application.container.handler.Request;
+import com.yahoo.config.provision.TenantName;
+import com.yahoo.test.ManualClock;
+import com.yahoo.vespa.hosted.controller.api.integration.billing.MockBillingController;
+import com.yahoo.vespa.hosted.controller.api.role.Role;
+import com.yahoo.vespa.hosted.controller.restapi.ContainerTester;
+import com.yahoo.vespa.hosted.controller.restapi.ControllerContainerCloudTest;
+import com.yahoo.vespa.hosted.controller.security.Auth0Credentials;
+import com.yahoo.vespa.hosted.controller.security.CloudTenantSpec;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.time.Instant;
+import java.util.Set;
+
+/**
+ * @author ogronnesby
+ */
+public class BillingApiHandlerV2Test extends ControllerContainerCloudTest {
+
+ private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/billing/responses/";
+
+ private static final TenantName tenant = TenantName.from("tenant1");
+ private static final TenantName tenant2 = TenantName.from("tenant2");
+ private static final Set<Role> tenantReader = Set.of(Role.reader(tenant));
+ private static final Set<Role> tenantAdmin = Set.of(Role.administrator(tenant));
+ private static final Set<Role> financeAdmin = Set.of(Role.hostedAccountant());
+
+ private static final String ACCESS_DENIED = "{\n" +
+ " \"code\" : 403,\n" +
+ " \"message\" : \"Access denied\"\n" +
+ "}";
+
+ private MockBillingController billingController;
+ private ContainerTester tester;
+
+ @Before
+ public void before() {
+ tester = new ContainerTester(container, responseFiles);
+ tester.controller().tenants().create(new CloudTenantSpec(tenant, ""), new Auth0Credentials(() -> "foo", Set.of(Role.hostedOperator())));
+ var clock = (ManualClock) tester.controller().serviceRegistry().clock();
+ clock.setInstant(Instant.parse("2021-04-13T00:00:00Z"));
+ billingController = (MockBillingController) tester.serviceRegistry().billingController();
+ billingController.addInvoice(tenant, BillingApiHandlerTest.createInvoice(), true);
+ }
+
+ @Override
+ protected String variablePartXml() {
+ return " <component id='com.yahoo.vespa.hosted.controller.security.CloudAccessControlRequests'/>\n" +
+ " <component id='com.yahoo.vespa.hosted.controller.security.CloudAccessControl'/>\n" +
+
+ " <handler id='com.yahoo.vespa.hosted.controller.restapi.billing.BillingApiHandlerV2'>\n" +
+ " <binding>http://*/billing/v2/*</binding>\n" +
+ " </handler>\n" +
+
+ " <http>\n" +
+ " <server id='default' port='8080' />\n" +
+ " <filtering>\n" +
+ " <request-chain id='default'>\n" +
+ " <filter id='com.yahoo.vespa.hosted.controller.restapi.filter.ControllerAuthorizationFilter'/>\n" +
+ " <binding>http://*/*</binding>\n" +
+ " </request-chain>\n" +
+ " </filtering>\n" +
+ " </http>\n";
+ }
+
+ @Test
+ public void require_tenant_info() {
+ var request = request("/billing/v2/tenant/" + tenant.value()).roles(tenantReader);
+ tester.assertResponse(request, "{\"tenant\":\"tenant1\",\"plan\":\"trial\",\"collection\":\"AUTO\"}");
+ }
+
+ @Test
+ public void require_admin_for_update_plan() {
+ var request = request("/billing/v2/tenant/" + tenant.value(), Request.Method.PATCH)
+ .data("{\"plan\": \"pay-as-you-go\"}");
+
+ var forbidden = request.roles(tenantReader);
+ tester.assertResponse(forbidden, ACCESS_DENIED, 403);
+ var success = request.roles(tenantAdmin);
+ tester.assertResponse(success, "{\"tenant\":\"tenant1\",\"plan\":\"pay-as-you-go\",\"collection\":\"AUTO\"}");
+ }
+
+ @Test
+ public void require_accountant_for_update_collection() {
+ var request = request("/billing/v2/tenant/" + tenant.value(), Request.Method.PATCH)
+ .data("{\"collection\": \"INVOICE\"}");
+
+ var forbidden = request.roles(tenantAdmin);
+ tester.assertResponse(forbidden, "{\"error-code\":\"FORBIDDEN\",\"message\":\"Only accountant can change billing method\"}", 403);
+
+ var success = request.roles(financeAdmin);
+ tester.assertResponse(success, "{\"tenant\":\"tenant1\",\"plan\":\"trial\",\"collection\":\"INVOICE\"}");
+ }
+
+ @Test
+ public void require_tenant_usage() {
+ var request = request("/billing/v2/tenant/" + tenant + "/usage").roles(tenantReader);
+ tester.assertResponse(request, "{\"from\":\"2021-04-13\",\"to\":\"2021-04-13\",\"total\":\"0.00\",\"items\":[]}");
+ }
+
+ @Test
+ public void require_tenant_invoice() {
+ var listRequest = request("/billing/v2/tenant/" + tenant + "/bill").roles(tenantReader);
+ tester.assertResponse(listRequest, "{\"invoices\":[{\"id\":\"id-1\",\"from\":\"2020-05-23\",\"to\":\"2020-05-23\",\"total\":\"123.00\",\"status\":\"OPEN\"}]}");
+
+ var singleRequest = request("/billing/v2/tenant/" + tenant + "/bill/id-1").roles(tenantReader);
+ tester.assertResponse(singleRequest, "{\"id\":\"id-1\",\"from\":\"2020-05-23\",\"to\":\"2020-05-23\",\"total\":\"123.00\",\"status\":\"OPEN\",\"statusHistory\":[{\"at\":\"2020-05-23T00:00:00Z\",\"status\":\"OPEN\"}],\"items\":[{\"id\":\"some-id\",\"description\":\"description\",\"amount\":\"123.00\",\"plan\":\"some-plan\",\"planName\":\"Plan with id: some-plan\",\"cpu\":{},\"memory\":{},\"disk\":{}}]}");
+ }
+
+ @Test
+ public void require_accountant_summary() {
+ var tenantRequest = request("/billing/v2/accountant").roles(tenantReader);
+ tester.assertResponse(tenantRequest, "{\n" +
+ " \"code\" : 403,\n" +
+ " \"message\" : \"Access denied\"\n" +
+ "}", 403);
+
+ var accountantRequest = request("/billing/v2/accountant").roles(Role.hostedAccountant());
+ tester.assertResponse(accountantRequest, "{\"tenants\":[{\"tenant\":\"tenant1\",\"plan\":\"trial\",\"collection\":\"AUTO\",\"lastBill\":null,\"unbilled\":\"0.00\"}]}");
+ }
+
+ @Test
+ public void require_accountant_tenant_preview() {
+ var accountantRequest = request("/billing/v2/accountant/preview/tenant/tenant1").roles(Role.hostedAccountant());
+ tester.assertResponse(accountantRequest, "{\"id\":\"empty\",\"from\":\"2021-04-13\",\"to\":\"2021-04-13\",\"total\":\"0.00\",\"status\":\"OPEN\",\"statusHistory\":[{\"at\":\"2021-04-13T00:00:00Z\",\"status\":\"OPEN\"}],\"items\":[]}");
+ }
+
+ @Test
+ public void require_accountant_tenant_bill() {
+ var accountantRequest = request("/billing/v2/accountant/preview/tenant/tenant1", Request.Method.POST)
+ .roles(Role.hostedAccountant())
+ .data("{\"from\": \"2020-05-01\",\"to\": \"2020-06-01\"}");
+ tester.assertResponse(accountantRequest, "{\"message\":\"Created bill id-123\"}");
+ }
+}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandlerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandlerTest.java
index c4412531f80..d87da62b8f2 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandlerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandlerTest.java
@@ -27,11 +27,15 @@ import java.time.Instant;
import java.time.ZonedDateTime;
import java.util.ArrayList;
import java.util.List;
+import java.util.UUID;
+
+import static org.junit.Assert.assertEquals;
public class ChangeManagementApiHandlerTest extends ControllerContainerTest {
private static final String responses = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/responses/";
private static final AthenzIdentity operator = AthenzUser.fromUserId("operatorUser");
+ private static final String changeRequestId = "id123";
private ContainerTester tester;
@@ -51,6 +55,36 @@ public class ChangeManagementApiHandlerTest extends ControllerContainerTest {
assertFile(new Request("http://localhost:8080/changemanagement/v1/vcmr"), "vcmrs.json");
}
+ @Test
+ public void deletes_vcmr() {
+ assertEquals(1, tester.controller().curator().readChangeRequests().size());
+ assertFile(new Request("http://localhost:8080/changemanagement/v1/vcmr/" + changeRequestId, "", Request.Method.DELETE), "vcmr.json");
+ assertEquals(0, tester.controller().curator().readChangeRequests().size());
+ }
+
+ @Test
+ public void get_vcmr() {
+ assertFile(new Request("http://localhost:8080/changemanagement/v1/vcmr/" + changeRequestId, "", Request.Method.GET), "vcmr.json");
+ }
+
+ @Test
+ public void patch_vcmr() {
+ var payload = "{" +
+ "\"approval\": \"REJECTED\"," +
+ "\"status\": \"COMPLETED\"," +
+ "\"actionPlan\": {" +
+ " \"hosts\": [{" +
+ " \"hostname\": \"host1\"," +
+ " \"state\": \"REQUIRES_OPERATOR_ACTION\"," +
+ " \"lastUpdated\": \"2021-05-10T14:08:15Z\"" +
+ "}]}" +
+ "}";
+ assertFile(new Request("http://localhost:8080/changemanagement/v1/vcmr/" + changeRequestId, payload, Request.Method.PATCH), "patched-vcmr.json");
+ var changeRequest = tester.controller().curator().readChangeRequest(changeRequestId).orElseThrow();
+ assertEquals(ChangeRequest.Approval.REJECTED, changeRequest.getApproval());
+ assertEquals(VespaChangeRequest.Status.COMPLETED, changeRequest.getStatus());
+ }
+
private void assertResponse(Request request, @Language("JSON") String body, int statusCode) {
addIdentityToRequest(request, operator);
tester.assertResponse(request, body, statusCode);
@@ -77,7 +111,7 @@ public class ChangeManagementApiHandlerTest extends ControllerContainerTest {
);
return new VespaChangeRequest(
- "id123",
+ changeRequestId,
source,
List.of("switch1"),
List.of("host1", "host2"),
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/responses/patched-vcmr.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/responses/patched-vcmr.json
new file mode 100644
index 00000000000..3db8b226b21
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/responses/patched-vcmr.json
@@ -0,0 +1,31 @@
+{
+ "id": "id123",
+ "status": "COMPLETED",
+ "impact": "VERY_HIGH",
+ "approval": "REJECTED",
+ "zoneId": "prod.default",
+ "source": {
+ "system": "aws",
+ "id": "id321",
+ "url": "url",
+ "plannedStartTime": "1970-01-01T00:00:09.001Z[UTC]",
+ "plannedEndTime": "1970-01-01T00:00:09.001Z[UTC]",
+ "status": "STARTED"
+ },
+ "actionPlan": {
+ "hosts": [
+ {
+ "hostname": "host1",
+ "state": "REQUIRES_OPERATOR_ACTION",
+ "lastUpdated": "2021-05-10T14:08:15Z"
+ }
+ ]
+ },
+ "impactedHosts": [
+ "host1",
+ "host2"
+ ],
+ "impactedSwitches": [
+ "switch1"
+ ]
+}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/responses/vcmr.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/responses/vcmr.json
new file mode 100644
index 00000000000..545fe289be8
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/responses/vcmr.json
@@ -0,0 +1,36 @@
+{
+ "id": "id123",
+ "status": "IN_PROGRESS",
+ "impact": "VERY_HIGH",
+ "approval": "APPROVED",
+ "zoneId": "prod.default",
+ "source": {
+ "system": "aws",
+ "id": "id321",
+ "url": "url",
+ "plannedStartTime": "1970-01-01T00:00:09.001Z[UTC]",
+ "plannedEndTime": "1970-01-01T00:00:09.001Z[UTC]",
+ "status": "STARTED"
+ },
+ "actionPlan": {
+ "hosts": [
+ {
+ "hostname": "host1",
+ "state": "RETIRING",
+ "lastUpdated": "1970-01-01T00:00:09.001Z"
+ },
+ {
+ "hostname": "host2",
+ "state": "RETIRED",
+ "lastUpdated": "1970-01-01T00:00:09.001Z"
+ }
+ ]
+ },
+ "impactedHosts": [
+ "host1",
+ "host2"
+ ],
+ "impactedSwitches": [
+ "switch1"
+ ]
+}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/BadgeApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/BadgeApiTest.java
index ff02a1e16be..ebde4671859 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/BadgeApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/BadgeApiTest.java
@@ -2,11 +2,18 @@
package com.yahoo.vespa.hosted.controller.restapi.deployment;
import com.yahoo.vespa.hosted.controller.ControllerTester;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
+import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
import com.yahoo.vespa.hosted.controller.restapi.ContainerTester;
import com.yahoo.vespa.hosted.controller.restapi.ControllerContainerTest;
import org.junit.Test;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+
/**
* @author jonmv
*/
@@ -15,15 +22,38 @@ public class BadgeApiTest extends ControllerContainerTest {
private static final String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/responses/";
@Test
- public void testBadgeApi() {
+ public void testBadgeApi() throws IOException {
ContainerTester tester = new ContainerTester(container, responseFiles);
var application = new DeploymentTester(new ControllerTester(tester)).newDeploymentContext("tenant", "application", "default");
- application.submit();
+ ApplicationPackage applicationPackage = new ApplicationPackageBuilder().systemTest()
+ .parallel("us-west-1", "aws-us-east-1a")
+ .test("us-west-1")
+ .region("ap-southeast-1")
+ .test("ap-southeast-1")
+ .region("eu-west-1")
+ .test("eu-west-1")
+ .build();
+ application.submit(applicationPackage).deploy();
+ application.submit(applicationPackage)
+ .runJob(JobType.systemTest)
+ .runJob(JobType.stagingTest)
+ .runJob(JobType.productionUsWest1)
+ .runJob(JobType.productionAwsUsEast1a)
+ .runJob(JobType.testUsWest1)
+ .runJob(JobType.productionApSoutheast1)
+ .failDeployment(JobType.testApSoutheast1);
+ application.submit(applicationPackage)
+ .runJob(JobType.systemTest)
+ .runJob(JobType.stagingTest);
+ for (int i = 0; i < 32; i++)
+ application.failDeployment(JobType.productionUsWest1);
+ application.triggerJobs();
+ tester.controller().applications().deploymentTrigger().reTrigger(application.instanceId(), JobType.testEuWest1);
tester.assertResponse(authenticatedRequest("http://localhost:8080/badge/v1/tenant/application/default"),
- "", 302);
- tester.assertResponse(authenticatedRequest("http://localhost:8080/badge/v1/tenant/application/default/system-test?historyLength=10"),
- "", 302);
+ Files.readString(Paths.get(responseFiles + "overview.svg")), 200);
+ tester.assertResponse(authenticatedRequest("http://localhost:8080/badge/v1/tenant/application/default/production-us-west-1?historyLength=32"),
+ Files.readString(Paths.get(responseFiles + "history.svg")), 200);
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/responses/history.svg b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/responses/history.svg
new file mode 100644
index 00000000000..f466f38750c
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/responses/history.svg
@@ -0,0 +1,177 @@
+<svg xmlns='http://www.w3.org/2000/svg' width='659.058159125822' height='20' role='img' aria-label='Deployment Status'>
+ <title>Deployment Status</title>
+ <linearGradient id='light' x2='0' y2='100%'>
+ <stop offset='0' stop-color='#fff' stop-opacity='.5'/>
+ <stop offset='.1' stop-color='#fff' stop-opacity='.15'/>
+ <stop offset='.9' stop-color='#000' stop-opacity='.15'/>
+ <stop offset='1' stop-color='#000' stop-opacity='.5'/>
+ </linearGradient>
+ <linearGradient id='left-light' x2='100%' y2='0'>
+ <stop offset='0' stop-color='#fff' stop-opacity='.3'/>
+ <stop offset='.5' stop-color='#fff' stop-opacity='.1'/>
+ <stop offset='1' stop-color='#fff' stop-opacity='.0'/>
+ </linearGradient>
+ <linearGradient id='right-shadow' x2='100%' y2='0'>
+ <stop offset='0' stop-color='#000' stop-opacity='.0'/>
+ <stop offset='.5' stop-color='#000' stop-opacity='.1'/>
+ <stop offset='1' stop-color='#000' stop-opacity='.3'/>
+ </linearGradient>
+ <linearGradient id='shadow' x2='100%' y2='0'>
+ <stop offset='0' stop-color='#222' stop-opacity='.3'/>
+ <stop offset='.625' stop-color='#555' stop-opacity='.3'/>
+ <stop offset='.9' stop-color='#555' stop-opacity='.05'/>
+ <stop offset='1' stop-color='#555' stop-opacity='.0'/>
+ </linearGradient>
+ <linearGradient id='shade' x2='100%' y2='0'>
+ <stop offset='0' stop-color='#000' stop-opacity='.20'/>
+ <stop offset='0.05' stop-color='#000' stop-opacity='.10'/>
+ <stop offset='1' stop-color='#000' stop-opacity='.0'/>
+ </linearGradient>
+ <linearGradient id='run-on-failure' x1='40%' x2='80%' y2='0%'>
+ <stop offset='0' stop-color='#ab83ff' />
+ <stop offset='1' stop-color='#bf103c' />
+ <animate attributeName='x1' values='-110%;150%;20%;-110%' dur='6s' repeatCount='indefinite' />
+ <animate attributeName='x2' values='-10%;250%;120%;-10%' dur='6s' repeatCount='indefinite' />
+ </linearGradient>
+ <linearGradient id='run-on-success' x1='40%' x2='80%' y2='0%'>
+ <stop offset='0' stop-color='#ab83ff' />
+ <stop offset='1' stop-color='#00f244' />
+ <animate attributeName='x1' values='-110%;150%;20%;-110%' dur='6s' repeatCount='indefinite' />
+ <animate attributeName='x2' values='-10%;250%;120%;-10%' dur='6s' repeatCount='indefinite' />
+ </linearGradient>
+ <clipPath id='rounded'>
+ <rect width='659.058159125822' height='20' rx='3' fill='#fff'/>
+ </clipPath>
+ <g clip-path='url(#rounded)'>
+ <rect x='653.26809109179' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='646.1879570885093' rx='3' width='13.080134003280707' height='20' fill='#00f244'/>
+ <rect x='646.1879570885093' rx='3' width='13.080134003280707' height='20' fill='url(#shade)'/>
+ <rect x='646.4043039211865' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='639.1078230852286' rx='3' width='13.296480835957981' height='20' fill='#bf103c'/>
+ <rect x='639.1078230852286' rx='3' width='13.296480835957981' height='20' fill='url(#shade)'/>
+ <rect x='639.3307808029524' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='631.8113422492706' rx='3' width='13.519438553681752' height='20' fill='#bf103c'/>
+ <rect x='631.8113422492706' rx='3' width='13.519438553681752' height='20' fill='url(#shade)'/>
+ <rect x='632.0411128600877' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='624.2919036955889' rx='3' width='13.749209164498811' height='20' fill='#bf103c'/>
+ <rect x='624.2919036955889' rx='3' width='13.749209164498811' height='20' fill='url(#shade)'/>
+ <rect x='624.5286953802824' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='616.5426945310901' rx='3' width='13.986000849192376' height='20' fill='#bf103c'/>
+ <rect x='616.5426945310901' rx='3' width='13.986000849192376' height='20' fill='url(#shade)'/>
+ <rect x='616.7867218317995' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='608.5566936818977' rx='3' width='14.230028149901685' height='20' fill='#bf103c'/>
+ <rect x='608.5566936818977' rx='3' width='14.230028149901685' height='20' fill='url(#shade)'/>
+ <rect x='608.8081776965013' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='600.326665531996' rx='3' width='14.48151216450522' height='20' fill='#bf103c'/>
+ <rect x='600.326665531996' rx='3' width='14.48151216450522' height='20' fill='url(#shade)'/>
+ <rect x='600.5858341144344' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='591.8451533674908' rx='3' width='14.740680746943664' height='20' fill='#bf103c'/>
+ <rect x='591.8451533674908' rx='3' width='14.740680746943664' height='20' fill='url(#shade)'/>
+ <rect x='592.1122413342111' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='583.1044726205471' rx='3' width='15.007768713664104' height='20' fill='#bf103c'/>
+ <rect x='583.1044726205471' rx='3' width='15.007768713664104' height='20' fill='url(#shade)'/>
+ <rect x='583.3797219632555' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='574.096703906883' rx='3' width='15.283018056372542' height='20' fill='#bf103c'/>
+ <rect x='574.096703906883' rx='3' width='15.283018056372542' height='20' fill='url(#shade)'/>
+ <rect x='574.380364011798' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='564.8136858505105' rx='3' width='15.566678161287442' height='20' fill='#bf103c'/>
+ <rect x='564.8136858505105' rx='3' width='15.566678161287442' height='20' fill='url(#shade)'/>
+ <rect x='565.1060137243161' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='555.2470076892231' rx='3' width='15.859006035092985' height='20' fill='#bf103c'/>
+ <rect x='555.2470076892231' rx='3' width='15.859006035092985' height='20' fill='url(#shade)'/>
+ <rect x='555.5482681919269' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='545.3880016541301' rx='3' width='16.16026653779677' height='20' fill='#bf103c'/>
+ <rect x='545.3880016541301' rx='3' width='16.16026653779677' height='20' fill='url(#shade)'/>
+ <rect x='545.6984677390362' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='535.2277351163333' rx='3' width='16.470732622702883' height='20' fill='#bf103c'/>
+ <rect x='535.2277351163333' rx='3' width='16.470732622702883' height='20' fill='url(#shade)'/>
+ <rect x='535.5476880773482' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='524.7570024936304' rx='3' width='16.790685583717845' height='20' fill='#bf103c'/>
+ <rect x='524.7570024936304' rx='3' width='16.790685583717845' height='20' fill='url(#shade)'/>
+ <rect x='525.0867322201259' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='513.9663169099125' rx='3' width='17.12041531021341' height='20' fill='#bf103c'/>
+ <rect x='513.9663169099125' rx='3' width='17.12041531021341' height='20' fill='url(#shade)'/>
+ <rect x='514.3061221493763' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='502.84590159969906' rx='3' width='17.460220549677203' height='20' fill='#bf103c'/>
+ <rect x='502.84590159969906' rx='3' width='17.460220549677203' height='20' fill='url(#shade)'/>
+ <rect x='503.196090228411' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='491.38568105002184' rx='3' width='17.810409178389147' height='20' fill='#bf103c'/>
+ <rect x='491.38568105002184' rx='3' width='17.810409178389147' height='20' fill='url(#shade)'/>
+ <rect x='491.7465703520016' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='479.5752718716327' rx='3' width='18.171298480368904' height='20' fill='#bf103c'/>
+ <rect x='479.5752718716327' rx='3' width='18.171298480368904' height='20' fill='url(#shade)'/>
+ <rect x='479.9471888261109' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='467.40397339126383' rx='3' width='18.543215434847077' height='20' fill='#bf103c'/>
+ <rect x='467.40397339126383' rx='3' width='18.543215434847077' height='20' fill='url(#shade)'/>
+ <rect x='467.7872549689374' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='454.86075795641676' rx='3' width='18.92649701252067' height='20' fill='#bf103c'/>
+ <rect x='454.86075795641676' rx='3' width='18.92649701252067' height='20' fill='url(#shade)'/>
+ <rect x='455.25575142475725' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='441.9342609438961' rx='3' width='19.32149048086113' height='20' fill='#bf103c'/>
+ <rect x='441.9342609438961' rx='3' width='19.32149048086113' height='20' fill='url(#shade)'/>
+ <rect x='442.3413241817867' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='428.61277046303496' rx='3' width='19.728553718751726' height='20' fill='#bf103c'/>
+ <rect x='428.61277046303496' rx='3' width='19.728553718751726' height='20' fill='url(#shade)'/>
+ <rect x='429.03227228502243' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='414.8842167442832' rx='3' width='20.148055540739207' height='20' fill='#bf103c'/>
+ <rect x='414.8842167442832' rx='3' width='20.148055540739207' height='20' fill='url(#shade)'/>
+ <rect x='415.31653723473755' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='400.736161203544' rx='3' width='20.58037603119359' height='20' fill='#bf103c'/>
+ <rect x='400.736161203544' rx='3' width='20.58037603119359' height='20' fill='url(#shade)'/>
+ <rect x='401.1816920610293' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='386.1557851723504' rx='3' width='21.025906888678875' height='20' fill='#bf103c'/>
+ <rect x='386.1557851723504' rx='3' width='21.025906888678875' height='20' fill='url(#shade)'/>
+ <rect x='386.61493006451815' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='371.12987828367153' rx='3' width='21.485051780846597' height='20' fill='#bf103c'/>
+ <rect x='371.12987828367153' rx='3' width='21.485051780846597' height='20' fill='url(#shade)'/>
+ <rect x='371.60305321299876' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='355.6448265028249' rx='3' width='21.958226710173836' height='20' fill='#bf103c'/>
+ <rect x='355.6448265028249' rx='3' width='21.958226710173836' height='20' fill='url(#shade)'/>
+ <rect x='356.13246018352817' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='339.68659979265107' rx='3' width='22.445860390877083' height='20' fill='#bf103c'/>
+ <rect x='339.68659979265107' rx='3' width='22.445860390877083' height='20' fill='url(#shade)'/>
+ <rect x='340.18913403911733' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='323.24073940177396' rx='3' width='22.948394637343355' height='20' fill='#bf103c'/>
+ <rect x='323.24073940177396' rx='3' width='22.948394637343355' height='20' fill='url(#shade)'/>
+ <rect x='323.7586295288612' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='306.2923447644306' rx='3' width='23.466284764430597' height='20' fill='#bf103c'/>
+ <rect x='306.2923447644306' rx='3' width='23.466284764430597' height='20' fill='url(#shade)'/>
+ <rect x='306.82606' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='288.82606' rx='3' width='24.0' height='20' fill='#bf103c'/>
+ <rect x='288.82606' rx='3' width='24.0' height='20' fill='url(#shade)'/>
+ <rect x='288.82606' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='163.18729000000002' rx='3' width='131.63876999999997' height='20' fill='url(#run-on-failure)'/>
+ <rect x='163.18729000000002' rx='3' width='131.63876999999997' height='20' fill='url(#shade)'/>
+ <rect width='169.18729000000002' height='20' fill='#404040'/>
+ <rect x='-6.0' rx='3' width='175.18729000000002' height='20' fill='url(#shade)'/>
+ <rect width='2' height='20' fill='url(#left-light)'/>
+ <rect x='657.058159125822' width='2' height='20' fill='url(#right-shadow)'/>
+ <rect width='659.058159125822' height='20' fill='url(#light)'/>
+ </g>
+ <g fill='#fff' text-anchor='middle' font-family='Verdana,Geneva,DejaVu Sans,sans-serif' text-rendering='geometricPrecision' font-size='11'>
+ <svg x='6.5' y='3.0' width='16.0' height='16.0' viewBox='0 0 150 150'>
+ <polygon fill='#402a14' fill-opacity='0.5' points='84.84 10 34.1 44.46 34.1 103.78 84.84 68.02 135.57 103.78 135.57 44.46 84.84 10'/>
+ <polygon fill='#402a14' fill-opacity='0.5' points='84.84 68.02 84.84 10 135.57 44.46 135.57 103.78 84.84 68.02'/>
+ <polygon fill='#061a29' fill-opacity='0.5' points='65.07 81.99 14.34 46.22 14.34 105.54 65.07 140 115.81 105.54 115.81 46.22 65.07 81.99'/>
+ <polygon fill='#061a29' fill-opacity='0.5' points='65.07 81.99 65.07 140 14.34 105.54 14.34 46.22 65.07 81.99'/>
+ </svg>
+ <svg x='6.0' y='2.0' width='16.0' height='16.0' viewBox='0 0 150 150'>
+ <linearGradient id='yellow-shaded' x1='91.17' y1='44.83' x2='136.24' y2='73.4' gradientUnits='userSpaceOnUse'>
+ <stop offset='0.01' stop-color='#c6783e'/>
+ <stop offset='0.54' stop-color='#ff9750'/>
+ </linearGradient>
+ <linearGradient id='blue-shaded' x1='60.71' y1='104.56' x2='-15.54' y2='63' gradientUnits='userSpaceOnUse'>
+ <stop offset='0' stop-color='#005a8e'/>
+ <stop offset='0.54' stop-color='#1a7db6'/>
+ </linearGradient>
+ <polygon fill='#ff9d4b' points='84.84 10 34.1 44.46 34.1 103.78 84.84 68.02 135.57 103.78 135.57 44.46 84.84 10'/>
+ <polygon fill='url(#yellow-shaded)' points='84.84 68.02 84.84 10 135.57 44.46 135.57 103.78 84.84 68.02'/>
+ <polygon fill='#1a7db6' points='65.07 81.99 14.34 46.22 14.34 105.54 65.07 140 115.81 105.54 115.81 46.22 65.07 81.99'/>
+ <polygon fill='url(#blue-shaded)' points='65.07 81.99 65.07 140 14.34 105.54 14.34 46.22 65.07 81.99'/>
+ </svg>
+ <text font-size='11' x='96.09364500000001' y='15' fill='#000' fill-opacity='.4' textLength='135.18729000000002'>tenant.application.default</text>
+ <text font-size='11' x='95.59364500000001' y='14' fill='#fff' textLength='135.18729000000002'>tenant.application.default</text>
+ <text font-size='11' x='232.506675' y='15' fill='#000' fill-opacity='.4' textLength='113.63876999999998'>production-us-west-1</text>
+ <text font-size='11' x='232.006675' y='14' fill='#fff' textLength='113.63876999999998'>production-us-west-1</text>
+ </g>
+</svg>
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/responses/overview.svg b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/responses/overview.svg
new file mode 100644
index 00000000000..dde2b740e37
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/responses/overview.svg
@@ -0,0 +1,118 @@
+<svg xmlns='http://www.w3.org/2000/svg' width='763.7809900000001' height='20' role='img' aria-label='Deployment Status'>
+ <title>Deployment Status</title>
+ <linearGradient id='light' x2='0' y2='100%'>
+ <stop offset='0' stop-color='#fff' stop-opacity='.5'/>
+ <stop offset='.1' stop-color='#fff' stop-opacity='.15'/>
+ <stop offset='.9' stop-color='#000' stop-opacity='.15'/>
+ <stop offset='1' stop-color='#000' stop-opacity='.5'/>
+ </linearGradient>
+ <linearGradient id='left-light' x2='100%' y2='0'>
+ <stop offset='0' stop-color='#fff' stop-opacity='.3'/>
+ <stop offset='.5' stop-color='#fff' stop-opacity='.1'/>
+ <stop offset='1' stop-color='#fff' stop-opacity='.0'/>
+ </linearGradient>
+ <linearGradient id='right-shadow' x2='100%' y2='0'>
+ <stop offset='0' stop-color='#000' stop-opacity='.0'/>
+ <stop offset='.5' stop-color='#000' stop-opacity='.1'/>
+ <stop offset='1' stop-color='#000' stop-opacity='.3'/>
+ </linearGradient>
+ <linearGradient id='shadow' x2='100%' y2='0'>
+ <stop offset='0' stop-color='#222' stop-opacity='.3'/>
+ <stop offset='.625' stop-color='#555' stop-opacity='.3'/>
+ <stop offset='.9' stop-color='#555' stop-opacity='.05'/>
+ <stop offset='1' stop-color='#555' stop-opacity='.0'/>
+ </linearGradient>
+ <linearGradient id='shade' x2='100%' y2='0'>
+ <stop offset='0' stop-color='#000' stop-opacity='.20'/>
+ <stop offset='0.05' stop-color='#000' stop-opacity='.10'/>
+ <stop offset='1' stop-color='#000' stop-opacity='.0'/>
+ </linearGradient>
+ <linearGradient id='run-on-failure' x1='40%' x2='80%' y2='0%'>
+ <stop offset='0' stop-color='#ab83ff' />
+ <stop offset='1' stop-color='#bf103c' />
+ <animate attributeName='x1' values='-110%;150%;20%;-110%' dur='6s' repeatCount='indefinite' />
+ <animate attributeName='x2' values='-10%;250%;120%;-10%' dur='6s' repeatCount='indefinite' />
+ </linearGradient>
+ <linearGradient id='run-on-success' x1='40%' x2='80%' y2='0%'>
+ <stop offset='0' stop-color='#ab83ff' />
+ <stop offset='1' stop-color='#00f244' />
+ <animate attributeName='x1' values='-110%;150%;20%;-110%' dur='6s' repeatCount='indefinite' />
+ <animate attributeName='x2' values='-10%;250%;120%;-10%' dur='6s' repeatCount='indefinite' />
+ </linearGradient>
+ <clipPath id='rounded'>
+ <rect width='763.7809900000001' height='20' rx='3' fill='#fff'/>
+ </clipPath>
+ <g clip-path='url(#rounded)'>
+ <rect x='757.7809900000001' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='725.59036' rx='3' width='38.19063' height='20' fill='url(#run-on-success)'/>
+ <polygon points='635.8470950000001 0 635.8470950000001 20 734.59036 20 742.59036 0' fill='#00f244'/>
+ <rect x='635.8470950000001' rx='3' width='131.74345499999998' height='20' fill='url(#shade)'/>
+ <rect x='635.8470950000001' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='603.656465' rx='3' width='38.19063' height='20' fill='#bf103c'/>
+ <polygon points='486.981225 0 486.981225 20 612.656465 20 620.656465 0' fill='#00f244'/>
+ <rect x='486.981225' rx='3' width='158.67543' height='20' fill='url(#shade)'/>
+ <rect x='486.981225' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='348.865175' rx='3' width='144.11604999999997' height='20' fill='url(#run-on-success)'/>
+ <rect x='358.865175' rx='3' width='134.11604999999997' height='20' fill='url(#shade)'/>
+ <rect x='358.865175' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='326.674545' rx='3' width='38.19063' height='20' fill='#00f244'/>
+ <polygon points='237.71563000000003 0 237.71563000000003 20 335.674545 20 343.674545 0' fill='url(#run-on-failure)'/>
+ <rect x='237.71563000000003' rx='3' width='130.959105' height='20' fill='url(#shade)'/>
+ <rect x='237.71563000000003' rx='3' width='8' height='20' fill='url(#shadow)'/>
+ <rect x='153.18729000000002' rx='3' width='90.52834000000001' height='20' fill='#00f244'/>
+ <rect x='163.18729000000002' rx='3' width='80.52834000000001' height='20' fill='url(#shade)'/>
+ <rect width='169.18729000000002' height='20' fill='#404040'/>
+ <rect x='-6.0' rx='3' width='175.18729000000002' height='20' fill='url(#shade)'/>
+ <rect width='2' height='20' fill='url(#left-light)'/>
+ <rect x='761.7809900000001' width='2' height='20' fill='url(#right-shadow)'/>
+ <rect width='763.7809900000001' height='20' fill='url(#light)'/>
+ </g>
+ <g fill='#fff' text-anchor='middle' font-family='Verdana,Geneva,DejaVu Sans,sans-serif' text-rendering='geometricPrecision' font-size='11'>
+ <svg x='6.5' y='3.0' width='16.0' height='16.0' viewBox='0 0 150 150'>
+ <polygon fill='#402a14' fill-opacity='0.5' points='84.84 10 34.1 44.46 34.1 103.78 84.84 68.02 135.57 103.78 135.57 44.46 84.84 10'/>
+ <polygon fill='#402a14' fill-opacity='0.5' points='84.84 68.02 84.84 10 135.57 44.46 135.57 103.78 84.84 68.02'/>
+ <polygon fill='#061a29' fill-opacity='0.5' points='65.07 81.99 14.34 46.22 14.34 105.54 65.07 140 115.81 105.54 115.81 46.22 65.07 81.99'/>
+ <polygon fill='#061a29' fill-opacity='0.5' points='65.07 81.99 65.07 140 14.34 105.54 14.34 46.22 65.07 81.99'/>
+ </svg>
+ <svg x='6.0' y='2.0' width='16.0' height='16.0' viewBox='0 0 150 150'>
+ <linearGradient id='yellow-shaded' x1='91.17' y1='44.83' x2='136.24' y2='73.4' gradientUnits='userSpaceOnUse'>
+ <stop offset='0.01' stop-color='#c6783e'/>
+ <stop offset='0.54' stop-color='#ff9750'/>
+ </linearGradient>
+ <linearGradient id='blue-shaded' x1='60.71' y1='104.56' x2='-15.54' y2='63' gradientUnits='userSpaceOnUse'>
+ <stop offset='0' stop-color='#005a8e'/>
+ <stop offset='0.54' stop-color='#1a7db6'/>
+ </linearGradient>
+ <polygon fill='#ff9d4b' points='84.84 10 34.1 44.46 34.1 103.78 84.84 68.02 135.57 103.78 135.57 44.46 84.84 10'/>
+ <polygon fill='url(#yellow-shaded)' points='84.84 68.02 84.84 10 135.57 44.46 135.57 103.78 84.84 68.02'/>
+ <polygon fill='#1a7db6' points='65.07 81.99 14.34 46.22 14.34 105.54 65.07 140 115.81 105.54 115.81 46.22 65.07 81.99'/>
+ <polygon fill='url(#blue-shaded)' points='65.07 81.99 65.07 140 14.34 105.54 14.34 46.22 65.07 81.99'/>
+ </svg>
+ <text font-size='11' x='96.09364500000001' y='15' fill='#000' fill-opacity='.4' textLength='135.18729000000002'>tenant.application.default</text>
+ <text font-size='11' x='95.59364500000001' y='14' fill='#fff' textLength='135.18729000000002'>tenant.application.default</text>
+ <text font-size='11' x='206.95146000000003' y='15' fill='#000' fill-opacity='.4' textLength='62.52834000000001'>system-test</text>
+ <text font-size='11' x='206.45146000000003' y='14' fill='#fff' textLength='62.52834000000001'>system-test</text>
+ <text font-size='11' x='276.60659250000003' y='15' fill='#000' fill-opacity='.4' textLength='52.781925'>us-west-1</text>
+ <text font-size='11' x='276.10659250000003' y='14' fill='#fff' textLength='52.781925'>us-west-1</text>
+ <text font-size='9' x='323.08605000000006' y='15' fill='#000' fill-opacity='.4' textLength='28.176989999999996'>deploy</text>
+ <text font-size='9' x='322.58605000000006' y='14' fill='#fff' textLength='28.176989999999996'>deploy</text>
+ <text font-size='9' x='351.26986000000005' y='15' fill='#000' fill-opacity='.4' textLength='16.190630000000002'>test</text>
+ <text font-size='9' x='350.76986000000005' y='14' fill='#fff' textLength='16.190630000000002'>test</text>
+ <text font-size='11' x='412.334705' y='15' fill='#000' fill-opacity='.4' textLength='81.93905999999998'>aws-us-east-1a</text>
+ <text font-size='11' x='411.834705' y='14' fill='#fff' textLength='81.93905999999998'>aws-us-east-1a</text>
+ <text font-size='9' x='473.39273000000003' y='15' fill='#000' fill-opacity='.4' textLength='28.176989999999996'>deploy</text>
+ <text font-size='9' x='472.89273000000003' y='14' fill='#fff' textLength='28.176989999999996'>deploy</text>
+ <text font-size='11' x='539.73035' y='15' fill='#000' fill-opacity='.4' textLength='80.49825'>ap-southeast-1</text>
+ <text font-size='11' x='539.23035' y='14' fill='#fff' textLength='80.49825'>ap-southeast-1</text>
+ <text font-size='9' x='600.06797' y='15' fill='#000' fill-opacity='.4' textLength='28.176989999999996'>deploy</text>
+ <text font-size='9' x='599.56797' y='14' fill='#fff' textLength='28.176989999999996'>deploy</text>
+ <text font-size='9' x='628.25178' y='15' fill='#000' fill-opacity='.4' textLength='16.190630000000002'>test</text>
+ <text font-size='9' x='627.75178' y='14' fill='#fff' textLength='16.190630000000002'>test</text>
+ <text font-size='11' x='675.1302325' y='15' fill='#000' fill-opacity='.4' textLength='53.566275'>eu-west-1</text>
+ <text font-size='11' x='674.6302325' y='14' fill='#fff' textLength='53.566275'>eu-west-1</text>
+ <text font-size='9' x='722.0018650000001' y='15' fill='#000' fill-opacity='.4' textLength='28.176989999999996'>deploy</text>
+ <text font-size='9' x='721.5018650000001' y='14' fill='#fff' textLength='28.176989999999996'>deploy</text>
+ <text font-size='9' x='750.1856750000001' y='15' fill='#000' fill-opacity='.4' textLength='16.190630000000002'>test</text>
+ <text font-size='9' x='749.6856750000001' y='14' fill='#fff' textLength='16.190630000000002'>test</text>
+ </g>
+</svg>
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java
index 136ed508a33..aa9775f1d43 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java
@@ -51,7 +51,7 @@ public class RotationRepositoryTest {
.region("us-west-1")
.build();
- private final DeploymentTester tester = new DeploymentTester(new ControllerTester(rotationsConfig));
+ private final DeploymentTester tester = new DeploymentTester(new ControllerTester(rotationsConfig, SystemName.main));
private final RotationRepository repository = tester.controller().routing().rotations();
private final DeploymentContext application = tester.newDeploymentContext("tenant1", "app1", "default");
@@ -92,7 +92,7 @@ public class RotationRepositoryTest {
@Test
public void strips_whitespace_in_rotation_fqdn() {
- var tester = new DeploymentTester(new ControllerTester(rotationsConfigWhitespaces));
+ var tester = new DeploymentTester(new ControllerTester(rotationsConfigWhitespaces, SystemName.main));
RotationRepository repository = tester.controller().routing().rotations();
var application2 = tester.newDeploymentContext("tenant1", "app2", "default");
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
index d03dec06753..047a4461f7c 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
@@ -14,7 +14,6 @@ import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.zone.RoutingMethod;
-import com.yahoo.config.provision.zone.ZoneApi;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.Instance;
@@ -28,11 +27,11 @@ import com.yahoo.vespa.hosted.controller.api.integration.dns.RecordName;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Endpoint;
import com.yahoo.vespa.hosted.controller.application.EndpointId;
+import com.yahoo.vespa.hosted.controller.application.EndpointList;
import com.yahoo.vespa.hosted.controller.application.SystemApplication;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentContext;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
-import com.yahoo.vespa.hosted.controller.integration.ServiceRegistryMock;
import com.yahoo.vespa.hosted.controller.integration.ZoneApiMock;
import com.yahoo.vespa.hosted.controller.maintenance.NameServiceDispatcher;
import com.yahoo.vespa.hosted.rotation.config.RotationsConfig;
@@ -313,7 +312,7 @@ public class RoutingPoliciesTest {
@Test
public void zone_routing_policies_without_dns_update() {
- var tester = new RoutingPoliciesTester(new DeploymentTester(), false);
+ var tester = new RoutingPoliciesTester(new DeploymentTester(), SystemName.main, false);
var context = tester.newDeploymentContext("tenant1", "app1", "default");
tester.provisionLoadBalancers(1, context.instanceId(), true, zone1, zone2);
context.submit(applicationPackage).deferLoadBalancerProvisioningIn(Environment.prod).deploy();
@@ -324,13 +323,17 @@ public class RoutingPoliciesTest {
@Test
public void global_routing_policies_in_rotationless_system() {
- var tester = new RoutingPoliciesTester(new DeploymentTester(new ControllerTester(new RotationsConfig.Builder().build())), true);
+ var tester = new RoutingPoliciesTester(SystemName.Public);
var context = tester.newDeploymentContext("tenant1", "app1", "default");
+ List<ZoneId> prodZones = tester.controllerTester().controller().zoneRegistry().zones().all().in(Environment.prod).ids();
+ ZoneId zone1 = prodZones.get(0);
+ ZoneId zone2 = prodZones.get(1);
tester.provisionLoadBalancers(1, context.instanceId(), zone1, zone2);
var applicationPackage = applicationPackageBuilder()
.region(zone1.region().value())
.endpoint("r0", "c0")
+ .trustDefaultCertificate()
.build();
context.submit(applicationPackage).deferLoadBalancerProvisioningIn(Environment.prod).deploy();
@@ -341,6 +344,39 @@ public class RoutingPoliciesTest {
}
@Test
+ public void global_routing_policies_in_public() {
+ var tester = new RoutingPoliciesTester(SystemName.Public);
+ var context = tester.newDeploymentContext("tenant1", "app1", "default");
+ List<ZoneId> prodZones = tester.controllerTester().controller().zoneRegistry().zones().all().in(Environment.prod).ids();
+ ZoneId zone1 = prodZones.get(0);
+ ZoneId zone2 = prodZones.get(1);
+
+ var applicationPackage = applicationPackageBuilder()
+ .region(zone1.region().value())
+ .region(zone2.region().value())
+ .endpoint("default", "default")
+ .trustDefaultCertificate()
+ .build();
+ context.submit(applicationPackage).deploy();
+
+ tester.assertTargets(context.instanceId(), EndpointId.defaultId(),
+ ClusterSpec.Id.from("default"), 0,
+ Map.of(zone1, 1L, zone2, 1L), true);
+ assertEquals("Registers expected DNS names",
+ Set.of("app1.tenant1.aws-eu-west-1-w.public.vespa.oath.cloud",
+ "app1.tenant1.aws-eu-west-1.r.vespa-app.cloud",
+ "app1.tenant1.aws-eu-west-1a.public.vespa.oath.cloud",
+ "app1.tenant1.aws-eu-west-1a.z.vespa-app.cloud",
+ "app1.tenant1.aws-us-east-1-w.public.vespa.oath.cloud",
+ "app1.tenant1.aws-us-east-1.r.vespa-app.cloud",
+ "app1.tenant1.aws-us-east-1c.public.vespa.oath.cloud",
+ "app1.tenant1.aws-us-east-1c.z.vespa-app.cloud",
+ "app1.tenant1.g.vespa-app.cloud",
+ "app1.tenant1.global.public.vespa.oath.cloud"),
+ tester.recordNames());
+ }
+
+ @Test
public void manual_deployment_creates_routing_policy() {
// Empty application package is valid in manually deployed environments
var tester = new RoutingPoliciesTester();
@@ -573,24 +609,11 @@ public class RoutingPoliciesTest {
var tester = new RoutingPoliciesTester(SystemName.Public);
// Configure the system to use the same region for test, staging and prod
- var sharedRegion = RegionName.from("aws-us-east-1c");
- var prodZone = ZoneId.from(Environment.prod, sharedRegion);
- var stagingZone = ZoneId.from(Environment.staging, sharedRegion);
- var testZone = ZoneId.from(Environment.test, sharedRegion);
- var zones = List.of(ZoneApiMock.from(prodZone),
- ZoneApiMock.from(stagingZone),
- ZoneApiMock.from(testZone));
- tester.controllerTester().zoneRegistry()
- .setZones(zones)
- .setRoutingMethod(zones, RoutingMethod.exclusive);
- tester.controllerTester().configServer().bootstrap(List.of(prodZone, stagingZone, testZone),
- SystemApplication.notController());
-
var context = tester.tester.newDeploymentContext();
var endpointId = EndpointId.of("r0");
var applicationPackage = applicationPackageBuilder()
.trustDefaultCertificate()
- .region(sharedRegion)
+ .region("aws-us-east-1c")
.endpoint(endpointId.id(), "default")
.build();
@@ -601,14 +624,14 @@ public class RoutingPoliciesTest {
// Since runJob implicitly tears down the deployment and immediately deletes DNS records associated with the
// deployment, we consume only one DNS update at a time here
do {
- context = context.flushDnsUpdates(1);
+ context.flushDnsUpdates(1);
tester.assertTargets(context.instanceId(), endpointId, 0);
} while (!tester.recordNames().isEmpty());
}
// Deployment completes
context.completeRollout();
- tester.assertTargets(context.instanceId(), endpointId, ClusterSpec.Id.from("default"), 0, Map.of(prodZone, 1L));
+ tester.assertTargets(context.instanceId(), endpointId, ClusterSpec.Id.from("default"), 0, Map.of(ZoneId.from("prod", "aws-us-east-1c"), 1L));
}
@Test
@@ -712,6 +735,14 @@ public class RoutingPoliciesTest {
return loadBalancers;
}
+ private static List<ZoneId> publicZones() {
+ var sharedRegion = RegionName.from("aws-us-east-1c");
+ return List.of(ZoneId.from(Environment.prod, sharedRegion),
+ ZoneId.from(Environment.prod, RegionName.from("aws-eu-west-1a")),
+ ZoneId.from(Environment.staging, sharedRegion),
+ ZoneId.from(Environment.test, sharedRegion));
+ }
+
private static class RoutingPoliciesTester {
private final DeploymentTester tester;
@@ -721,7 +752,26 @@ public class RoutingPoliciesTest {
}
public RoutingPoliciesTester(SystemName system) {
- this(new DeploymentTester(new ControllerTester(new ServiceRegistryMock(system))), true);
+ this(new DeploymentTester(system.isPublic()
+ ? new ControllerTester(new RotationsConfig.Builder().build(), system)
+ : new ControllerTester()),
+ system,
+ true);
+ }
+
+ public RoutingPoliciesTester(DeploymentTester tester, SystemName system, boolean exclusiveRouting) {
+ this.tester = tester;
+ List<ZoneId> zones;
+ if (system.isPublic()) {
+ zones = publicZones();
+ } else {
+ zones = new ArrayList<>(tester.controllerTester().zoneRegistry().zones().all().ids()); // Default zones
+ zones.add(zone4); // Missing from default ZoneRegistryMock zones
+ }
+ tester.controllerTester().setZones(zones, system);
+ if (exclusiveRouting) {
+ tester.controllerTester().setRoutingMethod(zones, RoutingMethod.exclusive);
+ }
}
public RoutingPolicies routingPolicies() {
@@ -740,19 +790,6 @@ public class RoutingPoliciesTest {
return tester.controllerTester();
}
- public RoutingPoliciesTester(DeploymentTester tester, boolean exclusiveRouting) {
- this.tester = tester;
- List<ZoneApi> zones = new ArrayList<>(tester.controllerTester().zoneRegistry().zones().all().zones());
- zones.add(ZoneApiMock.from(zone3));
- zones.add(ZoneApiMock.from(zone4));
- tester.controllerTester().zoneRegistry().setZones(zones);
- if (exclusiveRouting) {
- tester.controllerTester().zoneRegistry().exclusiveRoutingIn(zones);
- }
- tester.controllerTester().configServer().bootstrap(tester.controllerTester().zoneRegistry().zones().all().ids(),
- SystemApplication.notController());
- }
-
private void provisionLoadBalancers(int clustersPerZone, ApplicationId application, boolean shared, ZoneId... zones) {
for (ZoneId zone : zones) {
tester.configServer().removeLoadBalancers(application, zone);
@@ -790,14 +827,21 @@ public class RoutingPoliciesTest {
}
private void assertTargets(ApplicationId application, EndpointId endpointId, ClusterSpec.Id cluster, int loadBalancerId, Map<ZoneId, Long> zoneWeights) {
+ assertTargets(application, endpointId, cluster, loadBalancerId, zoneWeights, false);
+ }
+
+ private void assertTargets(ApplicationId application, EndpointId endpointId, ClusterSpec.Id cluster, int loadBalancerId, Map<ZoneId, Long> zoneWeights, boolean legacy) {
Set<String> latencyTargets = new HashSet<>();
Map<String, List<ZoneId>> zonesByRegionEndpoint = new HashMap<>();
for (var zone : zoneWeights.keySet()) {
- Endpoint regionEndpoint = tester.controller().routing().endpointsOf(new DeploymentId(application, zone))
- .scope(Endpoint.Scope.region)
- .cluster(cluster)
- .asList()
- .get(0);
+ DeploymentId deployment = new DeploymentId(application, zone);
+ EndpointList regionEndpoints = tester.controller().routing().endpointsOf(deployment)
+ .cluster(cluster)
+ .scope(Endpoint.Scope.region);
+ if (!legacy) {
+ regionEndpoints = regionEndpoints.not().legacy();
+ }
+ Endpoint regionEndpoint = regionEndpoints.first().orElseThrow(() -> new IllegalArgumentException("No region endpoint found for " + cluster + " in " + deployment));
zonesByRegionEndpoint.computeIfAbsent(regionEndpoint.dnsName(), (k) -> new ArrayList<>())
.add(zone);
}
diff --git a/default_build_settings.cmake b/default_build_settings.cmake
index 141b9e61bdb..326481e25c7 100644
--- a/default_build_settings.cmake
+++ b/default_build_settings.cmake
@@ -85,6 +85,13 @@ function(setup_vespa_default_build_settings_fedora_35)
set(DEFAULT_VESPA_LLVM_VERSION "12" PARENT_SCOPE)
endfunction()
+function(setup_vespa_default_build_settings_amzn_2)
+ message("-- Setting up default build settings for amzn 2")
+ set(DEFAULT_EXTRA_LINK_DIRECTORY "${VESPA_DEPS}/lib64" "/usr/lib64/llvm7.0/lib" PARENT_SCOPE)
+ set(DEFAULT_EXTRA_INCLUDE_DIRECTORY "${VESPA_DEPS}/include" "/usr/include/llvm7.0" "/usr/include/openblas" PARENT_SCOPE)
+ set(DEFAULT_VESPA_LLVM_VERSION "7" PARENT_SCOPE)
+endfunction()
+
function(setup_vespa_default_build_settings_ubuntu)
message("-- Setting up default build settings for ubuntu")
SET(CMAKE_FIND_PACKAGE_SORT_ORDER NATURAL)
@@ -194,6 +201,8 @@ function(vespa_use_default_build_settings)
setup_vespa_default_build_settings_fedora_34()
elseif(VESPA_OS_DISTRO_COMBINED STREQUAL "fedora 35")
setup_vespa_default_build_settings_fedora_35()
+ elseif(VESPA_OS_DISTRO_COMBINED STREQUAL "amzn 2")
+ setup_vespa_default_build_settings_amzn_2()
elseif(VESPA_OS_DISTRO STREQUAL "ubuntu")
setup_vespa_default_build_settings_ubuntu()
elseif(VESPA_OS_DISTRO STREQUAL "debian")
@@ -295,6 +304,9 @@ function(vespa_use_default_cxx_compiler)
if(APPLE)
set(DEFAULT_CMAKE_C_COMPILER "/usr/local/bin/gcc-10")
set(DEFAULT_CMAKE_CXX_COMPILER "/usr/local/bin/g++-10")
+ elseif(VESPA_OS_DISTRO_COMBINED STREQUAL "amzn 2")
+ set(DEFAULT_CMAKE_C_COMPILER "/usr/bin/gcc10-gcc")
+ set(DEFAULT_CMAKE_CXX_COMPILER "/usr/bin/gcc10-g++")
endif()
elseif(VESPA_COMPILER_VARIANT STREQUAL "clang")
if(APPLE)
diff --git a/dist/vespa.spec b/dist/vespa.spec
index 65b5bcfe220..2c75319da58 100644
--- a/dist/vespa.spec
+++ b/dist/vespa.spec
@@ -28,17 +28,30 @@ Source0: vespa-%{version}.tar.gz
%if 0%{?centos}
BuildRequires: epel-release
-%if 0%{?el7}
+%if 0%{?el7} && ! 0%{?amzn2}
BuildRequires: centos-release-scl
%endif
%endif
%if 0%{?el7}
+%if 0%{?amzn2}
+BuildRequires: gcc10-c++
+BuildRequires: libatomic10-devel
+BuildRequires: gcc10-binutils
+BuildRequires: maven
+%define _use_mvn_wrapper 1
+%define _java_home /usr/lib/jvm/java-11-amazon-corretto.%{?_arch}
+BuildRequires: python3-pytest
+%else
BuildRequires: devtoolset-9-gcc-c++
BuildRequires: devtoolset-9-libatomic-devel
BuildRequires: devtoolset-9-binutils
BuildRequires: rh-maven35
%define _devtoolset_enable /opt/rh/devtoolset-9/enable
%define _rhmaven35_enable /opt/rh/rh-maven35/enable
+BuildRequires: python36-pytest
+%endif
+BuildRequires: vespa-pybind11-devel
+BuildRequires: python3-devel
%endif
%if 0%{?el8}
%if 0%{?centos}
@@ -55,10 +68,16 @@ BuildRequires: gcc-toolset-9-binutils
%define _devtoolset_enable /opt/rh/gcc-toolset-9/enable
%endif
BuildRequires: maven
+BuildRequires: pybind11-devel
+BuildRequires: python3-pytest
+BuildRequires: python36-devel
%endif
%if 0%{?fedora}
BuildRequires: gcc-c++
BuildRequires: libatomic
+BuildRequires: pybind11-devel
+BuildRequires: python3-pytest
+BuildRequires: python3-devel
%endif
%if 0%{?el7}
BuildRequires: cmake3
@@ -70,9 +89,13 @@ BuildRequires: vespa-lz4-devel >= 1.9.2-2
BuildRequires: vespa-onnxruntime-devel = 1.7.1
BuildRequires: vespa-openssl-devel >= 1.1.1k-1
%if 0%{?centos}
-BuildRequires: vespa-protobuf-devel = 3.7.0-4
+%if 0%{?amzn2}
+BuildRequires: vespa-protobuf-devel = 3.7.0-5.amzn2
%else
-BuildRequires: vespa-protobuf-devel = 3.7.0-5
+BuildRequires: vespa-protobuf-devel = 3.7.0-4.el7
+%endif
+%else
+BuildRequires: vespa-protobuf-devel = 3.7.0-5.el7
%endif
BuildRequires: vespa-libzstd-devel >= 1.4.5-2
%endif
@@ -88,7 +111,7 @@ BuildRequires: openssl-devel
BuildRequires: vespa-gtest >= 1.8.1-1
BuildRequires: vespa-lz4-devel >= 1.9.2-2
BuildRequires: vespa-onnxruntime-devel = 1.7.1
-BuildRequires: vespa-protobuf-devel = 3.7.0-5
+BuildRequires: vespa-protobuf-devel = 3.7.0-5.el8
BuildRequires: vespa-libzstd-devel >= 1.4.5-2
%endif
%if 0%{?fedora}
@@ -127,23 +150,30 @@ BuildRequires: gtest-devel
BuildRequires: gmock-devel
%endif
%endif
+%if 0%{?el7} && 0%{?amzn2}
+BuildRequires: vespa-xxhash-devel = 0.8.0
+BuildRequires: vespa-openblas-devel = 0.3.12
+BuildRequires: vespa-re2-devel = 20190801
+%else
BuildRequires: xxhash-devel >= 0.8.0
BuildRequires: openblas-devel
-BuildRequires: zlib-devel
BuildRequires: re2-devel
+%endif
+BuildRequires: zlib-devel
%if ! 0%{?el7}
BuildRequires: libicu-devel
%endif
+%if 0%{?el7} && 0%{?amzn2}
+BuildRequires: java-11-amazon-corretto
+%else
BuildRequires: java-11-openjdk-devel
+%endif
BuildRequires: rpm-build
BuildRequires: make
BuildRequires: git
BuildRequires: systemd
BuildRequires: flex >= 2.5.0
BuildRequires: bison >= 3.0.0
-%if 0%{?centos}
-Requires: epel-release
-%endif
Requires: which
Requires: initscripts
Requires: libcgroup-tools
@@ -166,18 +196,13 @@ Requires: perl-URI
%if ! 0%{?el7}
Requires: valgrind
%endif
+%if 0%{?el7} && 0%{?amzn2}
+Requires: vespa-xxhash = 0.8.0
+%else
Requires: xxhash
Requires: xxhash-libs >= 0.8.0
-%if 0%{?el8}
-Requires: openblas
-%else
-Requires: openblas-serial
%endif
Requires: zlib
-Requires: re2
-%if ! 0%{?el7}
-Requires: libicu
-%endif
Requires: perf
Requires: gdb
Requires: nc
@@ -187,77 +212,63 @@ Requires: unzip
Requires: zstd
%if 0%{?el7}
Requires: llvm7.0
-Requires: vespa-icu >= 65.1.0-1
-Requires: vespa-lz4 >= 1.9.2-2
-Requires: vespa-onnxruntime = 1.7.1
-Requires: vespa-openssl >= 1.1.1k-1
-%if 0%{?centos}
-Requires: vespa-protobuf = 3.7.0-4
-%else
-Requires: vespa-protobuf = 3.7.0-5
-%endif
+%if ! 0%{?amzn2}
Requires: vespa-telegraf >= 1.1.1-1
-Requires: vespa-valgrind >= 3.16.0-1
-Requires: vespa-zstd >= 1.4.5-2
+Requires: vespa-valgrind >= 3.17.0-1
+%endif
%define _vespa_llvm_version 7
%define _extra_link_directory /usr/lib64/llvm7.0/lib;%{_vespa_deps_prefix}/lib64
+%if 0%{?amzn2}
+%define _extra_include_directory /usr/include/llvm7.0;%{_vespa_deps_prefix}/include
+%else
%define _extra_include_directory /usr/include/llvm7.0;%{_vespa_deps_prefix}/include;/usr/include/openblas
%endif
+%endif
%if 0%{?el8}
%if 0%{?_centos_stream}
-Requires: llvm-libs >= 11.0.0
%define _vespa_llvm_version 11
%else
-Requires: llvm-libs >= 10.0.1
%define _vespa_llvm_version 10
%endif
-Requires: openssl-libs
-Requires: vespa-lz4 >= 1.9.2-2
-Requires: vespa-onnxruntime = 1.7.1
-Requires: vespa-protobuf = 3.7.0-5
-Requires: vespa-zstd >= 1.4.5-2
%define _extra_link_directory %{_vespa_deps_prefix}/lib64
%define _extra_include_directory %{_vespa_deps_prefix}/include;/usr/include/openblas
%endif
%if 0%{?fedora}
-Requires: openssl-libs
-Requires: vespa-lz4 >= 1.9.2-2
-Requires: vespa-onnxruntime = 1.7.1
-Requires: vespa-zstd >= 1.4.5-2
%if 0%{?fc32}
-Requires: protobuf
-Requires: llvm-libs >= 10.0.0
%define _vespa_llvm_version 10
%endif
%if 0%{?fc33}
-Requires: protobuf
-Requires: llvm-libs >= 11.0.0
%define _vespa_llvm_version 11
%endif
%if 0%{?fc34}
-Requires: protobuf
-Requires: llvm-libs >= 12.0.0
%define _vespa_llvm_version 12
%endif
%if 0%{?fc35}
-Requires: protobuf
-Requires: llvm-libs >= 12.0.0
%define _vespa_llvm_version 12
%endif
%define _extra_link_directory %{_vespa_deps_prefix}/lib64
%define _extra_include_directory %{_vespa_deps_prefix}/include;/usr/include/openblas
%endif
+%ifnarch x86_64
+%define _skip_vespamalloc 1
+%endif
Requires: %{name}-base = %{version}-%{release}
-Requires: %{name}-base-libs = %{version}-%{release}
+Requires: %{name}-libs = %{version}-%{release}
Requires: %{name}-clients = %{version}-%{release}
Requires: %{name}-config-model-fat = %{version}-%{release}
Requires: %{name}-jars = %{version}-%{release}
+%if ! 0%{?_skip_vespamalloc:1}
Requires: %{name}-malloc = %{version}-%{release}
+%endif
Requires: %{name}-tools = %{version}-%{release}
# Ugly workaround because vespamalloc/src/vespamalloc/malloc/mmap.cpp uses the private
# _dl_sym function. Exclude automated reqires for libraries in /opt/vespa-deps/lib64.
+%if 0%{?amzn2}
+%global __requires_exclude ^lib(c\\.so\\.6\\(GLIBC_PRIVATE\\)|pthread\\.so\\.0\\(GLIBC_PRIVATE\\)|(crypto|icui18n|icuuc|lz4|protobuf|ssl|zstd|onnxruntime|openblas|re2|xxhash)\\.so\\.[0-9.]*\\([A-Z._0-9]*\\))\\(64bit\\)$
+%else
%global __requires_exclude ^lib(c\\.so\\.6\\(GLIBC_PRIVATE\\)|pthread\\.so\\.0\\(GLIBC_PRIVATE\\)|(crypto|icui18n|icuuc|lz4|protobuf|ssl|zstd|onnxruntime)\\.so\\.[0-9.]*\\([A-Z._0-9]*\\))\\(64bit\\)$
+%endif
%description
@@ -268,7 +279,11 @@ Vespa - The open big data serving engine
Summary: Vespa - The open big data serving engine - base
+%if 0%{?el7} && 0%{?amzn2}
+Requires: java-11-amazon-corretto
+%else
Requires: java-11-openjdk-devel
+%endif
Requires: perl
Requires: perl-Getopt-Long
Requires(pre): shadow-utils
@@ -279,9 +294,16 @@ Vespa - The open big data serving engine - base
%package base-libs
-Summary: Vespa - The open big data serving engine - base C++ libs
+Summary: Vespa - The open big data serving engine - base C++ libraries
+%if 0%{?centos}
+Requires: epel-release
+%endif
+%if 0%{?amzn2}
+Requires: vespa-xxhash = 0.8.0
+%else
Requires: xxhash-libs >= 0.8.0
+%endif
%if 0%{?el7}
Requires: vespa-openssl >= 1.1.1k-1
%else
@@ -289,10 +311,75 @@ Requires: openssl-libs
%endif
Requires: vespa-lz4 >= 1.9.2-2
Requires: vespa-libzstd >= 1.4.5-2
+%if 0%{?el8}
+Requires: openblas
+%else
+%if 0%{?amzn2}
+Requires: vespa-openblas
+%else
+Requires: openblas-serial
+%endif
+%endif
+%if 0%{?amzn2}
+Requires: vespa-re2 = 20190801
+%else
+Requires: re2
+%endif
%description base-libs
-Vespa - The open big data serving engine - base C++ libs
+Vespa - The open big data serving engine - base C++ libraries
+
+%package libs
+
+Summary: Vespa - The open big data serving engine - C++ libraries
+
+Requires: %{name}-base-libs = %{version}-%{release}
+%if 0%{?el7}
+Requires: llvm7.0-libs
+Requires: vespa-icu >= 65.1.0-1
+Requires: vespa-openssl >= 1.1.1k-1
+%if 0%{?centos}
+%if 0%{?amzn2}
+Requires: vespa-protobuf = 3.7.0-5.amzn2
+%else
+Requires: vespa-protobuf = 3.7.0-4.el7
+%endif
+%else
+Requires: vespa-protobuf = 3.7.0-5.el7
+%endif
+%else
+Requires: libicu
+Requires: openssl-libs
+%endif
+%if 0%{?el8}
+%if 0%{?_centos_stream}
+Requires: llvm-libs >= 11.0.0
+%else
+Requires: llvm-libs >= 10.0.1
+%endif
+Requires: vespa-protobuf = 3.7.0-5.el8
+%endif
+%if 0%{?fedora}
+Requires: protobuf
+%if 0%{?fc32}
+Requires: llvm-libs >= 10.0.0
+%endif
+%if 0%{?fc33}
+Requires: llvm-libs >= 11.0.0
+%endif
+%if 0%{?fc34}
+Requires: llvm-libs >= 12.0.0
+%endif
+%if 0%{?fc35}
+Requires: llvm-libs >= 12.0.0
+%endif
+%endif
+Requires: vespa-onnxruntime = 1.7.1
+
+%description libs
+
+Vespa - The open big data serving engine - C++ libraries
%package clients
@@ -329,6 +416,7 @@ Summary: Vespa - The open big data serving engine - shared java jar files
Vespa - The open big data serving engine - shared java jar files
+%if ! 0%{?_skip_vespamalloc:1}
%package malloc
Summary: Vespa - The open big data serving engine - malloc library
@@ -336,6 +424,7 @@ Summary: Vespa - The open big data serving engine - malloc library
%description malloc
Vespa - The open big data serving engine - malloc library
+%endif
%package tools
@@ -364,14 +453,21 @@ source %{_devtoolset_enable} || true
source %{_rhmaven35_enable} || true
%endif
+%if 0%{?_java_home:1}
+export JAVA_HOME=%{?_java_home}
+%else
export JAVA_HOME=/usr/lib/jvm/java-11-openjdk
+%endif
export PATH="$JAVA_HOME/bin:$PATH"
export FACTORY_VESPA_VERSION=%{version}
-sh bootstrap.sh java
-mvn --batch-mode -nsu -T 1C install -Dmaven.test.skip=true -Dmaven.javadoc.skip=true
+%if 0%{?_use_mvn_wrapper}
+mvn --batch-mode -e -N io.takari:maven:wrapper -Dmaven=3.6.3
+%endif
+%{?_use_mvn_wrapper:env VESPA_MAVEN_COMMAND=$(pwd)/mvnw }sh bootstrap.sh java
+%{?_use_mvn_wrapper:./mvnw}%{!?_use_mvn_wrapper:mvn} --batch-mode -nsu -T 1C install -Dmaven.test.skip=true -Dmaven.javadoc.skip=true
cmake3 -DCMAKE_INSTALL_PREFIX=%{_prefix} \
- -DJAVA_HOME=/usr/lib/jvm/java-11-openjdk \
+ -DJAVA_HOME=$JAVA_HOME \
-DCMAKE_PREFIX_PATH=%{_vespa_deps_prefix} \
-DEXTRA_LINK_DIRECTORY="%{_extra_link_directory}" \
-DEXTRA_INCLUDE_DIRECTORY="%{_extra_include_directory}" \
@@ -489,7 +585,9 @@ fi
%dir %{_prefix}/etc
%{_prefix}/etc/systemd
%{_prefix}/etc/vespa
+%if ! 0%{?_skip_vespamalloc:1}
%exclude %{_prefix}/etc/vespamalloc.conf
+%endif
%{_prefix}/include
%dir %{_prefix}/lib
%dir %{_prefix}/lib/jars
@@ -523,14 +621,6 @@ fi
%{_prefix}/lib/jars/vespa-testrunner-components-jar-with-dependencies.jar
%{_prefix}/lib/jars/zookeeper-command-line-client-jar-with-dependencies.jar
%{_prefix}/lib/perl5
-%{_prefix}/lib64
-%exclude %{_prefix}/lib64/libfastos.so
-%exclude %{_prefix}/lib64/libfnet.so
-%exclude %{_prefix}/lib64/libstaging_vespalib.so
-%exclude %{_prefix}/lib64/libvespadefaults.so
-%exclude %{_prefix}/lib64/libvespalib.so
-%exclude %{_prefix}/lib64/libvespalog.so
-%exclude %{_prefix}/lib64/vespa
%{_prefix}/libexec
%exclude %{_prefix}/libexec/vespa/common-env.sh
%exclude %{_prefix}/libexec/vespa/node-admin.sh
@@ -590,6 +680,22 @@ fi
%{_prefix}/lib64/libvespalib.so
%{_prefix}/lib64/libvespalog.so
+%files libs
+%if %{_defattr_is_vespa_vespa}
+%defattr(-,%{_vespa_user},%{_vespa_group},-)
+%endif
+%dir %{_prefix}
+%{_prefix}/lib64
+%exclude %{_prefix}/lib64/libfastos.so
+%exclude %{_prefix}/lib64/libfnet.so
+%exclude %{_prefix}/lib64/libstaging_vespalib.so
+%exclude %{_prefix}/lib64/libvespadefaults.so
+%exclude %{_prefix}/lib64/libvespalib.so
+%exclude %{_prefix}/lib64/libvespalog.so
+%if ! 0%{?_skip_vespamalloc:1}
+%exclude %{_prefix}/lib64/vespa
+%endif
+
%files clients
%if %{_defattr_is_vespa_vespa}
%defattr(-,%{_vespa_user},%{_vespa_group},-)
@@ -651,8 +757,6 @@ fi
%{_prefix}/lib/jars/hk2-*.jar
%{_prefix}/lib/jars/hosted-zone-api-jar-with-dependencies.jar
%{_prefix}/lib/jars/jackson-*.jar
-%{_prefix}/lib/jars/jakarta.activation-api-*.jar
-%{_prefix}/lib/jars/jakarta.xml.bind-api-*.jar
%{_prefix}/lib/jars/javassist-*.jar
%{_prefix}/lib/jars/javax.*.jar
%{_prefix}/lib/jars/jdisc-cloud-aws-jar-with-dependencies.jar
@@ -681,6 +785,7 @@ fi
%dir %{_prefix}/libexec/vespa
%{_prefix}/libexec/vespa/standalone-container.sh
+%if ! 0%{?_skip_vespamalloc:1}
%files malloc
%if %{_defattr_is_vespa_vespa}
%defattr(-,%{_vespa_user},%{_vespa_group},-)
@@ -690,6 +795,7 @@ fi
%config(noreplace) %{_prefix}/etc/vespamalloc.conf
%dir %{_prefix}/lib64
%{_prefix}/lib64/vespa
+%endif
%files tools
%if %{_defattr_is_vespa_vespa}
diff --git a/document/src/main/java/com/yahoo/document/json/readers/TensorReader.java b/document/src/main/java/com/yahoo/document/json/readers/TensorReader.java
index ad016a40fca..c6722322982 100644
--- a/document/src/main/java/com/yahoo/document/json/readers/TensorReader.java
+++ b/document/src/main/java/com/yahoo/document/json/readers/TensorReader.java
@@ -11,6 +11,7 @@ import com.yahoo.tensor.TensorAddress;
import com.yahoo.tensor.TensorType;
import static com.yahoo.document.json.readers.JsonParserHelpers.*;
+import static com.yahoo.tensor.serialization.JsonFormat.decodeHexString;
/**
* Reads the tensor format defined at
@@ -41,7 +42,7 @@ public class TensorReader {
else if (TENSOR_BLOCKS.equals(buffer.currentName()))
readTensorBlocks(buffer, builder);
else if (builder.type().dimensions().stream().anyMatch(d -> d.isIndexed())) // sparse can be empty
- throw new IllegalArgumentException("Expected a tensor value to contain either 'cells', 'values' or 'blocks'");
+ throw new IllegalArgumentException("Expected a tensor value to contain either 'cells', 'values' or 'blocks', but got: "+buffer.currentName());
}
expectObjectEnd(buffer.currentToken());
tensorFieldValue.assign(builder.build());
@@ -91,10 +92,18 @@ public class TensorReader {
throw new IllegalArgumentException("The 'values' field can only be used with dense tensors. " +
"Use 'cells' or 'blocks' instead");
IndexedTensor.BoundBuilder indexedBuilder = (IndexedTensor.BoundBuilder)builder;
+ if (buffer.currentToken() == JsonToken.VALUE_STRING) {
+ double[] decoded = decodeHexString(buffer.currentText(), builder.type().valueType());
+ for (int i = 0; i < decoded.length; i++) {
+ indexedBuilder.cellByDirectIndex(i, decoded[i]);
+ }
+ return;
+ }
int index = 0;
int initNesting = buffer.nesting();
- for (buffer.next(); buffer.nesting() >= initNesting; buffer.next())
+ for (buffer.next(); buffer.nesting() >= initNesting; buffer.next()) {
indexedBuilder.cellByDirectIndex(index++, readDouble(buffer));
+ }
expectCompositeEnd(buffer.currentToken());
}
diff --git a/document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java b/document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java
index da9ab4ea7bf..4fea220b2e8 100644
--- a/document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java
+++ b/document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java
@@ -164,6 +164,8 @@ public class JsonReaderTestCase {
new TensorDataType(new TensorType.Builder().mapped("x").mapped("y").build())));
x.addField(new Field("dense_tensor",
new TensorDataType(new TensorType.Builder().indexed("x", 2).indexed("y", 3).build())));
+ x.addField(new Field("dense_int8_tensor",
+ new TensorDataType(TensorType.fromSpec("tensor<int8>(x[2],y[3])"))));
x.addField(new Field("dense_unbound_tensor",
new TensorDataType(new TensorType.Builder().indexed("x").indexed("y").build())));
x.addField(new Field("mixed_tensor",
@@ -1324,6 +1326,25 @@ public class JsonReaderTestCase {
}
@Test
+ public void testParsingOfDenseTensorHexFormat() {
+ Tensor.Builder builder = Tensor.Builder.of(TensorType.fromSpec("tensor<int8>(x[2],y[3])"));
+ builder.cell().label("x", 0).label("y", 0).value(2.0);
+ builder.cell().label("x", 0).label("y", 1).value(3.0);
+ builder.cell().label("x", 0).label("y", 2).value(4.0);
+ builder.cell().label("x", 1).label("y", 0).value(5.0);
+ builder.cell().label("x", 1).label("y", 1).value(6.0);
+ builder.cell().label("x", 1).label("y", 2).value(7.0);
+ Tensor expected = builder.build();
+
+ Tensor tensor = assertTensorField(expected,
+ createPutWithTensor(inputJson("{",
+ " 'values': \"020304050607\"",
+ "}"), "dense_int8_tensor"), "dense_int8_tensor");
+ assertTrue(tensor instanceof IndexedTensor); // this matters for performance
+ }
+
+
+ @Test
public void testParsingOfMixedTensorOnMixedForm() {
Tensor.Builder builder = Tensor.Builder.of(TensorType.fromSpec("tensor(x{},y[3])"));
builder.cell().label("x", 0).label("y", 0).value(2.0);
diff --git a/document/src/test/java/com/yahoo/document/serialization/SerializationTestUtils.java b/document/src/test/java/com/yahoo/document/serialization/SerializationTestUtils.java
index 25021b0d2f8..951ee802e58 100644
--- a/document/src/test/java/com/yahoo/document/serialization/SerializationTestUtils.java
+++ b/document/src/test/java/com/yahoo/document/serialization/SerializationTestUtils.java
@@ -12,6 +12,7 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
+import java.nio.file.StandardCopyOption;
import static org.junit.Assert.assertEquals;
@@ -50,8 +51,10 @@ public class SerializationTestUtils {
public static void assertSerializationMatchesCpp(String binaryFilesFolder, String fileName,
Document document, TestDocumentFactory factory) throws IOException {
byte[] buf = serializeDocument(document);
- Files.write(Paths.get(binaryFilesFolder, fileName + "__java"), buf,
- StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING);
+ Files.write(Paths.get(binaryFilesFolder, fileName + "__java.new"), buf,
+ StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING);
+ Files.move(Paths.get(binaryFilesFolder, fileName + "__java.new"),
+ Paths.get(binaryFilesFolder, fileName + "__java"), StandardCopyOption.ATOMIC_MOVE);
assertDeserializeFromFile(Paths.get(binaryFilesFolder, fileName + "__java"), document, factory);
assertDeserializeFromFile(Paths.get(binaryFilesFolder, fileName + "__cpp"), document, factory);
diff --git a/document/src/test/resources/.gitattributes b/document/src/test/resources/.gitattributes
new file mode 100644
index 00000000000..7ab6af7a4f4
--- /dev/null
+++ b/document/src/test/resources/.gitattributes
@@ -0,0 +1,2 @@
+*__cpp binary
+*__java binary
diff --git a/document/src/test/resources/reference/reference_with_id__cpp b/document/src/test/resources/reference/reference_with_id__cpp
index d2668f8a5b1..6e9158827d7 100644
--- a/document/src/test/resources/reference/reference_with_id__cpp
+++ b/document/src/test/resources/reference/reference_with_id__cpp
Binary files differ
diff --git a/document/src/tests/serialization/vespadocumentserializer_test.cpp b/document/src/tests/serialization/vespadocumentserializer_test.cpp
index 0c9dfaf2e56..60ec692e078 100644
--- a/document/src/tests/serialization/vespadocumentserializer_test.cpp
+++ b/document/src/tests/serialization/vespadocumentserializer_test.cpp
@@ -713,7 +713,9 @@ void checkDeserialization(const string &name, std::unique_ptr<Slime> slime) {
const string data_dir = TEST_PATH("../../test/resources/predicates/");
PredicateFieldValue value(std::move(slime));
- serializeToFile(value, data_dir + name + "__cpp");
+ serializeToFile(value, data_dir + name + "__cpp.new");
+ vespalib::rename(data_dir + name + "__cpp.new",
+ data_dir + name + "__cpp");
deserializeAndCheck(data_dir + name + "__cpp", value);
deserializeAndCheck(data_dir + name + "__java", value);
@@ -841,7 +843,10 @@ void checkDeserialization(const string &name, std::unique_ptr<vespalib::eval::Va
if (tensor) {
value = std::move(tensor);
}
- serializeToFile(value, data_dir + name + "__cpp");
+ serializeToFile(value, data_dir + name + "__cpp.new");
+ vespalib::rename(data_dir + name + "__cpp.new",
+ data_dir + name + "__cpp");
+
deserializeAndCheck(data_dir + name + "__cpp", value);
deserializeAndCheck(data_dir + name + "__java", value);
}
@@ -966,8 +971,10 @@ struct RefFixture {
const ReferenceFieldValue& value) {
const string data_dir = TEST_PATH("../../test/resources/reference/");
const string field_name = "ref_field";
- serializeToFile(value, data_dir + file_base_name + "__cpp",
+ serializeToFile(value, data_dir + file_base_name + "__cpp.new",
ref_doc_type, field_name);
+ vespalib::rename(data_dir + file_base_name + "__cpp.new",
+ data_dir + file_base_name + "__cpp");
deserializeAndCheck(data_dir + file_base_name + "__cpp",
value, fixed_repo, field_name);
diff --git a/document/src/vespa/document/base/idstring.cpp b/document/src/vespa/document/base/idstring.cpp
index 6cdfec3c24a..4762c940939 100644
--- a/document/src/vespa/document/base/idstring.cpp
+++ b/document/src/vespa/document/base/idstring.cpp
@@ -7,6 +7,7 @@
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/util/optimized.h>
#include <cerrno>
+#include <cstring>
using vespalib::string;
using vespalib::stringref;
@@ -56,13 +57,16 @@ union FourByte {
const FourByte _G_null = {{'n', 'u', 'l', 'l'}};
const TwoByte _G_id = {{'i', 'd'}};
+#ifdef __x86_64__
typedef char v16qi __attribute__ ((__vector_size__(16)));
v16qi _G_zero = { ':', ':', ':', ':', ':', ':', ':', ':', ':', ':', ':', ':', ':', ':', ':', ':' };
+#endif
inline const char *
fmemchr(const char * s, const char * e)
{
+#ifdef __x86_64__
while (s+15 < e) {
#ifdef __clang__
v16qi tmpCurrent = __builtin_ia32_lddqu(s);
@@ -101,6 +105,9 @@ fmemchr(const char * s, const char * e)
s++;
}
return nullptr;
+#else
+ return static_cast<const char *>(memchr(s, ':', e - s));
+#endif
}
void
diff --git a/document/src/vespa/document/util/bytebuffer.cpp b/document/src/vespa/document/util/bytebuffer.cpp
index 644edb0664d..ad28096cfad 100644
--- a/document/src/vespa/document/util/bytebuffer.cpp
+++ b/document/src/vespa/document/util/bytebuffer.cpp
@@ -28,7 +28,7 @@ void throwOutOfBounds(size_t want, size_t has)
}
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__x86_64__) || defined(__AARCH64EL__)
template<typename T>
void
diff --git a/documentapi/abi-spec.json b/documentapi/abi-spec.json
index 36c2e1fda99..9cc4f60ed7e 100644
--- a/documentapi/abi-spec.json
+++ b/documentapi/abi-spec.json
@@ -168,11 +168,13 @@
"public com.yahoo.documentapi.DocumentOperationParameters withFieldSet(java.lang.String)",
"public com.yahoo.documentapi.DocumentOperationParameters withRoute(java.lang.String)",
"public com.yahoo.documentapi.DocumentOperationParameters withTraceLevel(int)",
+ "public com.yahoo.documentapi.DocumentOperationParameters withDeadline(java.time.Instant)",
"public com.yahoo.documentapi.DocumentOperationParameters withResponseHandler(com.yahoo.documentapi.ResponseHandler)",
"public java.util.Optional priority()",
"public java.util.Optional fieldSet()",
"public java.util.Optional route()",
"public java.util.OptionalInt traceLevel()",
+ "public java.util.Optional deadline()",
"public java.util.Optional responseHandler()",
"public boolean equals(java.lang.Object)",
"public int hashCode()",
@@ -414,6 +416,7 @@
"public static final enum com.yahoo.documentapi.Response$Outcome CONDITION_FAILED",
"public static final enum com.yahoo.documentapi.Response$Outcome NOT_FOUND",
"public static final enum com.yahoo.documentapi.Response$Outcome INSUFFICIENT_STORAGE",
+ "public static final enum com.yahoo.documentapi.Response$Outcome TIMEOUT",
"public static final enum com.yahoo.documentapi.Response$Outcome ERROR"
]
},
diff --git a/documentapi/pom.xml b/documentapi/pom.xml
index 5fb82e06d1b..0c4195d2b99 100644
--- a/documentapi/pom.xml
+++ b/documentapi/pom.xml
@@ -82,6 +82,13 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>abi-check-plugin</artifactId>
</plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <configuration>
+ <forkCount>2</forkCount>
+ </configuration>
+ </plugin>
</plugins>
</build>
</project>
diff --git a/documentapi/src/main/java/com/yahoo/documentapi/DocumentOperationParameters.java b/documentapi/src/main/java/com/yahoo/documentapi/DocumentOperationParameters.java
index 1d934680586..fa38312582e 100644
--- a/documentapi/src/main/java/com/yahoo/documentapi/DocumentOperationParameters.java
+++ b/documentapi/src/main/java/com/yahoo/documentapi/DocumentOperationParameters.java
@@ -5,6 +5,7 @@ import com.yahoo.document.fieldset.FieldSet;
import com.yahoo.document.fieldset.FieldSetRepo;
import com.yahoo.documentapi.messagebus.protocol.DocumentProtocol;
+import java.time.Instant;
import java.util.Objects;
import java.util.Optional;
import java.util.OptionalInt;
@@ -18,20 +19,22 @@ import static java.util.Objects.requireNonNull;
*/
public class DocumentOperationParameters {
- private static final DocumentOperationParameters empty = new DocumentOperationParameters(null, null, null, -1, null);
+ private static final DocumentOperationParameters empty = new DocumentOperationParameters(null, null, null, -1, null, null);
private final DocumentProtocol.Priority priority;
private final String fieldSet;
private final String route;
private final int traceLevel;
+ private final Instant deadline;
private final ResponseHandler responseHandler;
private DocumentOperationParameters(DocumentProtocol.Priority priority, String fieldSet, String route,
- int traceLevel, ResponseHandler responseHandler) {
+ int traceLevel, Instant deadline, ResponseHandler responseHandler) {
this.priority = priority;
this.fieldSet = fieldSet;
this.route = route;
this.traceLevel = traceLevel;
+ this.deadline = deadline;
this.responseHandler = responseHandler;
}
@@ -41,22 +44,22 @@ public class DocumentOperationParameters {
/** Sets the priority with which to perform an operation. */
public DocumentOperationParameters withPriority(DocumentProtocol.Priority priority) {
- return new DocumentOperationParameters(requireNonNull(priority), fieldSet, route, traceLevel, responseHandler);
+ return new DocumentOperationParameters(requireNonNull(priority), fieldSet, route, traceLevel, deadline, responseHandler);
}
/** Sets the field set used for retrieval. */
public DocumentOperationParameters withFieldSet(FieldSet fieldSet) {
- return new DocumentOperationParameters(priority, new FieldSetRepo().serialize(fieldSet), route, traceLevel, responseHandler);
+ return new DocumentOperationParameters(priority, new FieldSetRepo().serialize(fieldSet), route, traceLevel, deadline, responseHandler);
}
/** Sets the field set used for retrieval. */
public DocumentOperationParameters withFieldSet(String fieldSet) {
- return new DocumentOperationParameters(priority, requireNonNull(fieldSet), route, traceLevel, responseHandler);
+ return new DocumentOperationParameters(priority, requireNonNull(fieldSet), route, traceLevel, deadline, responseHandler);
}
/** Sets the route along which to send the operation. */
public DocumentOperationParameters withRoute(String route) {
- return new DocumentOperationParameters(priority, fieldSet, requireNonNull(route), traceLevel, responseHandler);
+ return new DocumentOperationParameters(priority, fieldSet, requireNonNull(route), traceLevel, deadline, responseHandler);
}
/** Sets the trace level for an operation. */
@@ -64,18 +67,24 @@ public class DocumentOperationParameters {
if (traceLevel < 0 || traceLevel > 9)
throw new IllegalArgumentException("Trace level must be from 0 (no tracing) to 9 (maximum)");
- return new DocumentOperationParameters(priority, fieldSet, route, traceLevel, responseHandler);
+ return new DocumentOperationParameters(priority, fieldSet, route, traceLevel, deadline, responseHandler);
+ }
+
+ /** Sets the deadline for an operation. */
+ public DocumentOperationParameters withDeadline(Instant deadline) {
+ return new DocumentOperationParameters(priority, fieldSet, route, traceLevel, requireNonNull(deadline), responseHandler);
}
/** Sets the {@link ResponseHandler} to handle the {@link Response} of an async operation, instead of the session default. */
public DocumentOperationParameters withResponseHandler(ResponseHandler responseHandler) {
- return new DocumentOperationParameters(priority, fieldSet, route, traceLevel, requireNonNull(responseHandler));
+ return new DocumentOperationParameters(priority, fieldSet, route, traceLevel, deadline, requireNonNull(responseHandler));
}
public Optional<DocumentProtocol.Priority> priority() { return Optional.ofNullable(priority); }
public Optional<String> fieldSet() { return Optional.ofNullable(fieldSet); }
public Optional<String> route() { return Optional.ofNullable(route); }
public OptionalInt traceLevel() { return traceLevel >= 0 ? OptionalInt.of(traceLevel) : OptionalInt.empty(); }
+ public Optional<Instant> deadline() { return Optional.ofNullable(deadline); }
public Optional<ResponseHandler> responseHandler() { return Optional.ofNullable(responseHandler); }
@Override
diff --git a/documentapi/src/main/java/com/yahoo/documentapi/Response.java b/documentapi/src/main/java/com/yahoo/documentapi/Response.java
index cea9f247ade..4e4e038e3fc 100644
--- a/documentapi/src/main/java/com/yahoo/documentapi/Response.java
+++ b/documentapi/src/main/java/com/yahoo/documentapi/Response.java
@@ -127,6 +127,9 @@ public class Response {
/** The operation failed because the cluster had insufficient storage to accept it. */
INSUFFICIENT_STORAGE,
+ /** The operation timed out before it reached its destination. */
+ TIMEOUT,
+
/** The operation failed for some unknown reason. */
ERROR
diff --git a/documentapi/src/main/java/com/yahoo/documentapi/Result.java b/documentapi/src/main/java/com/yahoo/documentapi/Result.java
index 9b77090ea6d..38c49873d9b 100644
--- a/documentapi/src/main/java/com/yahoo/documentapi/Result.java
+++ b/documentapi/src/main/java/com/yahoo/documentapi/Result.java
@@ -82,7 +82,7 @@ public class Result {
/** The request failed, and retrying is pointless. */
FATAL_ERROR,
/** Condition specified in operation not met error */
- @Deprecated(since = "7", forRemoval = true) // TODO: Remove on Vespa 8 — this is a Response outcome, not a Result outcome.
+ @Deprecated(since = "7", forRemoval = true) // TODO: Remove on Vespa 8 — this is a Response outcome, not a Result outcome.
CONDITION_NOT_MET_ERROR
}
diff --git a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/MessageBusAsyncSession.java b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/MessageBusAsyncSession.java
index 5def71e2d81..1da6f8bb472 100644
--- a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/MessageBusAsyncSession.java
+++ b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/MessageBusAsyncSession.java
@@ -26,7 +26,6 @@ import com.yahoo.documentapi.messagebus.protocol.RemoveDocumentMessage;
import com.yahoo.documentapi.messagebus.protocol.RemoveDocumentReply;
import com.yahoo.documentapi.messagebus.protocol.UpdateDocumentMessage;
import com.yahoo.documentapi.messagebus.protocol.UpdateDocumentReply;
-import java.util.logging.Level;
import com.yahoo.messagebus.ErrorCode;
import com.yahoo.messagebus.Message;
import com.yahoo.messagebus.MessageBus;
@@ -36,11 +35,14 @@ import com.yahoo.messagebus.SourceSession;
import com.yahoo.messagebus.StaticThrottlePolicy;
import com.yahoo.messagebus.ThrottlePolicy;
+import java.time.Duration;
+import java.time.Instant;
import java.util.Queue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
+import java.util.logging.Level;
import java.util.logging.Logger;
import static com.yahoo.documentapi.DocumentOperationParameters.parameters;
@@ -49,6 +51,7 @@ import static com.yahoo.documentapi.Response.Outcome.ERROR;
import static com.yahoo.documentapi.Response.Outcome.INSUFFICIENT_STORAGE;
import static com.yahoo.documentapi.Response.Outcome.NOT_FOUND;
import static com.yahoo.documentapi.Response.Outcome.SUCCESS;
+import static com.yahoo.documentapi.Response.Outcome.TIMEOUT;
/**
* An access session which wraps a messagebus source session sending document messages.
@@ -168,6 +171,7 @@ public class MessageBusAsyncSession implements MessageBusSession, AsyncSession {
long reqId = requestId.incrementAndGet();
msg.setContext(new OperationContext(reqId, parameters.responseHandler().orElse(null)));
msg.getTrace().setLevel(parameters.traceLevel().orElse(traceLevel));
+ parameters.deadline().ifPresent(deadline -> msg.setTimeRemaining(Math.max(1, Duration.between(Instant.now(), deadline).toMillis())));
// Use route from parameters, or session route if non-default, or finally, defaults for get and non-get, if set. Phew!
String toRoute = parameters.route().orElse(mayOverrideWithGetOnlyRoute(msg) ? routeForGet : route);
if (toRoute != null) {
@@ -284,6 +288,8 @@ public class MessageBusAsyncSession implements MessageBusSession, AsyncSession {
if ( reply instanceof UpdateDocumentReply && ! ((UpdateDocumentReply) reply).wasFound()
|| reply instanceof RemoveDocumentReply && ! ((RemoveDocumentReply) reply).wasFound())
return NOT_FOUND;
+ if (reply.getErrorCodes().contains(ErrorCode.TIMEOUT))
+ return TIMEOUT;
return ERROR;
}
diff --git a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/MessageBusSyncSession.java b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/MessageBusSyncSession.java
index c7ab8a23e11..8aada611d80 100755
--- a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/MessageBusSyncSession.java
+++ b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/MessageBusSyncSession.java
@@ -28,6 +28,7 @@ import com.yahoo.messagebus.Reply;
import com.yahoo.messagebus.ReplyHandler;
import java.time.Duration;
+import java.time.Instant;
import static com.yahoo.documentapi.DocumentOperationParameters.parameters;
@@ -91,12 +92,12 @@ public class MessageBusSyncSession implements MessageBusSession, SyncSession, Re
}
private Reply syncSend(Message msg, DocumentOperationParameters parameters) {
- return syncSend(msg, defaultTimeout, parameters());
+ return syncSend(msg, defaultTimeout, parameters);
}
private Reply syncSend(Message msg, Duration timeout, DocumentOperationParameters parameters) {
if (timeout != null) {
- msg.setTimeRemaining(timeout.toMillis());
+ parameters = parameters.withDeadline(Instant.now().plus(timeout));
}
try {
RequestMonitor monitor = new RequestMonitor();
diff --git a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/MessageBusVisitorSession.java b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/MessageBusVisitorSession.java
index 248fcf7a624..f71fc54a302 100755
--- a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/MessageBusVisitorSession.java
+++ b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/MessageBusVisitorSession.java
@@ -968,7 +968,7 @@ public class MessageBusVisitorSession implements VisitorSession {
}
private long messageTimeoutMillis() {
- return !isInfiniteTimeout(params.getTimeoutMs()) ? params.getTimeoutMs() : 5 * 60 * 1000;
+ return !isInfiniteTimeout(params.getTimeoutMs()) ? Math.max(1, params.getTimeoutMs()) : 5 * 60 * 1000;
}
private long sessionTimeoutMillis() {
@@ -985,9 +985,10 @@ public class MessageBusVisitorSession implements VisitorSession {
private long computeBoundedMessageTimeoutMillis(long elapsedMs) {
final long messageTimeoutMillis = messageTimeoutMillis();
- return !isInfiniteTimeout(sessionTimeoutMillis())
- ? Math.min(sessionTimeoutMillis() - elapsedMs, messageTimeoutMillis)
- : messageTimeoutMillis;
+ return ! isInfiniteTimeout(sessionTimeoutMillis())
+ ? Math.min(Math.max(1, sessionTimeoutMillis() - elapsedMs),
+ messageTimeoutMillis)
+ : messageTimeoutMillis;
}
/**
diff --git a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/ContentPolicy.java b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/ContentPolicy.java
index f8e6989bbfa..4561099c74e 100644
--- a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/ContentPolicy.java
+++ b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/ContentPolicy.java
@@ -24,7 +24,6 @@ import com.yahoo.vdslib.state.State;
import com.yahoo.vespa.config.content.DistributionConfig;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
@@ -76,13 +75,26 @@ public class ContentPolicy extends SlobrokPolicy {
private static class Targets {
private final List<Integer> list;
- private final int total;
+ private final AtomicInteger size;
+ final int total;
Targets() {
- this(Collections.emptyList(), 1);
+ this(List.of(), 0);
}
Targets(List<Integer> list, int total) {
- this.list = list;
- this.total = total;
+ this.list = new CopyOnWriteArrayList<>(list);
+ this.size = new AtomicInteger(list.size());
+ this.total = Math.max(1, total);
+ }
+ Integer get(int i) {
+ return list.get(i);
+ }
+ void remove(Integer v) {
+ size.decrementAndGet();
+ list.add(null); // Avoid index out of bounds for racing getters.
+ list.remove(v);
+ }
+ int size() {
+ return size.get();
}
}
@@ -99,22 +111,22 @@ public class ContentPolicy extends SlobrokPolicy {
for (int i=0; i<state.getNodeCount(NodeType.DISTRIBUTOR); ++i) {
if (state.getNodeState(new Node(NodeType.DISTRIBUTOR, i)).getState().oneOf(upStates)) validRandomTargets.add(i);
}
- validTargets.set(new Targets(new CopyOnWriteArrayList<>(validRandomTargets), state.getNodeCount(NodeType.DISTRIBUTOR)));
+ validTargets.set(new Targets(validRandomTargets, state.getNodeCount(NodeType.DISTRIBUTOR)));
}
public abstract String getTargetSpec(Integer distributor, RoutingContext context);
String getRandomTargetSpec(RoutingContext context) {
Targets targets = validTargets.get();
// Try to use list of random targets, if at least X % of the nodes are up
- while ((targets.total != 0) &&
- (100 * targets.list.size() / targets.total >= requiredUpPercentageToSendToKnownGoodNodes))
+ while (100 * targets.size() >= requiredUpPercentageToSendToKnownGoodNodes * targets.total)
{
- int randIndex = randomizer.nextInt(targets.list.size());
- String targetSpec = getTargetSpec(targets.list.get(randIndex), context);
+ Integer distributor = targets.get(randomizer.nextInt(targets.size()));
+ if (distributor == null) continue;
+ String targetSpec = getTargetSpec(distributor, context);
if (targetSpec != null) {
context.trace(3, "Sending to random node seen up in cluster state");
return targetSpec;
}
- targets.list.remove(randIndex);
+ targets.remove(distributor);
}
context.trace(3, "Too few nodes seen up in state. Sending totally random.");
return getTargetSpec(null, context);
diff --git a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/ExternPolicy.java b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/ExternPolicy.java
index 030a0d2588c..94dfabb2c4f 100755
--- a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/ExternPolicy.java
+++ b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/ExternPolicy.java
@@ -59,6 +59,7 @@ public class ExternPolicy implements DocumentProtocolRoutingPolicy {
pattern = args[1];
session = pattern.substring(pos);
orb = new Supervisor(new Transport("externpolicy"));
+ orb.useSmallBuffers();
mirror = new Mirror(orb, slobroks);
error = null;
}
diff --git a/documentapi/src/test/java/com/yahoo/documentapi/messagebus/test/MessageBusDocumentApiTestCase.java b/documentapi/src/test/java/com/yahoo/documentapi/messagebus/test/MessageBusDocumentApiTestCase.java
index 4ba51f3b3d8..78e4e49b78d 100644
--- a/documentapi/src/test/java/com/yahoo/documentapi/messagebus/test/MessageBusDocumentApiTestCase.java
+++ b/documentapi/src/test/java/com/yahoo/documentapi/messagebus/test/MessageBusDocumentApiTestCase.java
@@ -1,9 +1,17 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.documentapi.messagebus.test;
+import com.yahoo.document.Document;
+import com.yahoo.document.DocumentId;
+import com.yahoo.document.DocumentPut;
+import com.yahoo.document.DocumentType;
import com.yahoo.document.select.parser.ParseException;
+import com.yahoo.documentapi.AsyncParameters;
+import com.yahoo.documentapi.AsyncSession;
import com.yahoo.documentapi.DocumentAccess;
+import com.yahoo.documentapi.DocumentOperationParameters;
import com.yahoo.documentapi.ProgressToken;
+import com.yahoo.documentapi.Response;
import com.yahoo.documentapi.VisitorParameters;
import com.yahoo.documentapi.VisitorSession;
import com.yahoo.documentapi.messagebus.MessageBusDocumentAccess;
@@ -22,6 +30,13 @@ import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+import java.time.Instant;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
/**
@@ -96,4 +111,26 @@ public class MessageBusDocumentApiTestCase extends AbstractDocumentApiTestCase {
// TODO(vekterli): test remote-to-local message sending as well?
// TODO(vekterli): test DocumentAccess shutdown during active ession?
}
+
+ @Test
+ public void requireThatTimeoutWorks() throws InterruptedException {
+ CountDownLatch latch = new CountDownLatch(1);
+ AtomicReference<Response> response = new AtomicReference<>();
+ AsyncSession session = access().createAsyncSession(new AsyncParameters());
+ DocumentType type = access().getDocumentTypeManager().getDocumentType("music");
+ Document doc1 = new Document(type, new DocumentId("id:ns:music::1"));
+ assertTrue(session.put(new DocumentPut(doc1),
+ DocumentOperationParameters.parameters()
+ .withResponseHandler(result -> {
+ response.set(result);
+ latch.countDown();
+ })
+ .withDeadline(Instant.now().minusSeconds(1)))
+ .isSuccess());
+ assertTrue(latch.await(60, TimeUnit.SECONDS));
+ assertNotNull(response.get());
+ assertEquals(Response.Outcome.TIMEOUT, response.get().outcome());
+ session.destroy();
+ }
+
}
diff --git a/eval/src/apps/tensor_conformance/generate.cpp b/eval/src/apps/tensor_conformance/generate.cpp
index 4776b9f220b..9ae33c1234f 100644
--- a/eval/src/apps/tensor_conformance/generate.cpp
+++ b/eval/src/apps/tensor_conformance/generate.cpp
@@ -432,9 +432,13 @@ void generate_expanding_reduce(TestBuilder &dst) {
//-----------------------------------------------------------------------------
void generate_converting_lambda(TestBuilder &dst) {
- auto spec = GenSpec::from_desc("x3y5_2");
+ auto dense = GenSpec::from_desc("x3");
+ auto sparse = GenSpec::from_desc("y5_2");
+ auto mixed = GenSpec::from_desc("x3y5_2");
// change cell type and dimension types
- dst.add("tensor<bfloat16>(x[5],y[10])(a{x:(x),y:(y)})", {{"a", spec}});
+ dst.add("tensor<bfloat16>(x[5])(a{x:(x)})", {{"a", dense}});
+ dst.add("tensor<bfloat16>(y[10])(a{y:(y)})", {{"a", sparse}});
+ dst.add("tensor<bfloat16>(x[5],y[10])(a{x:(x),y:(y)})", {{"a", mixed}});
}
//-----------------------------------------------------------------------------
@@ -477,6 +481,27 @@ void generate_erf_value_test(TestBuilder &dst) {
//-----------------------------------------------------------------------------
+void generate_nan_existence(TestBuilder &dst) {
+ auto seq1 = Seq({1.0, 1.0, my_nan, my_nan});
+ auto seq2 = Seq({2.0, 2.0, my_nan, my_nan});
+ auto sparse1 = GenSpec().from_desc("x8_1").seq(seq1);
+ auto sparse2 = GenSpec().from_desc("x8_2").seq(seq2);
+ auto mixed1 = GenSpec().from_desc("x4_1y4").seq(seq1);
+ auto mixed2 = GenSpec().from_desc("x4_2y4").seq(seq2);
+ // try to provoke differences between nan and non-existence
+ const vespalib::string inner_expr = "f(x,y)(if(isNan(x),11,x)+if(isNan(y),22,y))";
+ vespalib::string merge_expr = fmt("merge(a,b,%s)", inner_expr.c_str());
+ vespalib::string join_expr = fmt("join(a,b,%s)", inner_expr.c_str());
+ dst.add(merge_expr, {{"a", sparse1}, {"b", sparse2}});
+ dst.add(merge_expr, {{"a", mixed1}, {"b", mixed2}});
+ dst.add(join_expr, {{"a", sparse1}, {"b", sparse2}});
+ dst.add(join_expr, {{"a", mixed1}, {"b", mixed2}});
+ dst.add(join_expr, {{"a", sparse1}, {"b", mixed2}});
+ dst.add(join_expr, {{"a", mixed1}, {"b", sparse2}});
+}
+
+//-----------------------------------------------------------------------------
+
} // namespace <unnamed>
//-----------------------------------------------------------------------------
@@ -505,4 +530,5 @@ Generator::generate(TestBuilder &dst)
generate_strict_verbatim_peek(dst);
generate_nested_tensor_lambda(dst);
generate_erf_value_test(dst);
+ generate_nan_existence(dst);
}
diff --git a/eval/src/apps/tensor_conformance/generate.h b/eval/src/apps/tensor_conformance/generate.h
index a71531f7cf3..e9482b9015c 100644
--- a/eval/src/apps/tensor_conformance/generate.h
+++ b/eval/src/apps/tensor_conformance/generate.h
@@ -4,13 +4,25 @@
#include <vespa/eval/eval/tensor_spec.h>
#include <map>
+#include <set>
struct TestBuilder {
bool full;
TestBuilder(bool full_in) : full(full_in) {}
using TensorSpec = vespalib::eval::TensorSpec;
virtual void add(const vespalib::string &expression,
- const std::map<vespalib::string,TensorSpec> &inputs) = 0;
+ const std::map<vespalib::string,TensorSpec> &inputs,
+ const std::set<vespalib::string> &ignore) = 0;
+ void add(const vespalib::string &expression,
+ const std::map<vespalib::string,TensorSpec> &inputs)
+ {
+ add(expression, inputs, {});
+ }
+ void add_ignore_java(const vespalib::string &expression,
+ const std::map<vespalib::string,TensorSpec> &inputs)
+ {
+ add(expression, inputs, {"vespajlib"});
+ }
virtual ~TestBuilder() {}
};
diff --git a/eval/src/apps/tensor_conformance/tensor_conformance.cpp b/eval/src/apps/tensor_conformance/tensor_conformance.cpp
index 37ecce51714..e6bbb1f8a41 100644
--- a/eval/src/apps/tensor_conformance/tensor_conformance.cpp
+++ b/eval/src/apps/tensor_conformance/tensor_conformance.cpp
@@ -37,6 +37,7 @@ using namespace std::placeholders;
//-----------------------------------------------------------------------------
size_t fail_cnt = 0;
+size_t ignore_cnt = 0;
//-----------------------------------------------------------------------------
@@ -164,8 +165,8 @@ void print_test(const Inspector &test, OutputWriter &dst) {
auto value = extract_value(test["inputs"][input]);
dst.printf("input '%s': %s\n", input.c_str(), value.to_string().c_str());
}
- auto result = extract_value(test["result"]["expect"]);
- dst.printf("expected result: %s\n", result.to_string().c_str());
+ auto result = eval_expr(test, prod_factory);
+ dst.printf("result: %s\n", result.to_string().c_str());
}
//-----------------------------------------------------------------------------
@@ -176,7 +177,8 @@ private:
public:
MyTestBuilder(bool full_in, Output &out) : TestBuilder(full_in), _writer(out) {}
void add(const vespalib::string &expression,
- const std::map<vespalib::string,TensorSpec> &inputs_in) override
+ const std::map<vespalib::string,TensorSpec> &inputs_in,
+ const std::set<vespalib::string> &ignore_in) override
{
Cursor &test = _writer.create();
test.setString("expression", expression);
@@ -184,20 +186,30 @@ public:
for (const auto& [name, spec]: inputs_in) {
insert_value(inputs, name, spec);
}
- insert_value(test.setObject("result"), "expect", ref_eval(test));
+ test.setObject("result");
+ if (!ignore_in.empty()) {
+ Cursor &ignore = test.setObject("ignore");
+ for (const auto &impl: ignore_in) {
+ ignore.setBool(impl, true);
+ }
+ }
}
- void add_failing_test() {
+ void add_failing_test(bool ignore_fail) {
Cursor &test = _writer.create();
test.setString("expression", "a");
insert_value(test.setObject("inputs"), "a", GenSpec(1).idx("x", 3));
insert_value(test.setObject("result"), "dummy", GenSpec(2).idx("x", 3));
+ if (ignore_fail) {
+ test.setObject("ignore").setBool("dummy", true);
+ }
}
};
void generate(Output &out, bool full) {
MyTestBuilder my_test_builder(full, out);
Generator::generate(my_test_builder);
- // my_test_builder.add_failing_test();
+ // my_test_builder.add_failing_test(true);
+ // my_test_builder.add_failing_test(false);
}
//-----------------------------------------------------------------------------
@@ -228,9 +240,14 @@ void verify(Input &in, Output &out) {
++result_map[result];
auto actual_result = extract_value(slime["result"][result]);
if (!require_impl::eq(actual_result, reference_result)) {
- ++fail_cnt;
- fprintf(stderr, "expression failed('%s'): '%s'\n", result.c_str(),
- slime["expression"].asString().make_string().c_str());
+ bool ignore_fail = slime["ignore"][result].asBool();
+ if (ignore_fail) {
+ ++ignore_cnt;
+ } else {
+ ++fail_cnt;
+ }
+ fprintf(stderr, "%sexpression failed('%s'): '%s'\n", ignore_fail ? "IGNORED: " : "",
+ result.c_str(), slime["expression"].asString().make_string().c_str());
fprintf(stderr, "%s", TensorSpec::diff(actual_result, "actual", reference_result, "expected").c_str());
dump_test(slime.get());
}
@@ -241,6 +258,10 @@ void verify(Input &in, Output &out) {
for (const auto &entry: result_map) {
stats.setLong(entry.first, entry.second);
}
+ REQUIRE(!slime["fail_cnt"].valid());
+ REQUIRE(!slime["ignore_cnt"].valid());
+ slime.get().setLong("fail_cnt", fail_cnt);
+ slime.get().setLong("ignore_cnt", ignore_cnt);
JsonFormat::encode(slime, out, false);
};
for_each_test(in, handle_test, handle_summary);
diff --git a/eval/src/tests/eval/function/function_test.cpp b/eval/src/tests/eval/function/function_test.cpp
index 4dff2934873..aca19e2ccc9 100644
--- a/eval/src/tests/eval/function/function_test.cpp
+++ b/eval/src/tests/eval/function/function_test.cpp
@@ -3,12 +3,15 @@
#include <vespa/eval/eval/function.h>
#include <vespa/eval/eval/operator_nodes.h>
#include <vespa/eval/eval/node_traverser.h>
+#include <vespa/eval/eval/value_codec.h>
#include <set>
#include <vespa/eval/eval/test/eval_spec.h>
+#include <vespa/eval/eval/test/gen_spec.h>
#include <vespa/eval/eval/check_type.h>
using namespace vespalib::eval;
using namespace vespalib::eval::nodes;
+using vespalib::eval::test::GenSpec;
std::vector<vespalib::string> params({"x", "y", "z", "w"});
@@ -351,7 +354,7 @@ TEST("require that Not child can be accessed") {
const Node &root = f->root();
EXPECT_TRUE(!root.is_leaf());
ASSERT_EQUAL(1u, root.num_children());
- EXPECT_EQUAL(1.0, root.get_child(0).get_const_value());
+ EXPECT_EQUAL(1.0, root.get_child(0).get_const_double_value());
}
TEST("require that If children can be accessed") {
@@ -359,9 +362,9 @@ TEST("require that If children can be accessed") {
const Node &root = f->root();
EXPECT_TRUE(!root.is_leaf());
ASSERT_EQUAL(3u, root.num_children());
- EXPECT_EQUAL(1.0, root.get_child(0).get_const_value());
- EXPECT_EQUAL(2.0, root.get_child(1).get_const_value());
- EXPECT_EQUAL(3.0, root.get_child(2).get_const_value());
+ EXPECT_EQUAL(1.0, root.get_child(0).get_const_double_value());
+ EXPECT_EQUAL(2.0, root.get_child(1).get_const_double_value());
+ EXPECT_EQUAL(3.0, root.get_child(2).get_const_double_value());
}
TEST("require that Operator children can be accessed") {
@@ -369,8 +372,8 @@ TEST("require that Operator children can be accessed") {
const Node &root = f->root();
EXPECT_TRUE(!root.is_leaf());
ASSERT_EQUAL(2u, root.num_children());
- EXPECT_EQUAL(1.0, root.get_child(0).get_const_value());
- EXPECT_EQUAL(2.0, root.get_child(1).get_const_value());
+ EXPECT_EQUAL(1.0, root.get_child(0).get_const_double_value());
+ EXPECT_EQUAL(2.0, root.get_child(1).get_const_double_value());
}
TEST("require that Call children can be accessed") {
@@ -378,8 +381,8 @@ TEST("require that Call children can be accessed") {
const Node &root = f->root();
EXPECT_TRUE(!root.is_leaf());
ASSERT_EQUAL(2u, root.num_children());
- EXPECT_EQUAL(1.0, root.get_child(0).get_const_value());
- EXPECT_EQUAL(2.0, root.get_child(1).get_const_value());
+ EXPECT_EQUAL(1.0, root.get_child(0).get_const_double_value());
+ EXPECT_EQUAL(2.0, root.get_child(1).get_const_double_value());
}
struct MyNodeHandler : public NodeHandler {
@@ -498,7 +501,7 @@ TEST("require that node types can be checked") {
TEST("require that parameter is param, but not const") {
EXPECT_TRUE(Function::parse("x")->root().is_param());
- EXPECT_TRUE(!Function::parse("x")->root().is_const());
+ EXPECT_TRUE(!Function::parse("x")->root().is_const_double());
}
TEST("require that inverted parameter is not param") {
@@ -506,43 +509,43 @@ TEST("require that inverted parameter is not param") {
}
TEST("require that number is const, but not param") {
- EXPECT_TRUE(Function::parse("123")->root().is_const());
+ EXPECT_TRUE(Function::parse("123")->root().is_const_double());
EXPECT_TRUE(!Function::parse("123")->root().is_param());
}
TEST("require that string is const") {
- EXPECT_TRUE(Function::parse("\"x\"")->root().is_const());
+ EXPECT_TRUE(Function::parse("\"x\"")->root().is_const_double());
}
TEST("require that neg is const if sub-expression is const") {
- EXPECT_TRUE(Function::parse("-123")->root().is_const());
- EXPECT_TRUE(!Function::parse("-x")->root().is_const());
+ EXPECT_TRUE(Function::parse("-123")->root().is_const_double());
+ EXPECT_TRUE(!Function::parse("-x")->root().is_const_double());
}
TEST("require that not is const if sub-expression is const") {
- EXPECT_TRUE(Function::parse("!1")->root().is_const());
- EXPECT_TRUE(!Function::parse("!x")->root().is_const());
+ EXPECT_TRUE(Function::parse("!1")->root().is_const_double());
+ EXPECT_TRUE(!Function::parse("!x")->root().is_const_double());
}
TEST("require that operators are cost if both children are const") {
- EXPECT_TRUE(!Function::parse("x+y")->root().is_const());
- EXPECT_TRUE(!Function::parse("1+y")->root().is_const());
- EXPECT_TRUE(!Function::parse("x+2")->root().is_const());
- EXPECT_TRUE(Function::parse("1+2")->root().is_const());
+ EXPECT_TRUE(!Function::parse("x+y")->root().is_const_double());
+ EXPECT_TRUE(!Function::parse("1+y")->root().is_const_double());
+ EXPECT_TRUE(!Function::parse("x+2")->root().is_const_double());
+ EXPECT_TRUE(Function::parse("1+2")->root().is_const_double());
}
TEST("require that set membership is never tagged as const (NB: avoids jit recursion)") {
- EXPECT_TRUE(!Function::parse("x in [x,y,z]")->root().is_const());
- EXPECT_TRUE(!Function::parse("1 in [x,y,z]")->root().is_const());
- EXPECT_TRUE(!Function::parse("1 in [1,y,z]")->root().is_const());
- EXPECT_TRUE(!Function::parse("1 in [1,2,3]")->root().is_const());
+ EXPECT_TRUE(!Function::parse("x in [x,y,z]")->root().is_const_double());
+ EXPECT_TRUE(!Function::parse("1 in [x,y,z]")->root().is_const_double());
+ EXPECT_TRUE(!Function::parse("1 in [1,y,z]")->root().is_const_double());
+ EXPECT_TRUE(!Function::parse("1 in [1,2,3]")->root().is_const_double());
}
TEST("require that calls are cost if all parameters are const") {
- EXPECT_TRUE(!Function::parse("max(x,y)")->root().is_const());
- EXPECT_TRUE(!Function::parse("max(1,y)")->root().is_const());
- EXPECT_TRUE(!Function::parse("max(x,2)")->root().is_const());
- EXPECT_TRUE(Function::parse("max(1,2)")->root().is_const());
+ EXPECT_TRUE(!Function::parse("max(x,y)")->root().is_const_double());
+ EXPECT_TRUE(!Function::parse("max(1,y)")->root().is_const_double());
+ EXPECT_TRUE(!Function::parse("max(x,2)")->root().is_const_double());
+ EXPECT_TRUE(Function::parse("max(1,2)")->root().is_const_double());
}
//-----------------------------------------------------------------------------
@@ -1059,4 +1062,42 @@ TEST_FF("require that all conformance test expressions can be parsed",
//-----------------------------------------------------------------------------
+TEST("require that constant double value can be (pre-)calculated") {
+ auto expect = GenSpec(42).gen();
+ auto f = Function::parse("21+21");
+ ASSERT_TRUE(!f->has_error());
+ const Node &root = f->root();
+ auto value = root.get_const_value();
+ ASSERT_TRUE(value);
+ EXPECT_EQUAL(spec_from_value(*value), expect);
+}
+
+TEST("require that constant tensor value can be (pre-)calculated") {
+ auto expect = GenSpec().idx("x", 10).gen();
+ auto f = Function::parse("concat(tensor(x[4])(x+1),tensor(x[6])(x+5),x)");
+ ASSERT_TRUE(!f->has_error());
+ const Node &root = f->root();
+ auto value = root.get_const_value();
+ ASSERT_TRUE(value);
+ EXPECT_EQUAL(spec_from_value(*value), expect);
+}
+
+TEST("require that non-const value cannot be (pre-)calculated") {
+ auto f = Function::parse("a+b");
+ ASSERT_TRUE(!f->has_error());
+ const Node &root = f->root();
+ auto value = root.get_const_value();
+ EXPECT_TRUE(value.get() == nullptr);
+}
+
+TEST("require that parse error does not produce a const value") {
+ auto f = Function::parse("this is a parse error");
+ EXPECT_TRUE(f->has_error());
+ const Node &root = f->root();
+ auto value = root.get_const_value();
+ EXPECT_TRUE(value.get() == nullptr);
+}
+
+//-----------------------------------------------------------------------------
+
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp b/eval/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp
index e33cc116fba..8271e8b57ee 100644
--- a/eval/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp
+++ b/eval/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp
@@ -20,16 +20,18 @@ TEST("require that (some) cross-language tensor conformance tests pass with C++
MappedFileInput input(result_file);
JsonFormat::decode(input, result);
fprintf(stderr, "conformance summary: %s\n", result.toString().c_str());
- int num_tests = result.get()["num_tests"].asLong();
- int prod_tests = result.get()["stats"]["cpp_prod"].asLong();
- int simple_tests = result.get()["stats"]["cpp_simple_value"].asLong();
- int streamed_tests = result.get()["stats"]["cpp_streamed_value"].asLong();
- int with_expect = result.get()["stats"]["expect"].asLong();
+ int num_tests = result["num_tests"].asLong();
+ int prod_tests = result["stats"]["cpp_prod"].asLong();
+ int simple_tests = result["stats"]["cpp_simple_value"].asLong();
+ int streamed_tests = result["stats"]["cpp_streamed_value"].asLong();
+ EXPECT_TRUE(result["fail_cnt"].valid());
+ EXPECT_EQUAL(result["fail_cnt"].asLong(), 0);
+ EXPECT_TRUE(result["ignore_cnt"].valid());
+ EXPECT_EQUAL(result["ignore_cnt"].asLong(), 0);
EXPECT_GREATER(num_tests, 1000);
EXPECT_EQUAL(prod_tests, num_tests);
EXPECT_EQUAL(simple_tests, num_tests);
EXPECT_EQUAL(streamed_tests, num_tests);
- EXPECT_EQUAL(with_expect, num_tests);
}
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/vespa/eval/eval/basic_nodes.cpp b/eval/src/vespa/eval/eval/basic_nodes.cpp
index d7fa76bf1cc..98ce50b2543 100644
--- a/eval/src/vespa/eval/eval/basic_nodes.cpp
+++ b/eval/src/vespa/eval/eval/basic_nodes.cpp
@@ -5,6 +5,8 @@
#include "node_visitor.h"
#include "interpreted_function.h"
#include "simple_value.h"
+#include "fast_value.h"
+#include "node_tools.h"
namespace vespalib::eval::nodes {
@@ -21,8 +23,9 @@ struct Frame {
} // namespace vespalib::eval::nodes::<unnamed>
double
-Node::get_const_value() const {
- assert(is_const());
+Node::get_const_double_value() const
+{
+ assert(is_const_double());
NodeTypes node_types(*this);
InterpretedFunction function(SimpleValueBuilderFactory::get(), *this, node_types);
NoParams no_params;
@@ -30,6 +33,24 @@ Node::get_const_value() const {
return function.eval(ctx, no_params).as_double();
}
+Value::UP
+Node::get_const_value() const
+{
+ if (nodes::as<nodes::Error>(*this)) {
+ // cannot get const value for parse error
+ return {nullptr};
+ }
+ if (NodeTools::min_num_params(*this) != 0) {
+ // cannot get const value for non-const sub-expression
+ return {nullptr};
+ }
+ NodeTypes node_types(*this);
+ InterpretedFunction function(SimpleValueBuilderFactory::get(), *this, node_types);
+ NoParams no_params;
+ InterpretedFunction::Context ctx(function);
+ return FastValueBuilderFactory::get().copy(function.eval(ctx, no_params));
+}
+
void
Node::traverse(NodeTraverser &traverser) const
{
@@ -69,16 +90,16 @@ If::If(Node_UP cond_in, Node_UP true_expr_in, Node_UP false_expr_in, double p_tr
auto less = as<Less>(cond());
auto in = as<In>(cond());
auto inverted = as<Not>(cond());
- bool true_is_subtree = (true_expr().is_tree() || true_expr().is_const());
- bool false_is_subtree = (false_expr().is_tree() || false_expr().is_const());
+ bool true_is_subtree = (true_expr().is_tree() || true_expr().is_const_double());
+ bool false_is_subtree = (false_expr().is_tree() || false_expr().is_const_double());
if (true_is_subtree && false_is_subtree) {
if (less) {
- _is_tree = (less->lhs().is_param() && less->rhs().is_const());
+ _is_tree = (less->lhs().is_param() && less->rhs().is_const_double());
} else if (in) {
_is_tree = in->child().is_param();
} else if (inverted) {
if (auto ge = as<GreaterEqual>(inverted->child())) {
- _is_tree = (ge->lhs().is_param() && ge->rhs().is_const());
+ _is_tree = (ge->lhs().is_param() && ge->rhs().is_const_double());
}
}
}
diff --git a/eval/src/vespa/eval/eval/basic_nodes.h b/eval/src/vespa/eval/eval/basic_nodes.h
index c1192585f7c..c6b19f6ce12 100644
--- a/eval/src/vespa/eval/eval/basic_nodes.h
+++ b/eval/src/vespa/eval/eval/basic_nodes.h
@@ -2,6 +2,7 @@
#pragma once
+#include "value.h"
#include "string_stuff.h"
#include <vespa/vespalib/util/hdr_abort.h>
#include <vespa/vespalib/stllike/string.h>
@@ -48,9 +49,10 @@ struct DumpContext {
struct Node {
virtual bool is_forest() const { return false; }
virtual bool is_tree() const { return false; }
- virtual bool is_const() const { return false; }
+ virtual bool is_const_double() const { return false; }
virtual bool is_param() const { return false; }
- virtual double get_const_value() const;
+ virtual double get_const_double_value() const;
+ Value::UP get_const_value() const;
void traverse(NodeTraverser &traverser) const;
virtual vespalib::string dump(DumpContext &ctx) const = 0;
virtual void accept(NodeVisitor &visitor) const = 0;
@@ -92,8 +94,8 @@ private:
double _value;
public:
Number(double value_in) : _value(value_in) {}
- virtual bool is_const() const override { return true; }
- virtual double get_const_value() const override { return value(); }
+ virtual bool is_const_double() const override { return true; }
+ double get_const_double_value() const override { return value(); }
double value() const { return _value; }
vespalib::string dump(DumpContext &) const override {
return make_string("%g", _value);
@@ -120,8 +122,8 @@ private:
vespalib::string _value;
public:
String(const vespalib::string &value_in) : _value(value_in) {}
- bool is_const() const override { return true; }
- double get_const_value() const override { return hash(); }
+ bool is_const_double() const override { return true; }
+ double get_const_double_value() const override { return hash(); }
const vespalib::string &value() const { return _value; }
uint32_t hash() const { return hash_code(_value.data(), _value.size()); }
vespalib::string dump(DumpContext &) const override {
@@ -137,7 +139,7 @@ private:
public:
In(Node_UP child) : _child(std::move(child)), _entries() {}
void add_entry(Node_UP entry) {
- assert(entry->is_const());
+ assert(entry->is_const_double());
_entries.push_back(std::move(entry));
}
size_t num_entries() const { return _entries.size(); }
@@ -171,10 +173,10 @@ public:
class Neg : public Node {
private:
Node_UP _child;
- bool _is_const;
+ bool _is_const_double;
public:
- Neg(Node_UP child_in) : _child(std::move(child_in)), _is_const(_child->is_const()) {}
- bool is_const() const override { return _is_const; }
+ Neg(Node_UP child_in) : _child(std::move(child_in)), _is_const_double(_child->is_const_double()) {}
+ bool is_const_double() const override { return _is_const_double; }
const Node &child() const { return *_child; }
size_t num_children() const override { return _child ? 1 : 0; }
const Node &get_child(size_t idx) const override {
@@ -198,10 +200,10 @@ public:
class Not : public Node {
private:
Node_UP _child;
- bool _is_const;
+ bool _is_const_double;
public:
- Not(Node_UP child_in) : _child(std::move(child_in)), _is_const(_child->is_const()) {}
- bool is_const() const override { return _is_const; }
+ Not(Node_UP child_in) : _child(std::move(child_in)), _is_const_double(_child->is_const_double()) {}
+ bool is_const_double() const override { return _is_const_double; }
const Node &child() const { return *_child; }
size_t num_children() const override { return _child ? 1 : 0; }
const Node &get_child(size_t idx) const override {
diff --git a/eval/src/vespa/eval/eval/call_nodes.h b/eval/src/vespa/eval/eval/call_nodes.h
index c5a41756005..2a7d4173e64 100644
--- a/eval/src/vespa/eval/eval/call_nodes.h
+++ b/eval/src/vespa/eval/eval/call_nodes.h
@@ -25,12 +25,12 @@ private:
vespalib::string _name;
size_t _num_params;
std::vector<Node_UP> _args;
- bool _is_const;
+ bool _is_const_double;
public:
Call(const vespalib::string &name_in, size_t num_params_in)
- : _name(name_in), _num_params(num_params_in), _is_const(false) {}
+ : _name(name_in), _num_params(num_params_in), _is_const_double(false) {}
~Call();
- bool is_const() const override { return _is_const; }
+ bool is_const_double() const override { return _is_const_double; }
const vespalib::string &name() const { return _name; }
size_t num_params() const { return _num_params; }
size_t num_args() const { return _args.size(); }
@@ -45,9 +45,9 @@ public:
}
virtual void bind_next(Node_UP arg_in) {
if (_args.empty()) {
- _is_const = arg_in->is_const();
+ _is_const_double = arg_in->is_const_double();
} else {
- _is_const = (_is_const && arg_in->is_const());
+ _is_const_double = (_is_const_double && arg_in->is_const_double());
}
_args.push_back(std::move(arg_in));
}
diff --git a/eval/src/vespa/eval/eval/fast_forest.cpp b/eval/src/vespa/eval/eval/fast_forest.cpp
index 47932ff14fb..4eea4a5cce7 100644
--- a/eval/src/vespa/eval/eval/fast_forest.cpp
+++ b/eval/src/vespa/eval/eval/fast_forest.cpp
@@ -85,26 +85,26 @@ State::encode_node(uint32_t tree_id, const nodes::Node &node)
if (less) {
auto symbol = nodes::as<nodes::Symbol>(less->lhs());
assert(symbol);
- assert(less->rhs().is_const());
+ assert(less->rhs().is_const_double());
size_t feature = symbol->id();
assert(feature < cmp_nodes.size());
- cmp_nodes[feature].emplace_back(less->rhs().get_const_value(), tree_id, true_leafs, true);
+ cmp_nodes[feature].emplace_back(less->rhs().get_const_double_value(), tree_id, true_leafs, true);
} else {
assert(inverted);
auto ge = nodes::as<nodes::GreaterEqual>(inverted->child());
assert(ge);
auto symbol = nodes::as<nodes::Symbol>(ge->lhs());
assert(symbol);
- assert(ge->rhs().is_const());
+ assert(ge->rhs().is_const_double());
size_t feature = symbol->id();
assert(feature < cmp_nodes.size());
- cmp_nodes[feature].emplace_back(ge->rhs().get_const_value(), tree_id, true_leafs, false);
+ cmp_nodes[feature].emplace_back(ge->rhs().get_const_double_value(), tree_id, true_leafs, false);
}
return BitRange::join(true_leafs, false_leafs);
} else {
- assert(node.is_const());
+ assert(node.is_const_double());
BitRange leaf_range(leafs[tree_id].size());
- leafs[tree_id].push_back(node.get_const_value());
+ leafs[tree_id].push_back(node.get_const_double_value());
return leaf_range;
}
}
diff --git a/eval/src/vespa/eval/eval/key_gen.cpp b/eval/src/vespa/eval/eval/key_gen.cpp
index 80803d3b2a2..a8fb205f124 100644
--- a/eval/src/vespa/eval/eval/key_gen.cpp
+++ b/eval/src/vespa/eval/eval/key_gen.cpp
@@ -28,7 +28,7 @@ struct KeyGen : public NodeVisitor, public NodeTraverser {
void visit(const In &node) override { add_byte( 4);
add_size(node.num_entries());
for (size_t i = 0; i < node.num_entries(); ++i) {
- add_double(node.get_entry(i).get_const_value());
+ add_double(node.get_entry(i).get_const_double_value());
}
}
void visit(const Neg &) override { add_byte( 5); }
diff --git a/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp b/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp
index fce9abb7316..42911a56c14 100644
--- a/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp
+++ b/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp
@@ -61,7 +61,7 @@ struct SetMemberHash : PluginState {
vespalib::hash_set<double> members;
explicit SetMemberHash(const In &in) : members(in.num_entries() * 3) {
for (size_t i = 0; i < in.num_entries(); ++i) {
- members.insert(in.get_entry(i).get_const_value());
+ members.insert(in.get_entry(i).get_const_double_value());
}
}
static bool check_membership(const PluginState *state, double value) {
@@ -260,8 +260,8 @@ struct FunctionBuilder : public NodeVisitor, public NodeTraverser {
//-------------------------------------------------------------------------
bool open(const Node &node) override {
- if (node.is_const()) {
- push_double(node.get_const_value());
+ if (node.is_const_double()) {
+ push_double(node.get_const_double_value());
return false;
}
if (!inside_forest && (pass_params != PassParams::SEPARATE) && node.is_forest()) {
@@ -412,7 +412,7 @@ struct FunctionBuilder : public NodeVisitor, public NodeTraverser {
// build explicit code to check all set members
llvm::Value *found = builder.getFalse();
for (size_t i = 0; i < item.num_entries(); ++i) {
- llvm::Value *elem = llvm::ConstantFP::get(builder.getDoubleTy(), item.get_entry(i).get_const_value());
+ llvm::Value *elem = llvm::ConstantFP::get(builder.getDoubleTy(), item.get_entry(i).get_const_double_value());
llvm::Value *elem_eq = builder.CreateFCmpOEQ(lhs, elem, "elem_eq");
found = builder.CreateOr(found, elem_eq, "found");
}
diff --git a/eval/src/vespa/eval/eval/make_tensor_function.cpp b/eval/src/vespa/eval/eval/make_tensor_function.cpp
index 15f188db51a..b65c3d5aaa7 100644
--- a/eval/src/vespa/eval/eval/make_tensor_function.cpp
+++ b/eval/src/vespa/eval/eval/make_tensor_function.cpp
@@ -83,14 +83,14 @@ struct TensorFunctionBuilder : public NodeVisitor, public NodeTraverser {
bool maybe_make_const(const Node &node) {
if (auto create = as<TensorCreate>(node)) {
- bool is_const = true;
+ bool is_const_double = true;
for (size_t i = 0; i < create->num_children(); ++i) {
- is_const &= create->get_child(i).is_const();
+ is_const_double &= create->get_child(i).is_const_double();
}
- if (is_const) {
+ if (is_const_double) {
TensorSpec spec(create->type().to_spec());
for (size_t i = 0; i < create->num_children(); ++i) {
- spec.add(create->get_child_address(i), create->get_child(i).get_const_value());
+ spec.add(create->get_child_address(i), create->get_child(i).get_const_double_value());
}
make_const(node, *stash.create<Value::UP>(value_from_spec(spec, factory)));
return true;
@@ -172,7 +172,7 @@ struct TensorFunctionBuilder : public NodeVisitor, public NodeTraverser {
void visit(const In &node) override {
auto my_in = std::make_unique<In>(std::make_unique<Symbol>(0));
for (size_t i = 0; i < node.num_entries(); ++i) {
- my_in->add_entry(std::make_unique<Number>(node.get_entry(i).get_const_value()));
+ my_in->add_entry(std::make_unique<Number>(node.get_entry(i).get_const_double_value()));
}
auto my_fun = Function::create(std::move(my_in), {"x"});
const auto &token = stash.create<CompileCache::Token::UP>(CompileCache::compile(*my_fun, PassParams::SEPARATE));
diff --git a/eval/src/vespa/eval/eval/operator_nodes.cpp b/eval/src/vespa/eval/eval/operator_nodes.cpp
index 4c66268dfa2..98072b99324 100644
--- a/eval/src/vespa/eval/eval/operator_nodes.cpp
+++ b/eval/src/vespa/eval/eval/operator_nodes.cpp
@@ -13,7 +13,7 @@ Operator::Operator(const vespalib::string &op_str_in, int priority_in, Order ord
_order(order_in),
_lhs(),
_rhs(),
- _is_const(false)
+ _is_const_double(false)
{}
Operator::~Operator() { }
diff --git a/eval/src/vespa/eval/eval/operator_nodes.h b/eval/src/vespa/eval/eval/operator_nodes.h
index eafd817d42c..5562659f0f7 100644
--- a/eval/src/vespa/eval/eval/operator_nodes.h
+++ b/eval/src/vespa/eval/eval/operator_nodes.h
@@ -32,7 +32,7 @@ private:
Order _order;
Node_UP _lhs;
Node_UP _rhs;
- bool _is_const;
+ bool _is_const_double;
public:
Operator(const vespalib::string &op_str_in, int priority_in, Order order_in);
@@ -42,7 +42,7 @@ public:
Order order() const { return _order; }
const Node &lhs() const { return *_lhs; }
const Node &rhs() const { return *_rhs; }
- bool is_const() const override { return _is_const; }
+ bool is_const_double() const override { return _is_const_double; }
size_t num_children() const override { return (_lhs && _rhs) ? 2 : 0; }
const Node &get_child(size_t idx) const override {
assert(idx < 2);
@@ -67,7 +67,7 @@ public:
virtual void bind(Node_UP lhs_in, Node_UP rhs_in) {
_lhs = std::move(lhs_in);
_rhs = std::move(rhs_in);
- _is_const = (_lhs->is_const() && _rhs->is_const());
+ _is_const_double = (_lhs->is_const_double() && _rhs->is_const_double());
}
vespalib::string dump(DumpContext &ctx) const override {
diff --git a/eval/src/vespa/eval/eval/test/reference_operations.cpp b/eval/src/vespa/eval/eval/test/reference_operations.cpp
index 34452c1b2ae..577af47e2ae 100644
--- a/eval/src/vespa/eval/eval/test/reference_operations.cpp
+++ b/eval/src/vespa/eval/eval/test/reference_operations.cpp
@@ -239,7 +239,7 @@ TensorSpec ReferenceOperations::peek(const PeekSpec &peek_spec, const std::vecto
if (is_mapped_dim(dim)) {
addr.emplace(dim, vespalib::make_string("%" PRId64, int64_t(child_value)));
} else {
- addr.emplace(dim, child_value);
+ addr.emplace(dim, (int64_t)child_value);
}
}
}, label_or_child);
diff --git a/eval/src/vespa/eval/eval/vm_forest.cpp b/eval/src/vespa/eval/eval/vm_forest.cpp
index e0fac9405ce..a31c5f502ac 100644
--- a/eval/src/vespa/eval/eval/vm_forest.cpp
+++ b/eval/src/vespa/eval/eval/vm_forest.cpp
@@ -136,8 +136,8 @@ void encode_less(const nodes::Less &less,
auto symbol = nodes::as<nodes::Symbol>(less.lhs());
assert(symbol);
model_out.push_back(uint32_t(symbol->id()) << 12);
- assert(less.rhs().is_const());
- encode_const(less.rhs().get_const_value(), model_out);
+ assert(less.rhs().is_const_double());
+ encode_const(less.rhs().get_const_double_value(), model_out);
size_t skip_idx = model_out.size();
model_out.push_back(0); // left child size placeholder
uint32_t left_type = encode_node(left_child, model_out);
@@ -157,7 +157,7 @@ void encode_in(const nodes::In &in,
size_t set_size_idx = model_out.size();
model_out.push_back(in.num_entries());
for (size_t i = 0; i < in.num_entries(); ++i) {
- encode_large_const(in.get_entry(i).get_const_value(), model_out);
+ encode_large_const(in.get_entry(i).get_const_double_value(), model_out);
}
size_t left_idx = model_out.size();
uint32_t left_type = encode_node(left_child, model_out);
@@ -176,8 +176,8 @@ void encode_inverted(const nodes::Not &inverted,
auto symbol = nodes::as<nodes::Symbol>(ge->lhs());
assert(symbol);
model_out.push_back(uint32_t(symbol->id()) << 12);
- assert(ge->rhs().is_const());
- encode_const(ge->rhs().get_const_value(), model_out);
+ assert(ge->rhs().is_const_double());
+ encode_const(ge->rhs().get_const_double_value(), model_out);
size_t skip_idx = model_out.size();
model_out.push_back(0); // left child size placeholder
uint32_t left_type = encode_node(left_child, model_out);
@@ -204,8 +204,8 @@ uint32_t encode_node(const nodes::Node &node_in, std::vector<uint32_t> &model_ou
return INVERTED;
}
} else {
- assert(node_in.is_const());
- encode_const(node_in.get_const_value(), model_out);
+ assert(node_in.is_const_double());
+ encode_const(node_in.get_const_double_value(), model_out);
return LEAF;
}
}
diff --git a/eval/src/vespa/eval/instruction/dense_lambda_peek_optimizer.cpp b/eval/src/vespa/eval/instruction/dense_lambda_peek_optimizer.cpp
index e584d94edbe..e1d5a5d4b7a 100644
--- a/eval/src/vespa/eval/instruction/dense_lambda_peek_optimizer.cpp
+++ b/eval/src/vespa/eval/instruction/dense_lambda_peek_optimizer.cpp
@@ -139,7 +139,7 @@ struct PeekAnalyzer {
do {
for (size_t i = 0; i < src_dim_funs.size(); ++i) {
auto dim_fun = src_dim_funs[i]->get_function();
- size_t dim_idx = dim_fun(&params[0]);
+ size_t dim_idx = (int64_t) dim_fun(&params[0]);
if (dim_idx >= src_dim_sizes[i]) {
return Result::invalid();
}
diff --git a/eval/src/vespa/eval/instruction/dense_tensor_peek_function.cpp b/eval/src/vespa/eval/instruction/dense_tensor_peek_function.cpp
index b5f99ad6c8b..323e0a59ff3 100644
--- a/eval/src/vespa/eval/instruction/dense_tensor_peek_function.cpp
+++ b/eval/src/vespa/eval/instruction/dense_tensor_peek_function.cpp
@@ -21,7 +21,7 @@ void my_tensor_peek_op(InterpretedFunction::State &state, uint64_t param) {
if (dim.first >= 0) {
idx += (dim.first * factor);
} else {
- size_t dim_idx = state.peek(0).as_double();
+ size_t dim_idx = (int64_t) state.peek(0).as_double();
state.stack.pop_back();
valid &= (dim_idx < dim.second);
idx += (dim_idx * factor);
diff --git a/eval/src/vespa/eval/onnx/onnx_wrapper.h b/eval/src/vespa/eval/onnx/onnx_wrapper.h
index 1f36d576c33..507d75efbd9 100644
--- a/eval/src/vespa/eval/onnx/onnx_wrapper.h
+++ b/eval/src/vespa/eval/onnx/onnx_wrapper.h
@@ -154,7 +154,7 @@ private:
std::vector<const char *> _input_name_refs;
std::vector<const char *> _output_name_refs;
- void extract_meta_data();
+ void extract_meta_data() __attribute__((noinline));
public:
Onnx(const vespalib::string &model_file, Optimize optimize);
diff --git a/fbench/src/fbench/fbench.cpp b/fbench/src/fbench/fbench.cpp
index 084fc2e48e6..57efb8a47e0 100644
--- a/fbench/src/fbench/fbench.cpp
+++ b/fbench/src/fbench/fbench.cpp
@@ -366,7 +366,7 @@ FBench::Main(int argc, char *argv[])
// parse options and override defaults.
int idx;
- char opt;
+ int opt;
const char *arg;
bool optError;
diff --git a/fbench/src/filterfile/filterfile.cpp b/fbench/src/filterfile/filterfile.cpp
index e9b35de97e0..74d00ecf2b9 100644
--- a/fbench/src/filterfile/filterfile.cpp
+++ b/fbench/src/filterfile/filterfile.cpp
@@ -21,7 +21,7 @@ main(int argc, char** argv)
// parse options and override defaults.
int optIdx;
- char opt;
+ int opt;
const char *arg;
bool optError;
diff --git a/fbench/src/splitfile/splitfile.cpp b/fbench/src/splitfile/splitfile.cpp
index 1e86ab6bf5c..ef85a59020d 100644
--- a/fbench/src/splitfile/splitfile.cpp
+++ b/fbench/src/splitfile/splitfile.cpp
@@ -20,7 +20,7 @@ main(int argc, char** argv)
// parse options and override defaults.
int idx;
- char opt;
+ int opt;
const char *arg;
bool optError;
diff --git a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java
index da9d4ceab88..7c46c6e2873 100644
--- a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java
+++ b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java
@@ -181,6 +181,7 @@ public class FileReferenceDownloader {
}
public void close() {
+ downloadExecutor.shutdown();
try {
downloadExecutor.awaitTermination(1, TimeUnit.SECONDS);
} catch (InterruptedException e) {
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
index 38d2bbe6fc0..e2a86a3755c 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
@@ -13,7 +13,6 @@ import java.util.Optional;
import java.util.TreeMap;
import static com.yahoo.vespa.flags.FetchVector.Dimension.APPLICATION_ID;
-import static com.yahoo.vespa.flags.FetchVector.Dimension.CLUSTER_ID;
import static com.yahoo.vespa.flags.FetchVector.Dimension.CLUSTER_TYPE;
import static com.yahoo.vespa.flags.FetchVector.Dimension.HOSTNAME;
import static com.yahoo.vespa.flags.FetchVector.Dimension.TENANT_ID;
@@ -137,13 +136,6 @@ public class Flags {
"Takes effect at redeployment",
ZONE_ID, APPLICATION_ID);
- public static final UnboundIntFlag MAX_PENDING_MOVE_OPS = defineIntFlag(
- "max-pending-move-ops", 10,
- List.of("baldersheim"), "2021-02-15", "2021-05-01",
- "Max number of move operations inflight",
- "Takes effect at redeployment",
- ZONE_ID, APPLICATION_ID);
-
public static final UnboundDoubleFlag FEED_CONCURRENCY = defineDoubleFlag(
"feed-concurrency", 0.5,
List.of("baldersheim"), "2020-12-02", "2022-01-01",
@@ -151,17 +143,10 @@ public class Flags {
"Takes effect at redeployment",
ZONE_ID, APPLICATION_ID);
- public static final UnboundBooleanFlag USE_BUCKET_EXECUTOR_FOR_LID_SPACE_COMPACT = defineFeatureFlag(
- "use-bucket-executor-for-lid-space-compact", false,
- List.of("baldersheim"), "2021-01-24", "2021-05-01",
- "Wheter to use content-level bucket executor or legacy frozen buckets",
- "Takes effect on next internal redeployment",
- APPLICATION_ID);
-
- public static final UnboundBooleanFlag USE_BUCKET_EXECUTOR_FOR_BUCKET_MOVE = defineFeatureFlag(
- "use-bucket-executor-for-bucket-move", false,
- List.of("baldersheim"), "2021-02-15", "2021-05-01",
- "Wheter to use content-level bucket executor or legacy frozen buckets",
+ public static final UnboundBooleanFlag USE_BUCKET_EXECUTOR_FOR_PRUNE_REMOVED = defineFeatureFlag(
+ "use-bucket-executor-for-prune-removed", true,
+ List.of("baldersheim"), "2021-05-04", "2021-06-01",
+ "Whether to use content-level bucket executor or legacy frozen buckets for prune removed",
"Takes effect on next internal redeployment",
APPLICATION_ID);
@@ -172,53 +157,32 @@ public class Flags {
"Takes effect on the next suspension request to the Orchestrator.",
APPLICATION_ID);
+ public static final UnboundBooleanFlag ENCRYPT_DISK = defineFeatureFlag(
+ "encrypt-disk", false,
+ List.of("hakonhall"), "2021-05-05", "2021-06-05",
+ "Allow migrating an unencrypted data partition to being encrypted.",
+ "Takes effect on next host-admin tick.");
+
+ public static final UnboundBooleanFlag ENCRYPT_DIRTY_DISK = defineFeatureFlag(
+ "encrypt-dirty-disk", false,
+ List.of("hakonhall"), "2021-05-14", "2021-06-05",
+ "Allow migrating an unencrypted data partition to being encrypted when provisioned or dirty.",
+ "Takes effect on next host-admin tick.");
+
public static final UnboundBooleanFlag ENABLE_FEED_BLOCK_IN_DISTRIBUTOR = defineFeatureFlag(
"enable-feed-block-in-distributor", true,
- List.of("geirst"), "2021-01-27", "2021-05-01",
+ List.of("geirst"), "2021-01-27", "2021-07-01",
"Enables blocking of feed in the distributor if resource usage is above limit on at least one content node",
"Takes effect at redeployment",
ZONE_ID, APPLICATION_ID);
- public static final UnboundDoubleFlag MAX_DEAD_BYTES_RATIO = defineDoubleFlag(
- "max-dead-bytes-ratio", 0.15,
- List.of("baldersheim", "geirst","toregge"), "2021-02-03", "2021-05-01",
- "max ratio of dead to used memory bytes in large data structures before compaction is attempted",
- "Takes effect at redeployment",
- ZONE_ID, APPLICATION_ID);
-
- public static final UnboundStringFlag SYNC_HOST_LOGS_TO_S3_BUCKET = defineStringFlag(
- "sync-host-logs-to-s3-bucket", "", List.of("andreer", "valerijf"), "2021-02-10", "2021-05-01",
- "Host-admin should sync host logs to an S3 bucket named by this flag. If left empty, sync is disabled",
- "Takes effect on next run of S3 log sync task in host-admin",
- TENANT_ID, ZONE_ID);
-
- public static final UnboundBooleanFlag CACHE_ACL = defineFeatureFlag(
- "cache-acl", true,
- List.of("hakon"), "2021-04-26", "2021-05-26",
- "Whether host-admin should cache the ACL responses w/TTL 115s, or always ask config server.",
- "Takes effect on next host-admin tick.",
- ZONE_ID);
-
- public static final UnboundIntFlag CLUSTER_CONTROLLER_MAX_HEAP_SIZE_IN_MB = defineIntFlag(
- "cluster-controller-max-heap-size-in-mb", 128,
- List.of("hmusum"), "2021-02-10", "2021-05-15",
- "JVM max heap size for cluster controller in Mb",
- "Takes effect when restarting cluster controller");
-
public static final UnboundIntFlag METRICS_PROXY_MAX_HEAP_SIZE_IN_MB = defineIntFlag(
"metrics-proxy-max-heap-size-in-mb", 256,
- List.of("hmusum"), "2021-03-01", "2021-05-15",
+ List.of("hmusum"), "2021-03-01", "2021-06-15",
"JVM max heap size for metrics proxy in Mb",
"Takes effect when restarting metrics proxy",
CLUSTER_TYPE);
- public static final UnboundIntFlag CONFIG_PROXY_MAX_HEAP_SIZE_IN_MB = defineIntFlag(
- "config-proxy-max-heap-size-in-mb", 256,
- List.of("hmusum"), "2021-03-15", "2021-05-15",
- "JVM max heap size for config proxy in Mb",
- "Takes effect on restart of Docker container",
- CLUSTER_TYPE, CLUSTER_ID);
-
public static final UnboundStringFlag DEDICATED_CLUSTER_CONTROLLER_FLAVOR = defineStringFlag(
"dedicated-cluster-controller-flavor", "", List.of("jonmv"), "2021-02-25", "2021-05-25",
"Flavor as <vpu>-<memgb>-<diskgb> to use for dedicated cluster controller nodes",
@@ -240,7 +204,7 @@ public class Flags {
public static final UnboundIntFlag MAX_ACTIVATION_INHIBITED_OUT_OF_SYNC_GROUPS = defineIntFlag(
"max-activation-inhibited-out-of-sync-groups", 0,
- List.of("vekterli"), "2021-02-19", "2021-05-01",
+ List.of("vekterli"), "2021-02-19", "2021-07-01",
"Allows replicas in up to N content groups to not be activated " +
"for query visibility if they are out of sync with a majority of other replicas",
"Takes effect at redeployment",
@@ -260,13 +224,6 @@ public class Flags {
"Takes effect at redeployment",
APPLICATION_ID);
- public static final UnboundBooleanFlag UPGRADE_DELL_SSD_FIRMWARE = defineFeatureFlag(
- "upgrade_dell_ssd_firmware", false,
- List.of("andreer"), "2021-04-13", "2021-05-13",
- "Whether to consider upgrading Dell SSD firmware",
- "Takes effect on next host-admin tick",
- HOSTNAME);
-
public static final UnboundIntFlag NUM_DISTRIBUTOR_STRIPES = defineIntFlag(
"num-distributor-stripes", 0,
List.of("geirst", "vekterli"), "2021-04-20", "2021-07-01",
@@ -281,6 +238,13 @@ public class Flags {
"Takes effect on next host-admin tick",
HOSTNAME);
+ public static final UnboundBooleanFlag CFG_DEPLOY_MULTIPART = defineFeatureFlag(
+ "cfg-deploy-multipart", false,
+ List.of("tokle"), "2021-05-19", "2021-08-01",
+ "Whether to deploy applications using multipart form data (instead of url params)",
+ "Takes effect immediately",
+ APPLICATION_ID);
+
/** WARNING: public for testing: All flags should be defined in {@link Flags}. */
public static UnboundBooleanFlag defineFeatureFlag(String flagId, boolean defaultValue, List<String> owners,
String createdAt, String expiresAt, String description,
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
index d6d8b1a26e1..52b09a281d5 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
@@ -122,7 +122,7 @@ public class PermanentFlags {
ZONE_ID, APPLICATION_ID);
public static final UnboundStringFlag ZOOKEEPER_SERVER_VERSION = defineStringFlag(
- "zookeeper-server-version", "3.6.2",
+ "zookeeper-server-version", "3.6.3",
"ZooKeeper server version, a jar file zookeeper-server-<ZOOKEEPER_SERVER_VERSION>-jar-with-dependencies.jar must exist",
"Takes effect on restart of Docker container",
NODE_TYPE, APPLICATION_ID, HOSTNAME);
@@ -157,6 +157,12 @@ public class PermanentFlags {
"Takes effect on redeployment",
APPLICATION_ID);
+ public static final UnboundIntFlag MAX_REBUILDS = defineIntFlag(
+ "max-host-rebuilds", 10,
+ "The maximum number of hosts allowed to rebuild at a time",
+ "Takes effect immediately, but any current excess rebuilds will not be cancelled"
+ );
+
private PermanentFlags() {}
private static UnboundBooleanFlag defineFeatureFlag(
diff --git a/fnet/src/tests/info/info.cpp b/fnet/src/tests/info/info.cpp
index 3422efe1da6..d093ee1da06 100644
--- a/fnet/src/tests/info/info.cpp
+++ b/fnet/src/tests/info/info.cpp
@@ -72,6 +72,8 @@ TEST("size of important objects")
{
#ifdef __APPLE__
constexpr size_t MUTEX_SIZE = 64u;
+#elif defined(__aarch64__)
+ constexpr size_t MUTEX_SIZE = 48u;
#else
constexpr size_t MUTEX_SIZE = 40u;
#endif
diff --git a/fnet/src/vespa/fnet/frt/invokable.h b/fnet/src/vespa/fnet/frt/invokable.h
index 48e6aefe776..df4abd41f90 100644
--- a/fnet/src/vespa/fnet/frt/invokable.h
+++ b/fnet/src/vespa/fnet/frt/invokable.h
@@ -12,5 +12,22 @@ public:
typedef void (FRT_Invokable::*FRT_METHOD_PT)(FRT_RPCRequest *);
-#define FRT_METHOD(pt) ((FRT_METHOD_PT) &pt)
+namespace fnet::internal {
+
+template <class T>
+using frt_method_precast_pt = void (T::*)(FRT_RPCRequest *);
+
+template <class T>
+FRT_METHOD_PT
+frt_method_pt_cast(frt_method_precast_pt<T> pt)
+{
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshift-negative-value"
+ return (FRT_METHOD_PT) pt;
+#pragma GCC diagnostic pop
+}
+
+}
+
+#define FRT_METHOD(pt) (fnet::internal::frt_method_pt_cast(&pt))
diff --git a/fsa/src/alltest/conceptnet_test.cpp b/fsa/src/alltest/conceptnet_test.cpp
index 03a88434776..3b19f6eade8 100644
--- a/fsa/src/alltest/conceptnet_test.cpp
+++ b/fsa/src/alltest/conceptnet_test.cpp
@@ -10,7 +10,7 @@ using namespace fsa;
int main(int argc, char **argv)
{
- char opt;
+ int opt;
//extern char *optarg;
extern int optind;
diff --git a/fsa/src/apps/fsadump/fsadump.cpp b/fsa/src/apps/fsadump/fsadump.cpp
index 5308f113356..117f755eb22 100644
--- a/fsa/src/apps/fsadump/fsadump.cpp
+++ b/fsa/src/apps/fsadump/fsadump.cpp
@@ -67,7 +67,7 @@ int main(int argc, char** argv)
FSA_Input_Format format = OUTPUT_UNDEF;
const char *input_file;
- char opt;
+ int opt;
extern int optind;
while((opt=getopt(argc,argv,"ebBhntpdV")) != -1){
diff --git a/fsa/src/apps/fsainfo/fsainfo.cpp b/fsa/src/apps/fsainfo/fsainfo.cpp
index 61af267545b..76177592257 100644
--- a/fsa/src/apps/fsainfo/fsainfo.cpp
+++ b/fsa/src/apps/fsainfo/fsainfo.cpp
@@ -41,7 +41,7 @@ int main(int argc, char** argv)
{
const char *fsa_file;
- char opt;
+ int opt;
extern int optind;
while((opt=getopt(argc,argv,"hV")) != -1){
diff --git a/fsa/src/apps/makefsa/makefsa.cpp b/fsa/src/apps/makefsa/makefsa.cpp
index f86a0d781c1..72162851f16 100644
--- a/fsa/src/apps/makefsa/makefsa.cpp
+++ b/fsa/src/apps/makefsa/makefsa.cpp
@@ -73,7 +73,7 @@ int main(int argc, char** argv)
bool verbose = false;
unsigned int lines=0,count = 0;
- char opt;
+ int opt;
extern char *optarg;
extern int optind;
diff --git a/functions.cmake b/functions.cmake
index 9fa1f326e0a..fe59cc3aaa9 100644
--- a/functions.cmake
+++ b/functions.cmake
@@ -183,7 +183,7 @@ endmacro()
function(vespa_add_library TARGET)
cmake_parse_arguments(ARG
- "STATIC;OBJECT;INTERFACE;TEST"
+ "STATIC;OBJECT;INTERFACE;TEST;ALLOW_UNRESOLVED_SYMBOLS"
"INSTALL;OUTPUT_NAME"
"DEPENDS;EXTERNAL_DEPENDS;AFTER;SOURCES"
${ARGN})
@@ -227,6 +227,10 @@ function(vespa_add_library TARGET)
set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME ${ARG_OUTPUT_NAME})
endif()
+ if(NOT ARG_OBJECT AND NOT ARG_STATIC AND NOT ARG_INTERFACE AND NOT ARG_ALLOW_UNRESOLVED_SYMBOLS AND DEFINED VESPA_DISALLOW_UNRESOLVED_SYMBOLS_IN_SHARED_LIBRARIES)
+ __add_private_target_link_option(${TARGET} ${VESPA_DISALLOW_UNRESOLVED_SYMBOLS_IN_SHARED_LIBRARIES})
+ endif()
+
__add_target_to_module(${TARGET})
__export_include_directories(${TARGET})
if(VESPA_ADD_IMPLICIT_OBJECT_LIBRARY)
@@ -759,3 +763,17 @@ function(vespa_suppress_warnings_for_protobuf_sources)
set_source_files_properties(${ARG_SOURCES} PROPERTIES COMPILE_FLAGS "-Wno-array-bounds -Wno-suggest-override -Wno-inline ${VESPA_DISABLE_UNUSED_WARNING}")
endif()
endfunction()
+
+function(__add_private_target_link_option TARGET TARGET_LINK_OPTION)
+ if(COMMAND target_link_options)
+ target_link_options(${TARGET} PRIVATE ${TARGET_LINK_OPTION})
+ else()
+ get_target_property(TARGET_LINK_FLAGS ${TARGET} LINK_FLAGS)
+ if (NOT DEFINED TARGET_LINK_FLAGS OR ${TARGET_LINK_FLAGS} STREQUAL "" OR ${TARGET_LINK_FLAGS} STREQUAL "TARGET_LINK_FLAGS-NOTFOUND")
+ set(TARGET_LINK_FLAGS ${TARGET_LINK_OPTION})
+ else()
+ set(TARGET_LINK_FLAGS "${TARGET_LINK_FLAGS} ${TARGET_LINK_OPTION}")
+ endif()
+ set_target_properties(${TARGET} PROPERTIES LINK_FLAGS "${TARGET_LINK_FLAGS}")
+ endif()
+endfunction()
diff --git a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/Expression.java b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/Expression.java
index 50dd7611bb0..b23fe4d0ebd 100644
--- a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/Expression.java
+++ b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/Expression.java
@@ -177,7 +177,6 @@ public abstract class Expression extends Selectable {
public abstract DataType createdOutputType();
/** Creates an expression with simple lingustics for testing */
- @SuppressWarnings("deprecation")
public static Expression fromString(String expression) throws ParseException {
return fromString(expression, new SimpleLinguistics());
}
@@ -210,9 +209,11 @@ public abstract class Expression extends Selectable {
public static Document execute(Expression expression, Document doc) {
return expression.execute(new SimpleAdapterFactory(), doc);
}
- public static final DocumentUpdate execute(Expression expression, DocumentUpdate update) {
+
+ public static DocumentUpdate execute(Expression expression, DocumentUpdate update) {
return execute(expression, new SimpleAdapterFactory(), update);
}
+
public final FieldValue execute() {
return execute(new ExecutionContext());
}
diff --git a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/InputExpression.java b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/InputExpression.java
index f6196cfa158..d707a50fbd8 100644
--- a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/InputExpression.java
+++ b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/InputExpression.java
@@ -96,7 +96,7 @@ public final class InputExpression extends Expression {
public static class InputFieldNameExtractor implements ObjectOperation, ObjectPredicate {
- private List<String> inputFieldNames = new ArrayList<>(1);
+ private final List<String> inputFieldNames = new ArrayList<>(1);
public List<String> getInputFieldNames() { return inputFieldNames; }
diff --git a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/LiteralBoolExpression.java b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/LiteralBoolExpression.java
new file mode 100644
index 00000000000..a989f1a57d2
--- /dev/null
+++ b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/LiteralBoolExpression.java
@@ -0,0 +1,53 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.indexinglanguage.expressions;
+
+import com.yahoo.document.DataType;
+import com.yahoo.document.datatypes.BoolFieldValue;
+
+/**
+ * 'true' or 'false'
+ *
+ * @author bratseth
+ */
+public class LiteralBoolExpression extends Expression {
+
+ private final boolean value;
+
+ public LiteralBoolExpression(boolean value) {
+ super(null);
+ this.value = value;
+ }
+
+ @Override
+ protected void doExecute(ExecutionContext context) {
+ context.setValue(new BoolFieldValue(value));
+ }
+
+ @Override
+ protected void doVerify(VerificationContext context) {
+ context.setValue(createdOutputType());
+ }
+
+ @Override
+ public DataType createdOutputType() {
+ return DataType.BOOL;
+ }
+
+ @Override
+ public String toString() {
+ return String.valueOf(value);
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other == this) return true;
+ if ( ! (other instanceof LiteralBoolExpression)) return false;
+ return ((LiteralBoolExpression)other).value == this.value;
+ }
+
+ @Override
+ public int hashCode() {
+ return value ? 1 : 0;
+ }
+
+}
diff --git a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/NowExpression.java b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/NowExpression.java
index 6a155d30814..c8df0c0d667 100644
--- a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/NowExpression.java
+++ b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/NowExpression.java
@@ -61,9 +61,10 @@ public final class NowExpression extends Expression {
return getClass().hashCode() + timer.hashCode();
}
- public static interface Timer {
+ public interface Timer {
+
+ long currentTimeSeconds();
- public long currentTimeSeconds();
}
private static class SystemTimer implements Timer {
@@ -75,4 +76,5 @@ public final class NowExpression extends Expression {
return System.currentTimeMillis() / 1000;
}
}
+
}
diff --git a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/SplitExpression.java b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/SplitExpression.java
index 1ed862f4798..b436775b151 100644
--- a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/SplitExpression.java
+++ b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/SplitExpression.java
@@ -68,4 +68,5 @@ public final class SplitExpression extends Expression {
public int hashCode() {
return getClass().hashCode() + splitPattern.toString().hashCode();
}
+
}
diff --git a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/ToBoolExpression.java b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/ToBoolExpression.java
new file mode 100644
index 00000000000..d6d8ed54d26
--- /dev/null
+++ b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/ToBoolExpression.java
@@ -0,0 +1,59 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.indexinglanguage.expressions;
+
+import com.yahoo.document.DataType;
+import com.yahoo.document.NumericDataType;
+import com.yahoo.document.datatypes.BoolFieldValue;
+import com.yahoo.document.datatypes.FieldValue;
+import com.yahoo.document.datatypes.LongFieldValue;
+import com.yahoo.document.datatypes.NumericFieldValue;
+import com.yahoo.document.datatypes.StringFieldValue;
+
+/**
+ * @author bratseth
+ */
+public final class ToBoolExpression extends Expression {
+
+ public ToBoolExpression() {
+ super(UnresolvedDataType.INSTANCE);
+ }
+
+ @Override
+ protected void doExecute(ExecutionContext context) {
+ context.setValue(new BoolFieldValue(toBooleanValue(context.getValue())));
+ }
+
+ private boolean toBooleanValue(FieldValue value) {
+ if (value instanceof NumericFieldValue)
+ return ((NumericFieldValue)value).getNumber().intValue() != 0;
+ if (value instanceof StringFieldValue)
+ return ! ((StringFieldValue)value).getString().isEmpty();
+ return false;
+ }
+
+ @Override
+ protected void doVerify(VerificationContext context) {
+ context.setValue(createdOutputType());
+ }
+
+ @Override
+ public DataType createdOutputType() {
+ return DataType.BOOL;
+ }
+
+ @Override
+ public String toString() {
+ return "to_bool";
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ return obj instanceof ToBoolExpression;
+ }
+
+ @Override
+ public int hashCode() {
+ return getClass().hashCode();
+ }
+
+}
diff --git a/indexinglanguage/src/main/javacc/IndexingParser.jj b/indexinglanguage/src/main/javacc/IndexingParser.jj
index 3afb52a8039..632a19a2c6c 100644
--- a/indexinglanguage/src/main/javacc/IndexingParser.jj
+++ b/indexinglanguage/src/main/javacc/IndexingParser.jj
@@ -191,8 +191,11 @@ TOKEN :
<TO_POS: "to_pos"> |
<TO_STRING: "to_string"> |
<TO_WSET: "to_wset"> |
+ <TO_BOOL: "to_bool"> |
<TRIM: "trim"> |
<ZCURVE: "zcurve"> |
+ <TRUE: "true" > |
+ <FALSE: "false" > |
<IDENTIFIER: ["a"-"z","A"-"Z", "_"] (["a"-"z","A"-"Z","0"-"9","_","-"])*>
}
@@ -312,7 +315,9 @@ Expression value() :
val = toPosExp() |
val = toStringExp() |
val = toWsetExp() |
+ val = toBoolExp() |
val = trimExp() |
+ val = literalBoolExp() |
val = zcurveExp() |
( <LPAREN> val = statement() <RPAREN> { val = new ParenthesisExpression(val); } ) )
{ return val; }
@@ -684,12 +689,24 @@ Expression toWsetExp() :
{ return new ToWsetExpression(createIfNonExistent, removeIfZero); }
}
+Expression toBoolExp() : { }
+{
+ ( <TO_BOOL> )
+ { return new ToBoolExpression(); }
+}
+
Expression trimExp() : { }
{
( <TRIM> )
{ return new TrimExpression(); }
}
+Expression literalBoolExp() : { }
+{
+ ( <TRUE> | <FALSE> )
+ { return new LiteralBoolExpression(Boolean.parseBoolean(token.image)); }
+}
+
Expression zcurveExp() : { }
{
( <ZCURVE> )
@@ -751,8 +768,11 @@ String identifier() :
<TO_POS> |
<TO_STRING> |
<TO_WSET> |
+ <TO_BOOL> |
<TOKENIZE> |
<TRIM> |
+ <TRUE> |
+ <FALSE> |
<ZCURVE> ) { val = token.image; } )
{ return val; }
}
diff --git a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/ScriptTestCase.java b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/ScriptTestCase.java
index 0edf0f645a7..14a8f40c46c 100644
--- a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/ScriptTestCase.java
+++ b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/ScriptTestCase.java
@@ -4,6 +4,7 @@ package com.yahoo.vespa.indexinglanguage;
import com.yahoo.document.DataType;
import com.yahoo.document.Document;
import com.yahoo.document.DocumentType;
+import com.yahoo.document.datatypes.BoolFieldValue;
import com.yahoo.document.datatypes.StringFieldValue;
import com.yahoo.vespa.indexinglanguage.expressions.*;
import com.yahoo.vespa.indexinglanguage.parser.ParseException;
@@ -24,6 +25,7 @@ public class ScriptTestCase {
type.addField("in-2", DataType.STRING);
type.addField("out-1", DataType.STRING);
type.addField("out-2", DataType.STRING);
+ type.addField("mybool", DataType.BOOL);
}
@Test
@@ -76,4 +78,16 @@ public class ScriptTestCase {
assertNotNull(output);
assertEquals(new StringFieldValue("foo"), output.getFieldValue("out-1"));
}
+
+ @Test
+ public void testLiteralBoolean() throws ParseException {
+ Document input = new Document(type, "id:scheme:mytype::");
+ input.setFieldValue("in-1", new StringFieldValue("foo"));
+ var expression = Expression.fromString("if (input 'in-1' == \"foo\") { true | summary 'mybool' | attribute 'mybool' }");
+ System.out.println(expression);
+ Document output = Expression.execute(expression, input);
+ assertNotNull(output);
+ assertEquals(new BoolFieldValue(true), output.getFieldValue("mybool"));
+ }
+
}
diff --git a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/HostNameTestCase.java b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/HostNameTestCase.java
index 3d5792b5c1b..157d06106d0 100644
--- a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/HostNameTestCase.java
+++ b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/HostNameTestCase.java
@@ -43,4 +43,5 @@ public class HostNameTestCase {
assertEquals(HostNameExpression.normalizeHostName(getDefaults().vespaHostname()),
((StringFieldValue)val).getString());
}
+
}
diff --git a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/LiteralBoolExpressionTestCase.java b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/LiteralBoolExpressionTestCase.java
new file mode 100644
index 00000000000..1b3beaf10ee
--- /dev/null
+++ b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/LiteralBoolExpressionTestCase.java
@@ -0,0 +1,63 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.indexinglanguage.expressions;
+
+import com.yahoo.document.DataType;
+import com.yahoo.document.datatypes.BoolFieldValue;
+import com.yahoo.document.datatypes.FieldValue;
+import com.yahoo.vespa.indexinglanguage.SimpleTestAdapter;
+import org.junit.Test;
+
+import static com.yahoo.vespa.indexinglanguage.expressions.ExpressionAssert.assertVerify;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * @author bratseth
+ */
+public class LiteralBoolExpressionTestCase {
+
+ @Test
+ public void requireThatHashCodeAndEqualsAreImplemented() {
+ assertFalse(new LiteralBoolExpression(true).equals(new Object()));
+ assertEquals(new LiteralBoolExpression(true), new LiteralBoolExpression(true));
+ assertEquals(new LiteralBoolExpression(false), new LiteralBoolExpression(false));
+ assertNotEquals(new LiteralBoolExpression(true), new LiteralBoolExpression(false));
+ assertEquals(new LiteralBoolExpression(false).hashCode(), new LiteralBoolExpression(false).hashCode());
+ assertEquals(new LiteralBoolExpression(true).hashCode(), new LiteralBoolExpression(true).hashCode());
+ assertNotEquals(new LiteralBoolExpression(true).hashCode(), new LiteralBoolExpression(false).hashCode());
+ }
+
+ @Test
+ public void requireThatExpressionCanBeVerified() {
+ Expression exp = new LiteralBoolExpression(true);
+ assertVerify(DataType.INT, exp, DataType.BOOL);
+ assertVerify(DataType.STRING, exp, DataType.BOOL);
+ }
+
+ @Test
+ public void testToString() {
+ assertEquals("false", new LiteralBoolExpression(false).toString());
+ assertEquals("true", new LiteralBoolExpression(true).toString());
+ }
+
+ @Test
+ public void requireThatTrueBecomesTrue() {
+ ExecutionContext context = new ExecutionContext(new SimpleTestAdapter());
+ context.execute(new LiteralBoolExpression(true));
+ FieldValue value = context.getValue();
+ assertTrue(value instanceof BoolFieldValue);
+ assertTrue(((BoolFieldValue)value).getBoolean());
+ }
+
+ @Test
+ public void requireThatFalseBecomesFalse() {
+ ExecutionContext context = new ExecutionContext(new SimpleTestAdapter());
+ context.execute(new LiteralBoolExpression(false));
+ FieldValue value = context.getValue();
+ assertTrue(value instanceof BoolFieldValue);
+ assertFalse(((BoolFieldValue)value).getBoolean());
+ }
+
+}
diff --git a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/ToBoolTestCase.java b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/ToBoolTestCase.java
new file mode 100644
index 00000000000..5067f0c4c67
--- /dev/null
+++ b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/ToBoolTestCase.java
@@ -0,0 +1,76 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.indexinglanguage.expressions;
+
+import com.yahoo.document.DataType;
+import com.yahoo.document.datatypes.BoolFieldValue;
+import com.yahoo.document.datatypes.FieldValue;
+import com.yahoo.document.datatypes.IntegerFieldValue;
+import com.yahoo.document.datatypes.LongFieldValue;
+import com.yahoo.document.datatypes.StringFieldValue;
+import com.yahoo.vespa.indexinglanguage.SimpleTestAdapter;
+import org.junit.Test;
+
+import static com.yahoo.vespa.indexinglanguage.expressions.ExpressionAssert.assertVerify;
+import static com.yahoo.vespa.indexinglanguage.expressions.ExpressionAssert.assertVerifyThrows;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * @author bratseth
+ */
+public class ToBoolTestCase {
+
+ @Test
+ public void requireThatHashCodeAndEqualsAreImplemented() {
+ Expression exp = new ToBoolExpression();
+ assertFalse(exp.equals(new Object()));
+ assertEquals(exp, new ToBoolExpression());
+ assertEquals(exp.hashCode(), new ToBoolExpression().hashCode());
+ }
+
+ @Test
+ public void requireThatExpressionCanBeVerified() {
+ Expression exp = new ToBoolExpression();
+ assertVerify(DataType.INT, exp, DataType.BOOL);
+ assertVerify(DataType.STRING, exp, DataType.BOOL);
+ assertVerifyThrows(null, exp, "Expected any input, got null.");
+ }
+
+ @Test
+ public void requireThatNonEmptyStringBecomesTrue() {
+ ExecutionContext context = new ExecutionContext(new SimpleTestAdapter());
+ context.setValue(new StringFieldValue("false")).execute(new ToBoolExpression());
+ FieldValue value = context.getValue();
+ assertTrue(value instanceof BoolFieldValue);
+ assertTrue(((BoolFieldValue)value).getBoolean());
+ }
+
+ @Test
+ public void requireThatEmptyStringBecomesFalse() {
+ ExecutionContext context = new ExecutionContext(new SimpleTestAdapter());
+ context.setValue(new StringFieldValue("")).execute(new ToBoolExpression());
+ FieldValue value = context.getValue();
+ assertTrue(value instanceof BoolFieldValue);
+ assertFalse(((BoolFieldValue)value).getBoolean());
+ }
+
+ @Test
+ public void requireThatNonZeroBecomesTrue() {
+ ExecutionContext context = new ExecutionContext(new SimpleTestAdapter());
+ context.setValue(new IntegerFieldValue(37)).execute(new ToBoolExpression());
+ FieldValue value = context.getValue();
+ assertTrue(value instanceof BoolFieldValue);
+ assertTrue(((BoolFieldValue)value).getBoolean());
+ }
+
+ @Test
+ public void requireThatZeroBecomesFalse() {
+ ExecutionContext context = new ExecutionContext(new SimpleTestAdapter());
+ context.setValue(new IntegerFieldValue(0)).execute(new ToBoolExpression());
+ FieldValue value = context.getValue();
+ assertTrue(value instanceof BoolFieldValue);
+ assertFalse(((BoolFieldValue)value).getBoolean());
+ }
+
+}
diff --git a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/parser/ExpressionTestCase.java b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/parser/ExpressionTestCase.java
index 095842544a6..e9a0d0253e1 100644
--- a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/parser/ExpressionTestCase.java
+++ b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/parser/ExpressionTestCase.java
@@ -73,6 +73,7 @@ public class ExpressionTestCase {
assertExpression(ToPositionExpression.class, "to_pos");
assertExpression(ToStringExpression.class, "to_string");
assertExpression(ToWsetExpression.class, "to_wset");
+ assertExpression(ToBoolExpression.class, "to_bool");
assertExpression(ToWsetExpression.class, "to_wset create_if_non_existent");
assertExpression(ToWsetExpression.class, "to_wset remove_if_zero");
assertExpression(ToWsetExpression.class, "to_wset create_if_non_existent remove_if_zero");
diff --git a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/parser/IdentifierTestCase.java b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/parser/IdentifierTestCase.java
index d7f1582fd50..1dc8c4a607c 100644
--- a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/parser/IdentifierTestCase.java
+++ b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/parser/IdentifierTestCase.java
@@ -63,7 +63,10 @@ public class IdentifierTestCase {
"to_pos",
"to_string",
"to_wset",
+ "to_bool",
"trim",
+ "true",
+ "false",
"zcurve");
for (String str : tokens) {
IndexingParser parser = new IndexingParser(new IndexingInput(str));
diff --git a/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/cors/CorsLogic.java b/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/cors/CorsLogic.java
index 3d8a661d5d1..650ec851ffd 100644
--- a/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/cors/CorsLogic.java
+++ b/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/cors/CorsLogic.java
@@ -27,25 +27,20 @@ class CorsLogic {
static Map<String, String> createCorsResponseHeaders(String requestOriginHeader,
Set<String> allowedOrigins) {
if (requestOriginHeader == null) return Map.of();
+
TreeMap<String, String> headers = new TreeMap<>();
- allowedOrigins.stream()
- .filter(allowedUrl -> matchesRequestOrigin(requestOriginHeader, allowedUrl))
- .findAny()
- .ifPresent(allowedOrigin -> headers.put(ALLOW_ORIGIN_HEADER, allowedOrigin));
- ACCESS_CONTROL_HEADERS.forEach(headers::put);
+ if (requestOriginMatchesAnyAllowed(requestOriginHeader, allowedOrigins))
+ headers.put(ALLOW_ORIGIN_HEADER, requestOriginHeader);
+ headers.putAll(ACCESS_CONTROL_HEADERS);
return headers;
}
static Map<String, String> createCorsPreflightResponseHeaders(String requestOriginHeader,
Set<String> allowedOrigins) {
- TreeMap<String, String> headers = new TreeMap<>();
- if (requestOriginHeader != null && allowedOrigins.contains(requestOriginHeader))
- headers.put(ALLOW_ORIGIN_HEADER, requestOriginHeader);
- ACCESS_CONTROL_HEADERS.forEach(headers::put);
- return headers;
+ return createCorsResponseHeaders(requestOriginHeader, allowedOrigins);
}
- private static boolean matchesRequestOrigin(String requestOrigin, String allowedUrl) {
- return allowedUrl.equals("*") || requestOrigin.startsWith(allowedUrl);
+ private static boolean requestOriginMatchesAnyAllowed(String requestOrigin, Set<String> allowedUrls) {
+ return allowedUrls.stream().anyMatch(requestOrigin::startsWith) || allowedUrls.contains("*");
}
}
diff --git a/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/cors/CorsRequestFilterBase.java b/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/cors/CorsRequestFilterBase.java
deleted file mode 100644
index b565ad374ed..00000000000
--- a/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/cors/CorsRequestFilterBase.java
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.jdisc.http.filter.security.cors;
-
-import com.yahoo.jdisc.Response;
-import com.yahoo.jdisc.http.filter.DiscFilterRequest;
-import com.yahoo.jdisc.http.filter.security.base.JsonSecurityRequestFilterBase;
-
-import java.util.Optional;
-import java.util.Set;
-
-import static com.yahoo.jdisc.http.filter.security.cors.CorsLogic.createCorsResponseHeaders;
-
-/**
- * Security request filters should extend this base class to ensure that CORS header are included in the response of a rejected request.
- * This is required as response filter chains are not executed when a request is rejected in a request filter.
- *
- * @author bjorncs
- */
-public abstract class CorsRequestFilterBase extends JsonSecurityRequestFilterBase {
-
- private final Set<String> allowedUrls;
-
- protected CorsRequestFilterBase(Set<String> allowedUrls) {
- this.allowedUrls = allowedUrls;
- }
-
- @Override
- public final Optional<ErrorResponse> filter(DiscFilterRequest request) {
- Optional<ErrorResponse> errorResponse = filterRequest(request);
- errorResponse.ifPresent(response -> addCorsHeaders(request, response.getResponse()));
- return errorResponse;
- }
-
- protected abstract Optional<ErrorResponse> filterRequest(DiscFilterRequest request);
-
- private void addCorsHeaders(DiscFilterRequest request, Response response) {
- createCorsResponseHeaders(request.getHeader("Origin"), allowedUrls)
- .forEach(response.headers()::add);
- }
-
-}
diff --git a/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsPreflightRequestFilterTest.java b/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsPreflightRequestFilterTest.java
index 2486bc444c8..8b77fc0abbd 100644
--- a/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsPreflightRequestFilterTest.java
+++ b/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsPreflightRequestFilterTest.java
@@ -15,7 +15,9 @@ import java.util.Arrays;
import static com.yahoo.jdisc.http.HttpRequest.Method.OPTIONS;
import static com.yahoo.jdisc.http.filter.security.cors.CorsLogic.ACCESS_CONTROL_HEADERS;
import static com.yahoo.jdisc.http.filter.security.cors.CorsLogic.ALLOW_ORIGIN_HEADER;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -41,6 +43,13 @@ public class CorsPreflightRequestFilterTest {
}
@Test
+ public void allowed_wildcard_origin_yields_origin_header_in_response() {
+ final String ALLOWED_ORIGIN = "http://allowed.origin";
+ HeaderFields headers = doFilterRequest(newRequestFilter("*"), ALLOWED_ORIGIN);
+ assertEquals(ALLOWED_ORIGIN, headers.getFirst(ALLOW_ORIGIN_HEADER));
+ }
+
+ @Test
public void disallowed_request_origin_does_not_yield_allow_origin_header_in_response() {
HeaderFields headers = doFilterRequest(newRequestFilter("http://allowed.origin"), "http://disallowed.origin");
assertNull(headers.getFirst(ALLOW_ORIGIN_HEADER));
diff --git a/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsRequestFilterBaseTest.java b/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsRequestFilterBaseTest.java
deleted file mode 100644
index 2cb25bc93cb..00000000000
--- a/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsRequestFilterBaseTest.java
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.jdisc.http.filter.security.cors;
-
-import com.yahoo.container.jdisc.RequestHandlerTestDriver.MockResponseHandler;
-import com.yahoo.jdisc.Response;
-import com.yahoo.jdisc.http.filter.DiscFilterRequest;
-import org.junit.Test;
-
-import java.util.Collections;
-import java.util.List;
-import java.util.Optional;
-import java.util.Set;
-
-import static com.yahoo.jdisc.http.filter.security.cors.CorsLogic.ALLOW_ORIGIN_HEADER;
-import static org.hamcrest.CoreMatchers.equalTo;
-import static org.hamcrest.CoreMatchers.notNullValue;
-import static org.junit.Assert.assertThat;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-/**
- * @author bjorncs
- */
-public class CorsRequestFilterBaseTest {
-
- @Test
- public void adds_cors_headers_when_filter_reject_request() {
- String origin = "http://allowed.origin";
- Set<String> allowedOrigins = Collections.singleton(origin);
- int statusCode = 403;
- SimpleCorsRequestFilter filter =
- new SimpleCorsRequestFilter(allowedOrigins, statusCode, "Forbidden");
- DiscFilterRequest request = mock(DiscFilterRequest.class);
- when(request.getHeader("Origin")).thenReturn(origin);
- MockResponseHandler responseHandler = new MockResponseHandler();
- filter.filter(request, responseHandler);
-
- Response response = responseHandler.getResponse();
- assertThat(response, notNullValue());
- assertThat(response.getStatus(), equalTo(statusCode));
- List<String> allowOriginHeader = response.headers().get(ALLOW_ORIGIN_HEADER);
- assertThat(allowOriginHeader.size(), equalTo(1));
- assertThat(allowOriginHeader.get(0), equalTo(origin));
- }
-
- private static class SimpleCorsRequestFilter extends CorsRequestFilterBase {
- private final ErrorResponse errorResponse;
-
- SimpleCorsRequestFilter(Set<String> allowedUrls, int statusCode, String message) {
- super(allowedUrls);
- this.errorResponse = new ErrorResponse(statusCode, message);
- }
-
- @Override
- protected Optional<ErrorResponse> filterRequest(DiscFilterRequest request) {
- return Optional.ofNullable(this.errorResponse);
- }
- }
-
-} \ No newline at end of file
diff --git a/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsResponseFilterTest.java b/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsResponseFilterTest.java
index 2967a7659f5..0c8cf9b0ffb 100644
--- a/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsResponseFilterTest.java
+++ b/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/cors/CorsResponseFilterTest.java
@@ -53,7 +53,7 @@ public class CorsResponseFilterTest {
@Test
public void any_request_origin_yields_allow_origin_header_in_response_when_wildcard_is_allowed() {
Map<String, String> headers = doFilterRequest(newResponseFilter("*"), "http://any.origin");
- assertEquals("*", headers.get(ALLOW_ORIGIN_HEADER));
+ assertEquals("http://any.origin", headers.get(ALLOW_ORIGIN_HEADER));
}
private static Map<String, String> doFilterRequest(SecurityResponseFilter filter, String originUrl) {
diff --git a/jdisc_core/src/test/java/com/yahoo/jdisc/application/BindingSetTestCase.java b/jdisc_core/src/test/java/com/yahoo/jdisc/application/BindingSetTestCase.java
index 028d0d69df6..64bcaf15a69 100644
--- a/jdisc_core/src/test/java/com/yahoo/jdisc/application/BindingSetTestCase.java
+++ b/jdisc_core/src/test/java/com/yahoo/jdisc/application/BindingSetTestCase.java
@@ -249,7 +249,7 @@ public class BindingSetTestCase {
handlers.put(new UriPattern("http://*/config/v1/*/"), foo1);
handlers.put(new UriPattern("http://*/config/v1/*/*"), foo2);
handlers.put(new UriPattern("http://*/config/v1/*/*/"), foo3);
- handlers.put(new UriPattern("http://*/application/v2/tenant/"), foo4);
+ handlers.put(new UriPattern("http://*/application/v2/tenant*"), foo4);
handlers.put(new UriPattern("http://*/application/v2/tenant/*"), foo5);
handlers.put(new UriPattern("http://*/application/v2/tenant/*/session"), foo6);
handlers.put(new UriPattern("http://*/application/v2/tenant/*/session/*/prepared"), foo7);
@@ -276,7 +276,7 @@ public class BindingSetTestCase {
assertSame(foo3, bindings.resolve(URI.create("http://abcxyz.yahoo.com:19071" +
"/config/v1/cloud.config.log.logd/admin/")));
assertSame(foo4, bindings.resolve(URI.create("http://abcxyz.yahoo.com:19071" +
- "/application/v2/tenant/")));
+ "/application/v2/tenant")));
assertSame(foo5, bindings.resolve(URI.create("http://abcxyz.yahoo.com:19071" +
"/application/v2/tenant/b")));
assertSame(foo6, bindings.resolve(URI.create("http://abcxyz.yahoo.com:19071" +
diff --git a/jrt/pom.xml b/jrt/pom.xml
index 5208c0417cc..15cfe661350 100644
--- a/jrt/pom.xml
+++ b/jrt/pom.xml
@@ -68,6 +68,13 @@
<updateReleaseInfo>true</updateReleaseInfo>
</configuration>
</plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <configuration>
+ <forkCount>4</forkCount>
+ </configuration>
+ </plugin>
</plugins>
</build>
</project>
diff --git a/jrt/src/com/yahoo/jrt/DataValue.java b/jrt/src/com/yahoo/jrt/DataValue.java
index 15b9ce3df13..8739e4b3817 100644
--- a/jrt/src/com/yahoo/jrt/DataValue.java
+++ b/jrt/src/com/yahoo/jrt/DataValue.java
@@ -10,7 +10,7 @@ import java.nio.ByteBuffer;
**/
public class DataValue extends Value
{
- private byte[] value;
+ private final byte[] value;
/**
* Create from a Java-type value
diff --git a/jrt/src/com/yahoo/jrt/slobrok/api/Mirror.java b/jrt/src/com/yahoo/jrt/slobrok/api/Mirror.java
index 6e39e2a3dbd..996459dc5db 100644
--- a/jrt/src/com/yahoo/jrt/slobrok/api/Mirror.java
+++ b/jrt/src/com/yahoo/jrt/slobrok/api/Mirror.java
@@ -58,7 +58,6 @@ public class Mirror implements IMirror {
**/
public Mirror(Supervisor orb, SlobrokList slobroks, BackOffPolicy bop) {
this.orb = orb;
- orb.useSmallBuffers();
this.slobroks = slobroks;
this.backOff = bop;
transportThread = orb.transport().selectThread();
diff --git a/juniper/src/test/auxTest.cpp b/juniper/src/test/auxTest.cpp
index 74b3469f2c0..2557fbf890d 100644
--- a/juniper/src/test/auxTest.cpp
+++ b/juniper/src/test/auxTest.cpp
@@ -302,7 +302,7 @@ void test_dump(const char* s, unsigned int len)
unsigned int start = i;
for (; i < len;)
{
- if (s[i] < 0) {
+ if ((signed char) s[i] < 0) {
printf("�");
} else {
printf("%c", s[i]);
diff --git a/juniper/src/test/testenv.cpp b/juniper/src/test/testenv.cpp
index 2a83df13487..650bc9f2cb7 100644
--- a/juniper/src/test/testenv.cpp
+++ b/juniper/src/test/testenv.cpp
@@ -19,7 +19,7 @@ Juniper * _Juniper;
TestEnv::TestEnv(FastOS_Application* app, const char* propfile) :
_props(), _config(), _juniper(), _wordFolder()
{
- char c;
+ int c;
const char* oarg = NULL;
int oind = 1;
diff --git a/juniper/src/vespa/juniper/SummaryConfig.h b/juniper/src/vespa/juniper/SummaryConfig.h
index 96023b9523b..1212132c76e 100644
--- a/juniper/src/vespa/juniper/SummaryConfig.h
+++ b/juniper/src/vespa/juniper/SummaryConfig.h
@@ -31,7 +31,7 @@ public:
inline const std::string & highlight_on() const { return _highlight_on; }
inline const std::string & highlight_off() const { return _highlight_off; }
inline const std::string & dots() const { return _dots; }
- inline bool separator(const char c) const { return (c < 0 ? false : _separator.test(c)); }
+ inline bool separator(const char c) const { return ((signed char) c < 0 ? false : _separator.test(c)); }
inline bool connector(const unsigned char c) const { return _connector.test(c); }
inline ConfigFlag escape_markup() const { return _escape_markup; }
inline ConfigFlag preserve_white_space() const { return _preserve_white_space; }
diff --git a/linguistics/abi-spec.json b/linguistics/abi-spec.json
index 58b838d7332..b77b03664d4 100644
--- a/linguistics/abi-spec.json
+++ b/linguistics/abi-spec.json
@@ -427,6 +427,57 @@
],
"fields": []
},
+ "com.yahoo.language.process.SpecialTokenRegistry": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public void <init>(com.yahoo.vespa.configdefinition.SpecialtokensConfig)",
+ "public void <init>(java.util.List)",
+ "public com.yahoo.language.process.SpecialTokens getSpecialTokens(java.lang.String)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.language.process.SpecialTokens$Token": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "java.lang.Comparable"
+ ],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [
+ "public void <init>(java.lang.String)",
+ "public void <init>(java.lang.String, java.lang.String)",
+ "public java.lang.String token()",
+ "public java.lang.String replacement()",
+ "public int compareTo(com.yahoo.language.process.SpecialTokens$Token)",
+ "public boolean equals(java.lang.Object)",
+ "public int hashCode()",
+ "public java.lang.String toString()",
+ "public bridge synthetic int compareTo(java.lang.Object)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.language.process.SpecialTokens": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(java.lang.String, java.util.List)",
+ "public java.lang.String name()",
+ "public java.util.Map asMap()",
+ "public com.yahoo.language.process.SpecialTokens$Token tokenize(java.lang.String, boolean)",
+ "public static com.yahoo.language.process.SpecialTokens empty()"
+ ],
+ "fields": []
+ },
"com.yahoo.language.process.StemList": {
"superClass": "java.util.AbstractList",
"interfaces": [],
diff --git a/linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpTokenizer.java b/linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpTokenizer.java
index e1185cb2457..73518876c3f 100644
--- a/linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpTokenizer.java
+++ b/linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpTokenizer.java
@@ -4,6 +4,7 @@ package com.yahoo.language.opennlp;
import com.yahoo.language.Language;
import com.yahoo.language.LinguisticsCase;
import com.yahoo.language.process.Normalizer;
+import com.yahoo.language.process.SpecialTokenRegistry;
import com.yahoo.language.process.StemMode;
import com.yahoo.language.process.Token;
import com.yahoo.language.process.TokenType;
@@ -32,15 +33,21 @@ public class OpenNlpTokenizer implements Tokenizer {
private final Normalizer normalizer;
private final Transformer transformer;
private final SimpleTokenizer simpleTokenizer;
+ private final SpecialTokenRegistry specialTokenRegistry;
public OpenNlpTokenizer() {
this(new SimpleNormalizer(), new SimpleTransformer());
}
public OpenNlpTokenizer(Normalizer normalizer, Transformer transformer) {
+ this(normalizer, transformer, new SpecialTokenRegistry(List.of()));
+ }
+
+ public OpenNlpTokenizer(Normalizer normalizer, Transformer transformer, SpecialTokenRegistry specialTokenRegistry) {
this.normalizer = normalizer;
this.transformer = transformer;
- simpleTokenizer = new SimpleTokenizer(normalizer, transformer);
+ this.specialTokenRegistry = specialTokenRegistry;
+ this.simpleTokenizer = new SimpleTokenizer(normalizer, transformer, specialTokenRegistry);
}
@Override
diff --git a/linguistics/src/main/java/com/yahoo/language/process/SpecialTokenRegistry.java b/linguistics/src/main/java/com/yahoo/language/process/SpecialTokenRegistry.java
new file mode 100644
index 00000000000..b6335d67967
--- /dev/null
+++ b/linguistics/src/main/java/com/yahoo/language/process/SpecialTokenRegistry.java
@@ -0,0 +1,72 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.language.process;
+
+import com.yahoo.vespa.configdefinition.SpecialtokensConfig;
+import com.yahoo.vespa.configdefinition.SpecialtokensConfig.Tokenlist;
+import com.yahoo.vespa.configdefinition.SpecialtokensConfig.Tokenlist.Tokens;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+/**
+ * Immutable named lists of "special tokens" - strings which should override the normal tokenizer semantics
+ * and be tokenized into a single token.
+ *
+ * @author bratseth
+ */
+public class SpecialTokenRegistry {
+
+ /**
+ * The current special token lists, indexed on name.
+ * These lists are unmodifiable and used directly by clients of this
+ */
+ private final Map<String, SpecialTokens> specialTokenMap;
+
+ /** Creates an empty special token registry */
+ public SpecialTokenRegistry() {
+ this(List.of());
+ }
+
+ /** Create a special token registry from a configuration object. */
+ public SpecialTokenRegistry(SpecialtokensConfig config) {
+ this(specialTokensFrom(config));
+ }
+
+ public SpecialTokenRegistry(List<SpecialTokens> specialTokensList) {
+ specialTokenMap = specialTokensList.stream().collect(Collectors.toUnmodifiableMap(t -> t.name(), t -> t));
+ }
+
+ private static List<SpecialTokens> specialTokensFrom(SpecialtokensConfig config) {
+ List<SpecialTokens> specialTokensList = new ArrayList<>();
+ for (Iterator<Tokenlist> i = config.tokenlist().iterator(); i.hasNext();) {
+ Tokenlist tokenListConfig = i.next();
+
+ List<SpecialTokens.Token> tokenList = new ArrayList<>();
+ for (Iterator<Tokens> j = tokenListConfig.tokens().iterator(); j.hasNext();) {
+ Tokens tokenConfig = j.next();
+ tokenList.add(new SpecialTokens.Token(tokenConfig.token(), tokenConfig.replace()));
+ }
+ specialTokensList.add(new SpecialTokens(tokenListConfig.name(), tokenList));
+ }
+ return specialTokensList;
+ }
+
+ /**
+ * Returns the list of special tokens for a given name.
+ *
+     * @param name the name of the special tokens to return;
+     *             null, the empty string, or the string "default" all
+     *             return the default token list
+ * @return a read-only list of SpecialToken instances, an empty list if this name
+ * has no special tokens
+ */
+ public SpecialTokens getSpecialTokens(String name) {
+ if (name == null || name.trim().equals(""))
+ name = "default";
+ return specialTokenMap.getOrDefault(name, SpecialTokens.empty());
+ }
+
+}
diff --git a/linguistics/src/main/java/com/yahoo/language/process/SpecialTokens.java b/linguistics/src/main/java/com/yahoo/language/process/SpecialTokens.java
new file mode 100644
index 00000000000..465d9b754b3
--- /dev/null
+++ b/linguistics/src/main/java/com/yahoo/language/process/SpecialTokens.java
@@ -0,0 +1,141 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.language.process;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Objects;
+import java.util.stream.Collectors;
+
+import static com.yahoo.language.LinguisticsCase.toLowerCase;
+
+/**
+ * An immutable list of special tokens - strings which should override the normal tokenizer semantics
+ * and be tokenized into a single token. Special tokens are case insensitive.
+ *
+ * @author bratseth
+ */
+public class SpecialTokens {
+
+ private static final SpecialTokens empty = new SpecialTokens("(empty)", List.of());
+
+ private final String name;
+ private final int maximumLength;
+ private final List<Token> tokens;
+ private final Map<String, String> tokenMap;
+
+ public SpecialTokens(String name, List<Token> tokens) {
+ tokens.forEach(token -> token.validate()); // peek() is a lazy intermediate op; without a terminal operation validation never ran
+ List<Token> mutableTokens = new ArrayList<>(tokens);
+ Collections.sort(mutableTokens);
+ this.name = name;
+ this.maximumLength = tokens.stream().mapToInt(token -> token.token().length()).max().orElse(0);
+ this.tokens = List.copyOf(mutableTokens);
+ this.tokenMap = tokens.stream().collect(Collectors.toUnmodifiableMap(t -> t.token(), t -> t.replacement()));
+ }
+
+ /** Returns the name of this special tokens list */
+ public String name() {
+ return name;
+ }
+
+ /**
+ * Returns the tokens of this as an immutable map from token to replacement.
+ * Tokens which do not have a replacement token maps to themselves.
+ */
+ public Map<String, String> asMap() { return tokenMap; }
+
+ /**
+ * Returns the special token starting at the start of the given string, or null if no
+ * special token starts at this string
+ *
+ * @param string the string to search for a special token at the start position
+ * @param substring true to allow the special token to be followed by a character which does not
+ * mark the end of a token
+ */
+ public Token tokenize(String string, boolean substring) {
+ // XXX detonator pattern token.length may be != the length of the
+ // matching data in string, ref caseIndependentLength(String)
+ String input = toLowerCase(string.substring(0, Math.min(string.length(), maximumLength)));
+ for (Iterator<Token> i = tokens.iterator(); i.hasNext();) {
+ Token special = i.next();
+
+ if (input.startsWith(special.token())) {
+ if (string.length() == special.token().length() || substring || tokenEndsAt(special.token().length(), string))
+ return special;
+ }
+ }
+ return null;
+ }
+
+ private boolean tokenEndsAt(int position, String string) {
+ return !Character.isLetterOrDigit(string.charAt(position));
+ }
+
+ public static SpecialTokens empty() { return empty; }
+
+ /** An immutable special token */
+ public final static class Token implements Comparable<Token> {
+
+ private final String token;
+ private final String replacement;
+
+ /** Creates a special token */
+ public Token(String token) {
+ this(token, null);
+ }
+
+ /** Creates a special token which will be represented by the given replacement token */
+ public Token(String token, String replacement) {
+ this.token = toLowerCase(token);
+ if (replacement == null || replacement.trim().equals(""))
+ this.replacement = this.token;
+ else
+ this.replacement = toLowerCase(replacement);
+ }
+
+ /** Returns the special token */
+ public String token() { return token; }
+
+ /** Returns the token to replace occurrences of this by, which equals token() unless this has a replacement. */
+ public String replacement() { return replacement; }
+
+ @Override
+ public int compareTo(Token other) {
+ if (this.token().length() < other.token().length()) return 1;
+ if (this.token().length() == other.token().length()) return 0;
+ return -1;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other == this) return true;
+ if ( ! (other instanceof Token)) return false;
+ return Objects.equals(this.token, ((Token)other).token);
+ }
+
+ @Override
+ public int hashCode() { return token.hashCode(); }
+
+ @Override
+ public String toString() {
+ return "token '" + token + "'" + (replacement.equals(token) ? "" : " replacement '" + replacement + "'");
+ }
+
+ private void validate() {
+ // XXX not fool proof length test, should test codepoint by codepoint for mixed case user input? not even that will necessarily be 100% robust...
+ String asLow = toLowerCase(token);
+ // TODO: Put along with the global toLowerCase
+ String asHigh = token.toUpperCase(Locale.ENGLISH);
+ if (asLow.length() != token.length() || asHigh.length() != token.length()) {
+ throw new IllegalArgumentException("Special token '" + token + "' has case sensitive length. " +
+ "Please report this to the Vespa team.");
+ }
+ }
+
+ }
+
+}
diff --git a/linguistics/src/main/java/com/yahoo/language/process/TokenType.java b/linguistics/src/main/java/com/yahoo/language/process/TokenType.java
index 57a5b6edb68..ad154d1b003 100644
--- a/linguistics/src/main/java/com/yahoo/language/process/TokenType.java
+++ b/linguistics/src/main/java/com/yahoo/language/process/TokenType.java
@@ -4,7 +4,7 @@ package com.yahoo.language.process;
/**
* An enumeration of token types.
*
- * @author <a href="mailto:mathiasm@yahoo-inc.com">Mathias Mølster Lidal</a>
+ * @author Mathias Mølster Lidal
*/
public enum TokenType {
diff --git a/linguistics/src/main/java/com/yahoo/language/simple/SimpleLinguistics.java b/linguistics/src/main/java/com/yahoo/language/simple/SimpleLinguistics.java
index e1a04b2985d..4ffe2a866d8 100644
--- a/linguistics/src/main/java/com/yahoo/language/simple/SimpleLinguistics.java
+++ b/linguistics/src/main/java/com/yahoo/language/simple/SimpleLinguistics.java
@@ -11,10 +11,14 @@ import com.yahoo.language.process.GramSplitter;
import com.yahoo.language.process.Normalizer;
import com.yahoo.language.process.Segmenter;
import com.yahoo.language.process.SegmenterImpl;
+import com.yahoo.language.process.SpecialTokenRegistry;
import com.yahoo.language.process.Stemmer;
import com.yahoo.language.process.StemmerImpl;
import com.yahoo.language.process.Tokenizer;
import com.yahoo.language.process.Transformer;
+import com.yahoo.vespa.configdefinition.SpecialtokensConfig;
+
+import java.util.List;
/**
* Factory of simple linguistic processor implementations.
@@ -31,6 +35,7 @@ public class SimpleLinguistics implements Linguistics {
private final Detector detector;
private final CharacterClasses characterClasses;
private final GramSplitter gramSplitter;
+ private final SpecialTokenRegistry specialTokenRegistry = new SpecialTokenRegistry(List.of());
@Inject
public SimpleLinguistics() {
@@ -45,7 +50,7 @@ public class SimpleLinguistics implements Linguistics {
public Stemmer getStemmer() { return new StemmerImpl(getTokenizer()); }
@Override
- public Tokenizer getTokenizer() { return new SimpleTokenizer(normalizer, transformer); }
+ public Tokenizer getTokenizer() { return new SimpleTokenizer(normalizer, transformer, specialTokenRegistry); }
@Override
public Normalizer getNormalizer() { return normalizer; }
diff --git a/linguistics/src/main/java/com/yahoo/language/simple/SimpleTokenizer.java b/linguistics/src/main/java/com/yahoo/language/simple/SimpleTokenizer.java
index 7df432f496d..740307c0cca 100644
--- a/linguistics/src/main/java/com/yahoo/language/simple/SimpleTokenizer.java
+++ b/linguistics/src/main/java/com/yahoo/language/simple/SimpleTokenizer.java
@@ -23,11 +23,13 @@ import java.util.logging.Level;
*/
public class SimpleTokenizer implements Tokenizer {
+ private static final Logger log = Logger.getLogger(SimpleTokenizer.class.getName());
private final static int SPACE_CODE = 32;
+
private final Normalizer normalizer;
private final Transformer transformer;
private final KStemmer stemmer = new KStemmer();
- private static final Logger log = Logger.getLogger(SimpleTokenizer.class.getName());
+ private final SpecialTokenRegistry specialTokenRegistry;
public SimpleTokenizer() {
this(new SimpleNormalizer(), new SimpleTransformer());
@@ -38,8 +40,13 @@ public class SimpleTokenizer implements Tokenizer {
}
public SimpleTokenizer(Normalizer normalizer, Transformer transformer) {
+ this(normalizer, transformer, new SpecialTokenRegistry(List.of()));
+ }
+
+ public SimpleTokenizer(Normalizer normalizer, Transformer transformer, SpecialTokenRegistry specialTokenRegistry) {
this.normalizer = normalizer;
this.transformer = transformer;
+ this.specialTokenRegistry = specialTokenRegistry;
}
@Override
@@ -56,8 +63,8 @@ public class SimpleTokenizer implements Tokenizer {
String original = input.substring(prev, next);
String token = processToken(original, language, stemMode, removeAccents);
tokens.add(new SimpleToken(original).setOffset(prev)
- .setType(prevType)
- .setTokenString(token));
+ .setType(prevType)
+ .setTokenString(token));
prev = next;
prevType = nextType;
}
@@ -67,20 +74,20 @@ public class SimpleTokenizer implements Tokenizer {
}
private String processToken(String token, Language language, StemMode stemMode, boolean removeAccents) {
- final String original = token;
- log.log(Level.FINEST, () -> "processToken '"+original+"'");
+ String original = token;
+ log.log(Level.FINEST, () -> "processToken '" + original + "'");
token = normalizer.normalize(token);
token = LinguisticsCase.toLowerCase(token);
if (removeAccents)
token = transformer.accentDrop(token, language);
if (stemMode != StemMode.NONE) {
- final String oldToken = token;
+ String oldToken = token;
token = stemmer.stem(token);
- final String newToken = token;
- log.log(Level.FINEST, () -> "stem '"+oldToken+"' to '"+newToken+"'");
+ String newToken = token;
+ log.log(Level.FINEST, () -> "stem '" + oldToken + "' to '" + newToken + "'");
}
- final String result = token;
- log.log(Level.FINEST, () -> "processed token is: "+result);
+ String result = token;
+ log.log(Level.FINEST, () -> "processed token is: " + result);
return result;
}
diff --git a/linguistics/src/test/java/com/yahoo/language/process/SpecialTokensTestCase.java b/linguistics/src/test/java/com/yahoo/language/process/SpecialTokensTestCase.java
new file mode 100644
index 00000000000..47c3ba7933c
--- /dev/null
+++ b/linguistics/src/test/java/com/yahoo/language/process/SpecialTokensTestCase.java
@@ -0,0 +1,40 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.language.process;
+
+import com.yahoo.vespa.configdefinition.SpecialtokensConfig;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * @author bratseth
+ */
+public class SpecialTokensTestCase {
+
+ @Test
+ public void testSpecialTokensConfig() {
+ var builder = new SpecialtokensConfig.Builder();
+ var tokenBuilder = new SpecialtokensConfig.Tokenlist.Builder();
+ tokenBuilder.name("default");
+
+ var tokenListBuilder1 = new SpecialtokensConfig.Tokenlist.Tokens.Builder();
+ tokenListBuilder1.token("c++");
+ tokenListBuilder1.replace("cpp");
+ tokenBuilder.tokens(tokenListBuilder1);
+
+ var tokenListBuilder2 = new SpecialtokensConfig.Tokenlist.Tokens.Builder();
+ tokenListBuilder2.token("...");
+ tokenBuilder.tokens(tokenListBuilder2);
+
+ builder.tokenlist(tokenBuilder);
+
+ var registry = new SpecialTokenRegistry(builder.build());
+
+ var defaultTokens = registry.getSpecialTokens("default");
+ assertEquals("default", defaultTokens.name());
+ assertEquals(2, defaultTokens.asMap().size());
+ assertEquals("cpp", defaultTokens.asMap().get("c++"));
+ assertEquals("...", defaultTokens.asMap().get("..."));
+ }
+
+}
diff --git a/messagebus/pom.xml b/messagebus/pom.xml
index 9bd123ad6bd..4e9984e482c 100644
--- a/messagebus/pom.xml
+++ b/messagebus/pom.xml
@@ -50,6 +50,13 @@
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <configuration>
+ <forkCount>4</forkCount>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<compilerArgs>
diff --git a/messagebus/src/main/java/com/yahoo/messagebus/Message.java b/messagebus/src/main/java/com/yahoo/messagebus/Message.java
index 6848ffc55d8..696211d7c45 100644
--- a/messagebus/src/main/java/com/yahoo/messagebus/Message.java
+++ b/messagebus/src/main/java/com/yahoo/messagebus/Message.java
@@ -101,7 +101,7 @@ public abstract class Message extends Routable {
}
/**
- * <p>Sets the numer of milliseconds that remain before this message times out. Please see comment on {@link
+ * <p>Sets the number of milliseconds that remain before this message times out. Please see comment on {@link
* #isExpired()} for more information on how to determine whether or not a message has expired.</p>
*
* @param timeRemaining The number of milliseconds until expiration.
diff --git a/model-evaluation/abi-spec.json b/model-evaluation/abi-spec.json
index d465464de7f..63882525808 100644
--- a/model-evaluation/abi-spec.json
+++ b/model-evaluation/abi-spec.json
@@ -39,6 +39,7 @@
"public int size()",
"public java.util.Set names()",
"public java.util.Set arguments()",
+ "public java.util.Map onnxModels()",
"public com.yahoo.searchlib.rankingexpression.evaluation.Value defaultValue()",
"public bridge synthetic com.yahoo.tensor.TensorType getType(com.yahoo.tensor.evaluation.Name)"
],
@@ -66,7 +67,7 @@
"public"
],
"methods": [
- "public void <init>(com.yahoo.vespa.config.search.RankProfilesConfig, com.yahoo.vespa.config.search.core.RankingConstantsConfig, com.yahoo.filedistribution.fileacquirer.FileAcquirer)",
+ "public void <init>(com.yahoo.vespa.config.search.RankProfilesConfig, com.yahoo.vespa.config.search.core.RankingConstantsConfig, com.yahoo.vespa.config.search.core.OnnxModelsConfig, com.yahoo.filedistribution.fileacquirer.FileAcquirer)",
"public void <init>(java.util.Map)",
"public java.util.Map models()",
"public varargs ai.vespa.models.evaluation.FunctionEvaluator evaluatorOf(java.lang.String, java.lang.String[])",
@@ -82,7 +83,7 @@
],
"methods": [
"public void <init>(com.yahoo.filedistribution.fileacquirer.FileAcquirer)",
- "public java.util.Map importFrom(com.yahoo.vespa.config.search.RankProfilesConfig, com.yahoo.vespa.config.search.core.RankingConstantsConfig)",
+ "public java.util.Map importFrom(com.yahoo.vespa.config.search.RankProfilesConfig, com.yahoo.vespa.config.search.core.RankingConstantsConfig, com.yahoo.vespa.config.search.core.OnnxModelsConfig)",
"protected com.yahoo.tensor.Tensor readTensorFromFile(java.lang.String, com.yahoo.tensor.TensorType, com.yahoo.config.FileReference)"
],
"fields": []
diff --git a/model-evaluation/pom.xml b/model-evaluation/pom.xml
index 00560a22bc7..8cdff451b42 100644
--- a/model-evaluation/pom.xml
+++ b/model-evaluation/pom.xml
@@ -68,6 +68,12 @@
<scope>provided</scope>
</dependency>
<dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>model-integration</artifactId>
+ <version>${project.version}</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<scope>provided</scope>
diff --git a/model-evaluation/src/main/java/ai/vespa/models/evaluation/FunctionEvaluator.java b/model-evaluation/src/main/java/ai/vespa/models/evaluation/FunctionEvaluator.java
index e373a54bcd1..910aca8aa98 100644
--- a/model-evaluation/src/main/java/ai/vespa/models/evaluation/FunctionEvaluator.java
+++ b/model-evaluation/src/main/java/ai/vespa/models/evaluation/FunctionEvaluator.java
@@ -7,6 +7,7 @@ import com.yahoo.searchlib.rankingexpression.evaluation.TensorValue;
import com.yahoo.tensor.Tensor;
import com.yahoo.tensor.TensorType;
+import java.util.HashMap;
import java.util.Map;
import java.util.stream.Collectors;
@@ -100,19 +101,40 @@ public class FunctionEvaluator {
}
public Tensor evaluate() {
+ evaluateOnnxModels();
for (Map.Entry<String, TensorType> argument : function.argumentTypes().entrySet()) {
- if (context.isMissing(argument.getKey()))
- throw new IllegalStateException("Missing argument '" + argument.getKey() +
- "': Must be bound to a value of type " + argument.getValue());
- if (! context.get(argument.getKey()).type().isAssignableTo(argument.getValue()))
- throw new IllegalStateException("Argument '" + argument.getKey() +
- "' must be bound to a value of type " + argument.getValue());
-
+ checkArgument(argument.getKey(), argument.getValue());
}
evaluated = true;
return function.getBody().evaluate(context).asTensor();
}
+ private void checkArgument(String name, TensorType type) {
+ if (context.isMissing(name))
+ throw new IllegalStateException("Missing argument '" + name + "': Must be bound to a value of type " + type);
+ if (! context.get(name).type().isAssignableTo(type))
+ throw new IllegalStateException("Argument '" + name + "' must be bound to a value of type " + type);
+ }
+
+ /**
+ * Evaluate ONNX models (if not already evaluated) and add the result back to the context.
+ */
+ private void evaluateOnnxModels() {
+ for (Map.Entry<String, OnnxModel> entry : context().onnxModels().entrySet()) {
+ String onnxFeature = entry.getKey();
+ OnnxModel onnxModel = entry.getValue();
+ if (context.get(onnxFeature).equals(context.defaultValue())) {
+ Map<String, Tensor> inputs = new HashMap<>();
+ for (Map.Entry<String, TensorType> input: onnxModel.inputs().entrySet()) {
+ checkArgument(input.getKey(), input.getValue());
+ inputs.put(input.getKey(), context.get(input.getKey()).asTensor());
+ }
+ Tensor result = onnxModel.evaluate(inputs, function.getName()); // Function name is output of model
+ context.put(onnxFeature, new TensorValue(result));
+ }
+ }
+ }
+
/** Returns the function evaluated by this */
public ExpressionFunction function() { return function; }
diff --git a/model-evaluation/src/main/java/ai/vespa/models/evaluation/LazyArrayContext.java b/model-evaluation/src/main/java/ai/vespa/models/evaluation/LazyArrayContext.java
index d66315ef457..a5dcd2719c9 100644
--- a/model-evaluation/src/main/java/ai/vespa/models/evaluation/LazyArrayContext.java
+++ b/model-evaluation/src/main/java/ai/vespa/models/evaluation/LazyArrayContext.java
@@ -11,15 +11,18 @@ import com.yahoo.searchlib.rankingexpression.evaluation.DoubleValue;
import com.yahoo.searchlib.rankingexpression.evaluation.TensorValue;
import com.yahoo.searchlib.rankingexpression.evaluation.Value;
import com.yahoo.searchlib.rankingexpression.rule.CompositeNode;
+import com.yahoo.searchlib.rankingexpression.rule.ConstantNode;
import com.yahoo.searchlib.rankingexpression.rule.ExpressionNode;
import com.yahoo.searchlib.rankingexpression.rule.ReferenceNode;
import com.yahoo.tensor.Tensor;
import com.yahoo.tensor.TensorType;
import java.util.Arrays;
+import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
+import java.util.Optional;
import java.util.Set;
/**
@@ -41,9 +44,10 @@ public final class LazyArrayContext extends Context implements ContextIndex {
LazyArrayContext(ExpressionFunction function,
Map<FunctionReference, ExpressionFunction> referencedFunctions,
List<Constant> constants,
+ List<OnnxModel> onnxModels,
Model model) {
this.function = function;
- this.indexedBindings = new IndexedBindings(function, referencedFunctions, constants, this, model);
+ this.indexedBindings = new IndexedBindings(function, referencedFunctions, constants, onnxModels, this, model);
}
/**
@@ -117,6 +121,9 @@ public final class LazyArrayContext extends Context implements ContextIndex {
/** Returns the (immutable) subset of names in this which must be bound when invoking */
public Set<String> arguments() { return indexedBindings.arguments(); }
+ /** Returns the set of ONNX models that need to be evaluated on this context */
+ public Map<String, OnnxModel> onnxModels() { return indexedBindings.onnxModels(); }
+
private Integer requireIndexOf(String name) {
Integer index = indexedBindings.indexOf(name);
if (index == null)
@@ -152,18 +159,24 @@ public final class LazyArrayContext extends Context implements ContextIndex {
/** The current values set */
private final Value[] values;
+ /** ONNX models indexed by rank feature that calls them */
+ private final ImmutableMap<String, OnnxModel> onnxModels;
+
/** The object instance which encodes "no value is set". The actual value of this is never used. */
private static final Value missing = new DoubleValue(Double.NaN).freeze();
/** The value to return for lookups where no value is set (default: NaN) */
private Value missingValue = new DoubleValue(Double.NaN).freeze();
+
private IndexedBindings(ImmutableMap<String, Integer> nameToIndex,
Value[] values,
- ImmutableSet<String> arguments) {
+ ImmutableSet<String> arguments,
+ ImmutableMap<String, OnnxModel> onnxModels) {
this.nameToIndex = nameToIndex;
this.values = values;
this.arguments = arguments;
+ this.onnxModels = onnxModels;
}
/**
@@ -173,13 +186,16 @@ public final class LazyArrayContext extends Context implements ContextIndex {
IndexedBindings(ExpressionFunction function,
Map<FunctionReference, ExpressionFunction> referencedFunctions,
List<Constant> constants,
+ List<OnnxModel> onnxModels,
LazyArrayContext owner,
Model model) {
// 1. Determine and prepare bind targets
Set<String> bindTargets = new LinkedHashSet<>();
Set<String> arguments = new LinkedHashSet<>(); // Arguments: Bind targets which need to be bound before invocation
- extractBindTargets(function.getBody().getRoot(), referencedFunctions, bindTargets, arguments);
+ Map<String, OnnxModel> onnxModelsInUse = new HashMap<>();
+ extractBindTargets(function.getBody().getRoot(), referencedFunctions, bindTargets, arguments, onnxModels, onnxModelsInUse);
+ this.onnxModels = ImmutableMap.copyOf(onnxModelsInUse);
this.arguments = ImmutableSet.copyOf(arguments);
values = new Value[bindTargets.size()];
Arrays.fill(values, missing);
@@ -214,12 +230,18 @@ public final class LazyArrayContext extends Context implements ContextIndex {
private void extractBindTargets(ExpressionNode node,
Map<FunctionReference, ExpressionFunction> functions,
Set<String> bindTargets,
- Set<String> arguments) {
+ Set<String> arguments,
+ List<OnnxModel> onnxModels,
+ Map<String, OnnxModel> onnxModelsInUse) {
if (isFunctionReference(node)) {
FunctionReference reference = FunctionReference.fromSerial(node.toString()).get();
bindTargets.add(reference.serialForm());
- extractBindTargets(functions.get(reference).getBody().getRoot(), functions, bindTargets, arguments);
+ ExpressionNode functionNode = functions.get(reference).getBody().getRoot();
+ extractBindTargets(functionNode, functions, bindTargets, arguments, onnxModels, onnxModelsInUse);
+ }
+ else if (isOnnx(node)) {
+ extractOnnxTargets(node, bindTargets, arguments, onnxModels, onnxModelsInUse);
}
else if (isConstant(node)) {
bindTargets.add(node.toString());
@@ -231,20 +253,81 @@ public final class LazyArrayContext extends Context implements ContextIndex {
else if (node instanceof CompositeNode) {
CompositeNode cNode = (CompositeNode)node;
for (ExpressionNode child : cNode.children())
- extractBindTargets(child, functions, bindTargets, arguments);
+ extractBindTargets(child, functions, bindTargets, arguments, onnxModels, onnxModelsInUse);
+ }
+ }
+
+ /**
+ * Extract the feature used to evaluate the onnx model. e.g. onnxModel(name) and add
+ * that as a bind target and argument. During evaluation, this will be evaluated before
+ * the rest of the expression and the result is added to the context. Also extract the
+ * inputs to the model and add them as bind targets and arguments.
+ */
+ private void extractOnnxTargets(ExpressionNode node,
+ Set<String> bindTargets,
+ Set<String> arguments,
+ List<OnnxModel> onnxModels,
+ Map<String, OnnxModel> onnxModelsInUse) {
+ Optional<String> modelName = getArgument(node);
+ if (modelName.isPresent()) {
+ for (OnnxModel onnxModel : onnxModels) {
+ if (onnxModel.name().equals(modelName.get())) {
+ String onnxFeature = node.toString();
+ bindTargets.add(onnxFeature);
+ arguments.add(onnxFeature);
+
+ // Load the model (if not already loaded) to extract inputs
+ onnxModel.load();
+
+ for(String input : onnxModel.inputs().keySet()) {
+ bindTargets.add(input);
+ arguments.add(input);
+ }
+ onnxModelsInUse.put(onnxFeature, onnxModel);
+ }
+ }
}
}
+ private Optional<String> getArgument(ExpressionNode node) {
+ if (node instanceof ReferenceNode) {
+ ReferenceNode reference = (ReferenceNode) node;
+ if (reference.getArguments().size() > 0) {
+ if (reference.getArguments().expressions().get(0) instanceof ConstantNode) {
+ ConstantNode constantNode = (ConstantNode) reference.getArguments().expressions().get(0);
+ return Optional.of(stripQuotes(constantNode.sourceString()));
+ }
+ if (reference.getArguments().expressions().get(0) instanceof ReferenceNode) {
+ ReferenceNode referenceNode = (ReferenceNode) reference.getArguments().expressions().get(0);
+ return Optional.of(referenceNode.getName());
+ }
+ }
+ }
+ return Optional.empty();
+ }
+
+ public static String stripQuotes(String s) {
+ if (s.codePointAt(0) == '"' && s.codePointAt(s.length()-1) == '"')
+ return s.substring(1, s.length()-1);
+ if (s.codePointAt(0) == '\'' && s.codePointAt(s.length()-1) == '\'')
+ return s.substring(1, s.length()-1);
+ return s;
+ }
+
private boolean isFunctionReference(ExpressionNode node) {
if ( ! (node instanceof ReferenceNode)) return false;
-
ReferenceNode reference = (ReferenceNode)node;
return reference.getName().equals("rankingExpression") && reference.getArguments().size() == 1;
}
- private boolean isConstant(ExpressionNode node) {
+ private boolean isOnnx(ExpressionNode node) {
if ( ! (node instanceof ReferenceNode)) return false;
+ ReferenceNode reference = (ReferenceNode) node;
+ return reference.getName().equals("onnx") || reference.getName().equals("onnxModel");
+ }
+ private boolean isConstant(ExpressionNode node) {
+ if ( ! (node instanceof ReferenceNode)) return false;
ReferenceNode reference = (ReferenceNode)node;
return reference.getName().equals("constant") && reference.getArguments().size() == 1;
}
@@ -261,12 +344,13 @@ public final class LazyArrayContext extends Context implements ContextIndex {
Set<String> names() { return nameToIndex.keySet(); }
Set<String> arguments() { return arguments; }
Integer indexOf(String name) { return nameToIndex.get(name); }
+ Map<String, OnnxModel> onnxModels() { return onnxModels; }
IndexedBindings copy(Context context) {
Value[] valueCopy = new Value[values.length];
for (int i = 0; i < values.length; i++)
valueCopy[i] = values[i] instanceof LazyValue ? ((LazyValue) values[i]).copyFor(context) : values[i];
- return new IndexedBindings(nameToIndex, valueCopy, arguments);
+ return new IndexedBindings(nameToIndex, valueCopy, arguments, onnxModels);
}
}
diff --git a/model-evaluation/src/main/java/ai/vespa/models/evaluation/Model.java b/model-evaluation/src/main/java/ai/vespa/models/evaluation/Model.java
index 03bbb436026..40a84a701ec 100644
--- a/model-evaluation/src/main/java/ai/vespa/models/evaluation/Model.java
+++ b/model-evaluation/src/main/java/ai/vespa/models/evaluation/Model.java
@@ -26,7 +26,7 @@ import java.util.stream.Collectors;
@Beta
public class Model {
- /** The prefix generated by mode-integration/../IntermediateOperation */
+ /** The prefix generated by model-integration/../IntermediateOperation */
private final static String INTERMEDIATE_OPERATION_FUNCTION_PREFIX = "imported_ml_function_";
private final String name;
@@ -50,25 +50,37 @@ public class Model {
this(name,
functions.stream().collect(Collectors.toMap(f -> FunctionReference.fromName(f.getName()), f -> f)),
Collections.emptyMap(),
+ Collections.emptyList(),
Collections.emptyList());
}
Model(String name,
Map<FunctionReference, ExpressionFunction> functions,
Map<FunctionReference, ExpressionFunction> referencedFunctions,
- List<Constant> constants) {
+ List<Constant> constants,
+ List<OnnxModel> onnxModels) {
this.name = name;
// Build context and add missing function arguments (missing because it is legal to omit scalar type arguments)
ImmutableMap.Builder<String, LazyArrayContext> contextBuilder = new ImmutableMap.Builder<>();
for (Map.Entry<FunctionReference, ExpressionFunction> function : functions.entrySet()) {
try {
- LazyArrayContext context = new LazyArrayContext(function.getValue(), referencedFunctions, constants, this);
+ LazyArrayContext context = new LazyArrayContext(function.getValue(), referencedFunctions, constants, onnxModels, this);
contextBuilder.put(function.getValue().getName(), context);
if ( ! function.getValue().returnType().isPresent()) {
functions.put(function.getKey(), function.getValue().withReturnType(TensorType.empty));
}
+ for (Map.Entry<String, OnnxModel> entry : context.onnxModels().entrySet()) {
+ String onnxFeature = entry.getKey();
+ OnnxModel onnxModel = entry.getValue();
+ for(Map.Entry<String, TensorType> input : onnxModel.inputs().entrySet()) {
+ functions.put(function.getKey(), function.getValue().withArgument(input.getKey(), input.getValue()));
+ }
+ TensorType onnxOutputType = onnxModel.outputs().get(function.getKey().functionName());
+ functions.put(function.getKey(), function.getValue().withArgument(onnxFeature, onnxOutputType));
+ }
+
for (String argument : context.arguments()) {
if (function.getValue().getName().startsWith(INTERMEDIATE_OPERATION_FUNCTION_PREFIX)) {
// Internal (generated) functions do not have type info - add arguments
diff --git a/model-evaluation/src/main/java/ai/vespa/models/evaluation/ModelsEvaluator.java b/model-evaluation/src/main/java/ai/vespa/models/evaluation/ModelsEvaluator.java
index a0b859bf930..88766da67fc 100644
--- a/model-evaluation/src/main/java/ai/vespa/models/evaluation/ModelsEvaluator.java
+++ b/model-evaluation/src/main/java/ai/vespa/models/evaluation/ModelsEvaluator.java
@@ -7,6 +7,7 @@ import com.google.inject.Inject;
import com.yahoo.component.AbstractComponent;
import com.yahoo.filedistribution.fileacquirer.FileAcquirer;
import com.yahoo.vespa.config.search.RankProfilesConfig;
+import com.yahoo.vespa.config.search.core.OnnxModelsConfig;
import com.yahoo.vespa.config.search.core.RankingConstantsConfig;
import java.util.Map;
@@ -27,8 +28,9 @@ public class ModelsEvaluator extends AbstractComponent {
@Inject
public ModelsEvaluator(RankProfilesConfig config,
RankingConstantsConfig constantsConfig,
+ OnnxModelsConfig onnxModelsConfig,
FileAcquirer fileAcquirer) {
- this(new RankProfilesConfigImporter(fileAcquirer).importFrom(config, constantsConfig));
+ this(new RankProfilesConfigImporter(fileAcquirer).importFrom(config, constantsConfig, onnxModelsConfig));
}
public ModelsEvaluator(Map<String, Model> models) {
diff --git a/model-evaluation/src/main/java/ai/vespa/models/evaluation/OnnxModel.java b/model-evaluation/src/main/java/ai/vespa/models/evaluation/OnnxModel.java
new file mode 100644
index 00000000000..dc27c43ef70
--- /dev/null
+++ b/model-evaluation/src/main/java/ai/vespa/models/evaluation/OnnxModel.java
@@ -0,0 +1,57 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.models.evaluation;
+
+import ai.vespa.modelintegration.evaluator.OnnxEvaluator;
+import com.yahoo.tensor.Tensor;
+import com.yahoo.tensor.TensorType;
+
+import java.io.File;
+import java.util.Map;
+
+/**
+ * A named ONNX model that should be evaluated with OnnxEvaluator.
+ *
+ * @author lesters
+ */
+class OnnxModel {
+
+ private final String name;
+ private final File modelFile;
+
+ private OnnxEvaluator evaluator;
+
+ OnnxModel(String name, File modelFile) {
+ this.name = name;
+ this.modelFile = modelFile;
+ }
+
+ public String name() {
+ return name;
+ }
+
+ public void load() {
+ if (evaluator == null) {
+ evaluator = new OnnxEvaluator(modelFile.getPath());
+ }
+ }
+
+ public Map<String, TensorType> inputs() {
+ return evaluator().getInputInfo();
+ }
+
+ public Map<String, TensorType> outputs() {
+ return evaluator().getOutputInfo();
+ }
+
+ public Tensor evaluate(Map<String, Tensor> inputs, String output) {
+ return evaluator().evaluate(inputs, output);
+ }
+
+ private OnnxEvaluator evaluator() {
+ if (evaluator == null) {
+ throw new IllegalStateException("ONNX model has not been loaded.");
+ }
+ return evaluator;
+ }
+
+}
diff --git a/model-evaluation/src/main/java/ai/vespa/models/evaluation/RankProfilesConfigImporter.java b/model-evaluation/src/main/java/ai/vespa/models/evaluation/RankProfilesConfigImporter.java
index fb424439592..1bdb2810ddf 100644
--- a/model-evaluation/src/main/java/ai/vespa/models/evaluation/RankProfilesConfigImporter.java
+++ b/model-evaluation/src/main/java/ai/vespa/models/evaluation/RankProfilesConfigImporter.java
@@ -13,6 +13,7 @@ import com.yahoo.tensor.Tensor;
import com.yahoo.tensor.TensorType;
import com.yahoo.tensor.serialization.TypedBinaryFormat;
import com.yahoo.vespa.config.search.RankProfilesConfig;
+import com.yahoo.vespa.config.search.core.OnnxModelsConfig;
import com.yahoo.vespa.config.search.core.RankingConstantsConfig;
import java.io.File;
@@ -48,11 +49,13 @@ public class RankProfilesConfigImporter {
* Returns a map of the models contained in this config, indexed on name.
* The map is modifiable and owned by the caller.
*/
- public Map<String, Model> importFrom(RankProfilesConfig config, RankingConstantsConfig constantsConfig) {
+ public Map<String, Model> importFrom(RankProfilesConfig config,
+ RankingConstantsConfig constantsConfig,
+ OnnxModelsConfig onnxModelsConfig) {
try {
Map<String, Model> models = new HashMap<>();
for (RankProfilesConfig.Rankprofile profile : config.rankprofile()) {
- Model model = importProfile(profile, constantsConfig);
+ Model model = importProfile(profile, constantsConfig, onnxModelsConfig);
models.put(model.name(), model);
}
return models;
@@ -62,9 +65,12 @@ public class RankProfilesConfigImporter {
}
}
- private Model importProfile(RankProfilesConfig.Rankprofile profile, RankingConstantsConfig constantsConfig)
+ private Model importProfile(RankProfilesConfig.Rankprofile profile,
+ RankingConstantsConfig constantsConfig,
+ OnnxModelsConfig onnxModelsConfig)
throws ParseException {
+ List<OnnxModel> onnxModels = readOnnxModelsConfig(onnxModelsConfig);
List<Constant> constants = readLargeConstants(constantsConfig);
Map<FunctionReference, ExpressionFunction> functions = new LinkedHashMap<>();
@@ -76,7 +82,7 @@ public class RankProfilesConfigImporter {
Optional<FunctionReference> reference = FunctionReference.fromSerial(property.name());
Optional<Pair<FunctionReference, String>> argumentType = FunctionReference.fromTypeArgumentSerial(property.name());
Optional<FunctionReference> returnType = FunctionReference.fromReturnTypeSerial(property.name());
- if ( reference.isPresent()) {
+ if (reference.isPresent()) {
RankingExpression expression = new RankingExpression(reference.get().functionName(), property.value());
ExpressionFunction function = new ExpressionFunction(reference.get().functionName(),
Collections.emptyList(),
@@ -122,7 +128,7 @@ public class RankProfilesConfigImporter {
constants.addAll(smallConstantsInfo.asConstants());
try {
- return new Model(profile.name(), functions, referencedFunctions, constants);
+ return new Model(profile.name(), functions, referencedFunctions, constants, onnxModels);
}
catch (RuntimeException e) {
throw new IllegalArgumentException("Could not load model '" + profile.name() + "'", e);
@@ -136,6 +142,26 @@ public class RankProfilesConfigImporter {
return null;
}
+ private List<OnnxModel> readOnnxModelsConfig(OnnxModelsConfig onnxModelsConfig) {
+ List<OnnxModel> onnxModels = new ArrayList<>();
+ if (onnxModelsConfig != null) {
+ for (OnnxModelsConfig.Model onnxModelConfig : onnxModelsConfig.model()) {
+ onnxModels.add(readOnnxModelConfig(onnxModelConfig));
+ }
+ }
+ return onnxModels;
+ }
+
+ private OnnxModel readOnnxModelConfig(OnnxModelsConfig.Model onnxModelConfig) {
+ try {
+ String name = onnxModelConfig.name();
+ File file = fileAcquirer.waitFor(onnxModelConfig.fileref(), 7, TimeUnit.DAYS);
+ return new OnnxModel(name, file);
+ } catch (InterruptedException e) {
+ throw new IllegalStateException("Gave up waiting for ONNX model " + onnxModelConfig.name());
+ }
+ }
+
private List<Constant> readLargeConstants(RankingConstantsConfig constantsConfig) {
List<Constant> constants = new ArrayList<>();
diff --git a/model-evaluation/src/test/java/ai/vespa/models/evaluation/ModelTester.java b/model-evaluation/src/test/java/ai/vespa/models/evaluation/ModelTester.java
index bacdb52a201..d252594e729 100644
--- a/model-evaluation/src/test/java/ai/vespa/models/evaluation/ModelTester.java
+++ b/model-evaluation/src/test/java/ai/vespa/models/evaluation/ModelTester.java
@@ -14,6 +14,7 @@ import com.yahoo.tensor.Tensor;
import com.yahoo.tensor.TensorType;
import com.yahoo.tensor.serialization.TypedBinaryFormat;
import com.yahoo.vespa.config.search.RankProfilesConfig;
+import com.yahoo.vespa.config.search.core.OnnxModelsConfig;
import com.yahoo.vespa.config.search.core.RankingConstantsConfig;
import java.io.IOException;
@@ -45,8 +46,10 @@ public class ModelTester {
RankProfilesConfig.class).getConfig("");
RankingConstantsConfig constantsConfig = new ConfigGetter<>(new FileSource(configDir.append("ranking-constants.cfg").toFile()),
RankingConstantsConfig.class).getConfig("");
+ OnnxModelsConfig onnxModelsConfig = new ConfigGetter<>(new FileSource(configDir.append("onnx-models.cfg").toFile()),
+ OnnxModelsConfig.class).getConfig("");
return new RankProfilesConfigImporterWithMockedConstants(Path.fromString(path).append("constants"), MockFileAcquirer.returnFile(null))
- .importFrom(config, constantsConfig);
+ .importFrom(config, constantsConfig, onnxModelsConfig);
}
public ExpressionFunction assertFunction(String name, String expression, Model model) {
diff --git a/model-evaluation/src/test/java/ai/vespa/models/evaluation/ModelsEvaluatorTest.java b/model-evaluation/src/test/java/ai/vespa/models/evaluation/ModelsEvaluatorTest.java
index 6fcf76d2815..dce033c79b0 100644
--- a/model-evaluation/src/test/java/ai/vespa/models/evaluation/ModelsEvaluatorTest.java
+++ b/model-evaluation/src/test/java/ai/vespa/models/evaluation/ModelsEvaluatorTest.java
@@ -10,6 +10,7 @@ import com.yahoo.searchlib.rankingexpression.RankingExpression;
import com.yahoo.tensor.Tensor;
import com.yahoo.tensor.TensorType;
import com.yahoo.vespa.config.search.RankProfilesConfig;
+import com.yahoo.vespa.config.search.core.OnnxModelsConfig;
import com.yahoo.vespa.config.search.core.RankingConstantsConfig;
import com.yahoo.yolean.Exceptions;
import org.junit.Test;
@@ -131,7 +132,9 @@ public class ModelsEvaluatorTest {
RankProfilesConfig.class).getConfig("");
RankingConstantsConfig constantsConfig = new ConfigGetter<>(new FileSource(configDir.append("ranking-constants.cfg").toFile()),
RankingConstantsConfig.class).getConfig("");
- return new ModelsEvaluator(config, constantsConfig, MockFileAcquirer.returnFile(null));
+ OnnxModelsConfig onnxModelsConfig = new ConfigGetter<>(new FileSource(configDir.append("onnx-models.cfg").toFile()),
+ OnnxModelsConfig.class).getConfig("");
+ return new ModelsEvaluator(config, constantsConfig, onnxModelsConfig, MockFileAcquirer.returnFile(null));
}
}
diff --git a/model-evaluation/src/test/java/ai/vespa/models/evaluation/OnnxEvaluatorTest.java b/model-evaluation/src/test/java/ai/vespa/models/evaluation/OnnxEvaluatorTest.java
new file mode 100644
index 00000000000..1d55fdf9e6a
--- /dev/null
+++ b/model-evaluation/src/test/java/ai/vespa/models/evaluation/OnnxEvaluatorTest.java
@@ -0,0 +1,69 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.models.evaluation;
+
+import com.yahoo.config.subscription.ConfigGetter;
+import com.yahoo.config.subscription.FileSource;
+import com.yahoo.filedistribution.fileacquirer.FileAcquirer;
+import com.yahoo.filedistribution.fileacquirer.MockFileAcquirer;
+import com.yahoo.path.Path;
+import com.yahoo.tensor.Tensor;
+import com.yahoo.vespa.config.search.RankProfilesConfig;
+import com.yahoo.vespa.config.search.core.OnnxModelsConfig;
+import com.yahoo.vespa.config.search.core.RankingConstantsConfig;
+import org.junit.Test;
+
+import java.io.File;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * @author lesters
+ */
+public class OnnxEvaluatorTest {
+
+ private static final double delta = 0.00000000001;
+
+ @Test
+ public void testOnnxEvaluation() {
+ ModelsEvaluator models = createModels("src/test/resources/config/onnx/");
+
+ assertTrue(models.models().containsKey("add_mul"));
+ assertTrue(models.models().containsKey("one_layer"));
+
+ FunctionEvaluator function = models.evaluatorOf("add_mul", "output1");
+ function.bind("input1", Tensor.from("tensor<float>(d0[1]):[2]"));
+ function.bind("input2", Tensor.from("tensor<float>(d0[1]):[3]"));
+ assertEquals(6.0, function.evaluate().sum().asDouble(), delta);
+
+ function = models.evaluatorOf("add_mul", "output2");
+ function.bind("input1", Tensor.from("tensor<float>(d0[1]):[2]"));
+ function.bind("input2", Tensor.from("tensor<float>(d0[1]):[3]"));
+ assertEquals(5.0, function.evaluate().sum().asDouble(), delta);
+
+ function = models.evaluatorOf("one_layer");
+ function.bind("input", Tensor.from("tensor<float>(d0[2],d1[3]):[[0.1, 0.2, 0.3],[0.4,0.5,0.6]]"));
+ assertEquals(function.evaluate(), Tensor.from("tensor<float>(d0[2],d1[1]):[0.63931,0.67574]"));
+ }
+
+ private ModelsEvaluator createModels(String path) {
+ Path configDir = Path.fromString(path);
+ RankProfilesConfig config = new ConfigGetter<>(new FileSource(configDir.append("rank-profiles.cfg").toFile()),
+ RankProfilesConfig.class).getConfig("");
+ RankingConstantsConfig constantsConfig = new ConfigGetter<>(new FileSource(configDir.append("ranking-constants.cfg").toFile()),
+ RankingConstantsConfig.class).getConfig("");
+ OnnxModelsConfig onnxModelsConfig = new ConfigGetter<>(new FileSource(configDir.append("onnx-models.cfg").toFile()),
+ OnnxModelsConfig.class).getConfig("");
+
+ Map<String, File> fileMap = new HashMap<>();
+ for (OnnxModelsConfig.Model onnxModel : onnxModelsConfig.model()) {
+ fileMap.put(onnxModel.fileref().value(), new File(path + onnxModel.fileref().value()));
+ }
+ FileAcquirer fileAcquirer = MockFileAcquirer.returnFiles(fileMap);
+
+ return new ModelsEvaluator(config, constantsConfig, onnxModelsConfig, fileAcquirer);
+ }
+
+}
diff --git a/model-evaluation/src/test/java/ai/vespa/models/handler/HandlerTester.java b/model-evaluation/src/test/java/ai/vespa/models/handler/HandlerTester.java
new file mode 100644
index 00000000000..0da7f2ed096
--- /dev/null
+++ b/model-evaluation/src/test/java/ai/vespa/models/handler/HandlerTester.java
@@ -0,0 +1,76 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.models.handler;
+
+import ai.vespa.models.evaluation.ModelsEvaluator;
+import com.yahoo.container.jdisc.HttpRequest;
+import com.yahoo.container.jdisc.HttpResponse;
+import com.yahoo.tensor.Tensor;
+import com.yahoo.tensor.serialization.JsonFormat;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.Collections;
+import java.util.Map;
+import java.util.concurrent.Executors;
+
+import static org.junit.Assert.assertEquals;
+
+class HandlerTester {
+
+ private final ModelsEvaluationHandler handler;
+
+ HandlerTester(ModelsEvaluator models) {
+ this.handler = new ModelsEvaluationHandler(models, Executors.newSingleThreadExecutor());
+ }
+
+ void assertResponse(String url, int expectedCode) {
+ assertResponse(url, Collections.emptyMap(), expectedCode, (String)null);
+ }
+
+ void assertResponse(String url, int expectedCode, String expectedResult) {
+ assertResponse(url, Collections.emptyMap(), expectedCode, expectedResult);
+ }
+
+ void assertResponse(String url, Map<String, String> properties, int expectedCode, String expectedResult) {
+ HttpRequest getRequest = HttpRequest.createTestRequest(url, com.yahoo.jdisc.http.HttpRequest.Method.GET, null, properties);
+ HttpRequest postRequest = HttpRequest.createTestRequest(url, com.yahoo.jdisc.http.HttpRequest.Method.POST, null, properties);
+ assertResponse(getRequest, expectedCode, expectedResult);
+ assertResponse(postRequest, expectedCode, expectedResult);
+ }
+
+ void assertResponse(String url, Map<String, String> properties, int expectedCode, Tensor expectedResult) {
+ HttpRequest getRequest = HttpRequest.createTestRequest(url, com.yahoo.jdisc.http.HttpRequest.Method.GET, null, properties);
+ assertResponse(getRequest, expectedCode, expectedResult);
+ }
+
+ void assertResponse(HttpRequest request, int expectedCode, String expectedResult) {
+ HttpResponse response = handler.handle(request);
+ assertEquals("application/json", response.getContentType());
+ assertEquals(expectedCode, response.getStatus());
+ if (expectedResult != null) {
+ assertEquals(expectedResult, getContents(response));
+ }
+ }
+
+ void assertResponse(HttpRequest request, int expectedCode, Tensor expectedResult) {
+ HttpResponse response = handler.handle(request);
+ assertEquals("application/json", response.getContentType());
+ assertEquals(expectedCode, response.getStatus());
+ if (expectedResult != null) {
+ String contents = getContents(response);
+ Tensor result = JsonFormat.decode(expectedResult.type(), contents.getBytes(StandardCharsets.UTF_8));
+ assertEquals(expectedResult, result);
+ }
+ }
+
+ private String getContents(HttpResponse response) {
+ try (ByteArrayOutputStream stream = new ByteArrayOutputStream()) {
+ response.render(stream);
+ return stream.toString();
+ } catch (IOException e) {
+ throw new Error(e);
+ }
+ }
+
+}
diff --git a/model-evaluation/src/test/java/ai/vespa/models/handler/ModelsEvaluationHandlerTest.java b/model-evaluation/src/test/java/ai/vespa/models/handler/ModelsEvaluationHandlerTest.java
index c9e49d3be02..a69a220e532 100644
--- a/model-evaluation/src/test/java/ai/vespa/models/handler/ModelsEvaluationHandlerTest.java
+++ b/model-evaluation/src/test/java/ai/vespa/models/handler/ModelsEvaluationHandlerTest.java
@@ -5,51 +5,41 @@ import ai.vespa.models.evaluation.ModelTester;
import ai.vespa.models.evaluation.ModelsEvaluator;
import com.yahoo.config.subscription.ConfigGetter;
import com.yahoo.config.subscription.FileSource;
-import com.yahoo.container.jdisc.HttpRequest;
-import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.filedistribution.fileacquirer.MockFileAcquirer;
import com.yahoo.path.Path;
import com.yahoo.tensor.Tensor;
import com.yahoo.tensor.TensorType;
import com.yahoo.vespa.config.search.RankProfilesConfig;
+import com.yahoo.vespa.config.search.core.OnnxModelsConfig;
import com.yahoo.vespa.config.search.core.RankingConstantsConfig;
import org.junit.BeforeClass;
import org.junit.Test;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
-import java.util.concurrent.Executor;
-import java.util.concurrent.Executors;
-
-import static org.junit.Assert.assertEquals;
public class ModelsEvaluationHandlerTest {
- private static ModelsEvaluationHandler handler;
+ private static HandlerTester handler;
@BeforeClass
static public void setUp() {
- Executor executor = Executors.newSingleThreadExecutor();
- ModelsEvaluator models = createModels("src/test/resources/config/models/");
- handler = new ModelsEvaluationHandler(models, executor);
+ handler = new HandlerTester(createModels("src/test/resources/config/models/"));
}
@Test
public void testUnknownAPI() {
- assertResponse("http://localhost/wrong-api-binding", 404);
+ handler.assertResponse("http://localhost/wrong-api-binding", 404);
}
@Test
public void testUnknownVersion() {
- assertResponse("http://localhost/model-evaluation/v0", 404);
+ handler.assertResponse("http://localhost/model-evaluation/v0", 404);
}
@Test
public void testNonExistingModel() {
- assertResponse("http://localhost/model-evaluation/v1/non-existing-model", 404);
+ handler.assertResponse("http://localhost/model-evaluation/v1/non-existing-model", 404);
}
@Test
@@ -57,14 +47,14 @@ public class ModelsEvaluationHandlerTest {
String url = "http://localhost/model-evaluation/v1";
String expected =
"{\"mnist_softmax\":\"http://localhost/model-evaluation/v1/mnist_softmax\",\"mnist_saved\":\"http://localhost/model-evaluation/v1/mnist_saved\",\"mnist_softmax_saved\":\"http://localhost/model-evaluation/v1/mnist_softmax_saved\",\"xgboost_2_2\":\"http://localhost/model-evaluation/v1/xgboost_2_2\",\"lightgbm_regression\":\"http://localhost/model-evaluation/v1/lightgbm_regression\"}";
- assertResponse(url, 200, expected);
+ handler.assertResponse(url, 200, expected);
}
@Test
public void testXgBoostEvaluationWithoutBindings() {
String url = "http://localhost/model-evaluation/v1/xgboost_2_2/eval"; // only has a single function
String expected = "{\"cells\":[{\"address\":{},\"value\":-4.376589999999999}]}";
- assertResponse(url, 200, expected);
+ handler.assertResponse(url, 200, expected);
}
@Test
@@ -77,7 +67,7 @@ public class ModelsEvaluationHandlerTest {
properties.put("non-existing-binding", "-1");
String url = "http://localhost/model-evaluation/v1/xgboost_2_2/eval";
String expected = "{\"cells\":[{\"address\":{},\"value\":-7.936679999999999}]}";
- assertResponse(url, properties, 200, expected);
+ handler.assertResponse(url, properties, 200, expected);
}
@Test
@@ -90,14 +80,14 @@ public class ModelsEvaluationHandlerTest {
properties.put("non-existing-binding", "-1");
String url = "http://localhost/model-evaluation/v1/xgboost_2_2/eval";
String expected = "{\"cells\":[{\"address\":{},\"value\":-7.936679999999999}]}";
- assertResponse(url, properties, 200, expected);
+ handler.assertResponse(url, properties, 200, expected);
}
@Test
public void testLightGBMEvaluationWithoutBindings() {
String url = "http://localhost/model-evaluation/v1/lightgbm_regression/eval";
String expected = "{\"cells\":[{\"address\":{},\"value\":1.9130086820218188}]}";
- assertResponse(url, 200, expected);
+ handler.assertResponse(url, 200, expected);
}
@Test
@@ -110,7 +100,7 @@ public class ModelsEvaluationHandlerTest {
properties.put("non-existing-binding", "-1");
String url = "http://localhost/model-evaluation/v1/lightgbm_regression/eval";
String expected = "{\"cells\":[{\"address\":{},\"value\":2.054697758469921}]}";
- assertResponse(url, properties, 200, expected);
+ handler.assertResponse(url, properties, 200, expected);
}
@Test
@@ -123,35 +113,35 @@ public class ModelsEvaluationHandlerTest {
properties.put("non-existing-binding", "-1");
String url = "http://localhost/model-evaluation/v1/lightgbm_regression/eval";
String expected = "{\"cells\":[{\"address\":{},\"value\":2.0745534018208094}]}";
- assertResponse(url, properties, 200, expected);
+ handler.assertResponse(url, properties, 200, expected);
}
@Test
public void testMnistSoftmaxDetails() {
String url = "http://localhost:8080/model-evaluation/v1/mnist_softmax";
String expected = "{\"model\":\"mnist_softmax\",\"functions\":[{\"function\":\"default.add\",\"info\":\"http://localhost:8080/model-evaluation/v1/mnist_softmax/default.add\",\"eval\":\"http://localhost:8080/model-evaluation/v1/mnist_softmax/default.add/eval\",\"arguments\":[{\"name\":\"Placeholder\",\"type\":\"tensor(d0[],d1[784])\"}]}]}";
- assertResponse(url, 200, expected);
+ handler.assertResponse(url, 200, expected);
}
@Test
public void testMnistSoftmaxTypeDetails() {
String url = "http://localhost/model-evaluation/v1/mnist_softmax/default.add/";
String expected = "{\"model\":\"mnist_softmax\",\"function\":\"default.add\",\"info\":\"http://localhost/model-evaluation/v1/mnist_softmax/default.add\",\"eval\":\"http://localhost/model-evaluation/v1/mnist_softmax/default.add/eval\",\"arguments\":[{\"name\":\"Placeholder\",\"type\":\"tensor(d0[],d1[784])\"}]}";
- assertResponse(url, 200, expected);
+ handler.assertResponse(url, 200, expected);
}
@Test
public void testMnistSoftmaxEvaluateDefaultFunctionWithoutBindings() {
String url = "http://localhost/model-evaluation/v1/mnist_softmax/eval";
String expected = "{\"error\":\"Argument 'Placeholder' must be bound to a value of type tensor(d0[],d1[784])\"}";
- assertResponse(url, 400, expected);
+ handler.assertResponse(url, 400, expected);
}
@Test
public void testMnistSoftmaxEvaluateSpecificFunctionWithoutBindings() {
String url = "http://localhost/model-evaluation/v1/mnist_softmax/default.add/eval";
String expected = "{\"error\":\"Argument 'Placeholder' must be bound to a value of type tensor(d0[],d1[784])\"}";
- assertResponse(url, 400, expected);
+ handler.assertResponse(url, 400, expected);
}
@Test
@@ -160,7 +150,7 @@ public class ModelsEvaluationHandlerTest {
properties.put("Placeholder", inputTensor());
String url = "http://localhost/model-evaluation/v1/mnist_softmax/eval";
String expected = "{\"cells\":[{\"address\":{\"d0\":\"0\",\"d1\":\"0\"},\"value\":-0.3546536862850189},{\"address\":{\"d0\":\"0\",\"d1\":\"1\"},\"value\":0.3759574592113495},{\"address\":{\"d0\":\"0\",\"d1\":\"2\"},\"value\":0.06054411828517914},{\"address\":{\"d0\":\"0\",\"d1\":\"3\"},\"value\":-0.251544713973999},{\"address\":{\"d0\":\"0\",\"d1\":\"4\"},\"value\":0.017951013520359993},{\"address\":{\"d0\":\"0\",\"d1\":\"5\"},\"value\":1.2899067401885986},{\"address\":{\"d0\":\"0\",\"d1\":\"6\"},\"value\":-0.10389615595340729},{\"address\":{\"d0\":\"0\",\"d1\":\"7\"},\"value\":0.6367976665496826},{\"address\":{\"d0\":\"0\",\"d1\":\"8\"},\"value\":-1.4136744737625122},{\"address\":{\"d0\":\"0\",\"d1\":\"9\"},\"value\":-0.2573896050453186}]}";
- assertResponse(url, properties, 200, expected);
+ handler.assertResponse(url, properties, 200, expected);
}
@Test
@@ -169,28 +159,28 @@ public class ModelsEvaluationHandlerTest {
properties.put("Placeholder", inputTensor());
String url = "http://localhost/model-evaluation/v1/mnist_softmax/default.add/eval";
String expected = "{\"cells\":[{\"address\":{\"d0\":\"0\",\"d1\":\"0\"},\"value\":-0.3546536862850189},{\"address\":{\"d0\":\"0\",\"d1\":\"1\"},\"value\":0.3759574592113495},{\"address\":{\"d0\":\"0\",\"d1\":\"2\"},\"value\":0.06054411828517914},{\"address\":{\"d0\":\"0\",\"d1\":\"3\"},\"value\":-0.251544713973999},{\"address\":{\"d0\":\"0\",\"d1\":\"4\"},\"value\":0.017951013520359993},{\"address\":{\"d0\":\"0\",\"d1\":\"5\"},\"value\":1.2899067401885986},{\"address\":{\"d0\":\"0\",\"d1\":\"6\"},\"value\":-0.10389615595340729},{\"address\":{\"d0\":\"0\",\"d1\":\"7\"},\"value\":0.6367976665496826},{\"address\":{\"d0\":\"0\",\"d1\":\"8\"},\"value\":-1.4136744737625122},{\"address\":{\"d0\":\"0\",\"d1\":\"9\"},\"value\":-0.2573896050453186}]}";
- assertResponse(url, properties, 200, expected);
+ handler.assertResponse(url, properties, 200, expected);
}
@Test
public void testMnistSavedDetails() {
String url = "http://localhost:8080/model-evaluation/v1/mnist_saved";
String expected = "{\"model\":\"mnist_saved\",\"functions\":[{\"function\":\"serving_default.y\",\"info\":\"http://localhost:8080/model-evaluation/v1/mnist_saved/serving_default.y\",\"eval\":\"http://localhost:8080/model-evaluation/v1/mnist_saved/serving_default.y/eval\",\"arguments\":[{\"name\":\"input\",\"type\":\"tensor(d0[],d1[784])\"}]}]}";
- assertResponse(url, 200, expected);
+ handler.assertResponse(url, 200, expected);
}
@Test
public void testMnistSavedTypeDetails() {
String url = "http://localhost/model-evaluation/v1/mnist_saved/serving_default.y/";
String expected = "{\"model\":\"mnist_saved\",\"function\":\"serving_default.y\",\"info\":\"http://localhost/model-evaluation/v1/mnist_saved/serving_default.y\",\"eval\":\"http://localhost/model-evaluation/v1/mnist_saved/serving_default.y/eval\",\"arguments\":[{\"name\":\"input\",\"type\":\"tensor(d0[],d1[784])\"}]}";
- assertResponse(url, 200, expected);
+ handler.assertResponse(url, 200, expected);
}
@Test
public void testMnistSavedEvaluateDefaultFunctionShouldFail() {
String url = "http://localhost/model-evaluation/v1/mnist_saved/eval";
String expected = "{\"error\":\"More than one function is available in model 'mnist_saved', but no name is given. Available functions: imported_ml_function_mnist_saved_dnn_hidden1_add, serving_default.y\"}";
- assertResponse(url, 404, expected);
+ handler.assertResponse(url, 404, expected);
}
@Test
@@ -199,40 +189,7 @@ public class ModelsEvaluationHandlerTest {
properties.put("input", inputTensor());
String url = "http://localhost/model-evaluation/v1/mnist_saved/serving_default.y/eval";
String expected = "{\"cells\":[{\"address\":{\"d0\":\"0\",\"d1\":\"0\"},\"value\":-0.6319251673007533},{\"address\":{\"d0\":\"0\",\"d1\":\"1\"},\"value\":-7.577770600619843E-4},{\"address\":{\"d0\":\"0\",\"d1\":\"2\"},\"value\":-0.010707969042025622},{\"address\":{\"d0\":\"0\",\"d1\":\"3\"},\"value\":-0.6344759233540788},{\"address\":{\"d0\":\"0\",\"d1\":\"4\"},\"value\":-0.17529455385847528},{\"address\":{\"d0\":\"0\",\"d1\":\"5\"},\"value\":0.7490809723192187},{\"address\":{\"d0\":\"0\",\"d1\":\"6\"},\"value\":-0.022790284182901716},{\"address\":{\"d0\":\"0\",\"d1\":\"7\"},\"value\":0.26799240657608936},{\"address\":{\"d0\":\"0\",\"d1\":\"8\"},\"value\":-0.3152438845465862},{\"address\":{\"d0\":\"0\",\"d1\":\"9\"},\"value\":0.05949304847735276}]}";
- assertResponse(url, properties, 200, expected);
- }
-
- static private void assertResponse(String url, int expectedCode) {
- assertResponse(url, Collections.emptyMap(), expectedCode, null);
- }
-
- static private void assertResponse(String url, int expectedCode, String expectedResult) {
- assertResponse(url, Collections.emptyMap(), expectedCode, expectedResult);
- }
-
- static private void assertResponse(String url, Map<String, String> properties, int expectedCode, String expectedResult) {
- HttpRequest getRequest = HttpRequest.createTestRequest(url, com.yahoo.jdisc.http.HttpRequest.Method.GET, null, properties);
- HttpRequest postRequest = HttpRequest.createTestRequest(url, com.yahoo.jdisc.http.HttpRequest.Method.POST, null, properties);
- assertResponse(getRequest, expectedCode, expectedResult);
- assertResponse(postRequest, expectedCode, expectedResult);
- }
-
- static private void assertResponse(HttpRequest request, int expectedCode, String expectedResult) {
- HttpResponse response = handler.handle(request);
- assertEquals("application/json", response.getContentType());
- if (expectedResult != null) {
- assertEquals(expectedResult, getContents(response));
- }
- assertEquals(expectedCode, response.getStatus());
- }
-
- static private String getContents(HttpResponse response) {
- try (ByteArrayOutputStream stream = new ByteArrayOutputStream()) {
- response.render(stream);
- return stream.toString();
- } catch (IOException e) {
- throw new Error(e);
- }
+ handler.assertResponse(url, properties, 200, expected);
}
static private ModelsEvaluator createModels(String path) {
@@ -241,10 +198,12 @@ public class ModelsEvaluationHandlerTest {
RankProfilesConfig.class).getConfig("");
RankingConstantsConfig constantsConfig = new ConfigGetter<>(new FileSource(configDir.append("ranking-constants.cfg").toFile()),
RankingConstantsConfig.class).getConfig("");
+ OnnxModelsConfig onnxModelsConfig = new ConfigGetter<>(new FileSource(configDir.append("onnx-models.cfg").toFile()),
+ OnnxModelsConfig.class).getConfig("");
ModelTester.RankProfilesConfigImporterWithMockedConstants importer =
new ModelTester.RankProfilesConfigImporterWithMockedConstants(Path.fromString(path).append("constants"),
MockFileAcquirer.returnFile(null));
- return new ModelsEvaluator(importer.importFrom(config, constantsConfig));
+ return new ModelsEvaluator(importer.importFrom(config, constantsConfig, onnxModelsConfig));
}
private String inputTensor() {
diff --git a/model-evaluation/src/test/java/ai/vespa/models/handler/OnnxEvaluationHandlerTest.java b/model-evaluation/src/test/java/ai/vespa/models/handler/OnnxEvaluationHandlerTest.java
new file mode 100644
index 00000000000..6cfda4d8ce8
--- /dev/null
+++ b/model-evaluation/src/test/java/ai/vespa/models/handler/OnnxEvaluationHandlerTest.java
@@ -0,0 +1,137 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.models.handler;
+
+import ai.vespa.models.evaluation.ModelsEvaluator;
+import com.yahoo.config.subscription.ConfigGetter;
+import com.yahoo.config.subscription.FileSource;
+import com.yahoo.filedistribution.fileacquirer.FileAcquirer;
+import com.yahoo.filedistribution.fileacquirer.MockFileAcquirer;
+import com.yahoo.path.Path;
+import com.yahoo.tensor.Tensor;
+import com.yahoo.vespa.config.search.RankProfilesConfig;
+import com.yahoo.vespa.config.search.core.OnnxModelsConfig;
+import com.yahoo.vespa.config.search.core.RankingConstantsConfig;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.File;
+import java.util.HashMap;
+import java.util.Map;
+
+public class OnnxEvaluationHandlerTest {
+
+ private static HandlerTester handler;
+
+ @BeforeClass
+ static public void setUp() {
+ handler = new HandlerTester(createModels("src/test/resources/config/onnx/"));
+ }
+
+ @Test
+ public void testListModels() {
+ String url = "http://localhost/model-evaluation/v1";
+ String expected = "{\"one_layer\":\"http://localhost/model-evaluation/v1/one_layer\"," +
+ "\"add_mul\":\"http://localhost/model-evaluation/v1/add_mul\"," +
+ "\"no_model\":\"http://localhost/model-evaluation/v1/no_model\"}";
+ handler.assertResponse(url, 200, expected);
+ }
+
+ @Test
+ public void testModelInfo() {
+ String url = "http://localhost/model-evaluation/v1/add_mul";
+ String expected = "{\"model\":\"add_mul\",\"functions\":[" +
+ "{\"function\":\"output1\"," +
+ "\"info\":\"http://localhost/model-evaluation/v1/add_mul/output1\"," +
+ "\"eval\":\"http://localhost/model-evaluation/v1/add_mul/output1/eval\"," +
+ "\"arguments\":[" +
+ "{\"name\":\"input1\",\"type\":\"tensor<float>(d0[1])\"}," +
+ "{\"name\":\"onnxModel(add_mul).output1\",\"type\":\"tensor<float>(d0[1])\"}," +
+ "{\"name\":\"input2\",\"type\":\"tensor<float>(d0[1])\"}" +
+ "]}," +
+ "{\"function\":\"output2\"," +
+ "\"info\":\"http://localhost/model-evaluation/v1/add_mul/output2\"," +
+ "\"eval\":\"http://localhost/model-evaluation/v1/add_mul/output2/eval\"," +
+ "\"arguments\":[" +
+ "{\"name\":\"input1\",\"type\":\"tensor<float>(d0[1])\"}," +
+ "{\"name\":\"onnxModel(add_mul).output2\",\"type\":\"tensor<float>(d0[1])\"}," +
+ "{\"name\":\"input2\",\"type\":\"tensor<float>(d0[1])\"}" +
+ "]}]}";
+ handler.assertResponse(url, 200, expected);
+ }
+
+ @Test
+ public void testEvaluationWithoutSpecifyingOutput() {
+ String url = "http://localhost/model-evaluation/v1/add_mul/eval";
+ String expected = "{\"error\":\"More than one function is available in model 'add_mul', but no name is given. Available functions: output1, output2\"}";
+ handler.assertResponse(url, 404, expected);
+ }
+
+ @Test
+ public void testEvaluationWithoutBindings() {
+ String url = "http://localhost/model-evaluation/v1/add_mul/output1/eval";
+ String expected = "{\"error\":\"Argument 'input2' must be bound to a value of type tensor<float>(d0[1])\"}";
+ handler.assertResponse(url, 400, expected);
+ }
+
+ @Test
+ public void testEvaluationOutput1() {
+ Map<String, String> properties = new HashMap<>();
+ properties.put("input1", "tensor<float>(d0[1]):[2]");
+ properties.put("input2", "tensor<float>(d0[1]):[3]");
+ String url = "http://localhost/model-evaluation/v1/add_mul/output1/eval";
+ String expected = "{\"cells\":[{\"address\":{\"d0\":\"0\"},\"value\":6.0}]}"; // output1 is a mul
+ handler.assertResponse(url, properties, 200, expected);
+ }
+
+ @Test
+ public void testEvaluationOutput2() {
+ Map<String, String> properties = new HashMap<>();
+ properties.put("input1", "tensor<float>(d0[1]):[2]");
+ properties.put("input2", "tensor<float>(d0[1]):[3]");
+ String url = "http://localhost/model-evaluation/v1/add_mul/output2/eval";
+ String expected = "{\"cells\":[{\"address\":{\"d0\":\"0\"},\"value\":5.0}]}"; // output2 is an add
+ handler.assertResponse(url, properties, 200, expected);
+ }
+
+ @Test
+ public void testBatchDimensionModelInfo() {
+ String url = "http://localhost/model-evaluation/v1/one_layer";
+ String expected = "{\"model\":\"one_layer\",\"functions\":[" +
+ "{\"function\":\"output\"," +
+ "\"info\":\"http://localhost/model-evaluation/v1/one_layer/output\"," +
+ "\"eval\":\"http://localhost/model-evaluation/v1/one_layer/output/eval\"," +
+ "\"arguments\":[" +
+ "{\"name\":\"input\",\"type\":\"tensor<float>(d0[],d1[3])\"}," +
+ "{\"name\":\"onnxModel(one_layer)\",\"type\":\"tensor<float>(d0[],d1[1])\"}" +
+ "]}]}";
+ handler.assertResponse(url, 200, expected);
+ }
+
+ @Test
+ public void testBatchDimensionEvaluation() {
+ Map<String, String> properties = new HashMap<>();
+ properties.put("input", "tensor<float>(d0[],d1[3]):{{d0:0,d1:0}:0.1,{d0:0,d1:1}:0.2,{d0:0,d1:2}:0.3,{d0:1,d1:0}:0.4,{d0:1,d1:1}:0.5,{d0:1,d1:2}:0.6}");
+ String url = "http://localhost/model-evaluation/v1/one_layer/eval"; // output not specified
+ Tensor expected = Tensor.from("tensor<float>(d0[2],d1[1]):[0.6393113,0.67574286]");
+ handler.assertResponse(url, properties, 200, expected);
+ }
+
+ static private ModelsEvaluator createModels(String path) {
+ Path configDir = Path.fromString(path);
+ RankProfilesConfig config = new ConfigGetter<>(new FileSource(configDir.append("rank-profiles.cfg").toFile()),
+ RankProfilesConfig.class).getConfig("");
+ RankingConstantsConfig constantsConfig = new ConfigGetter<>(new FileSource(configDir.append("ranking-constants.cfg").toFile()),
+ RankingConstantsConfig.class).getConfig("");
+ OnnxModelsConfig onnxModelsConfig = new ConfigGetter<>(new FileSource(configDir.append("onnx-models.cfg").toFile()),
+ OnnxModelsConfig.class).getConfig("");
+
+ Map<String, File> fileMap = new HashMap<>();
+ for (OnnxModelsConfig.Model onnxModel : onnxModelsConfig.model()) {
+ fileMap.put(onnxModel.fileref().value(), new File(path + onnxModel.fileref().value()));
+ }
+ FileAcquirer fileAcquirer = MockFileAcquirer.returnFiles(fileMap);
+
+ return new ModelsEvaluator(config, constantsConfig, onnxModelsConfig, fileAcquirer);
+ }
+
+}
diff --git a/model-evaluation/src/test/resources/config/models/onnx-models.cfg b/model-evaluation/src/test/resources/config/models/onnx-models.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/model-evaluation/src/test/resources/config/models/onnx-models.cfg
diff --git a/model-evaluation/src/test/resources/config/onnx/models/add_mul.onnx b/model-evaluation/src/test/resources/config/onnx/models/add_mul.onnx
new file mode 100644
index 00000000000..ab054d112e9
--- /dev/null
+++ b/model-evaluation/src/test/resources/config/onnx/models/add_mul.onnx
@@ -0,0 +1,24 @@
+
+add_mul.py:£
+
+input1
+input2output1"Mul
+
+input1
+input2output2"Addadd_mulZ
+input1
+
+
+Z
+input2
+
+
+b
+output1
+
+
+b
+output2
+
+
+B \ No newline at end of file
diff --git a/model-evaluation/src/test/resources/config/onnx/models/add_mul.py b/model-evaluation/src/test/resources/config/onnx/models/add_mul.py
new file mode 100755
index 00000000000..3a4522042e8
--- /dev/null
+++ b/model-evaluation/src/test/resources/config/onnx/models/add_mul.py
@@ -0,0 +1,30 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+import onnx
+from onnx import helper, TensorProto
+
+INPUT_1 = helper.make_tensor_value_info('input1', TensorProto.FLOAT, [1])
+INPUT_2 = helper.make_tensor_value_info('input2', TensorProto.FLOAT, [1])
+OUTPUT_1 = helper.make_tensor_value_info('output1', TensorProto.FLOAT, [1])
+OUTPUT_2 = helper.make_tensor_value_info('output2', TensorProto.FLOAT, [1])
+
+nodes = [
+ helper.make_node(
+ 'Mul',
+ ['input1', 'input2'],
+ ['output1'],
+ ),
+ helper.make_node(
+ 'Add',
+ ['input1', 'input2'],
+ ['output2'],
+ ),
+]
+graph_def = helper.make_graph(
+ nodes,
+ 'add_mul',
+ [INPUT_1, INPUT_2],
+ [OUTPUT_1, OUTPUT_2],
+)
+model_def = helper.make_model(graph_def, producer_name='add_mul.py', opset_imports=[onnx.OperatorSetIdProto(version=12)])
+onnx.save(model_def, 'add_mul.onnx')
diff --git a/model-evaluation/src/test/resources/config/onnx/models/one_layer.onnx b/model-evaluation/src/test/resources/config/onnx/models/one_layer.onnx
new file mode 100644
index 00000000000..dc9f664b943
--- /dev/null
+++ b/model-evaluation/src/test/resources/config/onnx/models/one_layer.onnx
Binary files differ
diff --git a/model-evaluation/src/test/resources/config/onnx/models/pytorch_one_layer.py b/model-evaluation/src/test/resources/config/onnx/models/pytorch_one_layer.py
new file mode 100755
index 00000000000..1296d84e180
--- /dev/null
+++ b/model-evaluation/src/test/resources/config/onnx/models/pytorch_one_layer.py
@@ -0,0 +1,38 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+import torch
+import torch.onnx
+
+
+class MyModel(torch.nn.Module):
+ def __init__(self):
+ super(MyModel, self).__init__()
+ self.linear = torch.nn.Linear(in_features=3, out_features=1)
+ self.logistic = torch.nn.Sigmoid()
+
+ def forward(self, vec):
+ return self.logistic(self.linear(vec))
+
+
+def main():
+ model = MyModel()
+
+ # Omit training - just export randomly initialized network
+
+ data = torch.FloatTensor([[0.1, 0.2, 0.3],[0.4, 0.5, 0.6]])
+ torch.onnx.export(model,
+ data,
+ "one_layer.onnx",
+ input_names = ["input"],
+ output_names = ["output"],
+ dynamic_axes = {
+ "input": {0: "batch"},
+ "output": {0: "batch"},
+ },
+ opset_version=12)
+
+
+if __name__ == "__main__":
+ main()
+
+
diff --git a/model-evaluation/src/test/resources/config/onnx/onnx-models.cfg b/model-evaluation/src/test/resources/config/onnx/onnx-models.cfg
new file mode 100644
index 00000000000..9ad9c7f6a07
--- /dev/null
+++ b/model-evaluation/src/test/resources/config/onnx/onnx-models.cfg
@@ -0,0 +1,16 @@
+model[0].name "add_mul"
+model[0].fileref "models/add_mul.onnx"
+model[0].input[0].name "input1"
+model[0].input[0].source "input1"
+model[0].input[1].name "input2"
+model[0].input[1].source "input2"
+model[0].output[0].name "output1"
+model[0].output[0].as "output1"
+model[0].output[1].name "output2"
+model[0].output[1].as "output2"
+model[1].name "one_layer"
+model[1].fileref "models/one_layer.onnx"
+model[1].input[0].name "input"
+model[1].input[0].source "input"
+model[1].output[0].name "output"
+model[1].output[0].as "output"
diff --git a/model-evaluation/src/test/resources/config/onnx/rank-profiles.cfg b/model-evaluation/src/test/resources/config/onnx/rank-profiles.cfg
new file mode 100644
index 00000000000..047b7c3c77b
--- /dev/null
+++ b/model-evaluation/src/test/resources/config/onnx/rank-profiles.cfg
@@ -0,0 +1,17 @@
+rankprofile[0].name "add_mul"
+rankprofile[0].fef.property[0].name "rankingExpression(output1).rankingScript"
+rankprofile[0].fef.property[0].value "onnxModel(add_mul).output1"
+rankprofile[0].fef.property[1].name "rankingExpression(output1).type"
+rankprofile[0].fef.property[1].value "tensor<float>(d0[1])"
+rankprofile[0].fef.property[2].name "rankingExpression(output2).rankingScript"
+rankprofile[0].fef.property[2].value "onnxModel(add_mul).output2"
+rankprofile[0].fef.property[3].name "rankingExpression(output2).type"
+rankprofile[0].fef.property[3].value "tensor<float>(d0[1])"
+rankprofile[1].name "one_layer"
+rankprofile[1].fef.property[0].name "rankingExpression(output).rankingScript"
+rankprofile[1].fef.property[0].value "onnxModel(one_layer)"
+rankprofile[1].fef.property[1].name "rankingExpression(output).type"
+rankprofile[1].fef.property[1].value "tensor<float>(d0[],d1[1])"
+rankprofile[2].name "no_model"
+rankprofile[2].fef.property[0].name "rankingExpression(output).rankingScript"
+rankprofile[2].fef.property[0].value "onnxModel(no_model)"
diff --git a/model-evaluation/src/test/resources/config/onnx/ranking-constants.cfg b/model-evaluation/src/test/resources/config/onnx/ranking-constants.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/model-evaluation/src/test/resources/config/onnx/ranking-constants.cfg
diff --git a/model-evaluation/src/test/resources/config/rankexpression/onnx-models.cfg b/model-evaluation/src/test/resources/config/rankexpression/onnx-models.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/model-evaluation/src/test/resources/config/rankexpression/onnx-models.cfg
diff --git a/model-evaluation/src/test/resources/config/smallconstant/onnx-models.cfg b/model-evaluation/src/test/resources/config/smallconstant/onnx-models.cfg
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/model-evaluation/src/test/resources/config/smallconstant/onnx-models.cfg
diff --git a/model-integration/pom.xml b/model-integration/pom.xml
index 536d3578f8c..dc3154c5c41 100644
--- a/model-integration/pom.xml
+++ b/model-integration/pom.xml
@@ -53,6 +53,10 @@
</dependency>
<dependency>
+ <groupId>com.microsoft.onnxruntime</groupId>
+ <artifactId>onnxruntime</artifactId>
+ </dependency>
+ <dependency>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
</dependency>
diff --git a/model-integration/src/main/java/ai/vespa/modelintegration/evaluator/OnnxEvaluator.java b/model-integration/src/main/java/ai/vespa/modelintegration/evaluator/OnnxEvaluator.java
new file mode 100644
index 00000000000..59ad20b7714
--- /dev/null
+++ b/model-integration/src/main/java/ai/vespa/modelintegration/evaluator/OnnxEvaluator.java
@@ -0,0 +1,79 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package ai.vespa.modelintegration.evaluator;
+
+import ai.onnxruntime.OnnxTensor;
+import ai.onnxruntime.OnnxValue;
+import ai.onnxruntime.OrtEnvironment;
+import ai.onnxruntime.OrtException;
+import ai.onnxruntime.OrtSession;
+import com.yahoo.tensor.Tensor;
+import com.yahoo.tensor.TensorType;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+
+/**
+ * Evaluates an ONNX Model by deferring to ONNX Runtime.
+ *
+ * @author lesters
+ */
+public class OnnxEvaluator {
+
+ private final OrtEnvironment environment;
+ private final OrtSession session;
+
+ public OnnxEvaluator(String modelPath) {
+ try {
+ environment = OrtEnvironment.getEnvironment();
+ session = environment.createSession(modelPath, new OrtSession.SessionOptions());
+ } catch (OrtException e) {
+ throw new RuntimeException("ONNX Runtime exception", e);
+ }
+ }
+
+ public Tensor evaluate(Map<String, Tensor> inputs, String output) {
+ try {
+ Map<String, OnnxTensor> onnxInputs = TensorConverter.toOnnxTensors(inputs, environment, session);
+ try (OrtSession.Result result = session.run(onnxInputs, Collections.singleton(output))) {
+ return TensorConverter.toVespaTensor(result.get(0));
+ }
+ } catch (OrtException e) {
+ throw new RuntimeException("ONNX Runtime exception", e);
+ }
+ }
+
+ public Map<String, Tensor> evaluate(Map<String, Tensor> inputs) {
+ try {
+ Map<String, OnnxTensor> onnxInputs = TensorConverter.toOnnxTensors(inputs, environment, session);
+ Map<String, Tensor> outputs = new HashMap<>();
+ try (OrtSession.Result result = session.run(onnxInputs)) {
+ for (Map.Entry<String, OnnxValue> output : result) {
+ outputs.put(output.getKey(), TensorConverter.toVespaTensor(output.getValue()));
+ }
+ return outputs;
+ }
+ } catch (OrtException e) {
+ throw new RuntimeException("ONNX Runtime exception", e);
+ }
+ }
+
+ public Map<String, TensorType> getInputInfo() {
+ try {
+ return TensorConverter.toVespaTypes(session.getInputInfo());
+ } catch (OrtException e) {
+ throw new RuntimeException("ONNX Runtime exception", e);
+ }
+ }
+
+ public Map<String, TensorType> getOutputInfo() {
+ try {
+ return TensorConverter.toVespaTypes(session.getOutputInfo());
+ } catch (OrtException e) {
+ throw new RuntimeException("ONNX Runtime exception", e);
+ }
+ }
+
+}
diff --git a/model-integration/src/main/java/ai/vespa/modelintegration/evaluator/TensorConverter.java b/model-integration/src/main/java/ai/vespa/modelintegration/evaluator/TensorConverter.java
new file mode 100644
index 00000000000..c1f973300d6
--- /dev/null
+++ b/model-integration/src/main/java/ai/vespa/modelintegration/evaluator/TensorConverter.java
@@ -0,0 +1,181 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package ai.vespa.modelintegration.evaluator;
+
+import ai.onnxruntime.NodeInfo;
+import ai.onnxruntime.OnnxJavaType;
+import ai.onnxruntime.OnnxTensor;
+import ai.onnxruntime.OnnxValue;
+import ai.onnxruntime.OrtEnvironment;
+import ai.onnxruntime.OrtException;
+import ai.onnxruntime.OrtSession;
+import ai.onnxruntime.TensorInfo;
+import ai.onnxruntime.ValueInfo;
+import com.yahoo.tensor.DimensionSizes;
+import com.yahoo.tensor.IndexedTensor;
+import com.yahoo.tensor.Tensor;
+import com.yahoo.tensor.TensorType;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.nio.DoubleBuffer;
+import java.nio.FloatBuffer;
+import java.nio.IntBuffer;
+import java.nio.LongBuffer;
+import java.nio.ShortBuffer;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+
+/**
+ * @author lesters
+ */
+class TensorConverter {
+
+ static Map<String, OnnxTensor> toOnnxTensors(Map<String, Tensor> tensorMap, OrtEnvironment env, OrtSession session)
+ throws OrtException
+ {
+ Map<String, OnnxTensor> result = new HashMap<>();
+ for (String name : tensorMap.keySet()) {
+ Tensor vespaTensor = tensorMap.get(name);
+ TensorInfo onnxTensorInfo = toTensorInfo(session.getInputInfo().get(name).getInfo());
+ OnnxTensor onnxTensor = toOnnxTensor(vespaTensor, onnxTensorInfo, env);
+ result.put(name, onnxTensor);
+ }
+ return result;
+ }
+
+ static OnnxTensor toOnnxTensor(Tensor vespaTensor, TensorInfo onnxTensorInfo, OrtEnvironment environment)
+ throws OrtException
+ {
+ if ( ! (vespaTensor instanceof IndexedTensor)) {
+ throw new IllegalArgumentException("OnnxEvaluator currently only supports tensors with indexed dimensions");
+ }
+ IndexedTensor tensor = (IndexedTensor) vespaTensor;
+
+ ByteBuffer buffer = ByteBuffer.allocateDirect((int)tensor.size() * onnxTensorInfo.type.size).order(ByteOrder.nativeOrder());
+ if (onnxTensorInfo.type == OnnxJavaType.FLOAT) {
+ for (int i = 0; i < tensor.size(); i++)
+ buffer.putFloat(tensor.getFloat(i));
+ return OnnxTensor.createTensor(environment, buffer.rewind().asFloatBuffer(), tensor.shape());
+ }
+ if (onnxTensorInfo.type == OnnxJavaType.DOUBLE) {
+ for (int i = 0; i < tensor.size(); i++)
+ buffer.putDouble(tensor.get(i));
+ return OnnxTensor.createTensor(environment, buffer.rewind().asDoubleBuffer(), tensor.shape());
+ }
+ if (onnxTensorInfo.type == OnnxJavaType.INT8) {
+ for (int i = 0; i < tensor.size(); i++)
+ buffer.put((byte) tensor.get(i));
+ return OnnxTensor.createTensor(environment, buffer.rewind(), tensor.shape());
+ }
+ if (onnxTensorInfo.type == OnnxJavaType.INT16) {
+ for (int i = 0; i < tensor.size(); i++)
+ buffer.putShort((short) tensor.get(i));
+ return OnnxTensor.createTensor(environment, buffer.rewind().asShortBuffer(), tensor.shape());
+ }
+ if (onnxTensorInfo.type == OnnxJavaType.INT32) {
+ for (int i = 0; i < tensor.size(); i++)
+ buffer.putInt((int) tensor.get(i));
+ return OnnxTensor.createTensor(environment, buffer.rewind().asIntBuffer(), tensor.shape());
+ }
+ if (onnxTensorInfo.type == OnnxJavaType.INT64) {
+ for (int i = 0; i < tensor.size(); i++)
+ buffer.putLong((long) tensor.get(i));
+ return OnnxTensor.createTensor(environment, buffer.rewind().asLongBuffer(), tensor.shape());
+ }
+ throw new IllegalArgumentException("OnnxEvaluator does not currently support value type " + onnxTensorInfo.type);
+ }
+
+ static Tensor toVespaTensor(OnnxValue onnxValue) {
+ if ( ! (onnxValue instanceof OnnxTensor)) {
+ throw new IllegalArgumentException("ONNX value is not a tensor: maps and sequences are not yet supported");
+ }
+ OnnxTensor onnxTensor = (OnnxTensor) onnxValue;
+ TensorInfo tensorInfo = onnxTensor.getInfo();
+
+ TensorType type = toVespaType(onnxTensor.getInfo());
+ DimensionSizes sizes = sizesFromType(type);
+
+ IndexedTensor.BoundBuilder builder = (IndexedTensor.BoundBuilder) Tensor.Builder.of(type, sizes);
+ if (tensorInfo.type == OnnxJavaType.FLOAT) {
+ FloatBuffer buffer = onnxTensor.getFloatBuffer();
+ for (long i = 0; i < sizes.totalSize(); i++)
+ builder.cellByDirectIndex(i, buffer.get());
+ }
+ else if (tensorInfo.type == OnnxJavaType.DOUBLE) {
+ DoubleBuffer buffer = onnxTensor.getDoubleBuffer();
+ for (long i = 0; i < sizes.totalSize(); i++)
+ builder.cellByDirectIndex(i, buffer.get());
+ }
+ else if (tensorInfo.type == OnnxJavaType.INT8) {
+ ByteBuffer buffer = onnxTensor.getByteBuffer();
+ for (long i = 0; i < sizes.totalSize(); i++)
+ builder.cellByDirectIndex(i, buffer.get());
+ }
+ else if (tensorInfo.type == OnnxJavaType.INT16) {
+ ShortBuffer buffer = onnxTensor.getShortBuffer();
+ for (long i = 0; i < sizes.totalSize(); i++)
+ builder.cellByDirectIndex(i, buffer.get());
+ }
+ else if (tensorInfo.type == OnnxJavaType.INT32) {
+ IntBuffer buffer = onnxTensor.getIntBuffer();
+ for (long i = 0; i < sizes.totalSize(); i++)
+ builder.cellByDirectIndex(i, buffer.get());
+ }
+ else if (tensorInfo.type == OnnxJavaType.INT64) {
+ LongBuffer buffer = onnxTensor.getLongBuffer();
+ for (long i = 0; i < sizes.totalSize(); i++)
+ builder.cellByDirectIndex(i, buffer.get());
+ }
+ else {
+ throw new IllegalArgumentException("OnnxEvaluator does not currently support value type " + onnxTensor.getInfo().type);
+ }
+ return builder.build();
+ }
+
+ static private DimensionSizes sizesFromType(TensorType type) {
+ DimensionSizes.Builder builder = new DimensionSizes.Builder(type.dimensions().size());
+ for (int i = 0; i < type.dimensions().size(); i++)
+ builder.set(i, type.dimensions().get(i).size().get());
+ return builder.build();
+ }
+
+ static Map<String, TensorType> toVespaTypes(Map<String, NodeInfo> infoMap) {
+ return infoMap.entrySet().stream().collect(Collectors.toMap(e -> e.getKey(), e -> toVespaType(e.getValue().getInfo())));
+ }
+
+ static TensorType toVespaType(ValueInfo valueInfo) {
+ TensorInfo tensorInfo = toTensorInfo(valueInfo);
+ TensorType.Builder builder = new TensorType.Builder(toVespaValueType(tensorInfo.onnxType));
+ long[] shape = tensorInfo.getShape();
+ for (int i = 0; i < shape.length; ++i) {
+ long dimSize = shape[i];
+ String dimName = "d" + i; // standard naming convention
+ if (dimSize > 0)
+ builder.indexed(dimName, dimSize);
+ else
+ builder.indexed(dimName); // unbound dimension for dim size -1
+ }
+ return builder.build();
+ }
+
+ static private TensorType.Value toVespaValueType(TensorInfo.OnnxTensorType onnxType) {
+ switch (onnxType) {
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8: return TensorType.Value.INT8;
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16: return TensorType.Value.BFLOAT16;
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT: return TensorType.Value.FLOAT;
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE: return TensorType.Value.DOUBLE;
+ }
+ return TensorType.Value.DOUBLE;
+ }
+
+ static private TensorInfo toTensorInfo(ValueInfo valueInfo) {
+ if ( ! (valueInfo instanceof TensorInfo)) {
+ throw new IllegalArgumentException("ONNX value is not a tensor: maps and sequences are not yet supported");
+ }
+ return (TensorInfo) valueInfo;
+ }
+
+}
diff --git a/model-integration/src/main/java/ai/vespa/modelintegration/evaluator/package-info.java b/model-integration/src/main/java/ai/vespa/modelintegration/evaluator/package-info.java
new file mode 100644
index 00000000000..e44ea96c534
--- /dev/null
+++ b/model-integration/src/main/java/ai/vespa/modelintegration/evaluator/package-info.java
@@ -0,0 +1,5 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+@ExportPackage
+package ai.vespa.modelintegration.evaluator;
+
+import com.yahoo.osgi.annotation.ExportPackage; \ No newline at end of file
diff --git a/model-integration/src/test/java/ai/vespa/modelintegration/evaluator/OnnxEvaluatorTest.java b/model-integration/src/test/java/ai/vespa/modelintegration/evaluator/OnnxEvaluatorTest.java
new file mode 100644
index 00000000000..4b42e18d75e
--- /dev/null
+++ b/model-integration/src/test/java/ai/vespa/modelintegration/evaluator/OnnxEvaluatorTest.java
@@ -0,0 +1,93 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package ai.vespa.modelintegration.evaluator;
+
+import com.yahoo.tensor.Tensor;
+import com.yahoo.tensor.TensorType;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * @author lesters
+ */
+public class OnnxEvaluatorTest {
+
+ @Test
+ public void testSimpleMoodel() {
+ OnnxEvaluator evaluator = new OnnxEvaluator("src/test/models/onnx/simple/simple.onnx");
+
+ // Input types
+ Map<String, TensorType> inputTypes = evaluator.getInputInfo();
+ assertEquals(inputTypes.get("query_tensor"), TensorType.fromSpec("tensor<float>(d0[1],d1[4])"));
+ assertEquals(inputTypes.get("attribute_tensor"), TensorType.fromSpec("tensor<float>(d0[4],d1[1])"));
+ assertEquals(inputTypes.get("bias_tensor"), TensorType.fromSpec("tensor<float>(d0[1],d1[1])"));
+
+ // Output types
+ Map<String, TensorType> outputTypes = evaluator.getOutputInfo();
+ assertEquals(outputTypes.get("output"), TensorType.fromSpec("tensor<float>(d0[1],d1[1])"));
+
+ // Evaluation
+ Map<String, Tensor> inputs = new HashMap<>();
+ inputs.put("query_tensor", Tensor.from("tensor(d0[1],d1[4]):[0.1, 0.2, 0.3, 0.4]"));
+ inputs.put("attribute_tensor", Tensor.from("tensor(d0[4],d1[1]):[0.1, 0.2, 0.3, 0.4]"));
+ inputs.put("bias_tensor", Tensor.from("tensor(d0[1],d1[1]):[1.0]"));
+
+ assertEquals(evaluator.evaluate(inputs).get("output"), Tensor.from("tensor(d0[1],d1[1]):[1.3]"));
+ assertEquals(evaluator.evaluate(inputs, "output"), Tensor.from("tensor(d0[1],d1[1]):[1.3]"));
+ }
+
+ @Test
+ public void testBatchDimension() {
+ OnnxEvaluator evaluator = new OnnxEvaluator("src/test/models/onnx/pytorch/one_layer.onnx");
+
+ // Input types
+ Map<String, TensorType> inputTypes = evaluator.getInputInfo();
+ assertEquals(inputTypes.get("input"), TensorType.fromSpec("tensor<float>(d0[],d1[3])"));
+
+ // Output types
+ Map<String, TensorType> outputTypes = evaluator.getOutputInfo();
+ assertEquals(outputTypes.get("output"), TensorType.fromSpec("tensor<float>(d0[],d1[1])"));
+
+ // Evaluation
+ Map<String, Tensor> inputs = new HashMap<>();
+ inputs.put("input", Tensor.from("tensor<float>(d0[2],d1[3]):[[0.1, 0.2, 0.3],[0.4,0.5,0.6]]"));
+ assertEquals(evaluator.evaluate(inputs, "output"), Tensor.from("tensor<float>(d0[2],d1[1]):[0.6393113,0.67574286]"));
+ }
+
+ @Test
+ public void testMatMul() {
+ String expected = "tensor<float>(d0[2],d1[4]):[38,44,50,56,83,98,113,128]";
+ String input1 = "tensor<float>(d0[2],d1[3]):[1,2,3,4,5,6]";
+ String input2 = "tensor<float>(d0[3],d1[4]):[1,2,3,4,5,6,7,8,9,10,11,12]";
+ assertEvaluate("simple/matmul.onnx", expected, input1, input2);
+ }
+
+ @Test
+ public void testTypes() {
+ assertEvaluate("add_double.onnx", "tensor(d0[1]):[3]", "tensor(d0[1]):[1]", "tensor(d0[1]):[2]");
+ assertEvaluate("add_float.onnx", "tensor<float>(d0[1]):[3]", "tensor<float>(d0[1]):[1]", "tensor<float>(d0[1]):[2]");
+ assertEvaluate("add_int64.onnx", "tensor<double>(d0[1]):[3]", "tensor<double>(d0[1]):[1]", "tensor<double>(d0[1]):[2]");
+ assertEvaluate("cast_int8_float.onnx", "tensor<float>(d0[1]):[-128]", "tensor<int8>(d0[1]):[128]");
+ assertEvaluate("cast_float_int8.onnx", "tensor<int8>(d0[1]):[-1]", "tensor<float>(d0[1]):[255]");
+
+ // ONNX Runtime 1.7.0 does not support much of bfloat16 yet
+ // assertEvaluate("cast_bfloat16_float.onnx", "tensor<float>(d0[1]):[1]", "tensor<bfloat16>(d0[1]):[1]");
+ }
+
+ private void assertEvaluate(String model, String output, String... input) {
+ OnnxEvaluator evaluator = new OnnxEvaluator("src/test/models/onnx/" + model);
+ Map<String, Tensor> inputs = new HashMap<>();
+ for (int i = 0; i < input.length; ++i) {
+ inputs.put("input" + (i+1), Tensor.from(input[i]));
+ }
+ Tensor expected = Tensor.from(output);
+ Tensor result = evaluator.evaluate(inputs, "output");
+ assertEquals(expected, result);
+ assertEquals(expected.type().valueType(), result.type().valueType());
+ }
+
+}
diff --git a/model-integration/src/test/models/onnx/add_double.onnx b/model-integration/src/test/models/onnx/add_double.onnx
new file mode 100644
index 00000000000..9264d1eb9f9
--- /dev/null
+++ b/model-integration/src/test/models/onnx/add_double.onnx
@@ -0,0 +1,16 @@
+ add_double.py:f
+
+input1
+input2output"AddaddZ
+input1
+
+ 
+Z
+input2
+
+ 
+b
+output
+
+ 
+B \ No newline at end of file
diff --git a/model-integration/src/test/models/onnx/add_double.py b/model-integration/src/test/models/onnx/add_double.py
new file mode 100755
index 00000000000..fa9aa48f4b2
--- /dev/null
+++ b/model-integration/src/test/models/onnx/add_double.py
@@ -0,0 +1,27 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+import onnx
+from onnx import helper, TensorProto
+
+INPUT_1 = helper.make_tensor_value_info('input1', TensorProto.DOUBLE, [1])
+INPUT_2 = helper.make_tensor_value_info('input2', TensorProto.DOUBLE, [1])
+OUTPUT = helper.make_tensor_value_info('output', TensorProto.DOUBLE, [1])
+
+nodes = [
+ helper.make_node(
+ 'Add',
+ ['input1', 'input2'],
+ ['output'],
+ ),
+]
+graph_def = helper.make_graph(
+ nodes,
+ 'add',
+ [
+ INPUT_1,
+ INPUT_2
+ ],
+ [OUTPUT],
+)
+model_def = helper.make_model(graph_def, producer_name='add_double.py', opset_imports=[onnx.OperatorSetIdProto(version=12)])
+onnx.save(model_def, 'add_double.onnx')
diff --git a/model-integration/src/test/models/onnx/add_float.onnx b/model-integration/src/test/models/onnx/add_float.onnx
new file mode 100644
index 00000000000..0e3ad8f900c
--- /dev/null
+++ b/model-integration/src/test/models/onnx/add_float.onnx
@@ -0,0 +1,16 @@
+ add_float.py:f
+
+input1
+input2output"AddaddZ
+input1
+
+
+Z
+input2
+
+
+b
+output
+
+
+B \ No newline at end of file
diff --git a/model-integration/src/test/models/onnx/add_float.py b/model-integration/src/test/models/onnx/add_float.py
new file mode 100755
index 00000000000..e18b2c46d9d
--- /dev/null
+++ b/model-integration/src/test/models/onnx/add_float.py
@@ -0,0 +1,27 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+import onnx
+from onnx import helper, TensorProto
+
+INPUT_1 = helper.make_tensor_value_info('input1', TensorProto.FLOAT, [1])
+INPUT_2 = helper.make_tensor_value_info('input2', TensorProto.FLOAT, [1])
+OUTPUT = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1])
+
+nodes = [
+ helper.make_node(
+ 'Add',
+ ['input1', 'input2'],
+ ['output'],
+ ),
+]
+graph_def = helper.make_graph(
+ nodes,
+ 'add',
+ [
+ INPUT_1,
+ INPUT_2
+ ],
+ [OUTPUT],
+)
+model_def = helper.make_model(graph_def, producer_name='add_float.py', opset_imports=[onnx.OperatorSetIdProto(version=12)])
+onnx.save(model_def, 'add_float.onnx')
diff --git a/model-integration/src/test/models/onnx/add_int64.onnx b/model-integration/src/test/models/onnx/add_int64.onnx
new file mode 100644
index 00000000000..7b3a9ec6b95
--- /dev/null
+++ b/model-integration/src/test/models/onnx/add_int64.onnx
@@ -0,0 +1,16 @@
+ add_int64.py:f
+
+input1
+input2output"AddaddZ
+input1
+
+
+Z
+input2
+
+
+b
+output
+
+
+B \ No newline at end of file
diff --git a/model-integration/src/test/models/onnx/add_int64.py b/model-integration/src/test/models/onnx/add_int64.py
new file mode 100755
index 00000000000..87908e292a2
--- /dev/null
+++ b/model-integration/src/test/models/onnx/add_int64.py
@@ -0,0 +1,27 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+import onnx
+from onnx import helper, TensorProto
+
+INPUT_1 = helper.make_tensor_value_info('input1', TensorProto.INT64, [1])
+INPUT_2 = helper.make_tensor_value_info('input2', TensorProto.INT64, [1])
+OUTPUT = helper.make_tensor_value_info('output', TensorProto.INT64, [1])
+
+nodes = [
+ helper.make_node(
+ 'Add',
+ ['input1', 'input2'],
+ ['output'],
+ ),
+]
+graph_def = helper.make_graph(
+ nodes,
+ 'add',
+ [
+ INPUT_1,
+ INPUT_2
+ ],
+ [OUTPUT],
+)
+model_def = helper.make_model(graph_def, producer_name='add_int64.py', opset_imports=[onnx.OperatorSetIdProto(version=12)])
+onnx.save(model_def, 'add_int64.onnx')
diff --git a/model-integration/src/test/models/onnx/cast_bfloat16_float.onnx b/model-integration/src/test/models/onnx/cast_bfloat16_float.onnx
new file mode 100644
index 00000000000..cb19592abf4
--- /dev/null
+++ b/model-integration/src/test/models/onnx/cast_bfloat16_float.onnx
@@ -0,0 +1,12 @@
+cast_bfloat16_float.py:U
+!
+input1output"Cast*
+to castZ
+input1
+
+
+b
+output
+
+
+B \ No newline at end of file
diff --git a/model-integration/src/test/models/onnx/cast_bfloat16_float.py b/model-integration/src/test/models/onnx/cast_bfloat16_float.py
new file mode 100755
index 00000000000..14b05347262
--- /dev/null
+++ b/model-integration/src/test/models/onnx/cast_bfloat16_float.py
@@ -0,0 +1,24 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+import onnx
+from onnx import helper, TensorProto
+
+INPUT_1 = helper.make_tensor_value_info('input1', TensorProto.BFLOAT16, [1])
+OUTPUT = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1])
+
+nodes = [
+ helper.make_node(
+ 'Cast',
+ ['input1'],
+ ['output'],
+ to=TensorProto.FLOAT
+ ),
+]
+graph_def = helper.make_graph(
+ nodes,
+ 'cast',
+ [INPUT_1],
+ [OUTPUT],
+)
+model_def = helper.make_model(graph_def, producer_name='cast_bfloat16_float.py', opset_imports=[onnx.OperatorSetIdProto(version=12)])
+onnx.save(model_def, 'cast_bfloat16_float.onnx')
diff --git a/model-integration/src/test/models/onnx/cast_float_int8.onnx b/model-integration/src/test/models/onnx/cast_float_int8.onnx
new file mode 100644
index 00000000000..c30b023dd68
--- /dev/null
+++ b/model-integration/src/test/models/onnx/cast_float_int8.onnx
@@ -0,0 +1,12 @@
+cast_float_int8.py:U
+!
+input1output"Cast*
+to castZ
+input1
+
+
+b
+output
+
+
+B \ No newline at end of file
diff --git a/model-integration/src/test/models/onnx/cast_float_int8.py b/model-integration/src/test/models/onnx/cast_float_int8.py
new file mode 100755
index 00000000000..bdc0850d033
--- /dev/null
+++ b/model-integration/src/test/models/onnx/cast_float_int8.py
@@ -0,0 +1,24 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+import onnx
+from onnx import helper, TensorProto
+
+INPUT_1 = helper.make_tensor_value_info('input1', TensorProto.FLOAT, [1])
+OUTPUT = helper.make_tensor_value_info('output', TensorProto.INT8, [1])
+
+nodes = [
+ helper.make_node(
+ 'Cast',
+ ['input1'],
+ ['output'],
+ to=TensorProto.INT8
+ ),
+]
+graph_def = helper.make_graph(
+ nodes,
+ 'cast',
+ [INPUT_1],
+ [OUTPUT],
+)
+model_def = helper.make_model(graph_def, producer_name='cast_float_int8.py', opset_imports=[onnx.OperatorSetIdProto(version=12)])
+onnx.save(model_def, 'cast_float_int8.onnx')
diff --git a/model-integration/src/test/models/onnx/cast_int8_float.onnx b/model-integration/src/test/models/onnx/cast_int8_float.onnx
new file mode 100644
index 00000000000..65aea4a36ae
--- /dev/null
+++ b/model-integration/src/test/models/onnx/cast_int8_float.onnx
@@ -0,0 +1,12 @@
+cast_int8_float.py:U
+!
+input1output"Cast*
+to castZ
+input1
+
+
+b
+output
+
+
+B \ No newline at end of file
diff --git a/model-integration/src/test/models/onnx/cast_int8_float.py b/model-integration/src/test/models/onnx/cast_int8_float.py
new file mode 100755
index 00000000000..70bf2cf70ca
--- /dev/null
+++ b/model-integration/src/test/models/onnx/cast_int8_float.py
@@ -0,0 +1,24 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+import onnx
+from onnx import helper, TensorProto
+
+INPUT_1 = helper.make_tensor_value_info('input1', TensorProto.INT8, [1])
+OUTPUT = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1])
+
+nodes = [
+ helper.make_node(
+ 'Cast',
+ ['input1'],
+ ['output'],
+ to=TensorProto.FLOAT
+ ),
+]
+graph_def = helper.make_graph(
+ nodes,
+ 'cast',
+ [INPUT_1],
+ [OUTPUT],
+)
+model_def = helper.make_model(graph_def, producer_name='cast_int8_float.py', opset_imports=[onnx.OperatorSetIdProto(version=12)])
+onnx.save(model_def, 'cast_int8_float.onnx')
diff --git a/model-integration/src/test/models/onnx/pytorch/one_layer.onnx b/model-integration/src/test/models/onnx/pytorch/one_layer.onnx
new file mode 100644
index 00000000000..dc9f664b943
--- /dev/null
+++ b/model-integration/src/test/models/onnx/pytorch/one_layer.onnx
Binary files differ
diff --git a/model-integration/src/test/models/onnx/pytorch/pytorch_one_layer.py b/model-integration/src/test/models/onnx/pytorch/pytorch_one_layer.py
new file mode 100755
index 00000000000..1296d84e180
--- /dev/null
+++ b/model-integration/src/test/models/onnx/pytorch/pytorch_one_layer.py
@@ -0,0 +1,38 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+import torch
+import torch.onnx
+
+
+class MyModel(torch.nn.Module):
+ def __init__(self):
+ super(MyModel, self).__init__()
+ self.linear = torch.nn.Linear(in_features=3, out_features=1)
+ self.logistic = torch.nn.Sigmoid()
+
+ def forward(self, vec):
+ return self.logistic(self.linear(vec))
+
+
+def main():
+ model = MyModel()
+
+ # Omit training - just export randomly initialized network
+
+ data = torch.FloatTensor([[0.1, 0.2, 0.3],[0.4, 0.5, 0.6]])
+ torch.onnx.export(model,
+ data,
+ "one_layer.onnx",
+ input_names = ["input"],
+ output_names = ["output"],
+ dynamic_axes = {
+ "input": {0: "batch"},
+ "output": {0: "batch"},
+ },
+ opset_version=12)
+
+
+if __name__ == "__main__":
+ main()
+
+
diff --git a/model-integration/src/test/models/onnx/simple/matmul.onnx b/model-integration/src/test/models/onnx/simple/matmul.onnx
new file mode 100644
index 00000000000..9bb88406116
--- /dev/null
+++ b/model-integration/src/test/models/onnx/simple/matmul.onnx
@@ -0,0 +1,16 @@
+ matmul.py:x
+
+input1
+input2output"MatMulmatmulZ
+input1
+ 
+
+Z
+input2
+ 
+
+b
+output
+ 
+
+B \ No newline at end of file
diff --git a/model-integration/src/test/models/onnx/simple/matmul.py b/model-integration/src/test/models/onnx/simple/matmul.py
new file mode 100755
index 00000000000..beec55e9f5a
--- /dev/null
+++ b/model-integration/src/test/models/onnx/simple/matmul.py
@@ -0,0 +1,27 @@
+# Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+import onnx
+from onnx import helper, TensorProto
+
+INPUT1 = helper.make_tensor_value_info('input1', TensorProto.FLOAT, [2, 3])
+INPUT2 = helper.make_tensor_value_info('input2', TensorProto.FLOAT, [3, 4])
+OUTPUT = helper.make_tensor_value_info('output', TensorProto.FLOAT, [2, 4])
+
+nodes = [
+ helper.make_node(
+ 'MatMul',
+ ['input1', 'input2'],
+ ['output'],
+ ),
+]
+graph_def = helper.make_graph(
+ nodes,
+ 'matmul',
+ [
+ INPUT1,
+ INPUT2,
+ ],
+ [OUTPUT],
+)
+model_def = helper.make_model(graph_def, producer_name='matmul.py', opset_imports=[onnx.OperatorSetIdProto(version=12)])
+onnx.save(model_def, 'matmul.onnx')
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java
index 93717543a1c..10e0dd50761 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java
@@ -165,9 +165,9 @@ public class StorageMaintainer {
}
/** Checks if container has any new coredumps, reports and archives them if so */
- public void handleCoreDumpsForContainer(NodeAgentContext context, Optional<Container> container) {
+ public void handleCoreDumpsForContainer(NodeAgentContext context, Optional<Container> container, boolean throwIfCoreBeingWritten) {
if (context.isDisabled(NodeAgentTask.CoreDumps)) return;
- coredumpHandler.converge(context, () -> getCoredumpNodeAttributes(context, container));
+ coredumpHandler.converge(context, () -> getCoredumpNodeAttributes(context, container), throwIfCoreBeingWritten);
}
private Map<String, Object> getCoredumpNodeAttributes(NodeAgentContext context, Optional<Container> container) {
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java
index eb629ee6165..4c384b09fad 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.node.admin.maintenance.coredump;
import com.yahoo.vespa.hosted.dockerapi.ProcessResult;
import com.yahoo.vespa.hosted.node.admin.docker.ContainerOperations;
+import com.yahoo.vespa.hosted.node.admin.nodeadmin.ConvergenceException;
import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContext;
import java.nio.file.Path;
@@ -45,7 +46,7 @@ public class CoreCollector {
Matcher matcher = CORE_GENERATOR_PATH_PATTERN.matcher(result.getOutput());
if (! matcher.find()) {
- throw new RuntimeException(String.format("Failed to extract binary path from GDB, result: %s, command: %s",
+ throw new ConvergenceException(String.format("Failed to extract binary path from GDB, result: %s, command: %s",
result, Arrays.toString(wrappedCommand)));
}
return Paths.get(matcher.group("path").split(" ")[0]);
@@ -56,7 +57,7 @@ public class CoreCollector {
try {
ProcessResult result = docker.executeCommandInContainerAsRoot(context, command);
if (result.getExitStatus() != 0) {
- throw new RuntimeException("file command failed with " + result);
+ throw new ConvergenceException("file command failed with " + result);
}
Matcher execfnMatcher = EXECFN_PATH_PATTERN.matcher(result.getOutput());
@@ -82,7 +83,7 @@ public class CoreCollector {
ProcessResult result = docker.executeCommandInContainerAsRoot(context, command);
if (result.getExitStatus() != 0)
- throw new RuntimeException("Failed to read backtrace " + result + ", Command: " + Arrays.toString(command));
+ throw new ConvergenceException("Failed to read backtrace " + result + ", Command: " + Arrays.toString(command));
return List.of(result.getOutput().split("\n"));
}
@@ -92,7 +93,7 @@ public class CoreCollector {
ProcessResult result = docker.executeCommandInContainerAsRoot(context, command);
if (result.getExitStatus() != 0)
- throw new RuntimeException("Failed to read jstack " + result + ", Command: " + Arrays.toString(command));
+ throw new ConvergenceException("Failed to read jstack " + result + ", Command: " + Arrays.toString(command));
return List.of(result.getOutput().split("\n"));
}
@@ -118,6 +119,8 @@ public class CoreCollector {
data.put("backtrace", readBacktrace(context, coredumpPath, binPath, false));
data.put("backtrace_all_threads", readBacktrace(context, coredumpPath, binPath, true));
}
+ } catch (ConvergenceException e) {
+ context.log(logger, Level.WARNING, "Failed to extract backtrace: " + e.getMessage());
} catch (RuntimeException e) {
context.log(logger, Level.WARNING, "Failed to extract backtrace", e);
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandler.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandler.java
index a912de18b94..09c0a4ae491 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandler.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandler.java
@@ -5,6 +5,7 @@ import com.fasterxml.jackson.databind.ObjectMapper;
import com.yahoo.vespa.hosted.dockerapi.metrics.Dimensions;
import com.yahoo.vespa.hosted.dockerapi.metrics.Metrics;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeSpec;
+import com.yahoo.vespa.hosted.node.admin.nodeadmin.ConvergenceException;
import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContext;
import com.yahoo.vespa.hosted.node.admin.task.util.file.FileFinder;
import com.yahoo.vespa.hosted.node.admin.task.util.file.UnixPath;
@@ -84,12 +85,23 @@ public class CoredumpHandler {
}
- public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier) {
+ public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier, boolean throwIfCoreBeingWritten) {
Path containerCrashPathOnHost = context.pathOnHostFromPathInNode(crashPatchInContainer);
Path containerProcessingPathOnHost = containerCrashPathOnHost.resolve(PROCESSING_DIRECTORY_NAME);
updateMetrics(context, containerCrashPathOnHost);
+ if (throwIfCoreBeingWritten) {
+ List<String> pendingCores = FileFinder.files(containerCrashPathOnHost)
+ .match(fileAttributes -> !isReadyForProcessing(fileAttributes))
+ .maxDepth(1).stream()
+ .map(FileFinder.FileAttributes::filename)
+ .collect(Collectors.toUnmodifiableList());
+ if (!pendingCores.isEmpty())
+ throw new ConvergenceException(String.format("Cannot process %s coredumps: Still being written",
+ pendingCores.size() < 5 ? pendingCores : pendingCores.size()));
+ }
+
// Check if we have already started to process a core dump or we can enqueue a new core one
getCoredumpToProcess(containerCrashPathOnHost, containerProcessingPathOnHost)
.ifPresent(path -> processAndReportSingleCoredump(context, path, nodeAttributesSupplier));
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java
index 772e17291ef..58ca4ae3f41 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java
@@ -3,9 +3,7 @@ package com.yahoo.vespa.hosted.node.admin.nodeadmin;
import com.yahoo.concurrent.ThreadFactoryFactory;
import com.yahoo.config.provision.HostName;
-import com.yahoo.vespa.flags.BooleanFlag;
import com.yahoo.vespa.flags.FlagSource;
-import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.Acl;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeRepository;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeSpec;
@@ -17,18 +15,14 @@ import com.yahoo.yolean.Exceptions;
import java.time.Clock;
import java.time.Duration;
-import java.time.Instant;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
-import java.util.Optional;
import java.util.Set;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
-import java.util.function.Function;
-import java.util.function.Supplier;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
@@ -50,13 +44,11 @@ public class NodeAdminStateUpdater {
private final ScheduledExecutorService metricsScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("metricsscheduler"));
- private final CachedSupplier<Map<String, Acl>> cachedAclSupplier;
private final NodeAgentContextFactory nodeAgentContextFactory;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final NodeAdmin nodeAdmin;
private final String hostHostname;
- private final BooleanFlag cacheAclFlag;
public enum State { TRANSITIONING, RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED }
@@ -75,8 +67,6 @@ public class NodeAdminStateUpdater {
this.orchestrator = orchestrator;
this.nodeAdmin = nodeAdmin;
this.hostHostname = hostHostname.value();
- this.cachedAclSupplier = new CachedSupplier<>(clock, Duration.ofSeconds(115), () -> nodeRepository.getAcls(this.hostHostname));
- this.cacheAclFlag = Flags.CACHE_ACL.bindTo(flagSource);
}
public void start() {
@@ -172,18 +162,10 @@ public class NodeAdminStateUpdater {
void adjustNodeAgentsToRunFromNodeRepository() {
try {
- Map<String, NodeSpec> nodeSpecByHostname = nodeRepository.getNodes(hostHostname).stream()
- .collect(Collectors.toMap(NodeSpec::hostname, Function.identity()));
- Map<String, Acl> aclByHostname = cacheAclFlag.value() ?
- Optional.of(cachedAclSupplier.get())
- .filter(acls -> acls.keySet().containsAll(nodeSpecByHostname.keySet()))
- .orElseGet(cachedAclSupplier::invalidateAndGet) :
- cachedAclSupplier.invalidateAndGet();
-
- Set<NodeAgentContext> nodeAgentContexts = nodeSpecByHostname.keySet().stream()
- .map(hostname -> nodeAgentContextFactory.create(
- nodeSpecByHostname.get(hostname),
- aclByHostname.getOrDefault(hostname, Acl.EMPTY)))
+ Map<String, Acl> aclByHostname = nodeRepository.getAcls(hostHostname);
+
+ Set<NodeAgentContext> nodeAgentContexts = nodeRepository.getNodes(hostHostname).stream()
+ .map(node -> nodeAgentContextFactory.create(node, aclByHostname.getOrDefault(node.hostname(), Acl.EMPTY)))
.collect(Collectors.toSet());
nodeAdmin.refreshContainersToRun(nodeAgentContexts);
} catch (ConvergenceException e) {
@@ -200,33 +182,4 @@ public class NodeAdminStateUpdater {
.map(NodeSpec::hostname)
.collect(Collectors.toList());
}
-
- private static class CachedSupplier<T> implements Supplier<T> {
- private final Clock clock;
- private final Duration expiration;
- private final Supplier<T> supplier;
- private Instant refreshAt;
- private T cachedValue;
-
- private CachedSupplier(Clock clock, Duration expiration, Supplier<T> supplier) {
- this.clock = clock;
- this.expiration = expiration;
- this.supplier = supplier;
- this.refreshAt = Instant.MIN;
- }
-
- @Override
- public T get() {
- if (! clock.instant().isBefore(refreshAt)) {
- cachedValue = supplier.get();
- refreshAt = clock.instant().plus(expiration);
- }
- return cachedValue;
- }
-
- private T invalidateAndGet() {
- refreshAt = Instant.MIN;
- return get();
- }
- }
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
index 74ba19a72c5..df3f075e8d9 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
@@ -366,7 +366,7 @@ public class NodeAgentImpl implements NodeAgent {
}
}
- storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer));
+ storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer), true);
containerOperations.removeContainer(context, existingContainer);
containerState = ABSENT;
context.log(logger, "Container successfully removed, new containerState is " + containerState);
@@ -405,7 +405,7 @@ public class NodeAgentImpl implements NodeAgent {
}
private boolean noCpuCap(ZoneApi zone) {
- return zone.getEnvironment() == Environment.dev || zone.getSystemName().isCd();
+ return zone.getEnvironment() == Environment.dev;
}
private boolean downloadImageIfNeeded(NodeAgentContext context, Optional<Container> container) {
@@ -469,7 +469,7 @@ public class NodeAgentImpl implements NodeAgent {
case active:
storageMaintainer.syncLogs(context, true);
storageMaintainer.cleanDiskIfFull(context);
- storageMaintainer.handleCoreDumpsForContainer(context, container);
+ storageMaintainer.handleCoreDumpsForContainer(context, container, false);
if (downloadImageIfNeeded(context, container)) {
context.log(logger, "Waiting for image to download " + context.node().wantedDockerImage().get().asString());
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixPath.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixPath.java
index cb7f1637410..f41d0e8e3bc 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixPath.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixPath.java
@@ -4,6 +4,7 @@ package com.yahoo.vespa.hosted.node.admin.task.util.file;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.charset.StandardCharsets;
+import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.OpenOption;
@@ -203,8 +204,14 @@ public class UnixPath {
return this;
}
+ /** Create directory unless it already exists, and return this. */
public UnixPath createDirectory() {
- uncheck(() -> Files.createDirectory(path));
+ try {
+ Files.createDirectory(path);
+ } catch (FileAlreadyExistsException ignore) {
+ } catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
return this;
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/yum/Yum.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/yum/Yum.java
index 482f324a9b5..d8a131f5ed1 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/yum/Yum.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/yum/Yum.java
@@ -2,16 +2,10 @@
package com.yahoo.vespa.hosted.node.admin.task.util.yum;
import com.yahoo.vespa.hosted.node.admin.component.TaskContext;
-import com.yahoo.vespa.hosted.node.admin.task.util.process.CommandResult;
import com.yahoo.vespa.hosted.node.admin.task.util.process.Terminal;
import java.util.List;
import java.util.Optional;
-import java.util.function.Function;
-import java.util.regex.Pattern;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
-import java.util.stream.Stream;
import static com.yahoo.vespa.hosted.node.admin.task.util.yum.YumCommand.GenericYumCommand;
import static com.yahoo.vespa.hosted.node.admin.task.util.yum.YumCommand.InstallFixedYumCommand;
@@ -21,20 +15,6 @@ import static com.yahoo.vespa.hosted.node.admin.task.util.yum.YumCommand.Install
*/
public class Yum {
- // Note: "(?dm)" makes newline be \n (only), and enables multiline mode where ^$ match lines with find()
- public static final Pattern INSTALL_NOOP_PATTERN = Pattern.compile("(?dm)^Nothing to do\\.?$");
- public static final Pattern UPGRADE_NOOP_PATTERN = Pattern.compile("(?dm)^No packages marked for update$");
- public static final Pattern REMOVE_NOOP_PATTERN = Pattern.compile("(?dm)^No [pP]ackages marked for removal\\.?$");
-
- // WARNING: These must be in the same order as the supplier below
- private static final String RPM_QUERYFORMAT = Stream.of("NAME", "EPOCH", "VERSION", "RELEASE", "ARCH")
- .map(formatter -> "%{" + formatter + "}")
- .collect(Collectors.joining("\\n"));
- private static final Function<YumPackageName.Builder, List<Function<String, YumPackageName.Builder>>>
- PACKAGE_NAME_BUILDERS_GENERATOR = builder -> List.of(
- builder::setName, builder::setEpoch, builder::setVersion, builder::setRelease, builder::setArchitecture);
-
-
private final Terminal terminal;
public Yum(Terminal terminal) {
@@ -42,21 +22,7 @@ public class Yum {
}
public Optional<YumPackageName> queryInstalled(TaskContext context, String packageName) {
- CommandResult commandResult = terminal.newCommandLine(context)
- .add("rpm", "-q", packageName, "--queryformat", RPM_QUERYFORMAT)
- .ignoreExitCode()
- .executeSilently();
-
- if (commandResult.getExitCode() != 0) return Optional.empty();
-
- YumPackageName.Builder builder = new YumPackageName.Builder();
- List<Function<String, YumPackageName.Builder>> builders = PACKAGE_NAME_BUILDERS_GENERATOR.apply(builder);
- List<Optional<String>> lines = commandResult.mapEachLine(line -> Optional.of(line).filter(s -> !"(none)".equals(s)));
- if (lines.size() != builders.size()) throw new IllegalStateException(String.format(
- "Unexpected response from rpm, expected %d lines, got %s", builders.size(), commandResult.getOutput()));
-
- IntStream.range(0, builders.size()).forEach(i -> lines.get(i).ifPresent(builders.get(i)::apply));
- return Optional.of(builder.build());
+ return YumCommand.queryInstalled(terminal, context, packageName);
}
/** Lock and install, or if necessary downgrade, a package to a given version. */
@@ -65,7 +31,7 @@ public class Yum {
}
public GenericYumCommand install(YumPackageName... packages) {
- return new GenericYumCommand(terminal, "install", List.of(packages), INSTALL_NOOP_PATTERN);
+ return new GenericYumCommand(terminal, GenericYumCommand.CommandType.install, List.of(packages));
}
public GenericYumCommand install(String package1, String... packages) {
@@ -78,7 +44,7 @@ public class Yum {
public GenericYumCommand upgrade(YumPackageName... packages) {
- return new GenericYumCommand(terminal, "upgrade", List.of(packages), INSTALL_NOOP_PATTERN, UPGRADE_NOOP_PATTERN);
+ return new GenericYumCommand(terminal, GenericYumCommand.CommandType.upgrade, List.of(packages));
}
public GenericYumCommand upgrade(String package1, String... packages) {
@@ -91,7 +57,7 @@ public class Yum {
public GenericYumCommand remove(YumPackageName... packages) {
- return new GenericYumCommand(terminal, "remove", List.of(packages), REMOVE_NOOP_PATTERN);
+ return new GenericYumCommand(terminal, GenericYumCommand.CommandType.remove, List.of(packages));
}
public GenericYumCommand remove(String package1, String... packages) {
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/yum/YumCommand.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/yum/YumCommand.java
index f7ca453256f..2a01a5ebcb4 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/yum/YumCommand.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/yum/YumCommand.java
@@ -4,20 +4,37 @@ package com.yahoo.vespa.hosted.node.admin.task.util.yum;
import com.yahoo.component.Version;
import com.yahoo.vespa.hosted.node.admin.component.TaskContext;
import com.yahoo.vespa.hosted.node.admin.task.util.process.CommandLine;
+import com.yahoo.vespa.hosted.node.admin.task.util.process.CommandResult;
import com.yahoo.vespa.hosted.node.admin.task.util.process.Terminal;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
+import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import java.util.stream.Stream;
/**
* @author freva
*/
public abstract class YumCommand<T extends YumCommand<T>> {
+ // Note: "(?dm)" makes newline be \n (only), and enables multiline mode where ^$ match lines with find()
+ public static final Pattern INSTALL_NOOP_PATTERN = Pattern.compile("(?dm)^Nothing to do\\.?$");
+ public static final Pattern UPGRADE_NOOP_PATTERN = Pattern.compile("(?dm)^No packages marked for update$");
+ public static final Pattern REMOVE_NOOP_PATTERN = Pattern.compile("(?dm)^No [pP]ackages marked for removal\\.?$");
+
+ // WARNING: These must be in the same order as the supplier below
+ private static final String RPM_QUERYFORMAT = Stream.of("NAME", "EPOCH", "VERSION", "RELEASE", "ARCH")
+ .map(formatter -> "%{" + formatter + "}")
+ .collect(Collectors.joining("\\n"));
+ private static final Function<YumPackageName.Builder, List<Function<String, YumPackageName.Builder>>>
+ PACKAGE_NAME_BUILDERS_GENERATOR = builder -> List.of(
+ builder::setName, builder::setEpoch, builder::setVersion, builder::setRelease, builder::setArchitecture);
+
private List<String> enabledRepos = List.of();
private final Terminal terminal;
@@ -54,34 +71,31 @@ public abstract class YumCommand<T extends YumCommand<T>> {
private static final Pattern UNKNOWN_PACKAGE_PATTERN = Pattern.compile("(?dm)^No package ([^ ]+) available\\.$");
private final Terminal terminal;
- private final String yumCommand;
- private final List<Pattern> outputNoopPatterns;
+ private final CommandType yumCommand;
private final List<YumPackageName> packages;
private final List<String> options = new ArrayList<>();
- GenericYumCommand(Terminal terminal, String yumCommand, List<YumPackageName> packages, Pattern... outputNoopPatterns) {
+ GenericYumCommand(Terminal terminal, CommandType yumCommand, List<YumPackageName> packages) {
super(terminal);
this.terminal = terminal;
this.yumCommand = yumCommand;
this.packages = packages;
- this.outputNoopPatterns = List.of(outputNoopPatterns);
switch (yumCommand) {
- case "install": {
+ case install: {
if (packages.size() > 1) options.add("skip_missing_names_on_install=False");
break;
}
- case "upgrade": {
+ case upgrade: {
if (packages.size() > 1) options.add("skip_missing_names_on_update=False");
break;
}
- case "remove": break;
+ case remove: break;
default: throw new IllegalArgumentException("Unknown yum command: " + yumCommand);
}
- if (packages.isEmpty() && ! "upgrade".equals(yumCommand)) {
+ if (packages.isEmpty() && yumCommand != CommandType.upgrade)
throw new IllegalArgumentException("No packages specified");
- }
}
@Override
@@ -92,13 +106,14 @@ public abstract class YumCommand<T extends YumCommand<T>> {
@Override
public boolean converge(TaskContext context) {
- if (packages.isEmpty() && ! "upgrade".equals(yumCommand)) {
- throw new IllegalArgumentException("No packages specified");
- }
+ if (yumCommand == CommandType.install)
+ if (packages.stream().allMatch(pkg -> isInstalled(context, pkg))) return false;
+ if (yumCommand == CommandType.remove)
+ if (packages.stream().noneMatch(pkg -> isInstalled(context, pkg))) return false;
Version yumVersion = version(context);
CommandLine commandLine = terminal.newCommandLine(context);
- commandLine.add("yum", yumCommand);
+ commandLine.add("yum", yumCommand.name());
addParametersToCommandLine(commandLine);
commandLine.add(packages.stream().map(pkg -> pkg.toName(yumVersion)).collect(Collectors.toList()));
@@ -121,10 +136,19 @@ public abstract class YumCommand<T extends YumCommand<T>> {
throw new IllegalArgumentException("Unknown package: " + unknownPackageMatcher.group(1));
}
- return outputNoopPatterns.stream().noneMatch(pattern -> pattern.matcher(output).find());
+ return yumCommand.outputNoopPatterns.stream().noneMatch(pattern -> pattern.matcher(output).find());
}
protected GenericYumCommand getThis() { return this; }
+
+ enum CommandType {
+ install(INSTALL_NOOP_PATTERN), remove(REMOVE_NOOP_PATTERN), upgrade(INSTALL_NOOP_PATTERN, UPGRADE_NOOP_PATTERN);
+
+ private final List<Pattern> outputNoopPatterns;
+ CommandType(Pattern... outputNoopPatterns) {
+ this.outputNoopPatterns = List.of(outputNoopPatterns);
+ }
+ }
}
@@ -211,7 +235,7 @@ public abstract class YumCommand<T extends YumCommand<T>> {
String output = installCommand.executeSilently().getUntrimmedOutput();
- if (Yum.INSTALL_NOOP_PATTERN.matcher(output).find()) {
+ if (INSTALL_NOOP_PATTERN.matcher(output).find()) {
if (CHECKING_FOR_UPDATE_PATTERN.matcher(output).find()) {
// case 3.
var upgradeCommand = terminal.newCommandLine(context).add("yum", "downgrade");
@@ -233,4 +257,25 @@ public abstract class YumCommand<T extends YumCommand<T>> {
protected InstallFixedYumCommand getThis() { return this; }
}
+ protected boolean isInstalled(TaskContext context, YumPackageName yumPackage) {
+ return queryInstalled(terminal, context, yumPackage.getName()).map(yumPackage::isSubsetOf).orElse(false);
+ }
+
+ static Optional<YumPackageName> queryInstalled(Terminal terminal, TaskContext context, String packageName) {
+ CommandResult commandResult = terminal.newCommandLine(context)
+ .add("rpm", "-q", packageName, "--queryformat", RPM_QUERYFORMAT)
+ .ignoreExitCode()
+ .executeSilently();
+
+ if (commandResult.getExitCode() != 0) return Optional.empty();
+
+ YumPackageName.Builder builder = new YumPackageName.Builder();
+ List<Function<String, YumPackageName.Builder>> builders = PACKAGE_NAME_BUILDERS_GENERATOR.apply(builder);
+ List<Optional<String>> lines = commandResult.mapEachLine(line -> Optional.of(line).filter(s -> !"(none)".equals(s)));
+ if (lines.size() != builders.size()) throw new IllegalStateException(String.format(
+ "Unexpected response from rpm, expected %d lines, got %s", builders.size(), commandResult.getOutput()));
+
+ IntStream.range(0, builders.size()).forEach(i -> lines.get(i).ifPresent(builders.get(i)::apply));
+ return Optional.of(builder.build());
+ }
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/yum/YumTester.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/yum/YumTester.java
index 589362e747f..e47d71cbdf7 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/yum/YumTester.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/yum/YumTester.java
@@ -34,15 +34,15 @@ public class YumTester extends Yum {
}
public GenericYumCommandExpectation expectInstall(String... packages) {
- return new GenericYumCommandExpectation("install", packages);
+ return new GenericYumCommandExpectation(CommandType.install, packages);
}
public GenericYumCommandExpectation expectUpdate(String... packages) {
- return new GenericYumCommandExpectation("upgrade", packages);
+ return new GenericYumCommandExpectation(CommandType.upgrade, packages);
}
public GenericYumCommandExpectation expectRemove(String... packages) {
- return new GenericYumCommandExpectation("remove", packages);
+ return new GenericYumCommandExpectation(CommandType.remove, packages);
}
public InstallFixedCommandExpectation expectInstallFixedVersion(String yumPackage) {
@@ -55,12 +55,12 @@ public class YumTester extends Yum {
public class GenericYumCommandExpectation {
- private final String command;
+ private final CommandType commandType;
protected final List<YumPackageName> packages;
private List<String> enableRepos = List.of();
- private GenericYumCommandExpectation(String command, String... packages) {
- this.command = command;
+ private GenericYumCommandExpectation(CommandType commandType, String... packages) {
+ this.commandType = commandType;
this.packages = Stream.of(packages).map(YumPackageName::fromString).collect(Collectors.toList());
}
@@ -72,11 +72,12 @@ public class YumTester extends Yum {
/** Mock the return value of the converge(TaskContext) method for this operation (true iff system was modified) */
public YumTester andReturn(boolean value) {
if (value) return execute("Success");
- switch (command) {
- case "install": return execute("Nothing to do");
- case "upgrade": return execute("No packages marked for update");
- case "remove": return execute("No Packages marked for removal");
- default: throw new IllegalArgumentException("Unknown command: " + command);
+ switch (commandType) {
+ case installFixed:
+ case install: return execute("Nothing to do");
+ case upgrade: return execute("No packages marked for update");
+ case remove: return execute("No Packages marked for removal");
+ default: throw new IllegalArgumentException("Unknown command type: " + commandType);
}
}
@@ -85,12 +86,24 @@ public class YumTester extends Yum {
}
private YumTester execute(String output) {
+ if (commandType == CommandType.install)
+ terminal.interceptCommand("rpm query", cmd -> new TestChildProcess2(1, "Not installed"));
+ if (commandType == CommandType.remove) { // Pretend the first package is installed so we can continue to yum commands
+ YumPackageName pkg = packages.get(0);
+ terminal.interceptCommand("rpm query", cmd -> new TestChildProcess2(0, String.join("\n",
+ pkg.getName(),
+ pkg.getEpoch().orElse("(none)"),
+ pkg.getVersion().orElse("1.2.3"),
+ pkg.getRelease().orElse("1"),
+ pkg.getArchitecture().orElse("(none)"))));
+ }
+
StringBuilder cmd = new StringBuilder();
- cmd.append("yum ").append(command).append(" --assumeyes");
+ cmd.append("yum ").append(commandType.command).append(" --assumeyes");
enableRepos.forEach(repo -> cmd.append(" --enablerepo=").append(repo));
- if ("install".equals(command) && packages.size() > 1)
+ if (commandType == CommandType.install && packages.size() > 1)
cmd.append(" --setopt skip_missing_names_on_install=False");
- if ("upgrade".equals(command) && packages.size() > 1)
+ if (commandType == CommandType.upgrade && packages.size() > 1)
cmd.append(" --setopt skip_missing_names_on_update=False");
packages.forEach(pkg -> {
String name = pkg.toName(yumVersion);
@@ -109,7 +122,7 @@ public class YumTester extends Yum {
public class InstallFixedCommandExpectation extends GenericYumCommandExpectation {
private InstallFixedCommandExpectation(String yumPackage) {
- super("install", yumPackage);
+ super(CommandType.installFixed, yumPackage);
}
@Override
@@ -150,4 +163,13 @@ public class YumTester extends Yum {
}
}
+ private enum CommandType {
+ install("install"), upgrade("upgrade"), remove("remove"), installFixed("install");
+
+ private final String command;
+ CommandType(String command) {
+ this.command = command;
+ }
+ }
+
}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java
index d348d2b74e9..4a678597e41 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java
@@ -3,7 +3,6 @@ package com.yahoo.vespa.hosted.node.admin.nodeadmin;
import com.yahoo.config.provision.HostName;
import com.yahoo.test.ManualClock;
-import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.Acl;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeRepository;
@@ -179,26 +178,6 @@ public class NodeAdminStateUpdaterTest {
}
@Test
- public void uses_cached_acl() {
- mockNodeRepo(NodeState.active, 1);
- mockAcl(Acl.EMPTY, 1);
-
- updater.adjustNodeAgentsToRunFromNodeRepository();
- verify(nodeRepository, times(1)).getAcls(any());
- clock.advance(Duration.ofSeconds(30));
-
- updater.adjustNodeAgentsToRunFromNodeRepository();
- clock.advance(Duration.ofSeconds(30));
- updater.adjustNodeAgentsToRunFromNodeRepository();
- clock.advance(Duration.ofSeconds(30));
- verify(nodeRepository, times(1)).getAcls(any());
-
- clock.advance(Duration.ofSeconds(30));
- updater.adjustNodeAgentsToRunFromNodeRepository();
- verify(nodeRepository, times(2)).getAcls(any());
- }
-
- @Test
public void node_spec_and_acl_aligned() {
Acl acl = new Acl.Builder().withTrustedPorts(22).build();
mockNodeRepo(NodeState.active, 3);
@@ -212,25 +191,6 @@ public class NodeAdminStateUpdaterTest {
verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.hostname().equals("host2.yahoo.com")), eq(acl));
verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.hostname().equals("host3.yahoo.com")), eq(acl));
verify(nodeRepository, times(3)).getNodes(eq(hostHostname.value()));
- verify(nodeRepository, times(1)).getAcls(eq(hostHostname.value()));
- }
-
- @Test
- public void node_spec_and_acl_aligned_with_acl_cache_disabled() {
- flagSource.withBooleanFlag(Flags.CACHE_ACL.id(), false);
-
- Acl acl = new Acl.Builder().withTrustedPorts(22).build();
- mockNodeRepo(NodeState.active, 3);
- mockAcl(acl, 1, 2, 3);
-
- updater.adjustNodeAgentsToRunFromNodeRepository();
- updater.adjustNodeAgentsToRunFromNodeRepository();
- updater.adjustNodeAgentsToRunFromNodeRepository();
-
- verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.hostname().equals("host1.yahoo.com")), eq(acl));
- verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.hostname().equals("host2.yahoo.com")), eq(acl));
- verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.hostname().equals("host3.yahoo.com")), eq(acl));
- verify(nodeRepository, times(3)).getNodes(eq(hostHostname.value()));
verify(nodeRepository, times(3)).getAcls(eq(hostHostname.value()));
}
@@ -249,7 +209,7 @@ public class NodeAdminStateUpdaterTest {
verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.hostname().equals("host2.yahoo.com")), eq(acl));
verify(nodeAgentContextFactory, times(1)).create(argThat(spec -> spec.hostname().equals("host3.yahoo.com")), eq(Acl.EMPTY));
verify(nodeRepository, times(3)).getNodes(eq(hostHostname.value()));
- verify(nodeRepository, times(2)).getAcls(eq(hostHostname.value())); // During the first tick, the cache is invalidated and retried
+ verify(nodeRepository, times(3)).getAcls(eq(hostHostname.value()));
}
@Test
@@ -265,7 +225,7 @@ public class NodeAdminStateUpdaterTest {
verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.hostname().equals("host1.yahoo.com")), eq(acl));
verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.hostname().equals("host2.yahoo.com")), eq(acl));
verify(nodeRepository, times(3)).getNodes(eq(hostHostname.value()));
- verify(nodeRepository, times(1)).getAcls(eq(hostHostname.value()));
+ verify(nodeRepository, times(3)).getAcls(eq(hostHostname.value()));
}
private void assertConvergeError(NodeAdminStateUpdater.State targetState, String reason) {
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java
index 9475e3720c2..34c4bc15ee9 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java
@@ -449,7 +449,7 @@ public class NodeAgentImplTest {
final InOrder inOrder = inOrder(storageMaintainer, containerOperations, nodeRepository);
inOrder.verify(containerOperations, times(1)).stopServices(eq(context));
- inOrder.verify(storageMaintainer, times(1)).handleCoreDumpsForContainer(eq(context), any());
+ inOrder.verify(storageMaintainer, times(1)).handleCoreDumpsForContainer(eq(context), any(), eq(true));
inOrder.verify(containerOperations, times(1)).removeContainer(eq(context), any());
inOrder.verify(storageMaintainer, times(1)).archiveNodeStorage(eq(context));
inOrder.verify(nodeRepository, times(1)).setNodeState(eq(hostName), eq(NodeState.ready));
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/yum/YumTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/yum/YumTest.java
index 37695ca9504..92f8f78d255 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/yum/YumTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/yum/YumTest.java
@@ -78,6 +78,7 @@ public class YumTest {
@Test
public void testAlreadyInstalled() {
+ mockRpmQuery("package-1", null);
mockYumVersion();
terminal.expectCommand(
"yum install --assumeyes --enablerepo=repo1 --enablerepo=repo2 --setopt skip_missing_names_on_install=False package-1 package-2 2>&1",
@@ -90,6 +91,7 @@ public class YumTest {
.converge(taskContext));
// RHEL 8
+ mockRpmQuery("package-1", null);
mockYumVersion(YumVersion.rhel8);
terminal.expectCommand(
"yum install --assumeyes --enablerepo=repo1 --enablerepo=repo2 --setopt skip_missing_names_on_install=False package-1 package-2 2>&1",
@@ -125,6 +127,7 @@ public class YumTest {
@Test
public void testAlreadyRemoved() {
+ mockRpmQuery("package-1", YumPackageName.fromString("package-1-1.2.3-1"));
mockYumVersion();
terminal.expectCommand(
"yum remove --assumeyes package-1 package-2 2>&1",
@@ -136,6 +139,7 @@ public class YumTest {
.converge(taskContext));
// RHEL 8
+ mockRpmQuery("package-1", YumPackageName.fromString("package-1-1.2.3-1"));
mockYumVersion(YumVersion.rhel8);
terminal.expectCommand(
"yum remove --assumeyes package-1 package-2 2>&1",
@@ -147,7 +151,15 @@ public class YumTest {
}
@Test
+ public void skipsYumRemoveNotInRpm() {
+ mockRpmQuery("package-1", null);
+ mockRpmQuery("package-2", null);
+ assertFalse(yum.remove("package-1", "package-2").converge(taskContext));
+ }
+
+ @Test
public void testInstall() {
+ mockRpmQuery("package-1", null);
mockYumVersion();
terminal.expectCommand(
"yum install --assumeyes --setopt skip_missing_names_on_install=False package-1 package-2 2>&1",
@@ -160,7 +172,15 @@ public class YumTest {
}
@Test
+ public void skipsYumInstallIfInRpm() {
+ mockRpmQuery("package-1", YumPackageName.fromString("package-1-1.2.3-1"));
+ mockRpmQuery("package-2", YumPackageName.fromString("1:package-2-1.2.3-1.el7.x86_64"));
+ assertFalse(yum.install("package-1-1.2.3-1", "package-2").converge(taskContext));
+ }
+
+ @Test
public void testInstallWithEnablerepo() {
+ mockRpmQuery("package-1", null);
mockYumVersion();
terminal.expectCommand(
"yum install --assumeyes --enablerepo=repo-name --setopt skip_missing_names_on_install=False package-1 package-2 2>&1",
@@ -273,6 +293,7 @@ public class YumTest {
@Test(expected = ChildProcessFailureException.class)
public void testFailedInstall() {
+ mockRpmQuery("package-1", null);
mockYumVersion();
terminal.expectCommand(
"yum install --assumeyes --enablerepo=repo-name --setopt skip_missing_names_on_install=False package-1 package-2 2>&1",
@@ -288,6 +309,7 @@ public class YumTest {
@Test
public void testUnknownPackages() {
+ mockRpmQuery("package-1", null);
mockYumVersion();
terminal.expectCommand(
"yum install --assumeyes --setopt skip_missing_names_on_install=False package-1 package-2 package-3 2>&1",
@@ -328,4 +350,7 @@ public class YumTest {
mockYumVersion(YumVersion.rhel7);
}
+ private void mockRpmQuery(String packageName, YumPackageName installedOrNull) {
+ new YumTester(terminal).expectQueryInstalled(packageName).andReturn(installedOrNull);
+ }
}
diff --git a/node-repository/src/main/config/node-repository.xml b/node-repository/src/main/config/node-repository.xml
index a12e2a8b11c..8a6e466fdf3 100644
--- a/node-repository/src/main/config/node-repository.xml
+++ b/node-repository/src/main/config/node-repository.xml
@@ -14,6 +14,7 @@
</handler>
<handler id="com.yahoo.vespa.hosted.provision.restapi.LoadBalancersV1ApiHandler" bundle="node-repository">
+ <binding>http://*/loadbalancers/v1</binding>
<binding>http://*/loadbalancers/v1/*</binding>
</handler>
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java
index 5ec9ebfa0ad..4e9468925b6 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java
@@ -193,8 +193,8 @@ public class NodeList extends AbstractFilteringList<Node, NodeList> {
/** Returns the subset of nodes which have a record of being down */
public NodeList down() { return matching(Node::isDown); }
- /** Returns the subset of nodes which have retirement requested */
- public NodeList retirementRequested() {
+ /** Returns the subset of nodes which are being retired */
+ public NodeList retiring() {
return matching(node -> node.status().wantToRetire() || node.status().preferToRetire());
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
index 4cf531b55de..d113ca68d01 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
@@ -84,7 +84,7 @@ public class NodeRepository extends AbstractComponent {
flagSource,
metricsDb,
config.useCuratorClientCache(),
- zone.environment().isProduction() && !zone.getCloud().dynamicProvisioning() ? 1 : 0,
+ zone.environment().isProduction() && !zone.getCloud().dynamicProvisioning() && !zone.system().isCd() ? 1 : 0,
config.nodeCacheSize());
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java
index 8d8c7ef42f2..a2e02c15737 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java
@@ -145,6 +145,9 @@ public class MetricsReporter extends NodeRepositoryMaintainer {
metric.set("hostedVespa.pendingRedeployments", pendingRedeploymentsSupplier.get(), null);
}
+ /**
+ * NB: Keep this metric set in sync with internal configserver metric pre-aggregation
+ */
private void updateNodeMetrics(Node node, ServiceModel serviceModel) {
Metric.Context context;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
index 46230ed38a4..e9f107dd5f7 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
@@ -5,7 +5,6 @@ import com.google.inject.Inject;
import com.yahoo.component.AbstractComponent;
import com.yahoo.concurrent.maintenance.Maintainer;
import com.yahoo.config.provision.Deployer;
-import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.HostLivenessTracker;
import com.yahoo.config.provision.InfraDeployer;
import com.yahoo.config.provision.NodeType;
@@ -13,7 +12,6 @@ import com.yahoo.config.provision.Zone;
import com.yahoo.jdisc.Metric;
import com.yahoo.vespa.flags.FlagSource;
import com.yahoo.vespa.hosted.provision.NodeRepository;
-import com.yahoo.vespa.hosted.provision.autoscale.MetricsDb;
import com.yahoo.vespa.hosted.provision.autoscale.MetricsFetcher;
import com.yahoo.vespa.hosted.provision.provisioning.ProvisionServiceProvider;
import com.yahoo.vespa.orchestrator.Orchestrator;
@@ -146,7 +144,6 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
spareCapacityMaintenanceInterval = Duration.ofMinutes(30);
switchRebalancerInterval = Duration.ofHours(1);
throttlePolicy = NodeFailer.ThrottlePolicy.hosted;
- retiredExpiry = Duration.ofDays(4); // give up migrating data after 4 days
inactiveConfigServerExpiry = Duration.ofMinutes(5);
inactiveControllerExpiry = Duration.ofMinutes(5);
@@ -154,12 +151,14 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
inactiveExpiry = Duration.ofHours(4); // enough time for the application owner to discover and redeploy
retiredInterval = Duration.ofMinutes(30);
dirtyExpiry = Duration.ofHours(2); // enough time to clean the node
+ retiredExpiry = Duration.ofDays(4); // give up migrating data after 4 days
} else {
// long enough that nodes aren't reused immediately and delete can happen on all config servers
// with time enough to clean up even with ZK connection issues on config servers
inactiveExpiry = Duration.ofMinutes(1);
retiredInterval = Duration.ofMinutes(1);
dirtyExpiry = Duration.ofMinutes(30);
+ retiredExpiry = Duration.ofDays(1);
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/DelegatingOsUpgrader.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/DelegatingOsUpgrader.java
index 84454e0d06a..4f2012ded8f 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/DelegatingOsUpgrader.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/DelegatingOsUpgrader.java
@@ -37,7 +37,7 @@ public class DelegatingOsUpgrader implements OsUpgrader {
@Override
public void upgradeTo(OsVersionTarget target) {
- NodeList activeNodes = nodeRepository.nodes().list().nodeType(target.nodeType()).state(Node.State.active);
+ NodeList activeNodes = nodeRepository.nodes().list(Node.State.active).nodeType(target.nodeType());
int numberToUpgrade = Math.max(0, maxActiveUpgrades - activeNodes.changingOsVersionTo(target.version()).size());
NodeList nodesToUpgrade = activeNodes.not().changingOsVersionTo(target.version())
.osVersionIsBefore(target.version())
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsVersions.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsVersions.java
index 748edfd936b..a0b78461b34 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsVersions.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsVersions.java
@@ -40,18 +40,16 @@ public class OsVersions {
private final CuratorDatabaseClient db;
private final boolean reprovisionToUpgradeOs;
private final int maxDelegatedUpgrades;
- private final int maxRebuilds;
public OsVersions(NodeRepository nodeRepository) {
- this(nodeRepository, nodeRepository.zone().getCloud().reprovisionToUpgradeOs(), MAX_DELEGATED_UPGRADES, MAX_REBUILDS);
+ this(nodeRepository, nodeRepository.zone().getCloud().reprovisionToUpgradeOs(), MAX_DELEGATED_UPGRADES);
}
- OsVersions(NodeRepository nodeRepository, boolean reprovisionToUpgradeOs, int maxDelegatedUpgrades, int maxRebuilds) {
+ OsVersions(NodeRepository nodeRepository, boolean reprovisionToUpgradeOs, int maxDelegatedUpgrades) {
this.nodeRepository = Objects.requireNonNull(nodeRepository);
this.db = nodeRepository.database();
this.reprovisionToUpgradeOs = reprovisionToUpgradeOs;
this.maxDelegatedUpgrades = maxDelegatedUpgrades;
- this.maxRebuilds = maxRebuilds;
// Read and write all versions to make sure they are stored in the latest version of the serialized format
try (var lock = db.lockOsVersionChange()) {
@@ -146,7 +144,7 @@ public class OsVersions {
.anyMatch(osVersion -> osVersion.current().isPresent() &&
osVersion.current().get().getMajor() < target.getMajor());
if (rebuildRequired) {
- return new RebuildingOsUpgrader(nodeRepository, maxRebuilds);
+ return new RebuildingOsUpgrader(nodeRepository);
}
return new DelegatingOsUpgrader(nodeRepository, maxDelegatedUpgrades);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RebuildingOsUpgrader.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RebuildingOsUpgrader.java
index 25e901ebce3..fce9eb562c9 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RebuildingOsUpgrader.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RebuildingOsUpgrader.java
@@ -2,50 +2,93 @@
package com.yahoo.vespa.hosted.provision.os;
import com.yahoo.component.Version;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.NodeType;
+import com.yahoo.vespa.flags.IntFlag;
+import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;
+import com.yahoo.vespa.hosted.provision.node.Allocation;
import com.yahoo.vespa.hosted.provision.node.filter.NodeListFilter;
import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Objects;
import java.util.Optional;
+import java.util.Set;
import java.util.logging.Logger;
/**
- * An upgrader that retires and rebuilds hosts on stale OS versions. Retirement of each host is spread out in time,
- * according to a time budget, to avoid potential service impact of retiring too many hosts close together.
+ * An upgrader that retires and rebuilds hosts on stale OS versions.
+ *
+ * - We limit the number of concurrent rebuilds to reduce impact of retiring too many hosts.
+ * - We limit rebuilds by cluster so that at most one node per stateful cluster per application is retired at a time.
*
* Used in cases where performing an OS upgrade requires rebuilding the host, e.g. when upgrading across major versions.
*
* @author mpolden
*/
-public class RebuildingOsUpgrader extends RetiringOsUpgrader {
+public class RebuildingOsUpgrader implements OsUpgrader {
private static final Logger LOG = Logger.getLogger(RebuildingOsUpgrader.class.getName());
- private final int maxRebuilds;
+ private final NodeRepository nodeRepository;
+ private final IntFlag maxRebuilds;
- public RebuildingOsUpgrader(NodeRepository nodeRepository, int maxRebuilds) {
- super(nodeRepository);
- this.maxRebuilds = maxRebuilds;
- if (maxRebuilds < 1) throw new IllegalArgumentException("maxRebuilds must be positive, was " + maxRebuilds);
+ public RebuildingOsUpgrader(NodeRepository nodeRepository) {
+ this.nodeRepository = nodeRepository;
+ this.maxRebuilds = PermanentFlags.MAX_REBUILDS.bindTo(nodeRepository.flagSource());
}
@Override
- protected NodeList candidates(Instant instant, OsVersionTarget target, NodeList allNodes) {
- if (allNodes.nodeType(target.nodeType()).rebuilding().size() < maxRebuilds) {
- return super.candidates(instant, target, allNodes);
- }
- return NodeList.of();
+ public void upgradeTo(OsVersionTarget target) {
+ NodeList allNodes = nodeRepository.nodes().list();
+ Instant now = nodeRepository.clock().instant();
+ rebuildableHosts(target, allNodes).forEach(host -> rebuild(host, target.version(), now));
}
@Override
- protected void upgradeNodes(NodeList candidates, Version version, Instant instant) {
- candidates.not().rebuilding()
- .byIncreasingOsVersion()
- .first(1)
- .forEach(node -> rebuild(node, version, instant));
+ public void disableUpgrade(NodeType type) {
+ // No action needed in this implementation. Hosts that have started rebuilding cannot be halted
+ }
+
+ /** Returns the number of hosts of given type that can be rebuilt concurrently */
+ private int upgradeLimit(NodeType hostType, NodeList hosts) {
+ int limit = hostType == NodeType.host ? maxRebuilds.value() : 1;
+ return Math.max(0, limit - hosts.rebuilding().size());
+ }
+
+ private List<Node> rebuildableHosts(OsVersionTarget target, NodeList allNodes) {
+ NodeList hostsOfTargetType = allNodes.nodeType(target.nodeType());
+ NodeList activeHosts = hostsOfTargetType.state(Node.State.active);
+ int upgradeLimit = upgradeLimit(target.nodeType(), hostsOfTargetType);
+
+ // Find stateful clusters with retiring nodes
+ NodeList activeNodes = allNodes.state(Node.State.active);
+ Set<ClusterId> retiringClusters = statefulClustersOf(activeNodes.nodeType(target.nodeType().childNodeType())
+ .retiring());
+
+ // Upgrade hosts not running stateful clusters that are already retiring
+ List<Node> hostsToUpgrade = new ArrayList<>(upgradeLimit);
+ NodeList candidates = activeHosts.not().rebuilding()
+ .osVersionIsBefore(target.version())
+ .byIncreasingOsVersion();
+ for (Node host : candidates) {
+ if (hostsToUpgrade.size() == upgradeLimit) break;
+ Set<ClusterId> clustersOnHost = statefulClustersOf(activeNodes.childrenOf(host));
+ boolean canUpgrade = Collections.disjoint(retiringClusters, clustersOnHost);
+ if (canUpgrade) {
+ hostsToUpgrade.add(host);
+ retiringClusters.addAll(clustersOnHost);
+ }
+ }
+ return Collections.unmodifiableList(hostsToUpgrade);
}
private void rebuild(Node host, Version target, Instant now) {
@@ -54,7 +97,48 @@ public class RebuildingOsUpgrader extends RetiringOsUpgrader {
", want " + target);
nodeRepository.nodes().rebuild(host.hostname(), Agent.RebuildingOsUpgrader, now);
nodeRepository.nodes().upgradeOs(NodeListFilter.from(host), Optional.of(target));
- nodeRepository.osVersions().writeChange((change) -> change.withRetirementAt(now, host.type()));
+ }
+
+ private static Set<ClusterId> statefulClustersOf(NodeList nodes) {
+ Set<ClusterId> clusters = new HashSet<>();
+ for (Node node : nodes) {
+ if (node.type().isHost()) throw new IllegalArgumentException("All nodes must be children, got host " + node);
+ if (node.allocation().isEmpty()) continue;
+ Allocation allocation = node.allocation().get();
+ if (!allocation.membership().cluster().isStateful()) continue;
+ clusters.add(new ClusterId(allocation.owner(), allocation.membership().cluster().id()));
+ }
+ return clusters;
+ }
+
+ private static class ClusterId {
+
+ private final ApplicationId application;
+ private final ClusterSpec.Id cluster;
+
+ public ClusterId(ApplicationId application, ClusterSpec.Id cluster) {
+ this.application = Objects.requireNonNull(application);
+ this.cluster = Objects.requireNonNull(cluster);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ ClusterId that = (ClusterId) o;
+ return application.equals(that.application) && cluster.equals(that.cluster);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(application, cluster);
+ }
+
+ @Override
+ public String toString() {
+ return cluster + " of " + application;
+ }
+
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RetiringOsUpgrader.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RetiringOsUpgrader.java
index cee52cb2177..1e48be189cd 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RetiringOsUpgrader.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RetiringOsUpgrader.java
@@ -37,7 +37,10 @@ public class RetiringOsUpgrader implements OsUpgrader {
NodeList allNodes = nodeRepository.nodes().list();
Instant now = nodeRepository.clock().instant();
NodeList candidates = candidates(now, target, allNodes);
- upgradeNodes(candidates, target.version(), now);
+ candidates.not().deprovisioning()
+ .byIncreasingOsVersion()
+ .first(1)
+ .forEach(node -> deprovision(node, target.version(), now));
}
@Override
@@ -46,7 +49,7 @@ public class RetiringOsUpgrader implements OsUpgrader {
}
/** Returns nodes that are candidates for upgrade */
- protected NodeList candidates(Instant instant, OsVersionTarget target, NodeList allNodes) {
+ private NodeList candidates(Instant instant, OsVersionTarget target, NodeList allNodes) {
NodeList activeNodes = allNodes.state(Node.State.active).nodeType(target.nodeType());
if (activeNodes.isEmpty()) return NodeList.of();
@@ -57,14 +60,6 @@ public class RetiringOsUpgrader implements OsUpgrader {
return activeNodes.osVersionIsBefore(target.version());
}
- /** Trigger upgrade of candidates to given version */
- protected void upgradeNodes(NodeList candidates, Version version, Instant instant) {
- candidates.not().deprovisioning()
- .byIncreasingOsVersion()
- .first(1)
- .forEach(node -> deprovision(node, version, instant));
- }
-
/** Upgrade given host by retiring and deprovisioning it */
private void deprovision(Node host, Version target, Instant now) {
LOG.info("Retiring and deprovisioning " + host + ": On stale OS version " +
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
index 81a22932ba2..711fe39d056 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
@@ -55,7 +55,7 @@ public class CapacityPolicies {
// Allow slow storage in zones which are not performance sensitive
if (zone.system().isCd() || zone.environment() == Environment.dev || zone.environment() == Environment.test)
- target = target.with(NodeResources.DiskSpeed.any).with(NodeResources.StorageType.any);
+ target = target.with(NodeResources.DiskSpeed.any).with(NodeResources.StorageType.any).withBandwidthGbps(0.1);
return target;
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java
index 5adde885276..a364c42eee3 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.provisioning;
+import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterMembership;
import com.yahoo.config.provision.Flavor;
@@ -193,6 +194,12 @@ public abstract class NodeCandidate implements Nodelike, Comparable<NodeCandidat
return Integer.compare(this.allocation().get().membership().index(),
other.allocation().get().membership().index());
+ // Prefer host with latest OS version
+ Version thisHostOsVersion = this.parent.flatMap(host -> host.status().osVersion().current()).orElse(Version.emptyVersion);
+ Version otherHostOsVersion = other.parent.flatMap(host -> host.status().osVersion().current()).orElse(Version.emptyVersion);
+ if (thisHostOsVersion.isAfter(otherHostOsVersion)) return -1;
+ if (otherHostOsVersion.isAfter(thisHostOsVersion)) return 1;
+
return 0;
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/LoadBalancersV1ApiHandler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/LoadBalancersV1ApiHandler.java
index f81e3240397..e73f97304c1 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/LoadBalancersV1ApiHandler.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/LoadBalancersV1ApiHandler.java
@@ -1,53 +1,37 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.restapi;
-import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.container.jdisc.LoggingRequestHandler;
-import com.yahoo.restapi.ErrorResponse;
-import com.yahoo.vespa.hosted.provision.NoSuchNodeException;
+import com.yahoo.restapi.RestApi;
+import com.yahoo.restapi.RestApiRequestHandler;
import com.yahoo.vespa.hosted.provision.NodeRepository;
-import com.yahoo.yolean.Exceptions;
import javax.inject.Inject;
-import java.util.logging.Level;
/**
* @author mpolden
+ * @author jonmv
*/
-public class LoadBalancersV1ApiHandler extends LoggingRequestHandler {
+public class LoadBalancersV1ApiHandler extends RestApiRequestHandler<LoadBalancersV1ApiHandler> {
private final NodeRepository nodeRepository;
@Inject
public LoadBalancersV1ApiHandler(LoggingRequestHandler.Context parentCtx, NodeRepository nodeRepository) {
- super(parentCtx);
+ super(parentCtx, LoadBalancersV1ApiHandler::createRestApiDefinition);
this.nodeRepository = nodeRepository;
}
- @Override
- public HttpResponse handle(HttpRequest request) {
- try {
- switch (request.getMethod()) {
- case GET: return handleGET(request);
- default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
- }
- }
- catch (NotFoundException | NoSuchNodeException e) {
- return ErrorResponse.notFoundError(Exceptions.toMessageString(e));
- }
- catch (IllegalArgumentException e) {
- return ErrorResponse.badRequest(Exceptions.toMessageString(e));
- }
- catch (RuntimeException e) {
- log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
- return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
- }
+ private static RestApi createRestApiDefinition(LoadBalancersV1ApiHandler self) {
+ return RestApi.builder()
+ .addRoute(RestApi.route("/loadbalancers/v1")
+ .get(self::getLoadBalancers))
+ .build();
}
- private HttpResponse handleGET(HttpRequest request) {
- String path = request.getUri().getPath();
- if (path.equals("/loadbalancers/v1/")) return new LoadBalancersResponse(request, nodeRepository);
- throw new NotFoundException("Nothing at path '" + path + "'");
+ private HttpResponse getLoadBalancers(RestApi.RequestContext context) {
+ return new LoadBalancersResponse(context.request(), nodeRepository);
}
+
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/ContainerConfig.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/ContainerConfig.java
index 5e40c0bd9ff..ebaf4d47887 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/ContainerConfig.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/ContainerConfig.java
@@ -33,10 +33,10 @@ public class ContainerConfig {
" <component id='com.yahoo.vespa.flags.InMemoryFlagSource'/>\n" +
" <component id='com.yahoo.config.provision.Zone'/>\n" +
" <handler id='com.yahoo.vespa.hosted.provision.restapi.NodesV2ApiHandler'>\n" +
- " <binding>http://*/nodes/v2/*</binding>\n" +
+ " <binding>http://*/nodes/v2*</binding>\n" +
" </handler>\n" +
" <handler id='com.yahoo.vespa.hosted.provision.restapi.LoadBalancersV1ApiHandler'>\n" +
- " <binding>http://*/loadbalancers/v1/*</binding>\n" +
+ " <binding>http://*/loadbalancers/v1*</binding>\n" +
" </handler>\n" +
" <http>\n" +
" <server id='myServer' port='" + port + "'/>\n" +
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java
index cd9e32ea9d2..4fd55fb56fe 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java
@@ -195,7 +195,7 @@ public class MockDeployer implements Deployer {
public long activate() {
lastDeployTimes.put(applicationId, clock.instant());
- for (Node node : nodeRepository.nodes().list().owner(applicationId).state(Node.State.active).retirementRequested()) {
+ for (Node node : nodeRepository.nodes().list().owner(applicationId).state(Node.State.active).retiring()) {
try (NodeMutex lock = nodeRepository.nodes().lockAndGetRequired(node)) {
nodeRepository.nodes().write(lock.node().retire(nodeRepository.clock().instant()), lock);
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java
index b812a547ede..2b180853d83 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java
@@ -21,6 +21,7 @@ import com.yahoo.config.provisioning.FlavorsConfig;
import com.yahoo.transaction.NestedTransaction;
import com.yahoo.vespa.config.ConfigPayload;
import com.yahoo.vespa.hosted.provision.node.Agent;
+import com.yahoo.vespa.hosted.provision.persistence.DnsNameResolver;
import com.yahoo.vespa.hosted.provision.persistence.NodeSerializer;
import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
import com.yahoo.vespa.model.builder.xml.dom.DomConfigPayloadBuilder;
@@ -62,20 +63,25 @@ public class RealDataScenarioTest {
public void test() {
ProvisioningTester tester = new ProvisioningTester.Builder()
.zone(new Zone(Cloud.builder().dynamicProvisioning(true).build(), SystemName.defaultSystem(), Environment.prod, RegionName.defaultName()))
- .flavorsConfig(parseFlavors(Paths.get("flavors.xml")))
+ .flavorsConfig(parseFlavors(Paths.get(System.getProperty("user.home"), ".flavors.xml")))
+ .nameResolver(new DnsNameResolver())
+ .spareCount(1)
.build();
- initFromZk(tester.nodeRepository(), Paths.get("snapshot"));
+ initFromZk(tester.nodeRepository(), Paths.get(System.getProperty("user.home"), "snapshot"));
ApplicationId app = ApplicationId.from("tenant", "app", "default");
Version version = Version.fromString("7.123.4");
Capacity[] capacities = new Capacity[]{
- Capacity.from(new ClusterResources(1, 1, new NodeResources(0.5, 4, 50, 0.3, any, remote))),
+ Capacity.from(new ClusterResources(1, 1, NodeResources.unspecified())),
+ /** TODO: Change to NodeResources.unspecified() when {@link com.yahoo.vespa.flags.Flags#DEDICATED_CLUSTER_CONTROLLER_FLAVOR} is gone */
+ Capacity.from(new ClusterResources(3, 1, new NodeResources(0.25, 1.0, 10.0, 0.3, any))),
Capacity.from(new ClusterResources(4, 1, new NodeResources(8, 16, 100, 0.3, fast, remote))),
Capacity.from(new ClusterResources(2, 1, new NodeResources(4, 8, 100, 0.3, fast, local)))
};
ClusterSpec[] specs = new ClusterSpec[]{
ClusterSpec.request(ClusterSpec.Type.admin, ClusterSpec.Id.from("logserver")).vespaVersion(version).build(),
+ ClusterSpec.request(ClusterSpec.Type.admin, ClusterSpec.Id.from("cluster-controllers")).vespaVersion(version).build(),
ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container")).vespaVersion(version).build(),
ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content")).vespaVersion(version).build()
};
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
index 333b1bb3558..b3ee30e4258 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
@@ -3,12 +3,17 @@ package com.yahoo.vespa.hosted.provision.os;
import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.Capacity;
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeType;
import com.yahoo.test.ManualClock;
+import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.node.Agent;
+import com.yahoo.vespa.hosted.provision.node.Allocation;
import com.yahoo.vespa.hosted.provision.node.OsVersion;
import com.yahoo.vespa.hosted.provision.node.Status;
import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
@@ -19,6 +24,7 @@ import java.time.temporal.ChronoUnit;
import java.util.Comparator;
import java.util.List;
import java.util.Optional;
+import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
@@ -91,7 +97,7 @@ public class OsVersionsTest {
public void max_active_upgrades() {
int totalNodes = 20;
int maxActiveUpgrades = 5;
- var versions = new OsVersions(tester.nodeRepository(), false, maxActiveUpgrades, Integer.MAX_VALUE);
+ var versions = new OsVersions(tester.nodeRepository(), false, maxActiveUpgrades);
provisionInfraApplication(totalNodes);
Supplier<NodeList> hostNodes = () -> tester.nodeRepository().nodes().list().state(Node.State.active).hosts();
@@ -156,7 +162,7 @@ public class OsVersionsTest {
@Test
public void upgrade_by_retiring() {
- var versions = new OsVersions(tester.nodeRepository(), true, Integer.MAX_VALUE, Integer.MAX_VALUE);
+ var versions = new OsVersions(tester.nodeRepository(), true, Integer.MAX_VALUE);
var clock = (ManualClock) tester.nodeRepository().clock();
int hostCount = 10;
// Provision hosts and children
@@ -223,7 +229,7 @@ public class OsVersionsTest {
@Test
public void upgrade_by_retiring_everything_at_once() {
- var versions = new OsVersions(tester.nodeRepository(), true, Integer.MAX_VALUE, Integer.MAX_VALUE);
+ var versions = new OsVersions(tester.nodeRepository(), true, Integer.MAX_VALUE);
int hostCount = 3;
provisionInfraApplication(hostCount, infraApplication, NodeType.confighost);
Supplier<NodeList> hostNodes = () -> tester.nodeRepository().nodes().list()
@@ -246,8 +252,8 @@ public class OsVersionsTest {
@Test
public void upgrade_by_rebuilding() {
- var versions = new OsVersions(tester.nodeRepository(), false, Integer.MAX_VALUE, 1);
- var clock = tester.clock();
+ tester.flagSource().withIntFlag(PermanentFlags.MAX_REBUILDS.id(), 1);
+ var versions = new OsVersions(tester.nodeRepository(), false, Integer.MAX_VALUE);
int hostCount = 10;
provisionInfraApplication(hostCount + 1);
Supplier<NodeList> hostNodes = () -> tester.nodeRepository().nodes().list().nodeType(NodeType.host);
@@ -263,20 +269,13 @@ public class OsVersionsTest {
// Target is set for new major version. Upgrade mechanism switches to rebuilding
var version1 = Version.fromString("8.0");
- Duration totalBudget = Duration.ofHours(12);
- Duration nodeBudget = totalBudget.dividedBy(hostCount);
- versions.setTarget(NodeType.host, version1, totalBudget, false);
+ versions.setTarget(NodeType.host, version1, Duration.ZERO, false);
versions.resumeUpgradeOf(NodeType.host, true);
// One host starts rebuilding
assertEquals(1, hostNodes.get().rebuilding().size());
- // Nothing happens on next resume as first host has not spent its budget
- versions.resumeUpgradeOf(NodeType.host, true);
- assertEquals(1, hostNodes.get().rebuilding().size());
-
- // Time budget has been spent, but we cannot rebuild another host until the current one is done
- clock.advance(nodeBudget);
+ // We cannot rebuild another host until the current one is done
versions.resumeUpgradeOf(NodeType.host, true);
NodeList hostsRebuilding = hostNodes.get().rebuilding();
assertEquals(1, hostsRebuilding.size());
@@ -290,7 +289,6 @@ public class OsVersionsTest {
// The remaining hosts complete their upgrade
for (int i = 0; i < hostCount - 2; i++) {
- clock.advance(nodeBudget);
versions.resumeUpgradeOf(NodeType.host, true);
hostsRebuilding = hostNodes.get().rebuilding();
assertEquals(1, hostsRebuilding.size());
@@ -307,7 +305,7 @@ public class OsVersionsTest {
// Next version is within same major. Upgrade mechanism switches to delegated
var version2 = Version.fromString("8.1");
- versions.setTarget(NodeType.host, version2, totalBudget, false);
+ versions.setTarget(NodeType.host, version2, Duration.ZERO, false);
versions.resumeUpgradeOf(NodeType.host, true);
NodeList nonFailingHosts = hostNodes.get().except(failedHost);
assertTrue("Wanted version is set", nonFailingHosts.stream()
@@ -320,7 +318,6 @@ public class OsVersionsTest {
assertEquals(version0, reactivatedHost.status().osVersion().current().get());
// Resuming upgrades reactivated host. Upgrade mechanism switches to rebuilding
- clock.advance(nodeBudget);
versions.resumeUpgradeOf(NodeType.host, true);
hostsRebuilding = hostNodes.get().rebuilding();
assertEquals(List.of(reactivatedHost), hostsRebuilding.asList());
@@ -329,8 +326,8 @@ public class OsVersionsTest {
@Test
public void upgrade_by_rebuilding_multiple_host_types() {
- var versions = new OsVersions(tester.nodeRepository(), false, Integer.MAX_VALUE, 1);
- var clock = tester.clock();
+ tester.flagSource().withIntFlag(PermanentFlags.MAX_REBUILDS.id(), 1);
+ var versions = new OsVersions(tester.nodeRepository(), false, Integer.MAX_VALUE);
int hostCount = 3;
provisionInfraApplication(hostCount, infraApplication, NodeType.host);
provisionInfraApplication(hostCount, ApplicationId.from("hosted-vespa", "confighost", "default"), NodeType.confighost);
@@ -345,14 +342,11 @@ public class OsVersionsTest {
// Target is set for new major version
var version1 = Version.fromString("8.0");
- Duration totalBudget = Duration.ofHours(12);
- Duration nodeBudget = totalBudget.dividedBy(hostCount);
- versions.setTarget(NodeType.host, version1, totalBudget, false);
- versions.setTarget(NodeType.confighost, version1, totalBudget, false);
+ versions.setTarget(NodeType.host, version1, Duration.ZERO, false);
+ versions.setTarget(NodeType.confighost, version1, Duration.ZERO, false);
// One host of each type is upgraded
for (int i = 0; i < hostCount; i++) {
- clock.advance(nodeBudget);
versions.resumeUpgradeOf(NodeType.host, true);
versions.resumeUpgradeOf(NodeType.confighost, true);
NodeList hostsRebuilding = hosts.get().rebuilding();
@@ -363,6 +357,140 @@ public class OsVersionsTest {
assertEquals("All hosts upgraded", hostCount * 2, hosts.get().onOsVersion(version1).size());
}
+ @Test
+ public void upgrade_by_rebuilding_is_limited_by_stateful_clusters() {
+ tester.flagSource().withIntFlag(PermanentFlags.MAX_REBUILDS.id(), 3);
+ var versions = new OsVersions(tester.nodeRepository(), false, Integer.MAX_VALUE);
+ int hostCount = 5;
+ ApplicationId app1 = ApplicationId.from("t1", "a1", "i1");
+ ApplicationId app2 = ApplicationId.from("t2", "a2", "i2");
+ provisionInfraApplication(hostCount, infraApplication, NodeType.host);
+ deployApplication(app1);
+ deployApplication(app2);
+ Supplier<NodeList> hosts = () -> tester.nodeRepository().nodes().list().nodeType(NodeType.host);
+
+ // All hosts are on initial version
+ var version0 = Version.fromString("7.0");
+ versions.setTarget(NodeType.host, version0, Duration.ZERO, false);
+ setCurrentVersion(hosts.get().asList(), version0);
+
+ // Target is set for new major version
+ var version1 = Version.fromString("8.0");
+ versions.setTarget(NodeType.host, version1, Duration.ZERO, false);
+
+ // Upgrades 1 host per stateful cluster and 1 empty host
+ versions.resumeUpgradeOf(NodeType.host, true);
+ NodeList allNodes = tester.nodeRepository().nodes().list();
+ List<Node> hostsRebuilding = allNodes.nodeType(NodeType.host)
+ .rebuilding()
+ .sortedBy(Comparator.comparing(Node::hostname))
+ .asList();
+ List<Optional<ApplicationId>> owners = List.of(Optional.of(app1), Optional.of(app2), Optional.empty());
+ assertEquals(3, hostsRebuilding.size());
+ for (int i = 0; i < hostsRebuilding.size(); i++) {
+ Optional<ApplicationId> owner = owners.get(i);
+ List<Node> retiringChildren = allNodes.childrenOf(hostsRebuilding.get(i)).retiring().asList();
+ assertEquals(owner.isPresent() ? 1 : 0, retiringChildren.size());
+ assertEquals("Rebuilding host of " + owner.map(ApplicationId::toString)
+ .orElse("no application"),
+ owner,
+ retiringChildren.stream()
+ .findFirst()
+ .flatMap(Node::allocation)
+ .map(Allocation::owner));
+ }
+
+ // Replace any retired nodes
+ replaceNodes(app1);
+ replaceNodes(app2);
+
+ // Complete rebuild
+ completeRebuildOf(hostsRebuilding, NodeType.host);
+ assertEquals(3, hosts.get().onOsVersion(version1).size());
+
+ // Both applications have moved their nodes to the hosts on old OS version
+ allNodes = tester.nodeRepository().nodes().list();
+ NodeList hostsOnOldVersion = allNodes.onOsVersion(version0);
+ assertEquals(2, hostsOnOldVersion.size());
+ for (var host : hostsOnOldVersion) {
+ assertEquals(1, allNodes.childrenOf(host).owner(app1).size());
+ assertEquals(1, allNodes.childrenOf(host).owner(app2).size());
+ }
+
+ // Since both applications now occupy all remaining hosts, we can only upgrade 1 at a time
+ for (int i = 0; i < hostsOnOldVersion.size(); i++) {
+ versions.resumeUpgradeOf(NodeType.host, true);
+ hostsRebuilding = hosts.get().rebuilding().asList();
+ assertEquals(1, hostsRebuilding.size());
+ replaceNodes(app1);
+ replaceNodes(app2);
+ completeRebuildOf(hostsRebuilding, NodeType.host);
+ }
+
+ // Resuming upgrade has no effect as all hosts have upgraded
+ versions.resumeUpgradeOf(NodeType.host, true);
+ NodeList allHosts = hosts.get();
+ assertEquals(0, allHosts.rebuilding().size());
+ assertEquals(allHosts.size(), allHosts.onOsVersion(version1).size());
+ }
+
+ @Test
+ public void upgrade_by_rebuilding_is_limited_by_infrastructure_host() {
+ int hostCount = 3;
+ tester.flagSource().withIntFlag(PermanentFlags.MAX_REBUILDS.id(), hostCount);
+ var versions = new OsVersions(tester.nodeRepository(), false, Integer.MAX_VALUE);
+ ApplicationId routingApp = ApplicationId.from("t1", "a1", "i1");
+ List<Node> proxyHosts = provisionInfraApplication(hostCount, infraApplication, NodeType.proxyhost);
+ for (int i = 0; i < proxyHosts.size(); i++) {
+ tester.makeReadyChildren(1, i, new NodeResources(4,8, 100, 0.3),
+ NodeType.proxy, proxyHosts.get(i).hostname(), (index) -> "proxy" + index);
+ }
+ Capacity capacity = Capacity.fromRequiredNodeType(NodeType.proxy);
+ tester.deploy(routingApp, capacity);
+ Supplier<NodeList> hosts = () -> tester.nodeRepository().nodes().list().nodeType(NodeType.proxyhost);
+
+ // All hosts are on initial version
+ var version0 = Version.fromString("7.0");
+ versions.setTarget(NodeType.proxyhost, version0, Duration.ZERO, false);
+ setCurrentVersion(hosts.get().asList(), version0);
+
+ // Target is set for new major version
+ var version1 = Version.fromString("8.0");
+ versions.setTarget(NodeType.proxyhost, version1, Duration.ZERO, false);
+
+ // Upgrades 1 host at a time
+ for (int i = 0; i < hostCount; i++) {
+ versions.resumeUpgradeOf(NodeType.proxyhost, true);
+ List<Node> hostsRebuilding = hosts.get().rebuilding().asList();
+ assertEquals(1, hostsRebuilding.size());
+ replaceNodes(routingApp, (app) -> tester.deploy(app, capacity));
+ completeRebuildOf(hostsRebuilding, NodeType.proxyhost);
+ }
+ }
+
+ private void deployApplication(ApplicationId application) {
+ ClusterSpec contentSpec = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content1")).vespaVersion("7").build();
+ List<HostSpec> hostSpecs = tester.prepare(application, contentSpec, 2, 1, new NodeResources(4, 8, 100, 0.3));
+ tester.activate(application, hostSpecs);
+ }
+
+ private void replaceNodes(ApplicationId application) {
+ replaceNodes(application, this::deployApplication);
+ }
+
+ private void replaceNodes(ApplicationId application, Consumer<ApplicationId> deployer) {
+ // Deploy to retire nodes
+ deployer.accept(application);
+ List<Node> retired = tester.nodeRepository().nodes().list().owner(application).retired().asList();
+ assertFalse("At least one node is retired", retired.isEmpty());
+ tester.nodeRepository().nodes().setRemovable(application, retired);
+
+ // Redeploy to deactivate removable nodes and allocate new ones
+ deployer.accept(application);
+ tester.nodeRepository().nodes().list(Node.State.inactive).owner(application)
+ .forEach(node -> tester.nodeRepository().nodes().removeRecursively(node, true));
+ }
+
private NodeList deprovisioningChildrenOf(Node parent) {
return tester.nodeRepository().nodes().list()
.childrenOf(parent)
@@ -374,12 +502,13 @@ public class OsVersionsTest {
}
private List<Node> provisionInfraApplication(int nodeCount, ApplicationId application, NodeType nodeType) {
- var nodes = tester.makeReadyNodes(nodeCount, "default", nodeType, 1);
+ var nodes = tester.makeReadyNodes(nodeCount, new NodeResources(48, 128, 2000, 10), nodeType, 10);
tester.prepareAndActivateInfraApplication(application, nodeType);
return nodes.stream()
.map(Node::hostname)
.flatMap(hostname -> tester.nodeRepository().nodes().node(hostname).stream())
.collect(Collectors.toList());
+
}
private Version minVersion(NodeList nodes, Function<OsVersion, Optional<Version>> versionField) {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
index d2140055a63..a53c7469a25 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
@@ -150,6 +150,7 @@ public class ProvisioningTester {
public LoadBalancerServiceMock loadBalancerService() { return loadBalancerService; }
public CapacityPolicies capacityPolicies() { return capacityPolicies; }
public NodeList getNodes(ApplicationId id, Node.State ... inState) { return nodeRepository.nodes().list(inState).owner(id); }
+ public InMemoryFlagSource flagSource() { return (InMemoryFlagSource) nodeRepository.flagSource(); }
public Node patchNode(Node node, UnaryOperator<Node> patcher) {
return patchNodes(List.of(node), patcher).get(0);
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/LoadBalancersV1ApiTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/LoadBalancersV1ApiTest.java
index e9811985b7d..1ca552bca94 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/LoadBalancersV1ApiTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/LoadBalancersV1ApiTest.java
@@ -22,6 +22,7 @@ public class LoadBalancersV1ApiTest {
@Test
public void test_load_balancers() throws Exception {
+ tester.assertFile(new Request("http://localhost:8080/loadbalancers/v1"), "load-balancers.json");
tester.assertFile(new Request("http://localhost:8080/loadbalancers/v1/"), "load-balancers.json");
tester.assertFile(new Request("http://localhost:8080/loadbalancers/v1/?application=tenant4.application4.instance4"), "load-balancers-single.json");
tester.assertResponse(new Request("http://localhost:8080/loadbalancers/v1/?application=tenant.nonexistent.default"), "{\"loadBalancers\":[]}");
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorImpl.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorImpl.java
index a901ecf4e76..6d50002669d 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorImpl.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorImpl.java
@@ -5,6 +5,7 @@ import com.google.common.util.concurrent.UncheckedTimeoutException;
import com.google.inject.Inject;
import com.yahoo.cloud.config.ConfigserverConfig;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.applicationmodel.ApplicationInstance;
import com.yahoo.vespa.applicationmodel.ApplicationInstanceReference;
import com.yahoo.vespa.applicationmodel.ClusterId;
@@ -74,9 +75,10 @@ public class OrchestratorImpl implements Orchestrator {
OrchestratorConfig orchestratorConfig,
ServiceMonitor serviceMonitor,
ConfigserverConfig configServerConfig,
- FlagSource flagSource)
+ FlagSource flagSource,
+ Zone zone)
{
- this(new HostedVespaPolicy(new HostedVespaClusterPolicy(flagSource),
+ this(new HostedVespaPolicy(new HostedVespaClusterPolicy(flagSource, zone),
clusterControllerClientFactory,
new ApplicationApiFactory(configServerConfig.zookeeperserver().size(), Clock.systemUTC())),
clusterControllerClientFactory,
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApi.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApi.java
index a6353e39610..a8734dd67e5 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApi.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApi.java
@@ -36,6 +36,6 @@ public interface ApplicationApi {
}
List<StorageNode> getStorageNodesInGroupInClusterOrder();
- List<StorageNode> getUpStorageNodesInGroupInClusterOrder();
+ List<StorageNode> getNoRemarksStorageNodesInGroupInClusterOrder();
List<StorageNode> getSuspendedStorageNodesInGroupInReverseClusterOrder();
}
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApiImpl.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApiImpl.java
index 29307b36f4b..efbf9ff7981 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApiImpl.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApiImpl.java
@@ -10,12 +10,11 @@ import com.yahoo.vespa.orchestrator.OrchestratorContext;
import com.yahoo.vespa.orchestrator.OrchestratorUtil;
import com.yahoo.vespa.orchestrator.controller.ClusterControllerClientFactory;
import com.yahoo.vespa.orchestrator.status.ApplicationInstanceStatus;
+import com.yahoo.vespa.orchestrator.status.ApplicationLock;
import com.yahoo.vespa.orchestrator.status.HostInfos;
import com.yahoo.vespa.orchestrator.status.HostStatus;
-import com.yahoo.vespa.orchestrator.status.ApplicationLock;
import java.time.Clock;
-import java.util.Collection;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
@@ -24,8 +23,6 @@ import java.util.Set;
import java.util.function.Predicate;
import java.util.stream.Collectors;
-import static com.yahoo.vespa.orchestrator.OrchestratorUtil.getHostsUsedByApplicationInstance;
-
/**
* @author hakonhall
*/
@@ -87,11 +84,12 @@ public class ApplicationApiImpl implements ApplicationApi {
}
@Override
- public List<StorageNode> getUpStorageNodesInGroupInClusterOrder() {
+ public List<StorageNode> getNoRemarksStorageNodesInGroupInClusterOrder() {
return clusterInOrder.stream()
- .map(ClusterApi::upStorageNodeInGroup)
+ .map(ClusterApi::storageNodeInGroup)
.filter(Optional::isPresent)
.map(Optional::get)
+ .filter(x -> hostInfos.getOrNoRemarks(x.hostName()).status() == HostStatus.NO_REMARKS)
.collect(Collectors.toList());
}
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ClusterApi.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ClusterApi.java
index 2e7a63ddb2f..78373282df8 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ClusterApi.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ClusterApi.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.orchestrator.model;
+import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.applicationmodel.ClusterId;
import com.yahoo.vespa.applicationmodel.ServiceType;
import com.yahoo.vespa.orchestrator.policy.SuspensionReasons;
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicy.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicy.java
index bd0075b403c..e3989fd86d4 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicy.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicy.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.orchestrator.policy;
+import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.applicationmodel.ClusterId;
import com.yahoo.vespa.applicationmodel.ServiceType;
import com.yahoo.vespa.flags.BooleanFlag;
@@ -18,10 +19,12 @@ import static com.yahoo.vespa.orchestrator.policy.HostedVespaPolicy.ENOUGH_SERVI
public class HostedVespaClusterPolicy implements ClusterPolicy {
private final BooleanFlag groupSuspensionFlag;
+ private final Zone zone;
- public HostedVespaClusterPolicy(FlagSource flagSource) {
+ public HostedVespaClusterPolicy(FlagSource flagSource, Zone zone) {
// Note that the "group" in this flag refers to hierarchical groups of a content cluster.
this.groupSuspensionFlag = Flags.GROUP_SUSPENSION.bindTo(flagSource);
+ this.zone = zone;
}
@Override
@@ -155,7 +158,9 @@ public class HostedVespaClusterPolicy implements ClusterPolicy {
return ConcurrentSuspensionLimitForCluster.ONE_NODE;
}
- return ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT;
+ return zone.system().isCd()
+ ? ConcurrentSuspensionLimitForCluster.FIFTY_PERCENT
+ : ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT;
}
// The above should cover all cases, but if not we'll return a reasonable default:
@@ -183,7 +188,9 @@ public class HostedVespaClusterPolicy implements ClusterPolicy {
}
if (clusterApi.getApplication().applicationId().equals(VespaModelUtil.TENANT_HOST_APPLICATION_ID)) {
- return ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT;
+ return zone.system().isCd()
+ ? ConcurrentSuspensionLimitForCluster.FIFTY_PERCENT
+ : ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT;
}
return ConcurrentSuspensionLimitForCluster.TEN_PERCENT;
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicy.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicy.java
index 8090a4e95c4..d9fc2a989de 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicy.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicy.java
@@ -48,9 +48,9 @@ public class HostedVespaPolicy implements Policy {
suspensionReasons.mergeWith(clusterPolicy.verifyGroupGoingDownIsFine(cluster));
}
- // Ask Cluster Controller to set UP storage nodes in maintenance.
- // These storage nodes are guaranteed to be NO_REMARKS
- for (StorageNode storageNode : application.getUpStorageNodesInGroupInClusterOrder()) {
+ // Ask Cluster Controller to set storage nodes in maintenance, unless a node is already allowed
+ // to be down (or permanently down), in which case it is guaranteed to already be in maintenance.
+ for (StorageNode storageNode : application.getNoRemarksStorageNodesInGroupInClusterOrder()) {
storageNode.setNodeState(context, ClusterControllerNodeState.MAINTENANCE);
}
diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/OrchestratorImplTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/OrchestratorImplTest.java
index 0dc81904582..7d1b69d7171 100644
--- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/OrchestratorImplTest.java
+++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/OrchestratorImplTest.java
@@ -1,7 +1,8 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.orchestrator;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.Zone;
import com.yahoo.jdisc.Metric;
import com.yahoo.jdisc.test.TestTimer;
import com.yahoo.test.ManualClock;
@@ -77,6 +78,8 @@ import static org.mockito.internal.verification.VerificationModeFactory.atLeastO
*/
public class OrchestratorImplTest {
+ private static final Zone zone = Zone.defaultZone();
+
private final ManualClock clock = new ManualClock();
private final ApplicationApiFactory applicationApiFactory = new ApplicationApiFactory(3, clock);
private final InMemoryFlagSource flagSource = new InMemoryFlagSource();
@@ -104,7 +107,9 @@ public class OrchestratorImplTest {
app2 = OrchestratorUtil.toApplicationId(iterator.next().reference());
clustercontroller = new ClusterControllerClientFactoryMock();
- orchestrator = new OrchestratorImpl(new HostedVespaPolicy(new HostedVespaClusterPolicy(flagSource), clustercontroller, applicationApiFactory),
+ orchestrator = new OrchestratorImpl(new HostedVespaPolicy(new HostedVespaClusterPolicy(flagSource, zone),
+ clustercontroller,
+ applicationApiFactory),
clustercontroller,
statusService,
new DummyServiceMonitor(),
@@ -448,7 +453,9 @@ public class OrchestratorImplTest {
when(clusterControllerClientFactory.createClient(List.of(ccHost), "foo")).thenReturn(fooClient);
when(clusterControllerClientFactory.createClient(List.of(ccHost), "bar")).thenReturn(barClient);
- orchestrator = new OrchestratorImpl(new HostedVespaPolicy(new HostedVespaClusterPolicy(flagSource), clusterControllerClientFactory, applicationApiFactory),
+ orchestrator = new OrchestratorImpl(new HostedVespaPolicy(new HostedVespaClusterPolicy(flagSource, zone),
+ clusterControllerClientFactory,
+ applicationApiFactory),
clusterControllerClientFactory,
statusService,
serviceMonitor,
@@ -507,7 +514,9 @@ public class OrchestratorImplTest {
ServiceMonitor serviceMonitor = () -> new ServiceModel(Map.of(reference, applicationInstance));
- orchestrator = new OrchestratorImpl(new HostedVespaPolicy(new HostedVespaClusterPolicy(flagSource), clusterControllerClientFactory, applicationApiFactory),
+ orchestrator = new OrchestratorImpl(new HostedVespaPolicy(new HostedVespaClusterPolicy(flagSource, zone),
+ clusterControllerClientFactory,
+ applicationApiFactory),
clusterControllerClientFactory,
statusService,
serviceMonitor,
diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/OrchestratorTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/OrchestratorTest.java
index 953c8d1043e..193798d35fc 100644
--- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/OrchestratorTest.java
+++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/OrchestratorTest.java
@@ -43,6 +43,9 @@ import static org.mockito.Mockito.when;
* @author hakon
*/
public class OrchestratorTest {
+
+ private static final Zone zone = Zone.defaultZone();
+
private final InMemoryStatusService statusService = new InMemoryStatusService();
private DuperModelManager duperModelManager;
private MySuperModelProvider superModelManager;
@@ -55,7 +58,7 @@ public class OrchestratorTest {
var timer = new TestTimer();
var clustercontroller = new ClusterControllerClientFactoryMock();
var applicationApiFactory = new ApplicationApiFactory(3, timer.toUtcClock());
- var policy = new HostedVespaPolicy(new HostedVespaClusterPolicy(flagSource), clustercontroller, applicationApiFactory);
+ var policy = new HostedVespaPolicy(new HostedVespaClusterPolicy(flagSource, zone), clustercontroller, applicationApiFactory);
var zone = new Zone(SystemName.cd, Environment.prod, RegionName.from("cd-us-east-1"));
this.superModelManager = new MySuperModelProvider();
var duperModel = new DuperModel();
diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/model/ApplicationApiImplTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/model/ApplicationApiImplTest.java
index 314c21f5aae..c3e53b2f340 100644
--- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/model/ApplicationApiImplTest.java
+++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/model/ApplicationApiImplTest.java
@@ -129,31 +129,31 @@ public class ApplicationApiImplTest {
)
));
- verifyUpStorageNodesInOrder(modelUtils.createScopedApplicationApi(applicationInstance, hostName1), hostName1);
- verifyUpStorageNodesInOrder(modelUtils.createScopedApplicationApi(applicationInstance, hostName2), hostName2);
- verifyUpStorageNodesInOrder(modelUtils.createScopedApplicationApi(applicationInstance, hostName3)); // host3 is DOWN
- verifyUpStorageNodesInOrder(modelUtils.createScopedApplicationApi(applicationInstance, hostName4), hostName4);
- verifyUpStorageNodesInOrder(modelUtils.createScopedApplicationApi(applicationInstance, hostName5)); // not a storage cluster
+ verifyNoRemarksStorageNodesInOrder(modelUtils.createScopedApplicationApi(applicationInstance, hostName1), hostName1);
+ verifyNoRemarksStorageNodesInOrder(modelUtils.createScopedApplicationApi(applicationInstance, hostName2), hostName2);
+ verifyNoRemarksStorageNodesInOrder(modelUtils.createScopedApplicationApi(applicationInstance, hostName3), hostName3); // host3 is DOWN
+ verifyNoRemarksStorageNodesInOrder(modelUtils.createScopedApplicationApi(applicationInstance, hostName4), hostName4);
+ verifyNoRemarksStorageNodesInOrder(modelUtils.createScopedApplicationApi(applicationInstance, hostName5)); // not a storage cluster
- verifyUpStorageNodesInOrder(modelUtils.createScopedApplicationApi(applicationInstance, hostName1, hostName3), hostName1);
+ verifyNoRemarksStorageNodesInOrder(modelUtils.createScopedApplicationApi(applicationInstance, hostName1, hostName3), hostName3, hostName1);
// For the node group (host1, host4), they both have an up storage node (service instance)
// with clusters (cluster-3, cluster-1) respectively, and so the order of the hosts are reversed
// (host4, host1) when sorted by the clusters.
- verifyUpStorageNodesInOrder(modelUtils.createScopedApplicationApi(applicationInstance, hostName1, hostName4), hostName4, hostName1);
+ verifyNoRemarksStorageNodesInOrder(modelUtils.createScopedApplicationApi(applicationInstance, hostName1, hostName4), hostName4, hostName1);
- verifyUpStorageNodesInOrder(modelUtils.createScopedApplicationApi(
+ verifyNoRemarksStorageNodesInOrder(modelUtils.createScopedApplicationApi(
applicationInstance, hostName1, hostName4, hostName5), hostName4, hostName1);
- verifyUpStorageNodesInOrder(modelUtils.createScopedApplicationApi(
- applicationInstance, hostName1, hostName4, hostName5, hostName6), hostName4, hostName1);
- verifyUpStorageNodesInOrder(modelUtils.createScopedApplicationApi(
+ verifyNoRemarksStorageNodesInOrder(modelUtils.createScopedApplicationApi(
+ applicationInstance, hostName1, hostName4, hostName5, hostName6), hostName4, hostName6, hostName1);
+ verifyNoRemarksStorageNodesInOrder(modelUtils.createScopedApplicationApi(
applicationInstance, hostName1, hostName4, hostName5, hostName7), hostName4, hostName7, hostName1);
}
- private void verifyUpStorageNodesInOrder(ScopedApplicationApi scopedApi,
- HostName... expectedHostNames) {
+ private void verifyNoRemarksStorageNodesInOrder(ScopedApplicationApi scopedApi,
+ HostName... expectedHostNames) {
try (scopedApi) {
- List<HostName> upStorageNodes = scopedApi.applicationApi().getUpStorageNodesInGroupInClusterOrder().stream()
+ List<HostName> upStorageNodes = scopedApi.applicationApi().getNoRemarksStorageNodesInGroupInClusterOrder().stream()
.map(storageNode -> storageNode.hostName())
.collect(Collectors.toList());
assertEquals(Arrays.asList(expectedHostNames), upStorageNodes);
@@ -162,15 +162,15 @@ public class ApplicationApiImplTest {
@Test
public void testUpConditionOfStorageNode() {
- verifyUpConditionWith(HostStatus.NO_REMARKS, ServiceStatus.UP, true);
- verifyUpConditionWith(HostStatus.NO_REMARKS, ServiceStatus.NOT_CHECKED, true);
- verifyUpConditionWith(HostStatus.NO_REMARKS, ServiceStatus.DOWN, false);
- verifyUpConditionWith(HostStatus.ALLOWED_TO_BE_DOWN, ServiceStatus.UP, false);
- verifyUpConditionWith(HostStatus.ALLOWED_TO_BE_DOWN, ServiceStatus.NOT_CHECKED, false);
- verifyUpConditionWith(HostStatus.ALLOWED_TO_BE_DOWN, ServiceStatus.DOWN, false);
+ verifyNoRemarksConditionWith(HostStatus.NO_REMARKS, ServiceStatus.UP, true);
+ verifyNoRemarksConditionWith(HostStatus.NO_REMARKS, ServiceStatus.NOT_CHECKED, true);
+ verifyNoRemarksConditionWith(HostStatus.NO_REMARKS, ServiceStatus.DOWN, true);
+ verifyNoRemarksConditionWith(HostStatus.ALLOWED_TO_BE_DOWN, ServiceStatus.UP, false);
+ verifyNoRemarksConditionWith(HostStatus.ALLOWED_TO_BE_DOWN, ServiceStatus.NOT_CHECKED, false);
+ verifyNoRemarksConditionWith(HostStatus.ALLOWED_TO_BE_DOWN, ServiceStatus.DOWN, false);
}
- private void verifyUpConditionWith(HostStatus hostStatus, ServiceStatus serviceStatus, boolean expectUp) {
+ private void verifyNoRemarksConditionWith(HostStatus hostStatus, ServiceStatus serviceStatus, boolean expectUp) {
HostName hostName1 = new HostName("host1");
ApplicationInstance applicationInstance =
modelUtils.createApplicationInstance(Arrays.asList(
@@ -187,7 +187,7 @@ public class ApplicationApiImplTest {
List<HostName> upStorageNodes = expectUp ? Arrays.asList(hostName1) : new ArrayList<>();
List<HostName> actualStorageNodes = scopedApi.applicationApi()
- .getUpStorageNodesInGroupInClusterOrder()
+ .getNoRemarksStorageNodesInGroupInClusterOrder()
.stream()
.map(storageNode -> storageNode.hostName())
.collect(Collectors.toList());
diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/model/ClusterApiImplTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/model/ClusterApiImplTest.java
index 6d7c79176fb..6bf46052933 100644
--- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/model/ClusterApiImplTest.java
+++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/model/ClusterApiImplTest.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.orchestrator.model;
import com.yahoo.config.provision.Zone;
@@ -49,6 +49,8 @@ import static org.mockito.Mockito.when;
*/
public class ClusterApiImplTest {
+ private static final Zone zone = Zone.defaultZone();
+
private final ApplicationApi applicationApi = mock(ApplicationApi.class);
private final ModelTestUtils modelUtils = new ModelTestUtils();
private final ManualClock clock = new ManualClock(Instant.ofEpochSecond(1600436659));
@@ -107,7 +109,7 @@ public class ClusterApiImplTest {
public void testCfg1SuspensionFailsWithMissingCfg3() {
ClusterApiImpl clusterApi = makeCfg1ClusterApi(ServiceStatus.UP, ServiceStatus.UP);
- HostedVespaClusterPolicy policy = new HostedVespaClusterPolicy(flagSource);
+ HostedVespaClusterPolicy policy = new HostedVespaClusterPolicy(flagSource, zone);
try {
policy.verifyGroupGoingDownIsFine(clusterApi);
@@ -139,7 +141,7 @@ public class ClusterApiImplTest {
ServiceStatus.UP,
ServiceStatus.UP);
- HostedVespaClusterPolicy policy = new HostedVespaClusterPolicy(flagSource);
+ HostedVespaClusterPolicy policy = new HostedVespaClusterPolicy(flagSource, zone);
try {
policy.verifyGroupGoingDownIsFine(clusterApi);
@@ -167,7 +169,7 @@ public class ClusterApiImplTest {
public void testCfg1SuspendsIfDownWithMissingCfg3() throws HostStateChangeDeniedException {
ClusterApiImpl clusterApi = makeCfg1ClusterApi(ServiceStatus.DOWN, ServiceStatus.UP);
- HostedVespaClusterPolicy policy = new HostedVespaClusterPolicy(flagSource);
+ HostedVespaClusterPolicy policy = new HostedVespaClusterPolicy(flagSource, zone);
policy.verifyGroupGoingDownIsFine(clusterApi);
}
@@ -180,7 +182,7 @@ public class ClusterApiImplTest {
ServiceStatus.DOWN,
ServiceStatus.UP);
- HostedVespaClusterPolicy policy = new HostedVespaClusterPolicy(flagSource);
+ HostedVespaClusterPolicy policy = new HostedVespaClusterPolicy(flagSource, zone);
policy.verifyGroupGoingDownIsFine(clusterApi);
}
@@ -189,7 +191,7 @@ public class ClusterApiImplTest {
public void testSingleConfigServerCanSuspend() {
for (var status : EnumSet.of(ServiceStatus.UP, ServiceStatus.DOWN)) {
var clusterApi = makeConfigClusterApi(1, status);
- var policy = new HostedVespaClusterPolicy(flagSource);
+ var policy = new HostedVespaClusterPolicy(flagSource, zone);
try {
policy.verifyGroupGoingDownIsFine(clusterApi);
} catch (HostStateChangeDeniedException e) {
@@ -307,7 +309,6 @@ public class ClusterApiImplTest {
assertTrue(clusterApi.isStorageCluster());
assertEquals(Optional.of(hostName1), clusterApi.storageNodeInGroup().map(storageNode -> storageNode.hostName()));
- assertEquals(Optional.of(hostName1), clusterApi.upStorageNodeInGroup().map(storageNode -> storageNode.hostName()));
}
private ClusterApiImpl makeConfigClusterApi(int clusterSize, ServiceStatus first, ServiceStatus... rest) {
diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/model/ModelTestUtils.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/model/ModelTestUtils.java
index 0f71def5d2e..7cb20350845 100644
--- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/model/ModelTestUtils.java
+++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/model/ModelTestUtils.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.orchestrator.model;
+import com.yahoo.config.provision.Zone;
import com.yahoo.jdisc.Metric;
import com.yahoo.jdisc.test.TestTimer;
import com.yahoo.test.ManualClock;
@@ -65,7 +66,9 @@ class ModelTestUtils {
mock(Metric.class),
new TestTimer(),
new DummyAntiServiceMonitor());
- private final Orchestrator orchestrator = new OrchestratorImpl(new HostedVespaPolicy(new HostedVespaClusterPolicy(flagSource), clusterControllerClientFactory, applicationApiFactory()),
+ private final Orchestrator orchestrator = new OrchestratorImpl(new HostedVespaPolicy(new HostedVespaClusterPolicy(flagSource, Zone.defaultZone()),
+ clusterControllerClientFactory,
+ applicationApiFactory()),
clusterControllerClientFactory,
statusService,
serviceMonitor,
diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicyTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicyTest.java
index 97c1c7ab5de..a100b3efc52 100644
--- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicyTest.java
+++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicyTest.java
@@ -2,6 +2,8 @@
package com.yahoo.vespa.orchestrator.policy;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.applicationmodel.ClusterId;
import com.yahoo.vespa.applicationmodel.HostName;
import com.yahoo.vespa.applicationmodel.ServiceType;
@@ -23,14 +25,17 @@ import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
public class HostedVespaClusterPolicyTest {
- private ApplicationApi applicationApi = mock(ApplicationApi.class);
- private ClusterApi clusterApi = mock(ClusterApi.class);
+
+ private final ApplicationApi applicationApi = mock(ApplicationApi.class);
+ private final ClusterApi clusterApi = mock(ClusterApi.class);
+ private final Zone zone = mock(Zone.class);
private final InMemoryFlagSource flagSource = new InMemoryFlagSource();
- private HostedVespaClusterPolicy policy = spy(new HostedVespaClusterPolicy(flagSource));
+ private final HostedVespaClusterPolicy policy = spy(new HostedVespaClusterPolicy(flagSource, zone));
@Before
public void setUp() {
when(clusterApi.getApplication()).thenReturn(applicationApi);
+ when(zone.system()).thenReturn(SystemName.main);
}
@Test
@@ -72,6 +77,11 @@ public class HostedVespaClusterPolicyTest {
when(clusterApi.isStorageCluster()).thenReturn(false);
assertEquals(ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT,
policy.getConcurrentSuspensionLimit(clusterApi, false));
+
+
+ when(zone.system()).thenReturn(SystemName.cd);
+ assertEquals(ConcurrentSuspensionLimitForCluster.FIFTY_PERCENT,
+ policy.getConcurrentSuspensionLimit(clusterApi, false));
}
@Test
diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicyTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicyTest.java
index 6f34817930c..385f7b238af 100644
--- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicyTest.java
+++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicyTest.java
@@ -68,7 +68,7 @@ public class HostedVespaPolicyTest {
when(storageNode1.hostName()).thenReturn(hostName3);
List<StorageNode> upStorageNodes = Arrays.asList(storageNode1, storageNode3);
- when(applicationApi.getUpStorageNodesInGroupInClusterOrder()).thenReturn(upStorageNodes);
+ when(applicationApi.getNoRemarksStorageNodesInGroupInClusterOrder()).thenReturn(upStorageNodes);
// setHostState
List<HostName> noRemarksHostNames = Arrays.asList(hostName1, hostName2, hostName3);
@@ -84,7 +84,7 @@ public class HostedVespaPolicyTest {
order.verify(clusterPolicy).verifyGroupGoingDownIsFine(clusterApi2);
order.verify(clusterPolicy).verifyGroupGoingDownIsFine(clusterApi3);
- order.verify(applicationApi).getUpStorageNodesInGroupInClusterOrder();
+ order.verify(applicationApi).getNoRemarksStorageNodesInGroupInClusterOrder();
order.verify(storageNode1).setNodeState(context, ClusterControllerNodeState.MAINTENANCE);
order.verify(storageNode3).setNodeState(context, ClusterControllerNodeState.MAINTENANCE);
diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/ApplicationSuspensionRequestHandlerTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/ApplicationSuspensionRequestHandlerTest.java
index 864516acbc5..76d00943e07 100644
--- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/ApplicationSuspensionRequestHandlerTest.java
+++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/ApplicationSuspensionRequestHandlerTest.java
@@ -2,6 +2,7 @@ package com.yahoo.vespa.orchestrator.resources;// Copyright Verizon Media. Licen
import com.fasterxml.jackson.core.type.TypeReference;
import com.yahoo.cloud.config.ConfigserverConfig;
+import com.yahoo.config.provision.Zone;
import com.yahoo.container.jdisc.HttpRequestBuilder;
import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.jdisc.core.SystemTimer;
@@ -48,7 +49,8 @@ class ApplicationSuspensionRequestHandlerTest {
new OrchestratorConfig(new OrchestratorConfig.Builder()),
serviceMonitor,
new ConfigserverConfig(new ConfigserverConfig.Builder()),
- new InMemoryFlagSource());
+ new InMemoryFlagSource(),
+ Zone.defaultZone());
var handler = new ApplicationSuspensionRequestHandler(RestApiTestDriver.createHandlerTestContext(), orchestrator);
testDriver = RestApiTestDriver.newBuilder(handler).build();
}
diff --git a/parent/pom.xml b/parent/pom.xml
index 322548194d9..25851a9553e 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -467,6 +467,11 @@
<version>1.4</version>
</dependency>
<dependency>
+ <groupId>com.microsoft.onnxruntime</groupId>
+ <artifactId>onnxruntime</artifactId>
+ <version>${onnxruntime.version}</version>
+ </dependency>
+ <dependency>
<groupId>com.optimaize.languagedetector</groupId>
<artifactId>language-detector</artifactId>
<version>0.6</version>
@@ -537,6 +542,18 @@
<version>${prometheus.client.version}</version>
</dependency>
<dependency>
+ <!-- TODO: Try to remove, as this overlaps with javax.activation. -->
+ <groupId>jakarta.activation</groupId>
+ <artifactId>jakarta.activation-api</artifactId>
+ <version>1.2.1</version>
+ </dependency>
+ <dependency>
+ <!-- TODO: Try to remove, as this conflicts with javax.xml.bind:jaxb-api -->
+ <groupId>jakarta.xml.bind</groupId>
+ <artifactId>jakarta.xml.bind-api</artifactId>
+ <version>2.3.2</version>
+ </dependency>
+ <dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.12</version>
@@ -839,7 +856,7 @@
<apache.httpclient5.version>5.0.3</apache.httpclient5.version>
<asm.version>9.1</asm.version>
<!-- Athenz dependencies. Make sure these dependencies match those in Vespa's internal repositories -->
- <athenz.version>1.10.11</athenz.version>
+ <athenz.version>1.10.14</athenz.version>
<jjwt.version>0.11.2</jjwt.version>
<aws.sdk.version>1.11.974</aws.sdk.version>
<!-- WARNING: If you change curator version, you also need to update
@@ -864,10 +881,11 @@
<maven-javadoc-plugin.version>3.0.1</maven-javadoc-plugin.version>
<maven-plugin-tools.version>3.6.0</maven-plugin-tools.version>
<maven-resources-plugin.version>2.7</maven-resources-plugin.version>
- <maven-shade-plugin.version>3.2.1</maven-shade-plugin.version>
+ <maven-shade-plugin.version>3.2.4</maven-shade-plugin.version>
<maven-site-plugin.version>3.3</maven-site-plugin.version>
<maven-source-plugin.version>3.0.1</maven-source-plugin.version>
<prometheus.client.version>0.6.0</prometheus.client.version>
+ <onnxruntime.version>1.7.0</onnxruntime.version>
<protobuf.version>3.11.4</protobuf.version>
<spifly.version>1.3.3</spifly.version>
<surefire.version>2.22.0</surefire.version>
diff --git a/pom.xml b/pom.xml
index d20aac32ba0..c77842a149d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -132,6 +132,7 @@
<module>vespaclient-java</module>
<module>vespa-athenz</module>
<module>vespa-documentgen-plugin</module>
+ <module>vespa-feed-client</module>
<module>vespa-hadoop</module>
<module>vespa-http-client</module>
<module>vespa-maven-plugin</module>
diff --git a/screwdriver.yaml b/screwdriver.yaml
index 3b492f82fa1..1bf5804e0f6 100644
--- a/screwdriver.yaml
+++ b/screwdriver.yaml
@@ -12,7 +12,7 @@ jobs:
screwdriver.cd/cpu: 7
screwdriver.cd/ram: 16
screwdriver.cd/disk: HIGH
- screwdriver.cd/timeout: 600
+ screwdriver.cd/timeout: 60
environment:
USER_SHELL_BIN: bash
@@ -47,14 +47,7 @@ jobs:
ccache -z
- compile: |
- export TRAVIS_REPO_SLUG="vespa-engine/vespa"
- if [[ -z $SD_PULL_REQUEST ]]; then
- export TRAVIS_PULL_REQUEST=false
- else
- export TRAVIS_PULL_REQUEST=$SD_PULL_REQUEST
- fi
-
- travis/travis-build.sh
+ screwdriver/build-vespa.sh
- save-cache: |
if [[ -z "$SD_PULL_REQUEST" ]]; then
diff --git a/travis/travis-build.sh b/screwdriver/build-vespa.sh
index 9537118ae67..4480b33e6f9 100755
--- a/travis/travis-build.sh
+++ b/screwdriver/build-vespa.sh
@@ -17,7 +17,7 @@ ccache --max-size=1600M
ccache --set-config=compression=true
ccache -p
-if ! source $SOURCE_DIR/travis/detect-what-to-build.sh; then
+if ! source $SOURCE_DIR/screwdriver/detect-what-to-build.sh; then
echo "Could not detect what to build."
SHOULD_BUILD=all
fi
diff --git a/travis/detect-what-to-build.sh b/screwdriver/detect-what-to-build.sh
index 47cb8ccfa00..1c4809f7295 100755
--- a/travis/detect-what-to-build.sh
+++ b/screwdriver/detect-what-to-build.sh
@@ -6,18 +6,18 @@ if (( ${#BASH_SOURCE[@]} == 1 )); then
exit 1
fi
-if [[ $TRAVIS_PULL_REQUEST == false ]]; then
+if [[ -z $SD_PULL_REQUEST ]]; then
export SHOULD_BUILD=all
return 0
fi
-JSON=$(curl -sLf https://api.github.com/repos/$TRAVIS_REPO_SLUG/pulls/$TRAVIS_PULL_REQUEST)
+JSON=$(curl -sLf https://api.github.com/repos/vespa-engine/vespa/pulls/$SD_PULL_REQUEST)
PR_TITLE=$(jq -re '.title' <<< "$JSON")
-JSON=$(curl -sLf https://api.github.com/repos/$TRAVIS_REPO_SLUG/pulls/$TRAVIS_PULL_REQUEST/commits)
+JSON=$(curl -sLf https://api.github.com/repos/vespa-engine/vespa/pulls/$SD_PULL_REQUEST/commits)
COMMITS=$(jq -re '.[].sha' <<< "$JSON")
-FILES=$(for C in $COMMITS; do JSON=$(curl -sLf https://api.github.com/repos/$TRAVIS_REPO_SLUG/commits/$C); jq -re '.files[].filename' <<< "$JSON"; done)
+FILES=$(for C in $COMMITS; do JSON=$(curl -sLf https://api.github.com/repos/vespa-engine/vespa/commits/$C); jq -re '.files[].filename' <<< "$JSON"; done)
if [[ $PR_TITLE =~ \[run-systemtest\] ]]; then
SHOULD_BUILD=systemtest
diff --git a/searchcommon/src/vespa/searchcommon/common/compaction_strategy.h b/searchcommon/src/vespa/searchcommon/common/compaction_strategy.h
index 1c680d91da7..1d52e4bf659 100644
--- a/searchcommon/src/vespa/searchcommon/common/compaction_strategy.h
+++ b/searchcommon/src/vespa/searchcommon/common/compaction_strategy.h
@@ -16,7 +16,7 @@ private:
double _maxDeadAddressSpaceRatio; // Max ratio of dead address space before compaction
public:
CompactionStrategy() noexcept
- : _maxDeadBytesRatio(0.15),
+ : _maxDeadBytesRatio(0.05),
_maxDeadAddressSpaceRatio(0.2)
{
}
diff --git a/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp b/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp
index 4191d9bc442..6e346bcfa60 100644
--- a/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp
+++ b/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp
@@ -286,6 +286,7 @@ class BMParams {
bool _use_storage_chain;
bool _use_async_message_handling_on_schedule;
uint32_t _bucket_db_stripe_bits;
+ uint32_t _distributor_stripes;
uint32_t get_start(uint32_t thread_id) const {
return (_documents / _client_threads) * thread_id + std::min(thread_id, _documents % _client_threads);
}
@@ -310,7 +311,8 @@ public:
_use_message_bus(false),
_use_storage_chain(false),
_use_async_message_handling_on_schedule(false),
- _bucket_db_stripe_bits(0)
+ _bucket_db_stripe_bits(0),
+ _distributor_stripes(0)
{
}
BMRange get_range(uint32_t thread_id) const {
@@ -335,6 +337,7 @@ public:
bool get_use_storage_chain() const { return _use_storage_chain; }
bool get_use_async_message_handling_on_schedule() const { return _use_async_message_handling_on_schedule; }
uint32_t get_bucket_db_stripe_bits() const { return _bucket_db_stripe_bits; }
+ uint32_t get_distributor_stripes() const { return _distributor_stripes; }
void set_documents(uint32_t documents_in) { _documents = documents_in; }
void set_max_pending(uint32_t max_pending_in) { _max_pending = max_pending_in; }
void set_client_threads(uint32_t threads_in) { _client_threads = threads_in; }
@@ -355,6 +358,7 @@ public:
void set_use_storage_chain(bool value) { _use_storage_chain = value; }
void set_use_async_message_handling_on_schedule(bool value) { _use_async_message_handling_on_schedule = value; }
void set_bucket_db_stripe_bits(uint32_t value) { _bucket_db_stripe_bits = value; }
+ void set_distributor_stripes(uint32_t value) { _distributor_stripes = value; }
bool check() const;
bool needs_service_layer() const { return _enable_service_layer || _enable_distributor || _use_storage_chain || _use_message_bus || _use_document_api; }
bool needs_distributor() const { return _enable_distributor || _use_document_api; }
@@ -573,6 +577,7 @@ struct MyDistributorConfig : public MyStorageConfig
stor_distributormanager(),
stor_visitordispatcher()
{
+ stor_distributormanager.numDistributorStripes = params.get_distributor_stripes();
}
~MyDistributorConfig();
@@ -1361,8 +1366,9 @@ App::usage()
"USAGE:\n";
std::cerr <<
"vespa-feed-bm\n"
- "[--bucket-db-stripe-bits]\n"
+ "[--bucket-db-stripe-bits bits]\n"
"[--client-threads threads]\n"
+ "[--distributor-stripes stripes]\n"
"[--get-passes get-passes]\n"
"[--indexing-sequencer [latency,throughput,adaptive]]\n"
"[--max-pending max-pending]\n"
@@ -1392,6 +1398,7 @@ App::get_options()
static struct option long_opts[] = {
{ "bucket-db-stripe-bits", 1, nullptr, 0 },
{ "client-threads", 1, nullptr, 0 },
+ { "distributor-stripes", 1, nullptr, 0 },
{ "documents", 1, nullptr, 0 },
{ "enable-distributor", 0, nullptr, 0 },
{ "enable-service-layer", 0, nullptr, 0 },
@@ -1414,6 +1421,7 @@ App::get_options()
enum longopts_enum {
LONGOPT_BUCKET_DB_STRIPE_BITS,
LONGOPT_CLIENT_THREADS,
+ LONGOPT_DISTRIBUTOR_STRIPES,
LONGOPT_DOCUMENTS,
LONGOPT_ENABLE_DISTRIBUTOR,
LONGOPT_ENABLE_SERVICE_LAYER,
@@ -1445,6 +1453,9 @@ App::get_options()
case LONGOPT_CLIENT_THREADS:
_bm_params.set_client_threads(atoi(opt_argument));
break;
+ case LONGOPT_DISTRIBUTOR_STRIPES:
+ _bm_params.set_distributor_stripes(atoi(opt_argument));
+ break;
case LONGOPT_DOCUMENTS:
_bm_params.set_documents(atoi(opt_argument));
break;
diff --git a/searchcore/src/tests/proton/documentdb/documentbucketmover/CMakeLists.txt b/searchcore/src/tests/proton/documentdb/documentbucketmover/CMakeLists.txt
index d11da09b737..1aa0b1c585d 100644
--- a/searchcore/src/tests/proton/documentdb/documentbucketmover/CMakeLists.txt
+++ b/searchcore/src/tests/proton/documentdb/documentbucketmover/CMakeLists.txt
@@ -17,18 +17,6 @@ vespa_add_executable(searchcore_documentbucketmover_test_app TEST
)
vespa_add_test(NAME searchcore_documentbucketmover_test_app COMMAND searchcore_documentbucketmover_test_app)
-vespa_add_executable(searchcore_documentbucketmover_v2_test_app TEST
- SOURCES
- documentbucketmover_v2_test.cpp
- DEPENDS
- searchcore_bucketmover_test
- searchcore_test
- searchcore_server
- searchcore_feedoperation
- GTest::GTest
-)
-vespa_add_test(NAME searchcore_documentbucketmover_v2_test_app COMMAND searchcore_documentbucketmover_v2_test_app)
-
vespa_add_executable(searchcore_scaniterator_test_app TEST
SOURCES
scaniterator_test.cpp
diff --git a/searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_test.cpp b/searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_test.cpp
index bb7180dadf1..64398503dfa 100644
--- a/searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_test.cpp
@@ -2,10 +2,16 @@
#include "bucketmover_common.h"
#include <vespa/searchcore/proton/server/bucketmovejob.h>
+#include <vespa/searchcore/proton/server/executor_thread_service.h>
#include <vespa/searchcore/proton/server/document_db_maintenance_config.h>
+#include <vespa/persistence/dummyimpl/dummy_bucket_executor.h>
+#include <vespa/vespalib/util/threadstackexecutor.h>
+#include <vespa/vespalib/util/lambdatask.h>
#include <vespa/vespalib/gtest/gtest.h>
#include <vespa/log/log.h>
+#include <vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h>
+
LOG_SETUP("document_bucket_mover_test");
using namespace proton;
@@ -16,47 +22,8 @@ using proton::bucketdb::BucketCreateNotifier;
using storage::spi::BucketInfo;
using BlockedReason = IBlockableMaintenanceJob::BlockedReason;
using MoveOperationVector = std::vector<MoveOperation>;
-
-struct MyFrozenBucketHandler : public IFrozenBucketHandler
-{
- std::set<BucketId> _frozen;
- std::set<IBucketFreezeListener *> _listeners;
-
- MyFrozenBucketHandler()
- : IFrozenBucketHandler(),
- _frozen(),
- _listeners()
- {
- }
-
- ~MyFrozenBucketHandler() override {
- assert(_listeners.empty());
- }
-
- MyFrozenBucketHandler &addFrozen(const BucketId &bucket) {
- _frozen.insert(bucket);
- return *this;
- }
- MyFrozenBucketHandler &remFrozen(const BucketId &bucket) {
- _frozen.erase(bucket);
- for (auto &listener : _listeners) {
- listener->notifyThawedBucket(bucket);
- }
- return *this;
- }
- void addListener(IBucketFreezeListener *listener) override {
- _listeners.insert(listener);
- }
- void removeListener(IBucketFreezeListener *listener) override {
- _listeners.erase(listener);
- }
-
- ExclusiveBucketGuard::UP acquireExclusiveBucket(BucketId bucket) override {
- return (_frozen.count(bucket) != 0)
- ? ExclusiveBucketGuard::UP()
- : std::make_unique<ExclusiveBucketGuard>(bucket);
- }
-};
+using storage::spi::dummy::DummyBucketExecutor;
+using vespalib::ThreadStackExecutor;
struct ControllerFixtureBase : public ::testing::Test
{
@@ -66,13 +33,17 @@ struct ControllerFixtureBase : public ::testing::Test
test::BucketHandler _bucketHandler;
MyBucketModifiedHandler _modifiedHandler;
std::shared_ptr<bucketdb::BucketDBOwner> _bucketDB;
- MyMoveHandler _moveHandler;
MySubDb _ready;
MySubDb _notReady;
- MyFrozenBucketHandler _fbh;
BucketCreateNotifier _bucketCreateNotifier;
test::DiskMemUsageNotifier _diskMemUsageNotifier;
- std::shared_ptr<BucketMoveJob> _bmj;
+ MonitoredRefCount _refCount;
+ ThreadStackExecutor _singleExecutor;
+ ExecutorThreadService _master;
+ DummyBucketExecutor _bucketExecutor;
+ MyMoveHandler _moveHandler;
+ DocumentDBTaggedMetrics _metrics;
+ std::shared_ptr<BucketMoveJob> _bmj;
MyCountJobRunner _runner;
ControllerFixtureBase(const BlockableMaintenanceJobConfig &blockableConfig, bool storeMoveDoneContexts);
~ControllerFixtureBase();
@@ -91,15 +62,6 @@ struct ControllerFixtureBase : public ::testing::Test
_clusterStateHandler.notifyClusterStateChanged(_calc);
return *this;
}
- ControllerFixtureBase &addFrozen(const BucketId &bucket) {
- _fbh.addFrozen(bucket);
- return *this;
- }
- ControllerFixtureBase &remFrozen(const BucketId &bucket) {
- _fbh.remFrozen(bucket);
- _bmj->notifyThawedBucket(bucket);
- return *this;
- }
ControllerFixtureBase &activateBucket(const BucketId &bucket) {
_ready.setBucketState(bucket, true);
_bucketHandler.notifyBucketStateChanged(bucket, BucketInfo::ActiveState::ACTIVE);
@@ -110,6 +72,14 @@ struct ControllerFixtureBase : public ::testing::Test
_bucketHandler.notifyBucketStateChanged(bucket, BucketInfo::ActiveState::NOT_ACTIVE);
return *this;
}
+ void failRetrieveForLid(uint32_t lid) {
+ _ready.failRetrieveForLid(lid);
+ _notReady.failRetrieveForLid(lid);
+ }
+ void fixRetriever() {
+ _ready.failRetrieveForLid(0);
+ _notReady.failRetrieveForLid(0);
+ }
const MoveOperationVector &docsMoved() const {
return _moveHandler._moves;
}
@@ -119,10 +89,24 @@ struct ControllerFixtureBase : public ::testing::Test
const BucketId::List &calcAsked() const {
return _calc->asked();
}
+ size_t numPending() {
+ _bmj->updateMetrics(_metrics);
+ return _metrics.bucketMove.bucketsPending.getLast();
+ }
void runLoop() {
while (!_bmj->isBlocked() && !_bmj->run()) {
}
}
+ void sync() {
+ _bucketExecutor.sync();
+ _master.sync();
+ _master.sync(); // Handle that master schedules onto master again
+ }
+ template <typename FunctionType>
+ void masterExecute(FunctionType &&function) {
+ _master.execute(vespalib::makeLambdaTask(std::forward<FunctionType>(function)));
+ _master.sync();
+ }
};
ControllerFixtureBase::ControllerFixtureBase(const BlockableMaintenanceJobConfig &blockableConfig, bool storeMoveDoneContexts)
@@ -131,15 +115,19 @@ ControllerFixtureBase::ControllerFixtureBase(const BlockableMaintenanceJobConfig
_bucketHandler(),
_modifiedHandler(),
_bucketDB(std::make_shared<bucketdb::BucketDBOwner>()),
- _moveHandler(*_bucketDB, storeMoveDoneContexts),
_ready(_builder.getRepo(), _bucketDB, 1, SubDbType::READY),
_notReady(_builder.getRepo(), _bucketDB, 2, SubDbType::NOTREADY),
- _fbh(),
_bucketCreateNotifier(),
_diskMemUsageNotifier(),
- _bmj(std::make_shared<BucketMoveJob>(_calc, _moveHandler, _modifiedHandler, _ready._subDb, _notReady._subDb,
- _fbh, _bucketCreateNotifier, _clusterStateHandler, _bucketHandler,
- _diskMemUsageNotifier, blockableConfig, "test", makeBucketSpace())),
+ _refCount(),
+ _singleExecutor(1, 0x10000),
+ _master(_singleExecutor),
+ _bucketExecutor(4),
+ _moveHandler(*_bucketDB, storeMoveDoneContexts),
+ _metrics("test", 1),
+ _bmj(BucketMoveJob::create(_calc, RetainGuard(_refCount), _moveHandler, _modifiedHandler, _master, _bucketExecutor, _ready._subDb,
+ _notReady._subDb, _bucketCreateNotifier, _clusterStateHandler, _bucketHandler,
+ _diskMemUsageNotifier, blockableConfig, "test", makeBucketSpace())),
_runner(*_bmj)
{
}
@@ -178,11 +166,14 @@ struct OnlyReadyControllerFixture : public ControllerFixtureBase
TEST_F(ControllerFixture, require_that_nothing_is_moved_if_bucket_state_says_so)
{
- EXPECT_FALSE(_bmj->done());
+ EXPECT_TRUE(_bmj->done());
addReady(_ready.bucket(1));
addReady(_ready.bucket(2));
- _bmj->scanAndMove(4, 3);
- EXPECT_TRUE(_bmj->done());
+ _bmj->recompute();
+ masterExecute([this]() {
+ EXPECT_TRUE(_bmj->scanAndMove(4, 3));
+ EXPECT_TRUE(_bmj->done());
+ });
EXPECT_TRUE(docsMoved().empty());
EXPECT_TRUE(bucketsModified().empty());
}
@@ -193,13 +184,22 @@ TEST_F(ControllerFixture, require_that_not_ready_bucket_is_moved_to_ready_if_buc
addReady(_ready.bucket(1));
addReady(_ready.bucket(2));
addReady(_notReady.bucket(4));
- _bmj->scanAndMove(4, 3);
- EXPECT_TRUE(_bmj->done());
+
+ EXPECT_EQ(0, numPending());
+ _bmj->recompute();
+ EXPECT_EQ(1, numPending());
+ masterExecute([this]() {
+ EXPECT_FALSE(_bmj->done());
+ EXPECT_TRUE(_bmj->scanAndMove(4, 3));
+ EXPECT_TRUE(_bmj->done());
+ });
+ sync();
+ EXPECT_EQ(0, numPending());
EXPECT_EQ(3u, docsMoved().size());
assertEqual(_notReady.bucket(4), _notReady.docs(4)[0], 2, 1, docsMoved()[0]);
assertEqual(_notReady.bucket(4), _notReady.docs(4)[1], 2, 1, docsMoved()[1]);
assertEqual(_notReady.bucket(4), _notReady.docs(4)[2], 2, 1, docsMoved()[2]);
- EXPECT_EQ(1u, bucketsModified().size());
+ ASSERT_EQ(1u, bucketsModified().size());
EXPECT_EQ(_notReady.bucket(4), bucketsModified()[0]);
}
@@ -207,8 +207,13 @@ TEST_F(ControllerFixture, require_that_ready_bucket_is_moved_to_not_ready_if_buc
{
// bucket 2 should be moved
addReady(_ready.bucket(1));
- _bmj->scanAndMove(4, 3);
- EXPECT_FALSE(_bmj->done());
+ _bmj->recompute();
+ masterExecute([this]() {
+ EXPECT_FALSE(_bmj->done());
+ EXPECT_TRUE(_bmj->scanAndMove(4, 3));
+ EXPECT_TRUE(_bmj->done());
+ });
+ sync();
EXPECT_EQ(2u, docsMoved().size());
assertEqual(_ready.bucket(2), _ready.docs(2)[0], 1, 2, docsMoved()[0]);
assertEqual(_ready.bucket(2), _ready.docs(2)[1], 1, 2, docsMoved()[1]);
@@ -216,30 +221,33 @@ TEST_F(ControllerFixture, require_that_ready_bucket_is_moved_to_not_ready_if_buc
EXPECT_EQ(_ready.bucket(2), bucketsModified()[0]);
}
-TEST_F(ControllerFixture, require_that_maxBucketsToScan_is_taken_into_consideration_between_not_ready_and_ready_scanning)
+TEST_F(ControllerFixture, require_that_bucket_is_moved_even_with_error)
{
- // bucket 4 should moved (last bucket)
+ // bucket 2 should be moved
addReady(_ready.bucket(1));
- addReady(_ready.bucket(2));
- addReady(_notReady.bucket(4));
-
- // buckets 1, 2, and 3 considered
- _bmj->scanAndMove(3, 3);
+ _bmj->recompute();
+ failRetrieveForLid(5);
+ masterExecute([this]() {
+ EXPECT_FALSE(_bmj->done());
+ EXPECT_TRUE(_bmj->scanAndMove(4, 3));
+ EXPECT_TRUE(_bmj->done());
+ });
+ sync();
EXPECT_FALSE(_bmj->done());
- EXPECT_EQ(0u, docsMoved().size());
- EXPECT_EQ(0u, bucketsModified().size());
-
- // move bucket 4
- _bmj->scanAndMove(1, 4);
- EXPECT_TRUE(_bmj->done());
- EXPECT_EQ(3u, docsMoved().size());
- assertEqual(_notReady.bucket(4), _notReady.docs(4)[0], 2, 1, docsMoved()[0]);
- assertEqual(_notReady.bucket(4), _notReady.docs(4)[1], 2, 1, docsMoved()[1]);
- assertEqual(_notReady.bucket(4), _notReady.docs(4)[2], 2, 1, docsMoved()[2]);
+ fixRetriever();
+ masterExecute([this]() {
+ EXPECT_TRUE(_bmj->scanAndMove(4, 3));
+ EXPECT_TRUE(_bmj->done());
+ });
+ sync();
+ EXPECT_EQ(2u, docsMoved().size());
+ assertEqual(_ready.bucket(2), _ready.docs(2)[0], 1, 2, docsMoved()[0]);
+ assertEqual(_ready.bucket(2), _ready.docs(2)[1], 1, 2, docsMoved()[1]);
EXPECT_EQ(1u, bucketsModified().size());
- EXPECT_EQ(_notReady.bucket(4), bucketsModified()[0]);
+ EXPECT_EQ(_ready.bucket(2), bucketsModified()[0]);
}
+
TEST_F(ControllerFixture, require_that_we_move_buckets_in_several_steps)
{
// bucket 2, 3, and 4 should be moved
@@ -247,467 +255,190 @@ TEST_F(ControllerFixture, require_that_we_move_buckets_in_several_steps)
addReady(_notReady.bucket(3));
addReady(_notReady.bucket(4));
- // consider move bucket 1
- _bmj->scanAndMove(1, 2);
- EXPECT_FALSE(_bmj->done());
- EXPECT_EQ(0u, docsMoved().size());
- EXPECT_EQ(0u, bucketsModified().size());
+ _bmj->recompute();
+ EXPECT_EQ(3, numPending());
+ masterExecute([this]() {
+ EXPECT_FALSE(_bmj->done());
- // move bucket 2, docs 1,2
- _bmj->scanAndMove(1, 2);
- EXPECT_FALSE(_bmj->done());
+ EXPECT_FALSE(_bmj->scanAndMove(1, 2));
+ EXPECT_FALSE(_bmj->done());
+ });
+ sync();
+ EXPECT_EQ(2, numPending());
EXPECT_EQ(2u, docsMoved().size());
- EXPECT_TRUE(assertEqual(_ready.bucket(2), _ready.docs(2)[0], 1, 2, docsMoved()[0]));
- EXPECT_TRUE(assertEqual(_ready.bucket(2), _ready.docs(2)[1], 1, 2, docsMoved()[1]));
- EXPECT_EQ(1u, bucketsModified().size());
- EXPECT_EQ(_ready.bucket(2), bucketsModified()[0]);
- // move bucket 3, docs 1,2
- _bmj->scanAndMove(1, 2);
- EXPECT_FALSE(_bmj->done());
+ masterExecute([this]() {
+ EXPECT_FALSE(_bmj->scanAndMove(1, 2));
+ EXPECT_FALSE(_bmj->done());
+ });
+ sync();
+ EXPECT_EQ(2, numPending());
EXPECT_EQ(4u, docsMoved().size());
- EXPECT_TRUE(assertEqual(_notReady.bucket(3), _notReady.docs(3)[0], 2, 1, docsMoved()[2]));
- EXPECT_TRUE(assertEqual(_notReady.bucket(3), _notReady.docs(3)[1], 2, 1, docsMoved()[3]));
- EXPECT_EQ(2u, bucketsModified().size());
- EXPECT_EQ(_notReady.bucket(3), bucketsModified()[1]);
- // move bucket 4, docs 1,2
- _bmj->scanAndMove(1, 2);
- EXPECT_FALSE(_bmj->done());
+ masterExecute([this]() {
+ EXPECT_FALSE(_bmj->scanAndMove(1, 2));
+ EXPECT_FALSE(_bmj->done());
+ });
+ sync();
+ EXPECT_EQ(1, numPending());
EXPECT_EQ(6u, docsMoved().size());
- EXPECT_TRUE(assertEqual(_notReady.bucket(4), _notReady.docs(4)[0], 2, 1, docsMoved()[4]));
- EXPECT_TRUE(assertEqual(_notReady.bucket(4), _notReady.docs(4)[1], 2, 1, docsMoved()[5]));
- EXPECT_EQ(2u, bucketsModified().size());
// move bucket 4, docs 3
- _bmj->scanAndMove(1, 2);
- EXPECT_TRUE(_bmj->done());
+ masterExecute([this]() {
+ EXPECT_TRUE(_bmj->scanAndMove(1, 2));
+ EXPECT_TRUE(_bmj->done());
+ });
+ sync();
+ EXPECT_EQ(0, numPending());
EXPECT_EQ(7u, docsMoved().size());
- EXPECT_TRUE(assertEqual(_notReady.bucket(4), _notReady.docs(4)[2], 2, 1, docsMoved()[6]));
EXPECT_EQ(3u, bucketsModified().size());
+ EXPECT_EQ(_ready.bucket(2), bucketsModified()[0]);
+ EXPECT_EQ(_notReady.bucket(3), bucketsModified()[1]);
EXPECT_EQ(_notReady.bucket(4), bucketsModified()[2]);
}
-TEST_F(ControllerFixture, require_that_we_can_change_calculator_and_continue_scanning_where_we_left_off)
-{
- // no buckets should move
- // original scan sequence is bucket1, bucket2, bucket3, bucket4
- addReady(_ready.bucket(1));
- addReady(_ready.bucket(2));
-
- // start with bucket2
- _bmj->scanAndMove(1, 0);
- changeCalc();
- _bmj->scanAndMove(5, 0);
- EXPECT_TRUE(_bmj->done());
- EXPECT_EQ(4u, calcAsked().size());
- EXPECT_EQ(_ready.bucket(2), calcAsked()[0]);
- EXPECT_EQ(_notReady.bucket(3), calcAsked()[1]);
- EXPECT_EQ(_notReady.bucket(4), calcAsked()[2]);
- EXPECT_EQ(_ready.bucket(1), calcAsked()[3]);
-
- // start with bucket3
- changeCalc();
- _bmj->scanAndMove(2, 0);
- changeCalc();
- _bmj->scanAndMove(5, 0);
- EXPECT_TRUE(_bmj->done());
- EXPECT_EQ(4u, calcAsked().size());
- EXPECT_EQ(_notReady.bucket(3), calcAsked()[0]);
- EXPECT_EQ(_notReady.bucket(4), calcAsked()[1]);
- EXPECT_EQ(_ready.bucket(1), calcAsked()[2]);
- EXPECT_EQ(_ready.bucket(2), calcAsked()[3]);
-
- // start with bucket4
- changeCalc();
- _bmj->scanAndMove(3, 0);
- changeCalc();
- _bmj->scanAndMove(5, 0);
- EXPECT_TRUE(_bmj->done());
- EXPECT_EQ(4u, calcAsked().size());
- EXPECT_EQ(_notReady.bucket(4), calcAsked()[0]);
- EXPECT_EQ(_ready.bucket(1), calcAsked()[1]);
- EXPECT_EQ(_ready.bucket(2), calcAsked()[2]);
- EXPECT_EQ(_notReady.bucket(3), calcAsked()[3]);
-
- // start with bucket1
- changeCalc();
- _bmj->scanAndMove(5, 0);
- EXPECT_TRUE(_bmj->done());
- EXPECT_EQ(4u, calcAsked().size());
- EXPECT_EQ(_ready.bucket(1), calcAsked()[0]);
- EXPECT_EQ(_ready.bucket(2), calcAsked()[1]);
- EXPECT_EQ(_notReady.bucket(3), calcAsked()[2]);
- EXPECT_EQ(_notReady.bucket(4), calcAsked()[3]);
-
- // change calc in second pass
- changeCalc();
- _bmj->scanAndMove(3, 0);
- changeCalc();
- _bmj->scanAndMove(2, 0);
- EXPECT_FALSE(_bmj->done());
- EXPECT_EQ(2u, calcAsked().size());
- EXPECT_EQ(_notReady.bucket(4), calcAsked()[0]);
- EXPECT_EQ(_ready.bucket(1), calcAsked()[1]);
- changeCalc();
- _bmj->scanAndMove(5, 0);
- EXPECT_TRUE(_bmj->done());
- EXPECT_EQ(4u, calcAsked().size());
- EXPECT_EQ(_ready.bucket(2), calcAsked()[0]);
- EXPECT_EQ(_notReady.bucket(3), calcAsked()[1]);
- EXPECT_EQ(_notReady.bucket(4), calcAsked()[2]);
- EXPECT_EQ(_ready.bucket(1), calcAsked()[3]);
-
- // check 1 bucket at a time, start with bucket2
- changeCalc();
- _bmj->scanAndMove(1, 0);
- changeCalc();
- _bmj->scanAndMove(1, 0);
- EXPECT_FALSE(_bmj->done());
- EXPECT_EQ(1u, calcAsked().size());
- EXPECT_EQ(_ready.bucket(2), calcAsked()[0]);
- _bmj->scanAndMove(1, 0);
- EXPECT_FALSE(_bmj->done());
- EXPECT_EQ(2u, calcAsked().size());
- EXPECT_EQ(_notReady.bucket(3), calcAsked()[1]);
- _bmj->scanAndMove(1, 0);
- EXPECT_FALSE(_bmj->done());
- EXPECT_EQ(3u, calcAsked().size());
- EXPECT_EQ(_notReady.bucket(4), calcAsked()[2]);
- _bmj->scanAndMove(1, 0);
- EXPECT_TRUE(_bmj->done());
- EXPECT_EQ(4u, calcAsked().size());
- EXPECT_EQ(_ready.bucket(1), calcAsked()[3]);
-}
-
-TEST_F(ControllerFixture, require_that_current_bucket_moving_is_cancelled_when_we_change_calculator)
-{
- // bucket 1 should be moved
- addReady(_ready.bucket(2));
- _bmj->scanAndMove(3, 1);
- EXPECT_FALSE(_bmj->done());
- EXPECT_EQ(1u, docsMoved().size());
- EXPECT_EQ(1u, calcAsked().size());
- changeCalc(); // Not cancelled, bucket 1 still moving to notReady
- EXPECT_EQ(1u, calcAsked().size());
- EXPECT_EQ(_ready.bucket(1), calcAsked()[0]);
- _calc->resetAsked();
- _bmj->scanAndMove(2, 1);
- EXPECT_FALSE(_bmj->done());
- EXPECT_EQ(1u, docsMoved().size());
- EXPECT_EQ(0u, calcAsked().size());
- addReady(_ready.bucket(1));
- changeCalc(); // cancelled, bucket 1 no longer moving to notReady
- EXPECT_EQ(1u, calcAsked().size());
- EXPECT_EQ(_ready.bucket(1), calcAsked()[0]);
- _calc->resetAsked();
- remReady(_ready.bucket(1));
- changeCalc(); // not cancelled. No active bucket move
- EXPECT_EQ(0u, calcAsked().size());
- _calc->resetAsked();
- _bmj->scanAndMove(2, 1);
- EXPECT_EQ(0u, docsMoved().size());
- EXPECT_EQ(2u, calcAsked().size());
- EXPECT_EQ(_ready.bucket(2), calcAsked()[0]);
- EXPECT_EQ(_notReady.bucket(3), calcAsked()[1]);
- _bmj->scanAndMove(2, 3);
- EXPECT_TRUE(_bmj->done());
- EXPECT_EQ(3u, docsMoved().size());
- EXPECT_EQ(4u, calcAsked().size());
- EXPECT_EQ(_notReady.bucket(4), calcAsked()[2]);
- EXPECT_EQ(_ready.bucket(1), calcAsked()[3]);
-}
-
TEST_F(ControllerFixture, require_that_last_bucket_is_moved_before_reporting_done)
{
// bucket 4 should be moved
addReady(_ready.bucket(1));
addReady(_ready.bucket(2));
addReady(_notReady.bucket(4));
- _bmj->scanAndMove(4, 1);
- EXPECT_FALSE(_bmj->done());
+ _bmj->recompute();
+ masterExecute([this]() {
+ EXPECT_FALSE(_bmj->done());
+
+ EXPECT_FALSE(_bmj->scanAndMove(1, 1));
+ EXPECT_FALSE(_bmj->done());
+ });
+ sync();
EXPECT_EQ(1u, docsMoved().size());
EXPECT_EQ(4u, calcAsked().size());
- _bmj->scanAndMove(0, 2);
- EXPECT_TRUE(_bmj->done());
+ masterExecute([this]() {
+ EXPECT_TRUE(_bmj->scanAndMove(1, 2));
+ EXPECT_TRUE(_bmj->done());
+ });
+ sync();
EXPECT_EQ(3u, docsMoved().size());
EXPECT_EQ(4u, calcAsked().size());
}
-TEST_F(ControllerFixture, require_that_frozen_bucket_is_not_moved_until_thawed)
-{
- // bucket 1 should be moved but is frozen
- addReady(_ready.bucket(2));
- addFrozen(_ready.bucket(1));
- _bmj->scanAndMove(4, 3); // scan all, delay frozen bucket 1
- remFrozen(_ready.bucket(1));
- EXPECT_FALSE(_bmj->done());
- EXPECT_EQ(0u, docsMoved().size());
- EXPECT_EQ(0u, bucketsModified().size());
- _bmj->scanAndMove(0, 3); // move delayed and thawed bucket 1
- EXPECT_TRUE(_bmj->done());
- EXPECT_EQ(3u, docsMoved().size());
- EXPECT_EQ(1u, bucketsModified().size());
- EXPECT_EQ(_ready.bucket(1), bucketsModified()[0]);
-}
-TEST_F(ControllerFixture, require_that_thawed_bucket_is_moved_before_other_buckets)
-{
- // bucket 2 should be moved but is frozen.
- // bucket 3 & 4 should also be moved
- addReady(_ready.bucket(1));
- addReady(_notReady.bucket(3));
- addReady(_notReady.bucket(4));
- addFrozen(_ready.bucket(2));
- _bmj->scanAndMove(3, 2); // delay bucket 2, move bucket 3
- remFrozen(_ready.bucket(2));
- EXPECT_FALSE(_bmj->done());
- EXPECT_EQ(2u, docsMoved().size());
- EXPECT_EQ(1u, bucketsModified().size());
- EXPECT_EQ(_notReady.bucket(3), bucketsModified()[0]);
- _bmj->scanAndMove(2, 2); // move thawed bucket 2
- EXPECT_FALSE(_bmj->done());
- EXPECT_EQ(4u, docsMoved().size());
- EXPECT_EQ(2u, bucketsModified().size());
- EXPECT_EQ(_ready.bucket(2), bucketsModified()[1]);
- _bmj->scanAndMove(1, 4); // move bucket 4
- EXPECT_TRUE(_bmj->done());
- EXPECT_EQ(7u, docsMoved().size());
- EXPECT_EQ(3u, bucketsModified().size());
- EXPECT_EQ(_notReady.bucket(4), bucketsModified()[2]);
-}
-
-TEST_F(ControllerFixture, require_that_re_frozen_thawed_bucket_is_not_moved_until_re_thawed)
+TEST_F(ControllerFixture, require_that_active_bucket_is_not_moved_from_ready_to_not_ready_until_being_not_active)
{
- // bucket 1 should be moved but is re-frozen
+ // bucket 1 should be moved but is active
addReady(_ready.bucket(2));
- addFrozen(_ready.bucket(1));
- _bmj->scanAndMove(1, 0); // scan, delay frozen bucket 1
- EXPECT_FALSE(_bmj->done());
- EXPECT_EQ(0u, docsMoved().size());
- EXPECT_EQ(0u, bucketsModified().size());
- EXPECT_EQ(1u, calcAsked().size());
- EXPECT_EQ(_ready.bucket(1), calcAsked()[0]);
- remFrozen(_ready.bucket(1));
- addFrozen(_ready.bucket(1));
- _bmj->scanAndMove(1, 0); // scan, but nothing to move
+ _bmj->recompute();
EXPECT_FALSE(_bmj->done());
+ activateBucket(_ready.bucket(1));
+ masterExecute([this]() {
+ EXPECT_TRUE(_bmj->scanAndMove(4, 3)); // scan all, delay active bucket 1
+ EXPECT_TRUE(_bmj->done());
+ });
+ sync();
EXPECT_EQ(0u, docsMoved().size());
EXPECT_EQ(0u, bucketsModified().size());
- EXPECT_EQ(3u, calcAsked().size());
- EXPECT_EQ(_ready.bucket(1), calcAsked()[1]);
- EXPECT_EQ(_ready.bucket(2), calcAsked()[2]);
- remFrozen(_ready.bucket(1));
- _bmj->scanAndMove(3, 4); // move delayed and thawed bucket 1
- EXPECT_FALSE(_bmj->done());
+
+ deactivateBucket(_ready.bucket(1));
+ masterExecute([this]() {
+ EXPECT_FALSE(_bmj->done());
+ EXPECT_TRUE(_bmj->scanAndMove(4, 3)); // move delayed and de-activated bucket 1
+ EXPECT_TRUE(_bmj->done());
+ });
+ sync();
EXPECT_EQ(3u, docsMoved().size());
EXPECT_EQ(1u, bucketsModified().size());
EXPECT_EQ(_ready.bucket(1), bucketsModified()[0]);
- EXPECT_EQ(4u, calcAsked().size());
- EXPECT_EQ(_ready.bucket(1), calcAsked()[3]);
- _bmj->scanAndMove(2, 0); // scan the rest
- EXPECT_TRUE(_bmj->done());
- EXPECT_EQ(3u, docsMoved().size());
- EXPECT_EQ(1u, bucketsModified().size());
- EXPECT_EQ(6u, calcAsked().size());
-}
-
-TEST_F(ControllerFixture, require_that_thawed_bucket_is_not_moved_if_new_calculator_does_not_say_so)
-{
- // bucket 3 should be moved
- addReady(_ready.bucket(1));
- addReady(_ready.bucket(2));
- addReady(_notReady.bucket(3));
- addFrozen(_notReady.bucket(3));
- _bmj->scanAndMove(4, 3); // scan all, delay frozen bucket 3
- remFrozen(_notReady.bucket(3));
- EXPECT_FALSE(_bmj->done());
- EXPECT_EQ(0u, docsMoved().size());
- EXPECT_EQ(0u, bucketsModified().size());
- EXPECT_EQ(4u, calcAsked().size());
- changeCalc();
- remReady(_notReady.bucket(3));
- _bmj->scanAndMove(0, 3); // consider delayed bucket 3
- EXPECT_EQ(0u, docsMoved().size());
- EXPECT_EQ(0u, bucketsModified().size());
- EXPECT_EQ(1u, calcAsked().size());
- EXPECT_EQ(_notReady.bucket(3), calcAsked()[0]);
}
-TEST_F(ControllerFixture, require_that_current_bucket_mover_is_cancelled_if_bucket_is_frozen)
+TEST_F(ControllerFixture, require_that_current_bucket_moving_is_cancelled_when_we_change_calculator)
{
- // bucket 3 should be moved
- addReady(_ready.bucket(1));
+ // bucket 1 should be moved
addReady(_ready.bucket(2));
- addReady(_notReady.bucket(3));
- _bmj->scanAndMove(3, 1); // move 1 doc from bucket 3
- EXPECT_FALSE(_bmj->done());
- EXPECT_EQ(1u, docsMoved().size());
- EXPECT_EQ(0u, bucketsModified().size());
- EXPECT_EQ(3u, calcAsked().size());
- EXPECT_EQ(_ready.bucket(1), calcAsked()[0]);
- EXPECT_EQ(_ready.bucket(2), calcAsked()[1]);
- EXPECT_EQ(_notReady.bucket(3), calcAsked()[2]);
-
- addFrozen(_notReady.bucket(3));
- _bmj->scanAndMove(1, 3); // done scanning
- EXPECT_FALSE(_bmj->done());
- EXPECT_EQ(1u, docsMoved().size());
- EXPECT_EQ(0u, bucketsModified().size());
- EXPECT_EQ(3u, calcAsked().size());
- _bmj->scanAndMove(1, 3); // done scanning
- remFrozen(_notReady.bucket(3));
- EXPECT_FALSE(_bmj->done());
+ masterExecute([this]() {
+ _bmj->recompute();
+ _bmj->scanAndMove(1, 1);
+ EXPECT_FALSE(_bmj->done());
+ });
+ sync();
EXPECT_EQ(1u, docsMoved().size());
- EXPECT_EQ(0u, bucketsModified().size());
EXPECT_EQ(4u, calcAsked().size());
-
- EXPECT_EQ(_notReady.bucket(4), calcAsked()[3]);
- _bmj->scanAndMove(0, 2); // move all docs from bucket 3 again
- EXPECT_TRUE(_bmj->done());
- EXPECT_EQ(3u, docsMoved().size());
- EXPECT_EQ(1u, bucketsModified().size());
- EXPECT_EQ(_notReady.bucket(3), bucketsModified()[0]);
- EXPECT_EQ(5u, calcAsked().size());
- EXPECT_EQ(_notReady.bucket(3), calcAsked()[4]);
-}
-
-TEST_F(ControllerFixture, require_that_current_bucket_mover_is_not_cancelled_if_another_bucket_is_frozen)
-{
- // bucket 3 and 4 should be moved
+ masterExecute([this]() {
+ changeCalc(); // Not cancelled, bucket 1 still moving to notReady
+ EXPECT_EQ(4u, calcAsked().size());
+ EXPECT_EQ(_ready.bucket(1), calcAsked()[0]);
+ _calc->resetAsked();
+ _bmj->scanAndMove(1, 1);
+ EXPECT_FALSE(_bmj->done());
+ });
+ sync();
+ EXPECT_EQ(1u, docsMoved().size());
+ EXPECT_EQ(0u, calcAsked().size());
addReady(_ready.bucket(1));
- addReady(_ready.bucket(2));
- addReady(_notReady.bucket(3));
- addReady(_notReady.bucket(4));
- _bmj->scanAndMove(3, 1); // move 1 doc from bucket 3
- EXPECT_FALSE(_bmj->done());
+ masterExecute([this]() {
+ changeCalc(); // cancelled, bucket 1 no longer moving to notReady
+ EXPECT_EQ(4u, calcAsked().size());
+ EXPECT_EQ(_ready.bucket(1), calcAsked()[0]);
+ _calc->resetAsked();
+ remReady(_ready.bucket(1));
+ _calc->resetAsked();
+ changeCalc(); // not cancelled. No active bucket move
+ EXPECT_EQ(4u, calcAsked().size());
+ _bmj->scanAndMove(1, 1);
+ });
+ sync();
EXPECT_EQ(1u, docsMoved().size());
- EXPECT_EQ(0u, bucketsModified().size());
- EXPECT_EQ(3u, calcAsked().size());
- addFrozen(_notReady.bucket(4));
- _bmj->scanAndMove(1, 2); // move rest of docs from bucket 3
- EXPECT_FALSE(_bmj->done());
- EXPECT_EQ(2u, docsMoved().size());
- EXPECT_EQ(1u, bucketsModified().size());
- EXPECT_EQ(_notReady.bucket(3), bucketsModified()[0]);
- EXPECT_EQ(3u, calcAsked().size());
-}
-
-TEST_F(ControllerFixture, require_that_active_bucket_is_not_moved_from_ready_to_not_ready_until_being_not_active)
-{
- // bucket 1 should be moved but is active
- addReady(_ready.bucket(2));
- activateBucket(_ready.bucket(1));
- _bmj->scanAndMove(4, 3); // scan all, delay active bucket 1
- EXPECT_TRUE(_bmj->done());
- EXPECT_EQ(0u, docsMoved().size());
- EXPECT_EQ(0u, bucketsModified().size());
-
- deactivateBucket(_ready.bucket(1));
- EXPECT_FALSE(_bmj->done());
- _bmj->scanAndMove(0, 3); // move delayed and de-activated bucket 1
+ EXPECT_EQ(4u, calcAsked().size());
+ EXPECT_EQ(_ready.bucket(2), calcAsked()[1]);
+ EXPECT_EQ(_notReady.bucket(3), calcAsked()[2]);
+ masterExecute([this]() {
+ _bmj->scanAndMove(2, 3);
+ });
EXPECT_TRUE(_bmj->done());
+ sync();
EXPECT_EQ(3u, docsMoved().size());
- EXPECT_EQ(1u, bucketsModified().size());
- EXPECT_EQ(_ready.bucket(1), bucketsModified()[0]);
-}
-
-TEST_F(OnlyReadyControllerFixture, require_that_de_activated_bucket_is_moved_before_other_buckets)
-{
- // bucket 1, 2, 3 should be moved (but bucket 1 is active)
- addReady(_ready.bucket(4));
- activateBucket(_ready.bucket(1));
- _bmj->scanAndMove(2, 4); // delay bucket 1, move bucket 2
- EXPECT_FALSE(_bmj->done());
- EXPECT_EQ(2u, docsMoved().size());
- EXPECT_EQ(1u, bucketsModified().size());
- EXPECT_EQ(_ready.bucket(2), bucketsModified()[0]);
-
- deactivateBucket(_ready.bucket(1));
- _bmj->scanAndMove(2, 4); // move de-activated bucket 1
- EXPECT_FALSE(_bmj->done());
- EXPECT_EQ(3u, docsMoved().size());
- EXPECT_EQ(2u, bucketsModified().size());
- EXPECT_EQ(_ready.bucket(1), bucketsModified()[1]);
-
- _bmj->scanAndMove(2, 4); // move bucket 3
- // EXPECT_TRUE(_bmj->done()); // TODO(geirst): fix this
- EXPECT_EQ(6u, docsMoved().size());
- EXPECT_EQ(3u, bucketsModified().size());
- EXPECT_EQ(_ready.bucket(3), bucketsModified()[2]);
+ EXPECT_EQ(4u, calcAsked().size());
+ EXPECT_EQ(_notReady.bucket(4), calcAsked()[3]);
+ EXPECT_EQ(_ready.bucket(1), calcAsked()[0]);
}
TEST_F(ControllerFixture, require_that_de_activated_bucket_is_not_moved_if_new_calculator_does_not_say_so)
{
// bucket 1 should be moved
addReady(_ready.bucket(2));
- activateBucket(_ready.bucket(1));
- _bmj->scanAndMove(4, 3); // scan all, delay active bucket 1
+ _bmj->recompute();
+ masterExecute([this]() {
+ activateBucket(_ready.bucket(1));
+ _bmj->scanAndMove(4, 3); // scan all, delay active bucket 1
+ });
+ sync();
EXPECT_EQ(0u, docsMoved().size());
EXPECT_EQ(0u, bucketsModified().size());
- deactivateBucket(_ready.bucket(1));
- addReady(_ready.bucket(1));
- changeCalc();
- _bmj->scanAndMove(0, 3); // consider delayed bucket 3
+ masterExecute([this]() {
+ deactivateBucket(_ready.bucket(1));
+ addReady(_ready.bucket(1));
+ changeCalc();
+ _bmj->scanAndMove(4, 3); // consider delayed bucket 3
+ });
+ sync();
EXPECT_EQ(0u, docsMoved().size());
EXPECT_EQ(0u, bucketsModified().size());
- EXPECT_EQ(1u, calcAsked().size());
+ EXPECT_EQ(4u, calcAsked().size());
EXPECT_EQ(_ready.bucket(1), calcAsked()[0]);
}
-TEST_F(ControllerFixture, require_that_de_activated_bucket_is_not_moved_if_frozen_as_well)
-{
- // bucket 1 should be moved
- addReady(_ready.bucket(2));
- activateBucket(_ready.bucket(1));
- _bmj->scanAndMove(4, 3); // scan all, delay active bucket 1
- EXPECT_EQ(0u, docsMoved().size());
- EXPECT_EQ(0u, bucketsModified().size());
-
- addFrozen(_ready.bucket(1));
- deactivateBucket(_ready.bucket(1));
- _bmj->scanAndMove(0, 3); // bucket 1 de-activated but frozen
- EXPECT_EQ(0u, docsMoved().size());
- EXPECT_EQ(0u, bucketsModified().size());
-
- remFrozen(_ready.bucket(1));
- _bmj->scanAndMove(0, 3); // handle thawed bucket 1
- EXPECT_EQ(3u, docsMoved().size());
- EXPECT_EQ(1u, bucketsModified().size());
- EXPECT_EQ(_ready.bucket(1), bucketsModified()[0]);
-}
-
-TEST_F(ControllerFixture, require_that_thawed_bucket_is_not_moved_if_active_as_well)
-{
- // bucket 1 should be moved
- addReady(_ready.bucket(2));
- addFrozen(_ready.bucket(1));
- _bmj->scanAndMove(4, 3); // scan all, delay frozen bucket 1
- EXPECT_EQ(0u, docsMoved().size());
- EXPECT_EQ(0u, bucketsModified().size());
-
- activateBucket(_ready.bucket(1));
- remFrozen(_ready.bucket(1));
- _bmj->scanAndMove(0, 3); // bucket 1 thawed but active
- EXPECT_EQ(0u, docsMoved().size());
- EXPECT_EQ(0u, bucketsModified().size());
-
- deactivateBucket(_ready.bucket(1));
- _bmj->scanAndMove(0, 3); // handle de-activated bucket 1
- EXPECT_EQ(3u, docsMoved().size());
- EXPECT_EQ(1u, bucketsModified().size());
- EXPECT_EQ(_ready.bucket(1), bucketsModified()[0]);
-}
-
TEST_F(ControllerFixture, ready_bucket_not_moved_to_not_ready_if_node_is_marked_as_retired)
{
_calc->setNodeRetired(true);
// Bucket 2 would be moved from ready to not ready in a non-retired case, but not when retired.
addReady(_ready.bucket(1));
- _bmj->scanAndMove(4, 3);
- EXPECT_TRUE(_bmj->done());
+ masterExecute([this]() {
+ _bmj->recompute();
+ _bmj->scanAndMove(4, 3);
+ EXPECT_TRUE(_bmj->done());
+ });
+ sync();
EXPECT_EQ(0u, docsMoved().size());
}
@@ -719,8 +450,12 @@ TEST_F(ControllerFixture, inactive_not_ready_bucket_not_moved_to_ready_if_node_i
addReady(_ready.bucket(1));
addReady(_ready.bucket(2));
addReady(_notReady.bucket(3));
- _bmj->scanAndMove(4, 3);
- EXPECT_TRUE(_bmj->done());
+ masterExecute([this]() {
+ _bmj->recompute();
+ _bmj->scanAndMove(4, 3);
+ EXPECT_TRUE(_bmj->done());
+ });
+ sync();
EXPECT_EQ(0u, docsMoved().size());
}
@@ -730,9 +465,13 @@ TEST_F(ControllerFixture, explicitly_active_not_ready_bucket_can_be_moved_to_rea
addReady(_ready.bucket(1));
addReady(_ready.bucket(2));
addReady(_notReady.bucket(3));
- activateBucket(_notReady.bucket(3));
- _bmj->scanAndMove(4, 3);
- EXPECT_FALSE(_bmj->done());
+ _bmj->recompute();
+ masterExecute([this]() {
+ activateBucket(_notReady.bucket(3));
+ _bmj->scanAndMove(4, 3);
+ EXPECT_TRUE(_bmj->done());
+ });
+ sync();
ASSERT_EQ(2u, docsMoved().size());
assertEqual(_notReady.bucket(3), _notReady.docs(3)[0], 2, 1, docsMoved()[0]);
assertEqual(_notReady.bucket(3), _notReady.docs(3)[1], 2, 1, docsMoved()[1]);
@@ -740,25 +479,37 @@ TEST_F(ControllerFixture, explicitly_active_not_ready_bucket_can_be_moved_to_rea
EXPECT_EQ(_notReady.bucket(3), bucketsModified()[0]);
}
+
TEST_F(ControllerFixture, require_that_notifyCreateBucket_causes_bucket_to_be_reconsidered_by_job)
{
- EXPECT_FALSE(_bmj->done());
+ EXPECT_TRUE(_bmj->done());
addReady(_ready.bucket(1));
addReady(_ready.bucket(2));
runLoop();
EXPECT_TRUE(_bmj->done());
+ sync();
EXPECT_TRUE(docsMoved().empty());
EXPECT_TRUE(bucketsModified().empty());
addReady(_notReady.bucket(3)); // bucket 3 now ready, no notify
EXPECT_TRUE(_bmj->done()); // move job still believes work done
- _bmj->notifyCreateBucket(_bucketDB->takeGuard(), _notReady.bucket(3)); // reconsider bucket 3
- EXPECT_FALSE(_bmj->done());
+ sync();
+ EXPECT_TRUE(bucketsModified().empty());
+ masterExecute([this]() {
+ _bmj->notifyCreateBucket(_bucketDB->takeGuard(), _notReady.bucket(3)); // reconsider bucket 3
+ EXPECT_FALSE(_bmj->done());
+ EXPECT_TRUE(bucketsModified().empty());
+ });
+ sync();
+ EXPECT_TRUE(bucketsModified().empty());
runLoop();
EXPECT_TRUE(_bmj->done());
+ sync();
+
EXPECT_EQ(1u, bucketsModified().size());
EXPECT_EQ(2u, docsMoved().size());
}
+
struct ResourceLimitControllerFixture : public ControllerFixture
{
ResourceLimitControllerFixture(double resourceLimitFactor = RESOURCE_LIMIT_FACTOR) :
@@ -768,18 +519,23 @@ struct ResourceLimitControllerFixture : public ControllerFixture
void testJobStopping(DiskMemUsageState blockingUsageState) {
// Bucket 1 should be moved
addReady(_ready.bucket(2));
+ _bmj->recompute();
+ EXPECT_FALSE(_bmj->done());
// Note: This depends on _bmj->run() moving max 1 documents
- EXPECT_TRUE(!_bmj->run());
+ EXPECT_FALSE(_bmj->run());
+ sync();
EXPECT_EQ(1u, docsMoved().size());
EXPECT_EQ(0u, bucketsModified().size());
// Notify that we've over limit
_diskMemUsageNotifier.notify(blockingUsageState);
EXPECT_TRUE(_bmj->run());
+ sync();
EXPECT_EQ(1u, docsMoved().size());
EXPECT_EQ(0u, bucketsModified().size());
// Notify that we've under limit
_diskMemUsageNotifier.notify(DiskMemUsageState());
- EXPECT_TRUE(!_bmj->run());
+ EXPECT_FALSE(_bmj->run());
+ sync();
EXPECT_EQ(2u, docsMoved().size());
EXPECT_EQ(0u, bucketsModified().size());
}
@@ -787,13 +543,17 @@ struct ResourceLimitControllerFixture : public ControllerFixture
void testJobNotStopping(DiskMemUsageState blockingUsageState) {
// Bucket 1 should be moved
addReady(_ready.bucket(2));
+ _bmj->recompute();
+ EXPECT_FALSE(_bmj->done());
// Note: This depends on _bmj->run() moving max 1 documents
- EXPECT_TRUE(!_bmj->run());
+ EXPECT_FALSE(_bmj->run());
+ sync();
EXPECT_EQ(1u, docsMoved().size());
EXPECT_EQ(0u, bucketsModified().size());
// Notify that we've over limit, but not over adjusted limit
_diskMemUsageNotifier.notify(blockingUsageState);
- EXPECT_TRUE(!_bmj->run());
+ EXPECT_FALSE(_bmj->run());
+ sync();
EXPECT_EQ(2u, docsMoved().size());
EXPECT_EQ(0u, bucketsModified().size());
}
@@ -823,13 +583,25 @@ TEST_F(ResourceLimitControllerFixture_1_2, require_that_bucket_move_uses_resourc
testJobNotStopping(DiskMemUsageState(ResourceUsageState(), ResourceUsageState(0.7, 0.8)));
}
-struct MaxOutstandingMoveOpsFixture : public ControllerFixture
+
+struct MaxOutstandingMoveOpsFixture : public ControllerFixtureBase
{
- MaxOutstandingMoveOpsFixture(uint32_t maxOutstandingOps) :
- ControllerFixture(BlockableMaintenanceJobConfig(RESOURCE_LIMIT_FACTOR, maxOutstandingOps))
+ MaxOutstandingMoveOpsFixture(uint32_t maxOutstandingOps)
+ : ControllerFixtureBase(BlockableMaintenanceJobConfig(RESOURCE_LIMIT_FACTOR, maxOutstandingOps), true)
{
- // Bucket 1 should be moved from ready -> notready
- addReady(_ready.bucket(2));
+ _builder.createDocs(1, 1, 2);
+ _builder.createDocs(2, 2, 3);
+ _builder.createDocs(3, 3, 4);
+ _builder.createDocs(4, 4, 5);
+ _ready.insertDocs(_builder.getDocs());
+ _builder.clearDocs();
+ _builder.createDocs(11, 1, 2);
+ _builder.createDocs(12, 2, 3);
+ _builder.createDocs(13, 3, 4);
+ _builder.createDocs(14, 4, 5);
+ _notReady.insertDocs(_builder.getDocs());
+ addReady(_ready.bucket(3));
+ _bmj->recompute();
}
void assertRunToBlocked() {
@@ -867,39 +639,43 @@ struct MaxOutstandingMoveOpsFixture_2 : public MaxOutstandingMoveOpsFixture {
MaxOutstandingMoveOpsFixture_2() : MaxOutstandingMoveOpsFixture(2) {}
};
-TEST_F(MaxOutstandingMoveOpsFixture_1, require_that_bucket_move_job_is_blocked_if_it_has_too_many_outstanding_move_operations__max_1)
+TEST_F(MaxOutstandingMoveOpsFixture_1, require_that_bucket_move_job_is_blocked_if_it_has_too_many_outstanding_move_operations_max_1)
{
assertRunToBlocked();
+ sync();
assertDocsMoved(1, 1);
assertRunToBlocked();
assertDocsMoved(1, 1);
unblockJob(1);
assertRunToBlocked();
+ sync();
assertDocsMoved(2, 1);
unblockJob(2);
assertRunToBlocked();
+ sync();
assertDocsMoved(3, 1);
unblockJob(3);
assertRunToFinished();
+ sync();
assertDocsMoved(3, 0);
}
TEST_F(MaxOutstandingMoveOpsFixture_2, require_that_bucket_move_job_is_blocked_if_it_has_too_many_outstanding_move_operations_max_2)
{
assertRunToNotBlocked();
+ sync();
assertDocsMoved(1, 1);
assertRunToBlocked();
+ sync();
assertDocsMoved(2, 2);
unblockJob(1);
- assertRunToNotBlocked();
- assertDocsMoved(3, 1);
-
assertRunToFinished();
+ sync();
assertDocsMoved(3, 1);
}
diff --git a/searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_v2_test.cpp b/searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_v2_test.cpp
deleted file mode 100644
index 8dcad91f69a..00000000000
--- a/searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_v2_test.cpp
+++ /dev/null
@@ -1,682 +0,0 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "bucketmover_common.h"
-#include <vespa/searchcore/proton/server/bucketmovejobv2.h>
-#include <vespa/searchcore/proton/server/executor_thread_service.h>
-#include <vespa/searchcore/proton/server/document_db_maintenance_config.h>
-#include <vespa/persistence/dummyimpl/dummy_bucket_executor.h>
-#include <vespa/vespalib/util/threadstackexecutor.h>
-#include <vespa/vespalib/util/lambdatask.h>
-#include <vespa/vespalib/gtest/gtest.h>
-
-#include <vespa/log/log.h>
-#include <vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h>
-
-LOG_SETUP("document_bucket_mover_test");
-
-using namespace proton;
-using namespace proton::move::test;
-using document::BucketId;
-using document::test::makeBucketSpace;
-using proton::bucketdb::BucketCreateNotifier;
-using storage::spi::BucketInfo;
-using BlockedReason = IBlockableMaintenanceJob::BlockedReason;
-using MoveOperationVector = std::vector<MoveOperation>;
-using storage::spi::dummy::DummyBucketExecutor;
-using vespalib::ThreadStackExecutor;
-
-struct ControllerFixtureBase : public ::testing::Test
-{
- test::UserDocumentsBuilder _builder;
- test::BucketStateCalculator::SP _calc;
- test::ClusterStateHandler _clusterStateHandler;
- test::BucketHandler _bucketHandler;
- MyBucketModifiedHandler _modifiedHandler;
- std::shared_ptr<bucketdb::BucketDBOwner> _bucketDB;
- MySubDb _ready;
- MySubDb _notReady;
- BucketCreateNotifier _bucketCreateNotifier;
- test::DiskMemUsageNotifier _diskMemUsageNotifier;
- MonitoredRefCount _refCount;
- ThreadStackExecutor _singleExecutor;
- ExecutorThreadService _master;
- DummyBucketExecutor _bucketExecutor;
- MyMoveHandler _moveHandler;
- DocumentDBTaggedMetrics _metrics;
- std::shared_ptr<BucketMoveJobV2> _bmj;
- MyCountJobRunner _runner;
- ControllerFixtureBase(const BlockableMaintenanceJobConfig &blockableConfig, bool storeMoveDoneContexts);
- ~ControllerFixtureBase();
- ControllerFixtureBase &addReady(const BucketId &bucket) {
- _calc->addReady(bucket);
- return *this;
- }
- ControllerFixtureBase &remReady(const BucketId &bucket) {
- _calc->remReady(bucket);
- return *this;
- }
- ControllerFixtureBase &changeCalc() {
- _calc->resetAsked();
- _moveHandler.reset();
- _modifiedHandler.reset();
- _clusterStateHandler.notifyClusterStateChanged(_calc);
- return *this;
- }
- ControllerFixtureBase &activateBucket(const BucketId &bucket) {
- _ready.setBucketState(bucket, true);
- _bucketHandler.notifyBucketStateChanged(bucket, BucketInfo::ActiveState::ACTIVE);
- return *this;
- }
- ControllerFixtureBase &deactivateBucket(const BucketId &bucket) {
- _ready.setBucketState(bucket, false);
- _bucketHandler.notifyBucketStateChanged(bucket, BucketInfo::ActiveState::NOT_ACTIVE);
- return *this;
- }
- void failRetrieveForLid(uint32_t lid) {
- _ready.failRetrieveForLid(lid);
- _notReady.failRetrieveForLid(lid);
- }
- void fixRetriever() {
- _ready.failRetrieveForLid(0);
- _notReady.failRetrieveForLid(0);
- }
- const MoveOperationVector &docsMoved() const {
- return _moveHandler._moves;
- }
- const std::vector<BucketId> &bucketsModified() const {
- return _modifiedHandler._modified;
- }
- const BucketId::List &calcAsked() const {
- return _calc->asked();
- }
- size_t numPending() {
- _bmj->updateMetrics(_metrics);
- return _metrics.bucketMove.bucketsPending.getLast();
- }
- void runLoop() {
- while (!_bmj->isBlocked() && !_bmj->run()) {
- }
- }
- void sync() {
- _bucketExecutor.sync();
- _master.sync();
- _master.sync(); // Handle that master schedules onto master again
- }
- template <typename FunctionType>
- void masterExecute(FunctionType &&function) {
- _master.execute(vespalib::makeLambdaTask(std::forward<FunctionType>(function)));
- _master.sync();
- }
-};
-
-ControllerFixtureBase::ControllerFixtureBase(const BlockableMaintenanceJobConfig &blockableConfig, bool storeMoveDoneContexts)
- : _builder(),
- _calc(std::make_shared<test::BucketStateCalculator>()),
- _bucketHandler(),
- _modifiedHandler(),
- _bucketDB(std::make_shared<bucketdb::BucketDBOwner>()),
- _ready(_builder.getRepo(), _bucketDB, 1, SubDbType::READY),
- _notReady(_builder.getRepo(), _bucketDB, 2, SubDbType::NOTREADY),
- _bucketCreateNotifier(),
- _diskMemUsageNotifier(),
- _refCount(),
- _singleExecutor(1, 0x10000),
- _master(_singleExecutor),
- _bucketExecutor(4),
- _moveHandler(*_bucketDB, storeMoveDoneContexts),
- _metrics("test", 1),
- _bmj(BucketMoveJobV2::create(_calc, RetainGuard(_refCount), _moveHandler, _modifiedHandler, _master, _bucketExecutor, _ready._subDb,
- _notReady._subDb, _bucketCreateNotifier,_clusterStateHandler, _bucketHandler,
- _diskMemUsageNotifier, blockableConfig, "test", makeBucketSpace())),
- _runner(*_bmj)
-{
-}
-
-ControllerFixtureBase::~ControllerFixtureBase() = default;
-constexpr double RESOURCE_LIMIT_FACTOR = 1.0;
-constexpr uint32_t MAX_OUTSTANDING_OPS = 10;
-const BlockableMaintenanceJobConfig BLOCKABLE_CONFIG(RESOURCE_LIMIT_FACTOR, MAX_OUTSTANDING_OPS);
-
-struct ControllerFixture : public ControllerFixtureBase
-{
- ControllerFixture(const BlockableMaintenanceJobConfig &blockableConfig = BLOCKABLE_CONFIG)
- : ControllerFixtureBase(blockableConfig, blockableConfig.getMaxOutstandingMoveOps() != MAX_OUTSTANDING_OPS)
- {
- _builder.createDocs(1, 1, 4); // 3 docs
- _builder.createDocs(2, 4, 6); // 2 docs
- _ready.insertDocs(_builder.getDocs());
- _builder.clearDocs();
- _builder.createDocs(3, 1, 3); // 2 docs
- _builder.createDocs(4, 3, 6); // 3 docs
- _notReady.insertDocs(_builder.getDocs());
- }
-};
-
-struct OnlyReadyControllerFixture : public ControllerFixtureBase
-{
- OnlyReadyControllerFixture() : ControllerFixtureBase(BLOCKABLE_CONFIG, false)
- {
- _builder.createDocs(1, 1, 2); // 1 docs
- _builder.createDocs(2, 2, 4); // 2 docs
- _builder.createDocs(3, 4, 7); // 3 docs
- _builder.createDocs(4, 7, 11); // 4 docs
- _ready.insertDocs(_builder.getDocs());
- }
-};
-
-TEST_F(ControllerFixture, require_that_nothing_is_moved_if_bucket_state_says_so)
-{
- EXPECT_TRUE(_bmj->done());
- addReady(_ready.bucket(1));
- addReady(_ready.bucket(2));
- _bmj->recompute();
- masterExecute([this]() {
- EXPECT_TRUE(_bmj->scanAndMove(4, 3));
- EXPECT_TRUE(_bmj->done());
- });
- EXPECT_TRUE(docsMoved().empty());
- EXPECT_TRUE(bucketsModified().empty());
-}
-
-TEST_F(ControllerFixture, require_that_not_ready_bucket_is_moved_to_ready_if_bucket_state_says_so)
-{
- // bucket 4 should be moved
- addReady(_ready.bucket(1));
- addReady(_ready.bucket(2));
- addReady(_notReady.bucket(4));
-
- EXPECT_EQ(0, numPending());
- _bmj->recompute();
- EXPECT_EQ(1, numPending());
- masterExecute([this]() {
- EXPECT_FALSE(_bmj->done());
- EXPECT_TRUE(_bmj->scanAndMove(4, 3));
- EXPECT_TRUE(_bmj->done());
- });
- sync();
- EXPECT_EQ(0, numPending());
- EXPECT_EQ(3u, docsMoved().size());
- assertEqual(_notReady.bucket(4), _notReady.docs(4)[0], 2, 1, docsMoved()[0]);
- assertEqual(_notReady.bucket(4), _notReady.docs(4)[1], 2, 1, docsMoved()[1]);
- assertEqual(_notReady.bucket(4), _notReady.docs(4)[2], 2, 1, docsMoved()[2]);
- ASSERT_EQ(1u, bucketsModified().size());
- EXPECT_EQ(_notReady.bucket(4), bucketsModified()[0]);
-}
-
-TEST_F(ControllerFixture, require_that_ready_bucket_is_moved_to_not_ready_if_bucket_state_says_so)
-{
- // bucket 2 should be moved
- addReady(_ready.bucket(1));
- _bmj->recompute();
- masterExecute([this]() {
- EXPECT_FALSE(_bmj->done());
- EXPECT_TRUE(_bmj->scanAndMove(4, 3));
- EXPECT_TRUE(_bmj->done());
- });
- sync();
- EXPECT_EQ(2u, docsMoved().size());
- assertEqual(_ready.bucket(2), _ready.docs(2)[0], 1, 2, docsMoved()[0]);
- assertEqual(_ready.bucket(2), _ready.docs(2)[1], 1, 2, docsMoved()[1]);
- EXPECT_EQ(1u, bucketsModified().size());
- EXPECT_EQ(_ready.bucket(2), bucketsModified()[0]);
-}
-
-TEST_F(ControllerFixture, require_that_bucket_is_moved_even_with_error)
-{
- // bucket 2 should be moved
- addReady(_ready.bucket(1));
- _bmj->recompute();
- failRetrieveForLid(5);
- masterExecute([this]() {
- EXPECT_FALSE(_bmj->done());
- EXPECT_TRUE(_bmj->scanAndMove(4, 3));
- EXPECT_TRUE(_bmj->done());
- });
- sync();
- EXPECT_FALSE(_bmj->done());
- fixRetriever();
- masterExecute([this]() {
- EXPECT_TRUE(_bmj->scanAndMove(4, 3));
- EXPECT_TRUE(_bmj->done());
- });
- sync();
- EXPECT_EQ(2u, docsMoved().size());
- assertEqual(_ready.bucket(2), _ready.docs(2)[0], 1, 2, docsMoved()[0]);
- assertEqual(_ready.bucket(2), _ready.docs(2)[1], 1, 2, docsMoved()[1]);
- EXPECT_EQ(1u, bucketsModified().size());
- EXPECT_EQ(_ready.bucket(2), bucketsModified()[0]);
-}
-
-
-TEST_F(ControllerFixture, require_that_we_move_buckets_in_several_steps)
-{
- // bucket 2, 3, and 4 should be moved
- addReady(_ready.bucket(1));
- addReady(_notReady.bucket(3));
- addReady(_notReady.bucket(4));
-
- _bmj->recompute();
- EXPECT_EQ(3, numPending());
- masterExecute([this]() {
- EXPECT_FALSE(_bmj->done());
-
- EXPECT_FALSE(_bmj->scanAndMove(1, 2));
- EXPECT_FALSE(_bmj->done());
- });
- sync();
- EXPECT_EQ(2, numPending());
- EXPECT_EQ(2u, docsMoved().size());
-
- masterExecute([this]() {
- EXPECT_FALSE(_bmj->scanAndMove(1, 2));
- EXPECT_FALSE(_bmj->done());
- });
- sync();
- EXPECT_EQ(2, numPending());
- EXPECT_EQ(4u, docsMoved().size());
-
- masterExecute([this]() {
- EXPECT_FALSE(_bmj->scanAndMove(1, 2));
- EXPECT_FALSE(_bmj->done());
- });
- sync();
- EXPECT_EQ(1, numPending());
- EXPECT_EQ(6u, docsMoved().size());
-
- // move bucket 4, docs 3
- masterExecute([this]() {
- EXPECT_TRUE(_bmj->scanAndMove(1, 2));
- EXPECT_TRUE(_bmj->done());
- });
- sync();
- EXPECT_EQ(0, numPending());
- EXPECT_EQ(7u, docsMoved().size());
- EXPECT_EQ(3u, bucketsModified().size());
- EXPECT_EQ(_ready.bucket(2), bucketsModified()[0]);
- EXPECT_EQ(_notReady.bucket(3), bucketsModified()[1]);
- EXPECT_EQ(_notReady.bucket(4), bucketsModified()[2]);
-}
-
-TEST_F(ControllerFixture, require_that_last_bucket_is_moved_before_reporting_done)
-{
- // bucket 4 should be moved
- addReady(_ready.bucket(1));
- addReady(_ready.bucket(2));
- addReady(_notReady.bucket(4));
- _bmj->recompute();
- masterExecute([this]() {
- EXPECT_FALSE(_bmj->done());
-
- EXPECT_FALSE(_bmj->scanAndMove(1, 1));
- EXPECT_FALSE(_bmj->done());
- });
- sync();
- EXPECT_EQ(1u, docsMoved().size());
- EXPECT_EQ(4u, calcAsked().size());
- masterExecute([this]() {
- EXPECT_TRUE(_bmj->scanAndMove(1, 2));
- EXPECT_TRUE(_bmj->done());
- });
- sync();
- EXPECT_EQ(3u, docsMoved().size());
- EXPECT_EQ(4u, calcAsked().size());
-}
-
-
-TEST_F(ControllerFixture, require_that_active_bucket_is_not_moved_from_ready_to_not_ready_until_being_not_active)
-{
- // bucket 1 should be moved but is active
- addReady(_ready.bucket(2));
- _bmj->recompute();
- EXPECT_FALSE(_bmj->done());
- activateBucket(_ready.bucket(1));
- masterExecute([this]() {
- EXPECT_TRUE(_bmj->scanAndMove(4, 3)); // scan all, delay active bucket 1
- EXPECT_TRUE(_bmj->done());
- });
- sync();
- EXPECT_EQ(0u, docsMoved().size());
- EXPECT_EQ(0u, bucketsModified().size());
-
- deactivateBucket(_ready.bucket(1));
- masterExecute([this]() {
- EXPECT_FALSE(_bmj->done());
- EXPECT_TRUE(_bmj->scanAndMove(4, 3)); // move delayed and de-activated bucket 1
- EXPECT_TRUE(_bmj->done());
- });
- sync();
- EXPECT_EQ(3u, docsMoved().size());
- EXPECT_EQ(1u, bucketsModified().size());
- EXPECT_EQ(_ready.bucket(1), bucketsModified()[0]);
-}
-
-TEST_F(ControllerFixture, require_that_current_bucket_moving_is_cancelled_when_we_change_calculator)
-{
- // bucket 1 should be moved
- addReady(_ready.bucket(2));
-
- masterExecute([this]() {
- _bmj->recompute();
- _bmj->scanAndMove(1, 1);
- EXPECT_FALSE(_bmj->done());
- });
- sync();
- EXPECT_EQ(1u, docsMoved().size());
- EXPECT_EQ(4u, calcAsked().size());
- masterExecute([this]() {
- changeCalc(); // Not cancelled, bucket 1 still moving to notReady
- EXPECT_EQ(4u, calcAsked().size());
- EXPECT_EQ(_ready.bucket(1), calcAsked()[0]);
- _calc->resetAsked();
- _bmj->scanAndMove(1, 1);
- EXPECT_FALSE(_bmj->done());
- });
- sync();
- EXPECT_EQ(1u, docsMoved().size());
- EXPECT_EQ(0u, calcAsked().size());
- addReady(_ready.bucket(1));
- masterExecute([this]() {
- changeCalc(); // cancelled, bucket 1 no longer moving to notReady
- EXPECT_EQ(4u, calcAsked().size());
- EXPECT_EQ(_ready.bucket(1), calcAsked()[0]);
- _calc->resetAsked();
- remReady(_ready.bucket(1));
- _calc->resetAsked();
- changeCalc(); // not cancelled. No active bucket move
- EXPECT_EQ(4u, calcAsked().size());
- _bmj->scanAndMove(1, 1);
- });
- sync();
- EXPECT_EQ(1u, docsMoved().size());
- EXPECT_EQ(4u, calcAsked().size());
- EXPECT_EQ(_ready.bucket(2), calcAsked()[1]);
- EXPECT_EQ(_notReady.bucket(3), calcAsked()[2]);
- masterExecute([this]() {
- _bmj->scanAndMove(2, 3);
- });
- EXPECT_TRUE(_bmj->done());
- sync();
- EXPECT_EQ(3u, docsMoved().size());
- EXPECT_EQ(4u, calcAsked().size());
- EXPECT_EQ(_notReady.bucket(4), calcAsked()[3]);
- EXPECT_EQ(_ready.bucket(1), calcAsked()[0]);
-}
-
-TEST_F(ControllerFixture, require_that_de_activated_bucket_is_not_moved_if_new_calculator_does_not_say_so)
-{
- // bucket 1 should be moved
- addReady(_ready.bucket(2));
- _bmj->recompute();
- masterExecute([this]() {
- activateBucket(_ready.bucket(1));
- _bmj->scanAndMove(4, 3); // scan all, delay active bucket 1
- });
- sync();
- EXPECT_EQ(0u, docsMoved().size());
- EXPECT_EQ(0u, bucketsModified().size());
-
- masterExecute([this]() {
- deactivateBucket(_ready.bucket(1));
- addReady(_ready.bucket(1));
- changeCalc();
- _bmj->scanAndMove(4, 3); // consider delayed bucket 3
- });
- sync();
- EXPECT_EQ(0u, docsMoved().size());
- EXPECT_EQ(0u, bucketsModified().size());
- EXPECT_EQ(4u, calcAsked().size());
- EXPECT_EQ(_ready.bucket(1), calcAsked()[0]);
-}
-
-TEST_F(ControllerFixture, ready_bucket_not_moved_to_not_ready_if_node_is_marked_as_retired)
-{
- _calc->setNodeRetired(true);
- // Bucket 2 would be moved from ready to not ready in a non-retired case, but not when retired.
- addReady(_ready.bucket(1));
- masterExecute([this]() {
- _bmj->recompute();
- _bmj->scanAndMove(4, 3);
- EXPECT_TRUE(_bmj->done());
- });
- sync();
- EXPECT_EQ(0u, docsMoved().size());
-}
-
-// Technically this should never happen since a retired node is never in the ideal state,
-// but test this case for the sake of completion.
-TEST_F(ControllerFixture, inactive_not_ready_bucket_not_moved_to_ready_if_node_is_marked_as_retired)
-{
- _calc->setNodeRetired(true);
- addReady(_ready.bucket(1));
- addReady(_ready.bucket(2));
- addReady(_notReady.bucket(3));
- masterExecute([this]() {
- _bmj->recompute();
- _bmj->scanAndMove(4, 3);
- EXPECT_TRUE(_bmj->done());
- });
- sync();
- EXPECT_EQ(0u, docsMoved().size());
-}
-
-TEST_F(ControllerFixture, explicitly_active_not_ready_bucket_can_be_moved_to_ready_even_if_node_is_marked_as_retired)
-{
- _calc->setNodeRetired(true);
- addReady(_ready.bucket(1));
- addReady(_ready.bucket(2));
- addReady(_notReady.bucket(3));
- _bmj->recompute();
- masterExecute([this]() {
- activateBucket(_notReady.bucket(3));
- _bmj->scanAndMove(4, 3);
- EXPECT_TRUE(_bmj->done());
- });
- sync();
- ASSERT_EQ(2u, docsMoved().size());
- assertEqual(_notReady.bucket(3), _notReady.docs(3)[0], 2, 1, docsMoved()[0]);
- assertEqual(_notReady.bucket(3), _notReady.docs(3)[1], 2, 1, docsMoved()[1]);
- ASSERT_EQ(1u, bucketsModified().size());
- EXPECT_EQ(_notReady.bucket(3), bucketsModified()[0]);
-}
-
-
-TEST_F(ControllerFixture, require_that_notifyCreateBucket_causes_bucket_to_be_reconsidered_by_job)
-{
- EXPECT_TRUE(_bmj->done());
- addReady(_ready.bucket(1));
- addReady(_ready.bucket(2));
- runLoop();
- EXPECT_TRUE(_bmj->done());
- sync();
- EXPECT_TRUE(docsMoved().empty());
- EXPECT_TRUE(bucketsModified().empty());
- addReady(_notReady.bucket(3)); // bucket 3 now ready, no notify
- EXPECT_TRUE(_bmj->done()); // move job still believes work done
- sync();
- EXPECT_TRUE(bucketsModified().empty());
- masterExecute([this]() {
- _bmj->notifyCreateBucket(_bucketDB->takeGuard(), _notReady.bucket(3)); // reconsider bucket 3
- EXPECT_FALSE(_bmj->done());
- EXPECT_TRUE(bucketsModified().empty());
- });
- sync();
- EXPECT_TRUE(bucketsModified().empty());
- runLoop();
- EXPECT_TRUE(_bmj->done());
- sync();
-
- EXPECT_EQ(1u, bucketsModified().size());
- EXPECT_EQ(2u, docsMoved().size());
-}
-
-
-struct ResourceLimitControllerFixture : public ControllerFixture
-{
- ResourceLimitControllerFixture(double resourceLimitFactor = RESOURCE_LIMIT_FACTOR) :
- ControllerFixture(BlockableMaintenanceJobConfig(resourceLimitFactor, MAX_OUTSTANDING_OPS))
- {}
-
- void testJobStopping(DiskMemUsageState blockingUsageState) {
- // Bucket 1 should be moved
- addReady(_ready.bucket(2));
- _bmj->recompute();
- EXPECT_FALSE(_bmj->done());
- // Note: This depends on _bmj->run() moving max 1 documents
- EXPECT_FALSE(_bmj->run());
- sync();
- EXPECT_EQ(1u, docsMoved().size());
- EXPECT_EQ(0u, bucketsModified().size());
- // Notify that we've over limit
- _diskMemUsageNotifier.notify(blockingUsageState);
- EXPECT_TRUE(_bmj->run());
- sync();
- EXPECT_EQ(1u, docsMoved().size());
- EXPECT_EQ(0u, bucketsModified().size());
- // Notify that we've under limit
- _diskMemUsageNotifier.notify(DiskMemUsageState());
- EXPECT_FALSE(_bmj->run());
- sync();
- EXPECT_EQ(2u, docsMoved().size());
- EXPECT_EQ(0u, bucketsModified().size());
- }
-
- void testJobNotStopping(DiskMemUsageState blockingUsageState) {
- // Bucket 1 should be moved
- addReady(_ready.bucket(2));
- _bmj->recompute();
- EXPECT_FALSE(_bmj->done());
- // Note: This depends on _bmj->run() moving max 1 documents
- EXPECT_FALSE(_bmj->run());
- sync();
- EXPECT_EQ(1u, docsMoved().size());
- EXPECT_EQ(0u, bucketsModified().size());
- // Notify that we've over limit, but not over adjusted limit
- _diskMemUsageNotifier.notify(blockingUsageState);
- EXPECT_FALSE(_bmj->run());
- sync();
- EXPECT_EQ(2u, docsMoved().size());
- EXPECT_EQ(0u, bucketsModified().size());
- }
-};
-
-struct ResourceLimitControllerFixture_1_2 : public ResourceLimitControllerFixture {
- ResourceLimitControllerFixture_1_2() : ResourceLimitControllerFixture(1.2) {}
-};
-
-TEST_F(ResourceLimitControllerFixture, require_that_bucket_move_stops_when_disk_limit_is_reached)
-{
- testJobStopping(DiskMemUsageState(ResourceUsageState(0.7, 0.8), ResourceUsageState()));
-}
-
-TEST_F(ResourceLimitControllerFixture, require_that_bucket_move_stops_when_memory_limit_is_reached)
-{
- testJobStopping(DiskMemUsageState(ResourceUsageState(), ResourceUsageState(0.7, 0.8)));
-}
-
-TEST_F(ResourceLimitControllerFixture_1_2, require_that_bucket_move_uses_resource_limit_factor_for_disk_resource_limit)
-{
- testJobNotStopping(DiskMemUsageState(ResourceUsageState(0.7, 0.8), ResourceUsageState()));
-}
-
-TEST_F(ResourceLimitControllerFixture_1_2, require_that_bucket_move_uses_resource_limit_factor_for_memory_resource_limit)
-{
- testJobNotStopping(DiskMemUsageState(ResourceUsageState(), ResourceUsageState(0.7, 0.8)));
-}
-
-
-struct MaxOutstandingMoveOpsFixture : public ControllerFixtureBase
-{
- MaxOutstandingMoveOpsFixture(uint32_t maxOutstandingOps)
- : ControllerFixtureBase(BlockableMaintenanceJobConfig(RESOURCE_LIMIT_FACTOR, maxOutstandingOps), true)
- {
- _builder.createDocs(1, 1, 2);
- _builder.createDocs(2, 2, 3);
- _builder.createDocs(3, 3, 4);
- _builder.createDocs(4, 4, 5);
- _ready.insertDocs(_builder.getDocs());
- _builder.clearDocs();
- _builder.createDocs(11, 1, 2);
- _builder.createDocs(12, 2, 3);
- _builder.createDocs(13, 3, 4);
- _builder.createDocs(14, 4, 5);
- _notReady.insertDocs(_builder.getDocs());
- addReady(_ready.bucket(3));
- _bmj->recompute();
- }
-
- void assertRunToBlocked() {
- EXPECT_TRUE(_bmj->run()); // job becomes blocked as max outstanding limit is reached
- EXPECT_FALSE(_bmj->done());
- EXPECT_TRUE(_bmj->isBlocked());
- EXPECT_TRUE(_bmj->isBlocked(BlockedReason::OUTSTANDING_OPS));
- }
- void assertRunToNotBlocked() {
- EXPECT_FALSE(_bmj->run());
- EXPECT_FALSE(_bmj->done());
- EXPECT_FALSE(_bmj->isBlocked());
- }
- void assertRunToFinished() {
- EXPECT_TRUE(_bmj->run());
- EXPECT_TRUE(_bmj->done());
- EXPECT_FALSE(_bmj->isBlocked());
- }
- void assertDocsMoved(uint32_t expDocsMovedCnt, uint32_t expMoveContextsCnt) {
- EXPECT_EQ(expDocsMovedCnt, docsMoved().size());
- EXPECT_EQ(expMoveContextsCnt, _moveHandler._moveDoneContexts.size());
- }
- void unblockJob(uint32_t expRunnerCnt) {
- _moveHandler.clearMoveDoneContexts(); // unblocks job and try to execute it via runner
- EXPECT_EQ(expRunnerCnt, _runner.runCount);
- EXPECT_FALSE(_bmj->isBlocked());
- }
-};
-
-struct MaxOutstandingMoveOpsFixture_1 : public MaxOutstandingMoveOpsFixture {
- MaxOutstandingMoveOpsFixture_1() : MaxOutstandingMoveOpsFixture(1) {}
-};
-
-struct MaxOutstandingMoveOpsFixture_2 : public MaxOutstandingMoveOpsFixture {
- MaxOutstandingMoveOpsFixture_2() : MaxOutstandingMoveOpsFixture(2) {}
-};
-
-TEST_F(MaxOutstandingMoveOpsFixture_1, require_that_bucket_move_job_is_blocked_if_it_has_too_many_outstanding_move_operations_max_1)
-{
- assertRunToBlocked();
- sync();
- assertDocsMoved(1, 1);
- assertRunToBlocked();
- assertDocsMoved(1, 1);
-
- unblockJob(1);
- assertRunToBlocked();
- sync();
- assertDocsMoved(2, 1);
-
- unblockJob(2);
- assertRunToBlocked();
- sync();
- assertDocsMoved(3, 1);
-
- unblockJob(3);
- assertRunToFinished();
- sync();
- assertDocsMoved(3, 0);
-}
-
-TEST_F(MaxOutstandingMoveOpsFixture_2, require_that_bucket_move_job_is_blocked_if_it_has_too_many_outstanding_move_operations_max_2)
-{
- assertRunToNotBlocked();
- sync();
- assertDocsMoved(1, 1);
-
- assertRunToBlocked();
- sync();
- assertDocsMoved(2, 2);
-
- unblockJob(1);
- assertRunToFinished();
- sync();
- assertDocsMoved(3, 1);
-}
-
-GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/searchcore/src/tests/proton/documentdb/job_tracked_maintenance_job/job_tracked_maintenance_job_test.cpp b/searchcore/src/tests/proton/documentdb/job_tracked_maintenance_job/job_tracked_maintenance_job_test.cpp
index 4287a6b262a..8862a6d3bb2 100644
--- a/searchcore/src/tests/proton/documentdb/job_tracked_maintenance_job/job_tracked_maintenance_job_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/job_tracked_maintenance_job/job_tracked_maintenance_job_test.cpp
@@ -34,13 +34,11 @@ struct MyMaintenanceJob : public IBlockableMaintenanceJob
GateVector _runGates;
size_t _runIdx;
bool _blocked;
- bool _stopped;
MyMaintenanceJob(size_t numRuns)
: IBlockableMaintenanceJob("myjob", 10s, 20s),
_runGates(getGateVector(numRuns)),
_runIdx(0),
- _blocked(false),
- _stopped(false)
+ _blocked(false)
{}
void block() { setBlocked(BlockedReason::RESOURCE_LIMITS); }
void unBlock() { unBlock(BlockedReason::RESOURCE_LIMITS); }
@@ -51,7 +49,7 @@ struct MyMaintenanceJob : public IBlockableMaintenanceJob
_runGates[_runIdx++]->await(5s);
return _runIdx == _runGates.size();
}
- void onStop() override { _stopped = true; }
+ void onStop() override { }
};
struct Fixture
@@ -65,10 +63,10 @@ struct Fixture
size_t _runIdx;
ThreadStackExecutor _exec;
Fixture(size_t numRuns = 1)
- : _tracker(new SimpleJobTracker(1)),
- _job(new MyMaintenanceJob(numRuns)),
+ : _tracker(std::make_shared<SimpleJobTracker>(1)),
+ _job(std::make_unique<MyMaintenanceJob>(numRuns)),
_myJob(static_cast<MyMaintenanceJob *>(_job.get())),
- _trackedJob(new JobTrackedMaintenanceJob(_tracker, std::move(_job))),
+ _trackedJob(std::make_unique<JobTrackedMaintenanceJob>(_tracker, std::move(_job))),
_runRetval(false),
_runGates(getGateVector(numRuns)),
_runIdx(0),
@@ -144,9 +142,9 @@ TEST_F("require that block calls are sent to underlying jobs", Fixture)
TEST_F("require that stop calls are sent to underlying jobs", Fixture)
{
- EXPECT_FALSE(f._myJob->_stopped);
- f._trackedJob->onStop();
- EXPECT_TRUE(f._myJob->_stopped);
+ EXPECT_FALSE(f._myJob->stopped());
+ f._trackedJob->stop();
+ EXPECT_TRUE(f._myJob->stopped());
}
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.cpp b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.cpp
index cd305c51810..4b9d893eca5 100644
--- a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.cpp
+++ b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.cpp
@@ -163,13 +163,6 @@ MyStorer::startCommit(DoneCallback) {
return CommitResult();
}
-IFrozenBucketHandler::ExclusiveBucketGuard::UP
-MyFrozenBucketHandler::acquireExclusiveBucket(BucketId bucket) {
- return (_bucket == bucket)
- ? ExclusiveBucketGuard::UP()
- : std::make_unique<ExclusiveBucketGuard>(bucket);
-}
-
MyDocumentStore::MyDocumentStore()
: _readDoc(),
_readLid(0)
diff --git a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.h b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.h
index 5063aeff347..348499d937f 100644
--- a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.h
+++ b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.h
@@ -3,7 +3,6 @@
#pragma once
#include <vespa/searchcore/proton/server/i_document_scan_iterator.h>
-#include <vespa/searchcore/proton/server/ifrozenbuckethandler.h>
#include <vespa/searchcore/proton/server/imaintenancejobrunner.h>
#include <vespa/searchcore/proton/server/lid_space_compaction_handler.h>
#include <vespa/searchcore/proton/server/remove_operations_rate_tracker.h>
@@ -103,14 +102,6 @@ struct MyStorer : public IOperationStorer {
CommitResult startCommit(DoneCallback) override;
};
-struct MyFrozenBucketHandler : public IFrozenBucketHandler {
- BucketId _bucket;
- MyFrozenBucketHandler() : _bucket() {}
- ExclusiveBucketGuard::UP acquireExclusiveBucket(BucketId bucket) override;
- void addListener(IBucketFreezeListener *) override { }
- void removeListener(IBucketFreezeListener *) override { }
-};
-
struct MyFeedView : public test::DummyFeedView {
explicit MyFeedView(std::shared_ptr<const DocumentTypeRepo> repo)
: test::DummyFeedView(std::move(repo))
diff --git a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_compaction_test.cpp b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_compaction_test.cpp
index 688dd963f61..f1353a30887 100644
--- a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_compaction_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_compaction_test.cpp
@@ -4,12 +4,12 @@
using BlockedReason = IBlockableMaintenanceJob::BlockedReason;
-TEST_P(JobTest, handler_name_is_used_as_part_of_job_name)
+TEST_F(JobTest, handler_name_is_used_as_part_of_job_name)
{
EXPECT_EQ("lid_space_compaction.myhandler", _job->getName());
}
-TEST_P(JobTest, no_move_operation_is_created_if_lid_bloat_factor_is_below_limit)
+TEST_F(JobTest, no_move_operation_is_created_if_lid_bloat_factor_is_below_limit)
{
// 20% bloat < 30% allowed bloat
addStats(10, {1,3,4,5,6,7,9}, {{9,2}});
@@ -17,7 +17,7 @@ TEST_P(JobTest, no_move_operation_is_created_if_lid_bloat_factor_is_below_limit)
assertNoWorkDone();
}
-TEST_P(JobTest, no_move_operation_is_created_if_lid_bloat_is_below_limit)
+TEST_F(JobTest, no_move_operation_is_created_if_lid_bloat_is_below_limit)
{
init(3, 0.1);
// 20% bloat >= 10% allowed bloat BUT lid bloat (2) < allowed lid bloat (3)
@@ -26,7 +26,7 @@ TEST_P(JobTest, no_move_operation_is_created_if_lid_bloat_is_below_limit)
assertNoWorkDone();
}
-TEST_P(JobTest, no_move_operation_is_created_and_compaction_is_initiated)
+TEST_F(JobTest, no_move_operation_is_created_and_compaction_is_initiated)
{
// no documents to move: lowestFreeLid(7) > highestUsedLid(6)
addStats(10, {1,2,3,4,5,6}, {{6,7}});
@@ -36,14 +36,14 @@ TEST_P(JobTest, no_move_operation_is_created_and_compaction_is_initiated)
assertJobContext(0, 0, 0, 7, 1);
}
-TEST_P(JobTest, one_move_operation_is_created_and_compaction_is_initiated)
+TEST_F(JobTest, one_move_operation_is_created_and_compaction_is_initiated)
{
setupOneDocumentToCompact();
EXPECT_FALSE(run()); // scan
assertOneDocumentCompacted();
}
-TEST_P(JobTest, job_returns_false_when_multiple_move_operations_or_compaction_are_needed)
+TEST_F(JobTest, job_returns_false_when_multiple_move_operations_or_compaction_are_needed)
{
setupThreeDocumentsToCompact();
EXPECT_FALSE(run());
@@ -56,29 +56,7 @@ TEST_P(JobTest, job_returns_false_when_multiple_move_operations_or_compaction_ar
assertJobContext(4, 7, 3, 7, 1);
}
-TEST_P(JobTest, job_is_blocked_if_trying_to_move_document_for_frozen_bucket)
-{
- //TODO Remove test once we no longer use the frozen concept.
- if ( ! useBucketDB() ) {
- _frozenHandler._bucket = BUCKET_ID_1;
- EXPECT_FALSE(_job->isBlocked());
- addStats(10, {1, 3, 4, 5, 6, 9}, {{9, 2}}); // 30% bloat: try to move 9 -> 2
- addStats(0, 0, 0, 0);
-
- EXPECT_TRUE(run()); // bucket frozen
- assertNoWorkDone();
- EXPECT_TRUE(_job->isBlocked());
-
- _frozenHandler._bucket = BUCKET_ID_2;
- _job->unBlock(BlockedReason::FROZEN_BUCKET);
-
- EXPECT_FALSE(run()); // unblocked
- assertJobContext(2, 9, 1, 0, 0);
- EXPECT_FALSE(_job->isBlocked());
- }
-}
-
-TEST_P(JobTest, job_can_restart_documents_scan_if_lid_bloat_is_still_to_large)
+TEST_F(JobTest, job_can_restart_documents_scan_if_lid_bloat_is_still_to_large)
{
init(ALLOWED_LID_BLOAT, ALLOWED_LID_BLOAT_FACTOR);
addMultiStats(10, {{1,3,4,5,6,9},{1,2,4,5,6,8}},
@@ -98,7 +76,7 @@ TEST_P(JobTest, job_can_restart_documents_scan_if_lid_bloat_is_still_to_large)
assertJobContext(3, 8, 2, 7, 1);
}
-TEST_P(JobTest, held_lid_is_not_considered_free_and_blocks_job)
+TEST_F(JobTest, held_lid_is_not_considered_free_and_blocks_job)
{
// Lid 1 on hold or pendingHold, i.e. neither free nor used.
addMultiStats(3, {{2}}, {{2, 3}});
@@ -106,7 +84,7 @@ TEST_P(JobTest, held_lid_is_not_considered_free_and_blocks_job)
assertNoWorkDone();
}
-TEST_P(JobTest, held_lid_is_not_considered_free_with_only_compact)
+TEST_F(JobTest, held_lid_is_not_considered_free_with_only_compact)
{
// Lid 1 on hold or pendingHold, i.e. neither free nor used.
addMultiStats(10, {{2}}, {{2, 3}});
@@ -116,7 +94,7 @@ TEST_P(JobTest, held_lid_is_not_considered_free_with_only_compact)
assertJobContext(0, 0, 0, 3, 1);
}
-TEST_P(JobTest, held_lids_are_not_considered_free_with_one_move)
+TEST_F(JobTest, held_lids_are_not_considered_free_with_one_move)
{
// Lids 1,2,3 on hold or pendingHold, i.e. neither free nor used.
addMultiStats(10, {{5}}, {{5, 4}, {4, 5}});
@@ -126,7 +104,7 @@ TEST_P(JobTest, held_lids_are_not_considered_free_with_one_move)
assertJobContext(4, 5, 1, 5, 1);
}
-TEST_P(JobTest, resource_starvation_blocks_lid_space_compaction)
+TEST_F(JobTest, resource_starvation_blocks_lid_space_compaction)
{
setupOneDocumentToCompact();
_diskMemUsageNotifier.notify({{100, 0}, {100, 101}});
@@ -134,7 +112,7 @@ TEST_P(JobTest, resource_starvation_blocks_lid_space_compaction)
assertNoWorkDone();
}
-TEST_P(JobTest, ending_resource_starvation_resumes_lid_space_compaction)
+TEST_F(JobTest, ending_resource_starvation_resumes_lid_space_compaction)
{
setupOneDocumentToCompact();
_diskMemUsageNotifier.notify({{100, 0}, {100, 101}});
@@ -144,7 +122,7 @@ TEST_P(JobTest, ending_resource_starvation_resumes_lid_space_compaction)
assertOneDocumentCompacted();
}
-TEST_P(JobTest, resource_limit_factor_adjusts_limit)
+TEST_F(JobTest, resource_limit_factor_adjusts_limit)
{
init(ALLOWED_LID_BLOAT, ALLOWED_LID_BLOAT_FACTOR, 1.05);
setupOneDocumentToCompact();
@@ -153,21 +131,21 @@ TEST_P(JobTest, resource_limit_factor_adjusts_limit)
assertOneDocumentCompacted();
}
-TEST_P(JobTest, delay_is_set_based_on_interval_and_is_max_300_secs)
+TEST_F(JobTest, delay_is_set_based_on_interval_and_is_max_300_secs)
{
init_with_interval(301s);
EXPECT_EQ(300s, _job->getDelay());
EXPECT_EQ(301s, _job->getInterval());
}
-TEST_P(JobTest, delay_is_set_based_on_interval_and_can_be_less_than_300_secs)
+TEST_F(JobTest, delay_is_set_based_on_interval_and_can_be_less_than_300_secs)
{
init_with_interval(299s);
EXPECT_EQ(299s, _job->getDelay());
EXPECT_EQ(299s, _job->getInterval());
}
-TEST_P(JobTest, job_is_disabled_when_node_is_retired)
+TEST_F(JobTest, job_is_disabled_when_node_is_retired)
{
init_with_node_retired(true);
setupOneDocumentToCompact();
@@ -175,7 +153,7 @@ TEST_P(JobTest, job_is_disabled_when_node_is_retired)
assertNoWorkDone();
}
-TEST_P(JobTest, job_is_disabled_when_node_becomes_retired)
+TEST_F(JobTest, job_is_disabled_when_node_becomes_retired)
{
init_with_node_retired(false);
setupOneDocumentToCompact();
@@ -184,7 +162,7 @@ TEST_P(JobTest, job_is_disabled_when_node_becomes_retired)
assertNoWorkDone();
}
-TEST_P(JobTest, job_is_re_enabled_when_node_is_no_longer_retired)
+TEST_F(JobTest, job_is_re_enabled_when_node_is_no_longer_retired)
{
init_with_node_retired(true);
setupOneDocumentToCompact();
@@ -194,7 +172,7 @@ TEST_P(JobTest, job_is_re_enabled_when_node_is_no_longer_retired)
assertOneDocumentCompacted();
}
-TEST_P(JobDisabledByRemoveOpsTest, config_is_propagated_to_remove_operations_rate_tracker)
+TEST_F(JobDisabledByRemoveOpsTest, config_is_propagated_to_remove_operations_rate_tracker)
{
auto& remove_batch_tracker = _handler->_rm_listener->get_remove_batch_tracker();
EXPECT_EQ(vespalib::from_s(21.0), remove_batch_tracker.get_time_budget_per_op());
@@ -205,38 +183,38 @@ TEST_P(JobDisabledByRemoveOpsTest, config_is_propagated_to_remove_operations_rat
EXPECT_EQ(vespalib::from_s(20.0), remove_tracker.get_time_budget_window());
}
-TEST_P(JobDisabledByRemoveOpsTest, job_is_disabled_while_remove_batch_is_ongoing)
+TEST_F(JobDisabledByRemoveOpsTest, job_is_disabled_while_remove_batch_is_ongoing)
{
job_is_disabled_while_remove_ops_are_ongoing(true);
}
-TEST_P(JobDisabledByRemoveOpsTest, job_becomes_disabled_if_remove_batch_starts)
+TEST_F(JobDisabledByRemoveOpsTest, job_becomes_disabled_if_remove_batch_starts)
{
job_becomes_disabled_if_remove_ops_starts(true);
}
-TEST_P(JobDisabledByRemoveOpsTest, job_is_re_enabled_when_remove_batch_is_no_longer_ongoing)
+TEST_F(JobDisabledByRemoveOpsTest, job_is_re_enabled_when_remove_batch_is_no_longer_ongoing)
{
job_is_re_enabled_when_remove_ops_are_no_longer_ongoing(true);
}
-TEST_P(JobDisabledByRemoveOpsTest, job_is_disabled_while_removes_are_ongoing)
+TEST_F(JobDisabledByRemoveOpsTest, job_is_disabled_while_removes_are_ongoing)
{
job_is_disabled_while_remove_ops_are_ongoing(false);
}
-TEST_P(JobDisabledByRemoveOpsTest, job_becomes_disabled_if_removes_start)
+TEST_F(JobDisabledByRemoveOpsTest, job_becomes_disabled_if_removes_start)
{
job_becomes_disabled_if_remove_ops_starts(false);
}
-TEST_P(JobDisabledByRemoveOpsTest, job_is_re_enabled_when_removes_are_no_longer_ongoing)
+TEST_F(JobDisabledByRemoveOpsTest, job_is_re_enabled_when_removes_are_no_longer_ongoing)
{
job_is_re_enabled_when_remove_ops_are_no_longer_ongoing(false);
}
-TEST_P(MaxOutstandingJobTest, job_is_blocked_if_it_has_too_many_outstanding_move_operations_with_max_1)
+TEST_F(MaxOutstandingJobTest, job_is_blocked_if_it_has_too_many_outstanding_move_operations_with_max_1)
{
init(1);
setupThreeDocumentsToCompact();
@@ -259,7 +237,7 @@ TEST_P(MaxOutstandingJobTest, job_is_blocked_if_it_has_too_many_outstanding_move
assertJobContext(4, 7, 3, 7, 1);
}
-TEST_P(MaxOutstandingJobTest, job_is_blocked_if_it_has_too_many_outstanding_move_operations_with_max_2)
+TEST_F(MaxOutstandingJobTest, job_is_blocked_if_it_has_too_many_outstanding_move_operations_with_max_2)
{
init(2);
setupThreeDocumentsToCompact();
@@ -277,8 +255,4 @@ TEST_P(MaxOutstandingJobTest, job_is_blocked_if_it_has_too_many_outstanding_move
sync();
}
-VESPA_GTEST_INSTANTIATE_TEST_SUITE_P(bool, JobTest, ::testing::Values(false, true));
-VESPA_GTEST_INSTANTIATE_TEST_SUITE_P(bool, JobDisabledByRemoveOpsTest, ::testing::Values(false, true));
-VESPA_GTEST_INSTANTIATE_TEST_SUITE_P(bool, MaxOutstandingJobTest, ::testing::Values(false, true));
-
GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.cpp b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.cpp
index d394769c0ee..ef54aafcec2 100644
--- a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.cpp
+++ b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.cpp
@@ -2,7 +2,6 @@
#include "lid_space_jobtest.h"
#include <vespa/searchcore/proton/server/lid_space_compaction_job.h>
-#include <vespa/searchcore/proton/server/lid_space_compaction_job_take2.h>
#include <vespa/searchcore/proton/server/executorthreadingservice.h>
#include <vespa/persistence/dummyimpl/dummy_bucket_executor.h>
#include <vespa/vespalib/util/threadstackexecutor.h>
@@ -33,7 +32,6 @@ JobTestBase::JobTestBase()
_diskMemUsageNotifier(),
_handler(),
_storer(),
- _frozenHandler(),
_job()
{
init(ALLOWED_LID_BLOAT, ALLOWED_LID_BLOAT_FACTOR, RESOURCE_LIMIT_FACTOR, JOB_DELAY, false, MAX_OUTSTANDING_MOVE_OPS);
@@ -47,23 +45,18 @@ JobTestBase::init(uint32_t allowedLidBloat,
bool nodeRetired,
uint32_t maxOutstandingMoveOps)
{
- _handler = std::make_shared<MyHandler>(maxOutstandingMoveOps != MAX_OUTSTANDING_MOVE_OPS, useBucketDB());
+ _handler = std::make_shared<MyHandler>(maxOutstandingMoveOps != MAX_OUTSTANDING_MOVE_OPS, true);
DocumentDBLidSpaceCompactionConfig compactCfg(interval, allowedLidBloat, allowedLidBloatFactor,
- REMOVE_BATCH_BLOCK_RATE, REMOVE_BLOCK_RATE, false, useBucketDB());
+ REMOVE_BATCH_BLOCK_RATE, REMOVE_BLOCK_RATE, false);
BlockableMaintenanceJobConfig blockableCfg(resourceLimitFactor, maxOutstandingMoveOps);
_job.reset();
- if (useBucketDB()) {
- _singleExecutor = std::make_unique<vespalib::ThreadStackExecutor>(1, 0x10000);
- _master = std::make_unique<proton::ExecutorThreadService> (*_singleExecutor);
- _bucketExecutor = std::make_unique<storage::spi::dummy::DummyBucketExecutor>(4);
- _job = lidspace::CompactionJob::create(compactCfg, RetainGuard(_refCount), _handler, _storer, *_master, *_bucketExecutor,
- _diskMemUsageNotifier, blockableCfg, _clusterStateHandler, nodeRetired,
- document::BucketSpace::placeHolder());
- } else {
- _job = std::make_shared<LidSpaceCompactionJob>(compactCfg, _handler, _storer, _frozenHandler, _diskMemUsageNotifier,
- blockableCfg, _clusterStateHandler, nodeRetired);
- }
+ _singleExecutor = std::make_unique<vespalib::ThreadStackExecutor>(1, 0x10000);
+ _master = std::make_unique<proton::ExecutorThreadService> (*_singleExecutor);
+ _bucketExecutor = std::make_unique<storage::spi::dummy::DummyBucketExecutor>(4);
+ _job = lidspace::CompactionJob::create(compactCfg, RetainGuard(_refCount), _handler, _storer, *_master, *_bucketExecutor,
+ _diskMemUsageNotifier, blockableCfg, _clusterStateHandler, nodeRetired,
+ document::BucketSpace::placeHolder());
}
void
diff --git a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.h b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.h
index dde48a0a620..747e5c9faca 100644
--- a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.h
+++ b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_jobtest.h
@@ -8,7 +8,7 @@
#include <vespa/vespalib/gtest/gtest.h>
namespace storage::spi::dummy { class DummyBucketExecutor; }
-struct JobTestBase : public ::testing::TestWithParam<bool> {
+struct JobTestBase : public ::testing::Test {
MonitoredRefCount _refCount;
test::ClusterStateHandler _clusterStateHandler;
test::DiskMemUsageNotifier _diskMemUsageNotifier;
@@ -17,7 +17,6 @@ struct JobTestBase : public ::testing::TestWithParam<bool> {
std::unique_ptr<searchcorespi::index::IThreadService> _master;
std::shared_ptr<MyHandler> _handler;
MyStorer _storer;
- MyFrozenBucketHandler _frozenHandler;
std::shared_ptr<BlockableMaintenanceJob> _job;
JobTestBase();
~JobTestBase() override;
@@ -51,7 +50,6 @@ struct JobTestBase : public ::testing::TestWithParam<bool> {
void assertOneDocumentCompacted();
JobTestBase &setupThreeDocumentsToCompact();
void sync() const;
- bool useBucketDB() const { return GetParam(); }
};
struct JobTest : public JobTestBase {
diff --git a/searchcore/src/tests/proton/documentdb/maintenancecontroller/CMakeLists.txt b/searchcore/src/tests/proton/documentdb/maintenancecontroller/CMakeLists.txt
index 0864f444ecf..7a2f741c1be 100644
--- a/searchcore/src/tests/proton/documentdb/maintenancecontroller/CMakeLists.txt
+++ b/searchcore/src/tests/proton/documentdb/maintenancecontroller/CMakeLists.txt
@@ -18,20 +18,3 @@ vespa_add_executable(searchcore_maintenancecontroller_test_app TEST
searchlib_test
)
vespa_add_test(NAME searchcore_maintenancecontroller_test_app COMMAND searchcore_maintenancecontroller_test_app)
-vespa_add_executable(searchcore_frozenbucketsmap_test_app TEST
- SOURCES
- frozenbucketsmap_test.cpp
- DEPENDS
- searchcore_server
- searchcore_feedoperation
- searchcore_matching
- searchcore_attribute
- searchcore_documentmetastore
- searchcore_bucketdb
- searchcore_pcommon
- searchcore_persistenceengine
- searchcore_grouping
- searchcore_proton_metrics
- searchcore_fconfig
-)
-vespa_add_test(NAME searchcore_frozenbucketsmap_test_app COMMAND searchcore_frozenbucketsmap_test_app)
diff --git a/searchcore/src/tests/proton/documentdb/maintenancecontroller/frozenbucketsmap_test.cpp b/searchcore/src/tests/proton/documentdb/maintenancecontroller/frozenbucketsmap_test.cpp
deleted file mode 100644
index b9a7e967b74..00000000000
--- a/searchcore/src/tests/proton/documentdb/maintenancecontroller/frozenbucketsmap_test.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/log/log.h>
-LOG_SETUP("frozenbucketsmap_test");
-#include <vespa/vespalib/testkit/testapp.h>
-#include <vespa/searchcore/proton/server/frozenbuckets.h>
-#include <vespa/vespalib/util/threadstackexecutor.h>
-
-using namespace proton;
-using document::BucketId;
-
-class RWTask : public vespalib::Executor::Task {
-public:
- RWTask(FrozenBucketsMap & m, BucketId b, size_t count) : _b(b), _m(m), _count(count) {}
-protected:
- const BucketId _b;
- FrozenBucketsMap & _m;
- const size_t _count;
-};
-
-class Reader : public RWTask {
-public:
- Reader(FrozenBucketsMap & m, BucketId b, size_t count) :
- RWTask(m, b, count),
- numContended(0)
- {}
- ~Reader() {
- LOG(info, "NumContended = %ld", numContended);
- }
- void run() override {
- for (size_t i(0); i < _count; i++) {
- _m.freezeBucket(_b);
- if (_m.thawBucket(_b)) {
- numContended++;
- }
- }
- }
- size_t numContended;
-};
-
-class Writer : public RWTask {
-public:
- Writer(FrozenBucketsMap & m, BucketId b, size_t count) :
- RWTask(m, b, count),
- numFailed(0),
- numSucces(0)
- {}
- ~Writer() {
- EXPECT_EQUAL(_count, numSucces + numFailed);
- LOG(info, "NumSuccess = %ld, NumFailed = %ld", numSucces, numFailed);
- }
- void run() override {
- for (size_t i(0); i < _count; i++) {
- IFrozenBucketHandler::ExclusiveBucketGuard::UP guard = _m.acquireExclusiveBucket(_b);
- if (guard) {
- numSucces++;
- } else {
- numFailed++;
- }
- }
- }
- size_t numFailed;
- size_t numSucces;
-};
-
-TEST("Race reader and writer on FrozenBucketsMap") {
- FrozenBucketsMap m;
- BucketId a(8, 6);
- constexpr size_t NUM_READERS = 3;
- constexpr size_t NUM_WRITERS = 1;
- constexpr size_t READER_COUNT = 1000000;
- constexpr size_t WRITER_COUNT = 1000000;
- vespalib::ThreadStackExecutor executor(NUM_READERS+NUM_WRITERS, 0x10000);
- for (size_t i(0); i < NUM_READERS; i++) {
- EXPECT_FALSE(bool(executor.execute(std::make_unique<Reader>(m, a, READER_COUNT))));
- }
- for (size_t i(0); i < NUM_WRITERS; i++) {
- EXPECT_FALSE(bool(executor.execute(std::make_unique<Writer>(m, a, WRITER_COUNT))));
- }
- executor.sync();
-}
-
-TEST_MAIN()
-{
- TEST_RUN_ALL();
-}
diff --git a/searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp b/searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp
index b0c46dbd789..67a7ceeae34 100644
--- a/searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp
@@ -264,43 +264,18 @@ public:
bool waitIdle(vespalib::duration timeout);
};
-
-class MyFrozenBucket
-{
- IBucketFreezer &_freezer;
- BucketId _bucketId;
-public:
- typedef std::unique_ptr<MyFrozenBucket> UP;
-
- MyFrozenBucket(IBucketFreezer &freezer,
- const BucketId &bucketId)
- : _freezer(freezer),
- _bucketId(bucketId)
- {
- _freezer.freezeBucket(_bucketId);
- }
-
- ~MyFrozenBucket()
- {
- _freezer.thawBucket(_bucketId);
- }
-};
-
struct MySimpleJob : public BlockableMaintenanceJob
{
vespalib::CountDownLatch _latch;
size_t _runCnt;
- bool _stopped;
MySimpleJob(vespalib::duration delay,
vespalib::duration interval,
uint32_t finishCount)
: BlockableMaintenanceJob("my_job", delay, interval),
_latch(finishCount),
- _runCnt(0),
- _stopped(false)
- {
- }
+ _runCnt(0)
+ { }
void block() { setBlocked(BlockedReason::FROZEN_BUCKET); }
bool run() override {
LOG(info, "MySimpleJob::run()");
@@ -308,10 +283,6 @@ struct MySimpleJob : public BlockableMaintenanceJob
++_runCnt;
return true;
}
- void onStop() override {
- BlockableMaintenanceJob::onStop();
- _stopped = true;
- }
};
struct MySplitJob : public MySimpleJob
@@ -333,13 +304,11 @@ struct MySplitJob : public MySimpleJob
struct MyLongRunningJob : public BlockableMaintenanceJob
{
vespalib::Gate _firstRun;
- bool _stopped;
MyLongRunningJob(vespalib::duration delay,
vespalib::duration interval)
: BlockableMaintenanceJob("long_running_job", delay, interval),
- _firstRun(),
- _stopped(false)
+ _firstRun()
{
}
void block() { setBlocked(BlockedReason::FROZEN_BUCKET); }
@@ -348,10 +317,6 @@ struct MyLongRunningJob : public BlockableMaintenanceJob
usleep(10000);
return false;
}
- void onStop() override {
- BlockableMaintenanceJob::onStop();
- _stopped = true;
- }
};
using MyAttributeManager = test::MockAttributeManager;
@@ -405,7 +370,7 @@ public:
void removeDocs(const test::UserDocuments &docs, Timestamp timestamp);
void
- setPruneConfig(const DocumentDBPruneRemovedDocumentsConfig &pruneConfig)
+ setPruneConfig(const DocumentDBPruneConfig &pruneConfig)
{
auto newCfg = std::make_shared<DocumentDBMaintenanceConfig>(
pruneConfig,
@@ -855,7 +820,7 @@ void
MaintenanceControllerFixture::injectMaintenanceJobs()
{
if (_injectDefaultJobs) {
- MaintenanceJobsInjector::injectJobs(_mc, *_mcCfg, _bucketExecutor, _fh, _gsp, _fh, _mc,
+ MaintenanceJobsInjector::injectJobs(_mc, *_mcCfg, _bucketExecutor, _fh, _gsp, _fh,
_bucketCreateNotifier, makeBucketSpace(), _fh, _fh,
_bmc, _clusterStateHandler, _bucketHandler, _calc, _diskMemUsageNotifier,
_jobTrackers, _readyAttributeManager, _notReadyAttributeManager,
@@ -929,57 +894,6 @@ MaintenanceControllerFixture::removeDocs(const test::UserDocuments &docs, Timest
}
}
-TEST_F("require that bucket move controller is active", MaintenanceControllerFixture)
-{
- f._builder.createDocs(1, 1, 4); // 3 docs
- f._builder.createDocs(2, 4, 6); // 2 docs
- test::UserDocuments readyDocs(f._builder.getDocs());
- BucketId bucketId1(readyDocs.getBucket(1));
- BucketId bucketId2(readyDocs.getBucket(2));
- f.insertDocs(readyDocs, f._ready);
- f._builder.clearDocs();
- f._builder.createDocs(3, 1, 3); // 2 docs
- f._builder.createDocs(4, 3, 6); // 3 docs
- test::UserDocuments notReadyDocs(f._builder.getDocs());
- BucketId bucketId4(notReadyDocs.getBucket(4));
- f.insertDocs(notReadyDocs, f._notReady);
- f._builder.clearDocs();
- f.notifyClusterStateChanged();
- EXPECT_TRUE(f._executor.isIdle());
- EXPECT_EQUAL(5u, f._ready.getNumUsedLids());
- EXPECT_EQUAL(5u, f._ready.getDocumentCount());
- EXPECT_EQUAL(5u, f._notReady.getNumUsedLids());
- EXPECT_EQUAL(5u, f._notReady.getDocumentCount());
- f.startMaintenance();
- ASSERT_TRUE(f._executor.waitIdle(TIMEOUT));
- EXPECT_EQUAL(0u, f._ready.getNumUsedLids());
- EXPECT_EQUAL(0u, f._ready.getDocumentCount());
- EXPECT_EQUAL(10u, f._notReady.getNumUsedLids());
- EXPECT_EQUAL(10u, f._notReady.getDocumentCount());
- f._calc->addReady(bucketId1);
- f.notifyClusterStateChanged();
- ASSERT_TRUE(f._executor.waitIdle(TIMEOUT));
- EXPECT_EQUAL(3u, f._ready.getNumUsedLids());
- EXPECT_EQUAL(3u, f._ready.getDocumentCount());
- EXPECT_EQUAL(7u, f._notReady.getNumUsedLids());
- EXPECT_EQUAL(7u, f._notReady.getDocumentCount());
- MyFrozenBucket::UP frozen2(new MyFrozenBucket(f._mc, bucketId2));
- f._calc->addReady(bucketId2);
- f._calc->addReady(bucketId4);
- f.notifyClusterStateChanged();
- ASSERT_TRUE(f._executor.waitIdle(TIMEOUT));
- EXPECT_EQUAL(6u, f._ready.getNumUsedLids());
- EXPECT_EQUAL(6u, f._ready.getDocumentCount());
- EXPECT_EQUAL(4u, f._notReady.getNumUsedLids());
- EXPECT_EQUAL(4u, f._notReady.getDocumentCount());
- frozen2.reset();
- ASSERT_TRUE(f._executor.waitIdle(TIMEOUT));
- EXPECT_EQUAL(8u, f._ready.getNumUsedLids());
- EXPECT_EQUAL(8u, f._ready.getDocumentCount());
- EXPECT_EQUAL(2u, f._notReady.getNumUsedLids());
- EXPECT_EQUAL(2u, f._notReady.getDocumentCount());
-}
-
TEST_F("require that document pruner is active", MaintenanceControllerFixture)
{
uint64_t tshz = 1000000;
@@ -1004,23 +918,15 @@ TEST_F("require that document pruner is active", MaintenanceControllerFixture)
ASSERT_TRUE(f._executor.waitIdle(TIMEOUT));
EXPECT_EQUAL(10u, f._removed.getNumUsedLids());
EXPECT_EQUAL(10u, f._removed.getDocumentCount());
- MyFrozenBucket::UP frozen3(new MyFrozenBucket(f._mc, bucketId3));
- f.setPruneConfig(DocumentDBPruneRemovedDocumentsConfig(200ms, 900s));
- for (uint32_t i = 0; i < 6; ++i) {
- std::this_thread::sleep_for(100ms);
- ASSERT_TRUE(f._executor.waitIdle(TIMEOUT));
- if (f._removed.getNumUsedLids() != 10u)
- break;
- }
- EXPECT_EQUAL(10u, f._removed.getNumUsedLids());
- EXPECT_EQUAL(10u, f._removed.getDocumentCount());
- frozen3.reset();
+ f.setPruneConfig(DocumentDBPruneConfig(200ms, 900s));
for (uint32_t i = 0; i < 600; ++i) {
std::this_thread::sleep_for(100ms);
ASSERT_TRUE(f._executor.waitIdle(TIMEOUT));
if (f._removed.getNumUsedLids() != 10u)
break;
}
+ f._bucketExecutor.sync();
+ f._executor.sync();
EXPECT_EQUAL(5u, f._removed.getNumUsedLids());
EXPECT_EQUAL(5u, f._removed.getDocumentCount());
}
@@ -1054,74 +960,6 @@ TEST_F("require that periodic session prunings are scheduled",
ASSERT_TRUE(f._gsp.isInvoked);
}
-TEST_F("require that active bucket is not moved until de-activated", MaintenanceControllerFixture)
-{
- f._builder.createDocs(1, 1, 4); // 3 docs
- f._builder.createDocs(2, 4, 6); // 2 docs
- test::UserDocuments readyDocs(f._builder.getDocs());
- f.insertDocs(readyDocs, f._ready);
- f._builder.clearDocs();
- f._builder.createDocs(3, 1, 3); // 2 docs
- f._builder.createDocs(4, 3, 6); // 3 docs
- test::UserDocuments notReadyDocs(f._builder.getDocs());
- f.insertDocs(notReadyDocs, f._notReady);
- f._builder.clearDocs();
-
- // bucket 1 (active) should be moved from ready to not ready according to cluster state
- f._calc->addReady(readyDocs.getBucket(2));
- f._ready.setBucketState(readyDocs.getBucket(1), true);
-
- f.notifyClusterStateChanged();
- EXPECT_TRUE(f._executor.isIdle());
- EXPECT_EQUAL(5u, f._ready.getNumUsedLids());
- EXPECT_EQUAL(5u, f._ready.getDocumentCount());
- EXPECT_EQUAL(5u, f._notReady.getNumUsedLids());
- EXPECT_EQUAL(5u, f._notReady.getDocumentCount());
-
- f.startMaintenance();
- ASSERT_TRUE(f._executor.waitIdle(TIMEOUT));
- EXPECT_EQUAL(5u, f._ready.getNumUsedLids());
- EXPECT_EQUAL(5u, f._ready.getDocumentCount());
- EXPECT_EQUAL(5u, f._notReady.getNumUsedLids());
- EXPECT_EQUAL(5u, f._notReady.getDocumentCount());
-
- // de-activate bucket 1
- f._ready.setBucketState(readyDocs.getBucket(1), false);
- f.notifyBucketStateChanged(readyDocs.getBucket(1), BucketInfo::NOT_ACTIVE);
- ASSERT_TRUE(f._executor.waitIdle(TIMEOUT));
- EXPECT_EQUAL(2u, f._ready.getNumUsedLids());
- EXPECT_EQUAL(2u, f._ready.getDocumentCount());
- EXPECT_EQUAL(8u, f._notReady.getNumUsedLids());
- EXPECT_EQUAL(8u, f._notReady.getDocumentCount());
-
- // re-activate bucket 1
- f._ready.setBucketState(readyDocs.getBucket(1), true);
- f.notifyBucketStateChanged(readyDocs.getBucket(1), BucketInfo::ACTIVE);
- ASSERT_TRUE(f._executor.waitIdle(TIMEOUT));
- EXPECT_EQUAL(5u, f._ready.getNumUsedLids());
- EXPECT_EQUAL(5u, f._ready.getDocumentCount());
- EXPECT_EQUAL(5u, f._notReady.getNumUsedLids());
- EXPECT_EQUAL(5u, f._notReady.getDocumentCount());
-
- // de-activate bucket 1
- f._ready.setBucketState(readyDocs.getBucket(1), false);
- f.notifyBucketStateChanged(readyDocs.getBucket(1), BucketInfo::NOT_ACTIVE);
- ASSERT_TRUE(f._executor.waitIdle(TIMEOUT));
- EXPECT_EQUAL(2u, f._ready.getNumUsedLids());
- EXPECT_EQUAL(2u, f._ready.getDocumentCount());
- EXPECT_EQUAL(8u, f._notReady.getNumUsedLids());
- EXPECT_EQUAL(8u, f._notReady.getDocumentCount());
-
- // re-activate bucket 1
- f._ready.setBucketState(readyDocs.getBucket(1), true);
- f.notifyBucketStateChanged(readyDocs.getBucket(1), BucketInfo::ACTIVE);
- ASSERT_TRUE(f._executor.waitIdle(TIMEOUT));
- EXPECT_EQUAL(5u, f._ready.getNumUsedLids());
- EXPECT_EQUAL(5u, f._ready.getDocumentCount());
- EXPECT_EQUAL(5u, f._notReady.getNumUsedLids());
- EXPECT_EQUAL(5u, f._notReady.getDocumentCount());
-}
-
TEST_F("require that a simple maintenance job is executed", MaintenanceControllerFixture)
{
auto job = std::make_unique<MySimpleJob>(200ms, 200ms, 3);
@@ -1146,39 +984,6 @@ TEST_F("require that a split maintenance job is executed", MaintenanceController
EXPECT_EQUAL(0u, myJob._latch.getCount());
}
-TEST_F("require that a blocked job is unblocked and executed after thaw bucket",
- MaintenanceControllerFixture)
-{
- auto job1 = std::make_unique<MySimpleJob>(TIMEOUT * 2, TIMEOUT * 2, 1);
- MySimpleJob &myJob1 = *job1;
- auto job2 = std::make_unique< MySimpleJob>(TIMEOUT * 2, TIMEOUT * 2, 0);
- MySimpleJob &myJob2 = *job2;
- f._mc.registerJobInMasterThread(std::move(job1));
- f._mc.registerJobInMasterThread(std::move(job2));
- f._injectDefaultJobs = false;
- f.startMaintenance();
-
- myJob1.block();
- EXPECT_TRUE(myJob1.isBlocked());
- EXPECT_FALSE(myJob2.isBlocked());
- IBucketFreezer &ibf = f._mc;
- ibf.freezeBucket(BucketId(1));
- ibf.thawBucket(BucketId(1));
- EXPECT_TRUE(myJob1.isBlocked());
- ibf.freezeBucket(BucketId(1));
- IFrozenBucketHandler & fbh = f._mc;
- // This is to simulate contention, as that is required for notification on thawed buckets.
- EXPECT_FALSE(fbh.acquireExclusiveBucket(BucketId(1)));
- ibf.thawBucket(BucketId(1));
- f._executor.sync();
- EXPECT_FALSE(myJob1.isBlocked());
- EXPECT_FALSE(myJob2.isBlocked());
- bool done1 = myJob1._latch.await(TIMEOUT);
- EXPECT_TRUE(done1);
- std::this_thread::sleep_for(2s);
- EXPECT_EQUAL(0u, myJob2._runCnt);
-}
-
TEST_F("require that blocked jobs are not executed", MaintenanceControllerFixture)
{
auto job = std::make_unique<MySimpleJob>(200ms, 200ms, 0);
@@ -1219,39 +1024,6 @@ TEST_F("require that maintenance controller state list jobs", MaintenanceControl
EXPECT_EQUAL("long_running_job", allJobs[1]["name"].asString().make_string());
}
-TEST("Verify FrozenBucketsMap interface") {
- FrozenBucketsMap m;
- BucketId a(8, 6);
- {
- auto guard = m.acquireExclusiveBucket(a);
- EXPECT_TRUE(bool(guard));
- EXPECT_EQUAL(a, guard->getBucket());
- }
- m.freezeBucket(a);
- EXPECT_FALSE(m.thawBucket(a));
- m.freezeBucket(a);
- {
- auto guard = m.acquireExclusiveBucket(a);
- EXPECT_FALSE(bool(guard));
- }
- EXPECT_TRUE(m.thawBucket(a));
- m.freezeBucket(a);
- m.freezeBucket(a);
- m.freezeBucket(a);
- {
- auto guard = m.acquireExclusiveBucket(a);
- EXPECT_FALSE(bool(guard));
- }
- EXPECT_FALSE(m.thawBucket(a));
- EXPECT_FALSE(m.thawBucket(a));
- EXPECT_TRUE(m.thawBucket(a));
- {
- auto guard = m.acquireExclusiveBucket(a);
- EXPECT_TRUE(bool(guard));
- EXPECT_EQUAL(a, guard->getBucket());
- }
-}
-
const MaintenanceJobRunner *
findJob(const MaintenanceController::JobList &jobs, const vespalib::string &jobName)
{
@@ -1308,7 +1080,7 @@ TEST_F("require that maintenance jobs are run by correct executor", MaintenanceC
void
assertPruneRemovedDocumentsConfig(vespalib::duration expDelay, vespalib::duration expInterval, vespalib::duration interval, MaintenanceControllerFixture &f)
{
- f.setPruneConfig(DocumentDBPruneRemovedDocumentsConfig(interval, 1000s));
+ f.setPruneConfig(DocumentDBPruneConfig(interval, 1000s));
const auto *job = findJob(f._mc.getJobList(), "prune_removed_documents.searchdocument");
EXPECT_EQUAL(expDelay, job->getJob().getDelay());
EXPECT_EQUAL(expInterval, job->getJob().getInterval());
diff --git a/searchcore/src/tests/proton/documentdb/move_operation_limiter/move_operation_limiter_test.cpp b/searchcore/src/tests/proton/documentdb/move_operation_limiter/move_operation_limiter_test.cpp
index 1f152f4a257..8466a016fc3 100644
--- a/searchcore/src/tests/proton/documentdb/move_operation_limiter/move_operation_limiter_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/move_operation_limiter/move_operation_limiter_test.cpp
@@ -12,11 +12,9 @@ using namespace proton;
struct MyBlockableMaintenanceJob : public IBlockableMaintenanceJob {
bool blocked;
- bool stopped;
MyBlockableMaintenanceJob()
: IBlockableMaintenanceJob("my_job", 1s, 1s),
- blocked(false),
- stopped(false)
+ blocked(false)
{}
void setBlocked(BlockedReason reason) override {
ASSERT_TRUE(reason == BlockedReason::OUTSTANDING_OPS);
@@ -29,7 +27,7 @@ struct MyBlockableMaintenanceJob : public IBlockableMaintenanceJob {
blocked = false;
}
bool run() override { return true; }
- void onStop() override { stopped = true; }
+ void onStop() override { }
};
struct Fixture {
diff --git a/searchcore/src/tests/proton/persistenceengine/persistence_handler_map/persistence_handler_map_test.cpp b/searchcore/src/tests/proton/persistenceengine/persistence_handler_map/persistence_handler_map_test.cpp
index 8eace5ac657..9f6b82bb277 100644
--- a/searchcore/src/tests/proton/persistenceengine/persistence_handler_map/persistence_handler_map_test.cpp
+++ b/searchcore/src/tests/proton/persistenceengine/persistence_handler_map/persistence_handler_map_test.cpp
@@ -29,7 +29,6 @@ struct DummyPersistenceHandler : public IPersistenceHandler {
void handleJoin(FeedToken, const storage::spi::Bucket &, const storage::spi::Bucket &, const storage::spi::Bucket &) override {}
RetrieversSP getDocumentRetrievers(storage::spi::ReadConsistency) override { return RetrieversSP(); }
- BucketGuard::UP lockBucket(const storage::spi::Bucket &) override { return BucketGuard::UP(); }
void handleListActiveBuckets(IBucketIdListResultHandler &) override {}
void handlePopulateActiveBuckets(document::BucketId::List &, IGenericResultHandler &) override {}
};
diff --git a/searchcore/src/tests/proton/persistenceengine/persistenceengine_test.cpp b/searchcore/src/tests/proton/persistenceengine/persistenceengine_test.cpp
index 90e934bd823..f1e0306a660 100644
--- a/searchcore/src/tests/proton/persistenceengine/persistenceengine_test.cpp
+++ b/searchcore/src/tests/proton/persistenceengine/persistenceengine_test.cpp
@@ -11,9 +11,9 @@
#include <vespa/document/update/assignvalueupdate.h>
#include <vespa/persistence/spi/documentselection.h>
#include <vespa/persistence/spi/test.h>
-#include <vespa/searchcore/proton/persistenceengine/bucket_guard.h>
#include <vespa/searchcore/proton/persistenceengine/ipersistenceengineowner.h>
#include <vespa/searchcore/proton/persistenceengine/persistenceengine.h>
+#include <vespa/searchcore/proton/server/ibucketfreezer.h>
#include <vespa/searchcore/proton/test/disk_mem_usage_notifier.h>
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vdslib/state/clusterstate.h>
@@ -268,10 +268,6 @@ struct MyHandler : public IPersistenceHandler, IBucketFreezer {
return ret;
}
- BucketGuard::UP lockBucket(const storage::spi::Bucket &b) override {
- return std::make_unique<BucketGuard>(b.getBucketId(), *this);
- }
-
void handleListActiveBuckets(IBucketIdListResultHandler &resultHandler) override {
BucketIdListResult::List list;
resultHandler.handle(BucketIdListResult(list));
@@ -668,17 +664,6 @@ TEST_F("require that get is sent to all handlers", SimpleFixture) {
EXPECT_EQUAL(docId1, f.hset.handler2.lastDocId);
}
-TEST_F("require that get freezes the bucket", SimpleFixture) {
- EXPECT_FALSE(f.hset.handler1.wasFrozen(bucket1));
- EXPECT_FALSE(f.hset.handler2.wasFrozen(bucket1));
- Context context(storage::spi::Priority(0), storage::spi::Trace::TraceLevel(0));
- f.engine.get(bucket1, document::AllFields(), docId1, context);
- EXPECT_TRUE(f.hset.handler1.wasFrozen(bucket1));
- EXPECT_TRUE(f.hset.handler2.wasFrozen(bucket1));
- EXPECT_FALSE(f.hset.handler1.isFrozen(bucket1));
- EXPECT_FALSE(f.hset.handler2.isFrozen(bucket1));
-}
-
TEST_F("require that get returns the first document found", SimpleFixture) {
f.hset.handler1.setDocument(*doc1, tstamp1);
f.hset.handler2.setDocument(*doc2, tstamp2);
@@ -773,20 +758,6 @@ TEST_F("require that destroyIterator prevents iteration", SimpleFixture) {
EXPECT_EQUAL(msg_prefix, it_result.getErrorMessage().substr(0, msg_prefix.size()));
}
-TEST_F("require that buckets are frozen during iterator life", SimpleFixture) {
- EXPECT_FALSE(f.hset.handler1.isFrozen(bucket1));
- EXPECT_FALSE(f.hset.handler2.isFrozen(bucket1));
- Context context(storage::spi::Priority(0), storage::spi::Trace::TraceLevel(0));
- CreateIteratorResult create_result =
- f.engine.createIterator(bucket1, std::make_shared<document::AllFields>(), selection,
- storage::spi::NEWEST_DOCUMENT_ONLY, context);
- EXPECT_TRUE(f.hset.handler1.isFrozen(bucket1));
- EXPECT_TRUE(f.hset.handler2.isFrozen(bucket1));
- f.engine.destroyIterator(create_result.getIteratorId(), context);
- EXPECT_FALSE(f.hset.handler1.isFrozen(bucket1));
- EXPECT_FALSE(f.hset.handler2.isFrozen(bucket1));
-}
-
TEST_F("require that multiple bucket spaces works", SimpleFixture(altBucketSpace)) {
f.hset.prepareListBuckets();
TEST_DO(assertBucketList(f.engine, makeBucketSpace(), { bckId1, bckId2 }));
diff --git a/searchcore/src/vespa/searchcore/config/CMakeLists.txt b/searchcore/src/vespa/searchcore/config/CMakeLists.txt
index 915ab147978..a616b05e9c2 100644
--- a/searchcore/src/vespa/searchcore/config/CMakeLists.txt
+++ b/searchcore/src/vespa/searchcore/config/CMakeLists.txt
@@ -9,6 +9,8 @@ vespa_generate_config(searchcore_fconfig proton.def)
install_config_definition(proton.def vespa.config.search.core.proton.def)
vespa_generate_config(searchcore_fconfig ranking-constants.def)
install_config_definition(ranking-constants.def vespa.config.search.core.ranking-constants.def)
+vespa_generate_config(searchcore_fconfig ranking-expressions.def)
+install_config_definition(ranking-expressions.def vespa.config.search.core.ranking-expressions.def)
vespa_generate_config(searchcore_fconfig onnx-models.def)
install_config_definition(onnx-models.def vespa.config.search.core.onnx-models.def)
vespa_generate_config(searchcore_fconfig hwinfo.def)
diff --git a/searchcore/src/vespa/searchcore/config/proton.def b/searchcore/src/vespa/searchcore/config/proton.def
index ec7752368e9..3b94bb9984e 100644
--- a/searchcore/src/vespa/searchcore/config/proton.def
+++ b/searchcore/src/vespa/searchcore/config/proton.def
@@ -319,7 +319,7 @@ documentdb[].allocation.amortizecount int default=10000
documentdb[].allocation.multivaluegrowfactor double default=0.2
## The ratio of used bytes that can be dead before attempting to perform compaction.
-documentdb[].allocation.max_dead_bytes_ratio double default=0.15
+documentdb[].allocation.max_dead_bytes_ratio double default=0.05
## The ratio of used address space that can be dead before attempting to perform compaction.
documentdb[].allocation.max_dead_address_space_ratio double default=0.2
@@ -352,6 +352,9 @@ pruneremoveddocumentsinterval double default=0.0
## Default value is 2 weeks (1209600 seconds).
pruneremoveddocumentsage double default=1209600.0
+## Set to true to enable bucket locking via content layer
+pruneremoveddocuments.usebucketexecutor bool default=true
+
## Minimum size of packets to compress (0 means no compression)
##
packetcompresslimit int default=1024
@@ -413,13 +416,15 @@ lidspacecompaction.removebatchblockrate double default=0.5
lidspacecompaction.removeblockrate double default=100.0
## Set to true to enable bucket locking via content layer
-lidspacecompaction.usebucketexecutor bool default=false
+## DEPRECATED and unused
+lidspacecompaction.usebucketexecutor bool default=true
## Maximum docs to move in single operation per bucket
bucketmove.maxdocstomoveperbucket int default=1
## Set to true to enable bucket locking via content layer
-bucketmove.usebucketexecutor bool default=false
+## DEPRECATED and unused
+bucketmove.usebucketexecutor bool default=true
## This is the maximum value visibilitydelay you can have.
## A to higher value here will cost more memory while not improving too much.
@@ -515,7 +520,7 @@ maintenancejobs.resourcelimitfactor double default = 1.05
##
## The job is unblocked (and executed again) when this goes under the limit again.
## Currently used by 'lid_space_compaction' job.
-maintenancejobs.maxoutstandingmoveops int default=10
+maintenancejobs.maxoutstandingmoveops int default=100
## Controls the type of bucket checksum used. Do not change unless
## in depth understanding is present.
diff --git a/searchcore/src/vespa/searchcore/config/ranking-expressions.def b/searchcore/src/vespa/searchcore/config/ranking-expressions.def
new file mode 100644
index 00000000000..c2e20dc5c94
--- /dev/null
+++ b/searchcore/src/vespa/searchcore/config/ranking-expressions.def
@@ -0,0 +1,5 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+namespace=vespa.config.search.core
+
+expression[].name string
+expression[].fileref file
diff --git a/searchcore/src/vespa/searchcore/proton/feedoperation/pruneremoveddocumentsoperation.h b/searchcore/src/vespa/searchcore/proton/feedoperation/pruneremoveddocumentsoperation.h
index 1f813c641b9..dc10812062e 100644
--- a/searchcore/src/vespa/searchcore/proton/feedoperation/pruneremoveddocumentsoperation.h
+++ b/searchcore/src/vespa/searchcore/proton/feedoperation/pruneremoveddocumentsoperation.h
@@ -15,36 +15,22 @@ public:
typedef std::unique_ptr<PruneRemovedDocumentsOperation> UP;
PruneRemovedDocumentsOperation();
-
- PruneRemovedDocumentsOperation(search::DocumentIdT docIdLimit,
- uint32_t subDbId);
-
- virtual
- ~PruneRemovedDocumentsOperation()
- {
- }
+ PruneRemovedDocumentsOperation(search::DocumentIdT docIdLimit, uint32_t subDbId);
uint32_t getSubDbId() const { return _subDbId; }
- void setLidsToRemove(const LidVectorContext::SP &lidsToRemove)
- {
+ void setLidsToRemove(const LidVectorContext::SP &lidsToRemove) {
RemoveDocumentsOperation::setLidsToRemove(_subDbId, lidsToRemove);
}
const LidVectorContext::SP
- getLidsToRemove() const
- {
+ getLidsToRemove() const {
return RemoveDocumentsOperation::getLidsToRemove(_subDbId);
}
- virtual void
- serialize(vespalib::nbostream &os) const override;
-
- virtual void
- deserialize(vespalib::nbostream &is,
- const document::DocumentTypeRepo &repo) override;
-
- virtual vespalib::string toString() const override;
+ void serialize(vespalib::nbostream &os) const override;
+ void deserialize(vespalib::nbostream &is, const document::DocumentTypeRepo &repo) override;
+ vespalib::string toString() const override;
};
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/persistenceengine/bucket_guard.h b/searchcore/src/vespa/searchcore/proton/persistenceengine/bucket_guard.h
deleted file mode 100644
index 7eb864137a1..00000000000
--- a/searchcore/src/vespa/searchcore/proton/persistenceengine/bucket_guard.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/document/bucket/bucketid.h>
-#include <memory>
-#include <vespa/searchcore/proton/server/ibucketfreezer.h>
-
-namespace proton {
-
-class BucketGuard {
- document::BucketId _bucket;
- IBucketFreezer &_freezer;
-
-public:
- typedef std::unique_ptr<BucketGuard> UP;
- BucketGuard(const BucketGuard &) = delete;
- BucketGuard & operator=(const BucketGuard &) = delete;
- BucketGuard(BucketGuard &&) = delete;
- BucketGuard & operator=(BucketGuard &&) = delete;
-
- BucketGuard(document::BucketId bucket, IBucketFreezer &freezer)
- : _bucket(bucket),
- _freezer(freezer)
- {
- freezer.freezeBucket(bucket);
- }
-
- ~BucketGuard() {
- _freezer.thawBucket(_bucket);
- }
-};
-
-} // namespace proton
-
diff --git a/searchcore/src/vespa/searchcore/proton/persistenceengine/ipersistencehandler.h b/searchcore/src/vespa/searchcore/proton/persistenceengine/ipersistencehandler.h
index 1af6c2e60bc..314a87a150d 100644
--- a/searchcore/src/vespa/searchcore/proton/persistenceengine/ipersistencehandler.h
+++ b/searchcore/src/vespa/searchcore/proton/persistenceengine/ipersistencehandler.h
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include "bucket_guard.h"
#include "i_document_retriever.h"
#include "resulthandler.h"
#include <vespa/searchcore/proton/common/feedtoken.h>
@@ -68,7 +67,6 @@ public:
const storage::spi::Bucket &target1, const storage::spi::Bucket &target2) = 0;
virtual RetrieversSP getDocumentRetrievers(storage::spi::ReadConsistency consistency) = 0;
- virtual BucketGuard::UP lockBucket(const storage::spi::Bucket &bucket) = 0;
virtual void handleListActiveBuckets(IBucketIdListResultHandler &resultHandler) = 0;
diff --git a/searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.cpp b/searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.cpp
index 31acf183989..6fccd66e50a 100644
--- a/searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.cpp
+++ b/searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.cpp
@@ -417,7 +417,6 @@ PersistenceEngine::get(const Bucket& b, const document::FieldSet& fields, const
HandlerSnapshot snapshot = getHandlerSnapshot(rguard, b.getBucketSpace());
for (PersistenceHandlerSequence & handlers = snapshot.handlers(); handlers.valid(); handlers.next()) {
- BucketGuard::UP bucket_guard = handlers.get()->lockBucket(b);
IPersistenceHandler::RetrieversSP retrievers = handlers.get()->getDocumentRetrievers(context.getReadConsistency());
for (size_t i = 0; i < retrievers->size(); ++i) {
IDocumentRetriever &retriever = *(*retrievers)[i];
@@ -450,9 +449,7 @@ PersistenceEngine::createIterator(const Bucket &bucket, FieldSetSP fields, const
auto entry = std::make_unique<IteratorEntry>(context.getReadConsistency(), bucket, std::move(fields), selection,
versions, _defaultSerializedSize, _ignoreMaxBytes);
- entry->bucket_guards.reserve(snapshot.size());
for (PersistenceHandlerSequence & handlers = snapshot.handlers(); handlers.valid(); handlers.next()) {
- entry->bucket_guards.push_back(handlers.get()->lockBucket(bucket));
IPersistenceHandler::RetrieversSP retrievers = handlers.get()->getDocumentRetrievers(context.getReadConsistency());
for (const auto & retriever : *retrievers) {
entry->it.add(retriever);
diff --git a/searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.h b/searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.h
index ac7a9847873..041b031478c 100644
--- a/searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.h
+++ b/searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.h
@@ -48,13 +48,11 @@ private:
PersistenceHandlerSequence handler_sequence;
DocumentIterator it;
bool in_use;
- std::vector<BucketGuard::UP> bucket_guards;
IteratorEntry(storage::spi::ReadConsistency readConsistency, const Bucket &b, FieldSetSP f,
const Selection &s, IncludedVersions v, ssize_t defaultSerializedSize, bool ignoreMaxBytes)
: handler_sequence(),
it(b, std::move(f), s, v, defaultSerializedSize, ignoreMaxBytes, readConsistency),
- in_use(false),
- bucket_guards() {}
+ in_use(false) {}
};
struct BucketSpaceHash {
std::size_t operator() (const document::BucketSpace &bucketSpace) const { return bucketSpace.getId(); }
diff --git a/searchcore/src/vespa/searchcore/proton/server/CMakeLists.txt b/searchcore/src/vespa/searchcore/proton/server/CMakeLists.txt
index 86d238c7554..d2ebb7a58a2 100644
--- a/searchcore/src/vespa/searchcore/proton/server/CMakeLists.txt
+++ b/searchcore/src/vespa/searchcore/proton/server/CMakeLists.txt
@@ -6,7 +6,6 @@ vespa_add_library(searchcore_server STATIC
bootstrapconfigmanager.cpp
buckethandler.cpp
bucketmovejob.cpp
- bucketmovejobv2.cpp
clusterstatehandler.cpp
combiningfeedview.cpp
ddbstate.cpp
@@ -51,7 +50,6 @@ vespa_add_library(searchcore_server STATIC
flushhandlerproxy.cpp
forcecommitcontext.cpp
forcecommitdonetask.cpp
- frozenbuckets.cpp
health_adapter.cpp
heart_beat_job.cpp
idocumentdbowner.cpp
@@ -59,8 +57,6 @@ vespa_add_library(searchcore_server STATIC
job_tracked_maintenance_job.cpp
lid_space_compaction_handler.cpp
lid_space_compaction_job.cpp
- lid_space_compaction_job_base.cpp
- lid_space_compaction_job_take2.cpp
maintenance_controller_explorer.cpp
maintenance_jobs_injector.cpp
maintenancecontroller.cpp
diff --git a/searchcore/src/vespa/searchcore/proton/server/bucketmovejob.cpp b/searchcore/src/vespa/searchcore/proton/server/bucketmovejob.cpp
index 1ca4c307bfb..6aa78276598 100644
--- a/searchcore/src/vespa/searchcore/proton/server/bucketmovejob.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/bucketmovejob.cpp
@@ -4,18 +4,28 @@
#include "imaintenancejobrunner.h"
#include "ibucketstatechangednotifier.h"
#include "iclusterstatechangednotifier.h"
-#include "maintenancedocumentsubdb.h"
#include "i_disk_mem_usage_notifier.h"
#include "ibucketmodifiedhandler.h"
#include "move_operation_limiter.h"
+#include "document_db_maintenance_config.h"
+#include <vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h>
#include <vespa/searchcore/proton/bucketdb/i_bucket_create_notifier.h>
+#include <vespa/searchcore/proton/feedoperation/moveoperation.h>
#include <vespa/searchcore/proton/documentmetastore/i_document_meta_store.h>
+#include <vespa/searchcorespi/index/i_thread_service.h>
+#include <vespa/persistence/spi/bucket_tasks.h>
+#include <vespa/vespalib/util/destructor_callbacks.h>
+#include <vespa/vespalib/util/lambdatask.h>
+#include <thread>
#include <vespa/log/log.h>
LOG_SETUP(".proton.server.bucketmovejob");
using document::BucketId;
using storage::spi::BucketInfo;
+using storage::spi::Bucket;
+using proton::bucketdb::BucketMover;
+using vespalib::makeLambdaTask;
using vespalib::Trinary;
namespace proton {
@@ -32,298 +42,349 @@ toStr(Trinary v) {
return (v == Trinary::True) ? "T" : ((v == Trinary::False) ? "F" : "U");
}
-}
-
-void
-BucketMoveJob::checkBucket(const BucketId &bucket,
- ScanIterator &itr,
- DocumentBucketMover &mover,
- IFrozenBucketHandler::ExclusiveBucketGuard::UP & bucketGuard)
-{
- const bool hasReadyDocs = itr.hasReadyBucketDocs();
- const bool hasNotReadyDocs = itr.hasNotReadyBucketDocs();
- if (!hasReadyDocs && !hasNotReadyDocs) {
- return; // No documents for bucket in ready or notready subdbs
- }
- const bool isActive = itr.isActive();
- // No point in moving buckets when node is retired and everything will be deleted soon.
- // However, allow moving of explicitly activated buckets, as this implies a lack of other good replicas.
- if (_calc->nodeRetired() && !isActive) {
- return;
- }
- Trinary shouldBeReady = _calc->shouldBeReady(document::Bucket(_bucketSpace, bucket));
- if (shouldBeReady == vespalib::Trinary::Undefined) {
- return;
- }
- const bool wantReady = (shouldBeReady == Trinary::True) || isActive;
- LOG(spam, "checkBucket(): bucket(%s), shouldBeReady(%s), active(%s)",
- bucket.toString().c_str(), toStr(shouldBeReady), toStr(isActive));
- if (wantReady) {
- if (!hasNotReadyDocs)
- return; // No notready bucket to make ready
- } else {
- if (!hasReadyDocs)
- return; // No ready bucket to make notready
- }
- bucketGuard = _frozenBuckets.acquireExclusiveBucket(bucket);
- if ( ! bucketGuard ) {
- LOG(debug, "checkBucket(): delay frozen bucket: (%s)", bucket.toString().c_str());
- _delayedBucketsFrozen.insert(bucket);
- _delayedBuckets.erase(bucket);
- return;
- }
- const MaintenanceDocumentSubDB &source(wantReady ? _notReady : _ready);
- const MaintenanceDocumentSubDB &target(wantReady ? _ready : _notReady);
- LOG(debug, "checkBucket(): mover.setupForBucket(%s, source:%u, target:%u)",
- bucket.toString().c_str(), source.sub_db_id(), target.sub_db_id());
- mover.setupForBucket(bucket, &source, target.sub_db_id(), _moveHandler);
-}
-
-BucketMoveJob::ScanResult
-BucketMoveJob::scanBuckets(size_t maxBucketsToScan, IFrozenBucketHandler::ExclusiveBucketGuard::UP & bucketGuard)
-{
- size_t bucketsScanned = 0;
- bool passDone = false;
- ScanIterator itr(_ready.meta_store()->getBucketDB().takeGuard(),
- _scanPass, _scanPos._lastBucket, _endPos._lastBucket);
- BucketId bucket;
- for (; itr.valid() &&
- bucketsScanned < maxBucketsToScan && _mover.bucketDone();
- ++itr, ++bucketsScanned)
- {
- bucket = itr.getBucket();
- _scanPos._lastBucket = bucket;
- checkBucket(bucket, itr, _mover, bucketGuard);
- }
- if (!itr.valid()) {
- passDone = true;
- _scanPos._lastBucket = BucketId();
- }
- return ScanResult(bucketsScanned, passDone);
-}
-
-bool
-BucketMoveJob::moveDocuments(DocumentBucketMover &mover,
- size_t maxDocsToMove,
- IFrozenBucketHandler::ExclusiveBucketGuard::UP & bucketGuard)
-{
- if ( ! bucketGuard ) {
- bucketGuard = _frozenBuckets.acquireExclusiveBucket(mover.getBucket());
- if (! bucketGuard) {
- maybeDelayMover(mover, mover.getBucket());
- return true;
- }
- }
- assert(mover.getBucket() == bucketGuard->getBucket());
- if ( ! mover.moveDocuments(maxDocsToMove)) {
- return false;
- }
- if (mover.bucketDone()) {
- _modifiedHandler.notifyBucketModified(mover.getBucket());
- }
- return true;
-}
-
-namespace {
-
bool
blockedDueToClusterState(const std::shared_ptr<IBucketStateCalculator> &calc)
{
- bool clusterUp = calc.get() != nullptr && calc->clusterUp();
- bool nodeUp = calc.get() != nullptr && calc->nodeUp();
- bool nodeInitializing = calc.get() != nullptr && calc->nodeInitializing();
+ bool clusterUp = calc && calc->clusterUp();
+ bool nodeUp = calc && calc->nodeUp();
+ bool nodeInitializing = calc && calc->nodeInitializing();
return !(clusterUp && nodeUp && !nodeInitializing);
}
}
-BucketMoveJob::
-BucketMoveJob(const std::shared_ptr<IBucketStateCalculator> &calc,
- IDocumentMoveHandler &moveHandler,
- IBucketModifiedHandler &modifiedHandler,
- const MaintenanceDocumentSubDB &ready,
- const MaintenanceDocumentSubDB &notReady,
- IFrozenBucketHandler &frozenBuckets,
- bucketdb::IBucketCreateNotifier &bucketCreateNotifier,
- IClusterStateChangedNotifier &clusterStateChangedNotifier,
- IBucketStateChangedNotifier &bucketStateChangedNotifier,
- IDiskMemUsageNotifier &diskMemUsageNotifier,
- const BlockableMaintenanceJobConfig &blockableConfig,
- const vespalib::string &docTypeName,
- document::BucketSpace bucketSpace)
+BucketMoveJob::BucketMoveJob(const std::shared_ptr<IBucketStateCalculator> &calc,
+ RetainGuard dbRetainer,
+ IDocumentMoveHandler &moveHandler,
+ IBucketModifiedHandler &modifiedHandler,
+ IThreadService & master,
+ BucketExecutor & bucketExecutor,
+ const MaintenanceDocumentSubDB &ready,
+ const MaintenanceDocumentSubDB &notReady,
+ bucketdb::IBucketCreateNotifier &bucketCreateNotifier,
+ IClusterStateChangedNotifier &clusterStateChangedNotifier,
+ IBucketStateChangedNotifier &bucketStateChangedNotifier,
+ IDiskMemUsageNotifier &diskMemUsageNotifier,
+ const BlockableMaintenanceJobConfig &blockableConfig,
+ const vespalib::string &docTypeName,
+ document::BucketSpace bucketSpace)
: BlockableMaintenanceJob("move_buckets." + docTypeName, vespalib::duration::zero(), vespalib::duration::zero(), blockableConfig),
IClusterStateChangedHandler(),
- IBucketFreezeListener(),
bucketdb::IBucketCreateListener(),
IBucketStateChangedHandler(),
IDiskMemUsageListener(),
+ std::enable_shared_from_this<BucketMoveJob>(),
_calc(calc),
+ _dbRetainer(std::move(dbRetainer)),
_moveHandler(moveHandler),
_modifiedHandler(modifiedHandler),
+ _master(master),
+ _bucketExecutor(bucketExecutor),
_ready(ready),
_notReady(notReady),
- _mover(getLimiter(), _ready.meta_store()->getBucketDB()),
- _doneScan(false),
- _scanPos(),
- _scanPass(ScanPass::FIRST),
- _endPos(),
_bucketSpace(bucketSpace),
- _delayedBuckets(),
- _delayedBucketsFrozen(),
- _frozenBuckets(frozenBuckets),
+ _iterateCount(0),
+ _movers(),
+ _bucketsInFlight(),
+ _buckets2Move(),
+ _bucketsPending(0),
_bucketCreateNotifier(bucketCreateNotifier),
- _delayedMover(getLimiter(), _ready.meta_store()->getBucketDB()),
_clusterStateChangedNotifier(clusterStateChangedNotifier),
_bucketStateChangedNotifier(bucketStateChangedNotifier),
_diskMemUsageNotifier(diskMemUsageNotifier)
{
+ _movers.reserve(std::min(100u, blockableConfig.getMaxOutstandingMoveOps()));
if (blockedDueToClusterState(_calc)) {
setBlocked(BlockedReason::CLUSTER_STATE);
}
- _frozenBuckets.addListener(this);
_bucketCreateNotifier.addListener(this);
_clusterStateChangedNotifier.addClusterStateChangedHandler(this);
_bucketStateChangedNotifier.addBucketStateChangedHandler(this);
_diskMemUsageNotifier.addDiskMemUsageListener(this);
+ recompute(_ready.meta_store()->getBucketDB().takeGuard());
}
BucketMoveJob::~BucketMoveJob()
{
- _frozenBuckets.removeListener(this);
_bucketCreateNotifier.removeListener(this);
_clusterStateChangedNotifier.removeClusterStateChangedHandler(this);
_bucketStateChangedNotifier.removeBucketStateChangedHandler(this);
_diskMemUsageNotifier.removeDiskMemUsageListener(this);
}
-void
-BucketMoveJob::maybeCancelMover(DocumentBucketMover &mover)
+std::shared_ptr<BucketMoveJob>
+BucketMoveJob::create(const std::shared_ptr<IBucketStateCalculator> &calc,
+ RetainGuard dbRetainer,
+ IDocumentMoveHandler &moveHandler,
+ IBucketModifiedHandler &modifiedHandler,
+ IThreadService & master,
+ BucketExecutor & bucketExecutor,
+ const MaintenanceDocumentSubDB &ready,
+ const MaintenanceDocumentSubDB &notReady,
+ bucketdb::IBucketCreateNotifier &bucketCreateNotifier,
+ IClusterStateChangedNotifier &clusterStateChangedNotifier,
+ IBucketStateChangedNotifier &bucketStateChangedNotifier,
+ IDiskMemUsageNotifier &diskMemUsageNotifier,
+ const BlockableMaintenanceJobConfig &blockableConfig,
+ const vespalib::string &docTypeName,
+ document::BucketSpace bucketSpace)
{
- // Cancel bucket if moving in wrong direction
- if (!mover.bucketDone()) {
- bool ready = mover.getSource() == &_ready;
- Trinary shouldBeReady = _calc->shouldBeReady(document::Bucket(_bucketSpace, mover.getBucket()));
- if (isBlocked() ||
- (shouldBeReady == Trinary::Undefined) ||
- (ready == (shouldBeReady == Trinary::True)))
- {
- mover.cancel();
+ return std::shared_ptr<BucketMoveJob>(
+ new BucketMoveJob(calc, std::move(dbRetainer), moveHandler, modifiedHandler, master, bucketExecutor, ready, notReady,
+ bucketCreateNotifier, clusterStateChangedNotifier, bucketStateChangedNotifier,
+ diskMemUsageNotifier, blockableConfig, docTypeName, bucketSpace),
+ [&master](auto job) {
+ auto failed = master.execute(makeLambdaTask([job]() { delete job; }));
+ assert(!failed);
+ });
+}
+
+BucketMoveJob::NeedResult
+BucketMoveJob::needMove(const ScanIterator &itr) const {
+ NeedResult noMove(false, false);
+ const bool hasReadyDocs = itr.hasReadyBucketDocs();
+ const bool hasNotReadyDocs = itr.hasNotReadyBucketDocs();
+ if (!hasReadyDocs && !hasNotReadyDocs) {
+ return noMove; // No documents for bucket in ready or notready subdbs
+ }
+ const bool isActive = itr.isActive();
+ // No point in moving buckets when node is retired and everything will be deleted soon.
+ // However, allow moving of explicitly activated buckets, as this implies a lack of other good replicas.
+ if (!_calc || (_calc->nodeRetired() && !isActive)) {
+ return noMove;
+ }
+ const Trinary shouldBeReady = _calc->shouldBeReady(document::Bucket(_bucketSpace, itr.getBucket()));
+ if (shouldBeReady == Trinary::Undefined) {
+ return noMove;
+ }
+ const bool wantReady = (shouldBeReady == Trinary::True) || isActive;
+ LOG(spam, "checkBucket(): bucket(%s), shouldBeReady(%s), active(%s)",
+ itr.getBucket().toString().c_str(), toStr(shouldBeReady), toStr(isActive));
+ if (wantReady) {
+ if (!hasNotReadyDocs) {
+ return noMove; // No notready bucket to make ready
+ }
+ } else {
+ if (isActive) {
+            return noMove; // Do not move from ready to not ready when active
+ }
+ if (!hasReadyDocs) {
+ return noMove; // No ready bucket to make notready
}
}
+ return {true, wantReady};
}
-void
-BucketMoveJob::maybeDelayMover(DocumentBucketMover &mover, BucketId bucket)
-{
- // Delay bucket if being frozen.
- if (!mover.bucketDone() && bucket == mover.getBucket()) {
- mover.cancel();
- _delayedBucketsFrozen.insert(bucket);
- _delayedBuckets.erase(bucket);
+class BucketMoveJob::StartMove : public storage::spi::BucketTask {
+public:
+ using IDestructorCallbackSP = std::shared_ptr<vespalib::IDestructorCallback>;
+ StartMove(std::shared_ptr<BucketMoveJob> job, BucketMover::MoveKeys keys, IDestructorCallbackSP opsTracker)
+ : _job(job),
+ _keys(std::move(keys)),
+ _opsTracker(std::move(opsTracker))
+ {}
+
+ void run(const Bucket &bucket, IDestructorCallbackSP onDone) override {
+ assert(_keys.mover().getBucket() == bucket.getBucketId());
+ using DoneContext = vespalib::KeepAlive<std::pair<IDestructorCallbackSP, IDestructorCallbackSP>>;
+ BucketMoveJob::prepareMove(std::move(_job), std::move(_keys),
+ std::make_shared<DoneContext>(std::make_pair(std::move(_opsTracker), std::move(onDone))));
+ }
+
+ void fail(const Bucket &bucket) override {
+ BucketMoveJob::failOperation(std::move(_job), bucket.getBucketId());
}
+
+private:
+ std::shared_ptr<BucketMoveJob> _job;
+ BucketMover::MoveKeys _keys;
+ IDestructorCallbackSP _opsTracker;
+};
+
+void
+BucketMoveJob::failOperation(std::shared_ptr<BucketMoveJob> job, BucketId bucketId) {
+ auto & master = job->_master;
+ if (job->stopped()) return;
+ master.execute(makeLambdaTask([job=std::move(job), bucketId]() {
+ if (job->stopped()) return;
+ job->considerBucket(job->_ready.meta_store()->getBucketDB().takeGuard(), bucketId);
+ }));
}
void
-BucketMoveJob::notifyThawedBucket(const BucketId &bucket)
-{
- if (_delayedBucketsFrozen.erase(bucket) != 0u) {
- _delayedBuckets.insert(bucket);
- considerRun();
+BucketMoveJob::startMove(BucketMover & mover, size_t maxDocsToMove) {
+ auto [keys, done] = mover.getKeysToMove(maxDocsToMove);
+ if (done) {
+ mover.setAllScheduled();
}
+ if (keys.empty()) return;
+ mover.updateLastValidGid(keys.back()._gid);
+ Bucket spiBucket(document::Bucket(_bucketSpace, mover.getBucket()));
+ auto bucketTask = std::make_unique<StartMove>(shared_from_this(), std::move(keys), getLimiter().beginOperation());
+ _bucketExecutor.execute(spiBucket, std::move(bucketTask));
}
void
-BucketMoveJob::deactivateBucket(BucketId bucket)
+BucketMoveJob::prepareMove(std::shared_ptr<BucketMoveJob> job, BucketMover::MoveKeys keys, IDestructorCallbackSP onDone)
{
- _delayedBuckets.insert(bucket);
+ if (job->stopped()) return; //TODO Remove once lidtracker is no longer in use.
+ auto moveOps = keys.createMoveOperations();
+ auto & master = job->_master;
+ if (job->stopped()) return;
+ master.execute(makeLambdaTask([job=std::move(job), moveOps=std::move(moveOps), onDone=std::move(onDone)]() mutable {
+ if (job->stopped()) return;
+ job->completeMove(std::move(moveOps), std::move(onDone));
+ }));
}
void
-BucketMoveJob::activateBucket(BucketId bucket)
-{
- bucketdb::Guard notReadyBdb(_notReady.meta_store()->getBucketDB().takeGuard());
- if (notReadyBdb->get(bucket).getDocumentCount() == 0) {
- return; // notready bucket already empty. This is the normal case.
+BucketMoveJob::completeMove(GuardedMoveOps ops, IDestructorCallbackSP onDone) {
+ BucketMover & mover = ops.mover();
+ mover.moveDocuments(std::move(ops.success()), std::move(onDone));
+ ops.failed().clear();
+ if (checkIfMoverComplete(mover)) {
+ reconsiderBucket(_ready.meta_store()->getBucketDB().takeGuard(), mover.getBucket());
+ }
+}
+
+bool
+BucketMoveJob::checkIfMoverComplete(const BucketMover & mover) {
+ bool bucketMoveComplete = mover.allScheduled() && mover.inSync();
+ bool needReschedule = mover.needReschedule();
+ if (bucketMoveComplete || needReschedule) {
+ BucketId bucket = mover.getBucket();
+ auto found = _bucketsInFlight.find(bucket);
+ if (needReschedule) {
+ if ((found != _bucketsInFlight.end()) && (&mover == found->second.get())) {
+ //Prevent old disconnected mover from creating havoc.
+ _bucketsInFlight.erase(found);
+ _movers.erase(std::remove_if(_movers.begin(), _movers.end(),
+ [bucket](const BucketMoverSP &cand) {
+ return cand->getBucket() == bucket;
+ }),
+ _movers.end());
+ return true;
+ }
+ } else {
+ assert(found != _bucketsInFlight.end());
+ _bucketsInFlight.erase(found);
+ _modifiedHandler.notifyBucketModified(bucket);
+ }
}
- _delayedBuckets.insert(bucket);
+ updatePending();
+ return false;
}
void
-BucketMoveJob::notifyCreateBucket(const bucketdb::Guard &, const BucketId &bucket)
-{
- _delayedBuckets.insert(bucket);
- considerRun();
+BucketMoveJob::cancelBucket(BucketId bucket) {
+ auto inFlight = _bucketsInFlight.find(bucket);
+ if (inFlight != _bucketsInFlight.end()) {
+ inFlight->second->cancel();
+ checkIfMoverComplete(*inFlight->second);
+ }
}
void
-BucketMoveJob::changedCalculator()
-{
- if (done()) {
- _scanPos = ScanPosition();
- _endPos = ScanPosition();
+BucketMoveJob::considerBucket(const bucketdb::Guard & guard, BucketId bucket) {
+ cancelBucket(bucket);
+ assert( !_bucketsInFlight.contains(bucket));
+ reconsiderBucket(guard, bucket);
+}
+
+void
+BucketMoveJob::reconsiderBucket(const bucketdb::Guard & guard, BucketId bucket) {
+ assert( ! _bucketsInFlight.contains(bucket));
+ ScanIterator itr(guard, bucket);
+ auto [mustMove, wantReady] = needMove(itr);
+ if (mustMove) {
+ _buckets2Move[bucket] = wantReady;
} else {
- _endPos = _scanPos;
+ _buckets2Move.erase(bucket);
}
- _doneScan = false;
- _scanPass = ScanPass::FIRST;
- maybeCancelMover(_mover);
- maybeCancelMover(_delayedMover);
+ updatePending();
+ considerRun();
}
-bool
-BucketMoveJob::scanAndMove(size_t maxBucketsToScan, size_t maxDocsToMove)
+void
+BucketMoveJob::notifyCreateBucket(const bucketdb::Guard & guard, const BucketId &bucket)
+{
+ considerBucket(guard, bucket);
+}
+
+BucketMoveJob::BucketMoveSet
+BucketMoveJob::computeBuckets2Move(const bucketdb::Guard & guard)
{
- IFrozenBucketHandler::ExclusiveBucketGuard::UP bucketGuard;
- // Look for delayed bucket to be processed now
- while (!_delayedBuckets.empty() && _delayedMover.bucketDone()) {
- const BucketId bucket = *_delayedBuckets.begin();
- _delayedBuckets.erase(_delayedBuckets.begin());
- ScanIterator itr(_ready.meta_store()->getBucketDB().takeGuard(), bucket);
- if (itr.getBucket() == bucket) {
- checkBucket(bucket, itr, _delayedMover, bucketGuard);
+ BucketMoveJob::BucketMoveSet toMove;
+ for (ScanIterator itr(guard, BucketId()); itr.valid(); ++itr) {
+ auto [mustMove, wantReady] = needMove(itr);
+ if (mustMove) {
+ toMove[itr.getBucket()] = wantReady;
}
}
- if (!_delayedMover.bucketDone()) {
- return moveDocuments(_delayedMover, maxDocsToMove, bucketGuard);
+ return toMove;
+}
+
+std::shared_ptr<BucketMover>
+BucketMoveJob::createMover(BucketId bucket, bool wantReady) {
+ const MaintenanceDocumentSubDB &source(wantReady ? _notReady : _ready);
+ const MaintenanceDocumentSubDB &target(wantReady ? _ready : _notReady);
+ LOG(debug, "checkBucket(): mover.setupForBucket(%s, source:%u, target:%u)",
+ bucket.toString().c_str(), source.sub_db_id(), target.sub_db_id());
+ return BucketMover::create(bucket, &source, target.sub_db_id(), _moveHandler);
+}
+
+std::shared_ptr<BucketMover>
+BucketMoveJob::greedyCreateMover() {
+ if ( ! _buckets2Move.empty()) {
+ auto next = _buckets2Move.begin();
+ auto mover = createMover(next->first, next->second);
+ _buckets2Move.erase(next);
+ return mover;
}
- if (_mover.bucketDone()) {
- size_t bucketsScanned = 0;
- for (;;) {
- if (_mover.bucketDone()) {
- ScanResult res = scanBuckets(maxBucketsToScan - bucketsScanned, bucketGuard);
- bucketsScanned += res.first;
- if (res.second) {
- if (_scanPass == ScanPass::FIRST &&
- _endPos.validBucket()) {
- _scanPos = ScanPosition();
- _scanPass = ScanPass::SECOND;
- } else {
- _doneScan = true;
- break;
- }
- }
- }
- if (!_mover.bucketDone() || bucketsScanned >= maxBucketsToScan) {
- break;
- }
+ return {};
+}
+
+void
+BucketMoveJob::moveDocs(size_t maxDocsToMove) {
+ backFillMovers();
+ if (_movers.empty()) return;
+
+ // Select mover
+ size_t index = _iterateCount++ % _movers.size();
+ auto & mover = *_movers[index];
+
+ //Move, or reduce movers as we are tailing off
+ if (!mover.allScheduled()) {
+ startMove(mover, maxDocsToMove);
+ if (mover.allScheduled()) {
+ _movers.erase(_movers.begin() + index);
}
}
- if (!_mover.bucketDone()) {
- return moveDocuments(_mover, maxDocsToMove, bucketGuard);
+}
+
+bool
+BucketMoveJob::scanAndMove(size_t maxBuckets2Move, size_t maxDocsToMovePerBucket) {
+ for (size_t i(0); i < maxBuckets2Move; i++) {
+ moveDocs(maxDocsToMovePerBucket);
}
- return true;
+ return isBlocked() || done();
+}
+
+bool
+BucketMoveJob::done() const {
+ return _buckets2Move.empty() && _movers.empty() && !isBlocked();
}
bool
BucketMoveJob::run()
{
- if (isBlocked() || done()) {
+ if (isBlocked()) {
return true; // indicate work is done, since node state is bad
}
/// Returning false here will immediately post the job back on the executor. This will give a busy loop,
/// but this is considered fine as it is very rare and it will be intermingled with multiple feed operations.
- if ( ! scanAndMove(200, 1) ) {
+ if ( ! scanAndMove(1, 1) ) {
return false;
}
@@ -334,30 +395,49 @@ BucketMoveJob::run()
}
void
+BucketMoveJob::recompute() {
+ recompute(_ready.meta_store()->getBucketDB().takeGuard());
+}
+void
+BucketMoveJob::recompute(const bucketdb::Guard & guard) {
+ _buckets2Move = computeBuckets2Move(guard);
+ updatePending();
+}
+
+void
+BucketMoveJob::backFillMovers() {
+ // Ensure we have enough movers.
+ while ( ! _buckets2Move.empty() && (_movers.size() < _movers.capacity())) {
+ auto mover = greedyCreateMover();
+ _movers.push_back(mover);
+ auto bucketId = mover->getBucket();
+ assert( ! _bucketsInFlight.contains(bucketId));
+ _bucketsInFlight[bucketId] = std::move(mover);
+ }
+ updatePending();
+}
+
+void
BucketMoveJob::notifyClusterStateChanged(const std::shared_ptr<IBucketStateCalculator> &newCalc)
{
// Called by master write thread
_calc = newCalc;
- changedCalculator();
if (blockedDueToClusterState(_calc)) {
setBlocked(BlockedReason::CLUSTER_STATE);
} else {
unBlock(BlockedReason::CLUSTER_STATE);
+ _movers.clear();
+ std::for_each(_bucketsInFlight.begin(), _bucketsInFlight.end(), [](auto & entry) { entry.second->cancel();});
+ _bucketsInFlight.clear();
+ recompute(_ready.meta_store()->getBucketDB().takeGuard());
}
}
void
-BucketMoveJob::notifyBucketStateChanged(const BucketId &bucketId, BucketInfo::ActiveState newState)
+BucketMoveJob::notifyBucketStateChanged(const BucketId &bucketId, BucketInfo::ActiveState)
{
// Called by master write thread
- if (newState == BucketInfo::NOT_ACTIVE) {
- deactivateBucket(bucketId);
- } else {
- activateBucket(bucketId);
- }
- if (!done()) {
- considerRun();
- }
+ considerBucket(_ready.meta_store()->getBucketDB().takeGuard(), bucketId);
}
void
@@ -367,4 +447,16 @@ BucketMoveJob::notifyDiskMemUsage(DiskMemUsageState state)
internalNotifyDiskMemUsage(state);
}
+void
+BucketMoveJob::updatePending() {
+ _bucketsPending.store(_bucketsInFlight.size() + _buckets2Move.size(), std::memory_order_relaxed);
+}
+
+void
+BucketMoveJob::updateMetrics(DocumentDBTaggedMetrics & metrics) const {
    // This is an over estimate to ensure we do not count down to zero until everything has been completed and acked.
+ metrics.bucketMove.bucketsPending.set(_bucketsPending.load(std::memory_order_relaxed) +
+ getLimiter().numPending());
+}
+
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/bucketmovejob.h b/searchcore/src/vespa/searchcore/proton/server/bucketmovejob.h
index 7ef1a491667..573bdef13ef 100644
--- a/searchcore/src/vespa/searchcore/proton/server/bucketmovejob.h
+++ b/searchcore/src/vespa/searchcore/proton/server/bucketmovejob.h
@@ -5,13 +5,16 @@
#include "blockable_maintenance_job.h"
#include "documentbucketmover.h"
#include "i_disk_mem_usage_listener.h"
-#include "ibucketfreezelistener.h"
#include "ibucketstatechangedhandler.h"
#include "iclusterstatechangedhandler.h"
-#include "ifrozenbuckethandler.h"
+#include "maintenancedocumentsubdb.h"
#include <vespa/searchcore/proton/bucketdb/bucketscaniterator.h>
#include <vespa/searchcore/proton/bucketdb/i_bucket_create_listener.h>
-#include <set>
+#include <vespa/searchcore/proton/common/monitored_refcount.h>
+
+
+namespace storage::spi { struct BucketExecutor; }
+namespace searchcorespi::index { struct IThreadService; }
namespace proton {
@@ -26,81 +29,64 @@ namespace bucketdb { class IBucketCreateNotifier; }
/**
* Class used to control the moving of buckets between the ready and
* not ready sub databases based on the readiness of buckets according to the cluster state.
+ * It will first compute the set of buckets to be moved. Then N of these buckets will be iterated in parallel and
+ * the documents scheduled for move. The movement will happen in 3 phases.
+ * 1 - Collect meta info for documents. Must happen in master thread
+ * 2 - Acquire bucket lock and fetch documents and verify against meta data. This is done in BucketExecutor threads.
+ * 3 - Actual movement is then done in master thread while still holding bucket lock. Once bucket has fully moved
+ * bucket modified notification is sent.
*/
class BucketMoveJob : public BlockableMaintenanceJob,
public IClusterStateChangedHandler,
- public IBucketFreezeListener,
public bucketdb::IBucketCreateListener,
public IBucketStateChangedHandler,
- public IDiskMemUsageListener
+ public IDiskMemUsageListener,
+ public std::enable_shared_from_this<BucketMoveJob>
{
private:
- using ScanPosition = bucketdb::ScanPosition;
+ using BucketExecutor = storage::spi::BucketExecutor;
+ using IDestructorCallback = vespalib::IDestructorCallback;
+ using IDestructorCallbackSP = std::shared_ptr<IDestructorCallback>;
+ using IThreadService = searchcorespi::index::IThreadService;
+ using BucketId = document::BucketId;
using ScanIterator = bucketdb::ScanIterator;
- using ScanPass = ScanIterator::Pass;
- using ScanResult = std::pair<size_t, bool>;
+ using BucketMoveSet = std::map<BucketId, bool>;
+ using NeedResult = std::pair<bool, bool>;
+ using ActiveState = storage::spi::BucketInfo::ActiveState;
+ using BucketMover = bucketdb::BucketMover;
+ using BucketMoverSP = std::shared_ptr<BucketMover>;
+ using Bucket2Mover = std::map<BucketId, BucketMoverSP>;
+ using Movers = std::vector<BucketMoverSP>;
+ using GuardedMoveOps = BucketMover::GuardedMoveOps;
std::shared_ptr<IBucketStateCalculator> _calc;
+ RetainGuard _dbRetainer;
IDocumentMoveHandler &_moveHandler;
IBucketModifiedHandler &_modifiedHandler;
- const MaintenanceDocumentSubDB &_ready;
- const MaintenanceDocumentSubDB &_notReady;
- DocumentBucketMover _mover;
- bool _doneScan;
- ScanPosition _scanPos;
- ScanPass _scanPass;
- ScanPosition _endPos;
- document::BucketSpace _bucketSpace;
-
- using DelayedBucketSet = std::set<document::BucketId>;
-
- // Delayed buckets that are no longer frozen or active that can be considered for moving.
- DelayedBucketSet _delayedBuckets;
- // Frozen buckets that cannot be moved at all.
- DelayedBucketSet _delayedBucketsFrozen;
- IFrozenBucketHandler &_frozenBuckets;
+ IThreadService &_master;
+ BucketExecutor &_bucketExecutor;
+ const MaintenanceDocumentSubDB _ready;
+ const MaintenanceDocumentSubDB _notReady;
+ const document::BucketSpace _bucketSpace;
+ size_t _iterateCount;
+ Movers _movers;
+ Bucket2Mover _bucketsInFlight;
+ BucketMoveSet _buckets2Move;
+
+ std::atomic<size_t> _bucketsPending;
+
bucketdb::IBucketCreateNotifier &_bucketCreateNotifier;
- DocumentBucketMover _delayedMover;
IClusterStateChangedNotifier &_clusterStateChangedNotifier;
IBucketStateChangedNotifier &_bucketStateChangedNotifier;
IDiskMemUsageNotifier &_diskMemUsageNotifier;
- ScanResult
- scanBuckets(size_t maxBucketsToScan,
- IFrozenBucketHandler::ExclusiveBucketGuard::UP & bucketGuard);
-
- void maybeCancelMover(DocumentBucketMover &mover);
- void maybeDelayMover(DocumentBucketMover &mover, document::BucketId bucket);
-
- bool
- moveDocuments(DocumentBucketMover &mover,
- size_t maxDocsToMove,
- IFrozenBucketHandler::ExclusiveBucketGuard::UP & bucketGuard);
-
- void
- checkBucket(const document::BucketId &bucket,
- ScanIterator &itr,
- DocumentBucketMover &mover,
- IFrozenBucketHandler::ExclusiveBucketGuard::UP & bucketGuard);
-
- /**
- * Signal that the given bucket should be de-activated.
- * An active bucket is not considered for moving from ready to not ready sub database.
- * A de-activated bucket can be considered for moving.
- **/
- void deactivateBucket(document::BucketId bucket);
-
- /**
- * Signal that the given bucket should be activated.
- */
- void activateBucket(document::BucketId bucket);
-
-public:
BucketMoveJob(const std::shared_ptr<IBucketStateCalculator> &calc,
+ RetainGuard dbRetainer,
IDocumentMoveHandler &moveHandler,
IBucketModifiedHandler &modifiedHandler,
+ IThreadService & master,
+ BucketExecutor & bucketExecutor,
const MaintenanceDocumentSubDB &ready,
const MaintenanceDocumentSubDB &notReady,
- IFrozenBucketHandler &frozenBuckets,
bucketdb::IBucketCreateNotifier &bucketCreateNotifier,
IClusterStateChangedNotifier &clusterStateChangedNotifier,
IBucketStateChangedNotifier &bucketStateChangedNotifier,
@@ -109,37 +95,53 @@ public:
const vespalib::string &docTypeName,
document::BucketSpace bucketSpace);
- ~BucketMoveJob() override;
+ void startMove(BucketMover & mover, size_t maxDocsToMove);
+ static void prepareMove(std::shared_ptr<BucketMoveJob> job, BucketMover::MoveKeys keys, IDestructorCallbackSP context);
+ void completeMove(GuardedMoveOps moveOps, IDestructorCallbackSP context);
+ bool checkIfMoverComplete(const BucketMover & mover);
+ void considerBucket(const bucketdb::Guard & guard, BucketId bucket);
+ void reconsiderBucket(const bucketdb::Guard & guard, BucketId bucket);
+ void updatePending();
+ void cancelBucket(BucketId bucket); // Cancels any in-flight move for the given bucket
+ NeedResult needMove(const ScanIterator &itr) const;
+ BucketMoveSet computeBuckets2Move(const bucketdb::Guard & guard);
+ BucketMoverSP createMover(BucketId bucket, bool wantReady);
+ BucketMoverSP greedyCreateMover();
+ void backFillMovers();
+ void moveDocs(size_t maxDocsToMove);
+ static void failOperation(std::shared_ptr<BucketMoveJob> job, BucketId bucket);
+ void recompute(const bucketdb::Guard & guard);
+ class StartMove;
+public:
+ static std::shared_ptr<BucketMoveJob>
+ create(const std::shared_ptr<IBucketStateCalculator> &calc,
+ RetainGuard dbRetainer,
+ IDocumentMoveHandler &moveHandler,
+ IBucketModifiedHandler &modifiedHandler,
+ IThreadService & master,
+ BucketExecutor & bucketExecutor,
+ const MaintenanceDocumentSubDB &ready,
+ const MaintenanceDocumentSubDB &notReady,
+ bucketdb::IBucketCreateNotifier &bucketCreateNotifier,
+ IClusterStateChangedNotifier &clusterStateChangedNotifier,
+ IBucketStateChangedNotifier &bucketStateChangedNotifier,
+ IDiskMemUsageNotifier &diskMemUsageNotifier,
+ const BlockableMaintenanceJobConfig &blockableConfig,
+ const vespalib::string &docTypeName,
+ document::BucketSpace bucketSpace);
- void changedCalculator();
- bool scanAndMove(size_t maxBucketsToScan, size_t maxDocsToMove);
+ ~BucketMoveJob() override;
- bool done() const {
- // Ignores _delayedBucketsFrozen, since no work can be done there yet
- return
- _doneScan &&
- _mover.bucketDone() &&
- _delayedMover.bucketDone() &&
- _delayedBuckets.empty();
- }
+ bool scanAndMove(size_t maxBuckets2Move, size_t maxDocsToMovePerBucket);
+ bool done() const;
+ void recompute(); // Only for testing
- // IMaintenanceJob API
bool run() override;
-
- // IClusterStateChangedHandler API
void notifyClusterStateChanged(const std::shared_ptr<IBucketStateCalculator> &newCalc) override;
-
- // IBucketFreezeListener API
- void notifyThawedBucket(const document::BucketId &bucket) override;
-
- // IBucketStateChangedHandler API
- void notifyBucketStateChanged(const document::BucketId &bucketId,
- storage::spi::BucketInfo::ActiveState newState) override;
-
+ void notifyBucketStateChanged(const BucketId &bucketId, ActiveState newState) override;
void notifyDiskMemUsage(DiskMemUsageState state) override;
-
- // bucketdb::IBucketCreateListener API
- void notifyCreateBucket(const bucketdb::Guard & guard, const document::BucketId &bucket) override;
+ void notifyCreateBucket(const bucketdb::Guard & guard, const BucketId &bucket) override;
+ void updateMetrics(DocumentDBTaggedMetrics & metrics) const override;
};
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/bucketmovejobv2.cpp b/searchcore/src/vespa/searchcore/proton/server/bucketmovejobv2.cpp
deleted file mode 100644
index 9e34462ae21..00000000000
--- a/searchcore/src/vespa/searchcore/proton/server/bucketmovejobv2.cpp
+++ /dev/null
@@ -1,470 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "bucketmovejobv2.h"
-#include "imaintenancejobrunner.h"
-#include "ibucketstatechangednotifier.h"
-#include "iclusterstatechangednotifier.h"
-#include "i_disk_mem_usage_notifier.h"
-#include "ibucketmodifiedhandler.h"
-#include "move_operation_limiter.h"
-#include "document_db_maintenance_config.h"
-#include <vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h>
-#include <vespa/searchcore/proton/bucketdb/i_bucket_create_notifier.h>
-#include <vespa/searchcore/proton/feedoperation/moveoperation.h>
-#include <vespa/searchcore/proton/documentmetastore/i_document_meta_store.h>
-#include <vespa/searchcorespi/index/i_thread_service.h>
-#include <vespa/persistence/spi/bucket_tasks.h>
-#include <vespa/vespalib/util/destructor_callbacks.h>
-#include <vespa/vespalib/util/lambdatask.h>
-#include <thread>
-
-#include <vespa/log/log.h>
-LOG_SETUP(".proton.server.bucketmovejob");
-
-using document::BucketId;
-using storage::spi::BucketInfo;
-using storage::spi::Bucket;
-using proton::bucketdb::BucketMover;
-using vespalib::makeLambdaTask;
-using vespalib::Trinary;
-
-namespace proton {
-
-namespace {
-
-const char *
-toStr(bool v) {
- return (v ? "T" : "F");
-}
-
-const char *
-toStr(Trinary v) {
- return (v == Trinary::True) ? "T" : ((v == Trinary::False) ? "F" : "U");
-}
-
-bool
-blockedDueToClusterState(const std::shared_ptr<IBucketStateCalculator> &calc)
-{
- bool clusterUp = calc && calc->clusterUp();
- bool nodeUp = calc && calc->nodeUp();
- bool nodeInitializing = calc && calc->nodeInitializing();
- return !(clusterUp && nodeUp && !nodeInitializing);
-}
-
-}
-
-BucketMoveJobV2::BucketMoveJobV2(const std::shared_ptr<IBucketStateCalculator> &calc,
- RetainGuard dbRetainer,
- IDocumentMoveHandler &moveHandler,
- IBucketModifiedHandler &modifiedHandler,
- IThreadService & master,
- BucketExecutor & bucketExecutor,
- const MaintenanceDocumentSubDB &ready,
- const MaintenanceDocumentSubDB &notReady,
- bucketdb::IBucketCreateNotifier &bucketCreateNotifier,
- IClusterStateChangedNotifier &clusterStateChangedNotifier,
- IBucketStateChangedNotifier &bucketStateChangedNotifier,
- IDiskMemUsageNotifier &diskMemUsageNotifier,
- const BlockableMaintenanceJobConfig &blockableConfig,
- const vespalib::string &docTypeName,
- document::BucketSpace bucketSpace)
- : BlockableMaintenanceJob("move_buckets." + docTypeName, vespalib::duration::zero(), vespalib::duration::zero(), blockableConfig),
- IClusterStateChangedHandler(),
- bucketdb::IBucketCreateListener(),
- IBucketStateChangedHandler(),
- IDiskMemUsageListener(),
- std::enable_shared_from_this<BucketMoveJobV2>(),
- _calc(calc),
- _dbRetainer(std::move(dbRetainer)),
- _moveHandler(moveHandler),
- _modifiedHandler(modifiedHandler),
- _master(master),
- _bucketExecutor(bucketExecutor),
- _ready(ready),
- _notReady(notReady),
- _bucketSpace(bucketSpace),
- _iterateCount(0),
- _movers(),
- _bucketsInFlight(),
- _buckets2Move(),
- _stopped(false),
- _bucketsPending(0),
- _bucketCreateNotifier(bucketCreateNotifier),
- _clusterStateChangedNotifier(clusterStateChangedNotifier),
- _bucketStateChangedNotifier(bucketStateChangedNotifier),
- _diskMemUsageNotifier(diskMemUsageNotifier)
-{
- _movers.reserve(std::min(100u, blockableConfig.getMaxOutstandingMoveOps()));
- if (blockedDueToClusterState(_calc)) {
- setBlocked(BlockedReason::CLUSTER_STATE);
- }
-
- _bucketCreateNotifier.addListener(this);
- _clusterStateChangedNotifier.addClusterStateChangedHandler(this);
- _bucketStateChangedNotifier.addBucketStateChangedHandler(this);
- _diskMemUsageNotifier.addDiskMemUsageListener(this);
- recompute(_ready.meta_store()->getBucketDB().takeGuard());
-}
-
-BucketMoveJobV2::~BucketMoveJobV2()
-{
- _bucketCreateNotifier.removeListener(this);
- _clusterStateChangedNotifier.removeClusterStateChangedHandler(this);
- _bucketStateChangedNotifier.removeBucketStateChangedHandler(this);
- _diskMemUsageNotifier.removeDiskMemUsageListener(this);
-}
-
-std::shared_ptr<BucketMoveJobV2>
-BucketMoveJobV2::create(const std::shared_ptr<IBucketStateCalculator> &calc,
- RetainGuard dbRetainer,
- IDocumentMoveHandler &moveHandler,
- IBucketModifiedHandler &modifiedHandler,
- IThreadService & master,
- BucketExecutor & bucketExecutor,
- const MaintenanceDocumentSubDB &ready,
- const MaintenanceDocumentSubDB &notReady,
- bucketdb::IBucketCreateNotifier &bucketCreateNotifier,
- IClusterStateChangedNotifier &clusterStateChangedNotifier,
- IBucketStateChangedNotifier &bucketStateChangedNotifier,
- IDiskMemUsageNotifier &diskMemUsageNotifier,
- const BlockableMaintenanceJobConfig &blockableConfig,
- const vespalib::string &docTypeName,
- document::BucketSpace bucketSpace)
-{
- return std::shared_ptr<BucketMoveJobV2>(
- new BucketMoveJobV2(calc, std::move(dbRetainer), moveHandler, modifiedHandler, master, bucketExecutor, ready, notReady,
- bucketCreateNotifier, clusterStateChangedNotifier, bucketStateChangedNotifier,
- diskMemUsageNotifier, blockableConfig, docTypeName, bucketSpace),
- [&master](auto job) {
- auto failed = master.execute(makeLambdaTask([job]() { delete job; }));
- assert(!failed);
- });
-}
-
-BucketMoveJobV2::NeedResult
-BucketMoveJobV2::needMove(const ScanIterator &itr) const {
- NeedResult noMove(false, false);
- const bool hasReadyDocs = itr.hasReadyBucketDocs();
- const bool hasNotReadyDocs = itr.hasNotReadyBucketDocs();
- if (!hasReadyDocs && !hasNotReadyDocs) {
- return noMove; // No documents for bucket in ready or notready subdbs
- }
- const bool isActive = itr.isActive();
- // No point in moving buckets when node is retired and everything will be deleted soon.
- // However, allow moving of explicitly activated buckets, as this implies a lack of other good replicas.
- if (!_calc || (_calc->nodeRetired() && !isActive)) {
- return noMove;
- }
- const Trinary shouldBeReady = _calc->shouldBeReady(document::Bucket(_bucketSpace, itr.getBucket()));
- if (shouldBeReady == Trinary::Undefined) {
- return noMove;
- }
- const bool wantReady = (shouldBeReady == Trinary::True) || isActive;
- LOG(spam, "checkBucket(): bucket(%s), shouldBeReady(%s), active(%s)",
- itr.getBucket().toString().c_str(), toStr(shouldBeReady), toStr(isActive));
- if (wantReady) {
- if (!hasNotReadyDocs) {
- return noMove; // No notready bucket to make ready
- }
- } else {
- if (isActive) {
- return noMove; // Do not move rom ready to not ready when active
- }
- if (!hasReadyDocs) {
- return noMove; // No ready bucket to make notready
- }
- }
- return {true, wantReady};
-}
-
-class BucketMoveJobV2::StartMove : public storage::spi::BucketTask {
-public:
- using IDestructorCallbackSP = std::shared_ptr<vespalib::IDestructorCallback>;
- StartMove(std::shared_ptr<BucketMoveJobV2> job, BucketMover::MoveKeys keys, IDestructorCallbackSP opsTracker)
- : _job(job),
- _keys(std::move(keys)),
- _opsTracker(std::move(opsTracker))
- {}
-
- void run(const Bucket &bucket, IDestructorCallbackSP onDone) override {
- assert(_keys.mover().getBucket() == bucket.getBucketId());
- using DoneContext = vespalib::KeepAlive<std::pair<IDestructorCallbackSP, IDestructorCallbackSP>>;
- BucketMoveJobV2::prepareMove(std::move(_job), std::move(_keys),
- std::make_shared<DoneContext>(std::make_pair(std::move(_opsTracker), std::move(onDone))));
- }
-
- void fail(const Bucket &bucket) override {
- BucketMoveJobV2::failOperation(std::move(_job), bucket.getBucketId());
- }
-
-private:
- std::shared_ptr<BucketMoveJobV2> _job;
- BucketMover::MoveKeys _keys;
- IDestructorCallbackSP _opsTracker;
-};
-
-void
-BucketMoveJobV2::failOperation(std::shared_ptr<BucketMoveJobV2> job, BucketId bucketId) {
- auto & master = job->_master;
- if (job->_stopped) return;
- master.execute(makeLambdaTask([job=std::move(job), bucketId]() {
- if (job->_stopped.load(std::memory_order_relaxed)) return;
- job->considerBucket(job->_ready.meta_store()->getBucketDB().takeGuard(), bucketId);
- }));
-}
-
-void
-BucketMoveJobV2::startMove(BucketMover & mover, size_t maxDocsToMove) {
- auto [keys, done] = mover.getKeysToMove(maxDocsToMove);
- if (done) {
- mover.setAllScheduled();
- }
- if (keys.empty()) return;
- mover.updateLastValidGid(keys.back()._gid);
- Bucket spiBucket(document::Bucket(_bucketSpace, mover.getBucket()));
- auto bucketTask = std::make_unique<StartMove>(shared_from_this(), std::move(keys), getLimiter().beginOperation());
- _bucketExecutor.execute(spiBucket, std::move(bucketTask));
-}
-
-void
-BucketMoveJobV2::prepareMove(std::shared_ptr<BucketMoveJobV2> job, BucketMover::MoveKeys keys, IDestructorCallbackSP onDone)
-{
- if (job->_stopped) return; //TODO Remove once lidtracker is no longer in use.
- auto moveOps = keys.createMoveOperations();
- auto & master = job->_master;
- if (job->_stopped) return;
- master.execute(makeLambdaTask([job=std::move(job), moveOps=std::move(moveOps), onDone=std::move(onDone)]() mutable {
- if (job->_stopped.load(std::memory_order_relaxed)) return;
- job->completeMove(std::move(moveOps), std::move(onDone));
- }));
-}
-
-void
-BucketMoveJobV2::completeMove(GuardedMoveOps ops, IDestructorCallbackSP onDone) {
- BucketMover & mover = ops.mover();
- mover.moveDocuments(std::move(ops.success()), std::move(onDone));
- ops.failed().clear();
- if (checkIfMoverComplete(mover)) {
- reconsiderBucket(_ready.meta_store()->getBucketDB().takeGuard(), mover.getBucket());
- }
-}
-
-bool
-BucketMoveJobV2::checkIfMoverComplete(const BucketMover & mover) {
- bool bucketMoveComplete = mover.allScheduled() && mover.inSync();
- bool needReschedule = mover.needReschedule();
- if (bucketMoveComplete || needReschedule) {
- BucketId bucket = mover.getBucket();
- auto found = _bucketsInFlight.find(bucket);
- if (needReschedule) {
- if ((found != _bucketsInFlight.end()) && (&mover == found->second.get())) {
- //Prevent old disconnected mover from creating havoc.
- _bucketsInFlight.erase(found);
- _movers.erase(std::remove_if(_movers.begin(), _movers.end(),
- [bucket](const BucketMoverSP &cand) {
- return cand->getBucket() == bucket;
- }),
- _movers.end());
- return true;
- }
- } else {
- assert(found != _bucketsInFlight.end());
- _bucketsInFlight.erase(found);
- _modifiedHandler.notifyBucketModified(bucket);
- }
- }
- updatePending();
- return false;
-}
-
-void
-BucketMoveJobV2::cancelBucket(BucketId bucket) {
- auto inFlight = _bucketsInFlight.find(bucket);
- if (inFlight != _bucketsInFlight.end()) {
- inFlight->second->cancel();
- checkIfMoverComplete(*inFlight->second);
- }
-}
-
-void
-BucketMoveJobV2::considerBucket(const bucketdb::Guard & guard, BucketId bucket) {
- cancelBucket(bucket);
- assert( !_bucketsInFlight.contains(bucket));
- reconsiderBucket(guard, bucket);
-}
-
-void
-BucketMoveJobV2::reconsiderBucket(const bucketdb::Guard & guard, BucketId bucket) {
- assert( ! _bucketsInFlight.contains(bucket));
- ScanIterator itr(guard, bucket);
- auto [mustMove, wantReady] = needMove(itr);
- if (mustMove) {
- _buckets2Move[bucket] = wantReady;
- } else {
- _buckets2Move.erase(bucket);
- }
- updatePending();
- considerRun();
-}
-
-void
-BucketMoveJobV2::notifyCreateBucket(const bucketdb::Guard & guard, const BucketId &bucket)
-{
- considerBucket(guard, bucket);
-}
-
-BucketMoveJobV2::BucketMoveSet
-BucketMoveJobV2::computeBuckets2Move(const bucketdb::Guard & guard)
-{
- BucketMoveJobV2::BucketMoveSet toMove;
- for (ScanIterator itr(guard, BucketId()); itr.valid(); ++itr) {
- auto [mustMove, wantReady] = needMove(itr);
- if (mustMove) {
- toMove[itr.getBucket()] = wantReady;
- }
- }
- return toMove;
-}
-
-std::shared_ptr<BucketMover>
-BucketMoveJobV2::createMover(BucketId bucket, bool wantReady) {
- const MaintenanceDocumentSubDB &source(wantReady ? _notReady : _ready);
- const MaintenanceDocumentSubDB &target(wantReady ? _ready : _notReady);
- LOG(debug, "checkBucket(): mover.setupForBucket(%s, source:%u, target:%u)",
- bucket.toString().c_str(), source.sub_db_id(), target.sub_db_id());
- return BucketMover::create(bucket, &source, target.sub_db_id(), _moveHandler);
-}
-
-std::shared_ptr<BucketMover>
-BucketMoveJobV2::greedyCreateMover() {
- if ( ! _buckets2Move.empty()) {
- auto next = _buckets2Move.begin();
- auto mover = createMover(next->first, next->second);
- _buckets2Move.erase(next);
- return mover;
- }
- return {};
-}
-
-void
-BucketMoveJobV2::moveDocs(size_t maxDocsToMove) {
- backFillMovers();
- if (_movers.empty()) return;
-
- // Select mover
- size_t index = _iterateCount++ % _movers.size();
- auto & mover = *_movers[index];
-
- //Move, or reduce movers as we are tailing off
- if (!mover.allScheduled()) {
- startMove(mover, maxDocsToMove);
- if (mover.allScheduled()) {
- _movers.erase(_movers.begin() + index);
- }
- }
-}
-
-bool
-BucketMoveJobV2::scanAndMove(size_t maxBuckets2Move, size_t maxDocsToMovePerBucket) {
- for (size_t i(0); i < maxBuckets2Move; i++) {
- moveDocs(maxDocsToMovePerBucket);
- }
- return isBlocked() || done();
-}
-
-bool
-BucketMoveJobV2::done() const {
- return _buckets2Move.empty() && _movers.empty() && !isBlocked();
-}
-
-bool
-BucketMoveJobV2::run()
-{
- if (isBlocked()) {
- return true; // indicate work is done, since node state is bad
- }
- /// Returning false here will immediately post the job back on the executor. This will give a busy loop,
- /// but this is considered fine as it is very rare and it will be intermingled with multiple feed operations.
- if ( ! scanAndMove(1, 1) ) {
- return false;
- }
-
- if (isBlocked(BlockedReason::OUTSTANDING_OPS)) {
- return true;
- }
- return done();
-}
-
-void
-BucketMoveJobV2::recompute() {
- recompute(_ready.meta_store()->getBucketDB().takeGuard());
-}
-void
-BucketMoveJobV2::recompute(const bucketdb::Guard & guard) {
- _buckets2Move = computeBuckets2Move(guard);
- updatePending();
-}
-
-void
-BucketMoveJobV2::backFillMovers() {
- // Ensure we have enough movers.
- while ( ! _buckets2Move.empty() && (_movers.size() < _movers.capacity())) {
- auto mover = greedyCreateMover();
- _movers.push_back(mover);
- auto bucketId = mover->getBucket();
- assert( ! _bucketsInFlight.contains(bucketId));
- _bucketsInFlight[bucketId] = std::move(mover);
- }
- updatePending();
-}
-
-void
-BucketMoveJobV2::notifyClusterStateChanged(const std::shared_ptr<IBucketStateCalculator> &newCalc)
-{
- // Called by master write thread
- _calc = newCalc;
- if (blockedDueToClusterState(_calc)) {
- setBlocked(BlockedReason::CLUSTER_STATE);
- } else {
- unBlock(BlockedReason::CLUSTER_STATE);
- _movers.clear();
- std::for_each(_bucketsInFlight.begin(), _bucketsInFlight.end(), [](auto & entry) { entry.second->cancel();});
- _bucketsInFlight.clear();
- recompute(_ready.meta_store()->getBucketDB().takeGuard());
- }
-}
-
-void
-BucketMoveJobV2::notifyBucketStateChanged(const BucketId &bucketId, BucketInfo::ActiveState)
-{
- // Called by master write thread
- considerBucket(_ready.meta_store()->getBucketDB().takeGuard(), bucketId);
-}
-
-void
-BucketMoveJobV2::notifyDiskMemUsage(DiskMemUsageState state)
-{
- // Called by master write thread
- internalNotifyDiskMemUsage(state);
-}
-
-void
-BucketMoveJobV2::onStop() {
- // Called by master write thread
- BlockableMaintenanceJob::onStop();
- _stopped = true;
-}
-
-void
-BucketMoveJobV2::updatePending() {
- _bucketsPending.store(_bucketsInFlight.size() + _buckets2Move.size(), std::memory_order_relaxed);
-}
-
-void
-BucketMoveJobV2::updateMetrics(DocumentDBTaggedMetrics & metrics) const {
- // This is an over estimate to ensure we do not count down to zero until everything has been and completed and acked.
- metrics.bucketMove.bucketsPending.set(_bucketsPending.load(std::memory_order_relaxed) +
- getLimiter().numPending());
-}
-
-} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/bucketmovejobv2.h b/searchcore/src/vespa/searchcore/proton/server/bucketmovejobv2.h
deleted file mode 100644
index df75c8c9766..00000000000
--- a/searchcore/src/vespa/searchcore/proton/server/bucketmovejobv2.h
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include "blockable_maintenance_job.h"
-#include "documentbucketmover.h"
-#include "i_disk_mem_usage_listener.h"
-#include "ibucketstatechangedhandler.h"
-#include "iclusterstatechangedhandler.h"
-#include "maintenancedocumentsubdb.h"
-#include <vespa/searchcore/proton/bucketdb/bucketscaniterator.h>
-#include <vespa/searchcore/proton/bucketdb/i_bucket_create_listener.h>
-#include <vespa/searchcore/proton/common/monitored_refcount.h>
-
-
-namespace storage::spi { struct BucketExecutor; }
-namespace searchcorespi::index { struct IThreadService; }
-
-namespace proton {
-
-class BlockableMaintenanceJobConfig;
-class IBucketStateChangedNotifier;
-class IClusterStateChangedNotifier;
-class IDiskMemUsageNotifier;
-class IBucketModifiedHandler;
-
-namespace bucketdb { class IBucketCreateNotifier; }
-
-/**
- * Class used to control the moving of buckets between the ready and
- * not ready sub databases based on the readiness of buckets according to the cluster state.
- * It will first compute the set of buckets to be moved. Then N of these buckets will be iterated in parallel and
- * the documents scheduled for move. The movment will happen in 3 phases.
- * 1 - Collect meta info for documents. Must happend in master thread
- * 2 - Acquire bucket lock and fetch documents and very against meta data. This is done in BucketExecutor threads.
- * 3 - Actual movement is then done in master thread while still holding bucket lock. Once bucket has fully moved
- * bucket modified notification is sent.
- */
-class BucketMoveJobV2 : public BlockableMaintenanceJob,
- public IClusterStateChangedHandler,
- public bucketdb::IBucketCreateListener,
- public IBucketStateChangedHandler,
- public IDiskMemUsageListener,
- public std::enable_shared_from_this<BucketMoveJobV2>
-{
-private:
- using BucketExecutor = storage::spi::BucketExecutor;
- using IDestructorCallback = vespalib::IDestructorCallback;
- using IDestructorCallbackSP = std::shared_ptr<IDestructorCallback>;
- using IThreadService = searchcorespi::index::IThreadService;
- using BucketId = document::BucketId;
- using ScanIterator = bucketdb::ScanIterator;
- using BucketMoveSet = std::map<BucketId, bool>;
- using NeedResult = std::pair<bool, bool>;
- using ActiveState = storage::spi::BucketInfo::ActiveState;
- using BucketMover = bucketdb::BucketMover;
- using BucketMoverSP = std::shared_ptr<BucketMover>;
- using Bucket2Mover = std::map<BucketId, BucketMoverSP>;
- using Movers = std::vector<BucketMoverSP>;
- using GuardedMoveOps = BucketMover::GuardedMoveOps;
- std::shared_ptr<IBucketStateCalculator> _calc;
- RetainGuard _dbRetainer;
- IDocumentMoveHandler &_moveHandler;
- IBucketModifiedHandler &_modifiedHandler;
- IThreadService &_master;
- BucketExecutor &_bucketExecutor;
- const MaintenanceDocumentSubDB _ready;
- const MaintenanceDocumentSubDB _notReady;
- const document::BucketSpace _bucketSpace;
- size_t _iterateCount;
- Movers _movers;
- Bucket2Mover _bucketsInFlight;
- BucketMoveSet _buckets2Move;
-
- std::atomic<bool> _stopped;
- std::atomic<size_t> _bucketsPending;
-
- bucketdb::IBucketCreateNotifier &_bucketCreateNotifier;
- IClusterStateChangedNotifier &_clusterStateChangedNotifier;
- IBucketStateChangedNotifier &_bucketStateChangedNotifier;
- IDiskMemUsageNotifier &_diskMemUsageNotifier;
-
- BucketMoveJobV2(const std::shared_ptr<IBucketStateCalculator> &calc,
- RetainGuard dbRetainer,
- IDocumentMoveHandler &moveHandler,
- IBucketModifiedHandler &modifiedHandler,
- IThreadService & master,
- BucketExecutor & bucketExecutor,
- const MaintenanceDocumentSubDB &ready,
- const MaintenanceDocumentSubDB &notReady,
- bucketdb::IBucketCreateNotifier &bucketCreateNotifier,
- IClusterStateChangedNotifier &clusterStateChangedNotifier,
- IBucketStateChangedNotifier &bucketStateChangedNotifier,
- IDiskMemUsageNotifier &diskMemUsageNotifier,
- const BlockableMaintenanceJobConfig &blockableConfig,
- const vespalib::string &docTypeName,
- document::BucketSpace bucketSpace);
-
- void startMove(BucketMover & mover, size_t maxDocsToMove);
- static void prepareMove(std::shared_ptr<BucketMoveJobV2> job, BucketMover::MoveKeys keys, IDestructorCallbackSP context);
- void completeMove(GuardedMoveOps moveOps, IDestructorCallbackSP context);
- bool checkIfMoverComplete(const BucketMover & mover);
- void considerBucket(const bucketdb::Guard & guard, BucketId bucket);
- void reconsiderBucket(const bucketdb::Guard & guard, BucketId bucket);
- void updatePending();
- void cancelBucket(BucketId bucket); // True if something to cancel
- NeedResult needMove(const ScanIterator &itr) const;
- BucketMoveSet computeBuckets2Move(const bucketdb::Guard & guard);
- BucketMoverSP createMover(BucketId bucket, bool wantReady);
- BucketMoverSP greedyCreateMover();
- void backFillMovers();
- void moveDocs(size_t maxDocsToMove);
- static void failOperation(std::shared_ptr<BucketMoveJobV2> job, BucketId bucket);
- void recompute(const bucketdb::Guard & guard);
- class StartMove;
-public:
- static std::shared_ptr<BucketMoveJobV2>
- create(const std::shared_ptr<IBucketStateCalculator> &calc,
- RetainGuard dbRetainer,
- IDocumentMoveHandler &moveHandler,
- IBucketModifiedHandler &modifiedHandler,
- IThreadService & master,
- BucketExecutor & bucketExecutor,
- const MaintenanceDocumentSubDB &ready,
- const MaintenanceDocumentSubDB &notReady,
- bucketdb::IBucketCreateNotifier &bucketCreateNotifier,
- IClusterStateChangedNotifier &clusterStateChangedNotifier,
- IBucketStateChangedNotifier &bucketStateChangedNotifier,
- IDiskMemUsageNotifier &diskMemUsageNotifier,
- const BlockableMaintenanceJobConfig &blockableConfig,
- const vespalib::string &docTypeName,
- document::BucketSpace bucketSpace);
-
- ~BucketMoveJobV2() override;
-
- bool scanAndMove(size_t maxBuckets2Move, size_t maxDocsToMovePerBucket);
- bool done() const;
- void recompute(); // Only for testing
-
- bool run() override;
- void notifyClusterStateChanged(const std::shared_ptr<IBucketStateCalculator> &newCalc) override;
- void notifyBucketStateChanged(const BucketId &bucketId, ActiveState newState) override;
- void notifyDiskMemUsage(DiskMemUsageState state) override;
- void notifyCreateBucket(const bucketdb::Guard & guard, const BucketId &bucket) override;
- void onStop() override;
- void updateMetrics(DocumentDBTaggedMetrics & metrics) const override;
-};
-
-} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/document_db_maintenance_config.cpp b/searchcore/src/vespa/searchcore/proton/server/document_db_maintenance_config.cpp
index f3b0c3ff305..3bcd9f3d85f 100644
--- a/searchcore/src/vespa/searchcore/proton/server/document_db_maintenance_config.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/document_db_maintenance_config.cpp
@@ -7,9 +7,7 @@ namespace proton {
constexpr vespalib::duration MAX_DELAY_SEC = 300s;
DocumentDBPruneConfig::DocumentDBPruneConfig() noexcept
- : _delay(MAX_DELAY_SEC),
- _interval(21600s),
- _age(1209600s)
+ : DocumentDBPruneConfig(21600s, 1209600s)
{
}
@@ -24,9 +22,9 @@ DocumentDBPruneConfig(vespalib::duration interval, vespalib::duration age) noexc
bool
DocumentDBPruneConfig::operator==(const DocumentDBPruneConfig &rhs) const noexcept
{
- return _delay == rhs._delay &&
- _interval == rhs._interval &&
- _age == rhs._age;
+ return (_delay == rhs._delay) &&
+ (_interval == rhs._interval) &&
+ (_age == rhs._age);
}
DocumentDBHeartBeatConfig::DocumentDBHeartBeatConfig() noexcept
@@ -52,8 +50,7 @@ DocumentDBLidSpaceCompactionConfig::DocumentDBLidSpaceCompactionConfig() noexcep
_allowedLidBloatFactor(1.0),
_remove_batch_block_rate(0.5),
_remove_block_rate(100),
- _disabled(false),
- _useBucketExecutor(false)
+ _disabled(false)
{
}
@@ -62,16 +59,14 @@ DocumentDBLidSpaceCompactionConfig::DocumentDBLidSpaceCompactionConfig(vespalib:
double allowedLidBloatFactor,
double remove_batch_block_rate,
double remove_block_rate,
- bool disabled,
- bool useBucketExecutor) noexcept
+ bool disabled) noexcept
: _delay(std::min(MAX_DELAY_SEC, interval)),
_interval(interval),
_allowedLidBloat(allowedLidBloat),
_allowedLidBloatFactor(allowedLidBloatFactor),
_remove_batch_block_rate(remove_batch_block_rate),
_remove_block_rate(remove_block_rate),
- _disabled(disabled),
- _useBucketExecutor(useBucketExecutor)
+ _disabled(disabled)
{
}
@@ -113,19 +108,16 @@ BlockableMaintenanceJobConfig::operator==(const BlockableMaintenanceJobConfig &r
}
BucketMoveConfig::BucketMoveConfig() noexcept
- : _maxDocsToMovePerBucket(1),
- _useBucketExecutor(false)
+ : _maxDocsToMovePerBucket(1)
{}
-BucketMoveConfig::BucketMoveConfig(uint32_t maxDocsToMovePerBucket, bool useBucketExecutor_) noexcept
- : _maxDocsToMovePerBucket(maxDocsToMovePerBucket),
- _useBucketExecutor(useBucketExecutor_)
+BucketMoveConfig::BucketMoveConfig(uint32_t maxDocsToMovePerBucket) noexcept
+ : _maxDocsToMovePerBucket(maxDocsToMovePerBucket)
{}
bool
BucketMoveConfig::operator==(const BucketMoveConfig &rhs) const noexcept
{
- return _maxDocsToMovePerBucket == rhs._maxDocsToMovePerBucket &&
- _useBucketExecutor == rhs._useBucketExecutor;
+ return _maxDocsToMovePerBucket == rhs._maxDocsToMovePerBucket;
}
DocumentDBMaintenanceConfig::DocumentDBMaintenanceConfig() noexcept
@@ -144,7 +136,7 @@ DocumentDBMaintenanceConfig::DocumentDBMaintenanceConfig() noexcept
DocumentDBMaintenanceConfig::~DocumentDBMaintenanceConfig() = default;
DocumentDBMaintenanceConfig::
-DocumentDBMaintenanceConfig(const DocumentDBPruneRemovedDocumentsConfig &pruneRemovedDocuments,
+DocumentDBMaintenanceConfig(const DocumentDBPruneConfig &pruneRemovedDocuments,
const DocumentDBHeartBeatConfig &heartBeat,
vespalib::duration groupingSessionPruneInterval,
vespalib::duration visibilityDelay,
diff --git a/searchcore/src/vespa/searchcore/proton/server/document_db_maintenance_config.h b/searchcore/src/vespa/searchcore/proton/server/document_db_maintenance_config.h
index af8194a4a19..a42bf65ce88 100644
--- a/searchcore/src/vespa/searchcore/proton/server/document_db_maintenance_config.h
+++ b/searchcore/src/vespa/searchcore/proton/server/document_db_maintenance_config.h
@@ -26,7 +26,7 @@ public:
vespalib::duration getAge() const noexcept { return _age; }
};
-typedef DocumentDBPruneConfig DocumentDBPruneRemovedDocumentsConfig;
+using DocumentDBPruneRemovedDocumentsConfig = DocumentDBPruneConfig;
class DocumentDBHeartBeatConfig
{
@@ -51,7 +51,6 @@ private:
double _remove_batch_block_rate;
double _remove_block_rate;
bool _disabled;
- bool _useBucketExecutor;
public:
DocumentDBLidSpaceCompactionConfig() noexcept;
@@ -60,8 +59,7 @@ public:
double allowwedLidBloatFactor,
double remove_batch_block_rate,
double remove_block_rate,
- bool disabled,
- bool useBucketExecutor) noexcept;
+ bool disabled) noexcept;
static DocumentDBLidSpaceCompactionConfig createDisabled() noexcept;
bool operator==(const DocumentDBLidSpaceCompactionConfig &rhs) const noexcept;
@@ -72,7 +70,6 @@ public:
double get_remove_batch_block_rate() const noexcept { return _remove_batch_block_rate; }
double get_remove_block_rate() const noexcept { return _remove_block_rate; }
bool isDisabled() const noexcept { return _disabled; }
- bool useBucketExecutor() const noexcept { return _useBucketExecutor; }
};
class BlockableMaintenanceJobConfig {
@@ -92,13 +89,11 @@ public:
class BucketMoveConfig {
public:
BucketMoveConfig() noexcept;
- BucketMoveConfig(uint32_t _maxDocsToMovePerBucket, bool useBucketExecutor) noexcept;
+ BucketMoveConfig(uint32_t _maxDocsToMovePerBucket) noexcept;
bool operator==(const BucketMoveConfig &rhs) const noexcept;
uint32_t getMaxDocsToMovePerBucket() const noexcept { return _maxDocsToMovePerBucket; }
- bool useBucketExecutor() const noexcept { return _useBucketExecutor; }
private:
uint32_t _maxDocsToMovePerBucket;
- bool _useBucketExecutor;
};
class DocumentDBMaintenanceConfig
@@ -107,7 +102,7 @@ public:
typedef std::shared_ptr<DocumentDBMaintenanceConfig> SP;
private:
- DocumentDBPruneRemovedDocumentsConfig _pruneRemovedDocuments;
+ DocumentDBPruneConfig _pruneRemovedDocuments;
DocumentDBHeartBeatConfig _heartBeat;
vespalib::duration _sessionCachePruneInterval;
vespalib::duration _visibilityDelay;
@@ -120,7 +115,7 @@ private:
public:
DocumentDBMaintenanceConfig() noexcept;
- DocumentDBMaintenanceConfig(const DocumentDBPruneRemovedDocumentsConfig &pruneRemovedDocuments,
+ DocumentDBMaintenanceConfig(const DocumentDBPruneConfig &pruneRemovedDocuments,
const DocumentDBHeartBeatConfig &heartBeat,
vespalib::duration sessionCachePruneInterval,
vespalib::duration visibilityDelay,
@@ -139,7 +134,7 @@ public:
bool
operator==(const DocumentDBMaintenanceConfig &rhs) const noexcept ;
- const DocumentDBPruneRemovedDocumentsConfig &getPruneRemovedDocumentsConfig() const noexcept {
+ const DocumentDBPruneConfig &getPruneRemovedDocumentsConfig() const noexcept {
return _pruneRemovedDocuments;
}
const DocumentDBHeartBeatConfig &getHeartBeatConfig() const noexcept {
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentbucketmover.cpp b/searchcore/src/vespa/searchcore/proton/server/documentbucketmover.cpp
index d0581a5b13f..a0ce668294f 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentbucketmover.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/documentbucketmover.cpp
@@ -134,7 +134,7 @@ BucketMover::createMoveOperations(MoveKeys toMove) {
void
BucketMover::moveDocuments(std::vector<GuardedMoveOp> moveOps, IDestructorCallbackSP onDone) {
for (auto & moveOp : moveOps) {
- moveDocument(std::move(moveOp.first), std::move(onDone));
+ moveDocument(std::move(moveOp.first), onDone);
}
}
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp b/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
index 3f3fad55bd6..aa633536419 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
@@ -744,11 +744,6 @@ DocumentDB::startTransactionLogReplay()
LOG(debug, "DocumentDB(%s): Database started.", _docTypeName.toString().c_str());
}
-BucketGuard::UP DocumentDB::lockBucket(const document::BucketId &bucket)
-{
- return std::make_unique<BucketGuard>(bucket, _maintenanceController);
-}
-
std::shared_ptr<std::vector<IDocumentRetriever::SP> >
DocumentDB::getDocumentRetrievers(IDocumentRetriever::ReadConsistency consistency)
{
@@ -952,7 +947,6 @@ DocumentDB::injectMaintenanceJobs(const DocumentDBMaintenanceConfig &config, std
*_feedHandler, // IHeartBeatHandler
*_sessionManager, // ISessionCachePruner
*_feedHandler, // IOperationStorer
- _maintenanceController, // IFrozenBucketHandler
_subDBs.getBucketCreateNotifier(),
_bucketSpace,
*_feedHandler, // IPruneRemovedDocumentsHandler
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdb.h b/searchcore/src/vespa/searchcore/proton/server/documentdb.h
index ad0225d6f86..999ccad708e 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentdb.h
+++ b/searchcore/src/vespa/searchcore/proton/server/documentdb.h
@@ -22,7 +22,6 @@
#include <vespa/searchcore/proton/common/monitored_refcount.h>
#include <vespa/searchcore/proton/metrics/documentdb_job_trackers.h>
#include <vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h>
-#include <vespa/searchcore/proton/persistenceengine/bucket_guard.h>
#include <vespa/searchcore/proton/persistenceengine/i_resource_write_filter.h>
#include <vespa/searchcore/proton/index/indexmanager.h>
#include <vespa/searchlib/docstore/cachestats.h>
@@ -365,10 +364,7 @@ public:
return _maintenanceController;
}
- BucketGuard::UP lockBucket(const document::BucketId &bucket);
-
virtual SerialNum getOldestFlushedSerial();
-
virtual SerialNum getNewestFlushedSerial();
std::unique_ptr<search::engine::SearchReply>
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp b/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp
index fbdff698b9b..8d513fde62f 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp
@@ -132,9 +132,8 @@ buildMaintenanceConfig(const BootstrapConfig::SP &bootstrapConfig,
isDocumentTypeGlobal = ddbConfig.global;
}
return std::make_shared<DocumentDBMaintenanceConfig>(
- DocumentDBPruneRemovedDocumentsConfig(
- pruneRemovedDocumentsInterval,
- pruneRemovedDocumentsAge),
+ DocumentDBPruneConfig(pruneRemovedDocumentsInterval,
+ pruneRemovedDocumentsAge),
DocumentDBHeartBeatConfig(),
vespalib::from_s(proton.grouping.sessionmanager.pruning.interval),
visibilityDelay,
@@ -144,8 +143,7 @@ buildMaintenanceConfig(const BootstrapConfig::SP &bootstrapConfig,
proton.lidspacecompaction.allowedlidbloatfactor,
proton.lidspacecompaction.removebatchblockrate,
proton.lidspacecompaction.removeblockrate,
- isDocumentTypeGlobal,
- proton.lidspacecompaction.usebucketexecutor),
+ isDocumentTypeGlobal),
AttributeUsageFilterConfig(
proton.writefilter.attribute.enumstorelimit,
proton.writefilter.attribute.multivaluelimit),
@@ -154,7 +152,7 @@ buildMaintenanceConfig(const BootstrapConfig::SP &bootstrapConfig,
proton.maintenancejobs.resourcelimitfactor,
proton.maintenancejobs.maxoutstandingmoveops),
DocumentDBFlushConfig(proton.index.maxflushed,proton.index.maxflushedretired),
- BucketMoveConfig(proton.bucketmove.maxdocstomoveperbucket, proton.bucketmove.usebucketexecutor));
+ BucketMoveConfig(proton.bucketmove.maxdocstomoveperbucket));
}
template<typename T>
diff --git a/searchcore/src/vespa/searchcore/proton/server/forcecommitcontext.cpp b/searchcore/src/vespa/searchcore/proton/server/forcecommitcontext.cpp
index 32554555984..28f416c1732 100644
--- a/searchcore/src/vespa/searchcore/proton/server/forcecommitcontext.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/forcecommitcontext.cpp
@@ -17,8 +17,8 @@ ForceCommitContext::ForceCommitContext(vespalib::Executor &executor,
_task(std::make_unique<ForceCommitDoneTask>(documentMetaStore, std::move(pending_gid_to_lid_changes))),
_committedDocIdLimit(0u),
_docIdLimit(nullptr),
- _lidsToCommit(std::move(lidsToCommit)),
- _onDone(std::move(onDone))
+ _onDone(std::move(onDone)),
+ _lidsToCommit(std::move(lidsToCommit))
{
}
diff --git a/searchcore/src/vespa/searchcore/proton/server/forcecommitcontext.h b/searchcore/src/vespa/searchcore/proton/server/forcecommitcontext.h
index 9da835ea898..cbb4e6e86e1 100644
--- a/searchcore/src/vespa/searchcore/proton/server/forcecommitcontext.h
+++ b/searchcore/src/vespa/searchcore/proton/server/forcecommitcontext.h
@@ -28,8 +28,8 @@ class ForceCommitContext : public vespalib::IDestructorCallback
std::unique_ptr<ForceCommitDoneTask> _task;
uint32_t _committedDocIdLimit;
DocIdLimit *_docIdLimit;
+ std::shared_ptr<IDestructorCallback> _onDone;
PendingLidTrackerBase::Snapshot _lidsToCommit;
- std::shared_ptr<IDestructorCallback> _onDone;
public:
ForceCommitContext(vespalib::Executor &executor,
diff --git a/searchcore/src/vespa/searchcore/proton/server/frozenbuckets.cpp b/searchcore/src/vespa/searchcore/proton/server/frozenbuckets.cpp
deleted file mode 100644
index 54ef4ea2da4..00000000000
--- a/searchcore/src/vespa/searchcore/proton/server/frozenbuckets.cpp
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "frozenbuckets.h"
-#include "ibucketfreezelistener.h"
-#include <vespa/searchcorespi/index/i_thread_service.h>
-#include <vespa/vespalib/util/lambdatask.h>
-#include <algorithm>
-
-using document::BucketId;
-using vespalib::makeLambdaTask;
-
-namespace proton {
-
-FrozenBucketsMap::FrozenBucketsMap()
- : _lock(),
- _cond(),
- _map()
-{ }
-
-FrozenBucketsMap::~FrozenBucketsMap() {
- assert(_map.empty());
-}
-
-void
-FrozenBucketsMap::freezeBucket(BucketId bucket) {
-
- std::unique_lock<std::mutex> guard(_lock);
- std::pair<BucketId, FrozenBucket> tryVal(std::make_pair(bucket, FrozenBucket(FrozenBucket::Reader)));
-
- std::pair<Map::iterator, bool> res;
- for (res = _map.insert(tryVal); !res.second && (res.first->second.isExclusive()); res = _map.insert(tryVal)) {
- _cond.wait(guard);
- }
-
- if (!res.second) {
- res.first->second.addReader();
- }
-}
-
-
-bool
-FrozenBucketsMap::thawBucket(BucketId bucket)
-{
- std::lock_guard<std::mutex> guard(_lock);
- Map::iterator it(_map.find(bucket));
- assert(it != _map.end());
- assert(it->second.hasReaders());
- bool isLastAndContended(false);
- if (it->second.isLast()) {
- if (it->second.getNotifyWriter()) {
- isLastAndContended = true;
- }
- _map.erase(it);
- _cond.notify_all();
- } else {
- it->second.removeReader();
- }
- return isLastAndContended;
-}
-
-
-IFrozenBucketHandler::ExclusiveBucketGuard::UP
-FrozenBucketsMap::acquireExclusiveBucket(document::BucketId bucket)
-{
- std::lock_guard<std::mutex> guard(_lock);
- Map::iterator it(_map.find(bucket));
- if (it != _map.end()) {
- assert(it->second.hasReaders());
- it->second.setNotifyWriter();
- return ExclusiveBucketGuard::UP();
- }
- _map[bucket] = FrozenBucket(FrozenBucket::Writer);
- return std::make_unique<ExclusiveBucketGuard>(*this, bucket);
-}
-
-void
-FrozenBucketsMap::releaseExclusiveBucket(document::BucketId bucket)
-{
- std::lock_guard<std::mutex> guard(_lock);
- Map::const_iterator it(_map.find(bucket));
- assert ((it != _map.end()) && (it->second.isExclusive()));
- _map.erase(it);
- _cond.notify_all();
-}
-
-FrozenBuckets::FrozenBuckets(IThreadService &masterThread) :
- _frozen(),
- _masterThread(masterThread),
- _listeners()
-{
-}
-
-FrozenBuckets::~FrozenBuckets()
-{
- assert(_listeners.empty());
-}
-
-IFrozenBucketHandler::ExclusiveBucketGuard::UP
-FrozenBuckets::acquireExclusiveBucket(document::BucketId bucket) {
- return _frozen.acquireExclusiveBucket(bucket);
-}
-
-void
-FrozenBuckets::notifyThawed(document::BucketId bucket) {
- assert(_masterThread.isCurrentThread());
- for (auto &listener : _listeners) {
- listener->notifyThawedBucket(bucket);
- }
-}
-
-void
-FrozenBuckets::freezeBucket(BucketId bucket)
-{
- _frozen.freezeBucket(bucket);
-}
-
-void
-FrozenBuckets::thawBucket(BucketId bucket)
-{
- if (_frozen.thawBucket(bucket)) {
- _masterThread.execute(makeLambdaTask([this, bucket]() { notifyThawed(bucket); }));
- }
-}
-
-void
-FrozenBuckets::addListener(IBucketFreezeListener *listener)
-{
- // assert(_masterThread.isCurrentThread());
- _listeners.push_back(listener);
-}
-
-void
-FrozenBuckets::removeListener(IBucketFreezeListener *listener)
-{
- // assert(_masterThread.isCurrentThread());
- auto it = std::find(_listeners.begin(), _listeners.end(), listener);
- if (it != _listeners.end()) {
- _listeners.erase(it);
- }
-}
-
-} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/frozenbuckets.h b/searchcore/src/vespa/searchcore/proton/server/frozenbuckets.h
deleted file mode 100644
index 339bb7fa5d0..00000000000
--- a/searchcore/src/vespa/searchcore/proton/server/frozenbuckets.h
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include "ifrozenbuckethandler.h"
-#include "ibucketfreezer.h"
-#include <mutex>
-#include <condition_variable>
-#include <cassert>
-#include <map>
-#include <vector>
-
-
-namespace searchcorespi { namespace index {struct IThreadService; }}
-
-namespace proton {
-
-class IBucketFreezeListener;
-
-/**
- * Controls read and write access to buckets.
- */
-class FrozenBucketsMap {
-public:
- FrozenBucketsMap();
- ~FrozenBucketsMap();
- IFrozenBucketHandler::ExclusiveBucketGuard::UP acquireExclusiveBucket(document::BucketId bucket);
- void freezeBucket(document::BucketId bucket);
- // Returns true if it was the last one and it was contended.
- bool thawBucket(document::BucketId bucket);
- class ExclusiveBucketGuard : public IFrozenBucketHandler::ExclusiveBucketGuard {
- public:
- ExclusiveBucketGuard(const ExclusiveBucketGuard &) = delete;
- ExclusiveBucketGuard(ExclusiveBucketGuard &&) = delete;
- ExclusiveBucketGuard & operator=(const ExclusiveBucketGuard &) = delete;
- ExclusiveBucketGuard & operator=(ExclusiveBucketGuard &&) = delete;
-
- ExclusiveBucketGuard(FrozenBucketsMap & handler, document::BucketId & bucketId)
- : IFrozenBucketHandler::ExclusiveBucketGuard(bucketId),
- _handler(handler)
- { }
- ~ExclusiveBucketGuard() { _handler.releaseExclusiveBucket(getBucket());}
- private:
- FrozenBucketsMap & _handler;
- };
-private:
- void releaseExclusiveBucket(document::BucketId bucket);
- class FrozenBucket {
- public:
- enum Type {Reader, Writer};
- explicit FrozenBucket(Type type=Reader) : _refCount((type==Reader) ? 1 : -1), _notifyWriter(false) { }
- ~FrozenBucket() { assert((_refCount == -1) || (_refCount == 1));}
- void setNotifyWriter() { _notifyWriter = true; }
- bool getNotifyWriter() const { return _notifyWriter; }
- bool isLast() const { return _refCount == 1; }
- bool isExclusive() const { return _refCount == -1; }
- bool hasReaders() const { return _refCount >= 1; }
- void addReader() {
- assert(_refCount >= 1);
- _refCount++;
- }
- void removeReader() {
- assert(_refCount > 1);
- _refCount--;
- }
- private:
- int32_t _refCount;
- bool _notifyWriter;
- };
- typedef std::map<document::BucketId, FrozenBucket> Map;
- std::mutex _lock;
- std::condition_variable _cond;
- Map _map;
-};
-
-/**
- * Class that remembers which buckets are frozen and notifies all
- * registered listeners on bucket frozenness changes.
- */
-class FrozenBuckets : public IFrozenBucketHandler,
- public IBucketFreezer
-{
- using IThreadService = searchcorespi::index::IThreadService;
- FrozenBucketsMap _frozen;
- IThreadService &_masterThread;
- std::vector<IBucketFreezeListener *> _listeners;
-
- void notifyThawed(document::BucketId bucket);
-public:
- FrozenBuckets(IThreadService &masterThread);
- virtual ~FrozenBuckets();
-
- virtual ExclusiveBucketGuard::UP acquireExclusiveBucket(document::BucketId bucket) override;
- virtual void freezeBucket(document::BucketId bucket) override;
- virtual void thawBucket(document::BucketId bucket) override;
- virtual void addListener(IBucketFreezeListener *listener) override;
- virtual void removeListener(IBucketFreezeListener *listener) override;
-};
-
-} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/i_maintenance_job.h b/searchcore/src/vespa/searchcore/proton/server/i_maintenance_job.h
index 6bea9855c82..7148576b76f 100644
--- a/searchcore/src/vespa/searchcore/proton/server/i_maintenance_job.h
+++ b/searchcore/src/vespa/searchcore/proton/server/i_maintenance_job.h
@@ -4,6 +4,7 @@
#include <vespa/vespalib/stllike/string.h>
#include <vespa/vespalib/util/time.h>
#include <memory>
+#include <atomic>
namespace proton {
@@ -21,7 +22,9 @@ private:
const vespalib::string _name;
const vespalib::duration _delay;
const vespalib::duration _interval;
-
+ std::atomic<bool> _stopped;
+protected:
+ virtual void onStop() = 0;
public:
using UP = std::unique_ptr<IMaintenanceJob>;
using SP = std::shared_ptr<IMaintenanceJob>;
@@ -31,7 +34,8 @@ public:
vespalib::duration interval)
: _name(name),
_delay(delay),
- _interval(interval)
+ _interval(interval),
+ _stopped(false)
{}
virtual ~IMaintenanceJob() = default;
@@ -41,9 +45,12 @@ public:
virtual vespalib::duration getInterval() const { return _interval; }
virtual bool isBlocked() const { return false; }
virtual IBlockableMaintenanceJob *asBlockable() { return nullptr; }
- virtual void onStop() = 0;
virtual void updateMetrics(DocumentDBTaggedMetrics &) const {}
-
+ void stop() {
+ _stopped = true;
+ onStop();
+ }
+ bool stopped() const { return _stopped.load(std::memory_order_relaxed); }
/**
* Register maintenance job runner, in case event passed to the
* job causes it to want to be run again.
diff --git a/searchcore/src/vespa/searchcore/proton/server/ifrozenbuckethandler.h b/searchcore/src/vespa/searchcore/proton/server/ifrozenbuckethandler.h
deleted file mode 100644
index bcd671c95b9..00000000000
--- a/searchcore/src/vespa/searchcore/proton/server/ifrozenbuckethandler.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/document/bucket/bucketid.h>
-#include <memory>
-
-namespace proton {
-
-class IBucketFreezeListener;
-
-class IFrozenBucketHandler
-{
-public:
- class ExclusiveBucketGuard {
- public:
- typedef std::unique_ptr<ExclusiveBucketGuard> UP;
- ExclusiveBucketGuard(document::BucketId bucketId) : _bucketId(bucketId) { }
- virtual ~ExclusiveBucketGuard() { }
- document::BucketId getBucket() const { return _bucketId; }
- private:
- document::BucketId _bucketId;
- };
-
- virtual ~IFrozenBucketHandler() = default;
- virtual ExclusiveBucketGuard::UP acquireExclusiveBucket(document::BucketId bucket) = 0;
- virtual void addListener(IBucketFreezeListener *listener) = 0;
- virtual void removeListener(IBucketFreezeListener *listener) = 0;
-};
-
-}
diff --git a/searchcore/src/vespa/searchcore/proton/server/job_tracked_maintenance_job.h b/searchcore/src/vespa/searchcore/proton/server/job_tracked_maintenance_job.h
index 0e1b2b00ce5..ecc592a00a4 100644
--- a/searchcore/src/vespa/searchcore/proton/server/job_tracked_maintenance_job.h
+++ b/searchcore/src/vespa/searchcore/proton/server/job_tracked_maintenance_job.h
@@ -9,7 +9,7 @@ namespace proton {
/**
* Class for tracking the start and end of a maintenance job.
*/
-class JobTrackedMaintenanceJob : public IMaintenanceJob
+class JobTrackedMaintenanceJob final : public IMaintenanceJob
{
private:
IJobTracker::SP _tracker;
@@ -26,7 +26,7 @@ public:
_job->registerRunner(runner);
}
bool run() override;
- void onStop() override { _job->onStop(); }
+ void onStop() override { _job->stop(); }
};
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.cpp b/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.cpp
index 095169b84ce..059408f4a5e 100644
--- a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.cpp
@@ -1,62 +1,309 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "lid_space_compaction_job.h"
#include "i_document_scan_iterator.h"
-#include "ifrozenbuckethandler.h"
#include "i_lid_space_compaction_handler.h"
#include "i_operation_storer.h"
+#include "i_disk_mem_usage_notifier.h"
+#include "iclusterstatechangednotifier.h"
+#include "remove_operations_rate_tracker.h"
+#include <vespa/searchcore/proton/feedoperation/compact_lid_space_operation.h>
#include <vespa/searchcore/proton/feedoperation/moveoperation.h>
+#include <vespa/searchcore/proton/common/eventlogger.h>
+#include <vespa/searchcorespi/index/i_thread_service.h>
+#include <vespa/persistence/spi/bucket_tasks.h>
+#include <vespa/document/fieldvalue/document.h>
+#include <vespa/vespalib/util/destructor_callbacks.h>
+#include <vespa/vespalib/util/lambdatask.h>
+#include <vespa/vespalib/util/gate.h>
+#include <cassert>
+#include <thread>
+
+#include <vespa/log/log.h>
+LOG_SETUP(".proton.server.lidspace.compactionjob");
using search::DocumentMetaData;
using search::LidUsageStats;
+using storage::spi::makeBucketTask;
+using storage::spi::Bucket;
+using vespalib::makeLambdaTask;
+
+namespace proton::lidspace {
-namespace proton {
+namespace {
bool
-LidSpaceCompactionJob::scanDocuments(const LidUsageStats &stats)
+isSameDocument(const search::DocumentMetaData &a, const search::DocumentMetaData &b) {
+ return (a.lid == b.lid) &&
+ (a.bucketId == b.bucketId) &&
+ (a.gid == b.gid) &&
+ (a.timestamp ==
+ b.timestamp); // Timestamp check can be removed once logic has proved itself in large scale.
+}
+
+}
+
+class CompactionJob::MoveTask : public storage::spi::BucketTask {
+public:
+ MoveTask(std::shared_ptr<CompactionJob> job, const search::DocumentMetaData & meta, IDestructorCallback::SP opsTracker)
+ : _job(std::move(job)),
+ _meta(meta),
+ _opsTracker(std::move(opsTracker))
+ { }
+ void run(const Bucket & bucket, IDestructorCallback::SP onDone) override {
+ assert(bucket.getBucketId() == _meta.bucketId);
+ using DoneContext = vespalib::KeepAlive<std::pair<IDestructorCallback::SP, IDestructorCallback::SP>>;
+ CompactionJob::moveDocument(std::move(_job), _meta,
+ std::make_shared<DoneContext>(std::make_pair(std::move(_opsTracker), std::move(onDone))));
+ }
+ void fail(const Bucket & bucket) override {
+ assert(bucket.getBucketId() == _meta.bucketId);
+ auto & master = _job->_master;
+ if (_job->stopped()) return;
+ master.execute(makeLambdaTask([job=std::move(_job)] { job->_scanItr.reset(); }));
+ }
+private:
+ std::shared_ptr<CompactionJob> _job;
+ const search::DocumentMetaData _meta;
+ IDestructorCallback::SP _opsTracker;
+};
+
+bool
+CompactionJob::scanDocuments(const LidUsageStats &stats)
{
if (_scanItr->valid()) {
- DocumentMetaData document = getNextDocument(stats, _retryFrozenDocument);
- _retryFrozenDocument = false;
+ DocumentMetaData document = getNextDocument(stats, false);
if (document.valid()) {
- IFrozenBucketHandler::ExclusiveBucketGuard::UP bucketGuard = _frozenHandler.acquireExclusiveBucket(document.bucketId);
- if ( ! bucketGuard ) {
- // the job is blocked until the bucket for this document is thawed
- setBlocked(BlockedReason::FROZEN_BUCKET);
- _retryFrozenDocument = true;
+ Bucket metaBucket(document::Bucket(_bucketSpace, document.bucketId));
+ _bucketExecutor.execute(metaBucket, std::make_unique<MoveTask>(shared_from_this(), document, getLimiter().beginOperation()));
+ if (isBlocked(BlockedReason::OUTSTANDING_OPS)) {
return true;
- } else {
- auto op = _handler->createMoveOperation(document, stats.getLowestFreeLid());
- if ( ! op ) {
- return false;
- }
- vespalib::IDestructorCallback::SP context = getLimiter().beginOperation();
- _opStorer.appendOperation(*op, context);
- _handler->handleMove(*op, std::move(context));
- if (isBlocked(BlockedReason::OUTSTANDING_OPS)) {
- return true;
- }
}
}
}
return false;
}
-LidSpaceCompactionJob::LidSpaceCompactionJob(const DocumentDBLidSpaceCompactionConfig &config,
- std::shared_ptr<ILidSpaceCompactionHandler> handler,
- IOperationStorer &opStorer,
- IFrozenBucketHandler &frozenHandler,
- IDiskMemUsageNotifier &diskMemUsageNotifier,
- const BlockableMaintenanceJobConfig &blockableConfig,
- IClusterStateChangedNotifier &clusterStateChangedNotifier,
- bool nodeRetired)
- : LidSpaceCompactionJobBase(config, std::move(handler), opStorer, diskMemUsageNotifier,
- blockableConfig, clusterStateChangedNotifier, nodeRetired),
- _frozenHandler(frozenHandler),
- _retryFrozenDocument(false)
+void
+CompactionJob::moveDocument(std::shared_ptr<CompactionJob> job, const search::DocumentMetaData & metaThen,
+ std::shared_ptr<IDestructorCallback> context)
{
+ if (job->stopped()) return; //TODO Remove once lidtracker is no longer in use.
+ // The real lid must be sampled in the master thread.
+ //TODO remove target lid from createMoveOperation interface
+ auto op = job->_handler->createMoveOperation(metaThen, 0);
+ if (!op || !op->getDocument()) return;
+ // Early detection and force md5 calculation outside of master thread
+ if (metaThen.gid != op->getDocument()->getId().getGlobalId()) return;
+
+ auto & master = job->_master;
+ if (job->stopped()) return;
+ master.execute(makeLambdaTask([self=std::move(job), meta=metaThen, moveOp=std::move(op), onDone=std::move(context)]() mutable {
+ if (self->stopped()) return;
+ self->completeMove(meta, std::move(moveOp), std::move(onDone));
+ }));
}
-LidSpaceCompactionJob::~LidSpaceCompactionJob() = default;
+void
+CompactionJob::completeMove(const search::DocumentMetaData & metaThen, std::unique_ptr<MoveOperation> moveOp,
+ std::shared_ptr<IDestructorCallback> onDone)
+{
+ // Reread meta data as document might have been altered after move was initiated
+ // If so it will fail the timestamp sanity check later on.
+ search::DocumentMetaData metaNow = _handler->getMetaData(metaThen.lid);
+ // This should be impossible and should probably be an assert
+ if ( ! isSameDocument(metaThen, metaNow)) return;
+ if (metaNow.gid != moveOp->getDocument()->getId().getGlobalId()) return;
+
+ uint32_t lowestLid = _handler->getLidStatus().getLowestFreeLid();
+ if (lowestLid >= metaNow.lid) return;
+ moveOp->setTargetLid(lowestLid);
+ _opStorer.appendOperation(*moveOp, onDone);
+ _handler->handleMove(*moveOp, std::move(onDone));
+}
+CompactionJob::CompactionJob(const DocumentDBLidSpaceCompactionConfig &config,
+ RetainGuard dbRetainer,
+ std::shared_ptr<ILidSpaceCompactionHandler> handler,
+ IOperationStorer &opStorer,
+ IThreadService & master,
+ BucketExecutor & bucketExecutor,
+ IDiskMemUsageNotifier &diskMemUsageNotifier,
+ const BlockableMaintenanceJobConfig &blockableConfig,
+ IClusterStateChangedNotifier &clusterStateChangedNotifier,
+ bool nodeRetired,
+ document::BucketSpace bucketSpace)
+ : BlockableMaintenanceJob("lid_space_compaction." + handler->getName(),
+ config.getDelay(), config.getInterval(), blockableConfig),
+ IDiskMemUsageListener(),
+ IClusterStateChangedHandler(),
+ std::enable_shared_from_this<CompactionJob>(),
+ _cfg(config),
+ _handler(std::move(handler)),
+ _opStorer(opStorer),
+ _scanItr(),
+ _diskMemUsageNotifier(diskMemUsageNotifier),
+ _clusterStateChangedNotifier(clusterStateChangedNotifier),
+ _ops_rate_tracker(std::make_shared<RemoveOperationsRateTracker>(config.get_remove_batch_block_rate(),
+ config.get_remove_block_rate())),
+ _is_disabled(false),
+ _shouldCompactLidSpace(false),
+ _master(master),
+ _bucketExecutor(bucketExecutor),
+ _dbRetainer(std::move(dbRetainer)),
+ _bucketSpace(bucketSpace)
+{
+ _diskMemUsageNotifier.addDiskMemUsageListener(this);
+ _clusterStateChangedNotifier.addClusterStateChangedHandler(this);
+ if (nodeRetired) {
+ setBlocked(BlockedReason::CLUSTER_STATE);
+ }
+ _handler->set_operation_listener(_ops_rate_tracker);
}
+
+CompactionJob::~CompactionJob() {
+ _clusterStateChangedNotifier.removeClusterStateChangedHandler(this);
+ _diskMemUsageNotifier.removeDiskMemUsageListener(this);
+}
+
+std::shared_ptr<CompactionJob>
+CompactionJob::create(const DocumentDBLidSpaceCompactionConfig &config,
+ RetainGuard dbRetainer,
+ std::shared_ptr<ILidSpaceCompactionHandler> handler,
+ IOperationStorer &opStorer,
+ IThreadService & master,
+ BucketExecutor & bucketExecutor,
+ IDiskMemUsageNotifier &diskMemUsageNotifier,
+ const BlockableMaintenanceJobConfig &blockableConfig,
+ IClusterStateChangedNotifier &clusterStateChangedNotifier,
+ bool nodeRetired,
+ document::BucketSpace bucketSpace)
+{
+ return std::shared_ptr<CompactionJob>(
+ new CompactionJob(config, std::move(dbRetainer), std::move(handler), opStorer, master, bucketExecutor,
+ diskMemUsageNotifier, blockableConfig, clusterStateChangedNotifier, nodeRetired, bucketSpace),
+ [&master](auto job) {
+ auto failed = master.execute(makeLambdaTask([job]() { delete job; }));
+ assert(!failed);
+ });
+}
+
+DocumentMetaData
+CompactionJob::getNextDocument(const LidUsageStats &stats, bool retryLastDocument)
+{
+ return _scanItr->next(std::max(stats.getLowestFreeLid(), stats.getUsedLids()), retryLastDocument);
+}
+
+bool
+CompactionJob::run()
+{
+ if (isBlocked()) {
+ return true; // indicate work is done since no work can be done
+ }
+ LidUsageStats stats = _handler->getLidStatus();
+ if (remove_batch_is_ongoing()) {
+ // Note that we don't set the job as blocked as the decision to un-block it is not driven externally.
+ LOG(info, "%s: Lid space compaction is disabled while remove batch (delete buckets) is ongoing",
+ _handler->getName().c_str());
+ _is_disabled = true;
+ return true;
+ }
+ if (remove_is_ongoing()) {
+ // Note that we don't set the job as blocked as the decision to un-block it is not driven externally.
+ LOG(info, "%s: Lid space compaction is disabled while remove operations are ongoing",
+ _handler->getName().c_str());
+ _is_disabled = true;
+ return true;
+ }
+ if (_is_disabled) {
+ LOG(info, "%s: Lid space compaction is re-enabled as remove operations are no longer ongoing",
+ _handler->getName().c_str());
+ _is_disabled = false;
+ }
+
+ if (_scanItr && !_scanItr->valid()) {
+ if (shouldRestartScanDocuments(_handler->getLidStatus())) {
+ _scanItr = _handler->getIterator();
+ } else {
+ _scanItr = IDocumentScanIterator::UP();
+ _shouldCompactLidSpace = true;
+ return false;
+ }
+ }
+
+ if (_scanItr) {
+ return scanDocuments(stats);
+ } else if (_shouldCompactLidSpace) {
+ compactLidSpace(stats);
+ } else if (hasTooMuchLidBloat(stats)) {
+ assert(!_scanItr);
+ _scanItr = _handler->getIterator();
+ return scanDocuments(stats);
+ }
+ return true;
+}
+
+bool
+CompactionJob::remove_batch_is_ongoing() const
+{
+ return _ops_rate_tracker->remove_batch_above_threshold();
+}
+
+bool
+CompactionJob::remove_is_ongoing() const
+{
+ return _ops_rate_tracker->remove_above_threshold();
+}
+
+bool
+CompactionJob::hasTooMuchLidBloat(const LidUsageStats &stats) const
+{
+ return ((stats.getLidBloat() >= _cfg.getAllowedLidBloat()) &&
+ (stats.getLidBloatFactor() >= _cfg.getAllowedLidBloatFactor()) &&
+ (stats.getLidLimit() > stats.getLowestFreeLid()));
+}
+
+bool
+CompactionJob::shouldRestartScanDocuments(const LidUsageStats &stats) const
+{
+ return ((stats.getUsedLids() + _cfg.getAllowedLidBloat()) < stats.getHighestUsedLid()) &&
+ (stats.getLowestFreeLid() < stats.getHighestUsedLid());
+}
+
+void
+CompactionJob::compactLidSpace(const LidUsageStats &stats)
+{
+ uint32_t wantedLidLimit = stats.getHighestUsedLid() + 1;
+ CompactLidSpaceOperation op(_handler->getSubDbId(), wantedLidLimit);
+ vespalib::Gate gate;
+ auto commit_result = _opStorer.appendAndCommitOperation(op, std::make_shared<vespalib::GateCallback>(gate));
+ gate.await();
+ _handler->handleCompactLidSpace(op, std::make_shared<vespalib::KeepAlive<decltype(commit_result)>>(std::move(commit_result)));
+ EventLogger::lidSpaceCompactionComplete(_handler->getName(), wantedLidLimit);
+ _shouldCompactLidSpace = false;
+}
+
+void
+CompactionJob::notifyDiskMemUsage(DiskMemUsageState state)
+{
+ // Called by master write thread
+ internalNotifyDiskMemUsage(state);
+}
+
+void
+CompactionJob::notifyClusterStateChanged(const std::shared_ptr<IBucketStateCalculator> &newCalc)
+{
+ // Called by master write thread
+ bool nodeRetired = newCalc->nodeRetired();
+ if (!nodeRetired) {
+ if (isBlocked(BlockedReason::CLUSTER_STATE)) {
+ LOG(info, "%s: Lid space compaction is un-blocked as node is no longer retired", _handler->getName().c_str());
+ unBlock(BlockedReason::CLUSTER_STATE);
+ }
+ } else {
+ LOG(info, "%s: Lid space compaction is blocked as node is retired", _handler->getName().c_str());
+ setBlocked(BlockedReason::CLUSTER_STATE);
+ }
+}
+
+} // namespace proton::lidspace
diff --git a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.h b/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.h
index 4116a4cedf0..725c7387bdc 100644
--- a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.h
+++ b/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job.h
@@ -1,33 +1,100 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
#pragma once
-#include "lid_space_compaction_job_base.h"
+#include "blockable_maintenance_job.h"
+#include "document_db_maintenance_config.h"
+#include "i_disk_mem_usage_listener.h"
+#include "iclusterstatechangedhandler.h"
+#include <vespa/searchlib/common/idocumentmetastore.h>
+#include <vespa/searchcore/proton/common/monitored_refcount.h>
+#include <vespa/document/bucket/bucketspace.h>
+#include <atomic>
+namespace storage::spi { struct BucketExecutor; }
+namespace searchcorespi::index { struct IThreadService; }
+namespace vespalib { class IDestructorCallback; }
namespace proton {
+ class MoveOperation;
+ class IDiskMemUsageNotifier;
+ class IClusterStateChangedNotifier;
+ struct IOperationStorer;
+ struct ILidSpaceCompactionHandler;
+ struct IDocumentScanIterator;
+ class RemoveOperationsRateTracker;
+}
-class IFrozenBucketHandler;
+namespace proton::lidspace {
/**
- * Moves documents from higher lids to lower lids. It uses a 'frozen' bucket mechanism to ensure that it has exclusive access to the document.
+ * Moves documents from higher lids to lower lids. It uses a BucketExecutor that ensures that the bucket
+ * is locked for changes while the document is moved.
*/
-class LidSpaceCompactionJob : public LidSpaceCompactionJobBase
+class CompactionJob : public BlockableMaintenanceJob,
+ public IDiskMemUsageListener,
+ public IClusterStateChangedHandler,
+ public std::enable_shared_from_this<CompactionJob>
{
private:
- IFrozenBucketHandler &_frozenHandler;
- bool _retryFrozenDocument;
+ using BucketExecutor = storage::spi::BucketExecutor;
+ using IDestructorCallback = vespalib::IDestructorCallback;
+ using IThreadService = searchcorespi::index::IThreadService;
+ const DocumentDBLidSpaceCompactionConfig _cfg;
+ std::shared_ptr<ILidSpaceCompactionHandler> _handler;
+ IOperationStorer &_opStorer;
+ std::unique_ptr<IDocumentScanIterator> _scanItr;
+ IDiskMemUsageNotifier &_diskMemUsageNotifier;
+ IClusterStateChangedNotifier &_clusterStateChangedNotifier;
+ std::shared_ptr<RemoveOperationsRateTracker> _ops_rate_tracker;
+ bool _is_disabled;
+ bool _shouldCompactLidSpace;
+ IThreadService &_master;
+ BucketExecutor &_bucketExecutor;
+ RetainGuard _dbRetainer;
+ document::BucketSpace _bucketSpace;
+
+ bool hasTooMuchLidBloat(const search::LidUsageStats &stats) const;
+ bool shouldRestartScanDocuments(const search::LidUsageStats &stats) const;
+ void compactLidSpace(const search::LidUsageStats &stats);
+ bool remove_batch_is_ongoing() const;
+ bool remove_is_ongoing() const;
+ search::DocumentMetaData getNextDocument(const search::LidUsageStats &stats, bool retryLastDocument);
- bool scanDocuments(const search::LidUsageStats &stats) override;
+ bool scanDocuments(const search::LidUsageStats &stats);
+ static void moveDocument(std::shared_ptr<CompactionJob> job, const search::DocumentMetaData & metaThen,
+ std::shared_ptr<IDestructorCallback> onDone);
+ void completeMove(const search::DocumentMetaData & metaThen, std::unique_ptr<MoveOperation> moveOp,
+ std::shared_ptr<IDestructorCallback> onDone);
+ class MoveTask;
+ CompactionJob(const DocumentDBLidSpaceCompactionConfig &config,
+ RetainGuard dbRetainer,
+ std::shared_ptr<ILidSpaceCompactionHandler> handler,
+ IOperationStorer &opStorer,
+ IThreadService & master,
+ BucketExecutor & bucketExecutor,
+ IDiskMemUsageNotifier &diskMemUsageNotifier,
+ const BlockableMaintenanceJobConfig &blockableConfig,
+ IClusterStateChangedNotifier &clusterStateChangedNotifier,
+ bool nodeRetired,
+ document::BucketSpace bucketSpace);
public:
- LidSpaceCompactionJob(const DocumentDBLidSpaceCompactionConfig &config,
- std::shared_ptr<ILidSpaceCompactionHandler> handler,
- IOperationStorer &opStorer,
- IFrozenBucketHandler &frozenHandler,
- IDiskMemUsageNotifier &diskMemUsageNotifier,
- const BlockableMaintenanceJobConfig &blockableConfig,
- IClusterStateChangedNotifier &clusterStateChangedNotifier,
- bool nodeRetired);
- ~LidSpaceCompactionJob() override;
+ static std::shared_ptr<CompactionJob>
+ create(const DocumentDBLidSpaceCompactionConfig &config,
+ RetainGuard dbRetainer,
+ std::shared_ptr<ILidSpaceCompactionHandler> handler,
+ IOperationStorer &opStorer,
+ IThreadService & master,
+ BucketExecutor & bucketExecutor,
+ IDiskMemUsageNotifier &diskMemUsageNotifier,
+ const BlockableMaintenanceJobConfig &blockableConfig,
+ IClusterStateChangedNotifier &clusterStateChangedNotifier,
+ bool nodeRetired,
+ document::BucketSpace bucketSpace);
+ ~CompactionJob() override;
+ void notifyDiskMemUsage(DiskMemUsageState state) override;
+ void notifyClusterStateChanged(const std::shared_ptr<IBucketStateCalculator> &newCalc) override;
+ bool run() override;
};
+} // namespace proton::lidspace
diff --git a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_base.cpp b/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_base.cpp
deleted file mode 100644
index 225d128f9bf..00000000000
--- a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_base.cpp
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "lid_space_compaction_job_base.h"
-#include "imaintenancejobrunner.h"
-#include "i_document_scan_iterator.h"
-#include "i_lid_space_compaction_handler.h"
-#include "i_operation_storer.h"
-#include "remove_operations_rate_tracker.h"
-#include "i_disk_mem_usage_notifier.h"
-#include "iclusterstatechangednotifier.h"
-#include <vespa/searchcore/proton/feedoperation/compact_lid_space_operation.h>
-#include <vespa/searchcore/proton/common/eventlogger.h>
-#include <vespa/vespalib/util/destructor_callbacks.h>
-#include <vespa/vespalib/util/gate.h>
-#include <cassert>
-
-#include <vespa/log/log.h>
-LOG_SETUP(".proton.server.lid_space_compaction_job");
-
-using search::DocumentMetaData;
-using search::LidUsageStats;
-
-namespace proton {
-
-bool
-LidSpaceCompactionJobBase::hasTooMuchLidBloat(const LidUsageStats &stats) const
-{
- return ((stats.getLidBloat() >= _cfg.getAllowedLidBloat()) &&
- (stats.getLidBloatFactor() >= _cfg.getAllowedLidBloatFactor()) &&
- (stats.getLidLimit() > stats.getLowestFreeLid()));
-}
-
-bool
-LidSpaceCompactionJobBase::shouldRestartScanDocuments(const LidUsageStats &stats) const
-{
- return ((stats.getUsedLids() + _cfg.getAllowedLidBloat()) < stats.getHighestUsedLid()) &&
- (stats.getLowestFreeLid() < stats.getHighestUsedLid());
-}
-
-DocumentMetaData
-LidSpaceCompactionJobBase::getNextDocument(const LidUsageStats &stats, bool retryLastDocument)
-{
- return _scanItr->next(std::max(stats.getLowestFreeLid(), stats.getUsedLids()), retryLastDocument);
-}
-
-void
-LidSpaceCompactionJobBase::compactLidSpace(const LidUsageStats &stats)
-{
- uint32_t wantedLidLimit = stats.getHighestUsedLid() + 1;
- CompactLidSpaceOperation op(_handler->getSubDbId(), wantedLidLimit);
- vespalib::Gate gate;
- auto commit_result = _opStorer.appendAndCommitOperation(op, std::make_shared<vespalib::GateCallback>(gate));
- gate.await();
- _handler->handleCompactLidSpace(op, std::make_shared<vespalib::KeepAlive<decltype(commit_result)>>(std::move(commit_result)));
- EventLogger::lidSpaceCompactionComplete(_handler->getName(), wantedLidLimit);
- _shouldCompactLidSpace = false;
-}
-
-bool
-LidSpaceCompactionJobBase::remove_batch_is_ongoing() const
-{
- return _ops_rate_tracker->remove_batch_above_threshold();
-}
-
-bool
-LidSpaceCompactionJobBase::remove_is_ongoing() const
-{
- return _ops_rate_tracker->remove_above_threshold();
-}
-
-LidSpaceCompactionJobBase::LidSpaceCompactionJobBase(const DocumentDBLidSpaceCompactionConfig &config,
- std::shared_ptr<ILidSpaceCompactionHandler> handler,
- IOperationStorer &opStorer,
- IDiskMemUsageNotifier &diskMemUsageNotifier,
- const BlockableMaintenanceJobConfig &blockableConfig,
- IClusterStateChangedNotifier &clusterStateChangedNotifier,
- bool nodeRetired)
- : BlockableMaintenanceJob("lid_space_compaction." + handler->getName(),
- config.getDelay(), config.getInterval(), blockableConfig),
- _cfg(config),
- _handler(std::move(handler)),
- _opStorer(opStorer),
- _scanItr(),
- _diskMemUsageNotifier(diskMemUsageNotifier),
- _clusterStateChangedNotifier(clusterStateChangedNotifier),
- _ops_rate_tracker(std::make_shared<RemoveOperationsRateTracker>(config.get_remove_batch_block_rate(),
- config.get_remove_block_rate())),
- _is_disabled(false),
- _shouldCompactLidSpace(false)
-{
- _diskMemUsageNotifier.addDiskMemUsageListener(this);
- _clusterStateChangedNotifier.addClusterStateChangedHandler(this);
- if (nodeRetired) {
- setBlocked(BlockedReason::CLUSTER_STATE);
- }
- _handler->set_operation_listener(_ops_rate_tracker);
-}
-
-LidSpaceCompactionJobBase::~LidSpaceCompactionJobBase()
-{
- _clusterStateChangedNotifier.removeClusterStateChangedHandler(this);
- _diskMemUsageNotifier.removeDiskMemUsageListener(this);
-}
-
-bool
-LidSpaceCompactionJobBase::run()
-{
- if (isBlocked()) {
- return true; // indicate work is done since no work can be done
- }
- LidUsageStats stats = _handler->getLidStatus();
- if (remove_batch_is_ongoing()) {
- // Note that we don't set the job as blocked as the decision to un-block it is not driven externally.
- LOG(info, "%s: Lid space compaction is disabled while remove batch (delete buckets) is ongoing",
- _handler->getName().c_str());
- _is_disabled = true;
- return true;
- }
- if (remove_is_ongoing()) {
- // Note that we don't set the job as blocked as the decision to un-block it is not driven externally.
- LOG(info, "%s: Lid space compaction is disabled while remove operations are ongoing",
- _handler->getName().c_str());
- _is_disabled = true;
- return true;
- }
- if (_is_disabled) {
- LOG(info, "%s: Lid space compaction is re-enabled as remove operations are no longer ongoing",
- _handler->getName().c_str());
- _is_disabled = false;
- }
-
- if (_scanItr && !_scanItr->valid()) {
- if (shouldRestartScanDocuments(_handler->getLidStatus())) {
- _scanItr = _handler->getIterator();
- } else {
- _scanItr = IDocumentScanIterator::UP();
- _shouldCompactLidSpace = true;
- return false;
- }
- }
-
- if (_scanItr) {
- return scanDocuments(stats);
- } else if (_shouldCompactLidSpace) {
- compactLidSpace(stats);
- } else if (hasTooMuchLidBloat(stats)) {
- assert(!_scanItr);
- _scanItr = _handler->getIterator();
- return scanDocuments(stats);
- }
- return true;
-}
-
-void
-LidSpaceCompactionJobBase::notifyDiskMemUsage(DiskMemUsageState state)
-{
- // Called by master write thread
- internalNotifyDiskMemUsage(state);
-}
-
-void
-LidSpaceCompactionJobBase::notifyClusterStateChanged(const std::shared_ptr<IBucketStateCalculator> &newCalc)
-{
- // Called by master write thread
- bool nodeRetired = newCalc->nodeRetired();
- if (!nodeRetired) {
- if (isBlocked(BlockedReason::CLUSTER_STATE)) {
- LOG(info, "%s: Lid space compaction is un-blocked as node is no longer retired", _handler->getName().c_str());
- unBlock(BlockedReason::CLUSTER_STATE);
- }
- } else {
- LOG(info, "%s: Lid space compaction is blocked as node is retired", _handler->getName().c_str());
- setBlocked(BlockedReason::CLUSTER_STATE);
- }
-}
-
-} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_base.h b/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_base.h
deleted file mode 100644
index d127025c496..00000000000
--- a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_base.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include "blockable_maintenance_job.h"
-#include "document_db_maintenance_config.h"
-#include "i_disk_mem_usage_listener.h"
-#include "iclusterstatechangedhandler.h"
-#include <vespa/searchlib/common/idocumentmetastore.h>
-
-namespace proton {
- class IDiskMemUsageNotifier;
- class IClusterStateChangedNotifier;
- struct IOperationStorer;
- struct ILidSpaceCompactionHandler;
- struct IDocumentScanIterator;
- class RemoveOperationsRateTracker;
-}
-
-namespace proton {
-
-/**
- * This is a base class for moving documents from a high lid to a lower free
- * lid in order to keep the lid space compact.
- *
- * Compaction is handled by moving documents from high lids to low free lids.
- * A handler is typically working over a single document sub db.
- */
-class LidSpaceCompactionJobBase : public BlockableMaintenanceJob,
- public IDiskMemUsageListener,
- public IClusterStateChangedHandler
-{
-private:
- const DocumentDBLidSpaceCompactionConfig _cfg;
-protected:
- std::shared_ptr<ILidSpaceCompactionHandler> _handler;
- IOperationStorer &_opStorer;
- std::unique_ptr<IDocumentScanIterator> _scanItr;
-private:
- IDiskMemUsageNotifier &_diskMemUsageNotifier;
- IClusterStateChangedNotifier &_clusterStateChangedNotifier;
- std::shared_ptr<RemoveOperationsRateTracker> _ops_rate_tracker;
- bool _is_disabled;
- bool _shouldCompactLidSpace;
-
-
- bool hasTooMuchLidBloat(const search::LidUsageStats &stats) const;
- bool shouldRestartScanDocuments(const search::LidUsageStats &stats) const;
- virtual bool scanDocuments(const search::LidUsageStats &stats) = 0;
- void compactLidSpace(const search::LidUsageStats &stats);
- bool remove_batch_is_ongoing() const;
- bool remove_is_ongoing() const;
-protected:
- search::DocumentMetaData getNextDocument(const search::LidUsageStats &stats, bool retryLastDocument);
-public:
- LidSpaceCompactionJobBase(const DocumentDBLidSpaceCompactionConfig &config,
- std::shared_ptr<ILidSpaceCompactionHandler> handler,
- IOperationStorer &opStorer,
- IDiskMemUsageNotifier &diskMemUsageNotifier,
- const BlockableMaintenanceJobConfig &blockableConfig,
- IClusterStateChangedNotifier &clusterStateChangedNotifier,
- bool nodeRetired);
- ~LidSpaceCompactionJobBase() override;
-
- void notifyDiskMemUsage(DiskMemUsageState state) override;
- void notifyClusterStateChanged(const std::shared_ptr<IBucketStateCalculator> &newCalc) override;
- bool run() override;
-};
-
-} // namespace proton
-
diff --git a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_take2.cpp b/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_take2.cpp
deleted file mode 100644
index 6ac8f803800..00000000000
--- a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_take2.cpp
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "lid_space_compaction_job_take2.h"
-#include "i_document_scan_iterator.h"
-#include "i_lid_space_compaction_handler.h"
-#include "i_operation_storer.h"
-#include <vespa/searchcore/proton/feedoperation/moveoperation.h>
-#include <vespa/searchcorespi/index/i_thread_service.h>
-#include <vespa/persistence/spi/bucket_tasks.h>
-#include <vespa/document/fieldvalue/document.h>
-#include <vespa/vespalib/util/destructor_callbacks.h>
-#include <vespa/vespalib/util/lambdatask.h>
-#include <cassert>
-#include <thread>
-
-using search::DocumentMetaData;
-using search::LidUsageStats;
-using storage::spi::makeBucketTask;
-using storage::spi::Bucket;
-using vespalib::makeLambdaTask;
-
-namespace proton::lidspace {
-
-namespace {
-
-bool
-isSameDocument(const search::DocumentMetaData &a, const search::DocumentMetaData &b) {
- return (a.lid == b.lid) &&
- (a.bucketId == b.bucketId) &&
- (a.gid == b.gid) &&
- (a.timestamp ==
- b.timestamp); // Timestamp check can be removed once logic has proved itself in large scale.
-}
-
-}
-
-class CompactionJob::MoveTask : public storage::spi::BucketTask {
-public:
- MoveTask(std::shared_ptr<CompactionJob> job, const search::DocumentMetaData & meta, IDestructorCallback::SP opsTracker)
- : _job(std::move(job)),
- _meta(meta),
- _opsTracker(std::move(opsTracker))
- { }
- void run(const Bucket & bucket, IDestructorCallback::SP onDone) override {
- assert(bucket.getBucketId() == _meta.bucketId);
- using DoneContext = vespalib::KeepAlive<std::pair<IDestructorCallback::SP, IDestructorCallback::SP>>;
- CompactionJob::moveDocument(std::move(_job), _meta,
- std::make_shared<DoneContext>(std::make_pair(std::move(_opsTracker), std::move(onDone))));
- }
- void fail(const Bucket & bucket) override {
- assert(bucket.getBucketId() == _meta.bucketId);
- auto & master = _job->_master;
- if (_job->_stopped) return;
- master.execute(makeLambdaTask([job=std::move(_job)] { job->_scanItr.reset(); }));
- }
-private:
- std::shared_ptr<CompactionJob> _job;
- const search::DocumentMetaData _meta;
- IDestructorCallback::SP _opsTracker;
-};
-
-bool
-CompactionJob::scanDocuments(const LidUsageStats &stats)
-{
- if (_scanItr->valid()) {
- DocumentMetaData document = getNextDocument(stats, false);
- if (document.valid()) {
- Bucket metaBucket(document::Bucket(_bucketSpace, document.bucketId));
- _bucketExecutor.execute(metaBucket, std::make_unique<MoveTask>(shared_from_this(), document, getLimiter().beginOperation()));
- if (isBlocked(BlockedReason::OUTSTANDING_OPS)) {
- return true;
- }
- }
- }
- return false;
-}
-
-void
-CompactionJob::moveDocument(std::shared_ptr<CompactionJob> job, const search::DocumentMetaData & metaThen,
- std::shared_ptr<IDestructorCallback> context)
-{
- if (job->_stopped) return; //TODO Remove once lidtracker is no longer in use.
- // The real lid must be sampled in the master thread.
- //TODO remove target lid from createMoveOperation interface
- auto op = job->_handler->createMoveOperation(metaThen, 0);
- if (!op || !op->getDocument()) return;
- // Early detection and force md5 calculation outside of master thread
- if (metaThen.gid != op->getDocument()->getId().getGlobalId()) return;
-
- auto & master = job->_master;
- if (job->_stopped) return;
- master.execute(makeLambdaTask([self=std::move(job), meta=metaThen, moveOp=std::move(op), onDone=std::move(context)]() mutable {
- if (self->_stopped.load(std::memory_order_relaxed)) return;
- self->completeMove(meta, std::move(moveOp), std::move(onDone));
- }));
-}
-
-void
-CompactionJob::completeMove(const search::DocumentMetaData & metaThen, std::unique_ptr<MoveOperation> moveOp,
- std::shared_ptr<IDestructorCallback> onDone)
-{
- // Reread meta data as document might have been altered after move was initiated
- // If so it will fail the timestamp sanity check later on.
- search::DocumentMetaData metaNow = _handler->getMetaData(metaThen.lid);
- // This should be impossible and should probably be an assert
- if ( ! isSameDocument(metaThen, metaNow)) return;
- if (metaNow.gid != moveOp->getDocument()->getId().getGlobalId()) return;
-
- uint32_t lowestLid = _handler->getLidStatus().getLowestFreeLid();
- if (lowestLid >= metaNow.lid) return;
- moveOp->setTargetLid(lowestLid);
- _opStorer.appendOperation(*moveOp, onDone);
- _handler->handleMove(*moveOp, std::move(onDone));
-}
-
-CompactionJob::CompactionJob(const DocumentDBLidSpaceCompactionConfig &config,
- RetainGuard dbRetainer,
- std::shared_ptr<ILidSpaceCompactionHandler> handler,
- IOperationStorer &opStorer,
- IThreadService & master,
- BucketExecutor & bucketExecutor,
- IDiskMemUsageNotifier &diskMemUsageNotifier,
- const BlockableMaintenanceJobConfig &blockableConfig,
- IClusterStateChangedNotifier &clusterStateChangedNotifier,
- bool nodeRetired,
- document::BucketSpace bucketSpace)
- : LidSpaceCompactionJobBase(config, std::move(handler), opStorer, diskMemUsageNotifier,
- blockableConfig, clusterStateChangedNotifier, nodeRetired),
- std::enable_shared_from_this<CompactionJob>(),
- _master(master),
- _bucketExecutor(bucketExecutor),
- _dbRetainer(std::move(dbRetainer)),
- _bucketSpace(bucketSpace),
- _stopped(false)
-{ }
-
-CompactionJob::~CompactionJob() = default;
-
-std::shared_ptr<CompactionJob>
-CompactionJob::create(const DocumentDBLidSpaceCompactionConfig &config,
- RetainGuard dbRetainer,
- std::shared_ptr<ILidSpaceCompactionHandler> handler,
- IOperationStorer &opStorer,
- IThreadService & master,
- BucketExecutor & bucketExecutor,
- IDiskMemUsageNotifier &diskMemUsageNotifier,
- const BlockableMaintenanceJobConfig &blockableConfig,
- IClusterStateChangedNotifier &clusterStateChangedNotifier,
- bool nodeRetired,
- document::BucketSpace bucketSpace)
-{
- return std::shared_ptr<CompactionJob>(
- new CompactionJob(config, std::move(dbRetainer), std::move(handler), opStorer, master, bucketExecutor,
- diskMemUsageNotifier, blockableConfig, clusterStateChangedNotifier, nodeRetired, bucketSpace),
- [&master](auto job) {
- auto failed = master.execute(makeLambdaTask([job]() { delete job; }));
- assert(!failed);
- });
-}
-
-void
-CompactionJob::onStop() {
- BlockableMaintenanceJob::onStop();
- _stopped = true;
-}
-
-} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_take2.h b/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_take2.h
deleted file mode 100644
index aa72d2e84bc..00000000000
--- a/searchcore/src/vespa/searchcore/proton/server/lid_space_compaction_job_take2.h
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include "lid_space_compaction_job_base.h"
-#include <vespa/searchcore/proton/common/monitored_refcount.h>
-#include <vespa/document/bucket/bucketspace.h>
-#include <atomic>
-
-namespace storage::spi { struct BucketExecutor; }
-namespace searchcorespi::index { struct IThreadService; }
-namespace vespalib { class IDestructorCallback; }
-namespace proton {
- class IDiskMemUsageNotifier;
- class IClusterStateChangedNotifier;
- class MoveOperation;
-}
-
-namespace proton::lidspace {
-
-/**
- * Moves documents from higher lids to lower lids. It uses a BucketExecutor that ensures that the bucket
- * is locked for changes while the document is moved.
- */
-class CompactionJob : public LidSpaceCompactionJobBase, public std::enable_shared_from_this<CompactionJob>
-{
-private:
- using BucketExecutor = storage::spi::BucketExecutor;
- using IDestructorCallback = vespalib::IDestructorCallback;
- using IThreadService = searchcorespi::index::IThreadService;
- IThreadService &_master;
- BucketExecutor &_bucketExecutor;
- RetainGuard _dbRetainer;
- document::BucketSpace _bucketSpace;
- std::atomic<bool> _stopped;
-
- bool scanDocuments(const search::LidUsageStats &stats) override;
- static void moveDocument(std::shared_ptr<CompactionJob> job, const search::DocumentMetaData & metaThen,
- std::shared_ptr<IDestructorCallback> onDone);
- void completeMove(const search::DocumentMetaData & metaThen, std::unique_ptr<MoveOperation> moveOp,
- std::shared_ptr<IDestructorCallback> onDone);
- void onStop() override;
- class MoveTask;
-
- CompactionJob(const DocumentDBLidSpaceCompactionConfig &config,
- RetainGuard dbRetainer,
- std::shared_ptr<ILidSpaceCompactionHandler> handler,
- IOperationStorer &opStorer,
- IThreadService & master,
- BucketExecutor & bucketExecutor,
- IDiskMemUsageNotifier &diskMemUsageNotifier,
- const BlockableMaintenanceJobConfig &blockableConfig,
- IClusterStateChangedNotifier &clusterStateChangedNotifier,
- bool nodeRetired,
- document::BucketSpace bucketSpace);
-public:
- static std::shared_ptr<CompactionJob>
- create(const DocumentDBLidSpaceCompactionConfig &config,
- RetainGuard dbRetainer,
- std::shared_ptr<ILidSpaceCompactionHandler> handler,
- IOperationStorer &opStorer,
- IThreadService & master,
- BucketExecutor & bucketExecutor,
- IDiskMemUsageNotifier &diskMemUsageNotifier,
- const BlockableMaintenanceJobConfig &blockableConfig,
- IClusterStateChangedNotifier &clusterStateChangedNotifier,
- bool nodeRetired,
- document::BucketSpace bucketSpace);
- ~CompactionJob() override;
-};
-
-} // namespace proton
-
diff --git a/searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.cpp b/searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.cpp
index 7a5a42b5608..cfe8da18270 100644
--- a/searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.cpp
@@ -1,11 +1,9 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "bucketmovejob.h"
-#include "bucketmovejobv2.h"
#include "heart_beat_job.h"
#include "job_tracked_maintenance_job.h"
#include "lid_space_compaction_job.h"
-#include "lid_space_compaction_job_take2.h"
#include "lid_space_compaction_handler.h"
#include "maintenance_jobs_injector.h"
#include "prune_session_cache_job.h"
@@ -31,7 +29,6 @@ injectLidSpaceCompactionJobs(MaintenanceController &controller,
storage::spi::BucketExecutor & bucketExecutor,
ILidSpaceCompactionHandler::Vector lscHandlers,
IOperationStorer &opStorer,
- IFrozenBucketHandler &fbHandler,
IJobTracker::SP tracker,
IDiskMemUsageNotifier &diskMemUsageNotifier,
IClusterStateChangedNotifier &clusterStateChangedNotifier,
@@ -39,21 +36,10 @@ injectLidSpaceCompactionJobs(MaintenanceController &controller,
document::BucketSpace bucketSpace)
{
for (auto &lidHandler : lscHandlers) {
- std::shared_ptr<IMaintenanceJob> job;
- if (config.getLidSpaceCompactionConfig().useBucketExecutor()) {
- job = lidspace::CompactionJob::create(config.getLidSpaceCompactionConfig(), controller.retainDB(),
- std::move(lidHandler), opStorer, controller.masterThread(),
- bucketExecutor, diskMemUsageNotifier,config.getBlockableJobConfig(),
- clusterStateChangedNotifier, (calc ? calc->nodeRetired() : false), bucketSpace);
- } else {
- job = std::make_shared<LidSpaceCompactionJob>(
- config.getLidSpaceCompactionConfig(),
- std::move(lidHandler), opStorer, fbHandler,
- diskMemUsageNotifier,
- config.getBlockableJobConfig(),
- clusterStateChangedNotifier,
- (calc ? calc->nodeRetired() : false));
- }
+ auto job = lidspace::CompactionJob::create(config.getLidSpaceCompactionConfig(), controller.retainDB(),
+ std::move(lidHandler), opStorer, controller.masterThread(),
+ bucketExecutor, diskMemUsageNotifier,config.getBlockableJobConfig(),
+ clusterStateChangedNotifier, (calc ? calc->nodeRetired() : false), bucketSpace);
controller.registerJobInMasterThread(trackJob(tracker, std::move(job)));
}
}
@@ -61,7 +47,6 @@ injectLidSpaceCompactionJobs(MaintenanceController &controller,
void
injectBucketMoveJob(MaintenanceController &controller,
const DocumentDBMaintenanceConfig &config,
- IFrozenBucketHandler &fbHandler,
storage::spi::BucketExecutor & bucketExecutor,
bucketdb::IBucketCreateNotifier &bucketCreateNotifier,
const vespalib::string &docTypeName,
@@ -74,26 +59,10 @@ injectBucketMoveJob(MaintenanceController &controller,
DocumentDBJobTrackers &jobTrackers,
IDiskMemUsageNotifier &diskMemUsageNotifier)
{
- std::shared_ptr<IMaintenanceJob> bmj;
- if (config.getBucketMoveConfig().useBucketExecutor()) {
- bmj = BucketMoveJobV2::create(calc, controller.retainDB(), moveHandler, bucketModifiedHandler, controller.masterThread(),
- bucketExecutor, controller.getReadySubDB(), controller.getNotReadySubDB(),
- bucketCreateNotifier, clusterStateChangedNotifier, bucketStateChangedNotifier,
- diskMemUsageNotifier, config.getBlockableJobConfig(), docTypeName, bucketSpace);
- } else {
- bmj = std::make_shared<BucketMoveJob>(calc,
- moveHandler,
- bucketModifiedHandler,
- controller.getReadySubDB(),
- controller.getNotReadySubDB(),
- fbHandler,
- bucketCreateNotifier,
- clusterStateChangedNotifier,
- bucketStateChangedNotifier,
- diskMemUsageNotifier,
- config.getBlockableJobConfig(),
- docTypeName, bucketSpace);
- }
+ auto bmj = BucketMoveJob::create(calc, controller.retainDB(), moveHandler, bucketModifiedHandler, controller.masterThread(),
+ bucketExecutor, controller.getReadySubDB(), controller.getNotReadySubDB(),
+ bucketCreateNotifier, clusterStateChangedNotifier, bucketStateChangedNotifier,
+ diskMemUsageNotifier, config.getBlockableJobConfig(), docTypeName, bucketSpace);
controller.registerJobInMasterThread(trackJob(jobTrackers.getBucketMove(), std::move(bmj)));
}
@@ -106,7 +75,6 @@ MaintenanceJobsInjector::injectJobs(MaintenanceController &controller,
IHeartBeatHandler &hbHandler,
matching::ISessionCachePruner &scPruner,
IOperationStorer &opStorer,
- IFrozenBucketHandler &fbHandler,
bucketdb::IBucketCreateNotifier &bucketCreateNotifier,
document::BucketSpace bucketSpace,
IPruneRemovedDocumentsHandler &prdHandler,
@@ -128,9 +96,14 @@ MaintenanceJobsInjector::injectJobs(MaintenanceController &controller,
const auto & docTypeName = controller.getDocTypeName().getName();
const MaintenanceDocumentSubDB &mRemSubDB(controller.getRemSubDB());
- auto pruneRDjob = std::make_unique<PruneRemovedDocumentsJob>(config.getPruneRemovedDocumentsConfig(), *mRemSubDB.meta_store(),
- mRemSubDB.sub_db_id(), docTypeName, prdHandler, fbHandler);
- controller.registerJobInMasterThread(trackJob(jobTrackers.getRemovedDocumentsPrune(), std::move(pruneRDjob)));
+
+ controller.registerJobInMasterThread(
+ trackJob(jobTrackers.getRemovedDocumentsPrune(),
+ PruneRemovedDocumentsJob::create(config.getPruneRemovedDocumentsConfig(), controller.retainDB(),
+ *mRemSubDB.meta_store(), mRemSubDB.sub_db_id(), bucketSpace,
+ docTypeName, prdHandler, controller.masterThread(),
+ bucketExecutor)));
+
if (!config.getLidSpaceCompactionConfig().isDisabled()) {
ILidSpaceCompactionHandler::Vector lidSpaceCompactionHandlers;
@@ -138,11 +111,11 @@ MaintenanceJobsInjector::injectJobs(MaintenanceController &controller,
lidSpaceCompactionHandlers.push_back(std::make_shared<LidSpaceCompactionHandler>(controller.getRemSubDB(), docTypeName));
lidSpaceCompactionHandlers.push_back(std::make_shared<LidSpaceCompactionHandler>(controller.getNotReadySubDB(), docTypeName));
injectLidSpaceCompactionJobs(controller, config, bucketExecutor, std::move(lidSpaceCompactionHandlers),
- opStorer, fbHandler, jobTrackers.getLidSpaceCompact(), diskMemUsageNotifier,
+ opStorer, jobTrackers.getLidSpaceCompact(), diskMemUsageNotifier,
clusterStateChangedNotifier, calc, bucketSpace);
}
- injectBucketMoveJob(controller, config, fbHandler, bucketExecutor, bucketCreateNotifier, docTypeName, bucketSpace,
+ injectBucketMoveJob(controller, config, bucketExecutor, bucketCreateNotifier, docTypeName, bucketSpace,
moveHandler, bucketModifiedHandler, clusterStateChangedNotifier, bucketStateChangedNotifier,
calc, jobTrackers, diskMemUsageNotifier);
diff --git a/searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.h b/searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.h
index fb7117d2e66..cbc85613e1e 100644
--- a/searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.h
+++ b/searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.h
@@ -38,7 +38,6 @@ struct MaintenanceJobsInjector
IHeartBeatHandler &hbHandler,
matching::ISessionCachePruner &scPruner,
IOperationStorer &opStorer,
- IFrozenBucketHandler &fbHandler,
bucketdb::IBucketCreateNotifier &bucketCreateNotifier,
document::BucketSpace bucketSpace,
IPruneRemovedDocumentsHandler &prdHandler,
diff --git a/searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.cpp b/searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.cpp
index 3b4526e6f7c..011b6cb4b07 100644
--- a/searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.cpp
@@ -42,8 +42,7 @@ MaintenanceController::MaintenanceController(IThreadService &masterThread,
vespalib::Executor & defaultExecutor,
MonitoredRefCount & refCount,
const DocTypeName &docTypeName)
- : IBucketFreezeListener(),
- _masterThread(masterThread),
+ : _masterThread(masterThread),
_defaultExecutor(defaultExecutor),
_refCount(refCount),
_readySubDB(),
@@ -51,19 +50,15 @@ MaintenanceController::MaintenanceController(IThreadService &masterThread,
_notReadySubDB(),
_periodicTimer(),
_config(),
- _frozenBuckets(masterThread),
_state(State::INITIALIZING),
_docTypeName(docTypeName),
_jobs(),
_jobsLock()
-{
- _frozenBuckets.addListener(this); // forward freeze/thaw to bmc
-}
+{ }
MaintenanceController::~MaintenanceController()
{
kill();
- _frozenBuckets.removeListener(this);
}
void
@@ -235,19 +230,4 @@ MaintenanceController::syncSubDBs(const MaintenanceDocumentSubDB &readySubDB,
}
}
-
-void
-MaintenanceController::notifyThawedBucket(const BucketId &bucket)
-{
- (void) bucket;
- // No need to take _jobsLock as modification of _jobs also happens in master write thread.
- for (const auto &jw : _jobs) {
- IBlockableMaintenanceJob *job = jw->getJob().asBlockable();
- if (job && job->isBlocked()) {
- job->unBlock(IBlockableMaintenanceJob::BlockedReason::FROZEN_BUCKET);
- }
- }
-}
-
-
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.h b/searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.h
index 6415c51eeed..049238ae193 100644
--- a/searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.h
+++ b/searchcore/src/vespa/searchcore/proton/server/maintenancecontroller.h
@@ -4,8 +4,6 @@
#include "maintenancedocumentsubdb.h"
#include "i_maintenance_job.h"
-#include "frozenbuckets.h"
-#include "ibucketfreezelistener.h"
#include <vespa/searchcore/proton/common/doctypename.h>
#include <vespa/searchcore/proton/common/monitored_refcount.h>
#include <vespa/vespalib/util/scheduledexecutor.h>
@@ -29,7 +27,7 @@ class MonitoredRefCount;
* and a set of maintenance jobs for a document db.
* The maintenance jobs are independent of the controller.
*/
-class MaintenanceController : public IBucketFreezeListener
+class MaintenanceController
{
public:
using IThreadService = searchcorespi::index::IThreadService;
@@ -40,7 +38,7 @@ public:
MaintenanceController(IThreadService &masterThread, vespalib::Executor & defaultExecutor, MonitoredRefCount & refCount, const DocTypeName &docTypeName);
- ~MaintenanceController() override;
+ ~MaintenanceController();
void registerJobInMasterThread(IMaintenanceJob::UP job);
void registerJobInDefaultPool(IMaintenanceJob::UP job);
@@ -63,10 +61,6 @@ public:
void kill();
- operator IBucketFreezer &() { return _frozenBuckets; }
- operator const IFrozenBucketHandler &() const { return _frozenBuckets; }
- operator IFrozenBucketHandler &() { return _frozenBuckets; }
-
bool getStarted() const { return _state >= State::STARTED; }
bool getStopping() const { return _state == State::STOPPING; }
bool getPaused() const { return _state == State::PAUSED; }
@@ -89,7 +83,6 @@ private:
MaintenanceDocumentSubDB _notReadySubDB;
std::unique_ptr<vespalib::ScheduledExecutor> _periodicTimer;
DocumentDBMaintenanceConfigSP _config;
- FrozenBuckets _frozenBuckets;
State _state;
const DocTypeName &_docTypeName;
JobList _jobs;
@@ -97,7 +90,6 @@ private:
void addJobsToPeriodicTimer();
void restart();
- void notifyThawedBucket(const document::BucketId &bucket) override;
void performHoldJobs(JobList jobs);
void registerJob(vespalib::Executor & executor, IMaintenanceJob::UP job);
};
diff --git a/searchcore/src/vespa/searchcore/proton/server/maintenancejobrunner.cpp b/searchcore/src/vespa/searchcore/proton/server/maintenancejobrunner.cpp
index 658fa9f7482..d508c3193fc 100644
--- a/searchcore/src/vespa/searchcore/proton/server/maintenancejobrunner.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/maintenancejobrunner.cpp
@@ -25,7 +25,7 @@ MaintenanceJobRunner::stop() {
Guard guard(_lock);
_stopped = true;
}
- _job->onStop();
+ _job->stop();
}
void
diff --git a/searchcore/src/vespa/searchcore/proton/server/operationdonecontext.cpp b/searchcore/src/vespa/searchcore/proton/server/operationdonecontext.cpp
index 31e4c87b352..445f21044a8 100644
--- a/searchcore/src/vespa/searchcore/proton/server/operationdonecontext.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/operationdonecontext.cpp
@@ -1,24 +1,14 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "operationdonecontext.h"
-#include <vespa/searchcore/proton/common/feedtoken.h>
namespace proton {
-OperationDoneContext::OperationDoneContext(FeedToken token)
+OperationDoneContext::OperationDoneContext(IDestructorCallback::SP token)
: _token(std::move(token))
{
}
-OperationDoneContext::~OperationDoneContext()
-{
- ack();
-}
-
-void
-OperationDoneContext::ack()
-{
- _token.reset();
-}
+OperationDoneContext::~OperationDoneContext() = default;
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/operationdonecontext.h b/searchcore/src/vespa/searchcore/proton/server/operationdonecontext.h
index e5666daffb6..ccd7721fe25 100644
--- a/searchcore/src/vespa/searchcore/proton/server/operationdonecontext.h
+++ b/searchcore/src/vespa/searchcore/proton/server/operationdonecontext.h
@@ -3,7 +3,6 @@
#pragma once
#include <vespa/vespalib/util/idestructorcallback.h>
-#include <vespa/searchcore/proton/common/feedtoken.h>
namespace proton {
@@ -16,17 +15,14 @@ namespace proton {
*/
class OperationDoneContext : public vespalib::IDestructorCallback
{
- FeedToken _token;
-protected:
- void ack();
- FeedToken steal() { return std::move(_token); }
-
public:
- OperationDoneContext(FeedToken token);
+ using IDestructorCallback = vespalib::IDestructorCallback;
+ OperationDoneContext(IDestructorCallback::SP token);
~OperationDoneContext() override;
bool hasToken() const { return static_cast<bool>(_token); }
+private:
+ IDestructorCallback::SP _token;
};
-
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/persistencehandlerproxy.cpp b/searchcore/src/vespa/searchcore/proton/server/persistencehandlerproxy.cpp
index d298c0fac24..95fbcdaa204 100644
--- a/searchcore/src/vespa/searchcore/proton/server/persistencehandlerproxy.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/persistencehandlerproxy.cpp
@@ -124,12 +124,6 @@ PersistenceHandlerProxy::getDocumentRetrievers(storage::spi::ReadConsistency con
return _documentDB->getDocumentRetrievers(consistency);
}
-BucketGuard::UP
-PersistenceHandlerProxy::lockBucket(const storage::spi::Bucket &bucket)
-{
- return _documentDB->lockBucket(bucket.getBucketId().stripUnused());
-}
-
void
PersistenceHandlerProxy::handleListActiveBuckets(IBucketIdListResultHandler &resultHandler)
{
diff --git a/searchcore/src/vespa/searchcore/proton/server/persistencehandlerproxy.h b/searchcore/src/vespa/searchcore/proton/server/persistencehandlerproxy.h
index ce95cc3bddd..fa32c01fb23 100644
--- a/searchcore/src/vespa/searchcore/proton/server/persistencehandlerproxy.h
+++ b/searchcore/src/vespa/searchcore/proton/server/persistencehandlerproxy.h
@@ -54,7 +54,6 @@ public:
const storage::spi::Bucket &target1, const storage::spi::Bucket &target2) override;
RetrieversSP getDocumentRetrievers(storage::spi::ReadConsistency consistency) override;
- BucketGuard::UP lockBucket(const storage::spi::Bucket &bucket) override;
void handleListActiveBuckets(IBucketIdListResultHandler &resultHandler) override;
diff --git a/searchcore/src/vespa/searchcore/proton/server/pruneremoveddocumentsjob.cpp b/searchcore/src/vespa/searchcore/proton/server/pruneremoveddocumentsjob.cpp
index dfb84af5da5..45e33a965ef 100644
--- a/searchcore/src/vespa/searchcore/proton/server/pruneremoveddocumentsjob.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/pruneremoveddocumentsjob.cpp
@@ -2,64 +2,89 @@
#include "pruneremoveddocumentsjob.h"
#include "ipruneremoveddocumentshandler.h"
-#include "ifrozenbuckethandler.h"
+#include <vespa/persistence/spi/bucket_tasks.h>
#include <vespa/searchcore/proton/feedoperation/pruneremoveddocumentsoperation.h>
#include <vespa/searchcore/proton/documentmetastore/i_document_meta_store.h>
-#include <vespa/vespalib/util/time.h>
+#include <vespa/searchcorespi/index/i_thread_service.h>
+#include <vespa/vespalib/util/destructor_callbacks.h>
+#include <vespa/vespalib/util/lambdatask.h>
#include <vespa/log/log.h>
LOG_SETUP(".proton.server.pruneremoveddocumentsjob");
using document::BucketId;
using storage::spi::Timestamp;
+using storage::spi::Bucket;
+using vespalib::IDestructorCallback;
+using vespalib::makeLambdaTask;
namespace proton {
PruneRemovedDocumentsJob::
-PruneRemovedDocumentsJob(const Config &config,
- const IDocumentMetaStore &metaStore,
- uint32_t subDbId,
- const vespalib::string &docTypeName,
- IPruneRemovedDocumentsHandler &handler,
- IFrozenBucketHandler &frozenHandler)
+PruneRemovedDocumentsJob(const DocumentDBPruneConfig &config, RetainGuard dbRetainer, const IDocumentMetaStore &metaStore,
+ uint32_t subDbId, document::BucketSpace bucketSpace, const vespalib::string &docTypeName,
+ IPruneRemovedDocumentsHandler &handler, IThreadService & master,
+ BucketExecutor & bucketExecutor)
: BlockableMaintenanceJob("prune_removed_documents." + docTypeName,
config.getDelay(), config.getInterval()),
_metaStore(metaStore),
- _subDbId(subDbId),
- _cfgAgeLimit(config.getAge()),
- _docTypeName(docTypeName),
_handler(handler),
- _frozenHandler(frozenHandler),
- _pruneLids(),
+ _master(master),
+ _bucketExecutor(bucketExecutor),
+ _docTypeName(docTypeName),
+ _dbRetainer(std::move(dbRetainer)),
+ _cfgAgeLimit(config.getAge()),
+ _subDbId(subDbId),
+ _bucketSpace(bucketSpace),
_nextLid(1u)
{
}
+class PruneRemovedDocumentsJob::PruneTask : public storage::spi::BucketTask {
+public:
+ PruneTask(std::shared_ptr<PruneRemovedDocumentsJob> job, uint32_t lid, const RawDocumentMetaData & meta, IDestructorCallback::SP opsTracker)
+ : _job(std::move(job)),
+ _lid(lid),
+ _meta(meta),
+ _opsTracker(std::move(opsTracker))
+ { }
+ void run(const Bucket & bucket, IDestructorCallback::SP onDone) override;
+ void fail(const Bucket & bucket) override {
+ assert(bucket.getBucketId() == _meta.getBucketId());
+ }
+private:
+ std::shared_ptr<PruneRemovedDocumentsJob> _job;
+ uint32_t _lid;
+ const RawDocumentMetaData _meta;
+ IDestructorCallback::SP _opsTracker;
+};
void
-PruneRemovedDocumentsJob::flush(DocId lowLid, DocId nextLowLid, const Timestamp ageLimit)
-{
- if (_pruneLids.empty())
- return;
- DocId docIdLimit = _metaStore.getCommittedDocIdLimit();
- PruneRemovedDocumentsOperation pruneOp(docIdLimit, _subDbId);
- LidVectorContext::SP lvCtx(pruneOp.getLidsToRemove());
- for (DocId docId : _pruneLids) {
- lvCtx->addLid(docId);
- }
- _pruneLids.clear();
- LOG(debug,
- "PruneRemovedDocumentsJob::flush called,"
- " doctype(%s)"
- " %u lids to prune,"
- " range [%u..%u) limit %u, timestamp %" PRIu64,
- _docTypeName.c_str(),
- static_cast<uint32_t>(pruneOp.getLidsToRemove()->getNumLids()),
- lowLid, nextLowLid, docIdLimit,
- static_cast<uint64_t>(ageLimit));
- _handler.performPruneRemovedDocuments(pruneOp);
+PruneRemovedDocumentsJob::PruneTask::run(const Bucket & bucket, IDestructorCallback::SP onDone) {
+ assert(bucket.getBucketId() == _meta.getBucketId());
+ using DoneContext = vespalib::KeepAlive<std::pair<IDestructorCallback::SP, IDestructorCallback::SP>>;
+ auto & job = *_job;
+ job._master.execute(makeLambdaTask([job = std::move(_job), lid=_lid, meta = _meta,
+ onDone = std::make_shared<DoneContext>(std::make_pair(std::move(_opsTracker), std::move(onDone)))
+ ]() {
+ (void) onDone;
+ job->remove(lid, meta);
+ }));
}
+void
+PruneRemovedDocumentsJob::remove(uint32_t lid, const RawDocumentMetaData & oldMeta) {
+ if (stopped()) return;
+ if ( ! _metaStore.validLid(lid)) return;
+ const RawDocumentMetaData &meta = _metaStore.getRawMetaData(lid);
+ if (meta.getBucketId() != oldMeta.getBucketId()) return;
+ if (meta.getTimestamp() != oldMeta.getTimestamp()) return;
+ if (meta.getGid() != oldMeta.getGid()) return;
+
+ PruneRemovedDocumentsOperation pruneOp(_metaStore.getCommittedDocIdLimit(), _subDbId);
+ pruneOp.getLidsToRemove()->addLid(lid);
+ _handler.performPruneRemovedDocuments(pruneOp);
+}
bool
PruneRemovedDocumentsJob::run()
@@ -67,33 +92,17 @@ PruneRemovedDocumentsJob::run()
vespalib::system_time now = vespalib::system_clock::now();
const Timestamp ageLimit(static_cast<Timestamp::Type>
(vespalib::count_us(now.time_since_epoch() - _cfgAgeLimit)));
- DocId lid(_nextLid);
- const DocId olid(lid);
const DocId docIdLimit(_metaStore.getCommittedDocIdLimit());
- for (uint32_t pass = 0; pass < 10 && lid < docIdLimit; ++pass) {
- const DocId lidLimit = std::min(lid + 10000u, docIdLimit);
- for (; lid < lidLimit; ++lid) {
- if (!_metaStore.validLid(lid))
- continue;
- const RawDocumentMetaData &metaData = _metaStore.getRawMetaData(lid);
- if (metaData.getTimestamp() >= ageLimit)
- continue;
- BucketId bucket(metaData.getBucketId());
- IFrozenBucketHandler::ExclusiveBucketGuard::UP bucketGuard = _frozenHandler.acquireExclusiveBucket(bucket);
- if ( ! bucketGuard ) {
- setBlocked(BlockedReason::FROZEN_BUCKET);
- _nextLid = lid;
- flush(olid, lid, ageLimit);
- return true;
- }
- _pruneLids.push_back(lid);
- }
- if (_pruneLids.size() >= 500)
- break;
+ const DocId lidLimit = std::min(_nextLid + 1000000u, docIdLimit);
+ for (; ! isBlocked() && _nextLid < lidLimit; _nextLid++) {
+ if ( ! _metaStore.validLid(_nextLid)) continue;
+ const RawDocumentMetaData &meta = _metaStore.getRawMetaData(_nextLid);
+ if (meta.getTimestamp() >= ageLimit) continue;
+
+ _bucketExecutor.execute(Bucket(document::Bucket(_bucketSpace, meta.getBucketId())),
+ std::make_unique<PruneTask>(shared_from_this(), _nextLid, meta, getLimiter().beginOperation()));
}
- _nextLid = lid;
- flush(olid, lid, ageLimit);
- if (lid >= docIdLimit) {
+ if (_nextLid >= docIdLimit) {
_nextLid = 1u;
return true;
}
diff --git a/searchcore/src/vespa/searchcore/proton/server/pruneremoveddocumentsjob.h b/searchcore/src/vespa/searchcore/proton/server/pruneremoveddocumentsjob.h
index 76967635f4a..9216b10e5a9 100644
--- a/searchcore/src/vespa/searchcore/proton/server/pruneremoveddocumentsjob.h
+++ b/searchcore/src/vespa/searchcore/proton/server/pruneremoveddocumentsjob.h
@@ -3,45 +3,63 @@
#include "blockable_maintenance_job.h"
#include "document_db_maintenance_config.h"
+#include <vespa/searchcore/proton/common/monitored_refcount.h>
#include <persistence/spi/types.h>
+#include <vespa/document/bucket/bucketspace.h>
+#include <atomic>
+
+namespace storage::spi { struct BucketExecutor; }
+namespace searchcorespi::index { struct IThreadService; }
namespace proton {
struct IDocumentMetaStore;
class IPruneRemovedDocumentsHandler;
-class IFrozenBucketHandler;
+class RawDocumentMetaData;
/**
* Job that regularly checks whether old removed documents should be
* forgotten.
*/
-class PruneRemovedDocumentsJob : public BlockableMaintenanceJob
+class PruneRemovedDocumentsJob : public BlockableMaintenanceJob,
+ public std::enable_shared_from_this<PruneRemovedDocumentsJob>
{
private:
+ class PruneTask;
+ using Config = DocumentDBPruneRemovedDocumentsConfig;
+ using BucketExecutor = storage::spi::BucketExecutor;
+ using IThreadService = searchcorespi::index::IThreadService;
+ using DocId = uint32_t;
+
const IDocumentMetaStore &_metaStore; // external ownership
- uint32_t _subDbId;
- vespalib::duration _cfgAgeLimit;
- const vespalib::string &_docTypeName;
IPruneRemovedDocumentsHandler &_handler;
- IFrozenBucketHandler &_frozenHandler;
+ IThreadService &_master;
+ BucketExecutor &_bucketExecutor;
+ const vespalib::string _docTypeName;
+ RetainGuard _dbRetainer;
+ const vespalib::duration _cfgAgeLimit;
+ const uint32_t _subDbId;
+ const document::BucketSpace _bucketSpace;
- typedef uint32_t DocId;
- std::vector<DocId> _pruneLids;
DocId _nextLid;
- void flush(DocId lowLid, DocId nextLowLid, const storage::spi::Timestamp ageLimit);
-public:
- using Config = DocumentDBPruneRemovedDocumentsConfig;
-
- PruneRemovedDocumentsJob(const Config &config,
- const IDocumentMetaStore &metaStore,
- uint32_t subDbId,
- const vespalib::string &docTypeName,
- IPruneRemovedDocumentsHandler &handler,
- IFrozenBucketHandler &frozenHandler);
+ void remove(uint32_t lid, const RawDocumentMetaData & meta);
- // Implements IMaintenanceJob
+ PruneRemovedDocumentsJob(const DocumentDBPruneConfig &config, RetainGuard dbRetainer, const IDocumentMetaStore &metaStore,
+ uint32_t subDbId, document::BucketSpace bucketSpace, const vespalib::string &docTypeName,
+ IPruneRemovedDocumentsHandler &handler, IThreadService & master,
+ BucketExecutor & bucketExecutor);
bool run() override;
+public:
+ static std::shared_ptr<PruneRemovedDocumentsJob>
+ create(const Config &config, RetainGuard dbRetainer, const IDocumentMetaStore &metaStore, uint32_t subDbId,
+ document::BucketSpace bucketSpace, const vespalib::string &docTypeName,
+ IPruneRemovedDocumentsHandler &handler, IThreadService & master, BucketExecutor & bucketExecutor)
+ {
+ return std::shared_ptr<PruneRemovedDocumentsJob>(
+ new PruneRemovedDocumentsJob(config, std::move(dbRetainer), metaStore, subDbId, bucketSpace,
+ docTypeName, handler, master, bucketExecutor));
+ }
};
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/putdonecontext.cpp b/searchcore/src/vespa/searchcore/proton/server/putdonecontext.cpp
index 23caaf1250b..ac4f79ca89b 100644
--- a/searchcore/src/vespa/searchcore/proton/server/putdonecontext.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/putdonecontext.cpp
@@ -9,9 +9,8 @@ using document::Document;
namespace proton {
-PutDoneContext::PutDoneContext(FeedToken token, IPendingLidTracker::Token uncommitted,
- std::shared_ptr<const Document> doc,
- uint32_t lid)
+PutDoneContext::PutDoneContext(IDestructorCallback::SP token, IPendingLidTracker::Token uncommitted,
+ std::shared_ptr<const Document> doc, uint32_t lid)
: OperationDoneContext(std::move(token)),
_uncommitted(std::move(uncommitted)),
_lid(lid),
diff --git a/searchcore/src/vespa/searchcore/proton/server/putdonecontext.h b/searchcore/src/vespa/searchcore/proton/server/putdonecontext.h
index e7271d8a1b3..66bae97ad02 100644
--- a/searchcore/src/vespa/searchcore/proton/server/putdonecontext.h
+++ b/searchcore/src/vespa/searchcore/proton/server/putdonecontext.h
@@ -28,9 +28,8 @@ class PutDoneContext : public OperationDoneContext
std::shared_ptr<const document::Document> _doc;
public:
- PutDoneContext(FeedToken token, IPendingLidTracker::Token uncommitted,
- std::shared_ptr<const document::Document> doc,
- uint32_t lid);
+ PutDoneContext(IDestructorCallback::SP token, IPendingLidTracker::Token uncommitted,
+ std::shared_ptr<const document::Document> doc, uint32_t lid);
~PutDoneContext() override;
void registerPutLid(DocIdLimit *docIdLimit) { _docIdLimit = docIdLimit; }
diff --git a/searchcore/src/vespa/searchcore/proton/server/removedonecontext.cpp b/searchcore/src/vespa/searchcore/proton/server/removedonecontext.cpp
index 859d8693f6d..5f98db0ee49 100644
--- a/searchcore/src/vespa/searchcore/proton/server/removedonecontext.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/removedonecontext.cpp
@@ -7,9 +7,8 @@
namespace proton {
-RemoveDoneContext::RemoveDoneContext(FeedToken token, IPendingLidTracker::Token uncommitted, vespalib::Executor &executor,
- IDocumentMetaStore &documentMetaStore,
- uint32_t lid)
+RemoveDoneContext::RemoveDoneContext(IDestructorCallback::SP token, IPendingLidTracker::Token uncommitted,
+ vespalib::Executor &executor, IDocumentMetaStore &documentMetaStore, uint32_t lid)
: OperationDoneContext(std::move(token)),
_executor(executor),
_task(),
@@ -22,7 +21,6 @@ RemoveDoneContext::RemoveDoneContext(FeedToken token, IPendingLidTracker::Token
RemoveDoneContext::~RemoveDoneContext()
{
- ack();
if (_task) {
vespalib::Executor::Task::UP res = _executor.execute(std::move(_task));
assert(!res);
diff --git a/searchcore/src/vespa/searchcore/proton/server/removedonecontext.h b/searchcore/src/vespa/searchcore/proton/server/removedonecontext.h
index 485b82dd141..6aa5a2297d0 100644
--- a/searchcore/src/vespa/searchcore/proton/server/removedonecontext.h
+++ b/searchcore/src/vespa/searchcore/proton/server/removedonecontext.h
@@ -5,8 +5,6 @@
#include "operationdonecontext.h"
#include <vespa/searchcore/proton/common/ipendinglidtracker.h>
#include <vespa/vespalib/util/executor.h>
-#include <vespa/document/base/globalid.h>
-#include <vespa/searchlib/common/serialnum.h>
namespace proton {
@@ -14,7 +12,7 @@ struct IDocumentMetaStore;
/**
- * Context class for document removes that acks remove andschedules a
+ * Context class for document removes that acks remove and schedules a
* task when instance is destroyed. Typically a shared pointer to an
* instance is passed around to multiple worker threads that performs
* portions of a larger task before dropping the shared pointer,
@@ -28,8 +26,8 @@ class RemoveDoneContext : public OperationDoneContext
IPendingLidTracker::Token _uncommitted;
public:
- RemoveDoneContext(FeedToken token, IPendingLidTracker::Token uncommitted, vespalib::Executor &executor, IDocumentMetaStore &documentMetaStore,
- uint32_t lid);
+ RemoveDoneContext(IDestructorCallback::SP token, IPendingLidTracker::Token uncommitted, vespalib::Executor &executor,
+ IDocumentMetaStore &documentMetaStore, uint32_t lid);
~RemoveDoneContext() override;
};
diff --git a/searchcore/src/vespa/searchcore/proton/server/removedonetask.cpp b/searchcore/src/vespa/searchcore/proton/server/removedonetask.cpp
index c66a5d949e6..49a7490b263 100644
--- a/searchcore/src/vespa/searchcore/proton/server/removedonetask.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/removedonetask.cpp
@@ -5,17 +5,14 @@
namespace proton {
-RemoveDoneTask::RemoveDoneTask(IDocumentMetaStore &documentMetaStore,
- uint32_t lid)
+RemoveDoneTask::RemoveDoneTask(IDocumentMetaStore &documentMetaStore, uint32_t lid)
: vespalib::Executor::Task(),
_documentMetaStore(documentMetaStore),
_lid(lid)
{
}
-RemoveDoneTask::~RemoveDoneTask()
-{
-}
+RemoveDoneTask::~RemoveDoneTask() = default;
void
RemoveDoneTask::run()
diff --git a/searchcore/src/vespa/searchcore/proton/server/removedonetask.h b/searchcore/src/vespa/searchcore/proton/server/removedonetask.h
index d9059e6ad6a..1aa59c6d37f 100644
--- a/searchcore/src/vespa/searchcore/proton/server/removedonetask.h
+++ b/searchcore/src/vespa/searchcore/proton/server/removedonetask.h
@@ -29,12 +29,10 @@ class RemoveDoneTask : public vespalib::Executor::Task
public:
- RemoveDoneTask(IDocumentMetaStore &documentMetaStore,
- uint32_t lid);
+ RemoveDoneTask(IDocumentMetaStore &documentMetaStore, uint32_t lid);
+ ~RemoveDoneTask() override;
- virtual ~RemoveDoneTask();
-
- virtual void run() override;
+ void run() override;
};
diff --git a/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.cpp b/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.cpp
index 6066fef68d8..c050719b361 100644
--- a/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.cpp
@@ -43,45 +43,11 @@ namespace proton {
namespace {
-class PutDoneContextForMove : public PutDoneContext {
-private:
- IDestructorCallback::SP _moveDoneCtx;
-
-public:
- PutDoneContextForMove(FeedToken token, IPendingLidTracker::Token uncommitted,
- std::shared_ptr<const Document> doc,
- uint32_t lid,
- IDestructorCallback::SP moveDoneCtx)
- : PutDoneContext(std::move(token), std::move(uncommitted),std::move(doc), lid),
- _moveDoneCtx(std::move(moveDoneCtx))
- {}
- ~PutDoneContextForMove() override = default;
-};
-
-std::shared_ptr<PutDoneContext>
-createPutDoneContext(FeedToken token, IPendingLidTracker::Token uncommitted,
- std::shared_ptr<const Document> doc,
- uint32_t lid,
- IDestructorCallback::SP moveDoneCtx)
-{
- std::shared_ptr<PutDoneContext> result;
- if (moveDoneCtx) {
- result = std::make_shared<PutDoneContextForMove>(std::move(token), std::move(uncommitted),
- std::move(doc), lid, std::move(moveDoneCtx));
- } else {
- result = std::make_shared<PutDoneContext>(std::move(token), std::move(uncommitted),
- std::move(doc), lid);
- }
- return result;
-}
-
std::shared_ptr<PutDoneContext>
-createPutDoneContext(FeedToken token, IPendingLidTracker::Token uncommitted,
- std::shared_ptr<const Document> doc,
- uint32_t lid)
+createPutDoneContext(IDestructorCallback::SP token, IPendingLidTracker::Token uncommitted,
+ std::shared_ptr<const Document> doc, uint32_t lid)
{
- return createPutDoneContext(std::move(token), std::move(uncommitted), std::move(doc),
- lid, IDestructorCallback::SP());
+ return std::make_shared<PutDoneContext>(std::move(token), std::move(uncommitted), std::move(doc), lid);
}
std::shared_ptr<UpdateDoneContext>
@@ -100,34 +66,11 @@ void setPrev(DocumentOperation &op, const documentmetastore::IStore::Result &res
}
}
-class RemoveDoneContextForMove : public RemoveDoneContext {
-private:
- IDestructorCallback::SP _moveDoneCtx;
-
-public:
- RemoveDoneContextForMove(FeedToken token, IPendingLidTracker::Token uncommitted, vespalib::Executor &executor,
- IDocumentMetaStore &documentMetaStore,
- uint32_t lid, IDestructorCallback::SP moveDoneCtx)
- : RemoveDoneContext(std::move(token), std::move(uncommitted), executor,
- documentMetaStore, lid),
- _moveDoneCtx(std::move(moveDoneCtx))
- {}
- ~RemoveDoneContextForMove() override = default;
-};
-
std::shared_ptr<RemoveDoneContext>
-createRemoveDoneContext(FeedToken token, IPendingLidTracker::Token uncommitted, vespalib::Executor &executor,
- IDocumentMetaStore &documentMetaStore,
- uint32_t lid, IDestructorCallback::SP moveDoneCtx)
-{
- if (moveDoneCtx) {
- return std::make_shared<RemoveDoneContextForMove>
- (std::move(token), std::move(uncommitted), executor, documentMetaStore,
- lid, std::move(moveDoneCtx));
- } else {
- return std::make_shared<RemoveDoneContext>
- (std::move(token), std::move(uncommitted), executor, documentMetaStore, lid);
- }
+createRemoveDoneContext(IDestructorCallback::SP token, IPendingLidTracker::Token uncommitted, vespalib::Executor &executor,
+ IDocumentMetaStore &documentMetaStore, uint32_t lid)
+{
+ return std::make_shared<RemoveDoneContext>(std::move(token), std::move(uncommitted), executor, documentMetaStore, lid);
}
class SummaryPutDoneContext : public OperationDoneContext
@@ -163,8 +106,8 @@ void putMetaData(documentmetastore::IStore &meta_store, const DocumentId & doc_i
const DocumentOperation &op, bool is_removed_doc)
{
documentmetastore::IStore::Result putRes(
- meta_store.put(doc_id.getGlobalId(),
- op.getBucketId(), op.getTimestamp(), op.getSerializedDocSize(), op.getLid(), op.get_prepare_serial_num()));
+ meta_store.put(doc_id.getGlobalId(), op.getBucketId(), op.getTimestamp(),
+ op.getSerializedDocSize(), op.getLid(), op.get_prepare_serial_num()));
if (!putRes.ok()) {
throw IllegalStateException(
make_string("Could not put <lid, gid> pair for %sdocument with id '%s' and gid '%s'",
@@ -304,7 +247,6 @@ StoreOnlyFeedView::internalPut(FeedToken token, const PutOperation &putOp)
_params._subDbId, doc->toString(true).size(), doc->toString(true).c_str());
adjustMetaStore(putOp, docId.getGlobalId(), docId);
- auto uncommitted = get_pending_lid_token(putOp);
bool docAlreadyExists = putOp.getValidPrevDbdId(_params._subDbId);
@@ -312,17 +254,15 @@ StoreOnlyFeedView::internalPut(FeedToken token, const PutOperation &putOp)
if (putOp.changedDbdId() && useDocumentMetaStore(serialNum)) {
_gidToLidChangeHandler.notifyPut(token, docId.getGlobalId(), putOp.getLid(), serialNum);
}
- std::shared_ptr<PutDoneContext> onWriteDone =
- createPutDoneContext(std::move(token), std::move(uncommitted),
- doc, putOp.getLid());
+ auto onWriteDone = createPutDoneContext(std::move(token), get_pending_lid_token(putOp), doc, putOp.getLid());
putSummary(serialNum, putOp.getLid(), doc, onWriteDone);
putAttributes(serialNum, putOp.getLid(), *doc, onWriteDone);
putIndexedFields(serialNum, putOp.getLid(), doc, onWriteDone);
}
if (docAlreadyExists && putOp.changedDbdId()) {
+ //TODO, better to have an else than an assert ?
assert(!putOp.getValidDbdId(_params._subDbId));
- internalRemove(std::move(token), _pendingLidsForCommit->produce(putOp.getPrevLid()), serialNum,
- putOp.getPrevLid(), IDestructorCallback::SP());
+ internalRemove(std::move(token), _pendingLidsForCommit->produce(putOp.getPrevLid()), serialNum, putOp.getPrevLid());
}
}
@@ -468,9 +408,8 @@ StoreOnlyFeedView::internalUpdate(FeedToken token, const UpdateOperation &updOp)
(void) updateOk;
_metaStore.commit(CommitParam(serialNum));
}
- auto uncommitted = get_pending_lid_token(updOp);
- auto onWriteDone = createUpdateDoneContext(std::move(token), std::move(uncommitted), updOp.getUpdate());
+ auto onWriteDone = createUpdateDoneContext(std::move(token), get_pending_lid_token(updOp), updOp.getUpdate());
UpdateScope updateScope(*_schema, upd);
updateAttributes(serialNum, lid, upd, onWriteDone, updateScope);
@@ -592,19 +531,18 @@ StoreOnlyFeedView::internalRemove(FeedToken token, const RemoveOperationWithDocI
rmOp.getSubDbId(), rmOp.getLid(), rmOp.getPrevSubDbId(), rmOp.getPrevLid(), _params._subDbId);
adjustMetaStore(rmOp, docId.getGlobalId(), docId);
- auto uncommitted = get_pending_lid_token(rmOp);
if (rmOp.getValidDbdId(_params._subDbId)) {
auto clearDoc = std::make_unique<Document>(*_docType, docId);
clearDoc->setRepo(*_repo);
- putSummary(serialNum, rmOp.getLid(), std::move(clearDoc), std::make_shared<SummaryPutDoneContext>(std::move(token), std::move(uncommitted)));
+ putSummary(serialNum, rmOp.getLid(), std::move(clearDoc), std::make_shared<SummaryPutDoneContext>(std::move(token), get_pending_lid_token(rmOp)));
}
if (rmOp.getValidPrevDbdId(_params._subDbId)) {
if (rmOp.changedDbdId()) {
+ //TODO Prefer else over assert ?
assert(!rmOp.getValidDbdId(_params._subDbId));
- internalRemove(std::move(token), _pendingLidsForCommit->produce(rmOp.getPrevLid()), serialNum,
- rmOp.getPrevLid(), IDestructorCallback::SP());
+ internalRemove(std::move(token), _pendingLidsForCommit->produce(rmOp.getPrevLid()), serialNum, rmOp.getPrevLid());
}
}
}
@@ -617,27 +555,21 @@ StoreOnlyFeedView::internalRemove(FeedToken token, const RemoveOperationWithGid
const SerialNum serialNum = rmOp.getSerialNum();
DocumentId dummy;
adjustMetaStore(rmOp, rmOp.getGlobalId(), dummy);
- auto uncommitted = _pendingLidsForCommit->produce(rmOp.getLid());
if (rmOp.getValidPrevDbdId(_params._subDbId)) {
if (rmOp.changedDbdId()) {
assert(!rmOp.getValidDbdId(_params._subDbId));
- internalRemove(std::move(token), _pendingLidsForCommit->produce(rmOp.getPrevLid()), serialNum,
- rmOp.getPrevLid(), IDestructorCallback::SP());
+ internalRemove(std::move(token), _pendingLidsForCommit->produce(rmOp.getPrevLid()), serialNum, rmOp.getPrevLid());
}
}
}
void
-StoreOnlyFeedView::internalRemove(FeedToken token, IPendingLidTracker::Token uncommitted, SerialNum serialNum,
- Lid lid,
- IDestructorCallback::SP moveDoneCtx)
+StoreOnlyFeedView::internalRemove(IDestructorCallback::SP token, IPendingLidTracker::Token uncommitted, SerialNum serialNum, Lid lid)
{
bool explicitReuseLid = _lidReuseDelayer.delayReuse(lid);
- std::shared_ptr<RemoveDoneContext> onWriteDone;
- onWriteDone = createRemoveDoneContext(std::move(token), std::move(uncommitted),_writeService.master(), _metaStore,
- (explicitReuseLid ? lid : 0u),
- std::move(moveDoneCtx));
+ auto onWriteDone = createRemoveDoneContext(std::move(token), std::move(uncommitted), _writeService.master(), _metaStore,
+ (explicitReuseLid ? lid : 0u));
removeSummary(serialNum, lid, onWriteDone);
removeAttributes(serialNum, lid, onWriteDone);
removeIndexedFields(serialNum, lid, onWriteDone);
@@ -786,15 +718,13 @@ StoreOnlyFeedView::handleMove(const MoveOperation &moveOp, IDestructorCallback::
if (moveOp.changedDbdId() && useDocumentMetaStore(serialNum)) {
_gidToLidChangeHandler.notifyPut(FeedToken(), docId.getGlobalId(), moveOp.getLid(), serialNum);
}
- std::shared_ptr<PutDoneContext> onWriteDone =
- createPutDoneContext(FeedToken(), _pendingLidsForCommit->produce(moveOp.getLid()),
- doc, moveOp.getLid(), doneCtx);
+ auto onWriteDone = createPutDoneContext(doneCtx, _pendingLidsForCommit->produce(moveOp.getLid()), doc, moveOp.getLid());
putSummary(serialNum, moveOp.getLid(), doc, onWriteDone);
putAttributes(serialNum, moveOp.getLid(), *doc, onWriteDone);
putIndexedFields(serialNum, moveOp.getLid(), doc, onWriteDone);
}
if (docAlreadyExists && moveOp.changedDbdId()) {
- internalRemove(FeedToken(), _pendingLidsForCommit->produce(moveOp.getPrevLid()), serialNum, moveOp.getPrevLid(), doneCtx);
+ internalRemove(std::move(doneCtx), _pendingLidsForCommit->produce(moveOp.getPrevLid()), serialNum, moveOp.getPrevLid());
}
}
diff --git a/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.h b/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.h
index 7f1876cbbdf..5a0257fda57 100644
--- a/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.h
+++ b/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.h
@@ -53,7 +53,7 @@ public:
using LidVector = LidVectorContext::LidVector;
using Document = document::Document;
using DocumentUpdate = document::DocumentUpdate;
- using OnWriteDoneType =const std::shared_ptr<vespalib::IDestructorCallback> &;
+ using OnWriteDoneType = const std::shared_ptr<vespalib::IDestructorCallback> &;
using OnForceCommitDoneType =const std::shared_ptr<ForceCommitContext> &;
using OnOperationDoneType = const std::shared_ptr<OperationDoneContext> &;
using OnPutDoneType = const std::shared_ptr<PutDoneContext> &;
@@ -66,6 +66,7 @@ public:
using DocumentSP = std::shared_ptr<Document>;
using DocumentUpdateSP = std::shared_ptr<DocumentUpdate>;
using LidReuseDelayer = documentmetastore::LidReuseDelayer;
+ using IDestructorCallbackSP = std::shared_ptr<vespalib::IDestructorCallback>;
using Lid = search::DocumentIdT;
@@ -180,8 +181,7 @@ private:
// returns the number of documents removed.
size_t removeDocuments(const RemoveDocumentsOperation &op, bool remove_index_and_attribute_fields);
- void internalRemove(FeedToken token, IPendingLidTracker::Token uncommitted, SerialNum serialNum,
- Lid lid, std::shared_ptr<vespalib::IDestructorCallback> moveDoneCtx);
+ void internalRemove(IDestructorCallbackSP token, IPendingLidTracker::Token uncommitted, SerialNum serialNum, Lid lid);
IPendingLidTracker::Token get_pending_lid_token(const DocumentOperation &op);
diff --git a/searchcore/src/vespa/searchcore/proton/server/updatedonecontext.cpp b/searchcore/src/vespa/searchcore/proton/server/updatedonecontext.cpp
index 3feaba15c00..8558c19f1ba 100644
--- a/searchcore/src/vespa/searchcore/proton/server/updatedonecontext.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/updatedonecontext.cpp
@@ -7,7 +7,7 @@ using document::Document;
namespace proton {
-UpdateDoneContext::UpdateDoneContext(FeedToken token, IPendingLidTracker::Token uncommitted, const document::DocumentUpdate::SP &upd)
+UpdateDoneContext::UpdateDoneContext(IDestructorCallback::SP token, IPendingLidTracker::Token uncommitted, const document::DocumentUpdate::SP &upd)
: OperationDoneContext(std::move(token)),
_uncommitted(std::move(uncommitted)),
_upd(upd),
diff --git a/searchcore/src/vespa/searchcore/proton/server/updatedonecontext.h b/searchcore/src/vespa/searchcore/proton/server/updatedonecontext.h
index 6dad929aa26..6ca0e118b5b 100644
--- a/searchcore/src/vespa/searchcore/proton/server/updatedonecontext.h
+++ b/searchcore/src/vespa/searchcore/proton/server/updatedonecontext.h
@@ -24,7 +24,7 @@ class UpdateDoneContext : public OperationDoneContext
document::DocumentUpdate::SP _upd;
std::shared_future<std::unique_ptr<const document::Document>> _doc;
public:
- UpdateDoneContext(FeedToken token, IPendingLidTracker::Token uncommitted, const document::DocumentUpdate::SP &upd);
+ UpdateDoneContext(IDestructorCallback::SP token, IPendingLidTracker::Token uncommitted, const document::DocumentUpdate::SP &upd);
~UpdateDoneContext() override;
const document::DocumentUpdate &getUpdate() { return *_upd; }
diff --git a/searchlib/src/apps/vespa-attribute-inspect/vespa-attribute-inspect.cpp b/searchlib/src/apps/vespa-attribute-inspect/vespa-attribute-inspect.cpp
index 6e5e3f75b5e..14584a46a04 100644
--- a/searchlib/src/apps/vespa-attribute-inspect/vespa-attribute-inspect.cpp
+++ b/searchlib/src/apps/vespa-attribute-inspect/vespa-attribute-inspect.cpp
@@ -113,7 +113,7 @@ LoadAttribute::Main()
bool doHuge = false;
int idx = 1;
- char opt;
+ int opt;
const char * arg;
bool optError = false;
while ((opt = GetOpt("pasf:h", arg, idx)) != -1) {
diff --git a/searchlib/src/apps/vespa-fileheader-inspect/vespa-fileheader-inspect.cpp b/searchlib/src/apps/vespa-fileheader-inspect/vespa-fileheader-inspect.cpp
index e512cfcdffb..d67946d29f1 100644
--- a/searchlib/src/apps/vespa-fileheader-inspect/vespa-fileheader-inspect.cpp
+++ b/searchlib/src/apps/vespa-fileheader-inspect/vespa-fileheader-inspect.cpp
@@ -57,7 +57,7 @@ Application::usage()
int
Application::parseOpts()
{
- char c = '?';
+ int c = '?';
const char *optArg = NULL;
int optInd = 0;
while ((c = GetOpt("d:f:qh", optArg, optInd)) != -1) {
diff --git a/searchlib/src/apps/vespa-ranking-expression-analyzer/vespa-ranking-expression-analyzer.cpp b/searchlib/src/apps/vespa-ranking-expression-analyzer/vespa-ranking-expression-analyzer.cpp
index f2a9ee2932f..3445d64c477 100644
--- a/searchlib/src/apps/vespa-ranking-expression-analyzer/vespa-ranking-expression-analyzer.cpp
+++ b/searchlib/src/apps/vespa-ranking-expression-analyzer/vespa-ranking-expression-analyzer.cpp
@@ -95,11 +95,11 @@ struct FunctionInfo {
if (node) {
auto lhs_symbol = as<Symbol>(node->lhs());
auto rhs_symbol = as<Symbol>(node->rhs());
- if (lhs_symbol && node->rhs().is_const()) {
- inputs[lhs_symbol->id()].cmp_with.push_back(node->rhs().get_const_value());
+ if (lhs_symbol && node->rhs().is_const_double()) {
+ inputs[lhs_symbol->id()].cmp_with.push_back(node->rhs().get_const_double_value());
}
- if (node->lhs().is_const() && rhs_symbol) {
- inputs[rhs_symbol->id()].cmp_with.push_back(node->lhs().get_const_value());
+ if (node->lhs().is_const_double() && rhs_symbol) {
+ inputs[rhs_symbol->id()].cmp_with.push_back(node->lhs().get_const_double_value());
}
}
}
@@ -108,7 +108,7 @@ struct FunctionInfo {
if (node) {
if (auto symbol = as<Symbol>(node->child())) {
for (size_t i = 0; i < node->num_entries(); ++i) {
- inputs[symbol->id()].cmp_with.push_back(node->get_entry(i).get_const_value());
+ inputs[symbol->id()].cmp_with.push_back(node->get_entry(i).get_const_double_value());
}
}
}
diff --git a/searchlib/src/tests/aggregator/perdocexpr.cpp b/searchlib/src/tests/aggregator/perdocexpr.cpp
index 039b3ae1fc0..71b6ffb78ee 100644
--- a/searchlib/src/tests/aggregator/perdocexpr.cpp
+++ b/searchlib/src/tests/aggregator/perdocexpr.cpp
@@ -319,7 +319,7 @@ TEST("testNegate") {
testNegate(FloatResultNode(67.0), FloatResultNode(-67.0));
char strnorm[4] = { 102, 111, 111, 0 };
- char strneg[4] = { -102, -111, -111, 0 };
+ char strneg[4] = { (char)-102, (char)-111, (char)-111, 0 };
testNegate(StringResultNode(strnorm), StringResultNode(strneg));
testNegate(RawResultNode(strnorm, 3), RawResultNode(strneg, 3));
}
diff --git a/searchlib/src/tests/attribute/benchmark/attributebenchmark.cpp b/searchlib/src/tests/attribute/benchmark/attributebenchmark.cpp
index fc87e07a4e9..bf5233fd809 100644
--- a/searchlib/src/tests/attribute/benchmark/attributebenchmark.cpp
+++ b/searchlib/src/tests/attribute/benchmark/attributebenchmark.cpp
@@ -486,7 +486,7 @@ AttributeBenchmark::Main()
dc._prefixSearch = false;
int idx = 1;
- char opt;
+ int opt;
const char * arg;
bool optError = false;
while ((opt = GetOpt("n:u:v:s:q:p:r:c:l:h:i:a:e:S:E:D:L:bRPtw", arg, idx)) != -1) {
diff --git a/searchlib/src/tests/bitvector/bitvectorbenchmark.cpp b/searchlib/src/tests/bitvector/bitvectorbenchmark.cpp
index 17300125ed4..dc0dc646d77 100644
--- a/searchlib/src/tests/bitvector/bitvectorbenchmark.cpp
+++ b/searchlib/src/tests/bitvector/bitvectorbenchmark.cpp
@@ -168,7 +168,7 @@ int BitVectorBenchmark::Main()
int idx = 1;
std::string operation;
size_t numBits(8*1000000);
- char opt;
+ int opt;
const char * arg;
bool optError = false;
while ((opt = GetOpt("n:t:", arg, idx)) != -1) {
diff --git a/searchlib/src/tests/diskindex/fieldwriter/fieldwriter_test.cpp b/searchlib/src/tests/diskindex/fieldwriter/fieldwriter_test.cpp
index 8755af9a133..ad0e3d88ad5 100644
--- a/searchlib/src/tests/diskindex/fieldwriter/fieldwriter_test.cpp
+++ b/searchlib/src/tests/diskindex/fieldwriter/fieldwriter_test.cpp
@@ -649,7 +649,7 @@ int
FieldWriterTest::Main()
{
int argi;
- char c;
+ int c;
const char *optArg;
if (_argc > 0) {
diff --git a/searchlib/src/tests/features/constant/constant_test.cpp b/searchlib/src/tests/features/constant/constant_test.cpp
index 9c8480c1da2..1ef985f9e36 100644
--- a/searchlib/src/tests/features/constant/constant_test.cpp
+++ b/searchlib/src/tests/features/constant/constant_test.cpp
@@ -1,4 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
#include <vespa/vespalib/testkit/test_kit.h>
#include <iostream>
#include <vespa/searchlib/features/setup.h>
@@ -7,9 +8,11 @@
#include <vespa/searchlib/fef/test/indexenvironment.h>
#include <vespa/eval/eval/function.h>
#include <vespa/eval/eval/simple_value.h>
+#include <vespa/eval/eval/node_types.h>
#include <vespa/eval/eval/tensor_spec.h>
#include <vespa/eval/eval/value.h>
#include <vespa/eval/eval/test/value_compare.h>
+#include <vespa/vespalib/util/stringfmt.h>
using search::feature_t;
using namespace search::fef;
@@ -19,9 +22,11 @@ using namespace search::features;
using vespalib::eval::DoubleValue;
using vespalib::eval::Function;
using vespalib::eval::SimpleValue;
+using vespalib::eval::NodeTypes;
using vespalib::eval::TensorSpec;
using vespalib::eval::Value;
using vespalib::eval::ValueType;
+using vespalib::make_string_short::fmt;
namespace
{
@@ -68,12 +73,18 @@ struct ExecFixture
std::move(type),
std::move(tensor));
}
-
void addDouble(const vespalib::string &name, const double value) {
test.getIndexEnv().addConstantValue(name,
ValueType::double_type(),
std::make_unique<DoubleValue>(value));
}
+ void addTypeValue(const vespalib::string &name, const vespalib::string &type, const vespalib::string &value) {
+ auto &props = test.getIndexEnv().getProperties();
+ auto type_prop = fmt("constant(%s).type", name.c_str());
+ auto value_prop = fmt("constant(%s).value", name.c_str());
+ props.add(type_prop, type);
+ props.add(value_prop, value);
+ }
};
TEST_F("require that missing constant is detected",
@@ -108,5 +119,38 @@ TEST_F("require that existing double constant is detected",
EXPECT_EQUAL(42.0, f.executeDouble());
}
+//-----------------------------------------------------------------------------
+
+TEST_F("require that constants can be functional", ExecFixture("constant(foo)")) {
+ f.addTypeValue("foo", "tensor(x{})", "tensor(x{}):{a:3,b:5,c:7}");
+ EXPECT_TRUE(f.setup());
+ auto expect = make_tensor(TensorSpec("tensor(x{})")
+ .add({{"x","b"}}, 5)
+ .add({{"x","c"}}, 7)
+ .add({{"x","a"}}, 3));
+ EXPECT_EQUAL(*expect, f.executeTensor());
+}
+
+TEST_F("require that functional constant type must match the expression result", ExecFixture("constant(foo)")) {
+ f.addTypeValue("foo", "tensor<float>(x{})", "tensor(x{}):{a:3,b:5,c:7}");
+ EXPECT_TRUE(!f.setup());
+}
+
+TEST_F("require that functional constant must parse without errors", ExecFixture("constant(foo)")) {
+ f.addTypeValue("foo", "double", "this is parse error");
+ EXPECT_TRUE(!f.setup());
+}
+
+TEST_F("require that non-const functional constant is not allowed", ExecFixture("constant(foo)")) {
+ f.addTypeValue("foo", "tensor(x{})", "tensor(x{}):{a:a,b:5,c:7}");
+ EXPECT_TRUE(!f.setup());
+}
+
+TEST_F("require that functional constant must have non-error type", ExecFixture("constant(foo)")) {
+ f.addTypeValue("foo", "error", "impossible to create value with error type");
+ EXPECT_TRUE(!f.setup());
+}
+
+//-----------------------------------------------------------------------------
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchlib/src/tests/features/featurebenchmark.cpp b/searchlib/src/tests/features/featurebenchmark.cpp
index 6e1c5b1487c..72f693e72f4 100644
--- a/searchlib/src/tests/features/featurebenchmark.cpp
+++ b/searchlib/src/tests/features/featurebenchmark.cpp
@@ -597,7 +597,7 @@ Benchmark::Main()
setup_search_features(_factory);
int idx = 1;
- char opt;
+ int opt;
const char * arg;
bool optError = false;
vespalib::string file;
diff --git a/searchlib/src/tests/groupingengine/groupingengine_test.cpp b/searchlib/src/tests/groupingengine/groupingengine_test.cpp
index d0e2d749d08..81c4a427b4f 100644
--- a/searchlib/src/tests/groupingengine/groupingengine_test.cpp
+++ b/searchlib/src/tests/groupingengine/groupingengine_test.cpp
@@ -311,7 +311,7 @@ Test::testAggregationSimple()
ctx.add(FloatAttrBuilder("float").add(3).add(7).add(15).sp());
ctx.add(StringAttrBuilder("string").add("3").add("7").add("15").sp());
- char strsum[3] = {-101, '5', 0};
+ char strsum[3] = {(char)-101, '5', 0};
testAggregationSimpleSum(ctx, SumAggregationResult(), Int64ResultNode(25), FloatResultNode(25), StringResultNode(strsum));
testAggregationSimpleSum(ctx, MinAggregationResult(), Int64ResultNode(3), FloatResultNode(3), StringResultNode("15"));
testAggregationSimpleSum(ctx, MaxAggregationResult(), Int64ResultNode(15), FloatResultNode(15), StringResultNode("7"));
diff --git a/searchlib/src/tests/postinglistbm/postinglistbm.cpp b/searchlib/src/tests/postinglistbm/postinglistbm.cpp
index 695710873c4..7fa924f7002 100644
--- a/searchlib/src/tests/postinglistbm/postinglistbm.cpp
+++ b/searchlib/src/tests/postinglistbm/postinglistbm.cpp
@@ -110,7 +110,7 @@ int
PostingListBM::Main()
{
int argi;
- char c;
+ int c;
const char *optArg;
argi = 1;
diff --git a/searchlib/src/tests/tensor/distance_functions/distance_functions_test.cpp b/searchlib/src/tests/tensor/distance_functions/distance_functions_test.cpp
index 701f4c91ff2..6e88cf5a115 100644
--- a/searchlib/src/tests/tensor/distance_functions/distance_functions_test.cpp
+++ b/searchlib/src/tests/tensor/distance_functions/distance_functions_test.cpp
@@ -28,10 +28,15 @@ void verify_geo_miles(const DistanceFunction *dist_fun,
double raw_score = dist_fun->to_rawscore(abstract_distance);
double km = ((1.0/raw_score)-1.0);
double d_miles = km / 1.609344;
- EXPECT_GE(d_miles, exp_miles*0.99);
- EXPECT_LE(d_miles, exp_miles*1.01);
- double threshold = dist_fun->convert_threshold(km);
- EXPECT_DOUBLE_EQ(threshold, abstract_distance);
+ if (exp_miles != 0.0) {
+ EXPECT_GE(d_miles, exp_miles*0.99);
+ EXPECT_LE(d_miles, exp_miles*1.01);
+ double threshold = dist_fun->convert_threshold(km);
+ EXPECT_DOUBLE_EQ(threshold, abstract_distance);
+ } else {
+ EXPECT_LE(d_miles, 7e-13);
+ EXPECT_LE(abstract_distance, 6e-33);
+ }
}
diff --git a/searchlib/src/tests/transactionlogstress/translogstress.cpp b/searchlib/src/tests/transactionlogstress/translogstress.cpp
index 5792da7aa18..96772a0ee00 100644
--- a/searchlib/src/tests/transactionlogstress/translogstress.cpp
+++ b/searchlib/src/tests/transactionlogstress/translogstress.cpp
@@ -642,7 +642,7 @@ TransLogStress::Main()
vespalib::duration sleepTime = 4s;
int idx = 1;
- char opt;
+ int opt;
const char * arg;
bool optError = false;
while ((opt = GetOpt("d:p:t:f:s:v:c:e:g:i:a:b:h", arg, idx)) != -1) {
diff --git a/searchlib/src/vespa/searchlib/attribute/enumattribute.h b/searchlib/src/vespa/searchlib/attribute/enumattribute.h
index 552528894fa..443433757b3 100644
--- a/searchlib/src/vespa/searchlib/attribute/enumattribute.h
+++ b/searchlib/src/vespa/searchlib/attribute/enumattribute.h
@@ -6,7 +6,6 @@
#include "loadedenumvalue.h"
#include "enumstore.h"
#include "no_loaded_vector.h"
-#include <set>
namespace search {
@@ -24,25 +23,18 @@ class EnumAttribute : public B
protected:
using BaseClass = B;
using Change = typename B::Change;
- using ChangeDataType = typename B::Change::DataType;
using ChangeVector = typename B::ChangeVector;
- using ChangeVectorIterator = typename B::ChangeVector::const_iterator;
using DocId = typename B::DocId;
using EnumEntryType = typename B::EnumEntryType; // Template argument for enum store
using EnumHandle = typename B::EnumHandle;
- using EnumModifier = typename B::EnumModifier;
using ValueModifier = typename B::ValueModifier;
public:
- using EnumIndexVector = typename B::EnumIndexVector;
using EnumVector = typename B::EnumVector;
- using LoadedValueType = typename B::LoadedValueType;
using LoadedVector = typename B::LoadedVector;
protected:
using generation_t = typename B::generation_t;
- using UniqueSet = std::set<ChangeDataType>;
- using LoadedEnumAttributeVector = attribute::LoadedEnumAttributeVector;
using B::getGenerationHolder;
using B::getStatus;
@@ -52,7 +44,6 @@ public:
protected:
using EnumIndex = IEnumStore::Index;
- using EnumIndexRemapper = IEnumStore::EnumIndexRemapper;
EnumStore _enumStore;
@@ -71,7 +62,7 @@ protected:
* Perform compaction if necessary and insert the new unique values into the EnumStore.
*/
void insertNewUniqueValues(EnumStoreBatchUpdater& updater);
- virtual void considerAttributeChange(const Change & c, UniqueSet & newUniques) = 0;
+ virtual void considerAttributeChange(const Change & c, EnumStoreBatchUpdater & inserter) = 0;
vespalib::MemoryUsage getEnumStoreValuesMemoryUsage() const override;
vespalib::AddressSpace getEnumStoreAddressSpaceUsage() const override;
public:
diff --git a/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp b/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp
index 66ea3058a60..fd576b3a9ba 100644
--- a/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp
@@ -28,7 +28,7 @@ void EnumAttribute<B>::load_enum_store(LoadedVector& loaded)
auto loader = _enumStore.make_non_enumerated_loader();
if (!loaded.empty()) {
auto value = loaded.read();
- LoadedValueType prev = value.getValue();
+ typename B::LoadedValueType prev = value.getValue();
uint32_t prevRefCount(0);
EnumIndex index = loader.insert(value.getValue(), value._pidx.ref());
for (size_t i(0), m(loaded.size()); i < m; ++i, loaded.next()) {
@@ -62,16 +62,9 @@ template <typename B>
void
EnumAttribute<B>::insertNewUniqueValues(EnumStoreBatchUpdater& updater)
{
- UniqueSet newUniques;
-
- // find new unique strings
+ // find and insert new unique strings
for (const auto & data : this->_changes) {
- considerAttributeChange(data, newUniques);
- }
-
- // insert new unique values in EnumStore
- for (const auto & data : newUniques) {
- updater.insert(data.raw());
+ considerAttributeChange(data, updater);
}
}
diff --git a/searchlib/src/vespa/searchlib/attribute/enumstore.cpp b/searchlib/src/vespa/searchlib/attribute/enumstore.cpp
index b07515a675e..2b7065e8705 100644
--- a/searchlib/src/vespa/searchlib/attribute/enumstore.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/enumstore.cpp
@@ -29,13 +29,7 @@ EnumStoreT<const char*>::load_unique_value(const void* src, size_t available, In
if (available < sz) {
return -1;
}
- Index prev_idx = idx;
idx = _store.get_allocator().allocate(value);
-
- if (prev_idx.valid()) {
- auto cmp = make_comparator(value);
- assert(cmp.less(prev_idx, Index()));
- }
return sz;
}
diff --git a/searchlib/src/vespa/searchlib/attribute/multienumattribute.h b/searchlib/src/vespa/searchlib/attribute/multienumattribute.h
index fb16005c300..056b1832169 100644
--- a/searchlib/src/vespa/searchlib/attribute/multienumattribute.h
+++ b/searchlib/src/vespa/searchlib/attribute/multienumattribute.h
@@ -43,21 +43,15 @@ protected:
using Change = typename B::BaseClass::Change;
using DocId = typename B::BaseClass::DocId;
using EnumHandle = typename B::BaseClass::EnumHandle;
- using EnumModifier = typename B::BaseClass::EnumModifier;
using LoadedVector = typename B::BaseClass::LoadedVector;
- using UniqueSet = typename B::UniqueSet;
using ValueModifier = typename B::BaseClass::ValueModifier;
using WeightedEnum = typename B::BaseClass::WeightedEnum;
using generation_t = typename B::BaseClass::generation_t;
using DocIndices = typename MultiValueAttribute<B, M>::DocumentValues;
using EnumIndex = IEnumStore::Index;
- using EnumIndexRemapper = IEnumStore::EnumIndexRemapper;
- using EnumIndexVector = IEnumStore::IndexVector;
using EnumStoreBatchUpdater = typename B::EnumStoreBatchUpdater;
using EnumVector = IEnumStore::EnumVector;
- using LoadedEnumAttribute = attribute::LoadedEnumAttribute;
- using LoadedEnumAttributeVector = attribute::LoadedEnumAttributeVector;
using WeightedIndex = typename MultiValueAttribute<B, M>::MultiValueType;
using WeightedIndexArrayRef = typename MultiValueAttribute<B, M>::MultiValueArrayRef;
using WeightedIndexVector = typename MultiValueAttribute<B, M>::ValueVector;
@@ -66,7 +60,7 @@ protected:
bool extractChangeData(const Change & c, EnumIndex & idx) override; // EnumIndex is ValueType. Use EnumStore
// from EnumAttribute
- void considerAttributeChange(const Change & c, UniqueSet & newUniques) override; // same for both string and numeric
+ void considerAttributeChange(const Change & c, EnumStoreBatchUpdater & inserter) override; // same for both string and numeric
virtual void applyValueChanges(const DocIndices& docIndices, EnumStoreBatchUpdater& updater);
diff --git a/searchlib/src/vespa/searchlib/attribute/multienumattribute.hpp b/searchlib/src/vespa/searchlib/attribute/multienumattribute.hpp
index 6646446c3a7..8475451ba60 100644
--- a/searchlib/src/vespa/searchlib/attribute/multienumattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/multienumattribute.hpp
@@ -34,7 +34,7 @@ MultiValueEnumAttribute<B, M>::extractChangeData(const Change & c, EnumIndex & i
template <typename B, typename M>
void
-MultiValueEnumAttribute<B, M>::considerAttributeChange(const Change & c, UniqueSet & newUniques)
+MultiValueEnumAttribute<B, M>::considerAttributeChange(const Change & c, EnumStoreBatchUpdater & inserter)
{
if (c._type == ChangeBase::APPEND ||
(this->getInternalCollectionType().createIfNonExistant() &&
@@ -42,7 +42,7 @@ MultiValueEnumAttribute<B, M>::considerAttributeChange(const Change & c, UniqueS
{
EnumIndex idx;
if (!this->_enumStore.find_index(c._data.raw(), idx)) {
- newUniques.insert(c._data);
+ c._enumScratchPad = inserter.insert(c._data.raw()).ref();
} else {
c._enumScratchPad = idx.ref();
}
diff --git a/searchlib/src/vespa/searchlib/attribute/multinumericenumattribute.h b/searchlib/src/vespa/searchlib/attribute/multinumericenumattribute.h
index 5c3d2e37657..e6b1103d9f4 100644
--- a/searchlib/src/vespa/searchlib/attribute/multinumericenumattribute.h
+++ b/searchlib/src/vespa/searchlib/attribute/multinumericenumattribute.h
@@ -25,10 +25,7 @@ protected:
using DocId = typename B::BaseClass::DocId;
using EnumHandle = typename B::BaseClass::EnumHandle;
using EnumIndex = IEnumStore::Index;
- using EnumIndexVector = IEnumStore::IndexVector;
using EnumVector = IEnumStore::EnumVector;
- using LoadedEnumAttribute = attribute::LoadedEnumAttribute;
- using LoadedEnumAttributeVector = attribute::LoadedEnumAttributeVector;
using LoadedNumericValueT = typename B::BaseClass::LoadedNumericValueT;
using LoadedVector = typename B::BaseClass::LoadedVector;
using LoadedVectorR = SequentialReadModifyWriteVector<LoadedNumericValueT>;
diff --git a/searchlib/src/vespa/searchlib/attribute/multinumericpostattribute.h b/searchlib/src/vespa/searchlib/attribute/multinumericpostattribute.h
index d8d7e7f902c..e6ce775deb4 100644
--- a/searchlib/src/vespa/searchlib/attribute/multinumericpostattribute.h
+++ b/searchlib/src/vespa/searchlib/attribute/multinumericpostattribute.h
@@ -61,7 +61,6 @@ private:
using DocId = typename B::DocId;
using DocIndices = typename MultiValueNumericEnumAttribute<B, M>::DocIndices;
using FrozenDictionary = typename Dictionary::FrozenView;
- using LoadedEnumAttributeVector = attribute::LoadedEnumAttributeVector;
using Posting = typename PostingParent::Posting;
using PostingList = typename PostingParent::PostingList;
using PostingMap = typename PostingParent::PostingMap;
diff --git a/searchlib/src/vespa/searchlib/attribute/multistringattribute.h b/searchlib/src/vespa/searchlib/attribute/multistringattribute.h
index ce1c36681f8..c4fe5d26981 100644
--- a/searchlib/src/vespa/searchlib/attribute/multistringattribute.h
+++ b/searchlib/src/vespa/searchlib/attribute/multistringattribute.h
@@ -26,7 +26,6 @@ template <typename B, typename M>
class MultiValueStringAttributeT : public MultiValueEnumAttribute<B, M> {
protected:
using DocIndices = typename MultiValueAttribute<B, M>::DocumentValues;
- using EnumHintSearchContext = attribute::EnumHintSearchContext;
using EnumIndex = typename MultiValueAttribute<B, M>::ValueType;
using EnumStore = typename B::EnumStore;
using MultiValueMapping = typename MultiValueAttribute<B, M>::MultiValueMapping;
@@ -150,7 +149,7 @@ public:
template <typename BT>
class StringTemplSearchContext : public BT,
- public EnumHintSearchContext
+ public attribute::EnumHintSearchContext
{
using BT::queryTerm;
using AttrType = MultiValueStringAttributeT<B, M>;
diff --git a/searchlib/src/vespa/searchlib/attribute/multistringpostattribute.h b/searchlib/src/vespa/searchlib/attribute/multistringpostattribute.h
index f80d85dadee..a132e2eaff9 100644
--- a/searchlib/src/vespa/searchlib/attribute/multistringpostattribute.h
+++ b/searchlib/src/vespa/searchlib/attribute/multistringpostattribute.h
@@ -50,7 +50,6 @@ private:
using ComparatorType = typename EnumStore::ComparatorType;
using DocId = typename MultiValueStringAttributeT<B, T>::DocId;
using DocIndices = typename MultiValueStringAttributeT<B, T>::DocIndices;
- using LoadedEnumAttributeVector = attribute::LoadedEnumAttributeVector;
using Posting = typename PostingParent::Posting;
using PostingMap = typename PostingParent::PostingMap;
using QueryTermSimpleUP = AttributeVector::QueryTermSimpleUP;
diff --git a/searchlib/src/vespa/searchlib/attribute/numericbase.cpp b/searchlib/src/vespa/searchlib/attribute/numericbase.cpp
index 3a3973a6684..c94ca313aca 100644
--- a/searchlib/src/vespa/searchlib/attribute/numericbase.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/numericbase.cpp
@@ -10,8 +10,6 @@ namespace search {
IMPLEMENT_IDENTIFIABLE_ABSTRACT(NumericAttribute, AttributeVector);
-using attribute::LoadedEnumAttributeVector;
-
void
NumericAttribute::load_enumerated_data(ReaderBase&,
enumstore::EnumeratedPostingsLoader&,
diff --git a/searchlib/src/vespa/searchlib/attribute/numericbase.h b/searchlib/src/vespa/searchlib/attribute/numericbase.h
index 67be41fe98a..d56daed7413 100644
--- a/searchlib/src/vespa/searchlib/attribute/numericbase.h
+++ b/searchlib/src/vespa/searchlib/attribute/numericbase.h
@@ -16,7 +16,6 @@ class NumericAttribute : public AttributeVector
{
protected:
typedef IEnumStore::Index EnumIndex;
- typedef IEnumStore::IndexVector EnumIndexVector;
typedef IEnumStore::EnumVector EnumVector;
NumericAttribute(const vespalib::string & name, const AttributeVector::Config & cfg)
diff --git a/searchlib/src/vespa/searchlib/attribute/postinglistattribute.h b/searchlib/src/vespa/searchlib/attribute/postinglistattribute.h
index 8cd9d1d6bbd..9c00ed9c07e 100644
--- a/searchlib/src/vespa/searchlib/attribute/postinglistattribute.h
+++ b/searchlib/src/vespa/searchlib/attribute/postinglistattribute.h
@@ -43,7 +43,6 @@ protected:
using DocId = AttributeVector::DocId;
using EntryRef = vespalib::datastore::EntryRef;
using EnumIndex = IEnumStore::Index;
- using LoadedEnumAttributeVector = attribute::LoadedEnumAttributeVector;
using PostingList = typename AggregationTraits::PostingList;
using PostingMap = std::map<EnumPostingPair, PostingChange<P> >;
@@ -87,7 +86,6 @@ public:
using EnumIndex = IEnumStore::Index;
using EnumStore = EnumStoreType;
using ComparatorType = typename EnumStore::ComparatorType;
- using LoadedEnumAttributeVector = attribute::LoadedEnumAttributeVector;
using PostingList = typename Parent::PostingList;
using PostingMap = typename Parent::PostingMap;
diff --git a/searchlib/src/vespa/searchlib/attribute/singleenumattribute.h b/searchlib/src/vespa/searchlib/attribute/singleenumattribute.h
index cb3aa1f639f..065da5922a2 100644
--- a/searchlib/src/vespa/searchlib/attribute/singleenumattribute.h
+++ b/searchlib/src/vespa/searchlib/attribute/singleenumattribute.h
@@ -45,16 +45,10 @@ class SingleValueEnumAttribute : public B, public SingleValueEnumAttributeBase {
protected:
using Change = typename B::Change;
using ChangeVector = typename B::ChangeVector;
- using ChangeVectorIterator = typename B::ChangeVector::const_iterator;
using DocId = typename B::DocId;
- using EnumIndexRemapper = IEnumStore::EnumIndexRemapper;
- using EnumModifier = typename B::EnumModifier;
using EnumStore = typename B::EnumStore;
using EnumStoreBatchUpdater = typename EnumStore::BatchUpdater;
- using LoadedEnumAttribute = attribute::LoadedEnumAttribute;
- using LoadedEnumAttributeVector = attribute::LoadedEnumAttributeVector;
using LoadedVector = typename B::LoadedVector;
- using UniqueSet = typename B::UniqueSet;
using ValueModifier = typename B::ValueModifier;
using WeightedEnum = typename B::WeightedEnum;
using generation_t = typename B::generation_t;
@@ -62,16 +56,16 @@ protected:
using B::getGenerationHolder;
private:
- void considerUpdateAttributeChange(const Change & c, UniqueSet & newUniques);
+ void considerUpdateAttributeChange(const Change & c, EnumStoreBatchUpdater & inserter);
void applyUpdateValueChange(const Change& c, EnumStoreBatchUpdater& updater);
protected:
// from EnumAttribute
- void considerAttributeChange(const Change & c, UniqueSet & newUniques) override;
+ void considerAttributeChange(const Change & c, EnumStoreBatchUpdater & inserter) override;
// implemented by single value numeric enum attribute.
virtual void considerUpdateAttributeChange(const Change & c) { (void) c; }
- virtual void considerArithmeticAttributeChange(const Change & c, UniqueSet & newUniques) { (void) c; (void) newUniques; }
+ virtual void considerArithmeticAttributeChange(const Change & c, EnumStoreBatchUpdater & inserter) { (void) c; (void) inserter; }
virtual void applyValueChanges(EnumStoreBatchUpdater& updater) ;
virtual void applyArithmeticValueChange(const Change& c, EnumStoreBatchUpdater& updater) {
diff --git a/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp b/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp
index 39a12cb57d5..b39bdeb3b00 100644
--- a/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp
@@ -135,26 +135,28 @@ SingleValueEnumAttribute<B>::onUpdateStat()
template <typename B>
void
-SingleValueEnumAttribute<B>::considerUpdateAttributeChange(const Change & c, UniqueSet & newUniques)
+SingleValueEnumAttribute<B>::considerUpdateAttributeChange(const Change & c, EnumStoreBatchUpdater & inserter)
{
EnumIndex idx;
if (!this->_enumStore.find_index(c._data.raw(), idx)) {
- newUniques.insert(c._data);
+ c._enumScratchPad = inserter.insert(c._data.raw()).ref();
+ } else {
+ c._enumScratchPad = idx.ref();
}
considerUpdateAttributeChange(c); // for numeric
}
template <typename B>
void
-SingleValueEnumAttribute<B>::considerAttributeChange(const Change & c, UniqueSet & newUniques)
+SingleValueEnumAttribute<B>::considerAttributeChange(const Change & c, EnumStoreBatchUpdater & inserter)
{
if (c._type == ChangeBase::UPDATE) {
- considerUpdateAttributeChange(c, newUniques);
+ considerUpdateAttributeChange(c, inserter);
} else if (c._type >= ChangeBase::ADD && c._type <= ChangeBase::DIV) {
- considerArithmeticAttributeChange(c, newUniques); // for numeric
+ considerArithmeticAttributeChange(c, inserter); // for numeric
} else if (c._type == ChangeBase::CLEARDOC) {
this->_defaultValue._doc = c._doc;
- considerUpdateAttributeChange(this->_defaultValue, newUniques);
+ considerUpdateAttributeChange(this->_defaultValue, inserter);
}
}
diff --git a/searchlib/src/vespa/searchlib/attribute/singlenumericenumattribute.h b/searchlib/src/vespa/searchlib/attribute/singlenumericenumattribute.h
index 46574871af6..192af9373b6 100644
--- a/searchlib/src/vespa/searchlib/attribute/singlenumericenumattribute.h
+++ b/searchlib/src/vespa/searchlib/attribute/singlenumericenumattribute.h
@@ -22,16 +22,12 @@ protected:
using DocId = typename B::BaseClass::DocId;
using EnumHandle = typename B::BaseClass::EnumHandle;
using EnumIndex = typename SingleValueEnumAttributeBase::EnumIndex;
- using EnumIndexVector = IEnumStore::IndexVector;
using EnumStore = typename SingleValueEnumAttribute<B>::EnumStore;
using EnumStoreBatchUpdater = typename EnumStore::BatchUpdater;
using EnumVector = IEnumStore::EnumVector;
- using LoadedEnumAttribute = attribute::LoadedEnumAttribute;
- using LoadedEnumAttributeVector = attribute::LoadedEnumAttributeVector;
using LoadedNumericValueT = typename B::BaseClass::LoadedNumericValueT;
using LoadedVector = typename B::BaseClass::LoadedVector;
using QueryTermSimpleUP = AttributeVector::QueryTermSimpleUP;
- using UniqueSet = typename SingleValueEnumAttribute<B>::UniqueSet;
using Weighted = typename B::BaseClass::Weighted;
using WeightedFloat = typename B::BaseClass::WeightedFloat;
using WeightedInt = typename B::BaseClass::WeightedInt;
@@ -46,7 +42,7 @@ protected:
// from SingleValueEnumAttribute
void considerUpdateAttributeChange(const Change & c) override;
- void considerArithmeticAttributeChange(const Change & c, UniqueSet & newUniques) override;
+ void considerArithmeticAttributeChange(const Change & c, EnumStoreBatchUpdater & inserter) override;
void applyArithmeticValueChange(const Change& c, EnumStoreBatchUpdater& updater) override;
/*
diff --git a/searchlib/src/vespa/searchlib/attribute/singlenumericenumattribute.hpp b/searchlib/src/vespa/searchlib/attribute/singlenumericenumattribute.hpp
index dc1a6b8f278..096e2146e02 100644
--- a/searchlib/src/vespa/searchlib/attribute/singlenumericenumattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/singlenumericenumattribute.hpp
@@ -24,7 +24,7 @@ SingleValueNumericEnumAttribute<B>::considerUpdateAttributeChange(const Change &
template <typename B>
void
-SingleValueNumericEnumAttribute<B>::considerArithmeticAttributeChange(const Change & c, UniqueSet & newUniques)
+SingleValueNumericEnumAttribute<B>::considerArithmeticAttributeChange(const Change & c, EnumStoreBatchUpdater & inserter)
{
T oldValue;
auto iter = _currDocValues.find(c._doc);
@@ -38,7 +38,9 @@ SingleValueNumericEnumAttribute<B>::considerArithmeticAttributeChange(const Chan
EnumIndex idx;
if (!this->_enumStore.find_index(newValue, idx)) {
- newUniques.insert(newValue);
+ c._enumScratchPad = inserter.insert(newValue).ref();
+ } else {
+ c._enumScratchPad = idx.ref();
}
_currDocValues[c._doc] = newValue;
@@ -158,9 +160,9 @@ SingleValueNumericEnumAttribute<B>::getSearch(QueryTermSimple::UP qTerm,
(void) params;
QueryTermSimple::RangeResult<T> res = qTerm->getRange<T>();
if (res.isEqual()) {
- return AttributeVector::SearchContext::UP (new SingleSearchContext(std::move(qTerm), *this));
+ return std::make_unique<SingleSearchContext>(std::move(qTerm), *this);
} else {
- return AttributeVector::SearchContext::UP (new SingleSearchContext(std::move(qTerm), *this));
+ return std::make_unique<SingleSearchContext>(std::move(qTerm), *this);
}
}
diff --git a/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.h b/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.h
index 4e7f8040f7f..aed561076ed 100644
--- a/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.h
+++ b/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.h
@@ -43,7 +43,6 @@ private:
using ComparatorType = typename EnumStore::ComparatorType;
using DocId = typename B::BaseClass::DocId;
using EnumIndex = typename SingleValueEnumAttributeBase::EnumIndex;
- using LoadedEnumAttributeVector = attribute::LoadedEnumAttributeVector;
using PostingMap = typename PostingParent::PostingMap;
using QueryTermSimpleUP = AttributeVector::QueryTermSimpleUP;
using SelfType = SingleValueNumericPostingAttribute<B>;
diff --git a/searchlib/src/vespa/searchlib/attribute/singlestringattribute.h b/searchlib/src/vespa/searchlib/attribute/singlestringattribute.h
index 11d4911bf09..6e1957c0ab2 100644
--- a/searchlib/src/vespa/searchlib/attribute/singlestringattribute.h
+++ b/searchlib/src/vespa/searchlib/attribute/singlestringattribute.h
@@ -22,9 +22,7 @@ protected:
using ChangeVector = StringAttribute::ChangeVector;
using DocId = StringAttribute::DocId;
using EnumHandle = StringAttribute::EnumHandle;
- using EnumHintSearchContext = attribute::EnumHintSearchContext;
using EnumIndex = typename SingleValueEnumAttributeBase::EnumIndex;
- using EnumIndexVector = typename SingleValueEnumAttributeBase::EnumIndexVector;
using EnumStore = typename SingleValueEnumAttribute<B>::EnumStore;
using LoadedVector = StringAttribute::LoadedVector;
using QueryTermSimpleUP = AttributeVector::QueryTermSimpleUP;
@@ -103,7 +101,7 @@ public:
};
class StringTemplSearchContext : public StringSingleImplSearchContext,
- public EnumHintSearchContext
+ public attribute::EnumHintSearchContext
{
using AttrType = SingleValueStringAttributeT<B>;
using StringSingleImplSearchContext::queryTerm;
diff --git a/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.h b/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.h
index 748d5bc4567..46a84f966a1 100644
--- a/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.h
+++ b/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.h
@@ -37,7 +37,6 @@ private:
using ComparatorType = typename EnumStore::ComparatorType;
using DocId = typename SingleValueStringAttributeT<B>::DocId;
using EnumIndex = typename SingleValueStringAttributeT<B>::EnumIndex;
- using LoadedEnumAttributeVector = attribute::LoadedEnumAttributeVector;
using PostingMap = typename PostingParent::PostingMap;
using QueryTermSimpleUP = AttributeVector::QueryTermSimpleUP;
using SelfType = SingleValueStringPostingAttributeT<B>;
diff --git a/searchlib/src/vespa/searchlib/attribute/stringbase.cpp b/searchlib/src/vespa/searchlib/attribute/stringbase.cpp
index 4be86652541..c3d7ba778ba 100644
--- a/searchlib/src/vespa/searchlib/attribute/stringbase.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/stringbase.cpp
@@ -68,9 +68,6 @@ StringSearchHelper::isMatch(const char *src) const {
IMPLEMENT_IDENTIFIABLE_ABSTRACT(StringAttribute, AttributeVector);
-using attribute::LoadedEnumAttribute;
-using attribute::LoadedEnumAttributeVector;
-
class SortDataChar {
public:
SortDataChar() { }
diff --git a/searchlib/src/vespa/searchlib/attribute/stringbase.h b/searchlib/src/vespa/searchlib/attribute/stringbase.h
index b15dc597fe9..a136a9485cc 100644
--- a/searchlib/src/vespa/searchlib/attribute/stringbase.h
+++ b/searchlib/src/vespa/searchlib/attribute/stringbase.h
@@ -49,7 +49,6 @@ class StringAttribute : public AttributeVector
{
public:
using EnumIndex = IEnumStore::Index;
- using EnumIndexVector = IEnumStore::IndexVector;
using EnumVector = IEnumStore::EnumVector;
using LoadedValueType = const char*;
using LoadedVector = NoLoadedVector;
diff --git a/searchlib/src/vespa/searchlib/bitcompression/compression.cpp b/searchlib/src/vespa/searchlib/bitcompression/compression.cpp
index 21ce8dc6c06..62ae8854880 100644
--- a/searchlib/src/vespa/searchlib/bitcompression/compression.cpp
+++ b/searchlib/src/vespa/searchlib/bitcompression/compression.cpp
@@ -10,23 +10,6 @@
namespace search::bitcompression {
-uint8_t CodingTables::_log2Table[64_Ki];
-
-CodingTables tables; // Static initializer
-
-CodingTables::CodingTables()
-{
- unsigned int x;
- uint8_t log2Val;
-
- for (x = 0; x < 64_Ki; x++) {
- unsigned int val = x;
- for (log2Val = 0; (val >>= 1) != 0; log2Val++) {
- }
- _log2Table[x] = log2Val;
- }
-}
-
uint64_t CodingTables::_intMask64[65] =
{
(UINT64_C(1) << 0) - 1, (UINT64_C(1) << 1) - 1,
diff --git a/searchlib/src/vespa/searchlib/bitcompression/compression.h b/searchlib/src/vespa/searchlib/bitcompression/compression.h
index e6b171ec871..48b12c193ed 100644
--- a/searchlib/src/vespa/searchlib/bitcompression/compression.h
+++ b/searchlib/src/vespa/searchlib/bitcompression/compression.h
@@ -30,9 +30,6 @@ private:
int _bitOffset;
};
-// Use inline assembly for asmlog2 calculations
-#define DO_ASMLOG
-
/*
* The so-called rice2 code is very similar to the well known exp
* golomb code. One difference is that the first bits are inverted.
@@ -82,11 +79,8 @@ private:
class CodingTables
{
public:
- static uint8_t _log2Table[65536];
static uint64_t _intMask64[65];
static uint64_t _intMask64le[65];
-
- CodingTables();
};
#define UC64_DECODECONTEXT(prefix) \
@@ -933,8 +927,7 @@ template <>
inline uint64_t
EncodeContext64EBase<true>::bswap(uint64_t val)
{
- __asm__("bswap %0" : "=r" (val) : "0" (val));
- return val;
+ return __builtin_bswap64(val);
}
@@ -963,42 +956,13 @@ public:
static inline uint32_t
asmlog2(uint64_t x)
{
- uint64_t retVal;
-
-#if (defined(__x86_64__)) && defined(DO_ASMLOG)
- __asm("bsrq %1,%0" : "=r" (retVal) : "r" (x));
-
-#else
- register uint64_t lower = x;
- uint32_t upper32 = lower >> 32;
- if (upper32 != 0) {
- uint32_t upper16 = upper32 >> 16;
- if (upper16 != 0) {
- retVal = 48 + CodingTables::_log2Table[upper16];
- } else {
- retVal = 32 + CodingTables::_log2Table[upper32];
- }
- } else {
- uint32_t lower32 = static_cast<uint32_t>(x);
- uint32_t upper16 = lower32 >> 16;
-
- if (upper16 != 0) {
- retVal = 16 + CodingTables::_log2Table[upper16];
- } else {
- retVal = CodingTables::_log2Table[lower32];
- }
- }
-#endif
-
- return retVal;
+ return sizeof(uint64_t) * 8 - 1 - __builtin_clzl(x);
}
static inline uint64_t
ffsl(uint64_t x)
{
- uint64_t retVal;
- __asm("bsfq %1,%0" : "=r" (retVal) : "r" (x));
- return retVal;
+ return __builtin_ctzl(x);
}
/**
diff --git a/searchlib/src/vespa/searchlib/common/bitvectorcache.cpp b/searchlib/src/vespa/searchlib/common/bitvectorcache.cpp
index 6b873d8a7c1..81f01de0c33 100644
--- a/searchlib/src/vespa/searchlib/common/bitvectorcache.cpp
+++ b/searchlib/src/vespa/searchlib/common/bitvectorcache.cpp
@@ -145,7 +145,7 @@ BitVectorCache::populate(Key2Index & newKeys, CondensedBitVector & chunk, const
accum += percentage;
m.chunkId(0);
m.chunkIndex(index);
- LOG(info, "Populating bitvector %2d with feature %" PRIu64 " and %ld bits set. Cost is %8f = %2.2f%%, accumulated cost is %2.2f%%",
+ LOG(debug, "Populating bitvector %2d with feature %" PRIu64 " and %ld bits set. Cost is %8f = %2.2f%%, accumulated cost is %2.2f%%",
index, e.first, m.bitCount(), m.cost(), percentage, accum);
assert(m.isCached());
assert(newKeys[e.first].isCached());
diff --git a/searchlib/src/vespa/searchlib/features/constant_feature.cpp b/searchlib/src/vespa/searchlib/features/constant_feature.cpp
index 5eedb5834bf..9de4d351584 100644
--- a/searchlib/src/vespa/searchlib/features/constant_feature.cpp
+++ b/searchlib/src/vespa/searchlib/features/constant_feature.cpp
@@ -3,6 +3,8 @@
#include "constant_feature.h"
#include "valuefeature.h"
#include <vespa/searchlib/fef/featureexecutor.h>
+#include <vespa/searchlib/fef/properties.h>
+#include <vespa/eval/eval/function.h>
#include <vespa/eval/eval/value_cache/constant_value.h>
#include <vespa/vespalib/util/stash.h>
@@ -11,6 +13,10 @@ LOG_SETUP(".features.constant_feature");
using namespace search::fef;
+using vespalib::eval::ValueType;
+using vespalib::eval::Function;
+using vespalib::eval::SimpleConstantValue;
+
namespace search::features {
/**
@@ -62,13 +68,26 @@ ConstantBlueprint::setup(const IIndexEnvironment &env,
_key = params[0].getValue();
_value = env.getConstantValue(_key);
if (!_value) {
- fail("Constant '%s' not found", _key.c_str());
+ auto type_prop = env.getProperties().lookup(getName(), "type");
+ auto value_prop = env.getProperties().lookup(getName(), "value");
+ if ((type_prop.size() == 1) && (value_prop.size() == 1)) {
+ auto type = ValueType::from_spec(type_prop.get());
+ auto value = Function::parse(value_prop.get())->root().get_const_value();
+ if (!type.is_error() && value && (value->type() == type)) {
+ _value = std::make_unique<SimpleConstantValue>(std::move(value));
+ } else {
+ fail("Constant '%s' has invalid spec: type='%s', value='%s'",
+ _key.c_str(), type_prop.get().c_str(), value_prop.get().c_str());
+ }
+ } else {
+ fail("Constant '%s' not found", _key.c_str());
+ }
} else if (_value->type().is_error()) {
fail("Constant '%s' has invalid type", _key.c_str());
}
- FeatureType output_type = _value ?
- FeatureType::object(_value->type()) :
- FeatureType::number();
+ FeatureType output_type = _value
+ ? FeatureType::object(_value->type())
+ : FeatureType::number();
describeOutput("out", "The constant looked up in index environment using the given key.",
output_type);
return (_value && !_value->type().is_error());
diff --git a/searchlib/src/vespa/searchlib/features/onnx_feature.h b/searchlib/src/vespa/searchlib/features/onnx_feature.h
index 5a45b26f1f6..6a63e7276c2 100644
--- a/searchlib/src/vespa/searchlib/features/onnx_feature.h
+++ b/searchlib/src/vespa/searchlib/features/onnx_feature.h
@@ -20,7 +20,7 @@ public:
~OnnxBlueprint() override;
void visitDumpFeatures(const fef::IIndexEnvironment &, fef::IDumpFeatureVisitor &) const override {}
fef::Blueprint::UP createInstance() const override {
- return Blueprint::UP(new OnnxBlueprint());
+ return std::make_unique<OnnxBlueprint>();
}
fef::ParameterDescriptions getDescriptions() const override {
return fef::ParameterDescriptions().desc().string();
diff --git a/searchlib/src/vespa/searchlib/queryeval/predicate_search.cpp b/searchlib/src/vespa/searchlib/queryeval/predicate_search.cpp
index a49f44a2853..08056725389 100644
--- a/searchlib/src/vespa/searchlib/queryeval/predicate_search.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/predicate_search.cpp
@@ -19,6 +19,7 @@ namespace queryeval {
namespace {
+#ifdef __x86_64__
class SkipMinFeatureSSE2 : public SkipMinFeature
{
public:
@@ -83,13 +84,49 @@ SkipMinFeatureSSE2::next()
return -1;
}
}
+#else
+class SkipMinFeatureGeneric : public SkipMinFeature
+{
+ const uint8_t* _min_feature;
+ const uint8_t* _kv;
+ const uint32_t _sz;
+ uint32_t _cur;
+public:
+ SkipMinFeatureGeneric(const uint8_t* min_feature, const uint8_t* kv, size_t sz);
+ uint32_t next() override;
+};
+
+SkipMinFeatureGeneric::SkipMinFeatureGeneric(const uint8_t* min_feature, const uint8_t* kv, size_t sz)
+ : _min_feature(min_feature),
+ _kv(kv),
+ _sz(sz),
+ _cur(0)
+{
+}
+
+uint32_t
+SkipMinFeatureGeneric::next()
+{
+ while (_cur < _sz) {
+ if (_kv[_cur] >= _min_feature[_cur]) {
+ return _cur++;
+ }
+ ++_cur;
+ }
+ return -1;
+}
+#endif
}
SkipMinFeature::UP
SkipMinFeature::create(const uint8_t * min_feature, const uint8_t * kv, size_t sz)
{
- return UP(new SkipMinFeatureSSE2(min_feature, kv, sz));
+#ifdef __x86_64__
+ return std::make_unique<SkipMinFeatureSSE2>(min_feature, kv, sz);
+#else
+ return std::make_unique<SkipMinFeatureGeneric>(min_feature, kv, sz);
+#endif
}
PredicateSearch::PredicateSearch(const uint8_t * minFeatureVector,
diff --git a/searchsummary/src/vespa/searchsummary/docsummary/resultconfig.cpp b/searchsummary/src/vespa/searchsummary/docsummary/resultconfig.cpp
index f3834ef4a12..ead3a4a2f9d 100644
--- a/searchsummary/src/vespa/searchsummary/docsummary/resultconfig.cpp
+++ b/searchsummary/src/vespa/searchsummary/docsummary/resultconfig.cpp
@@ -164,6 +164,8 @@ ResultConfig::ReadConfig(const vespa::config::search::SummaryConfig &cfg, const
rc = resClass->AddConfigEntry(fieldname, RES_STRING);
} else if (strcmp(fieldtype, "data") == 0) {
rc = resClass->AddConfigEntry(fieldname, RES_DATA);
+ } else if (strcmp(fieldtype, "raw") == 0) {
+ rc = resClass->AddConfigEntry(fieldname, RES_DATA);
} else if (strcmp(fieldtype, "longstring") == 0) {
rc = resClass->AddConfigEntry(fieldname, RES_LONG_STRING);
} else if (strcmp(fieldtype, "longdata") == 0) {
diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/slobrok/SlobrokMonitor.java b/service-monitor/src/main/java/com/yahoo/vespa/service/slobrok/SlobrokMonitor.java
index 62f39084272..3ba505d0ab4 100644
--- a/service-monitor/src/main/java/com/yahoo/vespa/service/slobrok/SlobrokMonitor.java
+++ b/service-monitor/src/main/java/com/yahoo/vespa/service/slobrok/SlobrokMonitor.java
@@ -12,7 +12,6 @@ import com.yahoo.jrt.slobrok.api.Mirror;
import com.yahoo.jrt.slobrok.api.SlobrokList;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.List;
import java.util.Objects;
@@ -26,8 +25,12 @@ public class SlobrokMonitor implements AutoCloseable {
private final SlobrokList slobrokList;
private final Mirror mirror;
- SlobrokMonitor() {
- this(new SlobrokList());
+ SlobrokMonitor(Supervisor supervisor) {
+ this(new SlobrokList(), supervisor);
+ }
+
+ private SlobrokMonitor(SlobrokList slobrokList, Supervisor supervisor) {
+ this(slobrokList, new Mirror(supervisor, slobrokList));
}
// Package-private for testing.
@@ -36,10 +39,6 @@ public class SlobrokMonitor implements AutoCloseable {
this.mirror = mirror;
}
- private SlobrokMonitor(SlobrokList slobrokList) {
- this(slobrokList, new Mirror(new Supervisor(new Transport("slobrok-monitor")), slobrokList));
- }
-
void updateSlobrokList(ApplicationInfo application) {
List<String> slobrokSpecs = getSlobrokSpecs(application);
slobrokList.setup(slobrokSpecs.toArray(new String[0]));
diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/slobrok/SlobrokMonitorManagerImpl.java b/service-monitor/src/main/java/com/yahoo/vespa/service/slobrok/SlobrokMonitorManagerImpl.java
index 2cc3359d449..0c9148ad834 100644
--- a/service-monitor/src/main/java/com/yahoo/vespa/service/slobrok/SlobrokMonitorManagerImpl.java
+++ b/service-monitor/src/main/java/com/yahoo/vespa/service/slobrok/SlobrokMonitorManagerImpl.java
@@ -2,8 +2,11 @@
package com.yahoo.vespa.service.slobrok;
import com.google.inject.Inject;
+import com.yahoo.component.AbstractComponent;
import com.yahoo.config.model.api.ApplicationInfo;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.jrt.Supervisor;
+import com.yahoo.jrt.Transport;
import com.yahoo.jrt.slobrok.api.Mirror;
import java.util.logging.Level;
import com.yahoo.vespa.applicationmodel.ClusterId;
@@ -21,7 +24,7 @@ import java.util.Optional;
import java.util.function.Supplier;
import java.util.logging.Logger;
-public class SlobrokMonitorManagerImpl implements SlobrokApi, MonitorManager {
+public class SlobrokMonitorManagerImpl extends AbstractComponent implements SlobrokApi, MonitorManager {
private static final Logger logger =
Logger.getLogger(SlobrokMonitorManagerImpl.class.getName());
@@ -30,14 +33,29 @@ public class SlobrokMonitorManagerImpl implements SlobrokApi, MonitorManager {
private final Object monitor = new Object();
private final HashMap<ApplicationId, SlobrokMonitor> slobrokMonitors = new HashMap<>();
private final DuperModelManager duperModel;
+ private final Transport transport;
+
+ private static int getTransportThreadCount() {
+ return Math.max(4, Runtime.getRuntime().availableProcessors());
+ }
@Inject
public SlobrokMonitorManagerImpl(DuperModelManager duperModel) {
- this(SlobrokMonitor::new, duperModel);
+ this(new Transport("slobrok-monitor", getTransportThreadCount() / 4), duperModel);
}
- SlobrokMonitorManagerImpl(Supplier<SlobrokMonitor> slobrokMonitorFactory, DuperModelManager duperModel) {
+ private SlobrokMonitorManagerImpl(Transport transport, DuperModelManager duperModel) {
+ this(transport, new Supervisor(transport), duperModel);
+ }
+
+ private SlobrokMonitorManagerImpl(Transport transport, Supervisor orb, DuperModelManager duperModel) {
+ this(() -> new SlobrokMonitor(orb), transport, duperModel);
+ orb.useSmallBuffers();
+ }
+
+ SlobrokMonitorManagerImpl(Supplier<SlobrokMonitor> slobrokMonitorFactory, Transport transport, DuperModelManager duperModel) {
this.slobrokMonitorFactory = slobrokMonitorFactory;
+ this.transport = transport;
this.duperModel = duperModel;
}
@@ -77,6 +95,11 @@ public class SlobrokMonitorManagerImpl implements SlobrokApi, MonitorManager {
}
@Override
+ public void deconstruct() {
+ transport.shutdown().join();
+ }
+
+ @Override
public List<Mirror.Entry> lookup(ApplicationId id, String pattern) {
synchronized (monitor) {
SlobrokMonitor slobrokMonitor = slobrokMonitors.get(id);
diff --git a/service-monitor/src/test/java/com/yahoo/vespa/service/slobrok/SlobrokMonitorManagerImplTest.java b/service-monitor/src/test/java/com/yahoo/vespa/service/slobrok/SlobrokMonitorManagerImplTest.java
index eca7d695be6..8fabb385bb2 100644
--- a/service-monitor/src/test/java/com/yahoo/vespa/service/slobrok/SlobrokMonitorManagerImplTest.java
+++ b/service-monitor/src/test/java/com/yahoo/vespa/service/slobrok/SlobrokMonitorManagerImplTest.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.service.slobrok;
import com.yahoo.config.model.api.ApplicationInfo;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.jrt.Transport;
import com.yahoo.vespa.applicationmodel.ClusterId;
import com.yahoo.vespa.applicationmodel.ConfigId;
import com.yahoo.vespa.applicationmodel.ServiceStatus;
@@ -28,7 +29,7 @@ public class SlobrokMonitorManagerImplTest {
private final DuperModelManager duperModelManager = mock(DuperModelManager.class);
private final SlobrokMonitorManagerImpl slobrokMonitorManager =
- new SlobrokMonitorManagerImpl(slobrokMonitorFactory, duperModelManager);
+ new SlobrokMonitorManagerImpl(slobrokMonitorFactory, mock(Transport.class), duperModelManager);
private final SlobrokMonitor slobrokMonitor = mock(SlobrokMonitor.class);
private final ApplicationId applicationId = ApplicationId.from("tenant", "app", "instance");
private final ApplicationInfo application = mock(ApplicationInfo.class);
diff --git a/service-monitor/src/test/java/com/yahoo/vespa/service/slobrok/SlobrokMonitorTest.java b/service-monitor/src/test/java/com/yahoo/vespa/service/slobrok/SlobrokMonitorTest.java
index 8bec3bf6cd8..7757aed8ac7 100644
--- a/service-monitor/src/test/java/com/yahoo/vespa/service/slobrok/SlobrokMonitorTest.java
+++ b/service-monitor/src/test/java/com/yahoo/vespa/service/slobrok/SlobrokMonitorTest.java
@@ -14,7 +14,7 @@ import static org.mockito.Mockito.verify;
public class SlobrokMonitorTest {
private final SlobrokList slobrokList = mock(SlobrokList.class);
private final Mirror mirror = mock(Mirror.class);
- private SlobrokMonitor slobrokMonitor = new SlobrokMonitor(slobrokList, mirror);
+ private final SlobrokMonitor slobrokMonitor = new SlobrokMonitor(slobrokList, mirror);
@Test
public void testUpdateSlobrokList() {
diff --git a/slobrok/src/apps/slobrok/slobrok.cpp b/slobrok/src/apps/slobrok/slobrok.cpp
index 5d650fafc96..390b7356410 100644
--- a/slobrok/src/apps/slobrok/slobrok.cpp
+++ b/slobrok/src/apps/slobrok/slobrok.cpp
@@ -53,7 +53,7 @@ App::Main()
int argi = 1;
const char* optArg;
- char c;
+ int c;
while ((c = GetOpt("c:s:p:", optArg, argi)) != -1) {
switch (c) {
case 'c':
diff --git a/slobrok/src/tests/startsome/tstdst.cpp b/slobrok/src/tests/startsome/tstdst.cpp
index c119ed3c026..b6be075cf7b 100644
--- a/slobrok/src/tests/startsome/tstdst.cpp
+++ b/slobrok/src/tests/startsome/tstdst.cpp
@@ -189,7 +189,7 @@ public:
int argi = 1;
const char* optArg;
- char c;
+ int c;
while ((c = GetOpt("n:p:s:", optArg, argi)) != -1) {
switch (c) {
case 'p':
diff --git a/slobrok/src/vespa/slobrok/server/sbenv.cpp b/slobrok/src/vespa/slobrok/server/sbenv.cpp
index 91a283b17f3..579703a6f3f 100644
--- a/slobrok/src/vespa/slobrok/server/sbenv.cpp
+++ b/slobrok/src/vespa/slobrok/server/sbenv.cpp
@@ -248,7 +248,7 @@ SBEnv::addPeer(const std::string &name, const std::string &spec)
vespalib::string peers = toString(_partnerList);
LOG(warning, "got addPeer with non-configured peer %s, check config consistency. configured peers = %s",
spec.c_str(), peers.c_str());
- return OkState(FRTE_RPC_METHOD_FAILED, "configured partner list does not contain peer. configured peers = " + peers);
+ _partnerList.push_back(spec);
}
return _rpcsrvmanager.addPeer(name, spec.c_str());
}
diff --git a/staging_vespalib/src/vespa/vespalib/util/document_runnable.cpp b/staging_vespalib/src/vespa/vespalib/util/document_runnable.cpp
index e9e5cbbf953..53fe3c8a1d0 100644
--- a/staging_vespalib/src/vespa/vespalib/util/document_runnable.cpp
+++ b/staging_vespalib/src/vespa/vespalib/util/document_runnable.cpp
@@ -26,7 +26,7 @@ bool Runnable::start(FastOS_ThreadPool& pool)
if (_state != NOT_RUNNING) return false;
_state = STARTING;
if (pool.NewThread(this) == nullptr) {
- throw vespalib::IllegalStateException("Faled starting a new thread", VESPA_STRLOC);
+ throw vespalib::IllegalStateException("Failed starting a new thread", VESPA_STRLOC);
}
return true;
}
diff --git a/storage/src/tests/common/teststorageapp.cpp b/storage/src/tests/common/teststorageapp.cpp
index e61050c21a3..7891f21a73d 100644
--- a/storage/src/tests/common/teststorageapp.cpp
+++ b/storage/src/tests/common/teststorageapp.cpp
@@ -226,7 +226,7 @@ TestDistributorApp::TestDistributorApp(NodeIndex index, vespalib::stringref conf
TestDistributorApp::~TestDistributorApp() = default;
api::Timestamp
-TestDistributorApp::getUniqueTimestamp()
+TestDistributorApp::generate_unique_timestamp()
{
std::lock_guard guard(_accessLock);
uint64_t timeNow(getClock().getTimeInSeconds().getTime());
diff --git a/storage/src/tests/common/teststorageapp.h b/storage/src/tests/common/teststorageapp.h
index 9e273002580..f30e0b62f4d 100644
--- a/storage/src/tests/common/teststorageapp.h
+++ b/storage/src/tests/common/teststorageapp.h
@@ -101,7 +101,7 @@ public:
private:
// Storage server interface implementation (until we can remove it)
- virtual api::Timestamp getUniqueTimestamp() { abort(); }
+ virtual api::Timestamp generate_unique_timestamp() { abort(); }
[[nodiscard]] virtual StorBucketDatabase& content_bucket_db(document::BucketSpace) { abort(); }
virtual StorBucketDatabase& getStorageBucketDatabase() { abort(); }
virtual BucketDatabase& getBucketDatabase() { abort(); }
@@ -157,7 +157,7 @@ public:
return _compReg;
}
- api::Timestamp getUniqueTimestamp() override;
+ api::Timestamp generate_unique_timestamp() override;
};
} // storageo
diff --git a/storage/src/tests/distributor/CMakeLists.txt b/storage/src/tests/distributor/CMakeLists.txt
index 810ffb550bf..f43280a5b44 100644
--- a/storage/src/tests/distributor/CMakeLists.txt
+++ b/storage/src/tests/distributor/CMakeLists.txt
@@ -13,6 +13,7 @@ vespa_add_executable(storage_distributor_gtest_runner_app TEST
distributor_bucket_space_test.cpp
distributor_host_info_reporter_test.cpp
distributor_message_sender_stub.cpp
+ distributor_stripe_pool_test.cpp
distributortest.cpp
distributortestutil.cpp
externaloperationhandlertest.cpp
@@ -24,6 +25,7 @@ vespa_add_executable(storage_distributor_gtest_runner_app TEST
maintenanceschedulertest.cpp
mergelimitertest.cpp
mergeoperationtest.cpp
+ multi_thread_stripe_access_guard_test.cpp
nodeinfotest.cpp
nodemaintenancestatstrackertest.cpp
operation_sequencer_test.cpp
diff --git a/storage/src/tests/distributor/bucketdbupdatertest.cpp b/storage/src/tests/distributor/bucketdbupdatertest.cpp
index 97fccf58901..7e8fec3b83a 100644
--- a/storage/src/tests/distributor/bucketdbupdatertest.cpp
+++ b/storage/src/tests/distributor/bucketdbupdatertest.cpp
@@ -1865,15 +1865,15 @@ TEST_F(BucketDBUpdaterTest, cluster_state_always_sends_full_fetch_when_distribut
TEST_F(BucketDBUpdaterTest, changed_distribution_config_triggers_recovery_mode) {
ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:6 storage:6"), messageCount(6), 20));
_sender.clear();
- EXPECT_TRUE(_distributor->isInRecoveryMode());
+ EXPECT_TRUE(distributor_is_in_recovery_mode());
complete_recovery_mode();
- EXPECT_FALSE(_distributor->isInRecoveryMode());
+ EXPECT_FALSE(distributor_is_in_recovery_mode());
std::string distConfig(getDistConfig6Nodes4Groups());
setDistribution(distConfig);
sortSentMessagesByIndex(_sender);
// No replies received yet, still no recovery mode.
- EXPECT_FALSE(_distributor->isInRecoveryMode());
+ EXPECT_FALSE(distributor_is_in_recovery_mode());
ASSERT_EQ(messageCount(6), _sender.commands().size());
uint32_t numBuckets = 10;
@@ -1884,9 +1884,9 @@ TEST_F(BucketDBUpdaterTest, changed_distribution_config_triggers_recovery_mode)
// Pending cluster state (i.e. distribution) has been enabled, which should
// cause recovery mode to be entered.
- EXPECT_TRUE(_distributor->isInRecoveryMode());
+ EXPECT_TRUE(distributor_is_in_recovery_mode());
complete_recovery_mode();
- EXPECT_FALSE(_distributor->isInRecoveryMode());
+ EXPECT_FALSE(distributor_is_in_recovery_mode());
}
namespace {
@@ -2471,14 +2471,14 @@ TEST_F(BucketDBUpdaterTest, deferred_activated_state_does_not_enable_state_until
"version:2 distributor:1 storage:4", n_buckets, 4));
// Version should not be switched over yet
- EXPECT_EQ(uint32_t(1), getDistributor().getClusterStateBundle().getVersion());
+ EXPECT_EQ(uint32_t(1), current_distributor_cluster_state_bundle().getVersion());
EXPECT_EQ(uint64_t(0), mutable_default_db().size());
EXPECT_EQ(uint64_t(0), mutable_global_db().size());
EXPECT_FALSE(activate_cluster_state_version(2));
- EXPECT_EQ(uint32_t(2), getDistributor().getClusterStateBundle().getVersion());
+ EXPECT_EQ(uint32_t(2), current_distributor_cluster_state_bundle().getVersion());
EXPECT_EQ(uint64_t(n_buckets), mutable_default_db().size());
EXPECT_EQ(uint64_t(n_buckets), mutable_global_db().size());
}
diff --git a/storage/src/tests/distributor/distributor_message_sender_stub.h b/storage/src/tests/distributor/distributor_message_sender_stub.h
index 3791839f3fe..59a5a82b7df 100644
--- a/storage/src/tests/distributor/distributor_message_sender_stub.h
+++ b/storage/src/tests/distributor/distributor_message_sender_stub.h
@@ -10,7 +10,7 @@
namespace storage {
-class DistributorMessageSenderStub : public distributor::DistributorMessageSender {
+class DistributorMessageSenderStub : public distributor::DistributorStripeMessageSender {
MessageSenderStub _stub_impl;
distributor::PendingMessageTracker* _pending_message_tracker;
distributor::OperationSequencer* _operation_sequencer;
diff --git a/storage/src/tests/distributor/distributor_stripe_pool_test.cpp b/storage/src/tests/distributor/distributor_stripe_pool_test.cpp
new file mode 100644
index 00000000000..fb7c446a781
--- /dev/null
+++ b/storage/src/tests/distributor/distributor_stripe_pool_test.cpp
@@ -0,0 +1,92 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include "mock_tickable_stripe.h"
+#include <vespa/storage/distributor/distributor_stripe_pool.h>
+#include <vespa/vespalib/gtest/gtest.h>
+#include <vespa/vespalib/util/time.h>
+#include <atomic>
+#include <thread>
+
+using namespace ::testing;
+
+namespace storage::distributor {
+
+struct DistributorStripePoolThreadingTest : Test {
+ static constexpr vespalib::duration min_test_duration = 50ms;
+
+ DistributorStripePool _pool;
+ vespalib::steady_time _start_time;
+ std::atomic<bool> _is_parked;
+
+ DistributorStripePoolThreadingTest()
+ : _pool(),
+ _start_time(std::chrono::steady_clock::now()),
+ _is_parked(false)
+ {
+ // Set an absurdly high tick wait duration to catch any regressions where
+ // thread wakeups aren't triggering as expected.
+ _pool.set_tick_wait_duration(600s);
+ // Ensure we always trigger a wait if tick() returns false.
+ _pool.set_ticks_before_wait(0);
+ }
+
+ bool min_test_time_reached() const noexcept {
+ return ((std::chrono::steady_clock::now() - _start_time) > min_test_duration);
+ }
+
+ void loop_park_unpark_cycle_until_test_time_expired() {
+ constexpr size_t min_cycles = 100;
+ size_t cycle = 0;
+ // TODO enforce minimum number of actual calls to tick() per thread?
+ while ((cycle < min_cycles) || !min_test_time_reached()) {
+ _pool.park_all_threads();
+ _is_parked = true;
+ std::this_thread::sleep_for(50us);
+ _is_parked = false;
+ _pool.unpark_all_threads();
+ ++cycle;
+ }
+ }
+};
+
+// Optimistic invariant checker that cannot prove correctness, but will hopefully
+// make tests scream if something is obviously incorrect.
+struct ParkingInvariantCheckingMockStripe : MockTickableStripe {
+ std::atomic<bool>& _is_parked;
+ bool _to_return;
+
+ explicit ParkingInvariantCheckingMockStripe(std::atomic<bool>& is_parked)
+ : _is_parked(is_parked),
+ _to_return(true)
+ {}
+
+ bool tick() override {
+ std::this_thread::sleep_for(50us);
+ assert(!_is_parked.load());
+ // Alternate between returning whether or not work was done to trigger
+ // both waiting and non-waiting edges. Note that this depends on the
+ // ticks_before_wait value being 0.
+ _to_return = !_to_return;
+ return _to_return;
+ }
+};
+
+TEST_F(DistributorStripePoolThreadingTest, can_park_and_unpark_single_stripe) {
+ ParkingInvariantCheckingMockStripe stripe(_is_parked);
+
+ _pool.start({{&stripe}});
+ loop_park_unpark_cycle_until_test_time_expired();
+ _pool.stop_and_join();
+}
+
+TEST_F(DistributorStripePoolThreadingTest, can_park_and_unpark_multiple_stripes) {
+ ParkingInvariantCheckingMockStripe s1(_is_parked);
+ ParkingInvariantCheckingMockStripe s2(_is_parked);
+ ParkingInvariantCheckingMockStripe s3(_is_parked);
+ ParkingInvariantCheckingMockStripe s4(_is_parked);
+
+ _pool.start({{&s1, &s2, &s3, &s4}});
+ loop_park_unpark_cycle_until_test_time_expired();
+ _pool.stop_and_join();
+}
+
+}
diff --git a/storage/src/tests/distributor/distributortest.cpp b/storage/src/tests/distributor/distributortest.cpp
index 7958306db5f..5a5ebb8c823 100644
--- a/storage/src/tests/distributor/distributortest.cpp
+++ b/storage/src/tests/distributor/distributortest.cpp
@@ -175,6 +175,10 @@ struct DistributorTest : Test, DistributorTestUtil {
return _distributor->handleMessage(msg);
}
+ uint64_t db_sample_interval_sec() const noexcept {
+ return std::chrono::duration_cast<std::chrono::seconds>(_distributor->db_memory_sample_interval()).count();
+ }
+
void configure_stale_reads_enabled(bool enabled) {
ConfigBuilder builder;
builder.allowStaleReadsDuringClusterStateTransitions = enabled;
@@ -280,19 +284,19 @@ TEST_F(DistributorTest, recovery_mode_on_cluster_state_change) {
"storage:1 .0.s:d distributor:1");
enableDistributorClusterState("storage:1 distributor:1");
- EXPECT_TRUE(_distributor->isInRecoveryMode());
+ EXPECT_TRUE(distributor_is_in_recovery_mode());
for (uint32_t i = 0; i < 3; ++i) {
addNodesToBucketDB(document::BucketId(16, i), "0=1");
}
for (int i = 0; i < 3; ++i) {
tick();
- EXPECT_TRUE(_distributor->isInRecoveryMode());
+ EXPECT_TRUE(distributor_is_in_recovery_mode());
}
tick();
- EXPECT_FALSE(_distributor->isInRecoveryMode());
+ EXPECT_FALSE(distributor_is_in_recovery_mode());
enableDistributorClusterState("storage:2 distributor:1");
- EXPECT_TRUE(_distributor->isInRecoveryMode());
+ EXPECT_TRUE(distributor_is_in_recovery_mode());
}
// TODO -> stripe test
@@ -489,14 +493,6 @@ TEST_F(DistributorTest, metric_update_hook_updates_pending_maintenance_metrics)
}
}
-namespace {
-
-uint64_t db_sample_interval_sec(const Distributor& d) noexcept {
- return std::chrono::duration_cast<std::chrono::seconds>(d.db_memory_sample_interval()).count();
-}
-
-}
-
// TODO -> stripe test
TEST_F(DistributorTest, bucket_db_memory_usage_metrics_only_updated_at_fixed_time_intervals) {
getClock().setAbsoluteTimeInSeconds(1000);
@@ -517,7 +513,7 @@ TEST_F(DistributorTest, bucket_db_memory_usage_metrics_only_updated_at_fixed_tim
// interval has passed. Instead, old metric gauge values should be preserved.
addNodesToBucketDB(document::BucketId(16, 2), "0=1/1/1/t/a,1=2/2/2");
- const auto sample_interval_sec = db_sample_interval_sec(getDistributor());
+ const auto sample_interval_sec = db_sample_interval_sec();
getClock().setAbsoluteTimeInSeconds(1000 + sample_interval_sec - 1); // Not there yet.
tickDistributorNTimes(50);
distributor_metric_update_hook().updateMetrics(metrics::MetricLockGuard(l));
@@ -925,7 +921,7 @@ TEST_F(DistributorTest, merge_busy_inhibit_duration_is_propagated_to_pending_mes
reply->setResult(api::ReturnCode(api::ReturnCode::BUSY));
_distributor->handleReply(std::shared_ptr<api::StorageReply>(std::move(reply)));
- auto& node_info = _distributor->getPendingMessageTracker().getNodeInfo();
+ auto& node_info = pending_message_tracker().getNodeInfo();
EXPECT_TRUE(node_info.isBusy(0));
getClock().addSecondsToTime(99);
@@ -1045,7 +1041,7 @@ TEST_F(DistributorTest, entering_recovery_mode_resets_bucket_space_stats) {
tickDistributorNTimes(5); // 1/3rds into second round through database
enableDistributorClusterState("version:2 distributor:1 storage:3 .1.s:d");
- EXPECT_TRUE(_distributor->isInRecoveryMode());
+ EXPECT_TRUE(distributor_is_in_recovery_mode());
// Bucket space stats should now be invalid per space per node, pending stats
// from state version 2. Exposing stats from version 1 risks reporting stale
// information back to the cluster controller.
@@ -1066,13 +1062,13 @@ TEST_F(DistributorTest, leaving_recovery_mode_immediately_sends_getnodestate_rep
addNodesToBucketDB(document::BucketId(16, 2), "0=1/1/1/t/a");
enableDistributorClusterState("version:2 distributor:1 storage:3 .1.s:d");
- EXPECT_TRUE(_distributor->isInRecoveryMode());
+ EXPECT_TRUE(distributor_is_in_recovery_mode());
EXPECT_EQ(0, explicit_node_state_reply_send_invocations());
tickDistributorNTimes(1); // DB round not yet complete
EXPECT_EQ(0, explicit_node_state_reply_send_invocations());
tickDistributorNTimes(2); // DB round complete after 2nd bucket + "scan done" discovery tick
EXPECT_EQ(1, explicit_node_state_reply_send_invocations());
- EXPECT_FALSE(_distributor->isInRecoveryMode());
+ EXPECT_FALSE(distributor_is_in_recovery_mode());
// Now out of recovery mode, subsequent round completions should not send replies
tickDistributorNTimes(10);
EXPECT_EQ(1, explicit_node_state_reply_send_invocations());
@@ -1080,12 +1076,12 @@ TEST_F(DistributorTest, leaving_recovery_mode_immediately_sends_getnodestate_rep
void DistributorTest::do_test_pending_merge_getnodestate_reply_edge(BucketSpace space) {
setupDistributor(Redundancy(2), NodeCount(2), "version:1 distributor:1 storage:2");
- EXPECT_TRUE(_distributor->isInRecoveryMode());
+ EXPECT_TRUE(distributor_is_in_recovery_mode());
// 2 buckets with missing replicas triggering merge pending stats
addNodesToBucketDB(Bucket(space, BucketId(16, 1)), "0=1/1/1/t/a");
addNodesToBucketDB(Bucket(space, BucketId(16, 2)), "0=1/1/1/t/a");
tickDistributorNTimes(3);
- EXPECT_FALSE(_distributor->isInRecoveryMode());
+ EXPECT_FALSE(distributor_is_in_recovery_mode());
const auto space_name = FixedBucketSpaces::to_string(space);
assertBucketSpaceStats(2, 0, 1, space_name, _distributor->getBucketSpacesStats());
// First completed scan sends off merge stats et al to cluster controller
@@ -1214,7 +1210,7 @@ TEST_F(DistributorTest, gets_are_not_started_outside_main_distributor_logic_if_s
TEST_F(DistributorTest, gets_started_outside_main_thread_are_not_tracked_by_main_pending_message_tracker) {
set_up_and_start_get_op_with_stale_reads_enabled(true);
Bucket bucket(FixedBucketSpaces::default_space(), BucketId(16, 1));
- EXPECT_FALSE(_distributor->getPendingMessageTracker().hasPendingMessage(
+ EXPECT_FALSE(pending_message_tracker().hasPendingMessage(
0, bucket, api::MessageType::GET_ID));
}
@@ -1258,4 +1254,61 @@ TEST_F(DistributorTest, wanted_split_bit_count_is_lower_bounded) {
EXPECT_EQ(getConfig().getMinimalBucketSplit(), 8);
}
+TEST_F(DistributorTest, host_info_sent_immediately_once_all_stripes_first_reported) {
+ set_num_distributor_stripes(3);
+ createLinks();
+ getClock().setAbsoluteTimeInSeconds(1000);
+ // TODO STRIPE can't call this currently since it touches the bucket DB updater directly:
+ // setupDistributor(Redundancy(2), NodeCount(2), "version:1 distributor:1 storage:2");
+
+ tickDistributorNTimes(1);
+ EXPECT_EQ(0, explicit_node_state_reply_send_invocations()); // Nothing yet
+ getDistributor().notify_stripe_wants_to_send_host_info(1);
+ getDistributor().notify_stripe_wants_to_send_host_info(2);
+
+ tickDistributorNTimes(1);
+ // Still nothing. Missing initial report from stripe 0
+ EXPECT_EQ(0, explicit_node_state_reply_send_invocations());
+
+ getDistributor().notify_stripe_wants_to_send_host_info(0);
+ tickDistributorNTimes(1);
+ // All stripes have reported in, it's time to party!
+ EXPECT_EQ(1, explicit_node_state_reply_send_invocations());
+
+ // No further sends if stripes haven't requested it yet.
+ getClock().setAbsoluteTimeInSeconds(2000);
+ tickDistributorNTimes(10);
+ EXPECT_EQ(1, explicit_node_state_reply_send_invocations());
+}
+
+// TODO STRIPE make delay configurable instead of hardcoded
+TEST_F(DistributorTest, non_bootstrap_host_info_send_request_delays_sending) {
+ set_num_distributor_stripes(3);
+ createLinks();
+ getClock().setAbsoluteTimeInSeconds(1000);
+
+ for (uint16_t i = 0; i < 3; ++i) {
+ getDistributor().notify_stripe_wants_to_send_host_info(i);
+ }
+ tickDistributorNTimes(1);
+ // Bootstrap case
+ EXPECT_EQ(1, explicit_node_state_reply_send_invocations());
+
+ // Stripe 1 suddenly really wants to tell the cluster controller something again
+ getDistributor().notify_stripe_wants_to_send_host_info(1);
+ tickDistributorNTimes(1);
+ // But its cry for attention is not yet honored since the delay hasn't passed.
+ EXPECT_EQ(1, explicit_node_state_reply_send_invocations());
+
+ getClock().addMilliSecondsToTime(999);
+ tickDistributorNTimes(1);
+ // 1 sec delay has still not passed
+ EXPECT_EQ(1, explicit_node_state_reply_send_invocations());
+
+ getClock().addMilliSecondsToTime(1);
+ tickDistributorNTimes(1);
+ // But now it has
+ EXPECT_EQ(2, explicit_node_state_reply_send_invocations());
+}
+
}
diff --git a/storage/src/tests/distributor/distributortestutil.cpp b/storage/src/tests/distributor/distributortestutil.cpp
index bdd953b6206..a2f32d8faa2 100644
--- a/storage/src/tests/distributor/distributortestutil.cpp
+++ b/storage/src/tests/distributor/distributortestutil.cpp
@@ -7,6 +7,7 @@
#include <vespa/storage/distributor/distributor_bucket_space.h>
#include <vespa/storage/distributor/distributor_stripe.h>
#include <vespa/storage/distributor/distributor_stripe_component.h>
+#include <vespa/storage/distributor/distributor_stripe_pool.h>
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vespalib/text/stringtokenizer.h>
@@ -16,7 +17,8 @@ using document::test::makeDocumentBucket;
namespace storage::distributor {
DistributorTestUtil::DistributorTestUtil()
- : _messageSender(_sender, _senderDown)
+ : _messageSender(_sender, _senderDown),
+ _num_distributor_stripes(0) // TODO STRIPE change default
{
_config = getStandardConfig(false);
}
@@ -27,12 +29,14 @@ DistributorTestUtil::createLinks()
{
_node.reset(new TestDistributorApp(_config.getConfigId()));
_threadPool = framework::TickingThreadPool::createDefault("distributor");
+ _stripe_pool = std::make_unique<DistributorStripePool>();
_distributor.reset(new Distributor(
_node->getComponentRegister(),
_node->node_identity(),
*_threadPool,
+ *_stripe_pool,
*this,
- 0,
+ _num_distributor_stripes,
_hostInfo,
&_messageSender));
_component.reset(new storage::DistributorComponent(_node->getComponentRegister(), "distrtestutil"));
@@ -291,7 +295,7 @@ DistributorTestUtil::insertBucketInfo(document::BucketId id,
if (active) {
info2.setActive();
}
- BucketCopy copy(distributor_component().getUniqueTimestamp(), node, info2);
+ BucketCopy copy(operation_context().generate_unique_timestamp(), node, info2);
entry->addNode(copy.setTrusted(trusted), toVector<uint16_t>(0));
@@ -351,9 +355,8 @@ DistributorTestUtil::getExternalOperationHandler() {
return _distributor->external_operation_handler();
}
-storage::distributor::DistributorStripeComponent&
-DistributorTestUtil::distributor_component() {
- // TODO STRIPE tests use this to indirectly access bucket space repos/DBs!
+const storage::distributor::DistributorNodeContext&
+DistributorTestUtil::node_context() const {
return _distributor->distributor_component();
}
@@ -362,6 +365,11 @@ DistributorTestUtil::operation_context() {
return _distributor->distributor_component();
}
+const DocumentSelectionParser&
+DistributorTestUtil::doc_selection_parser() const {
+ return _distributor->distributor_component();
+}
+
bool
DistributorTestUtil::tick() {
framework::ThreadWaitInfo res(
@@ -425,6 +433,36 @@ DistributorTestUtil::getReadOnlyBucketSpaceRepo() const {
return _distributor->getReadOnlyBucketSpaceRepo();
}
+bool
+DistributorTestUtil::distributor_is_in_recovery_mode() const noexcept {
+ return _distributor->isInRecoveryMode();
+}
+
+const lib::ClusterStateBundle&
+DistributorTestUtil::current_distributor_cluster_state_bundle() const noexcept {
+ return getDistributor().getClusterStateBundle();
+}
+
+std::string
+DistributorTestUtil::active_ideal_state_operations() const {
+ return _distributor->getActiveIdealStateOperations();
+}
+
+const PendingMessageTracker&
+DistributorTestUtil::pending_message_tracker() const noexcept {
+ return _distributor->getPendingMessageTracker();
+}
+
+PendingMessageTracker&
+DistributorTestUtil::pending_message_tracker() noexcept {
+ return _distributor->getPendingMessageTracker();
+}
+
+std::chrono::steady_clock::duration
+DistributorTestUtil::db_memory_sample_interval() const noexcept {
+ return _distributor->db_memory_sample_interval();
+}
+
const lib::Distribution&
DistributorTestUtil::getDistribution() const {
return getBucketSpaceRepo().get(makeBucketSpace()).getDistribution();
@@ -453,4 +491,9 @@ DistributorTestUtil::enable_distributor_cluster_state(const lib::ClusterStateBun
getBucketDBUpdater().simulate_cluster_state_bundle_activation(state);
}
+void
+DistributorTestUtil::setSystemState(const lib::ClusterState& systemState) {
+ _distributor->enableClusterStateBundle(lib::ClusterStateBundle(systemState));
+}
+
}
diff --git a/storage/src/tests/distributor/distributortestutil.h b/storage/src/tests/distributor/distributortestutil.h
index b845456e873..6664b8d823d 100644
--- a/storage/src/tests/distributor/distributortestutil.h
+++ b/storage/src/tests/distributor/distributortestutil.h
@@ -17,16 +17,19 @@ namespace framework { struct TickingThreadPool; }
namespace distributor {
-class StripeBucketDBUpdater;
class Distributor;
class DistributorBucketSpace;
class DistributorBucketSpaceRepo;
-class DistributorStripeOperationContext;
+class DistributorNodeContext;
class DistributorStripe;
class DistributorStripeComponent;
+class DistributorStripeOperationContext;
+class DistributorStripePool;
+class DocumentSelectionParser;
class ExternalOperationHandler;
class IdealStateManager;
class Operation;
+class StripeBucketDBUpdater;
// TODO STRIPE rename to DistributorStripeTestUtil?
class DistributorTestUtil : private DoneInitializeHandler
@@ -115,12 +118,12 @@ public:
StripeBucketDBUpdater& getBucketDBUpdater();
IdealStateManager& getIdealStateManager();
ExternalOperationHandler& getExternalOperationHandler();
- storage::distributor::DistributorStripeComponent& distributor_component();
+ const storage::distributor::DistributorNodeContext& node_context() const;
storage::distributor::DistributorStripeOperationContext& operation_context();
+ const DocumentSelectionParser& doc_selection_parser() const;
- Distributor& getDistributor() {
- return *_distributor;
- }
+ Distributor& getDistributor() noexcept { return *_distributor; }
+ const Distributor& getDistributor() const noexcept { return *_distributor; }
bool tick();
@@ -140,6 +143,12 @@ public:
const DistributorBucketSpaceRepo &getBucketSpaceRepo() const;
DistributorBucketSpaceRepo& getReadOnlyBucketSpaceRepo();
const DistributorBucketSpaceRepo& getReadOnlyBucketSpaceRepo() const;
+ [[nodiscard]] bool distributor_is_in_recovery_mode() const noexcept;
+ [[nodiscard]] const lib::ClusterStateBundle& current_distributor_cluster_state_bundle() const noexcept;
+ [[nodiscard]] std::string active_ideal_state_operations() const;
+ [[nodiscard]] const PendingMessageTracker& pending_message_tracker() const noexcept;
+ [[nodiscard]] PendingMessageTracker& pending_message_tracker() noexcept;
+ [[nodiscard]] std::chrono::steady_clock::duration db_memory_sample_interval() const noexcept;
const lib::Distribution& getDistribution() const;
// "End to end" distribution change trigger, which will invoke the bucket
@@ -190,10 +199,18 @@ public:
DistributorMessageSenderStub& sender() noexcept { return _sender; }
const DistributorMessageSenderStub& sender() const noexcept { return _sender; }
+
+ void setSystemState(const lib::ClusterState& systemState);
+
+ // Must be called prior to createLinks() to have any effect
+ void set_num_distributor_stripes(uint32_t n_stripes) noexcept {
+ _num_distributor_stripes = n_stripes;
+ }
protected:
vdstestlib::DirConfig _config;
std::unique_ptr<TestDistributorApp> _node;
std::unique_ptr<framework::TickingThreadPool> _threadPool;
+ std::unique_ptr<DistributorStripePool> _stripe_pool;
std::unique_ptr<Distributor> _distributor;
std::unique_ptr<storage::DistributorComponent> _component;
DistributorMessageSenderStub _sender;
@@ -214,6 +231,7 @@ protected:
}
};
MessageSenderImpl _messageSender;
+ uint32_t _num_distributor_stripes;
void enableDistributorClusterState(vespalib::stringref state);
void enable_distributor_cluster_state(const lib::ClusterStateBundle& state);
diff --git a/storage/src/tests/distributor/getoperationtest.cpp b/storage/src/tests/distributor/getoperationtest.cpp
index cb671bb07f5..c853dd692a0 100644
--- a/storage/src/tests/distributor/getoperationtest.cpp
+++ b/storage/src/tests/distributor/getoperationtest.cpp
@@ -57,7 +57,7 @@ struct GetOperationTest : Test, DistributorTestUtil {
void sendGet(api::InternalReadConsistency consistency = api::InternalReadConsistency::Strong) {
auto msg = std::make_shared<api::GetCommand>(makeDocumentBucket(BucketId(0)), docId, document::AllFields::NAME);
op = std::make_unique<GetOperation>(
- distributor_component(), getDistributorBucketSpace(),
+ node_context(), getDistributorBucketSpace(),
getDistributorBucketSpace().getBucketDatabase().acquire_read_guard(),
msg, getDistributor().getMetrics().gets,
consistency);
diff --git a/storage/src/tests/distributor/idealstatemanagertest.cpp b/storage/src/tests/distributor/idealstatemanagertest.cpp
index ce9aa0a6800..0a36e5cd0e5 100644
--- a/storage/src/tests/distributor/idealstatemanagertest.cpp
+++ b/storage/src/tests/distributor/idealstatemanagertest.cpp
@@ -38,10 +38,6 @@ struct IdealStateManagerTest : Test, DistributorTestUtil {
close();
}
- void setSystemState(const lib::ClusterState& systemState) {
- _distributor->enableClusterStateBundle(lib::ClusterStateBundle(systemState));
- }
-
bool checkBlock(const IdealStateOperation& op,
const document::Bucket& bucket,
const PendingMessageTracker& tracker,
@@ -120,7 +116,7 @@ TEST_F(IdealStateManagerTest, disabled_state_checker) {
ost.str());
tick();
- EXPECT_EQ("", _distributor->getActiveIdealStateOperations());
+ EXPECT_EQ("", active_ideal_state_operations());
}
@@ -143,13 +139,12 @@ TEST_F(IdealStateManagerTest, clear_active_on_node_down) {
EXPECT_EQ("setbucketstate to [2] Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)) (pri 100)\n"
"setbucketstate to [2] Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000002)) (pri 100)\n"
"setbucketstate to [2] Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000003)) (pri 100)\n",
- _distributor->getActiveIdealStateOperations());
+ active_ideal_state_operations());
setSystemState(lib::ClusterState("distributor:1 storage:3 .2.s:d"));
- EXPECT_EQ("", _distributor->getActiveIdealStateOperations());
- EXPECT_EQ(0, _distributor->getPendingMessageTracker()
- .getNodeInfo().getPendingCount(0));
+ EXPECT_EQ("", active_ideal_state_operations());
+ EXPECT_EQ(0, pending_message_tracker().getNodeInfo().getPendingCount(0));
}
TEST_F(IdealStateManagerTest, recheck_when_active) {
@@ -162,17 +157,17 @@ TEST_F(IdealStateManagerTest, recheck_when_active) {
tick();
EXPECT_EQ("setbucketstate to [0] Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)) (pri 100)\n",
- _distributor->getActiveIdealStateOperations());
+ active_ideal_state_operations());
tick();
EXPECT_EQ("setbucketstate to [0] Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)) (pri 100)\n",
- _distributor->getActiveIdealStateOperations());
+ active_ideal_state_operations());
tick();
EXPECT_EQ("setbucketstate to [0] Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)) (pri 100)\n",
- _distributor->getActiveIdealStateOperations());
+ active_ideal_state_operations());
}
TEST_F(IdealStateManagerTest, block_ideal_state_ops_on_full_request_bucket_info) {
diff --git a/storage/src/tests/distributor/maintenancemocks.h b/storage/src/tests/distributor/maintenancemocks.h
index 2bfb4ebb40f..fff798d4413 100644
--- a/storage/src/tests/distributor/maintenancemocks.h
+++ b/storage/src/tests/distributor/maintenancemocks.h
@@ -44,13 +44,13 @@ public:
return _bucket.toString();
}
- void onClose(DistributorMessageSender&) override {}
+ void onClose(DistributorStripeMessageSender&) override {}
const char* getName() const override { return "MockOperation"; }
const std::string& getDetailedReason() const override {
return _reason;
}
- void onStart(DistributorMessageSender&) override {}
- void onReceive(DistributorMessageSender&, const std::shared_ptr<api::StorageReply>&) override {}
+ void onStart(DistributorStripeMessageSender&) override {}
+ void onReceive(DistributorStripeMessageSender&, const std::shared_ptr<api::StorageReply>&) override {}
bool isBlocked(const PendingMessageTracker&, const OperationSequencer&) const override {
return _shouldBlock;
}
diff --git a/storage/src/tests/distributor/mock_tickable_stripe.h b/storage/src/tests/distributor/mock_tickable_stripe.h
new file mode 100644
index 00000000000..532bef5e50d
--- /dev/null
+++ b/storage/src/tests/distributor/mock_tickable_stripe.h
@@ -0,0 +1,42 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <vespa/storage/distributor/tickable_stripe.h>
+#include <cstdlib>
+
+namespace storage::distributor {
+
+struct MockTickableStripe : TickableStripe {
+ bool tick() override { abort(); }
+ void flush_and_close() override { abort(); }
+ void update_total_distributor_config(std::shared_ptr<const DistributorConfiguration>) override { abort(); }
+ void update_distribution_config(const BucketSpaceDistributionConfigs&) override { abort(); }
+ void set_pending_cluster_state_bundle(const lib::ClusterStateBundle&) override { abort(); }
+ void clear_pending_cluster_state_bundle() override { abort(); }
+ void enable_cluster_state_bundle(const lib::ClusterStateBundle&) override { abort(); }
+ void notify_distribution_change_enabled() override { abort(); }
+ PotentialDataLossReport remove_superfluous_buckets(document::BucketSpace, const lib::ClusterState&, bool) override {
+ abort();
+ }
+ void merge_entries_into_db(document::BucketSpace,
+ api::Timestamp,
+ const lib::Distribution&,
+ const lib::ClusterState&,
+ const char*,
+ const std::unordered_set<uint16_t>&,
+ const std::vector<dbtransition::Entry>&) override
+ {
+ abort();
+ }
+ void update_read_snapshot_before_db_pruning() override { abort(); }
+ void update_read_snapshot_after_db_pruning(const lib::ClusterStateBundle&) override { abort(); }
+ void update_read_snapshot_after_activation(const lib::ClusterStateBundle&) override { abort(); }
+ void clear_read_only_bucket_repo_databases() override { abort(); }
+
+ void report_bucket_db_status(document::BucketSpace, std::ostream&) const override { abort(); }
+ StripeAccessGuard::PendingOperationStats pending_operation_stats() const override { abort(); }
+ void report_single_bucket_requests(vespalib::xml::XmlOutputStream&) const override { abort(); }
+ void report_delayed_single_bucket_requests(vespalib::xml::XmlOutputStream&) const override { abort(); }
+};
+
+}
diff --git a/storage/src/tests/distributor/multi_thread_stripe_access_guard_test.cpp b/storage/src/tests/distributor/multi_thread_stripe_access_guard_test.cpp
new file mode 100644
index 00000000000..8513186d1e1
--- /dev/null
+++ b/storage/src/tests/distributor/multi_thread_stripe_access_guard_test.cpp
@@ -0,0 +1,59 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include "mock_tickable_stripe.h"
+#include <vespa/document/bucket/fixed_bucket_spaces.h>
+#include <vespa/storage/distributor/distributor_stripe_pool.h>
+#include <vespa/storage/distributor/multi_threaded_stripe_access_guard.h>
+#include <vespa/vdslib/state/clusterstate.h>
+#include <vespa/vespalib/gtest/gtest.h>
+
+using namespace ::testing;
+
+namespace storage::distributor {
+
+struct AggregationTestingMockTickableStripe : MockTickableStripe {
+ PotentialDataLossReport report;
+
+ PotentialDataLossReport remove_superfluous_buckets(document::BucketSpace, const lib::ClusterState&, bool) override {
+ return report;
+ }
+
+ bool tick() override {
+ return false;
+ }
+};
+
+struct MultiThreadedStripeAccessGuardTest : Test {
+ DistributorStripePool _pool;
+ MultiThreadedStripeAccessor _accessor;
+ AggregationTestingMockTickableStripe _stripe1;
+ AggregationTestingMockTickableStripe _stripe2;
+ AggregationTestingMockTickableStripe _stripe3;
+
+ MultiThreadedStripeAccessGuardTest()
+ : _pool(),
+ _accessor(_pool)
+ {}
+
+ ~MultiThreadedStripeAccessGuardTest() {
+ _pool.stop_and_join();
+ }
+
+ void start_pool_with_stripes() {
+ _pool.start({{&_stripe1, &_stripe2, &_stripe3}});
+ }
+};
+
+TEST_F(MultiThreadedStripeAccessGuardTest, remove_superfluous_buckets_aggregates_reports_across_stripes) {
+ _stripe1.report = PotentialDataLossReport(20, 100);
+ _stripe2.report = PotentialDataLossReport(5, 200);
+ _stripe3.report = PotentialDataLossReport(7, 350);
+ start_pool_with_stripes();
+
+ auto guard = _accessor.rendezvous_and_hold_all();
+ auto report = guard->remove_superfluous_buckets(document::FixedBucketSpaces::default_space(),
+ lib::ClusterState(), false);
+ EXPECT_EQ(report.buckets, 32);
+ EXPECT_EQ(report.documents, 650);
+}
+
+}
diff --git a/storage/src/tests/distributor/putoperationtest.cpp b/storage/src/tests/distributor/putoperationtest.cpp
index ffd07ad9d60..b75751c1270 100644
--- a/storage/src/tests/distributor/putoperationtest.cpp
+++ b/storage/src/tests/distributor/putoperationtest.cpp
@@ -74,8 +74,8 @@ public:
}
void sendPut(std::shared_ptr<api::PutCommand> msg) {
- op = std::make_unique<PutOperation>(distributor_component(),
- distributor_component(),
+ op = std::make_unique<PutOperation>(node_context(),
+ operation_context(),
getDistributorBucketSpace(),
msg,
getDistributor().getMetrics().
@@ -402,7 +402,7 @@ TEST_F(PutOperationTest, do_not_send_CreateBucket_if_already_pending) {
// Manually shove sent messages into pending message tracker, since
// this isn't done automatically.
for (size_t i = 0; i < _sender.commands().size(); ++i) {
- distributor_component().getDistributor().getPendingMessageTracker()
+ operation_context().pending_message_tracker()
.insert(_sender.command(i));
}
diff --git a/storage/src/tests/distributor/read_for_write_visitor_operation_test.cpp b/storage/src/tests/distributor/read_for_write_visitor_operation_test.cpp
index 02491b670c6..e6f86f56d47 100644
--- a/storage/src/tests/distributor/read_for_write_visitor_operation_test.cpp
+++ b/storage/src/tests/distributor/read_for_write_visitor_operation_test.cpp
@@ -67,7 +67,7 @@ struct ReadForWriteVisitorOperationStarterTest : Test, DistributorTestUtil {
createLinks();
setupDistributor(1, 1, "version:1 distributor:1 storage:1");
_op_owner = std::make_unique<OperationOwner>(_sender, getClock());
- _sender.setPendingMessageTracker(getDistributor().getPendingMessageTracker());
+ _sender.setPendingMessageTracker(pending_message_tracker());
addNodesToBucketDB(_sub_bucket, "0=1/2/3/t");
}
@@ -84,7 +84,7 @@ struct ReadForWriteVisitorOperationStarterTest : Test, DistributorTestUtil {
cmd->addBucketToBeVisited(BucketId()); // Will be inferred to first sub-bucket in DB
}
return std::make_shared<VisitorOperation>(
- distributor_component(), distributor_component(),
+ node_context(), operation_context(),
getDistributorBucketSpace(), cmd, _default_config,
getDistributor().getMetrics().visits);
}
@@ -96,7 +96,7 @@ struct ReadForWriteVisitorOperationStarterTest : Test, DistributorTestUtil {
std::shared_ptr<ReadForWriteVisitorOperationStarter> create_rfw_op(std::shared_ptr<VisitorOperation> visitor_op) {
return std::make_shared<ReadForWriteVisitorOperationStarter>(
std::move(visitor_op), operation_sequencer(),
- *_op_owner, getDistributor().getPendingMessageTracker(),
+ *_op_owner, pending_message_tracker(),
_mock_uuid_generator);
}
};
@@ -123,7 +123,7 @@ TEST_F(ReadForWriteVisitorOperationStarterTest, visitor_is_bounced_if_merge_pend
std::move(nodes),
api::Timestamp(123456));
merge->setAddress(make_storage_address(0));
- getDistributor().getPendingMessageTracker().insert(merge);
+ pending_message_tracker().insert(merge);
_op_owner->start(op, OperationStarter::Priority(120));
ASSERT_EQ("", _sender.getCommands(true));
EXPECT_EQ("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
@@ -157,13 +157,13 @@ struct ConcurrentMutationFixture {
_mutation = _test.sender().command(0);
// Since pending message tracking normally happens in the distributor itself during sendUp,
// we have to emulate this and explicitly insert the sent message into the pending mapping.
- _test.getDistributor().getPendingMessageTracker().insert(_mutation);
+ _test.pending_message_tracker().insert(_mutation);
}
void unblock_bucket() {
// Pretend update operation completed
auto update_reply = std::shared_ptr<api::StorageReply>(_mutation->makeReply());
- _test.getDistributor().getPendingMessageTracker().reply(*update_reply);
+ _test.pending_message_tracker().reply(*update_reply);
_test._op_owner->handleReply(update_reply);
}
};
diff --git a/storage/src/tests/distributor/removelocationtest.cpp b/storage/src/tests/distributor/removelocationtest.cpp
index 7ba2995d8e3..02164d67a46 100644
--- a/storage/src/tests/distributor/removelocationtest.cpp
+++ b/storage/src/tests/distributor/removelocationtest.cpp
@@ -28,9 +28,9 @@ struct RemoveLocationOperationTest : Test, DistributorTestUtil {
auto msg = std::make_shared<api::RemoveLocationCommand>(selection, makeDocumentBucket(document::BucketId(0)));
op = std::make_unique<RemoveLocationOperation>(
- distributor_component(),
- distributor_component(),
- distributor_component(),
+ node_context(),
+ operation_context(),
+ doc_selection_parser(),
getDistributorBucketSpace(),
msg,
getDistributor().getMetrics().
diff --git a/storage/src/tests/distributor/removeoperationtest.cpp b/storage/src/tests/distributor/removeoperationtest.cpp
index c4892f342e7..77ecb5d7aeb 100644
--- a/storage/src/tests/distributor/removeoperationtest.cpp
+++ b/storage/src/tests/distributor/removeoperationtest.cpp
@@ -36,8 +36,8 @@ struct RemoveOperationTest : Test, DistributorTestUtil {
auto msg = std::make_shared<api::RemoveCommand>(makeDocumentBucket(document::BucketId(0)), dId, 100);
op = std::make_unique<RemoveOperation>(
- distributor_component(),
- distributor_component(),
+ node_context(),
+ operation_context(),
getDistributorBucketSpace(),
msg,
getDistributor().getMetrics().
diff --git a/storage/src/tests/distributor/statecheckerstest.cpp b/storage/src/tests/distributor/statecheckerstest.cpp
index 9b49f1347cc..e4ecb672171 100644
--- a/storage/src/tests/distributor/statecheckerstest.cpp
+++ b/storage/src/tests/distributor/statecheckerstest.cpp
@@ -48,7 +48,7 @@ struct StateCheckersTest : Test, DistributorTestUtil {
};
void enableClusterState(const lib::ClusterState& systemState) {
- _distributor->enableClusterStateBundle(lib::ClusterStateBundle(systemState));
+ setSystemState(systemState);
}
void insertJoinableBuckets();
@@ -240,7 +240,8 @@ struct StateCheckersTest : Test, DistributorTestUtil {
tick(); // Trigger command processing and pending state setup.
}
NodeMaintenanceStatsTracker statsTracker;
- StateChecker::Context c(distributor_component(),
+ StateChecker::Context c(node_context(),
+ operation_context(),
getBucketSpaceRepo().get(params._bucket_space),
statsTracker,
bucket);
@@ -292,7 +293,8 @@ std::string StateCheckersTest::testSplit(uint32_t splitCount,
SplitBucketStateChecker checker;
NodeMaintenanceStatsTracker statsTracker;
- StateChecker::Context c(distributor_component(), getDistributorBucketSpace(), statsTracker, makeDocumentBucket(bid));
+ StateChecker::Context c(node_context(), operation_context(),
+ getDistributorBucketSpace(), statsTracker, makeDocumentBucket(bid));
getConfig().setSplitSize(splitSize);
getConfig().setSplitCount(splitCount);
getConfig().setMinimalBucketSplit(minSplitBits);
@@ -377,7 +379,8 @@ StateCheckersTest::testInconsistentSplit(const document::BucketId& bid,
{
SplitInconsistentStateChecker checker;
NodeMaintenanceStatsTracker statsTracker;
- StateChecker::Context c(distributor_component(), getDistributorBucketSpace(), statsTracker, makeDocumentBucket(bid));
+ StateChecker::Context c(node_context(), operation_context(),
+ getDistributorBucketSpace(), statsTracker, makeDocumentBucket(bid));
return testStateChecker(checker, c, true,
PendingMessage(), includePriority);
}
@@ -435,7 +438,8 @@ StateCheckersTest::testJoin(uint32_t joinCount,
getConfig().setMinimalBucketSplit(minSplitBits);
NodeMaintenanceStatsTracker statsTracker;
- StateChecker::Context c(distributor_component(), getDistributorBucketSpace(), statsTracker, makeDocumentBucket(bid));
+ StateChecker::Context c(node_context(), operation_context(),
+ getDistributorBucketSpace(), statsTracker, makeDocumentBucket(bid));
return testStateChecker(checker, c, true, blocker, includePriority);
}
@@ -588,7 +592,8 @@ StateCheckersTest::testSynchronizeAndMove(const std::string& bucketInfo,
enableDistributorClusterState(clusterState);
NodeMaintenanceStatsTracker statsTracker;
- StateChecker::Context c(distributor_component(), getDistributorBucketSpace(), statsTracker, makeDocumentBucket(bid));
+ StateChecker::Context c(node_context(), operation_context(),
+ getDistributorBucketSpace(), statsTracker, makeDocumentBucket(bid));
return testStateChecker(checker, c, false, blocker, includePriority);
}
@@ -822,7 +827,8 @@ StateCheckersTest::testDeleteExtraCopies(
}
DeleteExtraCopiesStateChecker checker;
NodeMaintenanceStatsTracker statsTracker;
- StateChecker::Context c(distributor_component(), getDistributorBucketSpace(), statsTracker, makeDocumentBucket(bid));
+ StateChecker::Context c(node_context(), operation_context(),
+ getDistributorBucketSpace(), statsTracker, makeDocumentBucket(bid));
return testStateChecker(checker, c, false, blocker, includePriority);
}
@@ -939,7 +945,8 @@ std::string StateCheckersTest::testBucketState(
BucketStateStateChecker checker;
NodeMaintenanceStatsTracker statsTracker;
- StateChecker::Context c(distributor_component(), getDistributorBucketSpace(), statsTracker, makeDocumentBucket(bid));
+ StateChecker::Context c(node_context(), operation_context(),
+ getDistributorBucketSpace(), statsTracker, makeDocumentBucket(bid));
return testStateChecker(checker, c, false, PendingMessage(),
includePriority);
}
@@ -1099,7 +1106,8 @@ std::string StateCheckersTest::testBucketStatePerGroup(
BucketStateStateChecker checker;
NodeMaintenanceStatsTracker statsTracker;
- StateChecker::Context c(distributor_component(), getDistributorBucketSpace(), statsTracker, makeDocumentBucket(bid));
+ StateChecker::Context c(node_context(), operation_context(),
+ getDistributorBucketSpace(), statsTracker, makeDocumentBucket(bid));
return testStateChecker(checker, c, false, PendingMessage(),
includePriority);
}
@@ -1286,7 +1294,8 @@ std::string StateCheckersTest::testGarbageCollection(
getConfig().setGarbageCollection("music", std::chrono::seconds(checkInterval));
getConfig().setLastGarbageCollectionChangeTime(vespalib::steady_time(std::chrono::seconds(lastChangeTime)));
NodeMaintenanceStatsTracker statsTracker;
- StateChecker::Context c(distributor_component(), getDistributorBucketSpace(), statsTracker,
+ StateChecker::Context c(node_context(), operation_context(),
+ getDistributorBucketSpace(), statsTracker,
makeDocumentBucket(e.getBucketId()));
getClock().setAbsoluteTimeInSeconds(nowTimestamp);
return testStateChecker(checker, c, false, PendingMessage(),
@@ -1359,7 +1368,8 @@ TEST_F(StateCheckersTest, gc_inhibited_when_ideal_node_in_maintenance) {
getConfig().setGarbageCollection("music", 3600s);
getConfig().setLastGarbageCollectionChangeTime(vespalib::steady_time(vespalib::duration::zero()));
NodeMaintenanceStatsTracker statsTracker;
- StateChecker::Context c(distributor_component(), getDistributorBucketSpace(), statsTracker,
+ StateChecker::Context c(node_context(), operation_context(),
+ getDistributorBucketSpace(), statsTracker,
makeDocumentBucket(bucket));
getClock().setAbsoluteTimeInSeconds(4000);
// Would normally (in a non-maintenance case) trigger GC due to having
@@ -1503,7 +1513,8 @@ TEST_F(StateCheckersTest, context_populates_ideal_state_containers) {
setupDistributor(2, 100, "distributor:1 storage:4");
NodeMaintenanceStatsTracker statsTracker;
- StateChecker::Context c(distributor_component(), getDistributorBucketSpace(), statsTracker, makeDocumentBucket({17, 0}));
+ StateChecker::Context c(node_context(), operation_context(),
+ getDistributorBucketSpace(), statsTracker, makeDocumentBucket({17, 0}));
ASSERT_THAT(c.idealState, ElementsAre(1, 3));
// TODO replace with UnorderedElementsAre once we can build gmock without issues
@@ -1546,7 +1557,8 @@ public:
// NOTE: resets the bucket database!
void runFor(const document::BucketId& bid) {
Checker checker;
- StateChecker::Context c(_fixture.distributor_component(), _fixture.getDistributorBucketSpace(), _statsTracker, makeDocumentBucket(bid));
+ StateChecker::Context c(_fixture.node_context(), _fixture.operation_context(),
+ _fixture.getDistributorBucketSpace(), _statsTracker, makeDocumentBucket(bid));
_result = _fixture.testStateChecker(
checker, c, false, StateCheckersTest::PendingMessage(), false);
}
diff --git a/storage/src/tests/distributor/statoperationtest.cpp b/storage/src/tests/distributor/statoperationtest.cpp
index a80eb9533bb..be4fe414b8b 100644
--- a/storage/src/tests/distributor/statoperationtest.cpp
+++ b/storage/src/tests/distributor/statoperationtest.cpp
@@ -73,7 +73,7 @@ TEST_F(StatOperationTest, bucket_list) {
StatBucketListOperation op(
getDistributorBucketSpace().getBucketDatabase(),
getIdealStateManager(),
- distributor_component().getIndex(),
+ node_context().node_index(),
msg);
op.start(_sender, framework::MilliSecTime(0));
diff --git a/storage/src/tests/distributor/twophaseupdateoperationtest.cpp b/storage/src/tests/distributor/twophaseupdateoperationtest.cpp
index dae94e41b46..ea170441a13 100644
--- a/storage/src/tests/distributor/twophaseupdateoperationtest.cpp
+++ b/storage/src/tests/distributor/twophaseupdateoperationtest.cpp
@@ -332,9 +332,9 @@ TwoPhaseUpdateOperationTest::sendUpdate(const std::string& bucketState,
msg->setCondition(options._condition);
msg->setTransportContext(std::make_unique<DummyTransportContext>());
- auto& comp = distributor_component();
return std::make_shared<TwoPhaseUpdateOperation>(
- comp, comp, comp, getDistributorBucketSpace(), msg, getDistributor().getMetrics());
+ node_context(), operation_context(), doc_selection_parser(),
+ getDistributorBucketSpace(), msg, getDistributor().getMetrics());
}
TEST_F(TwoPhaseUpdateOperationTest, simple) {
diff --git a/storage/src/tests/distributor/updateoperationtest.cpp b/storage/src/tests/distributor/updateoperationtest.cpp
index 6620cf58571..e31a5f0a768 100644
--- a/storage/src/tests/distributor/updateoperationtest.cpp
+++ b/storage/src/tests/distributor/updateoperationtest.cpp
@@ -66,9 +66,8 @@ UpdateOperationTest::sendUpdate(const std::string& bucketState, bool create_if_m
auto msg = std::make_shared<api::UpdateCommand>(makeDocumentBucket(document::BucketId(0)), update, 100);
- auto& comp = distributor_component();
return std::make_shared<UpdateOperation>(
- comp, comp, getDistributorBucketSpace(), msg, std::vector<BucketDatabase::Entry>(),
+ node_context(), operation_context(), getDistributorBucketSpace(), msg, std::vector<BucketDatabase::Entry>(),
getDistributor().getMetrics().updates);
}
diff --git a/storage/src/tests/distributor/visitoroperationtest.cpp b/storage/src/tests/distributor/visitoroperationtest.cpp
index ccbb64e8970..011bf0e81fd 100644
--- a/storage/src/tests/distributor/visitoroperationtest.cpp
+++ b/storage/src/tests/distributor/visitoroperationtest.cpp
@@ -104,8 +104,8 @@ struct VisitorOperationTest : Test, DistributorTestUtil {
const VisitorOperation::Config& config)
{
return std::make_unique<VisitorOperation>(
- distributor_component(),
- distributor_component(),
+ node_context(),
+ operation_context(),
getDistributorBucketSpace(),
msg,
config,
@@ -835,7 +835,7 @@ TEST_F(VisitorOperationTest, inconsistency_handling) {
TEST_F(VisitorOperationTest, visit_ideal_node) {
ClusterState state("distributor:1 storage:3");
- _distributor->enableClusterStateBundle(lib::ClusterStateBundle(state));
+ enable_distributor_cluster_state(lib::ClusterStateBundle(state));
// Create buckets in bucketdb
for (int i=0; i<32; i++ ) {
diff --git a/storage/src/vespa/storage/common/distributorcomponent.h b/storage/src/vespa/storage/common/distributorcomponent.h
index d5eb3fa56c8..403ffa3376c 100644
--- a/storage/src/vespa/storage/common/distributorcomponent.h
+++ b/storage/src/vespa/storage/common/distributorcomponent.h
@@ -46,7 +46,7 @@ typedef vespa::config::content::core::internal::InternalStorVisitordispatcherTyp
struct UniqueTimeCalculator {
virtual ~UniqueTimeCalculator() {}
- virtual api::Timestamp getUniqueTimestamp() = 0;
+ [[nodiscard]] virtual api::Timestamp generate_unique_timestamp() = 0;
};
struct DistributorManagedComponent
@@ -90,8 +90,8 @@ public:
DistributorComponent(DistributorComponentRegister& compReg, vespalib::stringref name);
~DistributorComponent() override;
- api::Timestamp getUniqueTimestamp() const {
- return _timeCalculator->getUniqueTimestamp();
+ [[nodiscard]] api::Timestamp getUniqueTimestamp() const {
+ return _timeCalculator->generate_unique_timestamp();
}
const DistributorConfig& getDistributorConfig() const {
return _distributorConfig;
diff --git a/storage/src/vespa/storage/distributor/CMakeLists.txt b/storage/src/vespa/storage/distributor/CMakeLists.txt
index 57d6a23c79f..7b048e9f109 100644
--- a/storage/src/vespa/storage/distributor/CMakeLists.txt
+++ b/storage/src/vespa/storage/distributor/CMakeLists.txt
@@ -6,6 +6,7 @@ vespa_add_library(storage_distributor
bucket_db_prune_elision.cpp
bucket_space_distribution_configs.cpp
bucket_space_distribution_context.cpp
+ bucket_space_state_map.cpp
bucketdbupdater.cpp
bucketgctimecalculator.cpp
bucketlistmerger.cpp
@@ -14,18 +15,21 @@ vespa_add_library(storage_distributor
distributor.cpp
distributor_bucket_space.cpp
distributor_bucket_space_repo.cpp
+ distributor_component.cpp
distributor_host_info_reporter.cpp
distributor_status.cpp
distributor_stripe.cpp
distributor_stripe_component.cpp
+ distributor_stripe_pool.cpp
+ distributor_stripe_thread.cpp
distributormessagesender.cpp
distributormetricsset.cpp
externaloperationhandler.cpp
ideal_service_layer_nodes_bundle.cpp
idealstatemanager.cpp
idealstatemetricsset.cpp
- legacy_single_stripe_accessor.cpp
messagetracker.cpp
+ multi_threaded_stripe_access_guard.cpp
nodeinfo.cpp
operation_routing_snapshot.cpp
operation_sequencer.cpp
diff --git a/storage/src/vespa/storage/distributor/bucket_space_state_map.cpp b/storage/src/vespa/storage/distributor/bucket_space_state_map.cpp
new file mode 100644
index 00000000000..63c408f7e1e
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/bucket_space_state_map.cpp
@@ -0,0 +1,67 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "bucket_space_state_map.h"
+#include <vespa/document/bucket/fixed_bucket_spaces.h>
+
+using document::BucketSpace;
+
+namespace storage::distributor {
+
+BucketSpaceState::BucketSpaceState()
+ : _cluster_state(),
+ _distribution()
+{
+}
+
+void
+BucketSpaceState::set_cluster_state(std::shared_ptr<const lib::ClusterState> cluster_state)
+{
+ _cluster_state = std::move(cluster_state);
+}
+
+void
+BucketSpaceState::set_distribution(std::shared_ptr<const lib::Distribution> distribution)
+{
+ _distribution = distribution;
+}
+
+BucketSpaceStateMap::BucketSpaceStateMap()
+ : _map()
+{
+ _map.emplace(document::FixedBucketSpaces::default_space(), std::make_unique<BucketSpaceState>());
+ _map.emplace(document::FixedBucketSpaces::global_space(), std::make_unique<BucketSpaceState>());
+}
+
+void
+BucketSpaceStateMap::set_cluster_state(std::shared_ptr<const lib::ClusterState> cluster_state)
+{
+ for (auto& space : _map) {
+ space.second->set_cluster_state(cluster_state);
+ }
+}
+
+void
+BucketSpaceStateMap::set_distribution(std::shared_ptr<const lib::Distribution> distribution)
+{
+ for (auto& space : _map) {
+ space.second->set_distribution(distribution);
+ }
+}
+
+const lib::ClusterState&
+BucketSpaceStateMap::get_cluster_state(document::BucketSpace space) const
+{
+ auto itr = _map.find(space);
+ assert(itr != _map.end());
+ return itr->second->get_cluster_state();
+}
+
+const lib::Distribution&
+BucketSpaceStateMap::get_distribution(document::BucketSpace space) const
+{
+ auto itr = _map.find(space);
+ assert(itr != _map.end());
+ return itr->second->get_distribution();
+}
+
+}
diff --git a/storage/src/vespa/storage/distributor/bucket_space_state_map.h b/storage/src/vespa/storage/distributor/bucket_space_state_map.h
new file mode 100644
index 00000000000..57eac9eac0d
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/bucket_space_state_map.h
@@ -0,0 +1,74 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <vespa/document/bucket/bucketspace.h>
+#include <vespa/persistence/spi/clusterstate.h>
+#include <vespa/vdslib/distribution/distribution.h>
+#include <cassert>
+#include <unordered_map>
+
+namespace storage::lib {
+ class ClusterState;
+ class Distribution;
+}
+
+namespace storage::distributor {
+
+/**
+ * Represents cluster state and distribution for a given bucket space.
+ * TODO STRIPE: Make DistributorBucketSpace inherit this class.
+ */
+class BucketSpaceState {
+private:
+ std::shared_ptr<const lib::ClusterState> _cluster_state;
+ std::shared_ptr<const lib::Distribution> _distribution;
+
+public:
+ explicit BucketSpaceState();
+
+ BucketSpaceState(const BucketSpaceState&) = delete;
+ BucketSpaceState& operator=(const BucketSpaceState&) = delete;
+ BucketSpaceState(BucketSpaceState&&) = delete;
+ BucketSpaceState& operator=(BucketSpaceState&&) = delete;
+
+ void set_cluster_state(std::shared_ptr<const lib::ClusterState> cluster_state);
+ void set_distribution(std::shared_ptr<const lib::Distribution> distribution);
+
+ const lib::ClusterState& get_cluster_state() const noexcept {
+ assert(_cluster_state);
+ return *_cluster_state;
+ }
+ const lib::Distribution& get_distribution() const noexcept {
+ assert(_distribution);
+ return *_distribution;
+ }
+};
+
+/**
+ * Provides mapping from bucket space to state for that space.
+ */
+class BucketSpaceStateMap {
+private:
+ using StateMap = std::unordered_map<document::BucketSpace, std::unique_ptr<BucketSpaceState>, document::BucketSpace::hash>;
+
+ StateMap _map;
+
+public:
+ explicit BucketSpaceStateMap();
+
+ BucketSpaceStateMap(const BucketSpaceStateMap&&) = delete;
+ BucketSpaceStateMap& operator=(const BucketSpaceStateMap&) = delete;
+ BucketSpaceStateMap(BucketSpaceStateMap&&) = delete;
+ BucketSpaceStateMap& operator=(BucketSpaceStateMap&&) = delete;
+
+ StateMap::const_iterator begin() const { return _map.begin(); }
+ StateMap::const_iterator end() const { return _map.end(); }
+
+ void set_cluster_state(std::shared_ptr<const lib::ClusterState> cluster_state);
+ void set_distribution(std::shared_ptr<const lib::Distribution> distribution);
+
+ const lib::ClusterState& get_cluster_state(document::BucketSpace space) const;
+ const lib::Distribution& get_distribution(document::BucketSpace space) const;
+};
+
+}
diff --git a/storage/src/vespa/storage/distributor/bucketdbupdater.cpp b/storage/src/vespa/storage/distributor/bucketdbupdater.cpp
index 90d3d24c240..e407d57fd43 100644
--- a/storage/src/vespa/storage/distributor/bucketdbupdater.cpp
+++ b/storage/src/vespa/storage/distributor/bucketdbupdater.cpp
@@ -27,36 +27,36 @@ using document::BucketSpace;
namespace storage::distributor {
-BucketDBUpdater::BucketDBUpdater(DistributorStripeInterface& owner, // FIXME STRIPE!
- DistributorMessageSender& sender,
- DistributorComponentRegister& comp_reg,
+BucketDBUpdater::BucketDBUpdater(const DistributorNodeContext& node_ctx,
+ DistributorOperationContext& op_ctx,
+ DistributorInterface& distributor_interface,
+ ChainedMessageSender& chained_sender,
+ std::shared_ptr<const lib::Distribution> bootstrap_distribution,
StripeAccessor& stripe_accessor)
- : framework::StatusReporter("temp_bucketdb", "Bucket DB Updater"), // TODO STRIPE rename once duplication is removed
+ : framework::StatusReporter("bucketdb", "Bucket DB Updater"),
_stripe_accessor(stripe_accessor),
_active_state_bundle(lib::ClusterState()),
- _dummy_mutable_bucket_space_repo(std::make_unique<DistributorBucketSpaceRepo>(owner.getDistributorIndex())),
- _dummy_read_only_bucket_space_repo(std::make_unique<DistributorBucketSpaceRepo>(owner.getDistributorIndex())),
- _distributor_component(owner, *_dummy_mutable_bucket_space_repo, *_dummy_read_only_bucket_space_repo, comp_reg, "Bucket DB Updater"),
- _node_ctx(_distributor_component),
- _op_ctx(_distributor_component),
- _distributor_interface(_distributor_component.getDistributor()),
+ _node_ctx(node_ctx),
+ _op_ctx(op_ctx),
+ _distributor_interface(distributor_interface),
_pending_cluster_state(),
_history(),
- _sender(sender),
+ _sender(distributor_interface),
+ _chained_sender(chained_sender),
_outdated_nodes_map(),
_transition_timer(_node_ctx.clock()),
_stale_reads_enabled(false)
{
// FIXME STRIPE top-level Distributor needs a proper way to track the current cluster state bundle!
propagate_active_state_bundle_internally();
- bootstrap_distribution_config(_distributor_component.getDistribution());
+ bootstrap_distribution_config(bootstrap_distribution);
}
BucketDBUpdater::~BucketDBUpdater() = default;
void
BucketDBUpdater::propagate_active_state_bundle_internally() {
- for (auto* repo : {_dummy_mutable_bucket_space_repo.get(), _dummy_read_only_bucket_space_repo.get()}) {
+ for (auto* repo : {&_op_ctx.bucket_space_repo(), &_op_ctx.read_only_bucket_space_repo()}) {
for (auto& iter : *repo) {
iter.second->setClusterState(_active_state_bundle.getDerivedClusterState(iter.first));
}
@@ -66,7 +66,7 @@ BucketDBUpdater::propagate_active_state_bundle_internally() {
void
BucketDBUpdater::bootstrap_distribution_config(std::shared_ptr<const lib::Distribution> distribution) {
auto global_distr = GlobalBucketSpaceDistributionConverter::convert_to_global(*distribution);
- for (auto* repo : {_dummy_mutable_bucket_space_repo.get(), _dummy_read_only_bucket_space_repo.get()}) {
+ for (auto* repo : {&_op_ctx.bucket_space_repo(), &_op_ctx.read_only_bucket_space_repo()}) {
repo->get(document::FixedBucketSpaces::default_space()).setDistribution(distribution);
repo->get(document::FixedBucketSpaces::global_space()).setDistribution(global_distr);
}
@@ -74,6 +74,18 @@ BucketDBUpdater::bootstrap_distribution_config(std::shared_ptr<const lib::Distri
// ... need to take a guard if so, so can probably not be done at ctor time..?
}
+void
+BucketDBUpdater::propagate_distribution_config(const BucketSpaceDistributionConfigs& configs) {
+ for (auto* repo : {&_op_ctx.bucket_space_repo(), &_op_ctx.read_only_bucket_space_repo()}) {
+ if (auto distr = configs.get_or_nullptr(document::FixedBucketSpaces::default_space())) {
+ repo->get(document::FixedBucketSpaces::default_space()).setDistribution(distr);
+ }
+ if (auto distr = configs.get_or_nullptr(document::FixedBucketSpaces::global_space())) {
+ repo->get(document::FixedBucketSpaces::global_space()).setDistribution(distr);
+ }
+ }
+}
+
// FIXME what about bucket DB replica update timestamp allocations?! Replace with u64 counter..?
// Must at the very least ensure we use stripe-local TS generation for DB inserts...! i.e. no global TS
// Or do we have to touch these at all here? Just defer all this via stripe interface?
@@ -108,7 +120,7 @@ BucketDBUpdater::remove_superfluous_buckets(
const lib::ClusterStateBundle& new_state,
bool is_distribution_config_change)
{
- const char* up_states = _op_ctx.storage_node_up_states();
+ const char* up_states = storage_node_up_states();
// TODO STRIPE explicit space -> config mapping, don't get via repo
// ... but we need to get the current cluster state per space..!
for (auto& elem : _op_ctx.bucket_space_repo()) {
@@ -172,24 +184,25 @@ BucketDBUpdater::ensure_transition_timer_started()
void
BucketDBUpdater::complete_transition_timer()
{
- _distributor_interface.getMetrics()
+ _distributor_interface.metrics()
.stateTransitionTime.addValue(_transition_timer.getElapsedTimeAsDouble());
}
void
BucketDBUpdater::storage_distribution_changed(const BucketSpaceDistributionConfigs& configs)
{
+ propagate_distribution_config(configs);
ensure_transition_timer_started();
auto guard = _stripe_accessor.rendezvous_and_hold_all();
// FIXME STRIPE might this cause a mismatch with the component stuff's own distribution config..?!
guard->update_distribution_config(configs);
- remove_superfluous_buckets(*guard, _op_ctx.cluster_state_bundle(), true);
+ remove_superfluous_buckets(*guard, _active_state_bundle, true);
auto clusterInfo = std::make_shared<const SimpleClusterInformation>(
_node_ctx.node_index(),
- _op_ctx.cluster_state_bundle(),
- _op_ctx.storage_node_up_states());
+ _active_state_bundle,
+ storage_node_up_states());
_pending_cluster_state = PendingClusterState::createForDistributionChange(
_node_ctx.clock(),
std::move(clusterInfo),
@@ -205,7 +218,7 @@ void
BucketDBUpdater::reply_to_previous_pending_cluster_state_if_any()
{
if (_pending_cluster_state.get() && _pending_cluster_state->hasCommand()) {
- _distributor_interface.getMessageSender().sendUp(
+ _chained_sender.sendUp(
std::make_shared<api::SetSystemStateReply>(*_pending_cluster_state->getCommand()));
}
}
@@ -217,7 +230,7 @@ BucketDBUpdater::reply_to_activation_with_actual_version(
{
auto reply = std::make_shared<api::ActivateClusterStateVersionReply>(cmd);
reply->setActualVersion(actualVersion);
- _distributor_interface.getMessageSender().sendUp(reply); // TODO let API accept rvalues
+ _chained_sender.sendUp(reply); // TODO let API accept rvalues
}
bool
@@ -245,8 +258,8 @@ BucketDBUpdater::onSetSystemState(
auto clusterInfo = std::make_shared<const SimpleClusterInformation>(
_node_ctx.node_index(),
- _op_ctx.cluster_state_bundle(),
- _op_ctx.storage_node_up_states());
+ _active_state_bundle,
+ storage_node_up_states());
_pending_cluster_state = PendingClusterState::createForClusterStateChange(
_node_ctx.clock(),
std::move(clusterInfo),
@@ -257,7 +270,7 @@ BucketDBUpdater::onSetSystemState(
_op_ctx.generate_unique_timestamp()); // FIXME STRIPE must be atomic across all threads
_outdated_nodes_map = _pending_cluster_state->getOutdatedNodesMap();
- _distributor_interface.getMetrics().set_cluster_state_processing_time.addValue(
+ _distributor_interface.metrics().set_cluster_state_processing_time.addValue(
process_timer.getElapsedTimeAsDouble());
guard->set_pending_cluster_state_bundle(_pending_cluster_state->getNewClusterStateBundle());
@@ -358,7 +371,7 @@ BucketDBUpdater::process_completed_pending_cluster_state(StripeAccessGuard& guar
// taken effect via activation. External operation handler will keep operations from
// actually being scheduled until state has been activated. The external operation handler
// needs to be explicitly aware of the case where no state has yet to be activated.
- _distributor_interface.getMessageSender().sendDown(_pending_cluster_state->getCommand());
+ _chained_sender.sendDown(_pending_cluster_state->getCommand());
_pending_cluster_state->clearCommand();
return;
}
@@ -379,14 +392,13 @@ BucketDBUpdater::activate_pending_cluster_state(StripeAccessGuard& guard)
LOG(debug, "Activating pending cluster state version %u", _pending_cluster_state->clusterStateVersion());
enable_current_cluster_state_bundle_in_distributor_and_stripes(guard);
if (_pending_cluster_state->hasCommand()) {
- _distributor_interface.getMessageSender().sendDown(_pending_cluster_state->getCommand());
+ _chained_sender.sendDown(_pending_cluster_state->getCommand());
}
add_current_state_to_cluster_state_history();
} else {
LOG(debug, "Activating pending distribution config");
// TODO distribution changes cannot currently be deferred as they are not
// initiated by the cluster controller!
- _distributor_interface.notifyDistributionChangeEnabled(); // TODO factor these two out into one func?
guard.notify_distribution_change_enabled();
}
@@ -397,7 +409,7 @@ BucketDBUpdater::activate_pending_cluster_state(StripeAccessGuard& guard)
complete_transition_timer();
guard.clear_read_only_bucket_repo_databases();
- _distributor_interface.getMetrics().activate_cluster_state_processing_time.addValue(
+ _distributor_interface.metrics().activate_cluster_state_processing_time.addValue(
process_timer.getElapsedTimeAsDouble());
}
@@ -412,16 +424,11 @@ BucketDBUpdater::enable_current_cluster_state_bundle_in_distributor_and_stripes(
LOG(debug, "BucketDBUpdater finished processing state %s",
state.getBaselineClusterState()->toString().c_str());
- // First enable the cluster state for the _top-level_ distributor component.
- _distributor_interface.enableClusterStateBundle(state);
- // And then subsequently for all underlying stripes. Technically the order doesn't matter
- // since all threads are blocked at this point.
guard.enable_cluster_state_bundle(state);
}
void BucketDBUpdater::simulate_cluster_state_bundle_activation(const lib::ClusterStateBundle& activated_state) {
auto guard = _stripe_accessor.rendezvous_and_hold_all();
- _distributor_interface.enableClusterStateBundle(activated_state);
guard->enable_cluster_state_bundle(activated_state);
_active_state_bundle = activated_state;
@@ -476,7 +483,7 @@ BucketDBUpdater::report_xml_status(vespalib::xml::XmlOutputStream& xos,
using namespace vespalib::xml;
xos << XmlTag("bucketdb")
<< XmlTag("systemstate_active")
- << XmlContent(_op_ctx.cluster_state_bundle().getBaselineClusterState()->toString())
+ << XmlContent(_active_state_bundle.getBaselineClusterState()->toString())
<< XmlEndTag();
if (_pending_cluster_state) {
xos << *_pending_cluster_state;
@@ -489,6 +496,13 @@ BucketDBUpdater::report_xml_status(vespalib::xml::XmlOutputStream& xos,
<< XmlAttribute("processingtime", i->_processingTime)
<< XmlEndTag();
}
+ xos << XmlEndTag()
+ << XmlTag("single_bucket_requests");
+ auto guard = _stripe_accessor.rendezvous_and_hold_all();
+ guard->report_single_bucket_requests(xos);
+ xos << XmlEndTag()
+ << XmlTag("delayed_single_bucket_requests");
+ guard->report_delayed_single_bucket_requests(xos);
xos << XmlEndTag() << XmlEndTag();
return "";
}
diff --git a/storage/src/vespa/storage/distributor/bucketdbupdater.h b/storage/src/vespa/storage/distributor/bucketdbupdater.h
index 04962e3af9b..1ec48aa4d63 100644
--- a/storage/src/vespa/storage/distributor/bucketdbupdater.h
+++ b/storage/src/vespa/storage/distributor/bucketdbupdater.h
@@ -28,7 +28,7 @@ namespace storage::distributor {
struct BucketSpaceDistributionConfigs;
class BucketSpaceDistributionContext;
-class DistributorStripeInterface;
+class DistributorInterface;
class StripeAccessor;
class StripeAccessGuard;
@@ -37,9 +37,11 @@ class BucketDBUpdater : public framework::StatusReporter,
{
public:
using OutdatedNodesMap = dbtransition::OutdatedNodesMap;
- BucketDBUpdater(DistributorStripeInterface& owner,
- DistributorMessageSender& sender,
- DistributorComponentRegister& comp_reg,
+ BucketDBUpdater(const DistributorNodeContext& node_ctx,
+ DistributorOperationContext& op_ctx,
+ DistributorInterface& distributor_interface,
+ ChainedMessageSender& chained_sender,
+ std::shared_ptr<const lib::Distribution> bootstrap_distribution,
StripeAccessor& stripe_accessor);
~BucketDBUpdater() override;
@@ -55,12 +57,11 @@ public:
void resend_delayed_messages();
void storage_distribution_changed(const BucketSpaceDistributionConfigs& configs);
void bootstrap_distribution_config(std::shared_ptr<const lib::Distribution>);
+ void propagate_distribution_config(const BucketSpaceDistributionConfigs& configs);
vespalib::string report_xml_status(vespalib::xml::XmlOutputStream& xos, const framework::HttpUrlPath&) const;
void print(std::ostream& out, bool verbose, const std::string& indent) const;
- const DistributorNodeContext& node_context() const { return _node_ctx; }
- DistributorStripeOperationContext& operation_context() { return _op_ctx; }
void set_stale_reads_enabled(bool enabled) noexcept {
_stale_reads_enabled.store(enabled, std::memory_order_relaxed);
@@ -106,16 +107,14 @@ private:
// TODO STRIPE remove once distributor component dependencies have been pruned
StripeAccessor& _stripe_accessor;
lib::ClusterStateBundle _active_state_bundle;
- std::unique_ptr<DistributorBucketSpaceRepo> _dummy_mutable_bucket_space_repo;
- std::unique_ptr<DistributorBucketSpaceRepo> _dummy_read_only_bucket_space_repo;
- DistributorStripeComponent _distributor_component;
const DistributorNodeContext& _node_ctx;
- DistributorStripeOperationContext& _op_ctx;
- DistributorStripeInterface& _distributor_interface;
+ DistributorOperationContext& _op_ctx;
+ DistributorInterface& _distributor_interface;
std::unique_ptr<PendingClusterState> _pending_cluster_state;
std::list<PendingClusterState::Summary> _history;
DistributorMessageSender& _sender;
+ ChainedMessageSender& _chained_sender;
OutdatedNodesMap _outdated_nodes_map;
framework::MilliSecTimer _transition_timer;
std::atomic<bool> _stale_reads_enabled;
diff --git a/storage/src/vespa/storage/distributor/distributor.cpp b/storage/src/vespa/storage/distributor/distributor.cpp
index 8dd414e8def..47f7fee5873 100644
--- a/storage/src/vespa/storage/distributor/distributor.cpp
+++ b/storage/src/vespa/storage/distributor/distributor.cpp
@@ -7,9 +7,11 @@
#include "distributor_bucket_space.h"
#include "distributor_status.h"
#include "distributor_stripe.h"
+#include "distributor_stripe_pool.h"
+#include "distributor_stripe_thread.h"
#include "distributormetricsset.h"
#include "idealstatemetricsset.h"
-#include "legacy_single_stripe_accessor.h"
+#include "multi_threaded_stripe_access_guard.h"
#include "operation_sequencer.h"
#include "ownership_transfer_safe_time_point_calculator.h"
#include "throttlingoperationstarter.h"
@@ -18,10 +20,12 @@
#include <vespa/storage/common/hostreporter/hostinfo.h>
#include <vespa/storage/common/node_identity.h>
#include <vespa/storage/common/nodestateupdater.h>
+#include <vespa/storage/config/distributorconfiguration.h>
#include <vespa/storage/distributor/maintenance/simplebucketprioritydatabase.h>
#include <vespa/storageframework/generic/status/xmlstatusreporter.h>
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vespalib/util/memoryusage.h>
+#include <vespa/vespalib/util/time.h>
#include <algorithm>
#include <vespa/log/log.h>
@@ -44,6 +48,7 @@ namespace storage::distributor {
Distributor::Distributor(DistributorComponentRegister& compReg,
const NodeIdentity& node_identity,
framework::TickingThreadPool& threadPool,
+ DistributorStripePool& stripe_pool,
DoneInitializeHandler& doneInitHandler,
uint32_t num_distributor_stripes,
HostInfo& hostInfoReporterRegistrar,
@@ -53,13 +58,26 @@ Distributor::Distributor(DistributorComponentRegister& compReg,
_comp_reg(compReg),
_metrics(std::make_shared<DistributorMetricSet>()),
_messageSender(messageSender),
+ _use_legacy_mode(num_distributor_stripes == 0),
_stripe(std::make_unique<DistributorStripe>(compReg, *_metrics, node_identity, threadPool,
- doneInitHandler, *this, (num_distributor_stripes == 0))),
- _stripe_accessor(std::make_unique<LegacySingleStripeAccessor>(*_stripe)),
- _component(compReg, "distributor"),
+ doneInitHandler, *this, *this, _use_legacy_mode)),
+ _stripe_pool(stripe_pool),
+ _stripes(),
+ _stripe_accessor(),
+ _message_queue(),
+ _fetched_messages(),
+ _component(*this, compReg, "distributor"),
+ _total_config(_component.total_distributor_config_sp()),
_bucket_db_updater(),
_distributorStatusDelegate(compReg, *this, *this),
+ _bucket_db_status_delegate(),
_threadPool(threadPool),
+ _status_to_do(),
+ _fetched_status_requests(),
+ _stripe_scan_notify_mutex(),
+ _stripe_scan_stats(),
+ _last_host_info_send_time(),
+ _host_info_send_delay(1000ms),
_tickResult(framework::ThreadWaitInfo::NO_MORE_CRITICAL_WORK_KNOWN),
_metricUpdateHook(*this),
_hostInfoReporter(*this, *this),
@@ -69,13 +87,20 @@ Distributor::Distributor(DistributorComponentRegister& compReg,
{
_component.registerMetric(*_metrics);
_component.registerMetricUpdateHook(_metricUpdateHook, framework::SecondTime(0));
- if (num_distributor_stripes > 0) {
+ if (!_use_legacy_mode) {
LOG(info, "Setting up distributor with %u stripes", num_distributor_stripes); // TODO STRIPE remove once legacy gone
- // FIXME STRIPE using the singular stripe here is a temporary Hack McHack Deluxe 3000!
- _bucket_db_updater = std::make_unique<BucketDBUpdater>(*_stripe, *_stripe, _comp_reg, *_stripe_accessor);
+ _stripe_accessor = std::make_unique<MultiThreadedStripeAccessor>(_stripe_pool);
+ _bucket_db_updater = std::make_unique<BucketDBUpdater>(_component, _component,
+ *this, *this,
+ _component.getDistribution(),
+ *_stripe_accessor);
+ _stripes.emplace_back(std::move(_stripe));
+ _stripe_scan_stats.resize(num_distributor_stripes);
+ _distributorStatusDelegate.registerStatusPage();
+ _bucket_db_status_delegate = std::make_unique<StatusReporterDelegate>(compReg, *this, *_bucket_db_updater);
+ _bucket_db_status_delegate->registerStatusPage();
}
- _hostInfoReporter.enableReporting(getConfig().getEnableHostInfoReporting());
- _distributorStatusDelegate.registerStatusPage();
+ _hostInfoReporter.enableReporting(config().getEnableHostInfoReporting());
hostInfoReporterRegistrar.registerReporter(&_hostInfoReporter);
propagateDefaultDistribution(_component.getDistribution());
};
@@ -86,43 +111,68 @@ Distributor::~Distributor()
closeNextLink();
}
+// TODO STRIPE remove
+DistributorStripe&
+Distributor::first_stripe() noexcept {
+ assert(_stripes.size() == 1);
+ return *_stripes[0];
+}
+
+// TODO STRIPE remove
+const DistributorStripe&
+Distributor::first_stripe() const noexcept {
+ assert(_stripes.size() == 1);
+ return *_stripes[0];
+}
+
+// TODO STRIPE figure out how to handle inspection functions used by tests when legacy mode no longer exists.
+// All functions below that assert on _use_legacy_mode are only currently used by tests
+
bool
Distributor::isInRecoveryMode() const noexcept {
+ assert(_use_legacy_mode); // TODO STRIPE
return _stripe->isInRecoveryMode();
}
const PendingMessageTracker&
Distributor::getPendingMessageTracker() const {
+ assert(_use_legacy_mode); // TODO STRIPE
return _stripe->getPendingMessageTracker();
}
PendingMessageTracker&
Distributor::getPendingMessageTracker() {
+ assert(_use_legacy_mode); // TODO STRIPE
return _stripe->getPendingMessageTracker();
}
DistributorBucketSpaceRepo&
Distributor::getBucketSpaceRepo() noexcept {
+ assert(_use_legacy_mode); // TODO STRIPE
return _stripe->getBucketSpaceRepo();
}
const DistributorBucketSpaceRepo&
Distributor::getBucketSpaceRepo() const noexcept {
+ assert(_use_legacy_mode); // TODO STRIPE
return _stripe->getBucketSpaceRepo();
}
DistributorBucketSpaceRepo&
Distributor::getReadOnlyBucketSpaceRepo() noexcept {
+ assert(_use_legacy_mode); // TODO STRIPE
return _stripe->getReadOnlyBucketSpaceRepo();
}
const DistributorBucketSpaceRepo&
Distributor::getReadyOnlyBucketSpaceRepo() const noexcept {
+ assert(_use_legacy_mode); // TODO STRIPE
return _stripe->getReadOnlyBucketSpaceRepo();;
}
storage::distributor::DistributorStripeComponent&
Distributor::distributor_component() noexcept {
+ assert(_use_legacy_mode); // TODO STRIPE
// TODO STRIPE We need to grab the stripe's component since tests like to access
// these things uncomfortably directly.
return _stripe->_component;
@@ -130,46 +180,55 @@ Distributor::distributor_component() noexcept {
StripeBucketDBUpdater&
Distributor::bucket_db_updater() {
+ assert(_use_legacy_mode); // TODO STRIPE
return _stripe->bucket_db_updater();
}
const StripeBucketDBUpdater&
Distributor::bucket_db_updater() const {
+ assert(_use_legacy_mode); // TODO STRIPE
return _stripe->bucket_db_updater();
}
IdealStateManager&
Distributor::ideal_state_manager() {
+ assert(_use_legacy_mode); // TODO STRIPE
return _stripe->ideal_state_manager();
}
const IdealStateManager&
Distributor::ideal_state_manager() const {
+ assert(_use_legacy_mode); // TODO STRIPE
return _stripe->ideal_state_manager();
}
ExternalOperationHandler&
Distributor::external_operation_handler() {
+ assert(_use_legacy_mode); // TODO STRIPE
return _stripe->external_operation_handler();
}
const ExternalOperationHandler&
Distributor::external_operation_handler() const {
+ assert(_use_legacy_mode); // TODO STRIPE
return _stripe->external_operation_handler();
}
BucketDBMetricUpdater&
Distributor::bucket_db_metric_updater() const noexcept {
+ assert(_use_legacy_mode); // TODO STRIPE
return _stripe->_bucketDBMetricUpdater;
}
const DistributorConfiguration&
Distributor::getConfig() const {
+ assert(_use_legacy_mode); // TODO STRIPE
return _stripe->getConfig();
}
std::chrono::steady_clock::duration
Distributor::db_memory_sample_interval() const noexcept {
+ assert(_use_legacy_mode); // TODO STRIPE
return _stripe->db_memory_sample_interval();
}
@@ -192,6 +251,10 @@ Distributor::onOpen()
if (_component.getDistributorConfig().startDistributorThread) {
_threadPool.addThread(*this);
_threadPool.start(_component.getThreadPool());
+ if (!_use_legacy_mode) {
+ std::vector<TickableStripe*> pool_stripes({_stripes[0].get()});
+ _stripe_pool.start(pool_stripes);
+ }
} else {
LOG(warning, "Not starting distributor thread as it's configured to "
"run. Unless you are just running a test tool, this is a "
@@ -200,9 +263,22 @@ Distributor::onOpen()
}
void Distributor::onClose() {
+ // Note: In a running system this function is called by the main thread in StorageApp as part of shutdown.
+ // The distributor and stripe thread pools are already stopped at this point.
LOG(debug, "Distributor::onClose invoked");
- _stripe->flush_and_close();
- if (_bucket_db_updater) {
+ if (_use_legacy_mode) {
+ _stripe->flush_and_close();
+ } else {
+ // Tests may run with multiple stripes but without threads (for determinism's sake),
+ // so only try to flush stripes if a pool is running.
+ // TODO STRIPE probably also need to flush when running tests to handle any explicit close-tests.
+ if (_stripe_pool.stripe_count() > 0) {
+ assert(_stripe_pool.is_stopped());
+ for (auto& thread : _stripe_pool) {
+ thread->stripe().flush_and_close();
+ }
+ }
+ assert(_bucket_db_updater);
_bucket_db_updater->flush();
}
}
@@ -248,22 +324,31 @@ bool should_be_handled_by_top_level_bucket_db_updater(const api::StorageMessage&
bool
Distributor::onDown(const std::shared_ptr<api::StorageMessage>& msg)
{
- // FIXME STRIPE this MUST be in a separate thread to enforce processing in a single thread
- // regardless of what RPC thread (comm mgr, FRT...) this is called from!
- if (_bucket_db_updater && should_be_handled_by_top_level_bucket_db_updater(*msg)) {
- return msg->callHandler(*_bucket_db_updater, msg);
- }
// TODO STRIPE can we route both requests and responses that are BucketCommand|Reply based on their bucket alone?
// that covers most operations already...
- return _stripe->handle_or_enqueue_message(msg);
+ if (_use_legacy_mode) {
+ return _stripe->handle_or_enqueue_message(msg);
+ } else {
+ if (should_be_handled_by_top_level_bucket_db_updater(*msg)) {
+ dispatch_to_main_distributor_thread_queue(msg);
+ return true;
+ }
+ assert(_stripes.size() == 1);
+ assert(_stripe_pool.stripe_count() == 1);
+ // TODO STRIPE correct routing with multiple stripes
+ bool handled = first_stripe().handle_or_enqueue_message(msg);
+ if (handled) {
+ _stripe_pool.stripe_thread(0).notify_event_has_triggered();
+ }
+ return handled;
+ }
}
bool
Distributor::handleReply(const std::shared_ptr<api::StorageReply>& reply)
{
- if (_bucket_db_updater && should_be_handled_by_top_level_bucket_db_updater(*reply)) {
- return reply->callHandler(*_bucket_db_updater, reply);
- }
+ // TODO STRIPE this is used by tests. Do we need to invoke top-level BucketDBUpdater for any of them?
+ assert(_use_legacy_mode);
return _stripe->handleReply(reply);
}
@@ -271,12 +356,32 @@ Distributor::handleReply(const std::shared_ptr<api::StorageReply>& reply)
bool
Distributor::handleMessage(const std::shared_ptr<api::StorageMessage>& msg)
{
+ assert(_use_legacy_mode); // TODO STRIPE
return _stripe->handleMessage(msg);
}
+const DistributorConfiguration&
+Distributor::config() const
+{
+ return *_total_config;
+}
+
+void
+Distributor::sendCommand(const std::shared_ptr<api::StorageCommand>& cmd)
+{
+ sendUp(cmd);
+}
+
+void
+Distributor::sendReply(const std::shared_ptr<api::StorageReply>& reply)
+{
+ sendUp(reply);
+}
+
const lib::ClusterStateBundle&
Distributor::getClusterStateBundle() const
{
+ assert(_use_legacy_mode); // TODO STRIPE
// TODO STRIPE must offer a single unifying state across stripes
return _stripe->getClusterStateBundle();
}
@@ -284,6 +389,7 @@ Distributor::getClusterStateBundle() const
void
Distributor::enableClusterStateBundle(const lib::ClusterStateBundle& state)
{
+ assert(_use_legacy_mode); // TODO STRIPE
// TODO STRIPE make test injection/force-function
_stripe->enableClusterStateBundle(state);
}
@@ -291,7 +397,7 @@ Distributor::enableClusterStateBundle(const lib::ClusterStateBundle& state)
void
Distributor::storageDistributionChanged()
{
- if (_bucket_db_updater) {
+ if (!_use_legacy_mode) {
if (!_distribution || (*_component.getDistribution() != *_distribution)) {
LOG(debug, "Distribution changed to %s, must re-fetch bucket information",
_component.getDistribution()->toString().c_str());
@@ -310,7 +416,7 @@ Distributor::storageDistributionChanged()
void
Distributor::enableNextDistribution()
{
- if (_bucket_db_updater) {
+ if (!_use_legacy_mode) {
if (_next_distribution) {
_distribution = _next_distribution;
_next_distribution = std::shared_ptr<lib::Distribution>();
@@ -328,113 +434,314 @@ void
Distributor::propagateDefaultDistribution(
std::shared_ptr<const lib::Distribution> distribution)
{
- // TODO STRIPE top-level bucket DB updater
- _stripe->propagateDefaultDistribution(std::move(distribution));
+ // TODO STRIPE cannot directly access stripe when not in legacy mode!
+ if (_use_legacy_mode) {
+ _stripe->propagateDefaultDistribution(std::move(distribution));
+ } else {
+ // Should only be called at ctor time, at which point the pool is not yet running.
+ assert(_stripe_pool.stripe_count() == 0);
+ assert(_stripes.size() == 1); // TODO STRIPE all the stripes yes
+ auto new_configs = BucketSpaceDistributionConfigs::from_default_distribution(std::move(distribution));
+ for (auto& stripe : _stripes) {
+ stripe->update_distribution_config(new_configs);
+ }
+ }
}
std::unordered_map<uint16_t, uint32_t>
Distributor::getMinReplica() const
{
// TODO STRIPE merged snapshot from all stripes
- return _stripe->getMinReplica();
+ if (_use_legacy_mode) {
+ return _stripe->getMinReplica();
+ } else {
+ return first_stripe().getMinReplica();
+ }
}
BucketSpacesStatsProvider::PerNodeBucketSpacesStats
Distributor::getBucketSpacesStats() const
{
// TODO STRIPE merged snapshot from all stripes
- return _stripe->getBucketSpacesStats();
+ if (_use_legacy_mode) {
+ return _stripe->getBucketSpacesStats();
+ } else {
+ return first_stripe().getBucketSpacesStats();
+ }
}
SimpleMaintenanceScanner::PendingMaintenanceStats
Distributor::pending_maintenance_stats() const {
// TODO STRIPE merged snapshot from all stripes
- return _stripe->pending_maintenance_stats();
+ if (_use_legacy_mode) {
+ return _stripe->pending_maintenance_stats();
+ } else {
+ return first_stripe().pending_maintenance_stats();
+ }
}
void
Distributor::propagateInternalScanMetricsToExternal()
{
- _stripe->propagateInternalScanMetricsToExternal();
+ // TODO STRIPE propagate to all stripes
+ // TODO STRIPE reconsider metric wiring...
+ if (_use_legacy_mode) {
+ _stripe->propagateInternalScanMetricsToExternal();
+ } else {
+ first_stripe().propagateInternalScanMetricsToExternal();
+ }
}
void
Distributor::scanAllBuckets()
{
+ assert(_use_legacy_mode); // TODO STRIPE
_stripe->scanAllBuckets();
}
+void
+Distributor::dispatch_to_main_distributor_thread_queue(const std::shared_ptr<api::StorageMessage>& msg)
+{
+ MBUS_TRACE(msg->getTrace(), 9, "Distributor: Added to main thread message queue");
+ framework::TickingLockGuard guard(_threadPool.freezeCriticalTicks());
+ _message_queue.emplace_back(msg);
+ guard.broadcast();
+}
+
+void
+Distributor::fetch_external_messages()
+{
+ assert(!_use_legacy_mode);
+ assert(_fetched_messages.empty());
+ _fetched_messages.swap(_message_queue);
+}
+
+void
+Distributor::process_fetched_external_messages()
+{
+ assert(!_use_legacy_mode);
+ for (auto& msg : _fetched_messages) {
+ MBUS_TRACE(msg->getTrace(), 9, "Distributor: Processing message in main thread");
+ if (!msg->callHandler(*_bucket_db_updater, msg)) {
+ MBUS_TRACE(msg->getTrace(), 9, "Distributor: Not handling it. Sending further down");
+ sendDown(msg);
+ }
+ }
+ if (!_fetched_messages.empty()) {
+ _fetched_messages.clear();
+ signal_work_was_done();
+ }
+}
+
framework::ThreadWaitInfo
Distributor::doCriticalTick(framework::ThreadIndex idx)
{
_tickResult = framework::ThreadWaitInfo::NO_MORE_CRITICAL_WORK_KNOWN;
- if (_bucket_db_updater) {
+ if (!_use_legacy_mode) {
enableNextDistribution();
+ fetch_status_requests();
+ fetch_external_messages();
}
// Propagates any new configs down to stripe(s)
enableNextConfig();
- _stripe->doCriticalTick(idx);
- _tickResult.merge(_stripe->_tickResult);
+ if (_use_legacy_mode) {
+ _stripe->doCriticalTick(idx);
+ _tickResult.merge(_stripe->_tickResult);
+ }
return _tickResult;
}
framework::ThreadWaitInfo
Distributor::doNonCriticalTick(framework::ThreadIndex idx)
{
- if (_bucket_db_updater) {
+ if (_use_legacy_mode) {
+ _stripe->doNonCriticalTick(idx);
+ _tickResult = _stripe->_tickResult;
+ } else {
+ _tickResult = framework::ThreadWaitInfo::NO_MORE_CRITICAL_WORK_KNOWN;
+ handle_status_requests();
+ process_fetched_external_messages();
+ send_host_info_if_appropriate();
_bucket_db_updater->resend_delayed_messages();
}
- // TODO STRIPE stripes need their own thread loops!
- _stripe->doNonCriticalTick(idx);
- _tickResult = _stripe->_tickResult;
return _tickResult;
}
void
-Distributor::enableNextConfig()
+Distributor::enableNextConfig() // TODO STRIPE rename to enable_next_config_if_changed()?
{
// Only lazily trigger a config propagation and internal update if something has _actually changed_.
if (_component.internal_config_generation() != _current_internal_config_generation) {
- if (_bucket_db_updater) {
+ _total_config = _component.total_distributor_config_sp();
+ if (!_use_legacy_mode) {
auto guard = _stripe_accessor->rendezvous_and_hold_all();
guard->update_total_distributor_config(_component.total_distributor_config_sp());
} else {
_stripe->update_total_distributor_config(_component.total_distributor_config_sp());
}
- _hostInfoReporter.enableReporting(getConfig().getEnableHostInfoReporting());
+ _hostInfoReporter.enableReporting(config().getEnableHostInfoReporting());
_current_internal_config_generation = _component.internal_config_generation();
}
- if (!_bucket_db_updater) {
+ if (_use_legacy_mode) {
// TODO STRIPE remove these once tests are fixed to trigger reconfig properly
_hostInfoReporter.enableReporting(getConfig().getEnableHostInfoReporting());
_stripe->enableNextConfig(); // TODO STRIPE avoid redundant call
}
}
+
+void
+Distributor::notify_stripe_wants_to_send_host_info(uint16_t stripe_index)
+{
+ LOG(debug, "Stripe %u has signalled an intent to send host info out-of-band", stripe_index);
+ std::lock_guard lock(_stripe_scan_notify_mutex);
+ assert(!_use_legacy_mode);
+ assert(stripe_index < _stripe_scan_stats.size());
+ auto& stats = _stripe_scan_stats[stripe_index];
+ stats.wants_to_send_host_info = true;
+ stats.has_reported_in_at_least_once = true;
+ // TODO STRIPE consider if we want to wake up distributor thread here. Will be rechecked
+ // every nth millisecond anyway. Not really an issue for out-of-band CC notifications.
+}
+
+bool
+Distributor::may_send_host_info_on_behalf_of_stripes([[maybe_unused]] std::lock_guard<std::mutex>& held_lock) noexcept
+{
+ bool any_stripe_wants_to_send = false;
+ for (const auto& stats : _stripe_scan_stats) {
+ if (!stats.has_reported_in_at_least_once) {
+ // If not all stripes have reported in at least once, they have not all completed their
+ // first recovery mode pass through their DBs. To avoid sending partial stats to the cluster
+ // controller, we wait with sending the first out-of-band host info reply until they have all
+ // reported in.
+ return false;
+ }
+ any_stripe_wants_to_send |= stats.wants_to_send_host_info;
+ }
+ return any_stripe_wants_to_send;
+}
+
+void
+Distributor::send_host_info_if_appropriate()
+{
+ const auto now = _component.getClock().getMonotonicTime();
+ std::lock_guard lock(_stripe_scan_notify_mutex);
+
+ if (may_send_host_info_on_behalf_of_stripes(lock)) {
+ if ((now - _last_host_info_send_time) >= _host_info_send_delay) {
+ LOG(debug, "Sending GetNodeState replies to cluster controllers on behalf of stripes");
+ _component.getStateUpdater().immediately_send_get_node_state_replies();
+ _last_host_info_send_time = now;
+ for (auto& stats : _stripe_scan_stats) {
+ stats.wants_to_send_host_info = false;
+ }
+ }
+ }
+}
+
+void
+Distributor::fetch_status_requests()
+{
+ if (_fetched_status_requests.empty()) {
+ _fetched_status_requests.swap(_status_to_do);
+ }
+}
+
+void
+Distributor::handle_status_requests()
+{
+ for (auto& s : _fetched_status_requests) {
+ s->getReporter().reportStatus(s->getStream(), s->getPath());
+ s->notifyCompleted();
+ }
+ if (!_fetched_status_requests.empty()) {
+ _fetched_status_requests.clear();
+ signal_work_was_done();
+ }
+}
+
+void
+Distributor::signal_work_was_done()
+{
+ _tickResult = framework::ThreadWaitInfo::MORE_WORK_ENQUEUED;
+}
+
vespalib::string
Distributor::getReportContentType(const framework::HttpUrlPath& path) const
{
- return _stripe->getReportContentType(path);
+ assert(!_use_legacy_mode);
+ if (path.hasAttribute("page")) {
+ if (path.getAttribute("page") == "buckets") {
+ return "text/html";
+ } else {
+ return "application/xml";
+ }
+ } else {
+ return "text/html";
+ }
}
std::string
Distributor::getActiveIdealStateOperations() const
{
- return _stripe->getActiveIdealStateOperations();
+ // TODO STRIPE need to aggregate status responses _across_ stripes..!
+ if (_use_legacy_mode) {
+ return _stripe->getActiveIdealStateOperations();
+ } else {
+ auto guard = _stripe_accessor->rendezvous_and_hold_all();
+ return first_stripe().getActiveIdealStateOperations();
+ }
}
bool
Distributor::reportStatus(std::ostream& out,
const framework::HttpUrlPath& path) const
{
- return _stripe->reportStatus(out, path);
+ assert(!_use_legacy_mode);
+ if (!path.hasAttribute("page") || path.getAttribute("page") == "buckets") {
+ framework::PartlyHtmlStatusReporter htmlReporter(*this);
+ htmlReporter.reportHtmlHeader(out, path);
+ if (!path.hasAttribute("page")) {
+ out << "<a href=\"?page=pending\">Count of pending messages to storage nodes</a><br>\n"
+ << "<a href=\"?page=buckets\">List all buckets, highlight non-ideal state</a><br>\n";
+ } else {
+ auto guard = _stripe_accessor->rendezvous_and_hold_all();
+ const auto& op_ctx = _component;
+ for (const auto& space : op_ctx.bucket_space_repo()) {
+ out << "<h2>" << document::FixedBucketSpaces::to_string(space.first) << " - " << space.first << "</h2>\n";
+ guard->report_bucket_db_status(space.first, out);
+ }
+ }
+ htmlReporter.reportHtmlFooter(out, path);
+ } else {
+ framework::PartlyXmlStatusReporter xmlReporter(*this, out, path);
+ using namespace vespalib::xml;
+ std::string page(path.getAttribute("page"));
+
+ if (page == "pending") {
+ auto guard = _stripe_accessor->rendezvous_and_hold_all();
+ auto stats = guard->pending_operation_stats();
+ xmlReporter << XmlTag("pending")
+ << XmlAttribute("externalload", stats.external_load_operations)
+ << XmlAttribute("maintenance", stats.maintenance_operations)
+ << XmlEndTag();
+ }
+ }
+ return true;
}
bool
Distributor::handleStatusRequest(const DelegatedStatusRequest& request) const
{
- // TODO STRIPE need to aggregate status responses _across_ stripes..!
- return _stripe->handleStatusRequest(request);
+ assert(!_use_legacy_mode);
+ auto wrappedRequest = std::make_shared<DistributorStatus>(request);
+ {
+ framework::TickingLockGuard guard(_threadPool.freezeCriticalTicks());
+ _status_to_do.push_back(wrappedRequest);
+ guard.broadcast();
+ }
+ wrappedRequest->waitForCompletion();
+ return true;
}
}
diff --git a/storage/src/vespa/storage/distributor/distributor.h b/storage/src/vespa/storage/distributor/distributor.h
index 074f5fe27d4..50bd2526ff4 100644
--- a/storage/src/vespa/storage/distributor/distributor.h
+++ b/storage/src/vespa/storage/distributor/distributor.h
@@ -4,7 +4,9 @@
#include "bucket_spaces_stats_provider.h"
#include "bucketdbupdater.h"
+#include "distributor_component.h"
#include "distributor_host_info_reporter.h"
+#include "distributor_interface.h"
#include "distributor_stripe_interface.h"
#include "externaloperationhandler.h"
#include "idealstatemanager.h"
@@ -12,6 +14,7 @@
#include "pendingmessagetracker.h"
#include "statusreporterdelegate.h"
#include "stripe_bucket_db_updater.h" // TODO this is temporary
+#include "stripe_host_info_notifier.h"
#include <vespa/config/config.h>
#include <vespa/storage/common/distributorcomponent.h>
#include <vespa/storage/common/doneinitializehandler.h>
@@ -21,6 +24,7 @@
#include <vespa/storageapi/message/state.h>
#include <vespa/storageframework/generic/metric/metricupdatehook.h>
#include <vespa/storageframework/generic/thread/tickingthread.h>
+#include <chrono>
#include <queue>
#include <unordered_map>
@@ -38,24 +42,28 @@ class BucketDBUpdater;
class DistributorBucketSpaceRepo;
class DistributorStatus;
class DistributorStripe;
+class DistributorStripePool;
+class StripeAccessor;
class OperationSequencer;
-class LegacySingleStripeAccessor;
class OwnershipTransferSafeTimePointCalculator;
class SimpleMaintenanceScanner;
class ThrottlingOperationStarter;
class Distributor final
: public StorageLink,
+ public DistributorInterface,
public StatusDelegator,
public framework::StatusReporter,
public framework::TickingThread,
public MinReplicaProvider,
- public BucketSpacesStatsProvider
+ public BucketSpacesStatsProvider,
+ public StripeHostInfoNotifier
{
public:
Distributor(DistributorComponentRegister&,
const NodeIdentity& node_identity,
framework::TickingThreadPool&,
+ DistributorStripePool& stripe_pool,
DoneInitializeHandler&,
uint32_t num_distributor_stripes,
HostInfo& hostInfoReporterRegistrar,
@@ -63,9 +71,6 @@ public:
~Distributor() override;
- const ClusterContext& cluster_context() const {
- return _component.cluster_context();
- }
void onOpen() override;
void onClose() override;
bool onDown(const std::shared_ptr<api::StorageMessage>&) override;
@@ -74,11 +79,14 @@ public:
DistributorMetricSet& getMetrics() { return *_metrics; }
- /**
- * Enables a new cluster state. Called after the bucket db updater has
- * retrieved all bucket info related to the change.
- */
- void enableClusterStateBundle(const lib::ClusterStateBundle& clusterStateBundle);
+ // Implements DistributorInterface and DistributorMessageSender.
+ DistributorMetricSet& metrics() override { return getMetrics(); }
+ const DistributorConfiguration& config() const override;
+
+ void sendCommand(const std::shared_ptr<api::StorageCommand>& cmd) override;
+ void sendReply(const std::shared_ptr<api::StorageReply>& reply) override;
+ int getDistributorIndex() const override { return _component.node_index(); }
+ const ClusterContext& cluster_context() const override { return _component.cluster_context(); }
void storageDistributionChanged() override;
@@ -90,25 +98,12 @@ public:
bool handleStatusRequest(const DelegatedStatusRequest& request) const override;
- std::string getActiveIdealStateOperations() const;
-
virtual framework::ThreadWaitInfo doCriticalTick(framework::ThreadIndex) override;
virtual framework::ThreadWaitInfo doNonCriticalTick(framework::ThreadIndex) override;
- const lib::ClusterStateBundle& getClusterStateBundle() const;
- const DistributorConfiguration& getConfig() const;
-
- bool isInRecoveryMode() const noexcept;
-
- PendingMessageTracker& getPendingMessageTracker();
- const PendingMessageTracker& getPendingMessageTracker() const;
-
- DistributorBucketSpaceRepo& getBucketSpaceRepo() noexcept;
- const DistributorBucketSpaceRepo& getBucketSpaceRepo() const noexcept;
- DistributorBucketSpaceRepo& getReadOnlyBucketSpaceRepo() noexcept;
- const DistributorBucketSpaceRepo& getReadyOnlyBucketSpaceRepo() const noexcept;
-
- storage::distributor::DistributorStripeComponent& distributor_component() noexcept;
+ // Called by DistributorStripe threads when they want to notify the cluster controller of changed stats.
+ // Thread safe.
+ void notify_stripe_wants_to_send_host_info(uint16_t stripe_index) override;
class MetricUpdateHook : public framework::MetricUpdateHook
{
@@ -126,25 +121,44 @@ public:
Distributor& _self;
};
- std::chrono::steady_clock::duration db_memory_sample_interval() const noexcept;
-
private:
friend struct DistributorTest;
friend class BucketDBUpdaterTest;
friend class DistributorTestUtil;
friend class MetricUpdateHook;
+ // TODO STRIPE remove
+ DistributorStripe& first_stripe() noexcept;
+ const DistributorStripe& first_stripe() const noexcept;
+
void setNodeStateUp();
bool handleMessage(const std::shared_ptr<api::StorageMessage>& msg);
+ /**
+ * Enables a new cluster state. Used by tests to bypass BucketDBUpdater.
+ */
+ void enableClusterStateBundle(const lib::ClusterStateBundle& clusterStateBundle);
+
// Accessors used by tests
+ std::string getActiveIdealStateOperations() const;
+ const lib::ClusterStateBundle& getClusterStateBundle() const;
+ const DistributorConfiguration& getConfig() const;
+ bool isInRecoveryMode() const noexcept;
+ PendingMessageTracker& getPendingMessageTracker();
+ const PendingMessageTracker& getPendingMessageTracker() const;
+ DistributorBucketSpaceRepo& getBucketSpaceRepo() noexcept;
+ const DistributorBucketSpaceRepo& getBucketSpaceRepo() const noexcept;
+ DistributorBucketSpaceRepo& getReadOnlyBucketSpaceRepo() noexcept;
+ const DistributorBucketSpaceRepo& getReadyOnlyBucketSpaceRepo() const noexcept;
+ storage::distributor::DistributorStripeComponent& distributor_component() noexcept;
+ std::chrono::steady_clock::duration db_memory_sample_interval() const noexcept;
+
StripeBucketDBUpdater& bucket_db_updater();
const StripeBucketDBUpdater& bucket_db_updater() const;
IdealStateManager& ideal_state_manager();
const IdealStateManager& ideal_state_manager() const;
ExternalOperationHandler& external_operation_handler();
const ExternalOperationHandler& external_operation_handler() const;
-
BucketDBMetricUpdater& bucket_db_metric_updater() const noexcept;
/**
@@ -162,19 +176,49 @@ private:
void propagateInternalScanMetricsToExternal();
void scanAllBuckets();
void enableNextConfig();
+ void fetch_status_requests();
+ void handle_status_requests();
+ void signal_work_was_done();
void enableNextDistribution();
void propagateDefaultDistribution(std::shared_ptr<const lib::Distribution>);
+ void dispatch_to_main_distributor_thread_queue(const std::shared_ptr<api::StorageMessage>& msg);
+ void fetch_external_messages();
+ void process_fetched_external_messages();
+ void send_host_info_if_appropriate();
+ // Precondition: _stripe_scan_notify_mutex is held
+ [[nodiscard]] bool may_send_host_info_on_behalf_of_stripes(std::lock_guard<std::mutex>& held_lock) noexcept;
+
+ struct StripeScanStats {
+ bool wants_to_send_host_info = false;
+ bool has_reported_in_at_least_once = false;
+ };
+
+ using MessageQueue = std::vector<std::shared_ptr<api::StorageMessage>>;
+
DistributorComponentRegister& _comp_reg;
std::shared_ptr<DistributorMetricSet> _metrics;
ChainedMessageSender* _messageSender;
+ const bool _use_legacy_mode;
// TODO STRIPE multiple stripes...! This is for proof of concept of wiring.
- std::unique_ptr<DistributorStripe> _stripe;
- std::unique_ptr<LegacySingleStripeAccessor> _stripe_accessor;
- storage::DistributorComponent _component;
+ std::unique_ptr<DistributorStripe> _stripe;
+ DistributorStripePool& _stripe_pool;
+ std::vector<std::unique_ptr<DistributorStripe>> _stripes;
+ std::unique_ptr<StripeAccessor> _stripe_accessor;
+ MessageQueue _message_queue; // Queue for top-level ops
+ MessageQueue _fetched_messages;
+ distributor::DistributorComponent _component;
+ std::shared_ptr<const DistributorConfiguration> _total_config;
std::unique_ptr<BucketDBUpdater> _bucket_db_updater;
StatusReporterDelegate _distributorStatusDelegate;
+ std::unique_ptr<StatusReporterDelegate> _bucket_db_status_delegate;
framework::TickingThreadPool& _threadPool;
+ mutable std::vector<std::shared_ptr<DistributorStatus>> _status_to_do;
+ mutable std::vector<std::shared_ptr<DistributorStatus>> _fetched_status_requests;
+ mutable std::mutex _stripe_scan_notify_mutex;
+ std::vector<StripeScanStats> _stripe_scan_stats; // Indices are 1-1 with _stripes entries
+ std::chrono::steady_clock::time_point _last_host_info_send_time;
+ std::chrono::milliseconds _host_info_send_delay;
framework::ThreadWaitInfo _tickResult;
MetricUpdateHook _metricUpdateHook;
DistributorHostInfoReporter _hostInfoReporter;
diff --git a/storage/src/vespa/storage/distributor/distributor_bucket_space.cpp b/storage/src/vespa/storage/distributor/distributor_bucket_space.cpp
index 9ec4d31eb32..37e7dc86e43 100644
--- a/storage/src/vespa/storage/distributor/distributor_bucket_space.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_bucket_space.cpp
@@ -22,8 +22,8 @@ DistributorBucketSpace::DistributorBucketSpace()
{
}
-DistributorBucketSpace::DistributorBucketSpace(uint16_t node_index)
- : _bucketDatabase(std::make_unique<BTreeBucketDatabase>()),
+DistributorBucketSpace::DistributorBucketSpace(uint16_t node_index, bool use_bucket_db)
+ : _bucketDatabase(use_bucket_db ? std::make_unique<BTreeBucketDatabase>() : std::unique_ptr<BTreeBucketDatabase>()),
_clusterState(),
_distribution(),
_node_index(node_index),
diff --git a/storage/src/vespa/storage/distributor/distributor_bucket_space.h b/storage/src/vespa/storage/distributor/distributor_bucket_space.h
index 558cbada31f..8898039eb02 100644
--- a/storage/src/vespa/storage/distributor/distributor_bucket_space.h
+++ b/storage/src/vespa/storage/distributor/distributor_bucket_space.h
@@ -47,7 +47,8 @@ class DistributorBucketSpace {
bool owns_bucket_in_state(const lib::Distribution& distribution, const lib::ClusterState& cluster_state, document::BucketId bucket) const;
public:
explicit DistributorBucketSpace();
- explicit DistributorBucketSpace(uint16_t node_index);
+ // TODO STRIPE: Remove the use_bucket_db parameter when legacy mode is gone.
+ explicit DistributorBucketSpace(uint16_t node_index, bool use_bucket_db = true);
~DistributorBucketSpace();
DistributorBucketSpace(const DistributorBucketSpace&) = delete;
@@ -56,9 +57,11 @@ public:
DistributorBucketSpace& operator=(DistributorBucketSpace&&) = delete;
BucketDatabase& getBucketDatabase() noexcept {
+ assert(_bucketDatabase);
return *_bucketDatabase;
}
const BucketDatabase& getBucketDatabase() const noexcept {
+ assert(_bucketDatabase);
return *_bucketDatabase;
}
diff --git a/storage/src/vespa/storage/distributor/distributor_bucket_space_repo.cpp b/storage/src/vespa/storage/distributor/distributor_bucket_space_repo.cpp
index 4f64dab9a68..368483d3f2d 100644
--- a/storage/src/vespa/storage/distributor/distributor_bucket_space_repo.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_bucket_space_repo.cpp
@@ -13,11 +13,11 @@ using document::BucketSpace;
namespace storage::distributor {
-DistributorBucketSpaceRepo::DistributorBucketSpaceRepo(uint16_t node_index)
+DistributorBucketSpaceRepo::DistributorBucketSpaceRepo(uint16_t node_index, bool use_bucket_db)
: _map()
{
- add(document::FixedBucketSpaces::default_space(), std::make_unique<DistributorBucketSpace>(node_index));
- add(document::FixedBucketSpaces::global_space(), std::make_unique<DistributorBucketSpace>(node_index));
+ add(document::FixedBucketSpaces::default_space(), std::make_unique<DistributorBucketSpace>(node_index, use_bucket_db));
+ add(document::FixedBucketSpaces::global_space(), std::make_unique<DistributorBucketSpace>(node_index, use_bucket_db));
}
DistributorBucketSpaceRepo::~DistributorBucketSpaceRepo() = default;
diff --git a/storage/src/vespa/storage/distributor/distributor_bucket_space_repo.h b/storage/src/vespa/storage/distributor/distributor_bucket_space_repo.h
index f012b25e351..e7552f058d8 100644
--- a/storage/src/vespa/storage/distributor/distributor_bucket_space_repo.h
+++ b/storage/src/vespa/storage/distributor/distributor_bucket_space_repo.h
@@ -19,7 +19,8 @@ private:
BucketSpaceMap _map;
public:
- explicit DistributorBucketSpaceRepo(uint16_t node_index);
+ // TODO STRIPE: Remove the use_bucket_db parameter when legacy mode is gone.
+ explicit DistributorBucketSpaceRepo(uint16_t node_index, bool use_bucket_db = true);
~DistributorBucketSpaceRepo();
DistributorBucketSpaceRepo(const DistributorBucketSpaceRepo&&) = delete;
diff --git a/storage/src/vespa/storage/distributor/distributor_component.cpp b/storage/src/vespa/storage/distributor/distributor_component.cpp
new file mode 100644
index 00000000000..e01d7e7cb6d
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/distributor_component.cpp
@@ -0,0 +1,27 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "distributor_bucket_space.h"
+#include "distributor_bucket_space_repo.h"
+#include "distributor_component.h"
+
+namespace storage::distributor {
+
+DistributorComponent::DistributorComponent(DistributorInterface& distributor,
+ DistributorComponentRegister& comp_reg,
+ const std::string& name)
+ : storage::DistributorComponent(comp_reg, name),
+ _distributor(distributor),
+ _bucket_space_repo(std::make_unique<DistributorBucketSpaceRepo>(node_index(), false)),
+ _read_only_bucket_space_repo(std::make_unique<DistributorBucketSpaceRepo>(node_index(), false))
+{
+}
+
+DistributorComponent::~DistributorComponent() = default;
+
+api::StorageMessageAddress
+DistributorComponent::node_address(uint16_t node_index) const noexcept
+{
+ return api::StorageMessageAddress::create(cluster_name_ptr(), lib::NodeType::STORAGE, node_index);
+}
+
+}
diff --git a/storage/src/vespa/storage/distributor/distributor_component.h b/storage/src/vespa/storage/distributor/distributor_component.h
new file mode 100644
index 00000000000..68db5a3c483
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/distributor_component.h
@@ -0,0 +1,67 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "distributor_interface.h"
+#include "distributor_node_context.h"
+#include "distributor_operation_context.h"
+#include <vespa/storage/common/distributorcomponent.h>
+
+namespace storage::distributor {
+
+class DistributorBucketSpaceRepo;
+
+/**
+ * The framework component for the top-level distributor.
+ *
+ * This class should be used directly as little as possible.
+ * Instead the interfaces DistributorNodeContext and DistributorOperationContext should be used where possible.
+ */
+class DistributorComponent : public storage::DistributorComponent,
+ public DistributorNodeContext,
+ public DistributorOperationContext {
+private:
+ DistributorInterface& _distributor;
+ // TODO STRIPE: When legacy mode is removed, replace this with BucketSpaceStateMap.
+ std::unique_ptr<DistributorBucketSpaceRepo> _bucket_space_repo;
+ std::unique_ptr<DistributorBucketSpaceRepo> _read_only_bucket_space_repo;
+
+public:
+ DistributorComponent(DistributorInterface& distributor,
+ DistributorComponentRegister& comp_reg,
+ const std::string& name);
+
+ ~DistributorComponent() override;
+
+ // TODO STRIPE: Unify implementation of this interface between DistributorComponent and DistributorStripeComponent?
+ // Implements DistributorNodeContext
+ const framework::Clock& clock() const noexcept override { return getClock(); }
+ const vespalib::string* cluster_name_ptr() const noexcept override { return cluster_context().cluster_name_ptr(); }
+ const document::BucketIdFactory& bucket_id_factory() const noexcept override { return getBucketIdFactory(); }
+ uint16_t node_index() const noexcept override { return getIndex(); }
+ api::StorageMessageAddress node_address(uint16_t node_index) const noexcept override;
+
+ // Implements DistributorOperationContext
+ api::Timestamp generate_unique_timestamp() override {
+ return getUniqueTimestamp();
+ }
+ const DistributorBucketSpaceRepo& bucket_space_repo() const noexcept override {
+ return *_bucket_space_repo;
+ }
+ DistributorBucketSpaceRepo& bucket_space_repo() noexcept override {
+ return *_bucket_space_repo;
+ }
+ const DistributorBucketSpaceRepo& read_only_bucket_space_repo() const noexcept override {
+ return *_read_only_bucket_space_repo;
+ }
+ DistributorBucketSpaceRepo& read_only_bucket_space_repo() noexcept override {
+ return *_read_only_bucket_space_repo;
+ }
+ const storage::DistributorConfiguration& distributor_config() const noexcept override {
+ return _distributor.config();
+ }
+
+
+};
+
+}
diff --git a/storage/src/vespa/storage/distributor/distributor_interface.h b/storage/src/vespa/storage/distributor/distributor_interface.h
new file mode 100644
index 00000000000..aa9dfd37909
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/distributor_interface.h
@@ -0,0 +1,22 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include "distributormessagesender.h"
+
+namespace storage { class DistributorConfiguration; }
+
+namespace storage::distributor {
+
+class DistributorMetricSet;
+
+/**
+ * Simple interface to access metrics and config for the top-level distributor.
+ */
+class DistributorInterface : public DistributorMessageSender {
+public:
+ virtual ~DistributorInterface() {}
+ virtual DistributorMetricSet& metrics() = 0;
+ virtual const DistributorConfiguration& config() const = 0;
+};
+
+}
diff --git a/storage/src/vespa/storage/distributor/distributor_operation_context.h b/storage/src/vespa/storage/distributor/distributor_operation_context.h
new file mode 100644
index 00000000000..aa598835cdb
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/distributor_operation_context.h
@@ -0,0 +1,30 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "storage_node_up_states.h"
+#include <vespa/storageapi/defs.h>
+
+namespace storage { class DistributorConfiguration; }
+namespace storage::lib { class ClusterStateBundle; }
+
+namespace storage::distributor {
+
+class DistributorBucketSpaceRepo;
+
+/**
+ * Interface with functionality that is used when handling top-level distributor operations.
+ */
+class DistributorOperationContext {
+public:
+ virtual ~DistributorOperationContext() {}
+ virtual api::Timestamp generate_unique_timestamp() = 0;
+ // TODO STRIPE: Access to bucket space repos is only temporary at this level.
+    virtual const DistributorBucketSpaceRepo& bucket_space_repo() const noexcept = 0;
+ virtual DistributorBucketSpaceRepo& bucket_space_repo() noexcept = 0;
+ virtual const DistributorBucketSpaceRepo& read_only_bucket_space_repo() const noexcept = 0;
+ virtual DistributorBucketSpaceRepo& read_only_bucket_space_repo() noexcept = 0;
+ virtual const DistributorConfiguration& distributor_config() const noexcept = 0;
+};
+
+}
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe.cpp b/storage/src/vespa/storage/distributor/distributor_stripe.cpp
index 87e938efd71..5c6c529fe69 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_stripe.cpp
@@ -6,6 +6,7 @@
#include "distributor_bucket_space.h"
#include "distributormetricsset.h"
#include "idealstatemetricsset.h"
+#include "stripe_host_info_notifier.h"
#include "operation_sequencer.h"
#include "ownership_transfer_safe_time_point_calculator.h"
#include "throttlingoperationstarter.h"
@@ -39,6 +40,7 @@ DistributorStripe::DistributorStripe(DistributorComponentRegister& compReg,
framework::TickingThreadPool& threadPool,
DoneInitializeHandler& doneInitHandler,
ChainedMessageSender& messageSender,
+ StripeHostInfoNotifier& stripe_host_info_notifier,
bool use_legacy_mode)
: DistributorStripeInterface(),
framework::StatusReporter("distributor", "Distributor"),
@@ -52,14 +54,16 @@ DistributorStripe::DistributorStripe(DistributorComponentRegister& compReg,
_maintenanceOperationOwner(*this, _component.getClock()),
_operation_sequencer(std::make_unique<OperationSequencer>()),
_pendingMessageTracker(compReg),
- _bucketDBUpdater(*this, *_bucketSpaceRepo, *_readOnlyBucketSpaceRepo, *this, compReg, use_legacy_mode),
+ _bucketDBUpdater(_component, _component, *this, *this, use_legacy_mode),
_distributorStatusDelegate(compReg, *this, *this),
_bucketDBStatusDelegate(compReg, *this, _bucketDBUpdater),
_idealStateManager(*this, *_bucketSpaceRepo, *_readOnlyBucketSpaceRepo, compReg),
_messageSender(messageSender),
+ _stripe_host_info_notifier(stripe_host_info_notifier),
_externalOperationHandler(_component, _component, getMetrics(), getMessageSender(),
*_operation_sequencer, *this, _component,
_idealStateManager, _operationOwner),
+ _external_message_mutex(),
_threadPool(threadPool),
_doneInitializeHandler(doneInitHandler),
_doneInitializing(false),
@@ -84,7 +88,10 @@ DistributorStripe::DistributorStripe(DistributorComponentRegister& compReg,
_must_send_updated_host_info(false),
_use_legacy_mode(use_legacy_mode)
{
- _bucketDBStatusDelegate.registerStatusPage();
+ if (use_legacy_mode) {
+ _distributorStatusDelegate.registerStatusPage();
+ _bucketDBStatusDelegate.registerStatusPage();
+ }
propagateDefaultDistribution(_component.getDistribution());
propagateClusterStates();
};
@@ -168,14 +175,19 @@ DistributorStripe::handle_or_enqueue_message(const std::shared_ptr<api::StorageM
if (_externalOperationHandler.try_handle_message_outside_main_thread(msg)) {
return true;
}
- // TODO STRIPE redesign how message queue guarding and wakeup is performed.
- // Currently involves a _thread pool global_ lock transitively via tick guard!
- framework::TickingLockGuard guard(_threadPool.freezeCriticalTicks());
MBUS_TRACE(msg->getTrace(), 9,
"Distributor: Added to message queue. Thread state: "
+ _threadPool.getStatus());
- _messageQueue.push_back(msg);
- guard.broadcast();
+ if (_use_legacy_mode) {
+ // TODO STRIPE remove
+ framework::TickingLockGuard guard(_threadPool.freezeCriticalTicks());
+ _messageQueue.push_back(msg);
+ guard.broadcast();
+ } else {
+ std::lock_guard lock(_external_message_mutex);
+ _messageQueue.push_back(msg);
+ // Caller has the responsibility to wake up correct stripe
+ }
return true;
}
@@ -295,7 +307,7 @@ DistributorStripe::enableClusterStateBundle(const lib::ClusterStateBundle& state
const uint16_t new_node_count = baseline_state.getNodeCount(lib::NodeType::STORAGE);
for (uint16_t i = 0; i < std::max(old_node_count, new_node_count); ++i) {
const auto& node_state = baseline_state.getNodeState(lib::Node(lib::NodeType::STORAGE, i)).getState();
- if (!node_state.oneOf(getStorageNodeUpStates())) {
+ if (!node_state.oneOf(storage_node_up_states())) {
std::vector<uint64_t> msgIds = _pendingMessageTracker.clearMessagesForNode(i);
LOG(debug, "Node %u is down, clearing %zu pending maintenance operations", i, msgIds.size());
@@ -727,7 +739,11 @@ DistributorStripe::scanNextBucket()
void DistributorStripe::send_updated_host_info_if_required() {
if (_must_send_updated_host_info) {
- _component.getStateUpdater().immediately_send_get_node_state_replies();
+ if (_use_legacy_mode) {
+ _component.getStateUpdater().immediately_send_get_node_state_replies();
+ } else {
+ _stripe_host_info_notifier.notify_stripe_wants_to_send_host_info(0); // TODO STRIPE correct stripe index!
+ }
_must_send_updated_host_info = false;
}
}
@@ -745,10 +761,9 @@ framework::ThreadWaitInfo
DistributorStripe::doCriticalTick(framework::ThreadIndex)
{
_tickResult = framework::ThreadWaitInfo::NO_MORE_CRITICAL_WORK_KNOWN;
- if (_use_legacy_mode) {
- enableNextDistribution();
- enableNextConfig();
- }
+ assert(_use_legacy_mode);
+ enableNextDistribution();
+ enableNextConfig();
fetchStatusRequests();
fetchExternalMessages();
return _tickResult;
@@ -758,7 +773,13 @@ framework::ThreadWaitInfo
DistributorStripe::doNonCriticalTick(framework::ThreadIndex)
{
_tickResult = framework::ThreadWaitInfo::NO_MORE_CRITICAL_WORK_KNOWN;
- handleStatusRequests();
+ if (!_use_legacy_mode) {
+ std::lock_guard lock(_external_message_mutex);
+ fetchExternalMessages();
+ }
+ if (_use_legacy_mode) {
+ handleStatusRequests();
+ }
startExternalOperations();
if (initializing()) {
_bucketDBUpdater.resendDelayedMessages();
@@ -780,6 +801,12 @@ DistributorStripe::doNonCriticalTick(framework::ThreadIndex)
return _tickResult;
}
+bool DistributorStripe::tick() {
+ assert(!_use_legacy_mode);
+ auto wait_info = doNonCriticalTick(framework::ThreadIndex(0));
+ return !wait_info.waitWanted(); // If we don't want to wait, we presumably did some useful stuff.
+}
+
bool DistributorStripe::should_inhibit_current_maintenance_scan_tick() const noexcept {
return (workWasDone() && (_inhibited_maintenance_tick_count
< getConfig().max_consecutively_inhibited_maintenance_ticks()));
@@ -824,6 +851,7 @@ DistributorStripe::propagate_config_snapshot_to_internal_components()
void
DistributorStripe::fetchStatusRequests()
{
+ assert(_use_legacy_mode);
if (_fetchedStatusRequests.empty()) {
_fetchedStatusRequests.swap(_statusToDo);
}
@@ -839,6 +867,7 @@ DistributorStripe::fetchExternalMessages()
void
DistributorStripe::handleStatusRequests()
{
+ assert(_use_legacy_mode);
uint32_t sz = _fetchedStatusRequests.size();
for (uint32_t i = 0; i < sz; ++i) {
auto& s = *_fetchedStatusRequests[i];
@@ -854,6 +883,7 @@ DistributorStripe::handleStatusRequests()
vespalib::string
DistributorStripe::getReportContentType(const framework::HttpUrlPath& path) const
{
+ assert(_use_legacy_mode);
if (path.hasAttribute("page")) {
if (path.getAttribute("page") == "buckets") {
return "text/html";
@@ -877,19 +907,18 @@ DistributorStripe::getActiveOperations() const
return _operationOwner.toString();
}
+// TODO STRIPE remove this; delegated to top-level Distributor only
bool
DistributorStripe::reportStatus(std::ostream& out,
const framework::HttpUrlPath& path) const
{
+ assert(_use_legacy_mode);
if (!path.hasAttribute("page") || path.getAttribute("page") == "buckets") {
framework::PartlyHtmlStatusReporter htmlReporter(*this);
htmlReporter.reportHtmlHeader(out, path);
if (!path.hasAttribute("page")) {
- out << "<a href=\"?page=pending\">Count of pending messages to "
- << "storage nodes</a><br><a href=\"?page=maintenance&show=50\">"
- << "List maintenance queue (adjust show parameter to see more "
- << "operations, -1 for all)</a><br>\n<a href=\"?page=buckets\">"
- << "List all buckets, highlight non-ideal state</a><br>\n";
+ out << "<a href=\"?page=pending\">Count of pending messages to storage nodes</a><br>\n"
+ << "<a href=\"?page=buckets\">List all buckets, highlight non-ideal state</a><br>\n";
} else {
const_cast<IdealStateManager&>(_idealStateManager)
.getBucketStatus(out);
@@ -903,20 +932,19 @@ DistributorStripe::reportStatus(std::ostream& out,
if (page == "pending") {
xmlReporter << XmlTag("pending")
<< XmlAttribute("externalload", _operationOwner.size())
- << XmlAttribute("maintenance",
- _maintenanceOperationOwner.size())
+                    << XmlAttribute("maintenance", _maintenanceOperationOwner.size())
<< XmlEndTag();
- } else if (page == "maintenance") {
- // Need new page
}
}
return true;
}
+// TODO STRIPE remove this; delegated to top-level Distributor only
bool
DistributorStripe::handleStatusRequest(const DelegatedStatusRequest& request) const
{
+ assert(_use_legacy_mode);
auto wrappedRequest = std::make_shared<DistributorStatus>(request);
{
framework::TickingLockGuard guard(_threadPool.freezeCriticalTicks());
@@ -927,4 +955,99 @@ DistributorStripe::handleStatusRequest(const DelegatedStatusRequest& request) co
return true;
}
+StripeAccessGuard::PendingOperationStats
+DistributorStripe::pending_operation_stats() const
+{
+ return {_operationOwner.size(), _maintenanceOperationOwner.size()};
+}
+
+void
+DistributorStripe::set_pending_cluster_state_bundle(const lib::ClusterStateBundle& pending_state)
+{
+ getBucketSpaceRepo().set_pending_cluster_state_bundle(pending_state);
+}
+
+void
+DistributorStripe::clear_pending_cluster_state_bundle()
+{
+ getBucketSpaceRepo().clear_pending_cluster_state_bundle();
+}
+
+void
+DistributorStripe::enable_cluster_state_bundle(const lib::ClusterStateBundle& new_state)
+{
+ // TODO STRIPE replace legacy func
+ enableClusterStateBundle(new_state);
+}
+
+void
+DistributorStripe::notify_distribution_change_enabled()
+{
+ // TODO STRIPE replace legacy func
+ notifyDistributionChangeEnabled();
+}
+
+PotentialDataLossReport
+DistributorStripe::remove_superfluous_buckets(document::BucketSpace bucket_space,
+ const lib::ClusterState& new_state,
+ bool is_distribution_change)
+{
+ return bucket_db_updater().remove_superfluous_buckets(bucket_space, new_state, is_distribution_change);
+}
+
+void
+DistributorStripe::merge_entries_into_db(document::BucketSpace bucket_space,
+ api::Timestamp gathered_at_timestamp,
+ const lib::Distribution& distribution,
+ const lib::ClusterState& new_state,
+ const char* storage_up_states,
+ const std::unordered_set<uint16_t>& outdated_nodes,
+ const std::vector<dbtransition::Entry>& entries)
+{
+ bucket_db_updater().merge_entries_into_db(bucket_space, gathered_at_timestamp, distribution,
+ new_state, storage_up_states, outdated_nodes, entries);
+}
+
+void
+DistributorStripe::update_read_snapshot_before_db_pruning()
+{
+ bucket_db_updater().update_read_snapshot_before_db_pruning();
+}
+
+void
+DistributorStripe::update_read_snapshot_after_db_pruning(const lib::ClusterStateBundle& new_state)
+{
+ bucket_db_updater().update_read_snapshot_after_db_pruning(new_state);
+}
+
+void
+DistributorStripe::update_read_snapshot_after_activation(const lib::ClusterStateBundle& activated_state)
+{
+ bucket_db_updater().update_read_snapshot_after_activation(activated_state);
+}
+
+void
+DistributorStripe::clear_read_only_bucket_repo_databases()
+{
+ bucket_db_updater().clearReadOnlyBucketRepoDatabases();
+}
+
+void
+DistributorStripe::report_bucket_db_status(document::BucketSpace bucket_space, std::ostream& out) const
+{
+ ideal_state_manager().dump_bucket_space_db_status(bucket_space, out);
+}
+
+void
+DistributorStripe::report_single_bucket_requests(vespalib::xml::XmlOutputStream& xos) const
+{
+ bucket_db_updater().report_single_bucket_requests(xos);
+}
+
+void
+DistributorStripe::report_delayed_single_bucket_requests(vespalib::xml::XmlOutputStream& xos) const
+{
+ bucket_db_updater().report_delayed_single_bucket_requests(xos);
+}
+
}
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe.h b/storage/src/vespa/storage/distributor/distributor_stripe.h
index bc058305c09..b82b5483bd3 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe.h
+++ b/storage/src/vespa/storage/distributor/distributor_stripe.h
@@ -12,6 +12,7 @@
#include "statusreporterdelegate.h"
#include "stripe_access_guard.h"
#include "stripe_bucket_db_updater.h"
+#include "tickable_stripe.h"
#include <vespa/config/config.h>
#include <vespa/storage/common/doneinitializehandler.h>
#include <vespa/storage/common/messagesender.h>
@@ -21,6 +22,7 @@
#include <vespa/storageapi/message/state.h>
#include <vespa/storageframework/generic/metric/metricupdatehook.h>
#include <vespa/storageframework/generic/thread/tickingthread.h>
+#include <mutex>
#include <queue>
#include <unordered_map>
@@ -39,6 +41,7 @@ class DistributorBucketSpaceRepo;
class OperationSequencer;
class OwnershipTransferSafeTimePointCalculator;
class SimpleMaintenanceScanner;
+class StripeHostInfoNotifier;
class ThrottlingOperationStarter;
/**
@@ -50,7 +53,8 @@ class DistributorStripe final
public framework::StatusReporter,
public MinReplicaProvider,
public BucketSpacesStatsProvider,
- public NonTrackingMessageSender
+ public NonTrackingMessageSender,
+ public TickableStripe
{
public:
DistributorStripe(DistributorComponentRegister&,
@@ -59,6 +63,7 @@ public:
framework::TickingThreadPool&,
DoneInitializeHandler&,
ChainedMessageSender& messageSender,
+ StripeHostInfoNotifier& stripe_host_info_notifier,
bool use_legacy_mode);
~DistributorStripe() override;
@@ -66,7 +71,7 @@ public:
const ClusterContext& cluster_context() const override {
return _component.cluster_context();
}
- void flush_and_close();
+ void flush_and_close() override;
bool handle_or_enqueue_message(const std::shared_ptr<api::StorageMessage>&);
void send_up_with_tracking(const std::shared_ptr<api::StorageMessage>&);
// Bypasses message tracker component. Thread safe.
@@ -114,6 +119,8 @@ public:
bool handleStatusRequest(const DelegatedStatusRequest& request) const override;
+ StripeAccessGuard::PendingOperationStats pending_operation_stats() const override;
+
std::string getActiveIdealStateOperations() const;
std::string getActiveOperations() const;
@@ -131,14 +138,6 @@ public:
const lib::ClusterStateBundle& getClusterStateBundle() const override;
/**
- * @return Returns the states in which the distributors consider
- * storage nodes to be up.
- */
- const char* getStorageNodeUpStates() const override {
- return "uri";
- }
-
- /**
* Called by bucket db updater after a merge has finished, and all the
* request bucket info operations have been performed as well. Passes the
* merge back to the operation that created it.
@@ -190,13 +189,16 @@ public:
return _db_memory_sample_interval;
}
+ bool tick() override;
+
private:
+ // TODO reduce number of friends. DistributorStripe too popular for its own good.
friend struct DistributorTest;
friend class BucketDBUpdaterTest;
friend class DistributorTestUtil;
friend class MetricUpdateHook;
friend class Distributor;
- friend class LegacySingleStripeAccessGuard;
+ friend class MultiThreadedStripeAccessGuard;
bool handleMessage(const std::shared_ptr<api::StorageMessage>& msg);
bool isMaintenanceReply(const api::StorageReply& reply) const;
@@ -253,8 +255,6 @@ private:
void enableNextDistribution(); // TODO STRIPE remove once legacy is gone
void propagateDefaultDistribution(std::shared_ptr<const lib::Distribution>); // TODO STRIPE remove once legacy is gone
void propagateClusterStates();
- void update_distribution_config(const BucketSpaceDistributionConfigs& new_configs);
- void update_total_distributor_config(std::shared_ptr<const DistributorConfiguration> config);
BucketSpacesStatsProvider::BucketSpacesStats make_invalid_stats_per_configured_space() const;
template <typename NodeFunctor>
@@ -263,6 +263,31 @@ private:
void send_updated_host_info_if_required();
void propagate_config_snapshot_to_internal_components();
+ // Additional implementations of TickableStripe:
+ void update_distribution_config(const BucketSpaceDistributionConfigs& new_configs) override;
+ void update_total_distributor_config(std::shared_ptr<const DistributorConfiguration> config) override;
+ void set_pending_cluster_state_bundle(const lib::ClusterStateBundle& pending_state) override;
+ void clear_pending_cluster_state_bundle() override;
+ void enable_cluster_state_bundle(const lib::ClusterStateBundle& new_state) override;
+ void notify_distribution_change_enabled() override;
+ PotentialDataLossReport remove_superfluous_buckets(document::BucketSpace bucket_space,
+ const lib::ClusterState& new_state,
+ bool is_distribution_change) override;
+ void merge_entries_into_db(document::BucketSpace bucket_space,
+ api::Timestamp gathered_at_timestamp,
+ const lib::Distribution& distribution,
+ const lib::ClusterState& new_state,
+ const char* storage_up_states,
+ const std::unordered_set<uint16_t>& outdated_nodes,
+ const std::vector<dbtransition::Entry>& entries) override;
+ void update_read_snapshot_before_db_pruning() override;
+ void update_read_snapshot_after_db_pruning(const lib::ClusterStateBundle& new_state) override;
+ void update_read_snapshot_after_activation(const lib::ClusterStateBundle& activated_state) override;
+ void clear_read_only_bucket_repo_databases() override;
+ void report_bucket_db_status(document::BucketSpace bucket_space, std::ostream& out) const override;
+ void report_single_bucket_requests(vespalib::xml::XmlOutputStream& xos) const override;
+ void report_delayed_single_bucket_requests(vespalib::xml::XmlOutputStream& xos) const override;
+
lib::ClusterStateBundle _clusterStateBundle;
std::unique_ptr<DistributorBucketSpaceRepo> _bucketSpaceRepo;
// Read-only bucket space repo with DBs that only contain buckets transiently
@@ -283,6 +308,7 @@ private:
StatusReporterDelegate _bucketDBStatusDelegate;
IdealStateManager _idealStateManager;
ChainedMessageSender& _messageSender;
+ StripeHostInfoNotifier& _stripe_host_info_notifier;
ExternalOperationHandler _externalOperationHandler;
std::shared_ptr<lib::Distribution> _distribution;
@@ -300,6 +326,7 @@ private:
std::vector<std::shared_ptr<api::StorageMessage>>,
IndirectHigherPriority
>;
+ mutable std::mutex _external_message_mutex;
MessageQueue _messageQueue;
ClientRequestPriorityQueue _client_request_priority_queue;
MessageQueue _fetchedMessages;
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp b/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp
index aef126b8318..59029dec66a 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp
@@ -49,7 +49,7 @@ DistributorStripeComponent::enumerateUnavailableNodes(
const document::Bucket& bucket,
const std::vector<BucketCopy>& candidates) const
{
- const auto* up_states = _distributor.getStorageNodeUpStates();
+ const auto* up_states = storage_node_up_states();
for (uint32_t i = 0; i < candidates.size(); ++i) {
const BucketCopy& copy(candidates[i]);
const lib::NodeState& ns(
@@ -257,7 +257,7 @@ DistributorStripeComponent::has_pending_message(uint16_t node_index,
const document::Bucket& bucket,
uint32_t message_type) const
{
- const auto& sender = static_cast<const DistributorMessageSender&>(getDistributor());
+ const auto& sender = static_cast<const DistributorStripeMessageSender&>(getDistributor());
return sender.getPendingMessageTracker().hasPendingMessage(node_index, bucket, message_type);
}
@@ -273,7 +273,7 @@ DistributorStripeComponent::storage_node_is_up(document::BucketSpace bucket_spac
const lib::NodeState& ns = cluster_state_bundle().getDerivedClusterState(bucket_space)->getNodeState(
lib::Node(lib::NodeType::STORAGE, node_index));
- return ns.getState().oneOf(_distributor.getStorageNodeUpStates());
+ return ns.getState().oneOf(storage_node_up_states());
}
std::unique_ptr<document::select::Node>
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_component.h b/storage/src/vespa/storage/distributor/distributor_stripe_component.h
index 38fcb4ffef3..31ee9ca88d2 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe_component.h
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_component.h
@@ -158,8 +158,8 @@ public:
*/
bool storage_node_is_up(document::BucketSpace bucket_space, uint32_t node_index) const override;
- const char* storage_node_up_states() const override {
- return getDistributor().getStorageNodeUpStates();
+ const BucketGcTimeCalculator::BucketIdHasher& bucket_id_hasher() const override {
+ return getDistributor().getBucketIdHasher();
}
// Implements DocumentSelectionParser
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_interface.h b/storage/src/vespa/storage/distributor/distributor_stripe_interface.h
index d83acfabffc..bd9a4e1de57 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe_interface.h
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_interface.h
@@ -21,7 +21,7 @@ class PendingMessageTracker;
/**
* TODO STRIPE add class comment.
*/
-class DistributorStripeInterface : public DistributorMessageSender
+class DistributorStripeInterface : public DistributorStripeMessageSender
{
public:
virtual PendingMessageTracker& getPendingMessageTracker() = 0;
@@ -59,7 +59,6 @@ public:
*/
virtual bool initializing() const = 0;
virtual void handleCompletedMerge(const std::shared_ptr<api::MergeBucketReply>&) = 0;
- virtual const char* getStorageNodeUpStates() const = 0;
virtual const DistributorConfiguration& getConfig() const = 0;
virtual ChainedMessageSender& getMessageSender() = 0;
virtual const BucketGcTimeCalculator::BucketIdHasher& getBucketIdHasher() const = 0;
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_operation_context.h b/storage/src/vespa/storage/distributor/distributor_stripe_operation_context.h
index 8a62a102cc0..8419abeadaa 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe_operation_context.h
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_operation_context.h
@@ -2,7 +2,9 @@
#pragma once
+#include "bucketgctimecalculator.h"
#include "bucketownership.h"
+#include "distributor_operation_context.h"
#include "operation_routing_snapshot.h"
#include <vespa/document/bucket/bucketspace.h>
#include <vespa/storage/bucketdb/bucketdatabase.h>
@@ -11,21 +13,16 @@
namespace document { class Bucket; }
-namespace storage { class DistributorConfiguration; }
-namespace storage::lib { class ClusterStateBundle; }
-
namespace storage::distributor {
-class DistributorBucketSpaceRepo;
class PendingMessageTracker;
/**
* Interface with functionality that is used when handling distributor stripe operations.
*/
-class DistributorStripeOperationContext {
+class DistributorStripeOperationContext : public DistributorOperationContext {
public:
virtual ~DistributorStripeOperationContext() {}
- virtual api::Timestamp generate_unique_timestamp() = 0;
virtual void update_bucket_database(const document::Bucket& bucket,
const BucketCopy& changed_node,
uint32_t update_flags = 0) = 0;
@@ -35,15 +32,10 @@ public:
virtual void remove_node_from_bucket_database(const document::Bucket& bucket, uint16_t node_index) = 0;
virtual void remove_nodes_from_bucket_database(const document::Bucket& bucket,
const std::vector<uint16_t>& nodes) = 0;
- virtual const DistributorBucketSpaceRepo& bucket_space_repo() const noexcept= 0;
- virtual DistributorBucketSpaceRepo& bucket_space_repo() noexcept = 0;
- virtual const DistributorBucketSpaceRepo& read_only_bucket_space_repo() const noexcept = 0;
- virtual DistributorBucketSpaceRepo& read_only_bucket_space_repo() noexcept = 0;
virtual document::BucketId make_split_bit_constrained_bucket_id(const document::DocumentId& docId) const = 0;
virtual void recheck_bucket_info(uint16_t node_index, const document::Bucket& bucket) = 0;
virtual document::BucketId get_sibling(const document::BucketId& bid) const = 0;
- virtual const DistributorConfiguration& distributor_config() const noexcept = 0;
virtual void send_inline_split_if_bucket_too_large(document::BucketSpace bucket_space,
const BucketDatabase::Entry& entry,
uint8_t pri) = 0;
@@ -55,10 +47,7 @@ public:
virtual const lib::ClusterState* pending_cluster_state_or_null(const document::BucketSpace& bucket_space) const = 0;
virtual const lib::ClusterStateBundle& cluster_state_bundle() const = 0;
virtual bool storage_node_is_up(document::BucketSpace bucket_space, uint32_t node_index) const = 0;
-
- // TODO: Move to being a free function instead.
- virtual const char* storage_node_up_states() const = 0;
-
+ virtual const BucketGcTimeCalculator::BucketIdHasher& bucket_id_hasher() const = 0;
};
}
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_pool.cpp b/storage/src/vespa/storage/distributor/distributor_stripe_pool.cpp
new file mode 100644
index 00000000000..715d95e70fb
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_pool.cpp
@@ -0,0 +1,109 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include "distributor_stripe_pool.h"
+#include "distributor_stripe_thread.h"
+#include <vespa/vespalib/util/size_literals.h>
+#include <cassert>
+
+namespace storage::distributor {
+
+DistributorStripePool::DistributorStripePool()
+ : _thread_pool(512_Ki),
+ _stripes(),
+ _threads(),
+ _mutex(),
+ _parker_cond(),
+ _parked_threads(0),
+ _bootstrap_tick_wait_duration(1ms),
+ _bootstrap_ticks_before_wait(10),
+ _stopped(false)
+{}
+
+DistributorStripePool::~DistributorStripePool() {
+ if (!_stopped) {
+ stop_and_join();
+ }
+}
+
+void DistributorStripePool::park_all_threads() noexcept {
+ assert(!_stripes.empty());
+ // Thread pool is not dynamic and signal_wants_park() is thread safe.
+ for (auto& s : _stripes) {
+ s->signal_wants_park();
+ }
+ std::unique_lock lock(_mutex);
+ _parker_cond.wait(lock, [this]{ return (_parked_threads == _threads.size()); });
+}
+
+void DistributorStripePool::unpark_all_threads() noexcept {
+ // Thread pool is not dynamic and unpark_thread() is thread safe.
+ for (auto& s : _stripes) {
+ s->unpark_thread();
+ }
+ // We have a full unpark barrier here as a pragmatic way to avoid potential ABA issues
+ // caused by back-to-back park->unpark->park calls causing issues with interleaving
+ // up-counts and down-counts for thread parking/unparking.
+    // It's fully possible to avoid this, but requires a somewhat more finicky solution for
+ // cross-thread coordination.
+ std::unique_lock lock(_mutex);
+ _parker_cond.wait(lock, [this]{ return (_parked_threads == 0); });
+}
+
+void DistributorStripePool::park_thread_until_released(DistributorStripeThread& thread) noexcept {
+ std::unique_lock lock(_mutex);
+ assert(_parked_threads < _threads.size());
+ ++_parked_threads;
+ if (_parked_threads == _threads.size()) {
+ _parker_cond.notify_all();
+ }
+ lock.unlock();
+ thread.wait_until_unparked();
+ lock.lock();
+ --_parked_threads;
+ if (_parked_threads == 0) {
+ _parker_cond.notify_all();
+ }
+};
+
+void DistributorStripePool::start(const std::vector<TickableStripe*>& stripes) {
+ assert(!stripes.empty());
+ assert(_stripes.empty() && _threads.empty());
+ _stripes.reserve(stripes.size());
+ _threads.reserve(stripes.size());
+
+ for (auto* s : stripes) {
+ auto new_stripe = std::make_unique<DistributorStripeThread>(*s, *this);
+ new_stripe->set_tick_wait_duration(_bootstrap_tick_wait_duration);
+ new_stripe->set_ticks_before_wait(_bootstrap_ticks_before_wait);
+ _stripes.emplace_back(std::move(new_stripe));
+ }
+ for (auto& s : _stripes) {
+ _threads.emplace_back(_thread_pool.NewThread(s.get()));
+ }
+}
+
+void DistributorStripePool::stop_and_join() {
+ for (auto& s : _stripes) {
+ s->signal_should_stop();
+ }
+ for (auto* t : _threads) {
+ t->Join();
+ }
+ _stopped = true;
+}
+
+void DistributorStripePool::set_tick_wait_duration(vespalib::duration new_tick_wait_duration) noexcept {
+ _bootstrap_tick_wait_duration = new_tick_wait_duration;
+ // Stripe set may be empty if start() hasn't been called yet.
+ for (auto& s : _stripes) {
+ s->set_tick_wait_duration(new_tick_wait_duration);
+ }
+}
+void DistributorStripePool::set_ticks_before_wait(uint32_t new_ticks_before_wait) noexcept {
+ _bootstrap_ticks_before_wait = new_ticks_before_wait;
+ // Stripe set may be empty if start() hasn't been called yet.
+ for (auto& s : _stripes) {
+ s->set_ticks_before_wait(new_ticks_before_wait);
+ }
+}
+
+}
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_pool.h b/storage/src/vespa/storage/distributor/distributor_stripe_pool.h
new file mode 100644
index 00000000000..5e72cb47fc4
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_pool.h
@@ -0,0 +1,89 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <vespa/fastos/thread.h>
+#include <vespa/vespalib/util/time.h>
+#include <atomic>
+#include <condition_variable>
+#include <mutex>
+#include <vector>
+
+namespace storage::distributor {
+
+class DistributorStripeThread;
+class TickableStripe;
+
+/**
+ * Management and coordination of a pool of distributor stripe threads.
+ *
+ * Aside from handling the threads themselves, the pool crucially offers a well-defined
+ * thread synchronization/coordination API meant for ensuring all stripe threads are in
+ * a well defined state before accessing them:
+ *
+ * - park_all_threads() returns once ALL threads are in a "parked" state where they
+ * may not race with any operations performed on them by the caller. In essence, this
+ * acts as if a (very large) mutex is held by the caller that prevents the stripe
+ * from doing anything of its own volition. Must be followed by:
+ * - unpark_all_threads() returns once ALL threads have been confirmed released from
+ * a previously parked state. Must be called after park_all_threads().
+ *
+ * Neither park_all_threads() nor unpark_all_threads() may be called prior to calling start().
+ *
+ * It's possible to set stripe thread tick-specific options (wait duration, ticks before
+ * wait) both before and after start() is called. The options will be propagated to any
+ * running stripe threads in a thread-safe, lock-free manner.
+ */
+class DistributorStripePool {
+ using StripeVector = std::vector<std::unique_ptr<DistributorStripeThread>>;
+ using NativeThreadVector = std::vector<FastOS_ThreadInterface*>;
+
+ FastOS_ThreadPool _thread_pool;
+ StripeVector _stripes;
+ NativeThreadVector _threads;
+ std::mutex _mutex;
+ std::condition_variable _parker_cond;
+    size_t                   _parked_threads; // Must be protected by _mutex
+ vespalib::duration _bootstrap_tick_wait_duration;
+ uint32_t _bootstrap_ticks_before_wait;
+ bool _stopped;
+
+ friend class DistributorStripeThread;
+public:
+ using const_iterator = StripeVector::const_iterator;
+
+ DistributorStripePool();
+ ~DistributorStripePool();
+
+ // Set up the stripe pool with a 1-1 relationship between the provided
+ // stripes and running threads. Can only be called once per pool.
+ //
+ // Precondition: stripes.size() > 0
+ void start(const std::vector<TickableStripe*>& stripes);
+ void stop_and_join();
+
+ const_iterator begin() const noexcept { return _stripes.begin(); }
+ const_iterator end() const noexcept { return _stripes.end(); }
+
+ const_iterator cbegin() const noexcept { return _stripes.cbegin(); }
+ const_iterator cend() const noexcept { return _stripes.cend(); }
+
+ void park_all_threads() noexcept;
+ void unpark_all_threads() noexcept;
+
+ [[nodiscard]] const DistributorStripeThread& stripe_thread(size_t idx) const noexcept {
+ return *_stripes[idx];
+ }
+ [[nodiscard]] DistributorStripeThread& stripe_thread(size_t idx) noexcept {
+ return *_stripes[idx];
+ }
+ [[nodiscard]] size_t stripe_count() const noexcept { return _stripes.size(); }
+ [[nodiscard]] bool is_stopped() const noexcept { return _stopped; }
+
+ // Applies to all threads. May be called both before and after start(). Thread safe.
+ void set_tick_wait_duration(vespalib::duration new_tick_wait_duration) noexcept;
+ void set_ticks_before_wait(uint32_t new_ticks_before_wait) noexcept;
+private:
+ void park_thread_until_released(DistributorStripeThread& thread) noexcept;
+};
+
+}
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_thread.cpp b/storage/src/vespa/storage/distributor/distributor_stripe_thread.cpp
new file mode 100644
index 00000000000..372736b8d7d
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_thread.cpp
@@ -0,0 +1,105 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include "distributor_stripe_thread.h"
+#include "distributor_stripe.h"
+#include "distributor_stripe_pool.h"
+#include "tickable_stripe.h"
+#include <cassert>
+
+namespace storage::distributor {
+
+DistributorStripeThread::DistributorStripeThread(TickableStripe& stripe,
+ DistributorStripePool& stripe_pool)
+ : _stripe(stripe),
+ _stripe_pool(stripe_pool),
+ _tick_wait_duration(1ms),
+ _mutex(),
+ _event_cond(),
+ _park_cond(),
+ _ticks_before_wait(10),
+ _should_park(false),
+ _should_stop(false),
+ _waiting_for_event(false)
+{}
+
+DistributorStripeThread::~DistributorStripeThread() = default;
+
+void DistributorStripeThread::Run(FastOS_ThreadInterface*, void*) {
+ uint32_t tick_waits_inhibited = 0;
+ while (!should_stop_thread_relaxed()) {
+ while (should_park_relaxed()) {
+ _stripe_pool.park_thread_until_released(*this);
+ }
+ // TODO consider enum to only trigger "ticks before wait"-behavior when maintenance was done
+ const bool did_work = _stripe.tick();
+ if (did_work) {
+ tick_waits_inhibited = 0;
+ } else if (tick_waits_inhibited >= ticks_before_wait_relaxed()) {
+ wait_until_event_notified_or_timed_out();
+ tick_waits_inhibited = 0;
+ } else {
+ ++tick_waits_inhibited;
+ }
+ }
+}
+
+void DistributorStripeThread::signal_wants_park() noexcept {
+ std::lock_guard lock(_mutex);
+ assert(!should_park_relaxed());
+ _should_park.store(true, std::memory_order_relaxed);
+ if (_waiting_for_event) {
+ _event_cond.notify_one(); // TODO after unlock?
+ }
+}
+
+void DistributorStripeThread::unpark_thread() noexcept {
+ std::lock_guard lock(_mutex);
+ assert(should_park_relaxed());
+ _should_park.store(false, std::memory_order_relaxed);
+ _park_cond.notify_one(); // TODO after unlock?
+}
+
+void DistributorStripeThread::wait_until_event_notified_or_timed_out() noexcept {
+ std::unique_lock lock(_mutex);
+ if (should_stop_thread_relaxed() || should_park_relaxed()) {
+ return;
+ }
+ _waiting_for_event = true;
+ _event_cond.wait_for(lock, tick_wait_duration_relaxed());
+ _waiting_for_event = false;
+}
+
+void DistributorStripeThread::wait_until_unparked() noexcept {
+ std::unique_lock lock(_mutex);
+ // _should_park is always written within _mutex, relaxed load is safe.
+ _park_cond.wait(lock, [this]{ return !should_park_relaxed(); });
+}
+
+void DistributorStripeThread::notify_event_has_triggered() noexcept {
+ // TODO mutex protect and add flag for "should tick immediately next time"
+ // TODO only notify if _waiting_for_event == true
+ _event_cond.notify_one();
+}
+
+void DistributorStripeThread::signal_should_stop() noexcept {
+ std::unique_lock lock(_mutex);
+ assert(!should_park_relaxed());
+ _should_stop.store(true, std::memory_order_relaxed);
+ if (_waiting_for_event) {
+ _event_cond.notify_one();
+ }
+ // TODO if we ever need it, handle pending thread park. For now we assume that
+ // the caller never attempts to concurrently park and stop threads.
+}
+
+void DistributorStripeThread::set_tick_wait_duration(vespalib::duration new_tick_wait_duration) noexcept {
+ static_assert(AtomicDuration::is_always_lock_free);
+ // No memory ordering required for a "lazy" single value setting such as the tick duration
+ _tick_wait_duration.store(new_tick_wait_duration, std::memory_order_relaxed);
+}
+
+void DistributorStripeThread::set_ticks_before_wait(uint32_t new_ticks_before_wait) noexcept {
+ _ticks_before_wait.store(new_ticks_before_wait, std::memory_order_relaxed);
+}
+
+
+}
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_thread.h b/storage/src/vespa/storage/distributor/distributor_stripe_thread.h
new file mode 100644
index 00000000000..60f10889afd
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_thread.h
@@ -0,0 +1,84 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <vespa/fastos/thread.h>
+#include <vespa/vespalib/util/time.h>
+#include <atomic>
+#include <condition_variable>
+#include <mutex>
+#include <vector>
+
+namespace storage::distributor {
+
+class DistributorStripe;
+class DistributorStripePool;
+class TickableStripe;
+
+/**
+ * A DistributorStripeThread provides threading resources for a single distributor stripe
+ * and the means of synchronizing access towards it through a DistributorStripePool.
+ *
+ * A DistributorStripeThread instance is bidirectionally bound to a particular pool and
+ * should therefore always be created by the pool itself (never standalone).
+ */
+class DistributorStripeThread : private FastOS_Runnable {
+ using AtomicDuration = std::atomic<vespalib::duration>;
+
+ TickableStripe& _stripe;
+ DistributorStripePool& _stripe_pool;
+ AtomicDuration _tick_wait_duration;
+ std::mutex _mutex;
+ std::condition_variable _event_cond;
+ std::condition_variable _park_cond;
+ std::atomic<uint32_t> _ticks_before_wait;
+ std::atomic<bool> _should_park;
+ std::atomic<bool> _should_stop;
+ bool _waiting_for_event;
+
+ friend class DistributorStripePool;
+public:
+ DistributorStripeThread(TickableStripe& stripe,
+ DistributorStripePool& stripe_pool);
+ ~DistributorStripeThread();
+
+ void Run(FastOS_ThreadInterface*, void*) override;
+
+ // Wakes up stripe thread if it's currently waiting for an external event to be triggered,
+ // such as the arrival of a new RPC message. If thread is parked this call will have no
+ // effect.
+ void notify_event_has_triggered() noexcept;
+
+ void set_tick_wait_duration(vespalib::duration new_tick_wait_duration) noexcept;
+ void set_ticks_before_wait(uint32_t new_ticks_before_wait) noexcept;
+
+ TickableStripe* operator->() noexcept { return &_stripe; }
+ const TickableStripe* operator->() const noexcept { return &_stripe; }
+
+ TickableStripe& stripe() noexcept { return _stripe; }
+ const TickableStripe& stripe() const noexcept { return _stripe; }
+private:
+ [[nodiscard]] bool should_stop_thread_relaxed() const noexcept {
+ return _should_stop.load(std::memory_order_relaxed);
+ }
+
+ [[nodiscard]] bool should_park_relaxed() const noexcept {
+ return _should_park.load(std::memory_order_relaxed);
+ }
+
+ [[nodiscard]] vespalib::duration tick_wait_duration_relaxed() const noexcept {
+ return _tick_wait_duration.load(std::memory_order_relaxed);
+ }
+
+ [[nodiscard]] uint32_t ticks_before_wait_relaxed() const noexcept {
+ return _ticks_before_wait.load(std::memory_order_relaxed);
+ }
+
+ void signal_wants_park() noexcept;
+ void unpark_thread() noexcept;
+ void wait_until_event_notified_or_timed_out() noexcept;
+ void wait_until_unparked() noexcept;
+
+ void signal_should_stop() noexcept;
+};
+
+}
diff --git a/storage/src/vespa/storage/distributor/distributormessagesender.h b/storage/src/vespa/storage/distributor/distributormessagesender.h
index 54be92dc99a..c39e3e8fe8a 100644
--- a/storage/src/vespa/storage/distributor/distributormessagesender.h
+++ b/storage/src/vespa/storage/distributor/distributormessagesender.h
@@ -21,7 +21,11 @@ public:
const std::shared_ptr<api::StorageCommand>& cmd, bool useDocumentAPI = false);
virtual int getDistributorIndex() const = 0;
- virtual const ClusterContext & cluster_context() const = 0;
+ virtual const ClusterContext& cluster_context() const = 0;
+};
+
+class DistributorStripeMessageSender : public DistributorMessageSender {
+public:
virtual const PendingMessageTracker& getPendingMessageTracker() const = 0;
virtual const OperationSequencer& operation_sequencer() const noexcept = 0;
};
diff --git a/storage/src/vespa/storage/distributor/externaloperationhandler.cpp b/storage/src/vespa/storage/distributor/externaloperationhandler.cpp
index 2acd04f7eef..e703c5bfdb8 100644
--- a/storage/src/vespa/storage/distributor/externaloperationhandler.cpp
+++ b/storage/src/vespa/storage/distributor/externaloperationhandler.cpp
@@ -31,7 +31,7 @@ LOG_SETUP(".distributor.manager");
namespace storage::distributor {
-class DirectDispatchSender : public DistributorMessageSender {
+class DirectDispatchSender : public DistributorStripeMessageSender {
DistributorNodeContext& _node_ctx;
NonTrackingMessageSender& _msg_sender;
public:
diff --git a/storage/src/vespa/storage/distributor/idealstatemanager.cpp b/storage/src/vespa/storage/distributor/idealstatemanager.cpp
index a090f00300b..7bebe4c001a 100644
--- a/storage/src/vespa/storage/distributor/idealstatemanager.cpp
+++ b/storage/src/vespa/storage/distributor/idealstatemanager.cpp
@@ -29,13 +29,11 @@ IdealStateManager::IdealStateManager(
DistributorBucketSpaceRepo& bucketSpaceRepo,
DistributorBucketSpaceRepo& readOnlyBucketSpaceRepo,
DistributorComponentRegister& compReg)
- : HtmlStatusReporter("idealstateman", "Ideal state manager"),
- _metrics(new IdealStateMetricSet),
+ : _metrics(new IdealStateMetricSet),
_distributorComponent(owner, bucketSpaceRepo, readOnlyBucketSpaceRepo, compReg, "Ideal state manager"),
_bucketSpaceRepo(bucketSpaceRepo),
_has_logged_phantom_replica_warning(false)
{
- _distributorComponent.registerStatusPage(*this);
_distributorComponent.registerMetric(*_metrics);
LOG(debug, "Adding BucketStateStateChecker to state checkers");
@@ -63,7 +61,7 @@ IdealStateManager::print(std::ostream& out, bool verbose,
bool
IdealStateManager::iAmUp() const
{
- Node node(NodeType::DISTRIBUTOR, _distributorComponent.getIndex());
+ Node node(NodeType::DISTRIBUTOR, node_context().node_index());
// Assume that derived cluster states agree on distributor node being up
const auto &state = *operation_context().cluster_state_bundle().getBaselineClusterState();
const lib::State &nodeState = state.getNodeState(node).getState();
@@ -123,7 +121,7 @@ IdealStateManager::runStateCheckers(StateChecker::Context& c) const
// We go through _all_ active state checkers so that statistics can be
// collected across all checkers, not just the ones that are highest pri.
for (uint32_t i = 0; i < _stateCheckers.size(); i++) {
- if (!_distributorComponent.getDistributor().getConfig().stateCheckerIsActive(
+ if (!operation_context().distributor_config().stateCheckerIsActive(
_stateCheckers[i]->getName()))
{
LOG(spam, "Skipping state checker %s",
@@ -166,7 +164,7 @@ IdealStateManager::generateHighestPriority(
NodeMaintenanceStatsTracker& statsTracker) const
{
auto &distributorBucketSpace(_bucketSpaceRepo.get(bucket.getBucketSpace()));
- StateChecker::Context c(_distributorComponent, distributorBucketSpace, statsTracker, bucket);
+ StateChecker::Context c(node_context(), operation_context(), distributorBucketSpace, statsTracker, bucket);
fillParentAndChildBuckets(c);
fillSiblingBucket(c);
@@ -203,7 +201,7 @@ IdealStateManager::generateInterceptingSplit(BucketSpace bucketSpace,
NodeMaintenanceStatsTracker statsTracker;
document::Bucket bucket(bucketSpace, e.getBucketId());
auto &distributorBucketSpace(_bucketSpaceRepo.get(bucket.getBucketSpace()));
- StateChecker::Context c(_distributorComponent, distributorBucketSpace, statsTracker, bucket);
+ StateChecker::Context c(node_context(), operation_context(), distributorBucketSpace, statsTracker, bucket);
if (e.valid()) {
c.entry = e;
@@ -238,7 +236,7 @@ IdealStateManager::generateAll(const document::Bucket &bucket,
NodeMaintenanceStatsTracker& statsTracker) const
{
auto &distributorBucketSpace(_bucketSpaceRepo.get(bucket.getBucketSpace()));
- StateChecker::Context c(_distributorComponent, distributorBucketSpace, statsTracker, bucket);
+ StateChecker::Context c(node_context(), operation_context(), distributorBucketSpace, statsTracker, bucket);
fillParentAndChildBuckets(c);
fillSiblingBucket(c);
BucketDatabase::Entry* e(getEntryForPrimaryBucket(c));
@@ -288,8 +286,6 @@ IdealStateManager::getBucketStatus(
}
void IdealStateManager::dump_bucket_space_db_status(document::BucketSpace bucket_space, std::ostream& out) const {
- out << "<h2>" << document::FixedBucketSpaces::to_string(bucket_space) << " - " << bucket_space << "</h2>\n";
-
StatusBucketVisitor proc(*this, bucket_space, out);
auto &distributorBucketSpace(_bucketSpaceRepo.get(bucket_space));
distributorBucketSpace.getBucketDatabase().forEach(proc);
@@ -297,9 +293,10 @@ void IdealStateManager::dump_bucket_space_db_status(document::BucketSpace bucket
void IdealStateManager::getBucketStatus(std::ostream& out) const {
LOG(debug, "Dumping bucket database valid at cluster state version %u",
- _distributorComponent.getDistributor().getClusterStateBundle().getVersion());
+ operation_context().cluster_state_bundle().getVersion());
for (auto& space : _bucketSpaceRepo) {
+ out << "<h2>" << document::FixedBucketSpaces::to_string(space.first) << " - " << space.first << "</h2>\n";
dump_bucket_space_db_status(space.first, out);
}
}
diff --git a/storage/src/vespa/storage/distributor/idealstatemanager.h b/storage/src/vespa/storage/distributor/idealstatemanager.h
index ebcaad4cf96..ab7a64142f6 100644
--- a/storage/src/vespa/storage/distributor/idealstatemanager.h
+++ b/storage/src/vespa/storage/distributor/idealstatemanager.h
@@ -28,8 +28,7 @@ class SplitBucketStateChecker;
may generate Operations. Once one does so, the rest of the state checkers
aren't run.
*/
-class IdealStateManager : public framework::HtmlStatusReporter,
- public MaintenancePriorityGenerator,
+class IdealStateManager : public MaintenancePriorityGenerator,
public MaintenanceOperationGenerator
{
public:
@@ -68,13 +67,10 @@ public:
IdealStateMetricSet& getMetrics() { return *_metrics; }
- void getBucketStatus(std::ostream& out) const;
- // HtmlStatusReporter
- void reportHtmlStatus(
- std::ostream& out, const framework::HttpUrlPath&) const override {
- getBucketStatus(out);
- }
+ void dump_bucket_space_db_status(document::BucketSpace bucket_space, std::ostream& out) const;
+
+ void getBucketStatus(std::ostream& out) const;
const DistributorNodeContext& node_context() const { return _distributorComponent; }
DistributorStripeOperationContext& operation_context() { return _distributorComponent; }
@@ -126,7 +122,6 @@ private:
void getBucketStatus(document::BucketSpace bucketSpace, const BucketDatabase::ConstEntryRef& entry,
NodeMaintenanceStatsTracker& statsTracker, std::ostream& out) const;
- void dump_bucket_space_db_status(document::BucketSpace bucket_space, std::ostream& out) const;
};
}
diff --git a/storage/src/vespa/storage/distributor/legacy_single_stripe_accessor.cpp b/storage/src/vespa/storage/distributor/legacy_single_stripe_accessor.cpp
deleted file mode 100644
index 0c6c0206608..00000000000
--- a/storage/src/vespa/storage/distributor/legacy_single_stripe_accessor.cpp
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include "legacy_single_stripe_accessor.h"
-#include "distributor_stripe.h"
-
-namespace storage::distributor {
-
-LegacySingleStripeAccessGuard::LegacySingleStripeAccessGuard(LegacySingleStripeAccessor& accessor,
- DistributorStripe& stripe)
- : _accessor(accessor),
- _stripe(stripe)
-{}
-
-LegacySingleStripeAccessGuard::~LegacySingleStripeAccessGuard() {
- _accessor.mark_guard_released();
-}
-
-void LegacySingleStripeAccessGuard::update_total_distributor_config(std::shared_ptr<const DistributorConfiguration> config) {
- _stripe.update_total_distributor_config(std::move(config));
-}
-
-void LegacySingleStripeAccessGuard::update_distribution_config(const BucketSpaceDistributionConfigs& new_configs) {
- _stripe.update_distribution_config(new_configs);
-}
-
-void LegacySingleStripeAccessGuard::set_pending_cluster_state_bundle(const lib::ClusterStateBundle& pending_state) {
- _stripe.getBucketSpaceRepo().set_pending_cluster_state_bundle(pending_state);
- // TODO STRIPE also read only repo?
-}
-
-void LegacySingleStripeAccessGuard::clear_pending_cluster_state_bundle() {
- _stripe.getBucketSpaceRepo().clear_pending_cluster_state_bundle();
- // TODO STRIPE also read only repo?
-}
-
-void LegacySingleStripeAccessGuard::enable_cluster_state_bundle(const lib::ClusterStateBundle& new_state) {
- _stripe.enableClusterStateBundle(new_state);
-}
-
-void LegacySingleStripeAccessGuard::notify_distribution_change_enabled() {
- _stripe.notifyDistributionChangeEnabled();
-}
-
-PotentialDataLossReport
-LegacySingleStripeAccessGuard::remove_superfluous_buckets(document::BucketSpace bucket_space,
- const lib::ClusterState& new_state,
- bool is_distribution_change)
-{
- return _stripe.bucket_db_updater().remove_superfluous_buckets(bucket_space, new_state, is_distribution_change);
-}
-
-void
-LegacySingleStripeAccessGuard::merge_entries_into_db(document::BucketSpace bucket_space,
- api::Timestamp gathered_at_timestamp,
- const lib::Distribution& distribution,
- const lib::ClusterState& new_state,
- const char* storage_up_states,
- const std::unordered_set<uint16_t>& outdated_nodes,
- const std::vector<dbtransition::Entry>& entries)
-{
- _stripe.bucket_db_updater().merge_entries_into_db(bucket_space, gathered_at_timestamp, distribution,
- new_state, storage_up_states, outdated_nodes, entries);
-}
-
-void LegacySingleStripeAccessGuard::update_read_snapshot_before_db_pruning() {
- _stripe.bucket_db_updater().update_read_snapshot_before_db_pruning();
-}
-
-void LegacySingleStripeAccessGuard::update_read_snapshot_after_db_pruning(const lib::ClusterStateBundle& new_state) {
- _stripe.bucket_db_updater().update_read_snapshot_after_db_pruning(new_state);
-}
-
-void LegacySingleStripeAccessGuard::update_read_snapshot_after_activation(const lib::ClusterStateBundle& activated_state) {
- _stripe.bucket_db_updater().update_read_snapshot_after_activation(activated_state);
-}
-
-void LegacySingleStripeAccessGuard::clear_read_only_bucket_repo_databases() {
- _stripe.bucket_db_updater().clearReadOnlyBucketRepoDatabases();
-}
-
-std::unique_ptr<StripeAccessGuard> LegacySingleStripeAccessor::rendezvous_and_hold_all() {
- // For sanity checking during development.
- assert(!_guard_held);
- _guard_held = true;
- return std::make_unique<LegacySingleStripeAccessGuard>(*this, _stripe);
-}
-
-void LegacySingleStripeAccessor::mark_guard_released() {
- assert(_guard_held);
- _guard_held = false;
-}
-
-}
diff --git a/storage/src/vespa/storage/distributor/legacy_single_stripe_accessor.h b/storage/src/vespa/storage/distributor/legacy_single_stripe_accessor.h
deleted file mode 100644
index caf1e397e5b..00000000000
--- a/storage/src/vespa/storage/distributor/legacy_single_stripe_accessor.h
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#pragma once
-
-#include "stripe_access_guard.h"
-
-namespace storage::distributor {
-
-class DistributorStripe;
-class LegacySingleStripeAccessor;
-
-/**
- * Very simple stripe access guard which expects the caller and its single stripe to run in the
- * same thread. This means there's no actual striping of operations or any thread synchronization
- * performed. Only intended as a stop-gap while we have legacy stripe behavior.
- */
-class LegacySingleStripeAccessGuard : public StripeAccessGuard {
- LegacySingleStripeAccessor& _accessor;
- DistributorStripe& _stripe;
-public:
- LegacySingleStripeAccessGuard(LegacySingleStripeAccessor& accessor,
- DistributorStripe& stripe);
- ~LegacySingleStripeAccessGuard() override;
-
- void update_total_distributor_config(std::shared_ptr<const DistributorConfiguration> config) override;
-
- void update_distribution_config(const BucketSpaceDistributionConfigs& new_configs) override;
- void set_pending_cluster_state_bundle(const lib::ClusterStateBundle& pending_state) override;
- void clear_pending_cluster_state_bundle() override;
- void enable_cluster_state_bundle(const lib::ClusterStateBundle& new_state) override;
- void notify_distribution_change_enabled() override;
-
- PotentialDataLossReport remove_superfluous_buckets(document::BucketSpace bucket_space,
- const lib::ClusterState& new_state,
- bool is_distribution_change) override;
- void merge_entries_into_db(document::BucketSpace bucket_space,
- api::Timestamp gathered_at_timestamp,
- const lib::Distribution& distribution,
- const lib::ClusterState& new_state,
- const char* storage_up_states,
- const std::unordered_set<uint16_t>& outdated_nodes,
- const std::vector<dbtransition::Entry>& entries) override;
-
- void update_read_snapshot_before_db_pruning() override;
- void update_read_snapshot_after_db_pruning(const lib::ClusterStateBundle& new_state) override;
- void update_read_snapshot_after_activation(const lib::ClusterStateBundle& activated_state) override;
- void clear_read_only_bucket_repo_databases() override;
-};
-
-/**
- * Impl of StripeAccessor which creates LegacySingleStripeAccessGuards bound to a single stripe.
- */
-class LegacySingleStripeAccessor : public StripeAccessor {
- DistributorStripe& _stripe;
- bool _guard_held;
-
- friend class LegacySingleStripeAccessGuard;
-public:
- explicit LegacySingleStripeAccessor(DistributorStripe& stripe)
- : _stripe(stripe),
- _guard_held(false)
- {}
- ~LegacySingleStripeAccessor() override = default;
-
- std::unique_ptr<StripeAccessGuard> rendezvous_and_hold_all() override;
-private:
- void mark_guard_released();
-};
-
-}
diff --git a/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.cpp b/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.cpp
new file mode 100644
index 00000000000..a5adf732824
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.cpp
@@ -0,0 +1,174 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include "multi_threaded_stripe_access_guard.h"
+#include "distributor_stripe.h"
+#include "distributor_stripe_pool.h"
+#include "distributor_stripe_thread.h"
+
+namespace storage::distributor {
+
+MultiThreadedStripeAccessGuard::MultiThreadedStripeAccessGuard(
+ MultiThreadedStripeAccessor& accessor,
+ DistributorStripePool& stripe_pool)
+ : _accessor(accessor),
+ _stripe_pool(stripe_pool)
+{
+ assert(_stripe_pool.stripe_count() > 0);
+ _stripe_pool.park_all_threads();
+}
+
+MultiThreadedStripeAccessGuard::~MultiThreadedStripeAccessGuard() {
+ _stripe_pool.unpark_all_threads();
+ _accessor.mark_guard_released();
+}
+
+void MultiThreadedStripeAccessGuard::flush_and_close() {
+ for_each_stripe([](TickableStripe& stripe) {
+ stripe.flush_and_close();
+ });
+}
+
+void MultiThreadedStripeAccessGuard::update_total_distributor_config(std::shared_ptr<const DistributorConfiguration> config) {
+ for_each_stripe([&](TickableStripe& stripe) {
+ stripe.update_total_distributor_config(config);
+ });
+}
+
+void MultiThreadedStripeAccessGuard::update_distribution_config(const BucketSpaceDistributionConfigs& new_configs) {
+ for_each_stripe([&](TickableStripe& stripe) {
+ stripe.update_distribution_config(new_configs);
+ });
+}
+
+void MultiThreadedStripeAccessGuard::set_pending_cluster_state_bundle(const lib::ClusterStateBundle& pending_state) {
+ for_each_stripe([&](TickableStripe& stripe) {
+ stripe.set_pending_cluster_state_bundle(pending_state);
+ });
+}
+
+void MultiThreadedStripeAccessGuard::clear_pending_cluster_state_bundle() {
+ for_each_stripe([](TickableStripe& stripe) {
+ stripe.clear_pending_cluster_state_bundle();
+ });
+}
+
+void MultiThreadedStripeAccessGuard::enable_cluster_state_bundle(const lib::ClusterStateBundle& new_state) {
+ for_each_stripe([&](TickableStripe& stripe) {
+ stripe.enable_cluster_state_bundle(new_state);
+ });
+}
+
+void MultiThreadedStripeAccessGuard::notify_distribution_change_enabled() {
+ for_each_stripe([](TickableStripe& stripe) {
+ stripe.notify_distribution_change_enabled();
+ });
+}
+
+PotentialDataLossReport
+MultiThreadedStripeAccessGuard::remove_superfluous_buckets(document::BucketSpace bucket_space,
+ const lib::ClusterState& new_state,
+ bool is_distribution_change)
+{
+ PotentialDataLossReport report;
+ for_each_stripe([&](TickableStripe& stripe) {
+ report.merge(stripe.remove_superfluous_buckets(bucket_space, new_state, is_distribution_change));
+ });
+ return report;
+}
+
+void
+MultiThreadedStripeAccessGuard::merge_entries_into_db(document::BucketSpace bucket_space,
+ api::Timestamp gathered_at_timestamp,
+ const lib::Distribution& distribution,
+ const lib::ClusterState& new_state,
+ const char* storage_up_states,
+ const std::unordered_set<uint16_t>& outdated_nodes,
+ const std::vector<dbtransition::Entry>& entries)
+{
+ // TODO STRIPE multiple stripes
+ first_stripe().merge_entries_into_db(bucket_space, gathered_at_timestamp, distribution,
+ new_state, storage_up_states, outdated_nodes, entries);
+}
+
+void MultiThreadedStripeAccessGuard::update_read_snapshot_before_db_pruning() {
+ for_each_stripe([](TickableStripe& stripe) {
+ stripe.update_read_snapshot_before_db_pruning();
+ });
+}
+
+void MultiThreadedStripeAccessGuard::update_read_snapshot_after_db_pruning(const lib::ClusterStateBundle& new_state) {
+ for_each_stripe([&](TickableStripe& stripe) {
+ stripe.update_read_snapshot_after_db_pruning(new_state);
+ });
+}
+
+void MultiThreadedStripeAccessGuard::update_read_snapshot_after_activation(const lib::ClusterStateBundle& activated_state) {
+ for_each_stripe([&](TickableStripe& stripe) {
+ stripe.update_read_snapshot_after_activation(activated_state);
+ });
+}
+
+void MultiThreadedStripeAccessGuard::clear_read_only_bucket_repo_databases() {
+ for_each_stripe([](TickableStripe& stripe) {
+ stripe.clear_read_only_bucket_repo_databases();
+ });
+}
+
+void MultiThreadedStripeAccessGuard::report_bucket_db_status(document::BucketSpace bucket_space, std::ostream& out) const {
+ for_each_stripe([&](TickableStripe& stripe) {
+ stripe.report_bucket_db_status(bucket_space, out);
+ });
+}
+
+StripeAccessGuard::PendingOperationStats
+MultiThreadedStripeAccessGuard::pending_operation_stats() const {
+ // TODO STRIPE multiple stripes
+ return first_stripe().pending_operation_stats();
+}
+
+void MultiThreadedStripeAccessGuard::report_single_bucket_requests(vespalib::xml::XmlOutputStream& xos) const {
+ for_each_stripe([&](TickableStripe& stripe) {
+ stripe.report_single_bucket_requests(xos);
+ });
+}
+
+void MultiThreadedStripeAccessGuard::report_delayed_single_bucket_requests(vespalib::xml::XmlOutputStream& xos) const {
+ for_each_stripe([&](TickableStripe& stripe) {
+ stripe.report_delayed_single_bucket_requests(xos);
+ });
+}
+
+TickableStripe& MultiThreadedStripeAccessGuard::first_stripe() noexcept {
+ return _stripe_pool.stripe_thread(0).stripe();
+}
+
+const TickableStripe& MultiThreadedStripeAccessGuard::first_stripe() const noexcept {
+ return _stripe_pool.stripe_thread(0).stripe();
+}
+
+template <typename Func>
+void MultiThreadedStripeAccessGuard::for_each_stripe(Func&& f) {
+ for (auto& stripe_thread : _stripe_pool) {
+ f(stripe_thread->stripe());
+ }
+}
+
+template <typename Func>
+void MultiThreadedStripeAccessGuard::for_each_stripe(Func&& f) const {
+ for (const auto& stripe_thread : _stripe_pool) {
+ f(stripe_thread->stripe());
+ }
+}
+
+std::unique_ptr<StripeAccessGuard> MultiThreadedStripeAccessor::rendezvous_and_hold_all() {
+ // For sanity checking of invariant of only one guard being allowed at any given time.
+ assert(!_guard_held);
+ _guard_held = true;
+ return std::make_unique<MultiThreadedStripeAccessGuard>(*this, _stripe_pool);
+}
+
+void MultiThreadedStripeAccessor::mark_guard_released() {
+ assert(_guard_held);
+ _guard_held = false;
+}
+
+}
diff --git a/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.h b/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.h
new file mode 100644
index 00000000000..da5fd8e5f37
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.h
@@ -0,0 +1,94 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include "stripe_access_guard.h"
+
+namespace storage::distributor {
+
+class MultiThreadedStripeAccessor;
+class DistributorStripePool;
+class TickableStripe;
+
+/**
+ * StripeAccessGuard implementation which provides exclusive access to a set of stripes
+ * by ensuring that all stripe threads are safely parked upon guard construction. This
+ * means that as long as a guard exists, access to stripes is guaranteed to not cause
+ * data races.
+ *
+ * Threads are automatically un-parked upon guard destruction.
+ *
+ * At most one guard instance may exist at any given time.
+ */
+class MultiThreadedStripeAccessGuard : public StripeAccessGuard {
+ MultiThreadedStripeAccessor& _accessor;
+ DistributorStripePool& _stripe_pool;
+public:
+ MultiThreadedStripeAccessGuard(MultiThreadedStripeAccessor& accessor,
+ DistributorStripePool& stripe_pool);
+ ~MultiThreadedStripeAccessGuard() override;
+
+ void flush_and_close() override;
+
+ void update_total_distributor_config(std::shared_ptr<const DistributorConfiguration> config) override;
+
+ void update_distribution_config(const BucketSpaceDistributionConfigs& new_configs) override;
+ void set_pending_cluster_state_bundle(const lib::ClusterStateBundle& pending_state) override;
+ void clear_pending_cluster_state_bundle() override;
+ void enable_cluster_state_bundle(const lib::ClusterStateBundle& new_state) override;
+ void notify_distribution_change_enabled() override;
+
+ PotentialDataLossReport remove_superfluous_buckets(document::BucketSpace bucket_space,
+ const lib::ClusterState& new_state,
+ bool is_distribution_change) override;
+ void merge_entries_into_db(document::BucketSpace bucket_space,
+ api::Timestamp gathered_at_timestamp,
+ const lib::Distribution& distribution,
+ const lib::ClusterState& new_state,
+ const char* storage_up_states,
+ const std::unordered_set<uint16_t>& outdated_nodes,
+ const std::vector<dbtransition::Entry>& entries) override;
+
+ void update_read_snapshot_before_db_pruning() override;
+ void update_read_snapshot_after_db_pruning(const lib::ClusterStateBundle& new_state) override;
+ void update_read_snapshot_after_activation(const lib::ClusterStateBundle& activated_state) override;
+ void clear_read_only_bucket_repo_databases() override;
+
+ void report_bucket_db_status(document::BucketSpace bucket_space, std::ostream& out) const override;
+ PendingOperationStats pending_operation_stats() const override;
+ void report_single_bucket_requests(vespalib::xml::XmlOutputStream& xos) const override;
+ void report_delayed_single_bucket_requests(vespalib::xml::XmlOutputStream& xos) const override;
+
+private:
+ // TODO STRIPE remove once multi threaded stripe support is implemented
+ TickableStripe& first_stripe() noexcept;
+ const TickableStripe& first_stripe() const noexcept;
+
+ template <typename Func>
+ void for_each_stripe(Func&& f);
+
+ template <typename Func>
+ void for_each_stripe(Func&& f) const;
+};
+
+/**
+ * Impl of StripeAccessor which creates MultiThreadedStripeAccessGuards that cover all threads
+ * in the provided stripe pool.
+ */
+class MultiThreadedStripeAccessor : public StripeAccessor {
+ DistributorStripePool& _stripe_pool;
+ bool _guard_held;
+
+ friend class MultiThreadedStripeAccessGuard;
+public:
+ explicit MultiThreadedStripeAccessor(DistributorStripePool& stripe_pool)
+ : _stripe_pool(stripe_pool),
+ _guard_held(false)
+ {}
+ ~MultiThreadedStripeAccessor() override = default;
+
+ std::unique_ptr<StripeAccessGuard> rendezvous_and_hold_all() override;
+private:
+ void mark_guard_released();
+};
+
+}
diff --git a/storage/src/vespa/storage/distributor/operationowner.h b/storage/src/vespa/storage/distributor/operationowner.h
index 56a5f28f2b6..d3f46343ebc 100644
--- a/storage/src/vespa/storage/distributor/operationowner.h
+++ b/storage/src/vespa/storage/distributor/operationowner.h
@@ -18,10 +18,10 @@ class Operation;
class OperationOwner : public OperationStarter {
public:
- class Sender : public DistributorMessageSender {
+ class Sender : public DistributorStripeMessageSender {
public:
Sender(OperationOwner& owner,
- DistributorMessageSender& sender,
+ DistributorStripeMessageSender& sender,
const std::shared_ptr<Operation>& cb)
: _owner(owner),
_sender(sender),
@@ -53,11 +53,11 @@ public:
private:
OperationOwner& _owner;
- DistributorMessageSender& _sender;
+ DistributorStripeMessageSender& _sender;
std::shared_ptr<Operation> _cb;
};
- OperationOwner(DistributorMessageSender& sender,
+ OperationOwner(DistributorStripeMessageSender& sender,
const framework::Clock& clock)
: _sender(sender),
_clock(clock) {
@@ -85,7 +85,7 @@ public:
*/
void erase(api::StorageMessage::Id msgId);
- [[nodiscard]] DistributorMessageSender& sender() noexcept { return _sender; }
+ [[nodiscard]] DistributorStripeMessageSender& sender() noexcept { return _sender; }
void onClose();
uint32_t size() const { return _sentMessageMap.size(); }
@@ -93,7 +93,7 @@ public:
private:
SentMessageMap _sentMessageMap;
- DistributorMessageSender& _sender;
+ DistributorStripeMessageSender& _sender;
const framework::Clock& _clock;
};
diff --git a/storage/src/vespa/storage/distributor/operations/external/getoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/getoperation.cpp
index 61bdcd4444d..db84575db31 100644
--- a/storage/src/vespa/storage/distributor/operations/external/getoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/getoperation.cpp
@@ -45,7 +45,7 @@ GetOperation::GroupId::operator==(const GroupId& other) const
&& _node == other._node);
}
-GetOperation::GetOperation(DistributorNodeContext& node_ctx,
+GetOperation::GetOperation(const DistributorNodeContext& node_ctx,
const DistributorBucketSpace &bucketSpace,
std::shared_ptr<BucketDatabase::ReadGuard> read_guard,
std::shared_ptr<api::GetCommand> msg,
@@ -68,7 +68,7 @@ GetOperation::GetOperation(DistributorNodeContext& node_ctx,
}
void
-GetOperation::onClose(DistributorMessageSender& sender)
+GetOperation::onClose(DistributorStripeMessageSender& sender)
{
_returnCode = api::ReturnCode(api::ReturnCode::ABORTED, "Process is shutting down");
sendReply(sender);
@@ -99,7 +99,7 @@ GetOperation::findBestUnsentTarget(const GroupVector& candidates) const
}
bool
-GetOperation::sendForChecksum(DistributorMessageSender& sender, const document::BucketId& id, GroupVector& res)
+GetOperation::sendForChecksum(DistributorStripeMessageSender& sender, const document::BucketId& id, GroupVector& res)
{
const int best = findBestUnsentTarget(res);
@@ -122,7 +122,7 @@ GetOperation::sendForChecksum(DistributorMessageSender& sender, const document::
}
void
-GetOperation::onStart(DistributorMessageSender& sender)
+GetOperation::onStart(DistributorStripeMessageSender& sender)
{
// Send one request for each unique group (BucketId/checksum)
bool sent = false;
@@ -138,7 +138,7 @@ GetOperation::onStart(DistributorMessageSender& sender)
};
void
-GetOperation::onReceive(DistributorMessageSender& sender, const std::shared_ptr<api::StorageReply>& msg)
+GetOperation::onReceive(DistributorStripeMessageSender& sender, const std::shared_ptr<api::StorageReply>& msg)
{
auto* getreply = dynamic_cast<api::GetReply*>(msg.get());
assert(getreply != nullptr);
@@ -225,7 +225,7 @@ void GetOperation::update_internal_metrics() {
}
void
-GetOperation::sendReply(DistributorMessageSender& sender)
+GetOperation::sendReply(DistributorStripeMessageSender& sender)
{
if (_msg.get()) {
const auto newest = _newest_replica.value_or(NewestReplica::make_empty());
diff --git a/storage/src/vespa/storage/distributor/operations/external/getoperation.h b/storage/src/vespa/storage/distributor/operations/external/getoperation.h
index cc6fd6680e0..10d457922c9 100644
--- a/storage/src/vespa/storage/distributor/operations/external/getoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/getoperation.h
@@ -26,16 +26,16 @@ class DistributorBucketSpace;
class GetOperation : public Operation
{
public:
- GetOperation(DistributorNodeContext& node_ctx,
+ GetOperation(const DistributorNodeContext& node_ctx,
const DistributorBucketSpace &bucketSpace,
std::shared_ptr<BucketDatabase::ReadGuard> read_guard,
std::shared_ptr<api::GetCommand> msg,
PersistenceOperationMetricSet& metric,
api::InternalReadConsistency desired_read_consistency = api::InternalReadConsistency::Strong);
- void onClose(DistributorMessageSender& sender) override;
- void onStart(DistributorMessageSender& sender) override;
- void onReceive(DistributorMessageSender& sender, const std::shared_ptr<api::StorageReply> & msg) override;
+ void onClose(DistributorStripeMessageSender& sender) override;
+ void onStart(DistributorStripeMessageSender& sender) override;
+ void onReceive(DistributorStripeMessageSender& sender, const std::shared_ptr<api::StorageReply> & msg) override;
const char* getName() const override { return "get"; }
std::string getStatus() const override { return ""; }
@@ -97,7 +97,7 @@ private:
// within that bucket.
std::map<GroupId, GroupVector> _responses;
- DistributorNodeContext& _node_ctx;
+ const DistributorNodeContext& _node_ctx;
const DistributorBucketSpace &_bucketSpace;
std::shared_ptr<api::GetCommand> _msg;
@@ -114,8 +114,8 @@ private:
bool _has_replica_inconsistency;
bool _any_replicas_failed;
- void sendReply(DistributorMessageSender& sender);
- bool sendForChecksum(DistributorMessageSender& sender, const document::BucketId& id, GroupVector& res);
+ void sendReply(DistributorStripeMessageSender& sender);
+ bool sendForChecksum(DistributorStripeMessageSender& sender, const document::BucketId& id, GroupVector& res);
void assignTargetNodeGroups(const BucketDatabase::ReadGuard& read_guard);
bool copyIsOnLocalNode(const BucketCopy&) const;
diff --git a/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
index de36bb60b2c..9d9a04e9dcc 100644
--- a/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
@@ -21,7 +21,7 @@ using namespace storage::distributor;
using namespace storage;
using document::BucketSpace;
-PutOperation::PutOperation(DistributorNodeContext& node_ctx,
+PutOperation::PutOperation(const DistributorNodeContext& node_ctx,
DistributorStripeOperationContext& op_ctx,
DistributorBucketSpace &bucketSpace,
std::shared_ptr<api::PutCommand> msg,
@@ -156,14 +156,14 @@ bool PutOperation::has_unavailable_targets_in_pending_state(const OperationTarge
if (!pending_state) {
return false;
}
- const char* up_states = _op_ctx.storage_node_up_states();
+ const char* up_states = storage_node_up_states();
return std::any_of(targets.begin(), targets.end(), [pending_state, up_states](const auto& target){
return !pending_state->getNodeState(target.getNode()).getState().oneOf(up_states);
});
}
void
-PutOperation::onStart(DistributorMessageSender& sender)
+PutOperation::onStart(DistributorStripeMessageSender& sender)
{
document::BucketIdFactory bucketIdFactory;
document::BucketId bid = bucketIdFactory.getBucketId(_msg->getDocumentId());
@@ -176,7 +176,7 @@ PutOperation::onStart(DistributorMessageSender& sender)
bool up = false;
for (uint16_t i = 0; i < systemState.getNodeCount(lib::NodeType::STORAGE); i++) {
if (systemState.getNodeState(lib::Node(lib::NodeType::STORAGE, i))
- .getState().oneOf(_op_ctx.storage_node_up_states()))
+ .getState().oneOf(storage_node_up_states()))
{
up = true;
}
@@ -268,14 +268,14 @@ PutOperation::shouldImplicitlyActivateReplica(const OperationTargetList& targets
}
void
-PutOperation::onReceive(DistributorMessageSender& sender, const std::shared_ptr<api::StorageReply> & msg)
+PutOperation::onReceive(DistributorStripeMessageSender& sender, const std::shared_ptr<api::StorageReply> & msg)
{
LOG(debug, "Received %s", msg->toString(true).c_str());
_tracker.receiveReply(sender, static_cast<api::BucketInfoReply&>(*msg));
}
void
-PutOperation::onClose(DistributorMessageSender& sender)
+PutOperation::onClose(DistributorStripeMessageSender& sender)
{
const char* error = "Process is shutting down";
LOG(debug, "%s", error);
diff --git a/storage/src/vespa/storage/distributor/operations/external/putoperation.h b/storage/src/vespa/storage/distributor/operations/external/putoperation.h
index c9cfc08d63d..57ae2e3ba9f 100644
--- a/storage/src/vespa/storage/distributor/operations/external/putoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/putoperation.h
@@ -23,18 +23,18 @@ class OperationTargetList;
class PutOperation : public SequencedOperation
{
public:
- PutOperation(DistributorNodeContext& node_ctx,
+ PutOperation(const DistributorNodeContext& node_ctx,
DistributorStripeOperationContext& op_ctx,
DistributorBucketSpace &bucketSpace,
std::shared_ptr<api::PutCommand> msg,
PersistenceOperationMetricSet& metric,
SequencingHandle sequencingHandle = SequencingHandle());
- void onStart(DistributorMessageSender& sender) override;
+ void onStart(DistributorStripeMessageSender& sender) override;
const char* getName() const override { return "put"; };
std::string getStatus() const override { return ""; };
- void onReceive(DistributorMessageSender& sender, const std::shared_ptr<api::StorageReply> &) override;
- void onClose(DistributorMessageSender& sender) override;
+ void onReceive(DistributorStripeMessageSender& sender, const std::shared_ptr<api::StorageReply> &) override;
+ void onClose(DistributorStripeMessageSender& sender) override;
static void getTargetNodes(const std::vector<uint16_t>& idealNodes, std::vector<uint16_t>& targetNodes,
std::vector<uint16_t>& createNodes, const BucketInfo& bucketInfo, uint32_t redundancy);
diff --git a/storage/src/vespa/storage/distributor/operations/external/read_for_write_visitor_operation.cpp b/storage/src/vespa/storage/distributor/operations/external/read_for_write_visitor_operation.cpp
index 04e64703c19..1d6b0fed6f9 100644
--- a/storage/src/vespa/storage/distributor/operations/external/read_for_write_visitor_operation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/read_for_write_visitor_operation.cpp
@@ -29,11 +29,11 @@ ReadForWriteVisitorOperationStarter::ReadForWriteVisitorOperationStarter(
ReadForWriteVisitorOperationStarter::~ReadForWriteVisitorOperationStarter() = default;
-void ReadForWriteVisitorOperationStarter::onClose(DistributorMessageSender& sender) {
+void ReadForWriteVisitorOperationStarter::onClose(DistributorStripeMessageSender& sender) {
_visitor_op->onClose(sender);
}
-void ReadForWriteVisitorOperationStarter::onStart(DistributorMessageSender& sender) {
+void ReadForWriteVisitorOperationStarter::onStart(DistributorStripeMessageSender& sender) {
if (_visitor_op->verify_command_and_expand_buckets(sender)) {
assert(!_visitor_op->has_sent_reply());
auto maybe_bucket = _visitor_op->first_bucket_to_visit();
@@ -78,7 +78,7 @@ void ReadForWriteVisitorOperationStarter::onStart(DistributorMessageSender& send
}
}
-void ReadForWriteVisitorOperationStarter::onReceive(DistributorMessageSender& sender,
+void ReadForWriteVisitorOperationStarter::onReceive(DistributorStripeMessageSender& sender,
const std::shared_ptr<api::StorageReply> & msg) {
_visitor_op->onReceive(sender, msg);
}
diff --git a/storage/src/vespa/storage/distributor/operations/external/read_for_write_visitor_operation.h b/storage/src/vespa/storage/distributor/operations/external/read_for_write_visitor_operation.h
index e9391f9f133..28474bd52f1 100644
--- a/storage/src/vespa/storage/distributor/operations/external/read_for_write_visitor_operation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/read_for_write_visitor_operation.h
@@ -42,9 +42,9 @@ public:
~ReadForWriteVisitorOperationStarter() override;
const char* getName() const override { return "ReadForWriteVisitorOperationStarter"; }
- void onClose(DistributorMessageSender& sender) override;
- void onStart(DistributorMessageSender& sender) override;
- void onReceive(DistributorMessageSender& sender,
+ void onClose(DistributorStripeMessageSender& sender) override;
+ void onStart(DistributorStripeMessageSender& sender) override;
+ void onReceive(DistributorStripeMessageSender& sender,
const std::shared_ptr<api::StorageReply> & msg) override;
private:
bool bucket_has_pending_merge(const document::Bucket&, const PendingMessageTracker& tracker) const;
diff --git a/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp
index f8609dedde4..54bc6782893 100644
--- a/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp
@@ -18,9 +18,9 @@ using namespace storage;
using document::BucketSpace;
RemoveLocationOperation::RemoveLocationOperation(
- DistributorNodeContext& node_ctx,
+ const DistributorNodeContext& node_ctx,
DistributorStripeOperationContext& op_ctx,
- DocumentSelectionParser& parser,
+ const DocumentSelectionParser& parser,
DistributorBucketSpace &bucketSpace,
std::shared_ptr<api::RemoveLocationCommand> msg,
PersistenceOperationMetricSet& metric)
@@ -41,8 +41,8 @@ RemoveLocationOperation::~RemoveLocationOperation() = default;
int
RemoveLocationOperation::getBucketId(
- DistributorNodeContext& node_ctx,
- DocumentSelectionParser& parser,
+ const DistributorNodeContext& node_ctx,
+ const DocumentSelectionParser& parser,
const api::RemoveLocationCommand& cmd, document::BucketId& bid)
{
document::BucketSelector bucketSel(node_ctx.bucket_id_factory());
@@ -60,7 +60,7 @@ RemoveLocationOperation::getBucketId(
}
void
-RemoveLocationOperation::onStart(DistributorMessageSender& sender)
+RemoveLocationOperation::onStart(DistributorStripeMessageSender& sender)
{
document::BucketId bid;
int count = getBucketId(_node_ctx, _parser, *_msg, bid);
@@ -108,14 +108,14 @@ RemoveLocationOperation::onStart(DistributorMessageSender& sender)
void
RemoveLocationOperation::onReceive(
- DistributorMessageSender& sender,
+ DistributorStripeMessageSender& sender,
const std::shared_ptr<api::StorageReply> & msg)
{
_tracker.receiveReply(sender, static_cast<api::BucketInfoReply&>(*msg));
}
void
-RemoveLocationOperation::onClose(DistributorMessageSender& sender)
+RemoveLocationOperation::onClose(DistributorStripeMessageSender& sender)
{
_tracker.fail(sender, api::ReturnCode(api::ReturnCode::ABORTED,
"Process is shutting down"));
diff --git a/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.h b/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.h
index bf09a95933f..8cac26c5669 100644
--- a/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.h
@@ -15,32 +15,32 @@ class DistributorBucketSpace;
class RemoveLocationOperation : public Operation
{
public:
- RemoveLocationOperation(DistributorNodeContext& node_ctx,
+ RemoveLocationOperation(const DistributorNodeContext& node_ctx,
DistributorStripeOperationContext& op_ctx,
- DocumentSelectionParser& parser,
+ const DocumentSelectionParser& parser,
DistributorBucketSpace &bucketSpace,
std::shared_ptr<api::RemoveLocationCommand> msg,
PersistenceOperationMetricSet& metric);
~RemoveLocationOperation() override;
- static int getBucketId(DistributorNodeContext& node_ctx,
- DocumentSelectionParser& parser,
+ static int getBucketId(const DistributorNodeContext& node_ctx,
+ const DocumentSelectionParser& parser,
const api::RemoveLocationCommand& cmd,
document::BucketId& id);
- void onStart(DistributorMessageSender& sender) override;
+ void onStart(DistributorStripeMessageSender& sender) override;
const char* getName() const override { return "removelocation"; };
std::string getStatus() const override { return ""; };
- void onReceive(DistributorMessageSender& sender, const std::shared_ptr<api::StorageReply> &) override;
- void onClose(DistributorMessageSender& sender) override;
+ void onReceive(DistributorStripeMessageSender& sender, const std::shared_ptr<api::StorageReply> &) override;
+ void onClose(DistributorStripeMessageSender& sender) override;
private:
PersistenceMessageTrackerImpl _trackerInstance;
PersistenceMessageTracker& _tracker;
std::shared_ptr<api::RemoveLocationCommand> _msg;
- DistributorNodeContext& _node_ctx;
- DocumentSelectionParser& _parser;
+ const DistributorNodeContext& _node_ctx;
+ const DocumentSelectionParser& _parser;
DistributorBucketSpace &_bucketSpace;
};
diff --git a/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp
index 25c73d88e37..6626e6f7171 100644
--- a/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp
@@ -12,7 +12,7 @@ using namespace storage::distributor;
using namespace storage;
using document::BucketSpace;
-RemoveOperation::RemoveOperation(DistributorNodeContext& node_ctx,
+RemoveOperation::RemoveOperation(const DistributorNodeContext& node_ctx,
DistributorStripeOperationContext& op_ctx,
DistributorBucketSpace &bucketSpace,
std::shared_ptr<api::RemoveCommand> msg,
@@ -32,7 +32,7 @@ RemoveOperation::RemoveOperation(DistributorNodeContext& node_ctx,
RemoveOperation::~RemoveOperation() = default;
void
-RemoveOperation::onStart(DistributorMessageSender& sender)
+RemoveOperation::onStart(DistributorStripeMessageSender& sender)
{
LOG(spam, "Started remove on document %s", _msg->getDocumentId().toString().c_str());
@@ -79,7 +79,7 @@ RemoveOperation::onStart(DistributorMessageSender& sender)
void
-RemoveOperation::onReceive(DistributorMessageSender& sender, const std::shared_ptr<api::StorageReply> & msg)
+RemoveOperation::onReceive(DistributorStripeMessageSender& sender, const std::shared_ptr<api::StorageReply> & msg)
{
api::RemoveReply& reply(static_cast<api::RemoveReply&>(*msg));
@@ -96,7 +96,7 @@ RemoveOperation::onReceive(DistributorMessageSender& sender, const std::shared_p
}
void
-RemoveOperation::onClose(DistributorMessageSender& sender)
+RemoveOperation::onClose(DistributorStripeMessageSender& sender)
{
_tracker.fail(sender, api::ReturnCode(api::ReturnCode::ABORTED, "Process is shutting down"));
}
diff --git a/storage/src/vespa/storage/distributor/operations/external/removeoperation.h b/storage/src/vespa/storage/distributor/operations/external/removeoperation.h
index 32eb5bd3d70..2ac26b0d719 100644
--- a/storage/src/vespa/storage/distributor/operations/external/removeoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/removeoperation.h
@@ -15,7 +15,7 @@ class DistributorBucketSpace;
class RemoveOperation : public SequencedOperation
{
public:
- RemoveOperation(DistributorNodeContext& node_ctx,
+ RemoveOperation(const DistributorNodeContext& node_ctx,
DistributorStripeOperationContext& op_ctx,
DistributorBucketSpace &bucketSpace,
std::shared_ptr<api::RemoveCommand> msg,
@@ -23,12 +23,12 @@ public:
SequencingHandle sequencingHandle = SequencingHandle());
~RemoveOperation() override;
- void onStart(DistributorMessageSender& sender) override;
+ void onStart(DistributorStripeMessageSender& sender) override;
const char* getName() const override { return "remove"; };
std::string getStatus() const override { return ""; };
- void onReceive(DistributorMessageSender& sender, const std::shared_ptr<api::StorageReply> &) override;
- void onClose(DistributorMessageSender& sender) override;
+ void onReceive(DistributorStripeMessageSender& sender, const std::shared_ptr<api::StorageReply> &) override;
+ void onClose(DistributorStripeMessageSender& sender) override;
private:
PersistenceMessageTrackerImpl _trackerInstance;
@@ -36,7 +36,7 @@ private:
std::shared_ptr<api::RemoveCommand> _msg;
- DistributorNodeContext& _node_ctx;
+ const DistributorNodeContext& _node_ctx;
DistributorBucketSpace &_bucketSpace;
};
diff --git a/storage/src/vespa/storage/distributor/operations/external/statbucketlistoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/statbucketlistoperation.cpp
index 4b7cff41ad1..12d1cc3f216 100644
--- a/storage/src/vespa/storage/distributor/operations/external/statbucketlistoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/statbucketlistoperation.cpp
@@ -40,7 +40,7 @@ StatBucketListOperation::getBucketStatus(const BucketDatabase::Entry& entry,
}
void
-StatBucketListOperation::onStart(DistributorMessageSender& sender)
+StatBucketListOperation::onStart(DistributorStripeMessageSender& sender)
{
api::GetBucketListReply::SP reply(new api::GetBucketListReply(*_command));
diff --git a/storage/src/vespa/storage/distributor/operations/external/statbucketlistoperation.h b/storage/src/vespa/storage/distributor/operations/external/statbucketlistoperation.h
index aa38a0d2319..831ebe0f9ce 100644
--- a/storage/src/vespa/storage/distributor/operations/external/statbucketlistoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/statbucketlistoperation.h
@@ -26,13 +26,13 @@ public:
const char* getName() const override { return "statBucketList"; }
std::string getStatus() const override { return ""; }
- void onStart(DistributorMessageSender& sender) override;
- void onReceive(DistributorMessageSender&, const std::shared_ptr<api::StorageReply>&) override
+ void onStart(DistributorStripeMessageSender& sender) override;
+ void onReceive(DistributorStripeMessageSender&, const std::shared_ptr<api::StorageReply>&) override
{
// Never called.
HDR_ABORT("should not be reached");
}
- void onClose(DistributorMessageSender&) override {}
+ void onClose(DistributorStripeMessageSender&) override {}
private:
void getBucketStatus(const BucketDatabase::Entry& entry, std::ostream& os) const;
diff --git a/storage/src/vespa/storage/distributor/operations/external/statbucketoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/statbucketoperation.cpp
index d0fdd539b72..9c97f12b89a 100644
--- a/storage/src/vespa/storage/distributor/operations/external/statbucketoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/statbucketoperation.cpp
@@ -22,7 +22,7 @@ StatBucketOperation::StatBucketOperation(
StatBucketOperation::~StatBucketOperation() = default;
void
-StatBucketOperation::onClose(DistributorMessageSender& sender)
+StatBucketOperation::onClose(DistributorStripeMessageSender& sender)
{
api::StatBucketReply* rep = (api::StatBucketReply*)_command->makeReply().release();
rep->setResult(api::ReturnCode(api::ReturnCode::ABORTED, "Process is shutting down"));
@@ -30,7 +30,7 @@ StatBucketOperation::onClose(DistributorMessageSender& sender)
}
void
-StatBucketOperation::onStart(DistributorMessageSender& sender)
+StatBucketOperation::onStart(DistributorStripeMessageSender& sender)
{
std::vector<uint16_t> nodes;
@@ -68,7 +68,7 @@ StatBucketOperation::onStart(DistributorMessageSender& sender)
};
void
-StatBucketOperation::onReceive(DistributorMessageSender& sender, const std::shared_ptr<api::StorageReply> & msg)
+StatBucketOperation::onReceive(DistributorStripeMessageSender& sender, const std::shared_ptr<api::StorageReply> & msg)
{
assert(msg->getType() == api::MessageType::STATBUCKET_REPLY);
api::StatBucketReply& myreply(dynamic_cast<api::StatBucketReply&>(*msg));
diff --git a/storage/src/vespa/storage/distributor/operations/external/statbucketoperation.h b/storage/src/vespa/storage/distributor/operations/external/statbucketoperation.h
index beb9e9c3445..d0c299d88bc 100644
--- a/storage/src/vespa/storage/distributor/operations/external/statbucketoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/statbucketoperation.h
@@ -25,9 +25,9 @@ public:
const char* getName() const override { return "statBucket"; }
std::string getStatus() const override { return ""; }
- void onClose(DistributorMessageSender& sender) override;
- void onStart(DistributorMessageSender& sender) override;
- void onReceive(DistributorMessageSender& sender, const std::shared_ptr<api::StorageReply> & msg) override;
+ void onClose(DistributorStripeMessageSender& sender) override;
+ void onStart(DistributorStripeMessageSender& sender) override;
+ void onReceive(DistributorStripeMessageSender& sender, const std::shared_ptr<api::StorageReply> & msg) override;
private:
DistributorBucketSpace &_bucketSpace;
diff --git a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
index 1f8da7a0589..9077f3dc288 100644
--- a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
@@ -25,9 +25,9 @@ namespace storage::distributor {
TwoPhaseUpdateOperation::TwoPhaseUpdateOperation(
- DistributorNodeContext& node_ctx,
+ const DistributorNodeContext& node_ctx,
DistributorStripeOperationContext& op_ctx,
- DocumentSelectionParser& parser,
+ const DocumentSelectionParser& parser,
DistributorBucketSpace &bucketSpace,
std::shared_ptr<api::UpdateCommand> msg,
DistributorMetricSet& metrics,
@@ -60,13 +60,13 @@ TwoPhaseUpdateOperation::~TwoPhaseUpdateOperation() = default;
namespace {
-struct IntermediateMessageSender : DistributorMessageSender {
+struct IntermediateMessageSender : DistributorStripeMessageSender {
SentMessageMap& msgMap;
std::shared_ptr<Operation> callback;
- DistributorMessageSender& forward;
+ DistributorStripeMessageSender& forward;
std::shared_ptr<api::StorageReply> _reply;
- IntermediateMessageSender(SentMessageMap& mm, std::shared_ptr<Operation> cb, DistributorMessageSender & fwd);
+ IntermediateMessageSender(SentMessageMap& mm, std::shared_ptr<Operation> cb, DistributorStripeMessageSender & fwd);
~IntermediateMessageSender() override;
void sendCommand(const std::shared_ptr<api::StorageCommand>& cmd) override {
@@ -97,7 +97,7 @@ struct IntermediateMessageSender : DistributorMessageSender {
IntermediateMessageSender::IntermediateMessageSender(SentMessageMap& mm,
std::shared_ptr<Operation> cb,
- DistributorMessageSender & fwd)
+ DistributorStripeMessageSender & fwd)
: msgMap(mm),
callback(std::move(cb)),
forward(fwd)
@@ -141,7 +141,7 @@ TwoPhaseUpdateOperation::ensureUpdateReplyCreated()
void
TwoPhaseUpdateOperation::sendReply(
- DistributorMessageSender& sender,
+ DistributorStripeMessageSender& sender,
std::shared_ptr<api::StorageReply>& reply)
{
assert(!_replySent);
@@ -152,7 +152,7 @@ TwoPhaseUpdateOperation::sendReply(
void
TwoPhaseUpdateOperation::sendReplyWithResult(
- DistributorMessageSender& sender,
+ DistributorStripeMessageSender& sender,
const api::ReturnCode& result)
{
ensureUpdateReplyCreated();
@@ -179,7 +179,7 @@ TwoPhaseUpdateOperation::isFastPathPossible(const std::vector<BucketDatabase::En
}
void
-TwoPhaseUpdateOperation::startFastPathUpdate(DistributorMessageSender& sender, std::vector<BucketDatabase::Entry> entries)
+TwoPhaseUpdateOperation::startFastPathUpdate(DistributorStripeMessageSender& sender, std::vector<BucketDatabase::Entry> entries)
{
_mode = Mode::FAST_PATH;
LOG(debug, "Update(%s) fast path: sending Update commands", update_doc_id().c_str());
@@ -196,7 +196,7 @@ TwoPhaseUpdateOperation::startFastPathUpdate(DistributorMessageSender& sender, s
}
void
-TwoPhaseUpdateOperation::startSafePathUpdate(DistributorMessageSender& sender)
+TwoPhaseUpdateOperation::startSafePathUpdate(DistributorStripeMessageSender& sender)
{
if (_op_ctx.cluster_state_bundle().block_feed_in_cluster()) {
send_feed_blocked_error_reply(sender);
@@ -248,7 +248,7 @@ TwoPhaseUpdateOperation::create_initial_safe_path_get_operation() {
}
void
-TwoPhaseUpdateOperation::onStart(DistributorMessageSender& sender) {
+TwoPhaseUpdateOperation::onStart(DistributorStripeMessageSender& sender) {
auto entries = get_bucket_database_entries();
if (isFastPathPossible(entries)) {
startFastPathUpdate(sender, std::move(entries));
@@ -274,7 +274,7 @@ TwoPhaseUpdateOperation::lostBucketOwnershipBetweenPhases() const
}
void
-TwoPhaseUpdateOperation::sendLostOwnershipTransientErrorReply(DistributorMessageSender& sender)
+TwoPhaseUpdateOperation::sendLostOwnershipTransientErrorReply(DistributorStripeMessageSender& sender)
{
sendReplyWithResult(sender,
api::ReturnCode(api::ReturnCode::BUCKET_NOT_FOUND,
@@ -284,7 +284,7 @@ TwoPhaseUpdateOperation::sendLostOwnershipTransientErrorReply(DistributorMessage
}
void
-TwoPhaseUpdateOperation::send_feed_blocked_error_reply(DistributorMessageSender& sender)
+TwoPhaseUpdateOperation::send_feed_blocked_error_reply(DistributorStripeMessageSender& sender)
{
sendReplyWithResult(sender,
api::ReturnCode(api::ReturnCode::NO_SPACE,
@@ -294,7 +294,7 @@ TwoPhaseUpdateOperation::send_feed_blocked_error_reply(DistributorMessageSender&
void
TwoPhaseUpdateOperation::schedulePutsWithUpdatedDocument(std::shared_ptr<document::Document> doc,
- api::Timestamp putTimestamp, DistributorMessageSender& sender)
+ api::Timestamp putTimestamp, DistributorStripeMessageSender& sender)
{
if (lostBucketOwnershipBetweenPhases()) {
sendLostOwnershipTransientErrorReply(sender);
@@ -318,7 +318,7 @@ TwoPhaseUpdateOperation::schedulePutsWithUpdatedDocument(std::shared_ptr<documen
}
void
-TwoPhaseUpdateOperation::onReceive(DistributorMessageSender& sender, const std::shared_ptr<api::StorageReply>& msg)
+TwoPhaseUpdateOperation::onReceive(DistributorStripeMessageSender& sender, const std::shared_ptr<api::StorageReply>& msg)
{
if (_mode == Mode::FAST_PATH) {
handleFastPathReceive(sender, msg);
@@ -328,7 +328,7 @@ TwoPhaseUpdateOperation::onReceive(DistributorMessageSender& sender, const std::
}
void
-TwoPhaseUpdateOperation::handleFastPathReceive(DistributorMessageSender& sender,
+TwoPhaseUpdateOperation::handleFastPathReceive(DistributorStripeMessageSender& sender,
const std::shared_ptr<api::StorageReply>& msg)
{
if (msg->getType() == api::MessageType::GET_REPLY) {
@@ -396,7 +396,7 @@ TwoPhaseUpdateOperation::handleFastPathReceive(DistributorMessageSender& sender,
}
void
-TwoPhaseUpdateOperation::handleSafePathReceive(DistributorMessageSender& sender,
+TwoPhaseUpdateOperation::handleSafePathReceive(DistributorStripeMessageSender& sender,
const std::shared_ptr<api::StorageReply>& msg)
{
// No explicit operation is associated with the direct replica Get operation,
@@ -434,7 +434,7 @@ TwoPhaseUpdateOperation::handleSafePathReceive(DistributorMessageSender& sender,
}
void TwoPhaseUpdateOperation::handle_safe_path_received_single_full_get(
- DistributorMessageSender& sender,
+ DistributorStripeMessageSender& sender,
api::GetReply& reply)
{
LOG(spam, "Received single full Get reply for '%s'", update_doc_id().c_str());
@@ -453,7 +453,7 @@ void TwoPhaseUpdateOperation::handle_safe_path_received_single_full_get(
}
void TwoPhaseUpdateOperation::handle_safe_path_received_metadata_get(
- DistributorMessageSender& sender, api::GetReply& reply,
+ DistributorStripeMessageSender& sender, api::GetReply& reply,
const std::optional<NewestReplica>& newest_replica,
bool any_replicas_failed)
{
@@ -511,7 +511,7 @@ void TwoPhaseUpdateOperation::handle_safe_path_received_metadata_get(
}
void
-TwoPhaseUpdateOperation::handleSafePathReceivedGet(DistributorMessageSender& sender, api::GetReply& reply)
+TwoPhaseUpdateOperation::handleSafePathReceivedGet(DistributorStripeMessageSender& sender, api::GetReply& reply)
{
LOG(debug, "Update(%s): got Get reply with code %s",
_updateCmd->getDocumentId().toString().c_str(),
@@ -585,7 +585,7 @@ bool TwoPhaseUpdateOperation::replica_set_unchanged_after_get_operation() const
return (replicas_in_db_now == _replicas_at_get_send_time);
}
-void TwoPhaseUpdateOperation::restart_with_fast_path_due_to_consistent_get_timestamps(DistributorMessageSender& sender) {
+void TwoPhaseUpdateOperation::restart_with_fast_path_due_to_consistent_get_timestamps(DistributorStripeMessageSender& sender) {
LOG(debug, "Update(%s): all Gets returned in initial safe path were consistent, restarting in fast path mode",
update_doc_id().c_str());
if (lostBucketOwnershipBetweenPhases()) {
@@ -600,7 +600,7 @@ void TwoPhaseUpdateOperation::restart_with_fast_path_due_to_consistent_get_times
}
bool
-TwoPhaseUpdateOperation::processAndMatchTasCondition(DistributorMessageSender& sender,
+TwoPhaseUpdateOperation::processAndMatchTasCondition(DistributorStripeMessageSender& sender,
const document::Document& candidateDoc)
{
if (!hasTasCondition()) {
@@ -631,7 +631,7 @@ TwoPhaseUpdateOperation::hasTasCondition() const noexcept
}
void
-TwoPhaseUpdateOperation::replyWithTasFailure(DistributorMessageSender& sender, vespalib::stringref message)
+TwoPhaseUpdateOperation::replyWithTasFailure(DistributorStripeMessageSender& sender, vespalib::stringref message)
{
sendReplyWithResult(sender, api::ReturnCode(api::ReturnCode::TEST_AND_SET_CONDITION_FAILED, message));
}
@@ -651,7 +651,7 @@ TwoPhaseUpdateOperation::createBlankDocument() const
}
void
-TwoPhaseUpdateOperation::handleSafePathReceivedPut(DistributorMessageSender& sender, const api::PutReply& reply)
+TwoPhaseUpdateOperation::handleSafePathReceivedPut(DistributorStripeMessageSender& sender, const api::PutReply& reply)
{
sendReplyWithResult(sender, reply.getResult());
}
@@ -681,7 +681,7 @@ TwoPhaseUpdateOperation::addTraceFromReply(api::StorageReply & reply)
}
void
-TwoPhaseUpdateOperation::onClose(DistributorMessageSender& sender) {
+TwoPhaseUpdateOperation::onClose(DistributorStripeMessageSender& sender) {
while (true) {
std::shared_ptr<Operation> cb = _sentMessageMap.pop();
diff --git a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.h b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.h
index 7817eb7bffd..ff49e2068a6 100644
--- a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.h
@@ -55,25 +55,25 @@ class GetOperation;
class TwoPhaseUpdateOperation : public SequencedOperation
{
public:
- TwoPhaseUpdateOperation(DistributorNodeContext& node_ctx,
+ TwoPhaseUpdateOperation(const DistributorNodeContext& node_ctx,
DistributorStripeOperationContext& op_ctx,
- DocumentSelectionParser& parser,
+ const DocumentSelectionParser& parser,
DistributorBucketSpace &bucketSpace,
std::shared_ptr<api::UpdateCommand> msg,
DistributorMetricSet& metrics,
SequencingHandle sequencingHandle = SequencingHandle());
~TwoPhaseUpdateOperation() override;
- void onStart(DistributorMessageSender& sender) override;
+ void onStart(DistributorStripeMessageSender& sender) override;
const char* getName() const override { return "twophaseupdate"; }
std::string getStatus() const override { return ""; }
- void onReceive(DistributorMessageSender&,
+ void onReceive(DistributorStripeMessageSender&,
const std::shared_ptr<api::StorageReply>&) override;
- void onClose(DistributorMessageSender& sender) override;
+ void onClose(DistributorStripeMessageSender& sender) override;
private:
enum class SendState {
@@ -93,49 +93,49 @@ private:
void transitionTo(SendState newState);
static const char* stateToString(SendState);
- void sendReply(DistributorMessageSender&,
+ void sendReply(DistributorStripeMessageSender&,
std::shared_ptr<api::StorageReply>&);
- void sendReplyWithResult(DistributorMessageSender&, const api::ReturnCode&);
+ void sendReplyWithResult(DistributorStripeMessageSender&, const api::ReturnCode&);
void ensureUpdateReplyCreated();
std::vector<BucketDatabase::Entry> get_bucket_database_entries() const;
bool isFastPathPossible(const std::vector<BucketDatabase::Entry>& entries) const;
- void startFastPathUpdate(DistributorMessageSender& sender, std::vector<BucketDatabase::Entry> entries);
- void startSafePathUpdate(DistributorMessageSender&);
+ void startFastPathUpdate(DistributorStripeMessageSender& sender, std::vector<BucketDatabase::Entry> entries);
+ void startSafePathUpdate(DistributorStripeMessageSender&);
bool lostBucketOwnershipBetweenPhases() const;
- void sendLostOwnershipTransientErrorReply(DistributorMessageSender&);
- void send_feed_blocked_error_reply(DistributorMessageSender& sender);
+ void sendLostOwnershipTransientErrorReply(DistributorStripeMessageSender&);
+ void send_feed_blocked_error_reply(DistributorStripeMessageSender& sender);
void schedulePutsWithUpdatedDocument(
std::shared_ptr<document::Document>,
api::Timestamp,
- DistributorMessageSender&);
+ DistributorStripeMessageSender&);
void applyUpdateToDocument(document::Document&) const;
std::shared_ptr<document::Document> createBlankDocument() const;
void setUpdatedForTimestamp(api::Timestamp);
- void handleFastPathReceive(DistributorMessageSender&,
+ void handleFastPathReceive(DistributorStripeMessageSender&,
const std::shared_ptr<api::StorageReply>&);
- void handleSafePathReceive(DistributorMessageSender&,
+ void handleSafePathReceive(DistributorStripeMessageSender&,
const std::shared_ptr<api::StorageReply>&);
std::shared_ptr<GetOperation> create_initial_safe_path_get_operation();
- void handle_safe_path_received_metadata_get(DistributorMessageSender&,
+ void handle_safe_path_received_metadata_get(DistributorStripeMessageSender&,
api::GetReply&,
const std::optional<NewestReplica>&,
bool any_replicas_failed);
- void handle_safe_path_received_single_full_get(DistributorMessageSender&, api::GetReply&);
- void handleSafePathReceivedGet(DistributorMessageSender&, api::GetReply&);
- void handleSafePathReceivedPut(DistributorMessageSender&, const api::PutReply&);
+ void handle_safe_path_received_single_full_get(DistributorStripeMessageSender&, api::GetReply&);
+ void handleSafePathReceivedGet(DistributorStripeMessageSender&, api::GetReply&);
+ void handleSafePathReceivedPut(DistributorStripeMessageSender&, const api::PutReply&);
bool shouldCreateIfNonExistent() const;
bool processAndMatchTasCondition(
- DistributorMessageSender& sender,
+ DistributorStripeMessageSender& sender,
const document::Document& candidateDoc);
bool satisfiesUpdateTimestampConstraint(api::Timestamp) const;
void addTraceFromReply(api::StorageReply& reply);
bool hasTasCondition() const noexcept;
- void replyWithTasFailure(DistributorMessageSender& sender,
+ void replyWithTasFailure(DistributorStripeMessageSender& sender,
vespalib::stringref message);
bool may_restart_with_fast_path(const api::GetReply& reply);
bool replica_set_unchanged_after_get_operation() const;
- void restart_with_fast_path_due_to_consistent_get_timestamps(DistributorMessageSender& sender);
+ void restart_with_fast_path_due_to_consistent_get_timestamps(DistributorStripeMessageSender& sender);
// Precondition: reply has not yet been sent.
vespalib::string update_doc_id() const;
@@ -145,9 +145,9 @@ private:
PersistenceOperationMetricSet& _metadata_get_metrics;
std::shared_ptr<api::UpdateCommand> _updateCmd;
std::shared_ptr<api::StorageReply> _updateReply;
- DistributorNodeContext& _node_ctx;
+ const DistributorNodeContext& _node_ctx;
DistributorStripeOperationContext& _op_ctx;
- DocumentSelectionParser& _parser;
+ const DocumentSelectionParser& _parser;
DistributorBucketSpace &_bucketSpace;
SentMessageMap _sentMessageMap;
SendState _sendState;
diff --git a/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp
index 4530d7b2864..fbdd26b9eff 100644
--- a/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp
@@ -18,7 +18,7 @@ using document::BucketSpace;
namespace storage::distributor {
-UpdateOperation::UpdateOperation(DistributorNodeContext& node_ctx,
+UpdateOperation::UpdateOperation(const DistributorNodeContext& node_ctx,
DistributorStripeOperationContext& op_ctx,
DistributorBucketSpace& bucketSpace,
const std::shared_ptr<api::UpdateCommand>& msg,
@@ -60,7 +60,7 @@ UpdateOperation::anyStorageNodesAvailable() const
}
void
-UpdateOperation::onStart(DistributorMessageSender& sender)
+UpdateOperation::onStart(DistributorStripeMessageSender& sender)
{
LOG(debug, "Received UPDATE %s for bucket %" PRIx64,
_msg->getDocumentId().toString().c_str(),
@@ -123,7 +123,7 @@ UpdateOperation::onStart(DistributorMessageSender& sender)
};
void
-UpdateOperation::onReceive(DistributorMessageSender& sender,
+UpdateOperation::onReceive(DistributorStripeMessageSender& sender,
const std::shared_ptr<api::StorageReply> & msg)
{
auto& reply = static_cast<api::UpdateReply&>(*msg);
@@ -186,7 +186,7 @@ UpdateOperation::onReceive(DistributorMessageSender& sender,
}
void
-UpdateOperation::onClose(DistributorMessageSender& sender)
+UpdateOperation::onClose(DistributorStripeMessageSender& sender)
{
_tracker.fail(sender, api::ReturnCode(api::ReturnCode::ABORTED, "Process is shutting down"));
}
diff --git a/storage/src/vespa/storage/distributor/operations/external/updateoperation.h b/storage/src/vespa/storage/distributor/operations/external/updateoperation.h
index 4e875b27133..533992e926c 100644
--- a/storage/src/vespa/storage/distributor/operations/external/updateoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/updateoperation.h
@@ -23,18 +23,18 @@ class DistributorBucketSpace;
class UpdateOperation : public Operation
{
public:
- UpdateOperation(DistributorNodeContext& node_ctx,
+ UpdateOperation(const DistributorNodeContext& node_ctx,
DistributorStripeOperationContext& op_ctx,
DistributorBucketSpace& bucketSpace,
const std::shared_ptr<api::UpdateCommand>& msg,
std::vector<BucketDatabase::Entry> entries,
UpdateMetricSet& metric);
- void onStart(DistributorMessageSender& sender) override;
+ void onStart(DistributorStripeMessageSender& sender) override;
const char* getName() const override { return "update"; };
std::string getStatus() const override { return ""; };
- void onReceive(DistributorMessageSender& sender, const std::shared_ptr<api::StorageReply> & msg) override;
- void onClose(DistributorMessageSender& sender) override;
+ void onReceive(DistributorStripeMessageSender& sender, const std::shared_ptr<api::StorageReply> & msg) override;
+ void onClose(DistributorStripeMessageSender& sender) override;
std::pair<document::BucketId, uint16_t> getNewestTimestampLocation() const {
return _newestTimestampLocation;
@@ -48,7 +48,7 @@ private:
const api::Timestamp _new_timestamp;
const bool _is_auto_create_update;
- DistributorNodeContext& _node_ctx;
+ const DistributorNodeContext& _node_ctx;
DistributorBucketSpace &_bucketSpace;
std::pair<document::BucketId, uint16_t> _newestTimestampLocation;
api::BucketInfo _infoAtSendTime; // Should be same across all replicas
diff --git a/storage/src/vespa/storage/distributor/operations/external/visitoroperation.cpp b/storage/src/vespa/storage/distributor/operations/external/visitoroperation.cpp
index b4ad16f3323..7679d4bc0e5 100644
--- a/storage/src/vespa/storage/distributor/operations/external/visitoroperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/visitoroperation.cpp
@@ -63,7 +63,7 @@ matches_visitor_library(vespalib::stringref input, vespalib::stringref expected)
}
VisitorOperation::VisitorOperation(
- DistributorNodeContext& node_ctx,
+ const DistributorNodeContext& node_ctx,
DistributorStripeOperationContext& op_ctx,
DistributorBucketSpace &bucketSpace,
const api::CreateVisitorCommand::SP& m,
@@ -190,7 +190,7 @@ VisitorOperation::markOperationAsFailedDueToNodeError(
void
VisitorOperation::onReceive(
- DistributorMessageSender& sender,
+ DistributorStripeMessageSender& sender,
const api::StorageReply::SP& r)
{
api::CreateVisitorReply& reply = static_cast<api::CreateVisitorReply&>(*r);
@@ -348,7 +348,7 @@ VisitorOperation::verifyOperationSentToCorrectDistributor()
}
bool
-VisitorOperation::verifyCreateVisitorCommand(DistributorMessageSender& sender)
+VisitorOperation::verifyCreateVisitorCommand(DistributorStripeMessageSender& sender)
{
try {
verifyOperationContainsBuckets();
@@ -587,7 +587,7 @@ VisitorOperation::pickTargetNode(
}
void
-VisitorOperation::onStart(DistributorMessageSender& sender)
+VisitorOperation::onStart(DistributorStripeMessageSender& sender)
{
if (!_verified_and_expanded) {
if (!verify_command_and_expand_buckets(sender)) {
@@ -598,7 +598,7 @@ VisitorOperation::onStart(DistributorMessageSender& sender)
}
bool
-VisitorOperation::verify_command_and_expand_buckets(DistributorMessageSender& sender)
+VisitorOperation::verify_command_and_expand_buckets(DistributorStripeMessageSender& sender)
{
assert(!_verified_and_expanded);
_verified_and_expanded = true;
@@ -636,7 +636,7 @@ VisitorOperation::maySendNewStorageVisitors() const noexcept
}
void
-VisitorOperation::startNewVisitors(DistributorMessageSender& sender)
+VisitorOperation::startNewVisitors(DistributorStripeMessageSender& sender)
{
LOG(spam,
"Starting new visitors: Superbucket: %s, last subbucket: %s",
@@ -764,7 +764,7 @@ VisitorOperation::getNumVisitorsToSendForNode(uint16_t node, uint32_t totalBucke
bool
VisitorOperation::sendStorageVisitors(const NodeToBucketsMap& nodeToBucketsMap,
- DistributorMessageSender& sender)
+ DistributorStripeMessageSender& sender)
{
bool visitorsSent = false;
for (const auto & entry : nodeToBucketsMap ) {
@@ -800,7 +800,7 @@ void
VisitorOperation::sendStorageVisitor(uint16_t node,
const std::vector<document::BucketId>& buckets,
uint32_t pending,
- DistributorMessageSender& sender)
+ DistributorStripeMessageSender& sender)
{
// TODO rewrite to not use copy ctor and remove wonky StorageCommand copy ctor impl
auto cmd = std::make_shared<api::CreateVisitorCommand>(*_msg);
@@ -839,7 +839,7 @@ VisitorOperation::sendStorageVisitor(uint16_t node,
}
void
-VisitorOperation::sendReply(const api::ReturnCode& code, DistributorMessageSender& sender)
+VisitorOperation::sendReply(const api::ReturnCode& code, DistributorStripeMessageSender& sender)
{
if (!_sentReply) {
// Send create visitor reply
@@ -880,20 +880,20 @@ VisitorOperation::updateReplyMetrics(const api::ReturnCode& result)
}
void
-VisitorOperation::onClose(DistributorMessageSender& sender)
+VisitorOperation::onClose(DistributorStripeMessageSender& sender)
{
sendReply(api::ReturnCode(api::ReturnCode::ABORTED, "Process is shutting down"), sender);
}
void
-VisitorOperation::fail_with_bucket_already_locked(DistributorMessageSender& sender)
+VisitorOperation::fail_with_bucket_already_locked(DistributorStripeMessageSender& sender)
{
assert(is_read_for_write());
sendReply(api::ReturnCode(api::ReturnCode::BUSY, "This bucket is already locked by another operation"), sender);
}
void
-VisitorOperation::fail_with_merge_pending(DistributorMessageSender& sender)
+VisitorOperation::fail_with_merge_pending(DistributorStripeMessageSender& sender)
{
assert(is_read_for_write());
sendReply(api::ReturnCode(api::ReturnCode::BUSY, "A merge operation is pending for this bucket"), sender);
diff --git a/storage/src/vespa/storage/distributor/operations/external/visitoroperation.h b/storage/src/vespa/storage/distributor/operations/external/visitoroperation.h
index 794436a28e6..9ea4cdfc4ae 100644
--- a/storage/src/vespa/storage/distributor/operations/external/visitoroperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/visitoroperation.h
@@ -35,7 +35,7 @@ public:
uint32_t maxVisitorsPerNodePerVisitor;
};
- VisitorOperation(DistributorNodeContext& node_ctx,
+ VisitorOperation(const DistributorNodeContext& node_ctx,
DistributorStripeOperationContext& op_ctx,
DistributorBucketSpace &bucketSpace,
const std::shared_ptr<api::CreateVisitorCommand>& msg,
@@ -44,16 +44,16 @@ public:
~VisitorOperation() override;
- void onClose(DistributorMessageSender& sender) override;
- void onStart(DistributorMessageSender& sender) override;
- void onReceive(DistributorMessageSender& sender,
+ void onClose(DistributorStripeMessageSender& sender) override;
+ void onStart(DistributorStripeMessageSender& sender) override;
+ void onReceive(DistributorStripeMessageSender& sender,
const std::shared_ptr<api::StorageReply> & msg) override;
// Only valid to call if is_read_for_write() == true
- void fail_with_bucket_already_locked(DistributorMessageSender& sender);
- void fail_with_merge_pending(DistributorMessageSender& sender);
+ void fail_with_bucket_already_locked(DistributorStripeMessageSender& sender);
+ void fail_with_merge_pending(DistributorStripeMessageSender& sender);
- [[nodiscard]] bool verify_command_and_expand_buckets(DistributorMessageSender& sender);
+ [[nodiscard]] bool verify_command_and_expand_buckets(DistributorStripeMessageSender& sender);
const char* getName() const override { return "visit"; }
std::string getStatus() const override { return ""; }
@@ -97,7 +97,7 @@ private:
using NodeToBucketsMap = std::map<uint16_t, std::vector<document::BucketId>>;
using SentMessagesMap = std::map<uint64_t, api::CreateVisitorCommand::SP>;
- void sendReply(const api::ReturnCode& code, DistributorMessageSender& sender);
+ void sendReply(const api::ReturnCode& code, DistributorStripeMessageSender& sender);
void updateReplyMetrics(const api::ReturnCode& result);
void verifyDistributorsAreAvailable();
void verifyVisitorDistributionBitCount(const document::BucketId&);
@@ -106,7 +106,7 @@ private:
void verifyOperationContainsBuckets();
void verifyOperationHasSuperbucketAndProgress();
void verifyOperationSentToCorrectDistributor();
- bool verifyCreateVisitorCommand(DistributorMessageSender& sender);
+ bool verifyCreateVisitorCommand(DistributorStripeMessageSender& sender);
bool pickBucketsToVisit(const std::vector<BucketDatabase::Entry>& buckets);
bool expandBucketContaining();
bool expandBucketContained();
@@ -115,7 +115,7 @@ private:
const BucketDatabase::Entry& entry,
const std::vector<uint16_t>& triedNodes);
bool maySendNewStorageVisitors() const noexcept;
- void startNewVisitors(DistributorMessageSender& sender);
+ void startNewVisitors(DistributorStripeMessageSender& sender);
void initializeActiveNodes();
bool shouldSkipBucket(const BucketInfo& bucketInfo) const;
bool bucketIsValidAndConsistent(const BucketDatabase::Entry& entry) const;
@@ -125,11 +125,11 @@ private:
int getNumVisitorsToSendForNode(uint16_t node, uint32_t totalBucketsOnNode) const;
vespalib::duration computeVisitorQueueTimeoutMs() const noexcept;
bool sendStorageVisitors(const NodeToBucketsMap& nodeToBucketsMap,
- DistributorMessageSender& sender);
+ DistributorStripeMessageSender& sender);
void sendStorageVisitor(uint16_t node,
const std::vector<document::BucketId>& buckets,
uint32_t pending,
- DistributorMessageSender& sender);
+ DistributorStripeMessageSender& sender);
void markCompleted(const document::BucketId& bid, const api::ReturnCode& code);
/**
* Operation failed and we can pin the blame on a specific node. Updates
@@ -152,7 +152,7 @@ private:
*/
vespalib::duration timeLeft() const noexcept;
- DistributorNodeContext& _node_ctx;
+ const DistributorNodeContext& _node_ctx;
DistributorStripeOperationContext& _op_ctx;
DistributorBucketSpace &_bucketSpace;
SentMessagesMap _sentMessages;
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.cpp
index c9e983d4284..f611968b481 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.cpp
@@ -21,7 +21,7 @@ GarbageCollectionOperation::GarbageCollectionOperation(const ClusterContext& clu
GarbageCollectionOperation::~GarbageCollectionOperation() = default;
-void GarbageCollectionOperation::onStart(DistributorMessageSender& sender) {
+void GarbageCollectionOperation::onStart(DistributorStripeMessageSender& sender) {
BucketDatabase::Entry entry = _bucketSpace->getBucketDatabase().get(getBucketId());
std::vector<uint16_t> nodes = entry->getNodes();
@@ -42,7 +42,7 @@ void GarbageCollectionOperation::onStart(DistributorMessageSender& sender) {
}
void
-GarbageCollectionOperation::onReceive(DistributorMessageSender&,
+GarbageCollectionOperation::onReceive(DistributorStripeMessageSender&,
const std::shared_ptr<api::StorageReply>& reply)
{
auto* rep = dynamic_cast<api::RemoveLocationReply*>(reply.get());
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.h b/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.h
index 545dd10b539..01ef607912c 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.h
@@ -17,8 +17,8 @@ public:
const BucketAndNodes& nodes);
~GarbageCollectionOperation() override;
- void onStart(DistributorMessageSender& sender) override;
- void onReceive(DistributorMessageSender& sender, const std::shared_ptr<api::StorageReply> &) override;
+ void onStart(DistributorStripeMessageSender& sender) override;
+ void onReceive(DistributorStripeMessageSender& sender, const std::shared_ptr<api::StorageReply> &) override;
const char* getName() const override { return "garbagecollection"; };
Type getType() const override { return GARBAGE_COLLECTION; }
bool shouldBlockThisOperation(uint32_t, uint8_t) const override;
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.h b/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.h
index dcdc2f32374..7906150d0cb 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.h
@@ -114,7 +114,7 @@ public:
virtual ~IdealStateOperation();
- void onClose(DistributorMessageSender&) override {}
+ void onClose(DistributorStripeMessageSender&) override {}
/**
Returns true if the operation was performed successfully.
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp
index c2c43f86c42..d9e411bc44e 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp
@@ -21,7 +21,7 @@ JoinOperation::JoinOperation(const ClusterContext &clusterName,
JoinOperation::~JoinOperation() = default;
void
-JoinOperation::onStart(DistributorMessageSender& sender)
+JoinOperation::onStart(DistributorStripeMessageSender& sender)
{
_ok = false;
@@ -96,7 +96,7 @@ JoinOperation::enqueueJoinMessagePerTargetNode(
}
void
-JoinOperation::onReceive(DistributorMessageSender&, const api::StorageReply::SP& msg)
+JoinOperation::onReceive(DistributorStripeMessageSender&, const api::StorageReply::SP& msg)
{
api::JoinBucketsReply& rep = static_cast<api::JoinBucketsReply&>(*msg);
uint16_t node = _tracker.handleReply(rep);
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.h b/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.h
index ad133a937e4..5796b8d3fa1 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.h
@@ -22,9 +22,9 @@ public:
~JoinOperation() override;
- void onStart(DistributorMessageSender& sender) override;
+ void onStart(DistributorStripeMessageSender& sender) override;
- void onReceive(DistributorMessageSender& sender,
+ void onReceive(DistributorStripeMessageSender& sender,
const std::shared_ptr<api::StorageReply>&) override;
const char* getName() const override {
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp
index afb806e903a..481506096eb 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp
@@ -106,7 +106,7 @@ struct NodeIndexComparator
}
void
-MergeOperation::onStart(DistributorMessageSender& sender)
+MergeOperation::onStart(DistributorStripeMessageSender& sender)
{
BucketDatabase::Entry entry = _bucketSpace->getBucketDatabase().get(getBucketId());
if (!entry.valid()) {
@@ -209,7 +209,7 @@ MergeOperation::sourceOnlyCopyChangedDuringMerge(
void
MergeOperation::deleteSourceOnlyNodes(
const BucketDatabase::Entry& currentState,
- DistributorMessageSender& sender)
+ DistributorStripeMessageSender& sender)
{
assert(currentState.valid());
std::vector<uint16_t> sourceOnlyNodes;
@@ -253,7 +253,7 @@ MergeOperation::deleteSourceOnlyNodes(
}
void
-MergeOperation::onReceive(DistributorMessageSender& sender,
+MergeOperation::onReceive(DistributorStripeMessageSender& sender,
const std::shared_ptr<api::StorageReply> & msg)
{
if (_removeOperation.get()) {
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.h b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.h
index a5f7d352eea..5df9421e815 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.h
@@ -34,8 +34,8 @@ public:
~MergeOperation();
- void onStart(DistributorMessageSender& sender) override;
- void onReceive(DistributorMessageSender& sender, const api::StorageReply::SP&) override;
+ void onStart(DistributorStripeMessageSender& sender) override;
+ void onReceive(DistributorStripeMessageSender& sender, const api::StorageReply::SP&) override;
const char* getName() const override { return "merge"; };
std::string getStatus() const override;
Type getType() const override { return MERGE_BUCKET; }
@@ -60,7 +60,7 @@ private:
std::vector<MergeMetaData>& result);
void deleteSourceOnlyNodes(const BucketDatabase::Entry& currentState,
- DistributorMessageSender& sender);
+ DistributorStripeMessageSender& sender);
};
}
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/removebucketoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/removebucketoperation.cpp
index 6b06657d713..f6458bc0522 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/removebucketoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/removebucketoperation.cpp
@@ -12,7 +12,7 @@ LOG_SETUP(".distributor.operation.idealstate.remove");
using namespace storage::distributor;
bool
-RemoveBucketOperation::onStartInternal(DistributorMessageSender& sender)
+RemoveBucketOperation::onStartInternal(DistributorStripeMessageSender& sender)
{
std::vector<std::pair<uint16_t, std::shared_ptr<api::DeleteBucketCommand> > > msgs;
@@ -51,7 +51,7 @@ RemoveBucketOperation::onStartInternal(DistributorMessageSender& sender)
void
-RemoveBucketOperation::onStart(DistributorMessageSender& sender)
+RemoveBucketOperation::onStart(DistributorStripeMessageSender& sender)
{
if (onStartInternal(sender)) {
done();
@@ -104,7 +104,7 @@ RemoveBucketOperation::onReceiveInternal(const std::shared_ptr<api::StorageReply
void
-RemoveBucketOperation::onReceive(DistributorMessageSender&, const std::shared_ptr<api::StorageReply> &msg)
+RemoveBucketOperation::onReceive(DistributorStripeMessageSender&, const std::shared_ptr<api::StorageReply> &msg)
{
if (onReceiveInternal(msg)) {
done();
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/removebucketoperation.h b/storage/src/vespa/storage/distributor/operations/idealstate/removebucketoperation.h
index 5b79a465f4e..a9d7f4ebf04 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/removebucketoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/removebucketoperation.h
@@ -18,16 +18,16 @@ public:
/**
Sends messages, returns true if we are done (sent nothing).
*/
- bool onStartInternal(DistributorMessageSender& sender);
+ bool onStartInternal(DistributorStripeMessageSender& sender);
/**
Sends messages, calls done() if we are done (sent nothing).
*/
- void onStart(DistributorMessageSender& sender) override;
+ void onStart(DistributorStripeMessageSender& sender) override;
bool onReceiveInternal(const std::shared_ptr<api::StorageReply> &);
- void onReceive(DistributorMessageSender& sender, const std::shared_ptr<api::StorageReply> &) override;
+ void onReceive(DistributorStripeMessageSender& sender, const std::shared_ptr<api::StorageReply> &) override;
const char* getName() const override { return "remove"; };
Type getType() const override { return DELETE_BUCKET; }
bool shouldBlockThisOperation(uint32_t, uint8_t) const override;
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/setbucketstateoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/setbucketstateoperation.cpp
index d244521140a..88e53f7da06 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/setbucketstateoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/setbucketstateoperation.cpp
@@ -43,7 +43,7 @@ SetBucketStateOperation::shouldBeActive(uint16_t node) const
}
void
-SetBucketStateOperation::activateNode(DistributorMessageSender& sender) {
+SetBucketStateOperation::activateNode(DistributorStripeMessageSender& sender) {
for (uint32_t i=0; i<_wantedActiveNodes.size(); ++i) {
enqueueSetBucketStateCommand(_wantedActiveNodes[i], true);
}
@@ -53,7 +53,7 @@ SetBucketStateOperation::activateNode(DistributorMessageSender& sender) {
void
-SetBucketStateOperation::deactivateNodes(DistributorMessageSender& sender) {
+SetBucketStateOperation::deactivateNodes(DistributorStripeMessageSender& sender) {
const std::vector<uint16_t>& nodes(getNodes());
for (size_t i = 0; i < nodes.size(); ++i) {
if (!shouldBeActive(nodes[i])) {
@@ -64,13 +64,13 @@ SetBucketStateOperation::deactivateNodes(DistributorMessageSender& sender) {
}
void
-SetBucketStateOperation::onStart(DistributorMessageSender& sender)
+SetBucketStateOperation::onStart(DistributorStripeMessageSender& sender)
{
activateNode(sender);
}
void
-SetBucketStateOperation::onReceive(DistributorMessageSender& sender,
+SetBucketStateOperation::onReceive(DistributorStripeMessageSender& sender,
const std::shared_ptr<api::StorageReply>& reply)
{
api::SetBucketStateReply& rep(
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/setbucketstateoperation.h b/storage/src/vespa/storage/distributor/operations/idealstate/setbucketstateoperation.h
index 1c818f9198d..5c13aaf5c05 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/setbucketstateoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/setbucketstateoperation.h
@@ -14,8 +14,8 @@ public:
const std::vector<uint16_t>& wantedActiveNodes);
~SetBucketStateOperation() override;
- void onStart(DistributorMessageSender&) override;
- void onReceive(DistributorMessageSender&, const std::shared_ptr<api::StorageReply>&) override;
+ void onStart(DistributorStripeMessageSender&) override;
+ void onReceive(DistributorStripeMessageSender&, const std::shared_ptr<api::StorageReply>&) override;
const char* getName() const override { return "setbucketstate"; }
Type getType() const override { return SET_BUCKET_STATE; }
protected:
@@ -24,8 +24,8 @@ protected:
private:
void enqueueSetBucketStateCommand(uint16_t node, bool active);
- void activateNode(DistributorMessageSender& sender);
- void deactivateNodes(DistributorMessageSender& sender);
+ void activateNode(DistributorStripeMessageSender& sender);
+ void deactivateNodes(DistributorStripeMessageSender& sender);
bool shouldBeActive(uint16_t node) const;
};
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp
index a75e954c118..437c4ed6033 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp
@@ -22,7 +22,7 @@ SplitOperation::SplitOperation(const ClusterContext &cluster_ctx, const BucketAn
SplitOperation::~SplitOperation() = default;
void
-SplitOperation::onStart(DistributorMessageSender& sender)
+SplitOperation::onStart(DistributorStripeMessageSender& sender)
{
_ok = false;
@@ -49,7 +49,7 @@ SplitOperation::onStart(DistributorMessageSender& sender)
}
void
-SplitOperation::onReceive(DistributorMessageSender&, const api::StorageReply::SP& msg)
+SplitOperation::onReceive(DistributorStripeMessageSender&, const api::StorageReply::SP& msg)
{
api::SplitBucketReply& rep = static_cast<api::SplitBucketReply&>(*msg);
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.h b/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.h
index 1bb82c2a39e..eccbdc69869 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.h
@@ -16,8 +16,8 @@ public:
SplitOperation & operator = (const SplitOperation &) = delete;
~SplitOperation();
- void onStart(DistributorMessageSender& sender) override;
- void onReceive(DistributorMessageSender& sender, const std::shared_ptr<api::StorageReply> &) override;
+ void onStart(DistributorStripeMessageSender& sender) override;
+ void onReceive(DistributorStripeMessageSender& sender, const std::shared_ptr<api::StorageReply> &) override;
const char* getName() const override { return "split"; };
Type getType() const override { return SPLIT_BUCKET; }
bool isBlocked(const PendingMessageTracker&, const OperationSequencer&) const override;
diff --git a/storage/src/vespa/storage/distributor/operations/operation.cpp b/storage/src/vespa/storage/distributor/operations/operation.cpp
index ee695dae606..bcc9a36b010 100644
--- a/storage/src/vespa/storage/distributor/operations/operation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/operation.cpp
@@ -26,7 +26,7 @@ Operation::getStatus() const
}
void
-Operation::start(DistributorMessageSender& sender,
+Operation::start(DistributorStripeMessageSender& sender,
framework::MilliSecTime startTime)
{
_startTime = startTime;
diff --git a/storage/src/vespa/storage/distributor/operations/operation.h b/storage/src/vespa/storage/distributor/operations/operation.h
index e9320817a8e..75d72a2b5c9 100644
--- a/storage/src/vespa/storage/distributor/operations/operation.h
+++ b/storage/src/vespa/storage/distributor/operations/operation.h
@@ -33,13 +33,13 @@ public:
Tell the callback that storage is shutting down. Reply to any pending
stuff.
*/
- virtual void onClose(DistributorMessageSender&) = 0;
+ virtual void onClose(DistributorStripeMessageSender&) = 0;
/**
When a reply has been received, the storagelink will call receive()
on the owner of the message that was replied to.
*/
- virtual void receive(DistributorMessageSender& sender,
+ virtual void receive(DistributorStripeMessageSender& sender,
const std::shared_ptr<api::StorageReply> & msg)
{
onReceive(sender, msg);
@@ -56,7 +56,7 @@ public:
/**
Starts the callback, sending any messages etc. Sets _startTime to current time
*/
- virtual void start(DistributorMessageSender& sender, framework::MilliSecTime startTime);
+ virtual void start(DistributorStripeMessageSender& sender, framework::MilliSecTime startTime);
/**
* Returns true if we are blocked to start this operation given
@@ -81,9 +81,9 @@ private:
/**
Implementation of start for the callback
*/
- virtual void onStart(DistributorMessageSender& sender) = 0;
+ virtual void onStart(DistributorStripeMessageSender& sender) = 0;
- virtual void onReceive(DistributorMessageSender& sender,
+ virtual void onReceive(DistributorStripeMessageSender& sender,
const std::shared_ptr<api::StorageReply> & msg) = 0;
protected:
diff --git a/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp b/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp
index 9328a43cc6b..746a0012d0d 100644
--- a/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp
+++ b/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp
@@ -15,7 +15,7 @@ namespace storage::distributor {
PersistenceMessageTrackerImpl::PersistenceMessageTrackerImpl(
PersistenceOperationMetricSet& metric,
std::shared_ptr<api::BucketInfoReply> reply,
- DistributorNodeContext& node_ctx,
+ const DistributorNodeContext& node_ctx,
DistributorStripeOperationContext& op_ctx,
api::Timestamp revertTimestamp)
: MessageTracker(node_ctx),
diff --git a/storage/src/vespa/storage/distributor/persistencemessagetracker.h b/storage/src/vespa/storage/distributor/persistencemessagetracker.h
index 57d0a77364b..3d7838d2a45 100644
--- a/storage/src/vespa/storage/distributor/persistencemessagetracker.h
+++ b/storage/src/vespa/storage/distributor/persistencemessagetracker.h
@@ -36,7 +36,7 @@ private:
public:
PersistenceMessageTrackerImpl(PersistenceOperationMetricSet& metric,
std::shared_ptr<api::BucketInfoReply> reply,
- DistributorNodeContext& node_ctx,
+ const DistributorNodeContext& node_ctx,
DistributorStripeOperationContext& op_ctx,
api::Timestamp revertTimestamp = 0);
~PersistenceMessageTrackerImpl() override;
diff --git a/storage/src/vespa/storage/distributor/potential_data_loss_report.h b/storage/src/vespa/storage/distributor/potential_data_loss_report.h
index 96abd787649..a5fb0adae62 100644
--- a/storage/src/vespa/storage/distributor/potential_data_loss_report.h
+++ b/storage/src/vespa/storage/distributor/potential_data_loss_report.h
@@ -13,6 +13,13 @@ struct PotentialDataLossReport {
size_t buckets = 0;
size_t documents = 0;
+ constexpr PotentialDataLossReport() noexcept = default;
+
+ constexpr PotentialDataLossReport(size_t buckets_, size_t documents_) noexcept
+ : buckets(buckets_),
+ documents(documents_)
+ {}
+
void merge(const PotentialDataLossReport& other) noexcept {
buckets += other.buckets;
documents += other.documents;
diff --git a/storage/src/vespa/storage/distributor/statechecker.cpp b/storage/src/vespa/storage/distributor/statechecker.cpp
index bbbe283077f..82b8371e163 100644
--- a/storage/src/vespa/storage/distributor/statechecker.cpp
+++ b/storage/src/vespa/storage/distributor/statechecker.cpp
@@ -61,19 +61,21 @@ StateChecker::Result::createStoredResult(
return Result(std::unique_ptr<ResultImpl>(new StoredResultImpl(std::move(operation), MaintenancePriority(priority))));
}
-StateChecker::Context::Context(const DistributorStripeComponent& c,
+StateChecker::Context::Context(const DistributorNodeContext& node_ctx_in,
+ const DistributorStripeOperationContext& op_ctx_in,
const DistributorBucketSpace &distributorBucketSpace,
NodeMaintenanceStatsTracker& statsTracker,
const document::Bucket &bucket_)
: bucket(bucket_),
- siblingBucket(c.get_sibling(bucket.getBucketId())),
+ siblingBucket(op_ctx_in.get_sibling(bucket.getBucketId())),
systemState(distributorBucketSpace.getClusterState()),
- pending_cluster_state(c.getDistributor().pendingClusterStateOrNull(bucket_.getBucketSpace())),
- distributorConfig(c.getDistributor().getConfig()),
+ pending_cluster_state(op_ctx_in.pending_cluster_state_or_null(bucket_.getBucketSpace())),
+ distributorConfig(op_ctx_in.distributor_config()),
distribution(distributorBucketSpace.getDistribution()),
- gcTimeCalculator(c.getDistributor().getBucketIdHasher(),
+ gcTimeCalculator(op_ctx_in.bucket_id_hasher(),
std::chrono::duration_cast<std::chrono::seconds>(distributorConfig.getGarbageCollectionInterval())),
- component(c),
+ node_ctx(node_ctx_in),
+ op_ctx(op_ctx_in),
db(distributorBucketSpace.getBucketDatabase()),
stats(statsTracker)
{
diff --git a/storage/src/vespa/storage/distributor/statechecker.h b/storage/src/vespa/storage/distributor/statechecker.h
index 44c45e62ec8..47d5a5b55ed 100644
--- a/storage/src/vespa/storage/distributor/statechecker.h
+++ b/storage/src/vespa/storage/distributor/statechecker.h
@@ -17,8 +17,9 @@ namespace storage { class DistributorConfiguration; }
namespace storage::distributor {
-class DistributorStripeComponent;
class DistributorBucketSpace;
+class DistributorNodeContext;
+class DistributorStripeOperationContext;
class NodeMaintenanceStatsTracker;
/**
@@ -44,7 +45,8 @@ public:
*/
struct Context
{
- Context(const DistributorStripeComponent&,
+ Context(const DistributorNodeContext& node_ctx_in,
+ const DistributorStripeOperationContext& op_ctx_in,
const DistributorBucketSpace &distributorBucketSpace,
NodeMaintenanceStatsTracker&,
const document::Bucket &bucket_);
@@ -76,7 +78,8 @@ public:
std::vector<uint16_t> idealState;
std::unordered_set<uint16_t> unorderedIdealState;
- const DistributorStripeComponent& component;
+ const DistributorNodeContext& node_ctx;
+ const DistributorStripeOperationContext& op_ctx;
const BucketDatabase& db;
NodeMaintenanceStatsTracker& stats;
diff --git a/storage/src/vespa/storage/distributor/statecheckers.cpp b/storage/src/vespa/storage/distributor/statecheckers.cpp
index ed3e47fcafb..49b07a6f356 100644
--- a/storage/src/vespa/storage/distributor/statecheckers.cpp
+++ b/storage/src/vespa/storage/distributor/statecheckers.cpp
@@ -85,7 +85,7 @@ SplitBucketStateChecker::generateMinimumBucketSplitOperation(
StateChecker::Context& c)
{
auto so = std::make_unique<SplitOperation>(
- c.component.cluster_context(),
+ c.node_ctx,
BucketAndNodes(c.getBucket(), c.entry->getNodes()),
c.distributorConfig.getMinimalBucketSplit(),
0,
@@ -103,7 +103,7 @@ SplitBucketStateChecker::generateMaxSizeExceededSplitOperation(
StateChecker::Context& c)
{
auto so = std::make_unique<SplitOperation>(
- c.component.cluster_context(),
+ c.node_ctx,
BucketAndNodes(c.getBucket(), c.entry->getNodes()),
58,
c.distributorConfig.getSplitCount(),
@@ -466,7 +466,7 @@ JoinBucketsStateChecker::check(StateChecker::Context& c)
}
sourceBuckets.push_back(c.getBucketId());
auto op = std::make_unique<JoinOperation>(
- c.component.cluster_context(),
+ c.node_ctx,
BucketAndNodes(joinedBucket, c.entry->getNodes()),
sourceBuckets);
op->setPriority(c.distributorConfig.getMaintenancePriorities().joinBuckets);
@@ -570,7 +570,7 @@ SplitInconsistentStateChecker::check(StateChecker::Context& c)
}
auto op = std::make_unique<SplitOperation>(
- c.component.cluster_context(),
+ c.node_ctx,
BucketAndNodes(c.getBucket(), c.entry->getNodes()),
getHighestUsedBits(c.entries),
0,
@@ -1009,7 +1009,7 @@ DeleteExtraCopiesStateChecker::check(StateChecker::Context& c)
if (!removedCopies.empty()) {
auto ro = std::make_unique<RemoveBucketOperation>(
- c.component.cluster_context(),
+ c.node_ctx,
BucketAndNodes(c.getBucket(), removedCopies));
ro->setPriority(c.distributorConfig.getMaintenancePriorities().deleteBucketCopy);
@@ -1110,7 +1110,7 @@ BucketStateStateChecker::check(StateChecker::Context& c)
activeNodeIndexes.push_back(activeNodes[i]._nodeIndex);
}
auto op = std::make_unique<SetBucketStateOperation>(
- c.component.cluster_context(),
+ c.node_ctx,
BucketAndNodes(c.getBucket(), operationNodes),
activeNodeIndexes);
@@ -1137,7 +1137,7 @@ GarbageCollectionStateChecker::needsGarbageCollection(const Context& c) const
}
std::chrono::seconds lastRunAt(c.entry->getLastGarbageCollectionTime());
std::chrono::seconds currentTime(
- c.component.getClock().getTimeInSeconds().getTime());
+ c.node_ctx.clock().getTimeInSeconds().getTime());
return c.gcTimeCalculator.shouldGc(c.getBucketId(), currentTime, lastRunAt);
}
@@ -1147,14 +1147,14 @@ GarbageCollectionStateChecker::check(Context& c)
{
if (needsGarbageCollection(c)) {
auto op = std::make_unique<GarbageCollectionOperation>(
- c.component.cluster_context(),
+ c.node_ctx,
BucketAndNodes(c.getBucket(), c.entry->getNodes()));
vespalib::asciistream reason;
reason << "[Needs garbage collection: Last check at "
<< c.entry->getLastGarbageCollectionTime()
<< ", current time "
- << c.component.getClock().getTimeInSeconds().getTime()
+ << c.node_ctx.clock().getTimeInSeconds().getTime()
<< ", configured interval "
<< vespalib::to_s(c.distributorConfig.getGarbageCollectionInterval()) << "]";
diff --git a/storage/src/vespa/storage/distributor/storage_node_up_states.h b/storage/src/vespa/storage/distributor/storage_node_up_states.h
new file mode 100644
index 00000000000..a68b275e31d
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/storage_node_up_states.h
@@ -0,0 +1,14 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+namespace storage::distributor {
+
+/**
+ * Returns the states in which the distributors consider storage nodes to be up.
+ */
+constexpr const char* storage_node_up_states() noexcept {
+ return "uri";
+}
+
+}
diff --git a/storage/src/vespa/storage/distributor/stripe_access_guard.h b/storage/src/vespa/storage/distributor/stripe_access_guard.h
index 69aae755dec..a1779a4eb4f 100644
--- a/storage/src/vespa/storage/distributor/stripe_access_guard.h
+++ b/storage/src/vespa/storage/distributor/stripe_access_guard.h
@@ -16,6 +16,8 @@ class Distribution;
namespace storage { class DistributorConfiguration; }
+namespace vespalib::xml { class XmlOutputStream; }
+
namespace storage::distributor {
/**
@@ -28,6 +30,8 @@ class StripeAccessGuard {
public:
virtual ~StripeAccessGuard() = default;
+ virtual void flush_and_close() = 0;
+
virtual void update_total_distributor_config(std::shared_ptr<const DistributorConfiguration> config) = 0;
virtual void update_distribution_config(const BucketSpaceDistributionConfigs& new_configs) = 0;
@@ -52,6 +56,22 @@ public:
virtual void update_read_snapshot_after_activation(const lib::ClusterStateBundle& activated_state) = 0;
virtual void clear_read_only_bucket_repo_databases() = 0;
+ // TODO STRIPE: Add merge() function.
+ struct PendingOperationStats {
+ size_t external_load_operations;
+ size_t maintenance_operations;
+ PendingOperationStats(size_t external_load_operations_in,
+ size_t maintenance_operations_in)
+ : external_load_operations(external_load_operations_in),
+ maintenance_operations(maintenance_operations_in) {}
+ };
+
+ // Functions used for state reporting
+ virtual void report_bucket_db_status(document::BucketSpace bucket_space, std::ostream& out) const = 0;
+ virtual PendingOperationStats pending_operation_stats() const = 0;
+ virtual void report_single_bucket_requests(vespalib::xml::XmlOutputStream& xos) const = 0;
+ virtual void report_delayed_single_bucket_requests(vespalib::xml::XmlOutputStream& xos) const = 0;
+
};
/**
diff --git a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp
index 61ff11d5ac3..95272752ace 100644
--- a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp
+++ b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp
@@ -21,23 +21,22 @@
#include <vespa/log/bufferedlogger.h>
LOG_SETUP(".distributor.stripe_bucket_db_updater");
+using document::BucketSpace;
using storage::lib::Node;
using storage::lib::NodeType;
-using document::BucketSpace;
+using vespalib::xml::XmlAttribute;
namespace storage::distributor {
-StripeBucketDBUpdater::StripeBucketDBUpdater(DistributorStripeInterface& owner,
- DistributorBucketSpaceRepo& bucketSpaceRepo,
- DistributorBucketSpaceRepo& readOnlyBucketSpaceRepo,
+StripeBucketDBUpdater::StripeBucketDBUpdater(const DistributorNodeContext& node_ctx,
+ DistributorStripeOperationContext& op_ctx,
+ DistributorStripeInterface& owner,
DistributorMessageSender& sender,
- DistributorComponentRegister& compReg,
bool use_legacy_mode)
: framework::StatusReporter("bucketdb", "Bucket DB Updater"),
- _distributorComponent(owner, bucketSpaceRepo, readOnlyBucketSpaceRepo, compReg, "Bucket DB Updater"),
- _node_ctx(_distributorComponent),
- _op_ctx(_distributorComponent),
- _distributor_interface(_distributorComponent.getDistributor()),
+ _node_ctx(node_ctx),
+ _op_ctx(op_ctx),
+ _distributor_interface(owner),
_delayedRequests(),
_sentMessages(),
_pendingClusterState(),
@@ -225,7 +224,7 @@ StripeBucketDBUpdater::removeSuperfluousBuckets(
{
assert(_use_legacy_mode);
const bool move_to_read_only_db = shouldDeferStateEnabling();
- const char* up_states = _op_ctx.storage_node_up_states();
+ const char* up_states = storage_node_up_states();
for (auto& elem : _op_ctx.bucket_space_repo()) {
const auto& newDistribution(elem.second->getDistribution());
const auto& oldClusterState(elem.second->getClusterState());
@@ -273,7 +272,7 @@ StripeBucketDBUpdater::remove_superfluous_buckets(
assert(!_use_legacy_mode);
(void)is_distribution_change; // TODO remove if not needed
const bool move_to_read_only_db = shouldDeferStateEnabling();
- const char* up_states = _op_ctx.storage_node_up_states();
+ const char* up_states = storage_node_up_states();
auto& s = _op_ctx.bucket_space_repo().get(bucket_space);
const auto& new_distribution = s.getDistribution();
@@ -377,7 +376,7 @@ StripeBucketDBUpdater::storageDistributionChanged()
auto clusterInfo = std::make_shared<const SimpleClusterInformation>(
_node_ctx.node_index(),
_op_ctx.cluster_state_bundle(),
- _op_ctx.storage_node_up_states());
+ storage_node_up_states());
_pendingClusterState = PendingClusterState::createForDistributionChange(
_node_ctx.clock(),
std::move(clusterInfo),
@@ -489,7 +488,7 @@ StripeBucketDBUpdater::onSetSystemState(
auto clusterInfo = std::make_shared<const SimpleClusterInformation>(
_node_ctx.node_index(),
_op_ctx.cluster_state_bundle(),
- _op_ctx.storage_node_up_states());
+ storage_node_up_states());
_pendingClusterState = PendingClusterState::createForClusterStateChange(
_node_ctx.clock(),
std::move(clusterInfo),
@@ -960,20 +959,30 @@ StripeBucketDBUpdater::reportXmlStatus(vespalib::xml::XmlOutputStream& xos,
}
xos << XmlEndTag()
<< XmlTag("single_bucket_requests");
- for (const auto & entry : _sentMessages)
- {
- entry.second.print_xml_tag(xos, XmlAttribute("sendtimestamp", entry.second.timestamp));
- }
+ report_single_bucket_requests(xos);
xos << XmlEndTag()
<< XmlTag("delayed_single_bucket_requests");
- for (const auto & entry : _delayedRequests)
- {
- entry.second.print_xml_tag(xos, XmlAttribute("resendtimestamp", entry.first.getTime()));
- }
+ report_delayed_single_bucket_requests(xos);
xos << XmlEndTag() << XmlEndTag();
return "";
}
+void
+StripeBucketDBUpdater::report_single_bucket_requests(vespalib::xml::XmlOutputStream& xos) const
+{
+ for (const auto& entry : _sentMessages) {
+ entry.second.print_xml_tag(xos, XmlAttribute("sendtimestamp", entry.second.timestamp));
+ }
+}
+
+void
+StripeBucketDBUpdater::report_delayed_single_bucket_requests(vespalib::xml::XmlOutputStream& xos) const
+{
+ for (const auto& entry : _delayedRequests) {
+ entry.second.print_xml_tag(xos, XmlAttribute("resendtimestamp", entry.first.getTime()));
+ }
+}
+
StripeBucketDBUpdater::MergingNodeRemover::MergingNodeRemover(
const lib::ClusterState& oldState,
const lib::ClusterState& s,
diff --git a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h
index 67b31343f6f..ed059a32d14 100644
--- a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h
+++ b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h
@@ -36,11 +36,10 @@ class StripeBucketDBUpdater final
{
public:
using OutdatedNodesMap = dbtransition::OutdatedNodesMap;
- StripeBucketDBUpdater(DistributorStripeInterface& owner,
- DistributorBucketSpaceRepo& bucketSpaceRepo,
- DistributorBucketSpaceRepo& readOnlyBucketSpaceRepo,
+ StripeBucketDBUpdater(const DistributorNodeContext& node_ctx,
+ DistributorStripeOperationContext& op_ctx,
+ DistributorStripeInterface& owner,
DistributorMessageSender& sender,
- DistributorComponentRegister& compReg,
bool use_legacy_mode);
~StripeBucketDBUpdater() override;
@@ -59,6 +58,11 @@ public:
vespalib::string reportXmlStatus(vespalib::xml::XmlOutputStream&, const framework::HttpUrlPath&) const;
vespalib::string getReportContentType(const framework::HttpUrlPath&) const override;
bool reportStatus(std::ostream&, const framework::HttpUrlPath&) const override;
+
+ // Functions used for state reporting when a StripeAccessGuard is held.
+ void report_single_bucket_requests(vespalib::xml::XmlOutputStream& xos) const;
+ void report_delayed_single_bucket_requests(vespalib::xml::XmlOutputStream& xos) const;
+
void print(std::ostream& out, bool verbose, const std::string& indent) const;
const DistributorNodeContext& node_context() const { return _node_ctx; }
DistributorStripeOperationContext& operation_context() { return _op_ctx; }
@@ -140,7 +144,8 @@ private:
friend class DistributorTestUtil;
// TODO refactor and rewire to avoid needing this direct meddling
- friend class LegacySingleStripeAccessGuard;
+ friend class DistributorStripe;
+
// Only to be used by tests that want to ensure both the BucketDBUpdater _and_ the Distributor
// components agree on the currently active cluster state bundle.
// Transitively invokes Distributor::enableClusterStateBundle
@@ -258,7 +263,6 @@ private:
mutable bool _cachedOwned;
};
- DistributorStripeComponent _distributorComponent;
const DistributorNodeContext& _node_ctx;
DistributorStripeOperationContext& _op_ctx;
DistributorStripeInterface& _distributor_interface;
diff --git a/storage/src/vespa/storage/distributor/stripe_host_info_notifier.h b/storage/src/vespa/storage/distributor/stripe_host_info_notifier.h
new file mode 100644
index 00000000000..3f10188827a
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/stripe_host_info_notifier.h
@@ -0,0 +1,24 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <cstdint>
+
+namespace storage::distributor {
+
+/**
+ * Used by stripes to signal that the distributor node should immediately respond to
+ * any pending GetNodeState long-poll RPCs from the cluster controller. This is generally
+ * done when a stripe has completed initializing or if all merging has completed for
+ * a bucket space.
+ *
+ * Implementations of this interface may batch and/or throttle actual host info sends,
+ * but shall attempt to send new host info within a reasonable amount of time (on the
+ * order of seconds).
+ */
+class StripeHostInfoNotifier {
+public:
+ virtual ~StripeHostInfoNotifier() = default;
+ virtual void notify_stripe_wants_to_send_host_info(uint16_t stripe_index) = 0;
+};
+
+}
diff --git a/storage/src/vespa/storage/distributor/throttlingoperationstarter.h b/storage/src/vespa/storage/distributor/throttlingoperationstarter.h
index 0b6d89e0570..e25141be214 100644
--- a/storage/src/vespa/storage/distributor/throttlingoperationstarter.h
+++ b/storage/src/vespa/storage/distributor/throttlingoperationstarter.h
@@ -26,7 +26,7 @@ class ThrottlingOperationStarter : public OperationStarter
ThrottlingOperation(const ThrottlingOperation&);
ThrottlingOperation& operator=(const ThrottlingOperation&);
- void onClose(DistributorMessageSender& sender) override {
+ void onClose(DistributorStripeMessageSender& sender) override {
_operation->onClose(sender);
}
const char* getName() const override {
@@ -38,21 +38,21 @@ class ThrottlingOperationStarter : public OperationStarter
std::string toString() const override {
return _operation->toString();
}
- void start(DistributorMessageSender& sender, framework::MilliSecTime startTime) override {
+ void start(DistributorStripeMessageSender& sender, framework::MilliSecTime startTime) override {
_operation->start(sender, startTime);
}
- void receive(DistributorMessageSender& sender, const std::shared_ptr<api::StorageReply> & msg) override {
+ void receive(DistributorStripeMessageSender& sender, const std::shared_ptr<api::StorageReply> & msg) override {
_operation->receive(sender, msg);
}
framework::MilliSecTime getStartTime() const {
return _operation->getStartTime();
}
- void onStart(DistributorMessageSender&) override {
+ void onStart(DistributorStripeMessageSender&) override {
// Should never be called directly on the throttled operation
// instance, but rather on its wrapped implementation.
HDR_ABORT("should not be reached");
}
- void onReceive(DistributorMessageSender&,
+ void onReceive(DistributorStripeMessageSender&,
const std::shared_ptr<api::StorageReply>&) override {
HDR_ABORT("should not be reached");
}
diff --git a/storage/src/vespa/storage/distributor/tickable_stripe.h b/storage/src/vespa/storage/distributor/tickable_stripe.h
new file mode 100644
index 00000000000..8d077455a48
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/tickable_stripe.h
@@ -0,0 +1,68 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include "stripe_access_guard.h" // TODO STRIPE break up
+
+namespace storage::lib {
+class ClusterState;
+class ClusterStateBundle;
+class Distribution;
+}
+
+namespace storage { class DistributorConfiguration; }
+
+namespace vespalib::xml { class XmlOutputStream; }
+
+namespace storage::distributor {
+
+/**
+ * A tickable stripe is the minimal binding glue between the stripe's worker thread and
+ * the actual implementation. Primarily allows for easier testing without having to
+ * fake an entire actual DistributorStripe.
+ */
+class TickableStripe {
+public:
+ virtual ~TickableStripe() = default;
+
+ // Perform a single operation tick of the stripe logic.
+ // If function returns true, the caller should not perform any waiting before calling
+ // tick() again. This generally means that the stripe is processing client operations
+ // and wants to continue doing so as quickly as possible.
+ // Only used for multi-threaded striped setups.
+ // TODO return an enum indicating type of last processed event? E.g. external, maintenance, none, ...
+ virtual bool tick() = 0;
+
+ virtual void flush_and_close() = 0;
+
+ virtual void update_total_distributor_config(std::shared_ptr<const DistributorConfiguration> config) = 0;
+
+ virtual void update_distribution_config(const BucketSpaceDistributionConfigs& new_configs) = 0;
+ virtual void set_pending_cluster_state_bundle(const lib::ClusterStateBundle& pending_state) = 0;
+ virtual void clear_pending_cluster_state_bundle() = 0;
+ virtual void enable_cluster_state_bundle(const lib::ClusterStateBundle& new_state) = 0;
+ virtual void notify_distribution_change_enabled() = 0;
+
+ virtual PotentialDataLossReport remove_superfluous_buckets(document::BucketSpace bucket_space,
+ const lib::ClusterState& new_state,
+ bool is_distribution_change) = 0;
+ virtual void merge_entries_into_db(document::BucketSpace bucket_space,
+ api::Timestamp gathered_at_timestamp,
+ const lib::Distribution& distribution,
+ const lib::ClusterState& new_state,
+ const char* storage_up_states,
+ const std::unordered_set<uint16_t>& outdated_nodes,
+ const std::vector<dbtransition::Entry>& entries) = 0;
+
+ virtual void update_read_snapshot_before_db_pruning() = 0;
+ virtual void update_read_snapshot_after_db_pruning(const lib::ClusterStateBundle& new_state) = 0;
+ virtual void update_read_snapshot_after_activation(const lib::ClusterStateBundle& activated_state) = 0;
+ virtual void clear_read_only_bucket_repo_databases() = 0;
+ // Functions used for state reporting
+ virtual void report_bucket_db_status(document::BucketSpace bucket_space, std::ostream& out) const = 0;
+ virtual StripeAccessGuard::PendingOperationStats pending_operation_stats() const = 0;
+ virtual void report_single_bucket_requests(vespalib::xml::XmlOutputStream& xos) const = 0;
+ virtual void report_delayed_single_bucket_requests(vespalib::xml::XmlOutputStream& xos) const = 0;
+
+};
+
+}
diff --git a/storage/src/vespa/storage/storageserver/distributornode.cpp b/storage/src/vespa/storage/storageserver/distributornode.cpp
index 8f4f0422f44..51fa0c01c96 100644
--- a/storage/src/vespa/storage/storageserver/distributornode.cpp
+++ b/storage/src/vespa/storage/storageserver/distributornode.cpp
@@ -5,9 +5,10 @@
#include "communicationmanager.h"
#include "opslogger.h"
#include "statemanager.h"
+#include <vespa/storage/common/hostreporter/hostinfo.h>
#include <vespa/storage/common/i_storage_chain_builder.h>
#include <vespa/storage/distributor/distributor.h>
-#include <vespa/storage/common/hostreporter/hostinfo.h>
+#include <vespa/storage/distributor/distributor_stripe_pool.h>
#include <vespa/vespalib/util/exceptions.h>
#include <vespa/log/log.h>
@@ -26,9 +27,11 @@ DistributorNode::DistributorNode(
std::make_unique<HostInfo>(),
!communicationManager ? NORMAL : SINGLE_THREADED_TEST_MODE),
_threadPool(framework::TickingThreadPool::createDefault("distributor")),
+ _stripe_pool(std::make_unique<distributor::DistributorStripePool>()),
_context(context),
- _lastUniqueTimestampRequested(0),
- _uniqueTimestampCounter(0),
+ _timestamp_mutex(),
+ _timestamp_second_counter(0),
+ _intra_second_pseudo_usec_counter(0),
_num_distributor_stripes(num_distributor_stripes),
_retrievedCommunicationManager(std::move(communicationManager))
{
@@ -52,6 +55,7 @@ void
DistributorNode::shutdownDistributor()
{
_threadPool->stop();
+ _stripe_pool->stop_and_join();
shutdown();
}
@@ -100,30 +104,39 @@ DistributorNode::createChain(IStorageChainBuilder &builder)
// manager, which is safe since the lifetime of said state manager
// extends to the end of the process.
builder.add(std::make_unique<storage::distributor::Distributor>
- (dcr, *_node_identity, *_threadPool, getDoneInitializeHandler(),
+ (dcr, *_node_identity, *_threadPool, *_stripe_pool, getDoneInitializeHandler(),
_num_distributor_stripes,
stateManager->getHostInfo()));
builder.add(std::move(stateManager));
}
-// FIXME STRIPE not thread safe!!
api::Timestamp
-DistributorNode::getUniqueTimestamp()
+DistributorNode::generate_unique_timestamp()
{
- uint64_t timeNow(_component->getClock().getTimeInSeconds().getTime());
- if (timeNow == _lastUniqueTimestampRequested) {
- ++_uniqueTimestampCounter;
- } else {
- if (timeNow < _lastUniqueTimestampRequested) {
- LOG(error, "Time has moved backwards, from %" PRIu64 " to %" PRIu64 ".",
- _lastUniqueTimestampRequested, timeNow);
+ uint64_t now_seconds = _component->getClock().getTimeInSeconds().getTime();
+ std::lock_guard lock(_timestamp_mutex);
+ // We explicitly handle a seemingly decreased wall clock time, as multiple threads may
+ // race with each other over a second change edge. In this case, pretend an earlier
+ // timestamp took place in the same second as the newest observed wall clock time.
+ if (now_seconds <= _timestamp_second_counter) {
+ // ... but if we're stuck too far in the past, we trigger a process restart.
+ if ((_timestamp_second_counter - now_seconds) > SanityCheckMaxWallClockSecondSkew) {
+ LOG(error, "Current wall clock time is more than %u seconds in the past "
+ "compared to the highest observed wall clock time (%" PRIu64 " < %" PRIu64 "). "
+ "%u timestamps were generated within this time period.",
+                       SanityCheckMaxWallClockSecondSkew, now_seconds, _timestamp_second_counter,
+ _intra_second_pseudo_usec_counter);
+ abort();
}
- _lastUniqueTimestampRequested = timeNow;
- _uniqueTimestampCounter = 0;
+ assert(_intra_second_pseudo_usec_counter < 1'000'000);
+ ++_intra_second_pseudo_usec_counter;
+ } else {
+ _timestamp_second_counter = now_seconds;
+ _intra_second_pseudo_usec_counter = 0;
}
- return _lastUniqueTimestampRequested * 1000000ll + _uniqueTimestampCounter;
+ return _timestamp_second_counter * 1'000'000LL + _intra_second_pseudo_usec_counter;
}
ResumeGuard
diff --git a/storage/src/vespa/storage/storageserver/distributornode.h b/storage/src/vespa/storage/storageserver/distributornode.h
index 267d4400ac7..21e9589b760 100644
--- a/storage/src/vespa/storage/storageserver/distributornode.h
+++ b/storage/src/vespa/storage/storageserver/distributornode.h
@@ -12,9 +12,12 @@
#include "storagenode.h"
#include <vespa/storage/common/distributorcomponent.h>
#include <vespa/storageframework/generic/thread/tickingthread.h>
+#include <mutex>
namespace storage {
+namespace distributor { class DistributorStripePool; }
+
class IStorageChainBuilder;
class DistributorNode
@@ -22,12 +25,21 @@ class DistributorNode
private UniqueTimeCalculator
{
framework::TickingThreadPool::UP _threadPool;
+ std::unique_ptr<distributor::DistributorStripePool> _stripe_pool;
DistributorNodeContext& _context;
- uint64_t _lastUniqueTimestampRequested;
- uint32_t _uniqueTimestampCounter;
+ std::mutex _timestamp_mutex;
+ uint64_t _timestamp_second_counter;
+ uint32_t _intra_second_pseudo_usec_counter;
uint32_t _num_distributor_stripes;
std::unique_ptr<StorageLink> _retrievedCommunicationManager;
+ // If the current wall clock is more than the below number of seconds into the
+ // past when compared to the highest recorded wall clock second time stamp, abort
+ // the process. This is a sanity checking measure to prevent a process running
+ // on a wall clock that transiently is set far into the future from (hopefully)
+ // generating a massive amount of broken future timestamps.
+ constexpr static uint32_t SanityCheckMaxWallClockSecondSkew = 120;
+
public:
typedef std::unique_ptr<DistributorNode> UP;
@@ -49,7 +61,7 @@ private:
void initializeNodeSpecific() override;
void perform_post_chain_creation_init_steps() override { /* no-op */ }
void createChain(IStorageChainBuilder &builder) override;
- api::Timestamp getUniqueTimestamp() override;
+ api::Timestamp generate_unique_timestamp() override;
/**
* Shut down necessary distributor-specific components before shutting
diff --git a/travis/travis.sh b/travis/travis.sh
deleted file mode 100755
index 1ea4d43dd66..00000000000
--- a/travis/travis.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-set -e
-
-# Workaround for Travis log output timeout (jobs without output over 10 minutes are killed)
-function bell() {
- while true; do
- echo "."
- sleep 300
- done
-}
-
-DOCKER_IMAGE=vespaengine/vespa-build-centos7:latest
-
-bell &
-docker run --rm -v ${HOME}/.m2:/root/.m2 -v ${HOME}/.ccache:/root/.ccache -v $(pwd):/source \
- -e TRAVIS_REPO_SLUG=$TRAVIS_REPO_SLUG -e TRAVIS_PULL_REQUEST=$TRAVIS_PULL_REQUEST \
- --entrypoint /source/travis/travis-build.sh ${DOCKER_IMAGE}
-exit $?
diff --git a/vespa-feed-client/CMakeLists.txt b/vespa-feed-client/CMakeLists.txt
new file mode 100644
index 00000000000..ee6dfdeff05
--- /dev/null
+++ b/vespa-feed-client/CMakeLists.txt
@@ -0,0 +1,2 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+# TODO Install fat jar \ No newline at end of file
diff --git a/vespa-feed-client/OWNERS b/vespa-feed-client/OWNERS
new file mode 100644
index 00000000000..606d074d8a8
--- /dev/null
+++ b/vespa-feed-client/OWNERS
@@ -0,0 +1,2 @@
+bjorncs
+jonmv
diff --git a/vespa-feed-client/README.md b/vespa-feed-client/README.md
new file mode 100644
index 00000000000..cef0815da66
--- /dev/null
+++ b/vespa-feed-client/README.md
@@ -0,0 +1,2 @@
+# vespa-feed-client
+Java client library for feeding over HTTP/2 at `/document/v1/`
diff --git a/vespa-feed-client/pom.xml b/vespa-feed-client/pom.xml
new file mode 100644
index 00000000000..4a44f1c0240
--- /dev/null
+++ b/vespa-feed-client/pom.xml
@@ -0,0 +1,107 @@
+<?xml version="1.0"?>
+<!-- Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>parent</artifactId>
+ <version>7-SNAPSHOT</version>
+ <relativePath>../parent/pom.xml</relativePath>
+ </parent>
+ <artifactId>vespa-feed-client</artifactId>
+ <packaging>jar</packaging>
+ <version>7-SNAPSHOT</version>
+
+ <properties>
+        <!-- Compile with Java 8 compatibility: this artifact is consumed by internal projects that are still on JDK 8 -->
+ <maven.compiler.release>8</maven.compiler.release>
+ </properties>
+
+ <dependencies>
+ <!-- provided -->
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.bouncycastle</groupId>
+ <artifactId>bcpkix-jdk15on</artifactId>
+ <scope>provided</scope>
+ </dependency>
+
+ <!-- compile scope -->
+ <dependency>
+ <groupId>org.apache.httpcomponents.client5</groupId>
+ <artifactId>httpclient5</artifactId>
+ <scope>compile</scope>
+ <exclusions>
+ <exclusion>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>commons-cli</groupId>
+ <artifactId>commons-cli</artifactId>
+ <scope>compile</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>com.fasterxml.jackson.core</groupId>
+ <artifactId>jackson-core</artifactId>
+ <scope>compile</scope>
+ </dependency>
+
+ <!-- test scope -->
+ <dependency>
+ <groupId>org.junit.jupiter</groupId>
+ <artifactId>junit-jupiter</artifactId>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <configuration>
+ <jdkToolchain>
+ <version>${java.version}</version>
+ </jdkToolchain>
+ <source>${java.version}</source>
+ <target>${java.version}</target>
+ <showDeprecation>true</showDeprecation>
+ <compilerArgs>
+ <arg>-Xlint:all</arg>
+ <arg>-Xlint:-serial</arg>
+ <arg>-Werror</arg>
+ </compilerArgs>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>generate-simplified-vtag</id>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <executable>src/main/sh/vespa-version-generator.sh</executable>
+ <arguments>
+ <argument>${project.basedir}/../dist/vtag.map</argument>
+ <argument>${project.build.directory}/generated-sources/vespa-version/ai/vespa/feed/client/Vespa.java</argument>
+ </arguments>
+ <sourceRoot>${project.build.directory}/generated-sources/vespa-version</sourceRoot>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/CliArguments.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/CliArguments.java
new file mode 100644
index 00000000000..63b438134d6
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/CliArguments.java
@@ -0,0 +1,199 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+
+import java.io.File;
+import java.io.OutputStream;
+import java.io.PrintWriter;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.nio.file.Path;
+import java.util.Optional;
+import java.util.OptionalInt;
+
+/**
+ * Parses command line arguments
+ *
+ * @author bjorncs
+ */
+class CliArguments {
+
+ private static final Options optionsDefinition = createOptions();
+
+ private static final String HELP_OPTION = "help";
+ private static final String VERSION_OPTION = "version";
+ private static final String ENDPOINT_OPTION = "endpoint";
+ private static final String FILE_OPTION = "file";
+ private static final String CONNECTIONS_OPTION = "connections";
+ private static final String MAX_STREAMS_PER_CONNECTION = "max-streams-per-connection";
+ private static final String CERTIFICATE_OPTION = "certificate";
+ private static final String PRIVATE_KEY_OPTION = "private-key";
+ private static final String CA_CERTIFICATES_OPTION = "ca-certificates";
+ private static final String DISABLE_SSL_HOSTNAME_VERIFICATION_OPTION = "disable-ssl-hostname-verification";
+
+ private final CommandLine arguments;
+
+ private CliArguments(CommandLine arguments) {
+ this.arguments = arguments;
+ }
+
+ static CliArguments fromRawArgs(String[] rawArgs) throws CliArgumentsException {
+ CommandLineParser parser = new DefaultParser();
+ try {
+ return new CliArguments(parser.parse(optionsDefinition, rawArgs));
+ } catch (ParseException e) {
+ throw new CliArgumentsException(e);
+ }
+ }
+
+ URI endpoint() throws CliArgumentsException {
+ try {
+ URL url = (URL) arguments.getParsedOptionValue(ENDPOINT_OPTION);
+ if (url == null) throw new CliArgumentsException("Endpoint must be specified");
+ return url.toURI();
+ } catch (ParseException | URISyntaxException e) {
+ throw new CliArgumentsException("Invalid endpoint: " + e.getMessage(), e);
+ }
+ }
+
+ boolean helpSpecified() { return has(HELP_OPTION); }
+
+ boolean versionSpecified() { return has(VERSION_OPTION); }
+
+ OptionalInt connections() throws CliArgumentsException { return intValue(CONNECTIONS_OPTION); }
+
+ OptionalInt maxStreamsPerConnection() throws CliArgumentsException { return intValue(MAX_STREAMS_PER_CONNECTION); }
+
+ Optional<CertificateAndKey> certificateAndKey() throws CliArgumentsException {
+ Path certificateFile = fileValue(CERTIFICATE_OPTION).orElse(null);
+ Path privateKeyFile = fileValue(PRIVATE_KEY_OPTION).orElse(null);
+ if ((certificateFile == null) != (privateKeyFile == null)) {
+ throw new CliArgumentsException(String.format("Both '%s' and '%s' must be specified together", CERTIFICATE_OPTION, PRIVATE_KEY_OPTION));
+ }
+ if (privateKeyFile == null && certificateFile == null) return Optional.empty();
+ return Optional.of(new CertificateAndKey(certificateFile, privateKeyFile));
+ }
+
+ Optional<Path> caCertificates() throws CliArgumentsException { return fileValue(CA_CERTIFICATES_OPTION); }
+
+ Path inputFile() throws CliArgumentsException {
+ return fileValue(FILE_OPTION)
+ .orElseThrow(() -> new CliArgumentsException("Feed file must be specified"));
+ }
+
+ boolean sslHostnameVerificationDisabled() { return has(DISABLE_SSL_HOSTNAME_VERIFICATION_OPTION); }
+
+ private OptionalInt intValue(String option) throws CliArgumentsException {
+ try {
+ Number number = (Number) arguments.getParsedOptionValue(option);
+ return number != null ? OptionalInt.of(number.intValue()) : OptionalInt.empty();
+ } catch (ParseException e) {
+ throw new CliArgumentsException(String.format("Invalid value for '%s': %s", option, e.getMessage()), e);
+ }
+ }
+
+ private Optional<Path> fileValue(String option) throws CliArgumentsException {
+ try {
+ File certificateFile = (File) arguments.getParsedOptionValue(option);
+ if (certificateFile == null) return Optional.empty();
+ return Optional.of(certificateFile.toPath());
+ } catch (ParseException e) {
+ throw new CliArgumentsException(String.format("Invalid value for '%s': %s", option, e.getMessage()), e);
+ }
+ }
+
+ private boolean has(String option) { return arguments.hasOption(option); }
+
+ private static Options createOptions() {
+ return new Options()
+ .addOption(Option.builder()
+ .longOpt(HELP_OPTION)
+ .build())
+ .addOption(Option.builder()
+ .longOpt(VERSION_OPTION)
+ .build())
+ .addOption(Option.builder()
+ .longOpt(ENDPOINT_OPTION)
+ .hasArg()
+ .type(URL.class)
+ .build())
+ .addOption(Option.builder()
+ .longOpt(FILE_OPTION)
+ .type(File.class)
+ .hasArg()
+ .build())
+ .addOption(Option.builder()
+ .longOpt(CONNECTIONS_OPTION)
+ .hasArg()
+ .type(Number.class)
+ .build())
+ .addOption(Option.builder()
+ .longOpt(MAX_STREAMS_PER_CONNECTION)
+ .hasArg()
+ .type(Number.class)
+ .build())
+ .addOption(Option.builder()
+ .longOpt(CONNECTIONS_OPTION)
+ .hasArg()
+ .type(Number.class)
+ .build())
+ .addOption(Option.builder()
+ .longOpt(CERTIFICATE_OPTION)
+ .type(File.class)
+ .hasArg()
+ .build())
+ .addOption(Option.builder()
+ .longOpt(PRIVATE_KEY_OPTION)
+ .type(File.class)
+ .hasArg()
+ .build())
+ .addOption(Option.builder()
+ .longOpt(CA_CERTIFICATES_OPTION)
+ .type(File.class)
+ .hasArg()
+ .build())
+ .addOption(Option.builder()
+ .longOpt(DISABLE_SSL_HOSTNAME_VERIFICATION_OPTION)
+ .build());
+ }
+
+ void printHelp(OutputStream out) {
+ HelpFormatter formatter = new HelpFormatter();
+ PrintWriter writer = new PrintWriter(out);
+ formatter.printHelp(
+ writer,
+ formatter.getWidth(),
+ "vespa-feed-client <options>",
+ "Vespa feed client",
+ optionsDefinition,
+ formatter.getLeftPadding(),
+ formatter.getDescPadding(),
+ "");
+ writer.flush();
+ }
+
+ static class CliArgumentsException extends Exception {
+ CliArgumentsException(String message, Throwable cause) { super(message, cause); }
+ CliArgumentsException(Throwable cause) { super(cause.getMessage(), cause); }
+ CliArgumentsException(String message) { super(message); }
+ }
+
+ static class CertificateAndKey {
+ final Path certificateFile;
+ final Path privateKeyFile;
+
+ CertificateAndKey(Path certificateFile, Path privateKeyFile) {
+ this.certificateFile = certificateFile;
+ this.privateKeyFile = privateKeyFile;
+ }
+ }
+
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/CliClient.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/CliClient.java
new file mode 100644
index 00000000000..83a0c650318
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/CliClient.java
@@ -0,0 +1,92 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.SSLSession;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.nio.file.Path;
+import java.util.Properties;
+
+/**
+ * Main method for CLI interface
+ *
+ * @author bjorncs
+ */
+class CliClient {
+
+ private final PrintStream systemOut;
+ private final PrintStream systemError;
+ private final Properties systemProperties;
+
+ CliClient(PrintStream systemOut, PrintStream systemError, Properties systemProperties) {
+ this.systemOut = systemOut;
+ this.systemError = systemError;
+ this.systemProperties = systemProperties;
+ }
+
+ public static void main(String[] args) {
+ CliClient client = new CliClient(System.out, System.err, System.getProperties());
+ int exitCode = client.run(args);
+ System.exit(exitCode);
+ }
+
+ int run(String[] rawArgs) {
+ try {
+ CliArguments cliArgs = CliArguments.fromRawArgs(rawArgs);
+ if (cliArgs.helpSpecified()) {
+ cliArgs.printHelp(systemOut);
+ return 0;
+ }
+ if (cliArgs.versionSpecified()) {
+ systemOut.println(Vespa.VERSION);
+ return 0;
+ }
+ FeedClient feedClient = createFeedClient(cliArgs);
+ return 0;
+ } catch (CliArguments.CliArgumentsException | IOException e) {
+ return handleException(e);
+ }
+ }
+
+ private static FeedClient createFeedClient(CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
+ FeedClientBuilder builder = FeedClientBuilder.create(cliArgs.endpoint());
+ cliArgs.connections().ifPresent(builder::setMaxConnections);
+        cliArgs.maxStreamsPerConnection().ifPresent(builder::setMaxStreamsPerConnection);
+ if (cliArgs.sslHostnameVerificationDisabled()) {
+ builder.setHostnameVerifier(AcceptAllHostnameVerifier.INSTANCE);
+ }
+ CliArguments.CertificateAndKey certificateAndKey = cliArgs.certificateAndKey().orElse(null);
+ Path caCertificates = cliArgs.caCertificates().orElse(null);
+ if (certificateAndKey != null || caCertificates != null) {
+ SslContextBuilder sslContextBuilder = new SslContextBuilder();
+ if (certificateAndKey != null) {
+ sslContextBuilder.withCertificateAndKey(certificateAndKey.certificateFile, certificateAndKey.privateKeyFile);
+ }
+ if (caCertificates != null) {
+ sslContextBuilder.withCaCertificates(caCertificates);
+ }
+ builder.setSslContext(sslContextBuilder.build());
+ }
+ return builder.build();
+ }
+
+ private int handleException(Exception e) { return handleException(e.getMessage(), e); }
+
+ private int handleException(String message, Exception exception) {
+ systemError.println(message);
+ if (debugMode()) {
+ exception.printStackTrace(systemError);
+ }
+ return 1;
+ }
+
+ private boolean debugMode() {
+ return Boolean.parseBoolean(systemProperties.getProperty("VESPA_DEBUG", Boolean.FALSE.toString()));
+ }
+
+ private static class AcceptAllHostnameVerifier implements HostnameVerifier {
+ static final AcceptAllHostnameVerifier INSTANCE = new AcceptAllHostnameVerifier();
+ @Override public boolean verify(String hostname, SSLSession session) { return true; }
+ }
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/DocumentId.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/DocumentId.java
new file mode 100644
index 00000000000..21513a5dac2
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/DocumentId.java
@@ -0,0 +1,96 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import java.util.Objects;
+import java.util.Optional;
+import java.util.OptionalLong;
+
+import static java.util.Objects.requireNonNull;
+
+/**
+ * @author jonmv
+ */
+public class DocumentId {
+
+ private final String documentType;
+ private final String namespace;
+ private final OptionalLong number;
+ private final Optional<String> group;
+ private final String userSpecific;
+
+ private DocumentId(String documentType, String namespace, OptionalLong number, Optional<String> group, String userSpecific) {
+ this.documentType = requireNonNull(documentType);
+ this.namespace = requireNonNull(namespace);
+ this.number = requireNonNull(number);
+ this.group = requireNonNull(group);
+ this.userSpecific = requireNonNull(userSpecific);
+ }
+
+ public static DocumentId of(String namespace, String documentType, String userSpecific) {
+ return new DocumentId(documentType, namespace, OptionalLong.empty(), Optional.empty(), userSpecific);
+ }
+
+ public static DocumentId of(String namespace, String documentType, long number, String userSpecific) {
+ return new DocumentId(documentType, namespace, OptionalLong.of(number), Optional.empty(), userSpecific);
+ }
+
+ public static DocumentId of(String namespace, String documentType, String group, String userSpecific) {
+ return new DocumentId(documentType, namespace, OptionalLong.empty(), Optional.of(group), userSpecific);
+ }
+
+ public static DocumentId of(String serialized) {
+ String[] parts = serialized.split(":", 5);
+ if (parts.length == 5 && parts[0].equals("id")) {
+ if (parts[3].startsWith("n="))
+ return DocumentId.of(parts[1], parts[2], Long.parseLong(parts[3].substring(2)), parts[4]);
+ if (parts[3].startsWith("g="))
+ return DocumentId.of(parts[1], parts[2], parts[3].substring(2), parts[4]);
+ else if (parts[3].isEmpty())
+ return DocumentId.of(parts[1], parts[2], parts[4]);
+ }
+ throw new IllegalArgumentException("Document ID must be on the form " +
+ "'id:<namespace>:<document-type>:[n=number|g=group]:<user-specific>', " +
+ "but was '" + serialized + "'");
+ }
+
+ public String documentType() {
+ return documentType;
+ }
+
+ public String namespace() {
+ return namespace;
+ }
+
+ public OptionalLong number() {
+ return number;
+ }
+
+ public Optional<String> group() {
+ return group;
+ }
+
+ public String userSpecific() {
+ return userSpecific;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ DocumentId that = (DocumentId) o;
+ return documentType.equals(that.documentType) && namespace.equals(that.namespace) && number.equals(that.number) && group.equals(that.group) && userSpecific.equals(that.userSpecific);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(documentType, namespace, number, group, userSpecific);
+ }
+
+ @Override
+ public String toString() {
+ return "id:" + namespace + ":" + documentType + ":" +
+ (number.isPresent() ? "n=" + number.getAsLong() : group.map("g="::concat).orElse("")) +
+ ":" + userSpecific;
+ }
+
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java
new file mode 100644
index 00000000000..1b616a70da9
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java
@@ -0,0 +1,33 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import java.io.Closeable;
+import java.util.concurrent.CompletableFuture;
+
+/**
+ * @author bjorncs
+ * @author jonmv
+ */
+public interface FeedClient extends Closeable {
+
+ CompletableFuture<Result> put(DocumentId documentId, String documentJson, OperationParameters params);
+ CompletableFuture<Result> update(DocumentId documentId, String updateJson, OperationParameters params);
+ CompletableFuture<Result> remove(DocumentId documentId, OperationParameters params);
+
+ interface RetryStrategy {
+
+ /** Whether to retry operations of the given type. */
+ default boolean retry(OperationType type) { return true; }
+
+ /** Number of retries per operation for non-backpressure problems. */
+ default int retries() { return 5; }
+
+ }
+
+ enum OperationType {
+ put,
+ update,
+ remove;
+ }
+
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java
new file mode 100644
index 00000000000..95a49abcc25
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java
@@ -0,0 +1,93 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.SSLContext;
+import java.net.URI;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.function.Supplier;
+
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Builder for creating a {@link FeedClient} instance.
+ *
+ * @author bjorncs
+ * @author jonmv
+ */
+public class FeedClientBuilder {
+
+ FeedClient.RetryStrategy defaultRetryStrategy = new FeedClient.RetryStrategy() { };
+
+ final URI endpoint;
+ final Map<String, Supplier<String>> requestHeaders = new HashMap<>();
+ SSLContext sslContext;
+ HostnameVerifier hostnameVerifier;
+ int maxConnections = 4;
+ int maxStreamsPerConnection = 1024;
+ FeedClient.RetryStrategy retryStrategy = defaultRetryStrategy;
+
+ public static FeedClientBuilder create(URI endpoint) { return new FeedClientBuilder(endpoint); }
+
+ private FeedClientBuilder(URI endpoint) {
+ requireNonNull(endpoint.getHost());
+ this.endpoint = endpoint;
+ }
+
+ /**
+ * Sets the maximum number of connections this client will use.
+ *
+ * A reasonable value here is a small multiple of the numbers of containers in the
+ * cluster to feed, so load can be balanced across these.
+ * In general, this value should be kept as low as possible, but poor connectivity
+ * between feeder and cluster may also warrant a higher number of connections.
+ */
+ public FeedClientBuilder setMaxConnections(int max) {
+ if (max < 1) throw new IllegalArgumentException("Max connections must be at least 1, but was " + max);
+ this.maxConnections = max;
+ return this;
+ }
+
+ /**
+ * Sets the maximum number of streams per HTTP/2 connection for this client.
+ *
+ * This determines the maximum number of concurrent, inflight requests for this client,
+ * which is {@code maxConnections * maxStreamsPerConnection}. Prefer more streams over
+ * more connections, when possible. The server's maximum is usually around 128-256.
+ */
+ public FeedClientBuilder setMaxStreamPerConnection(int max) {
+ if (max < 1) throw new IllegalArgumentException("Max streams per connection must be at least 1, but was " + max);
+ this.maxStreamsPerConnection = max;
+ return this;
+ }
+
+ public FeedClientBuilder setSslContext(SSLContext context) {
+ this.sslContext = requireNonNull(context);
+ return this;
+ }
+
+ public FeedClientBuilder setHostnameVerifier(HostnameVerifier verifier) {
+ this.hostnameVerifier = requireNonNull(verifier);
+ return this;
+ }
+
+ public FeedClientBuilder addRequestHeader(String name, String value) {
+ return addRequestHeader(name, () -> requireNonNull(value));
+ }
+
+ public FeedClientBuilder addRequestHeader(String name, Supplier<String> valueSupplier) {
+ this.requestHeaders.put(requireNonNull(name), requireNonNull(valueSupplier));
+ return this;
+ }
+
+ public FeedClientBuilder setRetryStrategy(FeedClient.RetryStrategy strategy) {
+ this.retryStrategy = requireNonNull(strategy);
+ return this;
+ }
+
+ public FeedClient build() {
+ return new HttpFeedClient(this);
+ }
+
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java
new file mode 100644
index 00000000000..eb31d1aa808
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+/**
+ * @author bjorncs
+ */
+public class FeedException extends RuntimeException {
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java
new file mode 100644
index 00000000000..fc1637fe17f
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java
@@ -0,0 +1,197 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import org.apache.hc.client5.http.async.methods.SimpleHttpRequest;
+import org.apache.hc.client5.http.async.methods.SimpleHttpResponse;
+import org.apache.hc.client5.http.config.RequestConfig;
+import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient;
+import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder;
+import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder;
+import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder;
+import org.apache.hc.core5.concurrent.FutureCallback;
+import org.apache.hc.core5.http.ContentType;
+import org.apache.hc.core5.http.message.BasicHeader;
+import org.apache.hc.core5.http2.config.H2Config;
+import org.apache.hc.core5.net.URIBuilder;
+import org.apache.hc.core5.pool.PoolConcurrencyPolicy;
+import org.apache.hc.core5.reactor.IOReactorConfig;
+import org.apache.hc.core5.util.TimeValue;
+import org.apache.hc.core5.util.Timeout;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.Supplier;
+
+import static java.util.Objects.requireNonNull;
+
+/**
+ * HTTP implementation of {@link FeedClient}
+ *
+ * @author bjorncs
+ * @author jonmv
+ */
+class HttpFeedClient implements FeedClient {
+
+ private final URI endpoint;
+ private final Map<String, Supplier<String>> requestHeaders;
+ private final HttpRequestStrategy requestStrategy;
+ private final CloseableHttpAsyncClient httpClient;
+ private final AtomicBoolean closed = new AtomicBoolean();
+
+ HttpFeedClient(FeedClientBuilder builder) {
+ this.endpoint = builder.endpoint;
+ this.requestHeaders = new HashMap<>(builder.requestHeaders);
+
+ this.requestStrategy = new HttpRequestStrategy(builder);
+ this.httpClient = createHttpClient(builder, requestStrategy);
+ this.httpClient.start();
+ }
+
+ private static CloseableHttpAsyncClient createHttpClient(FeedClientBuilder builder, HttpRequestStrategy retryStrategy) {
+ HttpAsyncClientBuilder httpClientBuilder = HttpAsyncClientBuilder.create()
+ .setUserAgent(String.format("vespa-feed-client/%s", Vespa.VERSION))
+ .setDefaultHeaders(Collections.singletonList(new BasicHeader("Vespa-Client-Version", Vespa.VERSION)))
+ .disableCookieManagement()
+ .disableRedirectHandling()
+ .disableConnectionState()
+ .setRetryStrategy(retryStrategy)
+ .setIOReactorConfig(IOReactorConfig.custom()
+ .setSoTimeout(Timeout.ofSeconds(10))
+ .build())
+ .setDefaultRequestConfig(
+ RequestConfig.custom()
+ .setConnectTimeout(Timeout.ofSeconds(10))
+ .setConnectionRequestTimeout(Timeout.DISABLED)
+ .setResponseTimeout(Timeout.ofMinutes(5))
+ .build())
+ .setH2Config(H2Config.custom()
+ .setMaxConcurrentStreams(builder.maxStreamsPerConnection)
+ .setCompressionEnabled(true)
+ .setPushEnabled(false)
+ .build());
+
+ int maxConnections = builder.maxConnections;
+ PoolingAsyncClientConnectionManagerBuilder connectionManagerBuilder = PoolingAsyncClientConnectionManagerBuilder.create()
+ .setConnectionTimeToLive(TimeValue.ofMinutes(10))
+ .setMaxConnTotal(maxConnections)
+ .setMaxConnPerRoute(maxConnections)
+ .setPoolConcurrencyPolicy(PoolConcurrencyPolicy.LAX);
+ if (builder.sslContext != null) {
+ ClientTlsStrategyBuilder tlsStrategyBuilder = ClientTlsStrategyBuilder.create()
+ .setSslContext(builder.sslContext);
+ if (builder.hostnameVerifier != null) {
+ tlsStrategyBuilder.setHostnameVerifier(builder.hostnameVerifier);
+ }
+ connectionManagerBuilder.setTlsStrategy(tlsStrategyBuilder.build());
+ }
+ httpClientBuilder.setConnectionManager(connectionManagerBuilder.build());
+ return httpClientBuilder.build();
+ }
+
+ @Override
+ public CompletableFuture<Result> put(DocumentId documentId, String documentJson, OperationParameters params) {
+ return send("POST", documentId, requireNonNull(documentJson), params);
+ }
+
+ @Override
+ public CompletableFuture<Result> update(DocumentId documentId, String updateJson, OperationParameters params) {
+ return send("PUT", documentId, requireNonNull(updateJson), params);
+ }
+
+ @Override
+ public CompletableFuture<Result> remove(DocumentId documentId, OperationParameters params) {
+ return send("DELETE", documentId, null, params);
+ }
+
+ @Override
+ public void close() throws IOException {
+ if ( ! closed.getAndSet(true))
+ httpClient.close();
+ }
+
+ private CompletableFuture<Result> send(String method, DocumentId documentId, String operationJson, OperationParameters params) {
+ SimpleHttpRequest request = new SimpleHttpRequest(method, operationUrl(endpoint, documentId, params));
+ requestHeaders.forEach((name, value) -> request.setHeader(name, value.get()));
+ if (operationJson != null)
+ request.setBody(operationJson, ContentType.APPLICATION_JSON);
+
+ return requestStrategy.enqueue(documentId, future -> {
+ httpClient.execute(request,
+ new FutureCallback<SimpleHttpResponse>() {
+ @Override public void completed(SimpleHttpResponse response) { future.complete(response); }
+ @Override public void failed(Exception ex) { future.completeExceptionally(ex); }
+ @Override public void cancelled() { future.cancel(false); }
+ });
+ }).handle((response, thrown) -> {
+ if (thrown != null) {
+ if (requestStrategy.hasFailed()) {
+ try { close(); }
+ catch (IOException exception) { throw new UncheckedIOException(exception); }
+ }
+ return new Result(Result.Type.failure, documentId, thrown.getMessage(), null);
+ }
+ return toResult(response, documentId);
+ });
+ }
+
+ static Result toResult(SimpleHttpResponse response, DocumentId documentId) {
+ Result.Type type;
+ switch (response.getCode()) {
+ case 200: type = Result.Type.success; break;
+ case 412: type = Result.Type.conditionNotMet; break;
+ default: type = Result.Type.failure;
+ }
+ Map<String, String> responseJson = null; // TODO: parse JSON.
+ return new Result(type, documentId, response.getBodyText(), "trace");
+ }
+
+ static List<String> toPath(DocumentId documentId) {
+ List<String> path = new ArrayList<>();
+ path.add("document");
+ path.add("v1");
+ path.add(documentId.namespace());
+ path.add(documentId.documentType());
+ if (documentId.number().isPresent()) {
+ path.add("number");
+ path.add(Long.toUnsignedString(documentId.number().getAsLong()));
+ }
+ else if (documentId.group().isPresent()) {
+ path.add("group");
+ path.add(documentId.group().get());
+ }
+ else {
+ path.add("docid");
+ }
+ path.add(documentId.userSpecific());
+
+ return path;
+ }
+
+ static URI operationUrl(URI endpoint, DocumentId documentId, OperationParameters params) {
+ URIBuilder url = new URIBuilder(endpoint);
+ url.setPathSegments(toPath(documentId));
+
+ if (params.createIfNonExistent()) url.addParameter("create", "true");
+ params.testAndSetCondition().ifPresent(condition -> url.addParameter("condition", condition));
+ params.timeout().ifPresent(timeout -> url.addParameter("timeout", timeout.toMillis() + "ms"));
+ params.route().ifPresent(route -> url.addParameter("route", route));
+ params.tracelevel().ifPresent(tracelevel -> url.addParameter("tracelevel", Integer.toString(tracelevel)));
+
+ try {
+ return url.build();
+ }
+ catch (URISyntaxException e) {
+ throw new IllegalStateException(e);
+ }
+ }
+
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java
new file mode 100644
index 00000000000..0512d6a64c9
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java
@@ -0,0 +1,156 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import org.apache.hc.client5.http.HttpRequestRetryStrategy;
+import org.apache.hc.client5.http.async.methods.SimpleHttpResponse;
+import org.apache.hc.core5.http.HttpRequest;
+import org.apache.hc.core5.http.HttpResponse;
+import org.apache.hc.core5.http.protocol.HttpContext;
+import org.apache.hc.core5.util.TimeValue;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.Consumer;
+
+/**
+ * Controls request execution and retries:
+ * <ul>
+ * <li>Retry all IO exceptions; however</li>
+ * <li>abort everything if more than 10% of requests result in an exception for some time.</li>
+ * <li>Whenever throttled, limit inflight to 99% of current; and</li>
+ * <li>on every successful response, increase inflight limit by 0.1.</li>
+ * </ul>
+ *
+ * @author jonmv
+ */
+class HttpRequestStrategy implements RequestStrategy<SimpleHttpResponse>, HttpRequestRetryStrategy {
+
+ private final Map<DocumentId, CompletableFuture<SimpleHttpResponse>> byId = new ConcurrentHashMap<>();
+ private final FeedClient.RetryStrategy wrapped;
+ private final long maxInflight;
+ private double targetInflight;
+ private long inflight;
+ private final AtomicReference<Double> errorRate;
+ private final double errorThreshold;
+ private final Lock lock;
+ private final Condition available;
+
+ HttpRequestStrategy(FeedClientBuilder builder) {
+ this.wrapped = builder.retryStrategy;
+ this.maxInflight = builder.maxConnections * (long) builder.maxStreamsPerConnection;
+ this.targetInflight = maxInflight;
+ this.inflight = 0;
+ this.errorRate = new AtomicReference<>(0.0);
+ this.errorThreshold = 0.1;
+ this.lock = new ReentrantLock(true);
+ this.available = lock.newCondition();
+ }
+
+ private double cycle() {
+ return targetInflight; // TODO: tune this--could start way too high if limit is set too high.
+ }
+
+ @Override
+ public boolean retryRequest(HttpRequest request, IOException exception, int execCount, HttpContext context) {
+ if (errorRate.updateAndGet(rate -> rate + (1 - rate) / cycle()) > errorThreshold)
+ return false;
+
+ if (execCount > wrapped.retries())
+ return false;
+
+ switch (request.getMethod().toUpperCase()) {
+ case "POST": return wrapped.retry(FeedClient.OperationType.put);
+ case "PUT": return wrapped.retry(FeedClient.OperationType.update);
+ case "DELETE": return wrapped.retry(FeedClient.OperationType.remove);
+ default: throw new IllegalStateException("Unexpected HTTP method: " + request.getMethod());
+ }
+ }
+
+ /**
+ * Called when a response is successfully obtained.
+ */
+ void success() {
+ errorRate.updateAndGet(rate -> rate - rate / cycle());
+ lock.lock();
+ targetInflight = Math.min(targetInflight + 0.1, maxInflight);
+ lock.unlock();
+ }
+
+ @Override
+ public boolean retryRequest(HttpResponse response, int execCount, HttpContext context) {
+ if (response.getCode() == 429 || response.getCode() == 503) {
+ lock.lock();
+ targetInflight = Math.max(100, 99 * inflight / 100);
+ lock.unlock();
+ return true;
+ }
+ return false;
+ }
+
+ @Override
+ public TimeValue getRetryInterval(HttpResponse response, int execCount, HttpContext context) {
+ return TimeValue.ofMilliseconds(100);
+ }
+
+ void acquireSlot() {
+ lock.lock();
+ try {
+ while (inflight >= targetInflight)
+ available.awaitUninterruptibly();
+
+ ++inflight;
+ }
+ finally {
+ lock.unlock();
+ }
+ }
+
+ void releaseSlot() {
+ lock.lock();
+ try {
+ --inflight;
+
+ if (inflight < targetInflight)
+ available.signal();
+ }
+ finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public boolean hasFailed() {
+ return errorRate.get() > errorThreshold;
+ }
+
+ @Override
+ public CompletableFuture<SimpleHttpResponse> enqueue(DocumentId documentId, Consumer<CompletableFuture<SimpleHttpResponse>> dispatch) {
+ acquireSlot();
+
+ Consumer<CompletableFuture<SimpleHttpResponse>> safeDispatch = vessel -> {
+ try { dispatch.accept(vessel); }
+ catch (Throwable t) { vessel.completeExceptionally(t); }
+ };
+ CompletableFuture<SimpleHttpResponse> vessel = new CompletableFuture<>();
+ byId.compute(documentId, (id, previous) -> {
+ if (previous == null) safeDispatch.accept(vessel);
+ else previous.whenComplete((__, ___) -> safeDispatch.accept(vessel));
+ return vessel;
+ });
+
+ return vessel.whenComplete((__, thrown) -> {
+ releaseSlot();
+ if (thrown == null)
+ success();
+
+ byId.compute(documentId, (id, current) -> current == vessel ? null : current);
+ });
+ }
+
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonStreamFeeder.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonStreamFeeder.java
new file mode 100644
index 00000000000..7ece0d4ef73
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonStreamFeeder.java
@@ -0,0 +1,100 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import com.fasterxml.jackson.core.JsonParser;
+
+import java.io.InputStream;
+import java.time.Duration;
+
+import static java.util.Objects.requireNonNull;
+
+/**
+ * @author jonmv
+ */
+public class JsonStreamFeeder {
+
+ private final FeedClient client;
+ private final OperationParameters protoParameters;
+
+ private JsonStreamFeeder(FeedClient client, OperationParameters protoParameters) {
+ this.client = client;
+ this.protoParameters = protoParameters;
+ }
+
+ public static Builder builder(FeedClient client) { return new Builder(client); }
+
+ /** Feeds a stream containing a JSON array of feed operations on the form
+ * <pre>
+ * [
+ * {
+ * "id": "id:ns:type::boo",
+ * "fields": { ... document fields ... }
+ * },
+ * {
+ * "put": "id:ns:type::foo",
+ * "fields": { ... document fields ... }
+ * },
+ * {
+ * "update": "id:ns:type:n=4:bar",
+ * "create": true,
+ * "fields": { ... partial update fields ... }
+ * },
+ * {
+ * "remove": "id:ns:type:g=foo:bar",
+ * "condition": "type.baz = \"bax\""
+ * },
+ * ...
+ * ]
+ * </pre>
+ * Note that {@code "id"} is an alias for the document put operation.
+ */
+ public void feed(InputStream jsonStream) {
+
+ }
+
+
+ static class Tokenizer {
+
+ private final InputStream in;
+ private final JsonParser json;
+
+ public Tokenizer(InputStream in, JsonParser json) {
+ this.in = in;
+ this.json = json;
+ }
+
+ }
+
+
+
+ public static class Builder {
+
+ final FeedClient client;
+ OperationParameters parameters = OperationParameters.empty();
+
+ private Builder(FeedClient client) {
+ this.client = requireNonNull(client);
+ }
+
+ public Builder withTimeout(Duration timeout) {
+ parameters = parameters.timeout(timeout);
+ return this;
+ }
+
+ public Builder withRoute(String route) {
+ parameters = parameters.route(route);
+ return this;
+ }
+
+ public Builder withTracelevel(int tracelevel) {
+ parameters = parameters.tracelevel(tracelevel);
+ return this;
+ }
+
+ public JsonStreamFeeder build() {
+ return new JsonStreamFeeder(client, parameters);
+ }
+
+ }
+
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParameters.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParameters.java
new file mode 100644
index 00000000000..78450caf204
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParameters.java
@@ -0,0 +1,70 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import java.time.Duration;
+import java.util.Optional;
+import java.util.OptionalInt;
+
+/**
+ * @author bjorncs
+ * @author jonmv
+ */
+public class OperationParameters {
+
+ static final OperationParameters empty = new OperationParameters(false, null, null, null, 0);
+
+ private final boolean create;
+ private final String condition;
+ private final Duration timeout;
+ private final String route;
+ private final int tracelevel;
+
+ private OperationParameters(boolean create, String condition, Duration timeout, String route, int tracelevel) {
+ this.create = create;
+ this.condition = condition;
+ this.timeout = timeout;
+ this.route = route;
+ this.tracelevel = tracelevel;
+ }
+
+ public static OperationParameters empty() { return empty; }
+
+ public OperationParameters createIfNonExistent(boolean create) {
+ return new OperationParameters(create, condition, timeout, route, tracelevel);
+ }
+
+ public OperationParameters testAndSetCondition(String condition) {
+ if (condition.isEmpty())
+ throw new IllegalArgumentException("TestAndSetCondition must be non-empty");
+
+ return new OperationParameters(create, condition, timeout, route, tracelevel);
+ }
+
+ public OperationParameters timeout(Duration timeout) {
+ if (timeout.isNegative() || timeout.isZero())
+ throw new IllegalArgumentException("Timeout must be positive, but was " + timeout);
+
+ return new OperationParameters(create, condition, timeout, route, tracelevel);
+ }
+
+ public OperationParameters route(String route) {
+ if (route.isEmpty())
+ throw new IllegalArgumentException("Route must be non-empty");
+
+ return new OperationParameters(create, condition, timeout, route, tracelevel);
+ }
+
+ public OperationParameters tracelevel(int tracelevel) {
+ if (tracelevel < 1 || tracelevel > 9)
+ throw new IllegalArgumentException("Tracelevel must be in [1, 9]");
+
+ return new OperationParameters(create, condition, timeout, route, tracelevel);
+ }
+
+ public boolean createIfNonExistent() { return create; }
+ public Optional<String> testAndSetCondition() { return Optional.ofNullable(condition); }
+ public Optional<Duration> timeout() { return Optional.ofNullable(timeout); }
+ public Optional<String> route() { return Optional.ofNullable(route); }
+ public OptionalInt tracelevel() { return tracelevel == 0 ? OptionalInt.empty() : OptionalInt.of(tracelevel); }
+
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/RequestStrategy.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/RequestStrategy.java
new file mode 100644
index 00000000000..e5eb956114e
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/RequestStrategy.java
@@ -0,0 +1,20 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import java.util.concurrent.CompletableFuture;
+import java.util.function.Consumer;
+
+/**
+ * Controls execution of feed operations.
+ *
+ * @author jonmv
+ */
+public interface RequestStrategy<T> {
+
+ /** Whether this has failed, and we should stop. */
+ boolean hasFailed();
+
+ /** Enqueue the given operation, which is dispatched to a vessel future when ready. */
+ CompletableFuture<T> enqueue(DocumentId documentId, Consumer<CompletableFuture<T>> dispatch);
+
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Result.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/Result.java
new file mode 100644
index 00000000000..31a6cf6e893
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/Result.java
@@ -0,0 +1,35 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import java.util.Optional;
+
+/**
+ * @author bjorncs
+ * @author jonmv
+ */
+public class Result {
+
+ private final Type type;
+ private final DocumentId documentId;
+ private final String resultMessage;
+ private final String traceMessage;
+
+ Result(Type type, DocumentId documentId, String resultMessage, String traceMessage) {
+ this.type = type;
+ this.documentId = documentId;
+ this.resultMessage = resultMessage;
+ this.traceMessage = traceMessage;
+ }
+
+ public enum Type {
+ success,
+ conditionNotMet,
+ failure
+ }
+
+ public Type type() { return type; }
+ public DocumentId documentId() { return documentId; }
+ public Optional<String> resultMessage() { return Optional.ofNullable(resultMessage); }
+ public Optional<String> traceMessage() { return Optional.ofNullable(traceMessage); }
+
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/SslContextBuilder.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/SslContextBuilder.java
new file mode 100644
index 00000000000..1a5f27c5d66
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/SslContextBuilder.java
@@ -0,0 +1,126 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import org.bouncycastle.asn1.ASN1ObjectIdentifier;
+import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers;
+import org.bouncycastle.asn1.pkcs.PrivateKeyInfo;
+import org.bouncycastle.asn1.x9.X9ObjectIdentifiers;
+import org.bouncycastle.cert.X509CertificateHolder;
+import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter;
+import org.bouncycastle.jce.provider.BouncyCastleProvider;
+import org.bouncycastle.openssl.PEMKeyPair;
+import org.bouncycastle.openssl.PEMParser;
+
+import javax.net.ssl.KeyManagerFactory;
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.TrustManagerFactory;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.security.GeneralSecurityException;
+import java.security.KeyFactory;
+import java.security.KeyStore;
+import java.security.PrivateKey;
+import java.security.cert.Certificate;
+import java.security.cert.X509Certificate;
+import java.security.spec.PKCS8EncodedKeySpec;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * BouncyCastle integration for creating a {@link SSLContext} instance from PEM encoded material
+ *
+ * @author bjorncs
+ */
+class SslContextBuilder {
+
+ private static final BouncyCastleProvider bcProvider = new BouncyCastleProvider();
+
+ private Path certificateFile;
+ private Path privateKeyFile;
+ private Path caCertificatesFile;
+
+ SslContextBuilder withCertificateAndKey(Path certificate, Path privateKey) {
+ this.certificateFile = certificate;
+ this.privateKeyFile = privateKey;
+ return this;
+ }
+
+ SslContextBuilder withCaCertificates(Path caCertificates) {
+ this.caCertificatesFile = caCertificates;
+ return this;
+ }
+
+ SSLContext build() throws IOException {
+ try {
+ KeyStore keystore = KeyStore.getInstance("PKCS12");
+ if (certificateFile != null && privateKeyFile != null) {
+ keystore.setKeyEntry("cert", privateKey(privateKeyFile), new char[0], certificates(certificateFile));
+ }
+ if (caCertificatesFile != null) {
+ keystore.setCertificateEntry("ca-cert", certificates(caCertificatesFile)[0]);
+ }
+ KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
+ kmf.init(keystore, new char[0]);
+ TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
+ tmf.init(keystore);
+ SSLContext sslContext = SSLContext.getInstance("TLS");
+ sslContext.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null);
+ return sslContext;
+ } catch (GeneralSecurityException e) {
+ throw new IOException(e);
+ }
+ }
+
+ private static Certificate[] certificates(Path file) throws IOException, GeneralSecurityException {
+ try (PEMParser parser = new PEMParser(Files.newBufferedReader(file))) {
+ List<X509Certificate> result = new ArrayList<>();
+ Object pemObject;
+ while ((pemObject = parser.readObject()) != null) {
+ result.add(toX509Certificate(pemObject));
+ }
+ if (result.isEmpty()) throw new IOException("File contains no PEM encoded certificates: " + file);
+ return result.toArray(new Certificate[0]);
+ }
+ }
+
+ private static PrivateKey privateKey(Path file) throws IOException, GeneralSecurityException {
+ try (PEMParser parser = new PEMParser(Files.newBufferedReader(file))) {
+ Object pemObject;
+ while ((pemObject = parser.readObject()) != null) {
+ if (pemObject instanceof PrivateKeyInfo) {
+ PrivateKeyInfo keyInfo = (PrivateKeyInfo) pemObject;
+ PKCS8EncodedKeySpec keySpec = new PKCS8EncodedKeySpec(keyInfo.getEncoded());
+ return createKeyFactory(keyInfo).generatePrivate(keySpec);
+ } else if (pemObject instanceof PEMKeyPair) {
+ PEMKeyPair pemKeypair = (PEMKeyPair) pemObject;
+ PrivateKeyInfo keyInfo = pemKeypair.getPrivateKeyInfo();
+ return createKeyFactory(keyInfo).generatePrivate(new PKCS8EncodedKeySpec(keyInfo.getEncoded()));
+ }
+ }
+ throw new IOException("Could not find private key in PEM file");
+ }
+ }
+
+ private static X509Certificate toX509Certificate(Object pemObject) throws IOException, GeneralSecurityException {
+ if (pemObject instanceof X509Certificate) return (X509Certificate) pemObject;
+ if (pemObject instanceof X509CertificateHolder) {
+ return new JcaX509CertificateConverter()
+ .setProvider(bcProvider)
+ .getCertificate((X509CertificateHolder) pemObject);
+ }
+ throw new IOException("Invalid type of PEM object: " + pemObject);
+ }
+
+ private static KeyFactory createKeyFactory(PrivateKeyInfo info) throws IOException, GeneralSecurityException {
+ ASN1ObjectIdentifier algorithm = info.getPrivateKeyAlgorithm().getAlgorithm();
+ if (X9ObjectIdentifiers.id_ecPublicKey.equals(algorithm)) {
+ return KeyFactory.getInstance("EC", bcProvider);
+ } else if (PKCSObjectIdentifiers.rsaEncryption.equals(algorithm)) {
+ return KeyFactory.getInstance("RSA", bcProvider);
+ } else {
+ throw new IOException("Unknown key algorithm: " + algorithm);
+ }
+ }
+
+}
diff --git a/vespa-feed-client/src/main/sh/vespa-version-generator.sh b/vespa-feed-client/src/main/sh/vespa-version-generator.sh
new file mode 100755
index 00000000000..7203212297a
--- /dev/null
+++ b/vespa-feed-client/src/main/sh/vespa-version-generator.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+# Extracts the current version number (V_TAG_COMPONENT) from vtag.map and outputs this into a Java class.
+# This replaces vespajlib/../VersionTagger.java as this module cannot depend on that, nor the dependencies
+# of the resulting class.
+#
+# Author: bjorncs
+
+source=$1
+destination=$2
+destinationDir=$(dirname $destination)
+
+mkdir -p $destinationDir
+
+versionNumber=$(cat $source | grep V_TAG_COMPONENT | awk '{print $2}' )
+
+cat > $destination <<- END
+package ai.vespa.feed.client;
+
+class Vespa {
+ static final String VERSION = "$versionNumber";
+}
+END
diff --git a/vespa-feed-client/src/test/java/.gitignore b/vespa-feed-client/src/test/java/.gitignore
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/vespa-feed-client/src/test/java/.gitignore
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/CliArgumentsTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/CliArgumentsTest.java
new file mode 100644
index 00000000000..b8dda66bf96
--- /dev/null
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/CliArgumentsTest.java
@@ -0,0 +1,56 @@
+package ai.vespa.feed.client;// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+import org.junit.jupiter.api.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.net.URI;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+
+import static org.junit.jupiter.api.Assertions.*;
+
+/**
+ * @author bjorncs
+ */
+class CliArgumentsTest {
+
+ @Test
+ void parses_parameters_correctly() throws CliArguments.CliArgumentsException {
+ CliArguments args = CliArguments.fromRawArgs(new String[]{
+ "--endpoint=https://vespa.ai:4443/", "--file=feed.json", "--connections=10",
+ "--max-streams-per-connection=128", "--certificate=cert.pem", "--private-key=key.pem",
+ "--ca-certificates=ca-certs.pem", "--disable-ssl-hostname-verification"});
+ assertEquals(URI.create("https://vespa.ai:4443/"), args.endpoint());
+ assertEquals(Paths.get("feed.json"), args.inputFile());
+ assertEquals(10, args.connections().getAsInt());
+ assertEquals(128, args.maxStreamsPerConnection().getAsInt());
+ assertEquals(Paths.get("cert.pem"), args.certificateAndKey().get().certificateFile);
+ assertEquals(Paths.get("key.pem"), args.certificateAndKey().get().privateKeyFile);
+ assertEquals(Paths.get("ca-certs.pem"), args.caCertificates().get());
+ assertTrue(args.sslHostnameVerificationDisabled());
+ assertFalse(args.helpSpecified());
+ }
+
+ @Test
+ void fails_on_missing_parameters() throws CliArguments.CliArgumentsException {
+ CliArguments cliArguments = CliArguments.fromRawArgs(new String[0]);
+ CliArguments.CliArgumentsException exception = assertThrows(CliArguments.CliArgumentsException.class, cliArguments::endpoint);
+ assertEquals("Endpoint must be specified", exception.getMessage());
+ exception = assertThrows(CliArguments.CliArgumentsException.class, cliArguments::inputFile);
+ assertEquals("Feed file must be specified", exception.getMessage());
+ }
+
+ @Test
+ void generated_help_page_contains_expected_description() throws CliArguments.CliArgumentsException, IOException {
+ CliArguments args = CliArguments.fromRawArgs(new String[]{"--help"});
+ assertTrue(args.helpSpecified());
+
+ ByteArrayOutputStream out = new ByteArrayOutputStream();
+ args.printHelp(out);
+ String text = out.toString();
+ String expectedHelp = new String(Files.readAllBytes(Paths.get("src", "test", "resources", "help.txt")));
+ assertEquals(expectedHelp, text);
+ }
+
+} \ No newline at end of file
diff --git a/vespa-feed-client/src/test/resources/help.txt b/vespa-feed-client/src/test/resources/help.txt
new file mode 100644
index 00000000000..d0017003a11
--- /dev/null
+++ b/vespa-feed-client/src/test/resources/help.txt
@@ -0,0 +1,12 @@
+usage: vespa-feed-client <options>
+Vespa feed client
+ --ca-certificates <arg>
+ --certificate <arg>
+ --connections <arg>
+ --disable-ssl-hostname-verification
+ --endpoint <arg>
+ --file <arg>
+ --help
+ --max-streams-per-connection <arg>
+ --private-key <arg>
+ --version
diff --git a/vespa_jersey2/pom.xml b/vespa_jersey2/pom.xml
index 61777d745f1..c39b92cd371 100644
--- a/vespa_jersey2/pom.xml
+++ b/vespa_jersey2/pom.xml
@@ -30,6 +30,18 @@
<dependency>
<groupId>org.glassfish.jersey.media</groupId>
<artifactId>jersey-media-json-jackson</artifactId>
+ <exclusions>
+ <exclusion>
+ <!-- Conflicts with javax.activation:javax.activation-api:1.2.0, which is "exported" via jdisc_core. -->
+ <groupId>jakarta.activation</groupId>
+ <artifactId>jakarta.activation-api</artifactId>
+ </exclusion>
+ <exclusion>
+          <!-- Conflicts with javax.xml.bind:jaxb-api:2.3, which is "exported" via jdisc_core. -->
+ <groupId>jakarta.xml.bind</groupId>
+ <artifactId>jakarta.xml.bind-api</artifactId>
+ </exclusion>
+ </exclusions>
</dependency>
<dependency>
<groupId>org.glassfish.jersey.media</groupId>
diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java
index 520e95d2792..8cb927b8989 100644
--- a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java
+++ b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java
@@ -126,6 +126,7 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
private static final Logger log = Logger.getLogger(DocumentV1ApiHandler.class.getName());
private static final Parser<Integer> integerParser = Integer::parseInt;
+ private static final Parser<Long> unsignedLongParser = Long::parseUnsignedLong;
private static final Parser<Long> timeoutMillisParser = value -> ParameterParser.asMilliSeconds(value, defaultTimeout.toMillis());
private static final Parser<Boolean> booleanParser = Boolean::parseBoolean;
@@ -478,6 +479,9 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
private DocumentOperationParameters parametersFromRequest(HttpRequest request, String... names) {
DocumentOperationParameters parameters = getProperty(request, TRACELEVEL, integerParser).map(parameters()::withTraceLevel)
.orElse(parameters());
+ parameters = getProperty(request, TIMEOUT, timeoutMillisParser).map(clock.instant()::plusMillis)
+ .map(parameters::withDeadline)
+ .orElse(parameters);
for (String name : names) switch (name) {
case CLUSTER:
parameters = getProperty(request, CLUSTER).map(cluster -> resolveCluster(Optional.of(cluster), clusters).name())
@@ -903,6 +907,9 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
case INSUFFICIENT_STORAGE:
jsonResponse.commit(Response.Status.INSUFFICIENT_STORAGE);
break;
+ case TIMEOUT:
+ jsonResponse.commit(Response.Status.GATEWAY_TIMEOUT);
+ break;
default:
log.log(WARNING, "Unexpected document API operation outcome '" + response.outcome() + "'");
case ERROR:
@@ -1021,6 +1028,7 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
break; // This is all OK — the latter two are due to mitigating races.
case ERROR:
case INSUFFICIENT_STORAGE:
+ case TIMEOUT:
onError.accept(operationResponse.getTextMessage());
break;
default:
@@ -1269,7 +1277,7 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
DocumentPath(Path path) {
this.path = requireNonNull(path);
- this.group = Optional.ofNullable(path.get("number")).map(integerParser::parse).map(Group::of)
+ this.group = Optional.ofNullable(path.get("number")).map(unsignedLongParser::parse).map(Group::of)
.or(() -> Optional.ofNullable(path.get("group")).map(Group::of));
}
@@ -1289,22 +1297,26 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
static class Group {
- private final String value;
private final String docIdPart;
private final String selection;
- private Group(String value, String docIdPart, String selection) {
- Text.validateTextString(value)
- .ifPresent(codePoint -> { throw new IllegalArgumentException(String.format("Illegal code point U%04X in group", codePoint)); });
- this.value = value;
+ private Group(String docIdPart, String selection) {
this.docIdPart = docIdPart;
this.selection = selection;
}
- public static Group of(long value) { return new Group(Long.toString(value), "n=" + value, "id.user==" + value); }
- public static Group of(String value) { return new Group(value, "g=" + value, "id.group=='" + value.replaceAll("'", "\\\\'") + "'"); }
+ public static Group of(long value) {
+ String stringValue = Long.toUnsignedString(value);
+ return new Group("n=" + stringValue, "id.user==" + stringValue);
+ }
+
+ public static Group of(String value) {
+ Text.validateTextString(value)
+ .ifPresent(codePoint -> { throw new IllegalArgumentException(String.format("Illegal code point U%04X in group", codePoint)); });
+
+ return new Group("g=" + value, "id.group=='" + value.replaceAll("'", "\\\\'") + "'");
+ }
- public String value() { return value; }
public String docIdPart() { return docIdPart; }
public String selection() { return selection; }
@@ -1313,21 +1325,19 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Group group = (Group) o;
- return value.equals(group.value) &&
- docIdPart.equals(group.docIdPart) &&
+ return docIdPart.equals(group.docIdPart) &&
selection.equals(group.selection);
}
@Override
public int hashCode() {
- return Objects.hash(value, docIdPart, selection);
+ return Objects.hash(docIdPart, selection);
}
@Override
public String toString() {
return "Group{" +
- "value='" + value + '\'' +
- ", docIdPart='" + docIdPart + '\'' +
+ "docIdPart='" + docIdPart + '\'' +
", selection='" + selection + '\'' +
'}';
}
diff --git a/vespaclient-container-plugin/src/main/resources/configdefinitions/document-operation-executor.def b/vespaclient-container-plugin/src/main/resources/configdefinitions/document-operation-executor.def
index 686b33d8cd5..6850b5213a1 100644
--- a/vespaclient-container-plugin/src/main/resources/configdefinitions/document-operation-executor.def
+++ b/vespaclient-container-plugin/src/main/resources/configdefinitions/document-operation-executor.def
@@ -4,12 +4,6 @@ package=com.yahoo.document.restapi
# Duration for which resender thread sleeps after an operation is throttled
resendDelayMillis int default=10
-# Bound on number of document operations to keep in retry queue — further operations are rejected
-maxThrottled int default=1000
-
-# Whether to perform dispatch from Jetty threads
-doDispatchWithEnqueue bool default=true
-
-# Number of workers in the resender thread pool
-resenderCount int default=1
+# Bound on number of document operations to keep in retry queue — further operations are rejected
+maxThrottled int default=4096
diff --git a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java
index c4777b26072..6f1b0466350 100644
--- a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java
+++ b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java
@@ -577,6 +577,20 @@ public class DocumentV1ApiTest {
"}", response.readAll());
assertEquals(400, response.getStatus());
+ // TIMEOUT is a 504
+ access.session.expect((id, parameters) -> {
+ assertEquals(clock.instant().plusSeconds(1000), parameters.deadline().get());
+ parameters.responseHandler().get().handleResponse(new Response(0, "timeout", Response.Outcome.TIMEOUT));
+ return new Result(Result.ResultType.SUCCESS, null);
+ });
+ response = driver.sendRequest("http://localhost/document/v1/space/music/number/1/two?timeout=1ks");
+ assertSameJson("{" +
+ " \"pathId\": \"/document/v1/space/music/number/1/two\"," +
+ " \"id\": \"id:space:music:n=1:two\"," +
+ " \"message\": \"timeout\"" +
+ "}", response.readAll());
+ assertEquals(504, response.getStatus());
+
// INSUFFICIENT_STORAGE is a 507
access.session.expect((id, parameters) -> {
parameters.responseHandler().get().handleResponse(new Response(0, "disk full", Response.Outcome.INSUFFICIENT_STORAGE));
diff --git a/vespajlib/abi-spec.json b/vespajlib/abi-spec.json
index ebca0a4d852..4ddf8b83cdc 100644
--- a/vespajlib/abi-spec.json
+++ b/vespajlib/abi-spec.json
@@ -891,11 +891,13 @@
"public varargs double get(long[])",
"public varargs float getFloat(long[])",
"public double get(com.yahoo.tensor.TensorAddress)",
+ "public boolean has(com.yahoo.tensor.TensorAddress)",
"public abstract double get(long)",
"public abstract float getFloat(long)",
"public com.yahoo.tensor.TensorType type()",
"public abstract com.yahoo.tensor.IndexedTensor withType(com.yahoo.tensor.TensorType)",
"public com.yahoo.tensor.DimensionSizes dimensionSizes()",
+ "public long[] shape()",
"public java.util.Map cells()",
"public com.yahoo.tensor.Tensor remove(java.util.Set)",
"public java.lang.String toString()",
@@ -941,6 +943,7 @@
"public com.yahoo.tensor.TensorType type()",
"public long size()",
"public double get(com.yahoo.tensor.TensorAddress)",
+ "public boolean has(com.yahoo.tensor.TensorAddress)",
"public java.util.Iterator cellIterator()",
"public java.util.Iterator valueIterator()",
"public java.util.Map cells()",
@@ -1031,6 +1034,7 @@
"public com.yahoo.tensor.TensorType type()",
"public long size()",
"public double get(com.yahoo.tensor.TensorAddress)",
+ "public boolean has(com.yahoo.tensor.TensorAddress)",
"public java.util.Iterator cellIterator()",
"public java.util.Iterator valueIterator()",
"public java.util.Map cells()",
@@ -1151,6 +1155,7 @@
"public boolean isEmpty()",
"public abstract long size()",
"public abstract double get(com.yahoo.tensor.TensorAddress)",
+ "public abstract boolean has(com.yahoo.tensor.TensorAddress)",
"public abstract java.util.Iterator cellIterator()",
"public abstract java.util.Iterator valueIterator()",
"public abstract java.util.Map cells()",
diff --git a/vespajlib/src/main/java/com/yahoo/compress/Compressor.java b/vespajlib/src/main/java/com/yahoo/compress/Compressor.java
index 3220b81a3a9..ad51ac1ca38 100644
--- a/vespajlib/src/main/java/com/yahoo/compress/Compressor.java
+++ b/vespajlib/src/main/java/com/yahoo/compress/Compressor.java
@@ -6,6 +6,7 @@ import net.jpountz.lz4.LZ4Factory;
import net.jpountz.lz4.LZ4FastDecompressor;
import net.jpountz.lz4.LZ4SafeDecompressor;
+import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Optional;
import java.util.Random;
@@ -159,7 +160,16 @@ public class Compressor {
}
public byte[] compressUnconditionally(byte[] input) {
- return getCompressor().compress(input);
+ return getCompressor().compress(input, 0, input.length);
+ }
+ public byte[] compressUnconditionally(ByteBuffer input) {
+ return getCompressor().compress(input.array(), input.arrayOffset()+input.position(), input.remaining());
+ }
+
+ public void decompressUnconditionally(ByteBuffer input, ByteBuffer output) {
+ if (input.remaining() > 0) {
+ factory.fastDecompressor().decompress(input, output);
+ }
}
public byte [] decompressUnconditionally(byte[] input, int srcOffset, int uncompressedLen) {
diff --git a/vespajlib/src/main/java/com/yahoo/io/ByteWriter.java b/vespajlib/src/main/java/com/yahoo/io/ByteWriter.java
index de9c88a713d..a14e448f922 100644
--- a/vespajlib/src/main/java/com/yahoo/io/ByteWriter.java
+++ b/vespajlib/src/main/java/com/yahoo/io/ByteWriter.java
@@ -28,7 +28,7 @@ public class ByteWriter extends AbstractByteWriter {
@Override
public void send(final ByteBuffer b) throws IOException {
// we know from how BufferChain works we have a backing array
- stream.write(b.array(), b.position() + b.arrayOffset(), b.limit() - b.position());
+ stream.write(b.array(), b.position() + b.arrayOffset(), b.remaining());
}
@Override
diff --git a/vespajlib/src/main/java/com/yahoo/io/Utf8ByteWriter.java b/vespajlib/src/main/java/com/yahoo/io/Utf8ByteWriter.java
new file mode 100644
index 00000000000..949cd88e580
--- /dev/null
+++ b/vespajlib/src/main/java/com/yahoo/io/Utf8ByteWriter.java
@@ -0,0 +1,47 @@
+package com.yahoo.io;
+
+import com.yahoo.text.Utf8;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+public class Utf8ByteWriter extends AbstractByteWriter {
+ private ByteBuffer myBuf;
+ public Utf8ByteWriter(int initialBuffer) {
+ super(Utf8.getNewEncoder());
+ myBuf = ByteBuffer.allocate(initialBuffer);
+ }
+ @Override
+ public void send(ByteBuffer src) throws IOException {
+ if (myBuf.remaining() < src.remaining()) {
+ ByteBuffer newBuf = ByteBuffer.allocate(Integer.highestOneBit(myBuf.position()+src.remaining()) << 1);
+ myBuf.flip();
+ newBuf.put(myBuf);
+ myBuf = newBuf;
+ }
+ myBuf.put(src);
+ }
+
+ @Override
+ public void flush() throws IOException {
+ buffer.flush();
+ }
+
+ @Override
+ public void close() throws IOException {
+ buffer.flush();
+ myBuf.flip();
+ }
+
+ /**
+ * Return a buffer ready for read. Must only be called after writer has been closed.
+ * @return A flipped ByteBuffer
+ */
+ public ByteBuffer getBuf() {
+ if (myBuf.position() != 0) {
+            throw new IllegalStateException("Call close() before getBuf(), pos=" + myBuf.position() + ", limit=" + myBuf.limit());
+ }
+ return myBuf;
+ }
+
+}
diff --git a/vespajlib/src/main/java/com/yahoo/slime/JsonDecoder.java b/vespajlib/src/main/java/com/yahoo/slime/JsonDecoder.java
index f677ae23a45..b7f11f53cd5 100644
--- a/vespajlib/src/main/java/com/yahoo/slime/JsonDecoder.java
+++ b/vespajlib/src/main/java/com/yahoo/slime/JsonDecoder.java
@@ -5,6 +5,7 @@ import com.yahoo.text.Text;
import com.yahoo.text.Utf8;
import java.io.ByteArrayOutputStream;
+import java.nio.ByteBuffer;
/**
* A port of the C++ json decoder intended to be fast.
@@ -34,7 +35,10 @@ public class JsonDecoder {
public JsonDecoder() {}
public Slime decode(Slime slime, byte[] bytes) {
- in = new BufferedInput(bytes);
+ return decode(slime, ByteBuffer.wrap(bytes));
+ }
+ public Slime decode(Slime slime, ByteBuffer buf) {
+ in = new BufferedInput(buf.array(), buf.arrayOffset()+buf.position(), buf.remaining());
next();
decodeValue(slimeInserter.adjust(slime));
if (in.failed()) {
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/IndexedTensor.java b/vespajlib/src/main/java/com/yahoo/tensor/IndexedTensor.java
index dfc26cf0282..d822a5c6b8b 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/IndexedTensor.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/IndexedTensor.java
@@ -106,7 +106,7 @@ public abstract class IndexedTensor implements Tensor {
return getFloat((int)toValueIndex(indexes, dimensionSizes));
}
- /** Returns the value at this address, or NaN if there is no value at this address */
+ /** Returns the value at this address, or 0.0 if there is no value at this address */
@Override
public double get(TensorAddress address) {
// optimize for fast lookup within bounds:
@@ -114,7 +114,18 @@ public abstract class IndexedTensor implements Tensor {
return get((int)toValueIndex(address, dimensionSizes, type));
}
catch (IllegalArgumentException e) {
- return Double.NaN;
+ return 0.0;
+ }
+ }
+
+ @Override
+ public boolean has(TensorAddress address) {
+ try {
+ long index = toValueIndex(address, dimensionSizes, type);
+ if (index < 0) return false;
+ return (index < size());
+ } catch (IllegalArgumentException e) {
+ return false;
}
}
@@ -180,6 +191,14 @@ public abstract class IndexedTensor implements Tensor {
public DimensionSizes dimensionSizes() { return dimensionSizes; }
+ public long[] shape() {
+ long[] result = new long[dimensionSizes.dimensions()];
+ for (int i = 0; i < result.length; ++i) {
+ result[i] = dimensionSizes.size(i);
+ }
+ return result;
+ }
+
@Override
public Map<TensorAddress, Double> cells() {
if (dimensionSizes.dimensions() == 0)
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/MappedTensor.java b/vespajlib/src/main/java/com/yahoo/tensor/MappedTensor.java
index 33f904efd42..996fad8a19d 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/MappedTensor.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/MappedTensor.java
@@ -32,7 +32,10 @@ public class MappedTensor implements Tensor {
public long size() { return cells.size(); }
@Override
- public double get(TensorAddress address) { return cells.getOrDefault(address, Double.NaN); }
+ public double get(TensorAddress address) { return cells.getOrDefault(address, 0.0); }
+
+ @Override
+ public boolean has(TensorAddress address) { return cells.containsKey(address); }
@Override
public Iterator<Cell> cellIterator() { return new CellIteratorAdaptor(cells.entrySet().iterator()); }
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/MixedTensor.java b/vespajlib/src/main/java/com/yahoo/tensor/MixedTensor.java
index 7631a2e4eab..6307acb88b4 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/MixedTensor.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/MixedTensor.java
@@ -54,13 +54,24 @@ public class MixedTensor implements Tensor {
public double get(TensorAddress address) {
long cellIndex = index.indexOf(address);
if (cellIndex < 0 || cellIndex >= cells.size())
- return Double.NaN;
+ return 0.0;
Cell cell = cells.get((int)cellIndex);
if ( ! address.equals(cell.getKey()))
- return Double.NaN;
+ return 0.0;
return cell.getValue();
}
+ @Override
+ public boolean has(TensorAddress address) {
+ long cellIndex = index.indexOf(address);
+ if (cellIndex < 0 || cellIndex >= cells.size())
+ return false;
+ Cell cell = cells.get((int)cellIndex);
+ if ( ! address.equals(cell.getKey()))
+ return false;
+ return true;
+ }
+
/**
* Returns an iterator over the cells of this tensor.
* Cells are returned in order of increasing indexes in the
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/Tensor.java b/vespajlib/src/main/java/com/yahoo/tensor/Tensor.java
index 3378520dc91..3133752bc49 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/Tensor.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/Tensor.java
@@ -67,9 +67,12 @@ public interface Tensor {
/** Returns the number of cells in this */
long size();
- /** Returns the value of a cell, or NaN if this cell does not exist/have no value */
+ /** Returns the value of a cell, or 0.0 if this cell does not exist */
double get(TensorAddress address);
+ /** Returns true if this cell exists */
+ boolean has(TensorAddress address);
+
/**
* Returns the cell of this in some undefined order.
* A cell instances is only valid until next() is called.
@@ -365,6 +368,8 @@ public interface Tensor {
}
static boolean approxEquals(double x, double y, double tolerance) {
+ if (x == y) return true;
+ if (Double.isNaN(x) && Double.isNaN(y)) return true;
return Math.abs(x-y) < tolerance;
}
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/functions/Join.java b/vespajlib/src/main/java/com/yahoo/tensor/functions/Join.java
index d43b7889982..0cbcfbb7ad6 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/functions/Join.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/functions/Join.java
@@ -126,9 +126,10 @@ public class Join<NAMETYPE extends Name> extends PrimitiveTensorFunction<NAMETYP
Tensor.Builder builder = Tensor.Builder.of(joinedType);
for (Iterator<Tensor.Cell> i = a.cellIterator(); i.hasNext(); ) {
Map.Entry<TensorAddress, Double> aCell = i.next();
- double bCellValue = b.get(aCell.getKey());
- if (Double.isNaN(bCellValue)) continue; // no match
- builder.cell(aCell.getKey(), combinator.applyAsDouble(aCell.getValue(), bCellValue));
+ var key = aCell.getKey();
+ if (b.has(key)) {
+ builder.cell(key, combinator.applyAsDouble(aCell.getValue(), b.get(key)));
+ }
}
return builder.build();
}
@@ -203,11 +204,12 @@ public class Join<NAMETYPE extends Name> extends PrimitiveTensorFunction<NAMETYP
for (Iterator<Tensor.Cell> i = superspace.cellIterator(); i.hasNext(); ) {
Map.Entry<TensorAddress, Double> supercell = i.next();
TensorAddress subaddress = mapAddressToSubspace(supercell.getKey(), subspaceIndexes);
- double subspaceValue = subspace.get(subaddress);
- if ( ! Double.isNaN(subspaceValue))
+ if (subspace.has(subaddress)) {
+ double subspaceValue = subspace.get(subaddress);
builder.cell(supercell.getKey(),
reversedArgumentOrder ? combinator.applyAsDouble(supercell.getValue(), subspaceValue)
- : combinator.applyAsDouble(subspaceValue, supercell.getValue()));
+ : combinator.applyAsDouble(subspaceValue, supercell.getValue()));
+ }
}
return builder.build();
}
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/functions/Merge.java b/vespajlib/src/main/java/com/yahoo/tensor/functions/Merge.java
index 4aa09f3f4e3..a2387affa67 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/functions/Merge.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/functions/Merge.java
@@ -125,11 +125,12 @@ public class Merge<NAMETYPE extends Name> extends PrimitiveTensorFunction<NAMETY
private static void addCellsOf(Tensor a, Tensor b, Tensor.Builder builder, DoubleBinaryOperator combinator) {
for (Iterator<Tensor.Cell> i = a.cellIterator(); i.hasNext(); ) {
Map.Entry<TensorAddress, Double> aCell = i.next();
- double bCellValue = b.get(aCell.getKey());
- if (Double.isNaN(bCellValue))
- builder.cell(aCell.getKey(), aCell.getValue());
- else if (combinator != null)
- builder.cell(aCell.getKey(), combinator.applyAsDouble(aCell.getValue(), bCellValue));
+ var key = aCell.getKey();
+ if (! b.has(key)) {
+ builder.cell(key, aCell.getValue());
+ } else if (combinator != null) {
+ builder.cell(key, combinator.applyAsDouble(aCell.getValue(), b.get(key)));
+ }
}
}
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/functions/Slice.java b/vespajlib/src/main/java/com/yahoo/tensor/functions/Slice.java
index 607c9a0ab44..343cec7fe84 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/functions/Slice.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/functions/Slice.java
@@ -63,8 +63,9 @@ public class Slice<NAMETYPE extends Name> extends PrimitiveTensorFunction<NAMETY
TensorType resultType = resultType(tensor.type());
PartialAddress subspaceAddress = subspaceToAddress(tensor.type(), context);
- if (resultType.rank() == 0) // shortcut common case
+ if (resultType.rank() == 0) { // shortcut common case
return Tensor.from(tensor.get(subspaceAddress.asAddress(tensor.type())));
+ }
Tensor.Builder b = Tensor.Builder.of(resultType);
for (Iterator<Tensor.Cell> i = tensor.cellIterator(); i.hasNext(); ) {
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/serialization/JsonFormat.java b/vespajlib/src/main/java/com/yahoo/tensor/serialization/JsonFormat.java
index fa2094e9d2a..461e73e3611 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/serialization/JsonFormat.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/serialization/JsonFormat.java
@@ -103,10 +103,17 @@ public class JsonFormat {
if ( ! (builder instanceof IndexedTensor.BoundBuilder))
throw new IllegalArgumentException("The 'values' field can only be used with dense tensors. " +
"Use 'cells' or 'blocks' instead");
+ IndexedTensor.BoundBuilder indexedBuilder = (IndexedTensor.BoundBuilder)builder;
+ if (values.type() == Type.STRING) {
+ double[] decoded = decodeHexString(values.asString(), builder.type().valueType());
+ for (int i = 0; i < decoded.length; i++) {
+ indexedBuilder.cellByDirectIndex(i, decoded[i]);
+ }
+ return;
+ }
if ( values.type() != Type.ARRAY)
+            throw new IllegalArgumentException("Expected 'values' to contain an array, not " + values.type());
- IndexedTensor.BoundBuilder indexedBuilder = (IndexedTensor.BoundBuilder)builder;
MutableInteger index = new MutableInteger(0);
values.traverse((ArrayTraverser) (__, value) -> {
if (value.type() != Type.LONG && value.type() != Type.DOUBLE)
@@ -143,11 +150,99 @@ public class JsonFormat {
decodeValues(value, mixedBuilder));
}
+ private static byte decodeHex(String input, int index) {
+ int d = Character.digit(input.charAt(index), 16);
+ if (d < 0) {
+ throw new IllegalArgumentException("Invalid digit '"+input.charAt(index)+"' at index "+index+" in input "+input);
+ }
+ return (byte)d;
+ }
+
+ private static double[] decodeHexStringAsBytes(String input) {
+ int l = input.length() / 2;
+ double[] result = new double[l];
+ int idx = 0;
+ for (int i = 0; i < l; i++) {
+ byte v = decodeHex(input, idx++);
+ v <<= 4;
+ v += decodeHex(input, idx++);
+ result[i] = v;
+ }
+ return result;
+ }
+
+ private static double[] decodeHexStringAsBFloat16s(String input) {
+ int l = input.length() / 4;
+ double[] result = new double[l];
+ int idx = 0;
+ for (int i = 0; i < l; i++) {
+ int v = decodeHex(input, idx++);
+ v <<= 4; v += decodeHex(input, idx++);
+ v <<= 4; v += decodeHex(input, idx++);
+ v <<= 4; v += decodeHex(input, idx++);
+ v <<= 16;
+ result[i] = Float.intBitsToFloat(v);
+ }
+ return result;
+ }
+
+ private static double[] decodeHexStringAsFloats(String input) {
+ int l = input.length() / 8;
+ double[] result = new double[l];
+ int idx = 0;
+ for (int i = 0; i < l; i++) {
+ int v = 0;
+ for (int j = 0; j < 8; j++) {
+ v <<= 4;
+ v += decodeHex(input, idx++);
+ }
+ result[i] = Float.intBitsToFloat(v);
+ }
+ return result;
+ }
+
+ private static double[] decodeHexStringAsDoubles(String input) {
+ int l = input.length() / 16;
+ double[] result = new double[l];
+ int idx = 0;
+ for (int i = 0; i < l; i++) {
+ long v = 0;
+ for (int j = 0; j < 16; j++) {
+ v <<= 4;
+ v += decodeHex(input, idx++);
+ }
+ result[i] = Double.longBitsToDouble(v);
+ }
+ return result;
+ }
+
+ public static double[] decodeHexString(String input, TensorType.Value valueType) {
+ switch(valueType) {
+ case INT8:
+ return decodeHexStringAsBytes(input);
+ case BFLOAT16:
+ return decodeHexStringAsBFloat16s(input);
+ case FLOAT:
+ return decodeHexStringAsFloats(input);
+ case DOUBLE:
+ return decodeHexStringAsDoubles(input);
+ default:
+ throw new IllegalArgumentException("Cannot handle value type: "+valueType);
+ }
+ }
+
private static double[] decodeValues(Inspector valuesField, MixedTensor.BoundBuilder mixedBuilder) {
- if (valuesField.type() != Type.ARRAY)
- throw new IllegalArgumentException("Expected a block to contain a 'values' array");
double[] values = new double[(int)mixedBuilder.denseSubspaceSize()];
- valuesField.traverse((ArrayTraverser) (index, value) -> values[index] = decodeNumeric(value));
+ if (valuesField.type() == Type.ARRAY) {
+ valuesField.traverse((ArrayTraverser) (index, value) -> values[index] = decodeNumeric(value));
+ } else if (valuesField.type() == Type.STRING) {
+ double[] decoded = decodeHexString(valuesField.asString(), mixedBuilder.type().valueType());
+ for (int i = 0; i < decoded.length; i++) {
+ values[i] = decoded[i];
+ }
+ } else {
+ throw new IllegalArgumentException("Expected a block to contain a 'values' array");
+ }
return values;
}
diff --git a/vespajlib/src/test/java/com/yahoo/tensor/serialization/JsonFormatTestCase.java b/vespajlib/src/test/java/com/yahoo/tensor/serialization/JsonFormatTestCase.java
index 3ca20661587..011c4b1fe12 100644
--- a/vespajlib/src/test/java/com/yahoo/tensor/serialization/JsonFormatTestCase.java
+++ b/vespajlib/src/test/java/com/yahoo/tensor/serialization/JsonFormatTestCase.java
@@ -82,6 +82,131 @@ public class JsonFormatTestCase {
}
@Test
+ public void testInt8VectorInHexForm() {
+ Tensor.Builder builder = Tensor.Builder.of(TensorType.fromSpec("tensor<int8>(x[2],y[3])"));
+ builder.cell().label("x", 0).label("y", 0).value(2.0);
+ builder.cell().label("x", 0).label("y", 1).value(127.0);
+ builder.cell().label("x", 0).label("y", 2).value(-1.0);
+ builder.cell().label("x", 1).label("y", 0).value(-128.0);
+ builder.cell().label("x", 1).label("y", 1).value(0.0);
+ builder.cell().label("x", 1).label("y", 2).value(42.0);
+ Tensor expected = builder.build();
+ String denseJson = "{\"values\":\"027FFF80002A\"}";
+ Tensor decoded = JsonFormat.decode(expected.type(), denseJson.getBytes(StandardCharsets.UTF_8));
+ assertEquals(expected, decoded);
+ }
+
+ @Test
+ public void testInt8VectorInvalidHex() {
+ var type = TensorType.fromSpec("tensor<int8>(x[2])");
+ String denseJson = "{\"values\":\"abXc\"}";
+ try {
+ Tensor decoded = JsonFormat.decode(type, denseJson.getBytes(StandardCharsets.UTF_8));
+ fail("did not get exception as expected, decoded as: "+decoded);
+ } catch (IllegalArgumentException e) {
+ assertEquals(e.getMessage(), "Invalid digit 'X' at index 2 in input abXc");
+ }
+ }
+
+ @Test
+ public void testMixedInt8TensorWithHexForm() {
+ Tensor.Builder builder = Tensor.Builder.of(TensorType.fromSpec("tensor<int8>(x{},y[3])"));
+ builder.cell().label("x", 0).label("y", 0).value(2.0);
+ builder.cell().label("x", 0).label("y", 1).value(3.0);
+ builder.cell().label("x", 0).label("y", 2).value(4.0);
+ builder.cell().label("x", 1).label("y", 0).value(5.0);
+ builder.cell().label("x", 1).label("y", 1).value(6.0);
+ builder.cell().label("x", 1).label("y", 2).value(7.0);
+ Tensor expected = builder.build();
+ String mixedJson = "{\"blocks\":[" +
+ "{\"address\":{\"x\":\"0\"},\"values\":\"020304\"}," +
+ "{\"address\":{\"x\":\"1\"},\"values\":\"050607\"}" +
+ "]}";
+ Tensor decoded = JsonFormat.decode(expected.type(), mixedJson.getBytes(StandardCharsets.UTF_8));
+ assertEquals(expected, decoded);
+ }
+
+ @Test
+ public void testBFloat16VectorInHexForm() {
+ var builder = Tensor.Builder.of(TensorType.fromSpec("tensor<bfloat16>(x[3],y[4])"));
+ builder.cell().label("x", 0).label("y", 0).value(42.0);
+ builder.cell().label("x", 0).label("y", 1).value(1048576.0);
+ builder.cell().label("x", 0).label("y", 2).value(0.00000095367431640625);
+ builder.cell().label("x", 0).label("y", 3).value(-255.00);
+
+ builder.cell().label("x", 1).label("y", 0).value(0.0);
+ builder.cell().label("x", 1).label("y", 1).value(-0.0);
+ builder.cell().label("x", 1).label("y", 2).value(Float.MIN_NORMAL);
+ builder.cell().label("x", 1).label("y", 3).value(0x1.feP+127);
+
+ builder.cell().label("x", 2).label("y", 0).value(Float.POSITIVE_INFINITY);
+ builder.cell().label("x", 2).label("y", 1).value(Float.NEGATIVE_INFINITY);
+ builder.cell().label("x", 2).label("y", 2).value(Float.NaN);
+ builder.cell().label("x", 2).label("y", 3).value(-Float.NaN);
+ Tensor expected = builder.build();
+
+ String denseJson = "{\"values\":\"422849803580c37f0000800000807f7f7f80ff807fc0ffc0\"}";
+ Tensor decoded = JsonFormat.decode(expected.type(), denseJson.getBytes(StandardCharsets.UTF_8));
+ assertEquals(expected, decoded);
+ }
+
+ @Test
+ public void testFloatVectorInHexForm() {
+ var builder = Tensor.Builder.of(TensorType.fromSpec("tensor<float>(x[3],y[4])"));
+ builder.cell().label("x", 0).label("y", 0).value(42.0);
+ builder.cell().label("x", 0).label("y", 1).value(1048577.0);
+ builder.cell().label("x", 0).label("y", 2).value(0.00000095367431640625);
+ builder.cell().label("x", 0).label("y", 3).value(-255.00);
+
+ builder.cell().label("x", 1).label("y", 0).value(0.0);
+ builder.cell().label("x", 1).label("y", 1).value(-0.0);
+ builder.cell().label("x", 1).label("y", 2).value(Float.MIN_VALUE);
+ builder.cell().label("x", 1).label("y", 3).value(Float.MAX_VALUE);
+
+ builder.cell().label("x", 2).label("y", 0).value(Float.POSITIVE_INFINITY);
+ builder.cell().label("x", 2).label("y", 1).value(Float.NEGATIVE_INFINITY);
+ builder.cell().label("x", 2).label("y", 2).value(Float.NaN);
+ builder.cell().label("x", 2).label("y", 3).value(-Float.NaN);
+ Tensor expected = builder.build();
+
+ String denseJson = "{\"values\":\""
+ +"42280000"+"49800008"+"35800000"+"c37f0000"
+ +"00000000"+"80000000"+"00000001"+"7f7fffff"
+ +"7f800000"+"ff800000"+"7fc00000"+"ffc00000"
+ +"\"}";
+ Tensor decoded = JsonFormat.decode(expected.type(), denseJson.getBytes(StandardCharsets.UTF_8));
+ assertEquals(expected, decoded);
+ }
+
+ @Test
+ public void testDoubleVectorInHexForm() {
+ var builder = Tensor.Builder.of(TensorType.fromSpec("tensor<double>(x[3],y[4])"));
+ builder.cell().label("x", 0).label("y", 0).value(42.0);
+ builder.cell().label("x", 0).label("y", 1).value(1048577.0);
+ builder.cell().label("x", 0).label("y", 2).value(0.00000095367431640625);
+ builder.cell().label("x", 0).label("y", 3).value(-255.00);
+
+ builder.cell().label("x", 1).label("y", 0).value(0.0);
+ builder.cell().label("x", 1).label("y", 1).value(-0.0);
+ builder.cell().label("x", 1).label("y", 2).value(Double.MIN_VALUE);
+ builder.cell().label("x", 1).label("y", 3).value(Double.MAX_VALUE);
+
+ builder.cell().label("x", 2).label("y", 0).value(Double.POSITIVE_INFINITY);
+ builder.cell().label("x", 2).label("y", 1).value(Double.NEGATIVE_INFINITY);
+ builder.cell().label("x", 2).label("y", 2).value(Double.NaN);
+ builder.cell().label("x", 2).label("y", 3).value(-Double.NaN);
+ Tensor expected = builder.build();
+
+ String denseJson = "{\"values\":\""
+ +"4045000000000000"+"4130000100000000"+"3eb0000000000000"+"c06fe00000000000"
+ +"0000000000000000"+"8000000000000000"+"0000000000000001"+"7fefffffffffffff"
+ +"7ff0000000000000"+"fff0000000000000"+"7ff8000000000000"+"fff8000000000000"
+ +"\"}";
+ Tensor decoded = JsonFormat.decode(expected.type(), denseJson.getBytes(StandardCharsets.UTF_8));
+ assertEquals(expected, decoded);
+ }
+
+ @Test
public void testMixedTensorInMixedForm() {
Tensor.Builder builder = Tensor.Builder.of(TensorType.fromSpec("tensor(x{},y[3])"));
builder.cell().label("x", 0).label("y", 0).value(2.0);
diff --git a/vespalib/src/vespa/vespalib/io/fileutil.cpp b/vespalib/src/vespa/vespalib/io/fileutil.cpp
index 17ca6d7e488..9c6cca68e02 100644
--- a/vespalib/src/vespa/vespalib/io/fileutil.cpp
+++ b/vespalib/src/vespa/vespalib/io/fileutil.cpp
@@ -185,8 +185,7 @@ File::open(int flags, bool autoCreateDirectories) {
| ((flags & File::DIRECTIO) != 0 ? O_DIRECT : 0)
#endif
| ((flags & File::TRUNC) != 0 ? O_TRUNC: 0);
- int fd = openAndCreateDirsIfMissing(_filename, openflags,
- autoCreateDirectories);
+ int fd = openAndCreateDirsIfMissing(_filename, openflags, autoCreateDirectories);
#ifdef __linux__
if (fd < 0 && ((flags & File::DIRECTIO) != 0)) {
openflags = (openflags ^ O_DIRECT);
@@ -194,8 +193,7 @@ File::open(int flags, bool autoCreateDirectories) {
LOG(debug, "open(%s, %d): Retrying without direct IO due to failure "
"opening with errno(%d): %s",
_filename.c_str(), flags, errno, safeStrerror(errno).c_str());
- fd = openAndCreateDirsIfMissing(_filename, openflags,
- autoCreateDirectories);
+ fd = openAndCreateDirsIfMissing(_filename, openflags, autoCreateDirectories);
}
#endif
if (fd < 0) {
@@ -203,8 +201,7 @@ File::open(int flags, bool autoCreateDirectories) {
ost << "open(" << _filename << ", 0x"
<< hex << flags << dec << "): Failed, errno(" << errno
<< "): " << safeStrerror(errno);
- throw IoException(ost.str(), IoException::getErrorType(errno),
- VESPA_STRLOC);
+ throw IoException(ost.str(), IoException::getErrorType(errno), VESPA_STRLOC);
}
_flags = flags;
if (_close && _fd != -1) close();
@@ -250,8 +247,7 @@ File::resize(off_t size)
asciistream ost;
ost << "resize(" << _filename << ", " << size << "): Failed, errno("
<< errno << "): " << safeStrerror(errno);
- throw IoException(ost.str(), IoException::getErrorType(errno),
- VESPA_STRLOC);
+ throw IoException(ost.str(), IoException::getErrorType(errno), VESPA_STRLOC);
}
LOG(debug, "resize(%s): Resized to %" PRIu64 " bytes.",
_filename.c_str(), size);
@@ -312,8 +308,7 @@ File::write(const void *buf, size_t bufsize, off_t offset)
ost << "write(" << _fd << ", " << buf
<< ", " << left << ", " << offset << "), Failed, errno("
<< errno << "): " << safeStrerror(errno);
- throw IoException(ost.str(), IoException::getErrorType(errno),
- VESPA_STRLOC);
+ throw IoException(ost.str(), IoException::getErrorType(errno), VESPA_STRLOC);
}
}
return bufsize;
@@ -339,16 +334,13 @@ File::read(void *buf, size_t bufsize, off_t offset) const
remaining -= bytesread;
buf = ((char*) buf) + bytesread;
offset += bytesread;
- if (((_flags & DIRECTIO) != 0) && ((bytesread % 512) != 0) &&
- (offset == getFileSize())) {
- LOG(spam, "read(%s): Found EOF. Directio read to unaligned "
- "file end at offset %" PRIu64 ".",
- _filename.c_str(), offset);
- break;
+ if (((_flags & DIRECTIO) != 0) && ((bytesread % 512) != 0) && (offset == getFileSize())) {
+ LOG(spam, "read(%s): Found EOF. Directio read to unaligned file end at offset %" PRIu64 ".",
+ _filename.c_str(), offset);
+ break;
}
} else if (bytesread == 0) { // EOF
- LOG(spam, "read(%s): Found EOF. Zero bytes read from offset %"
- PRIu64 ".",
+ LOG(spam, "read(%s): Found EOF. Zero bytes read from offset %" PRIu64 ".",
_filename.c_str(), offset);
break;
} else if (errno != EINTR && errno != EAGAIN) {
@@ -356,8 +348,7 @@ File::read(void *buf, size_t bufsize, off_t offset) const
ost << "read(" << _fd << ", " << buf << ", " << remaining << ", "
<< offset << "): Failed, errno(" << errno << "): "
<< safeStrerror(errno);
- throw IoException(ost.str(), IoException::getErrorType(errno),
- VESPA_STRLOC);
+ throw IoException(ost.str(), IoException::getErrorType(errno), VESPA_STRLOC);
}
}
return bufsize - remaining;
@@ -398,18 +389,13 @@ File::sync()
{
if (_fd != -1) {
if (::fsync(_fd) == 0) {
- LOG(debug, "sync(%s): File synchronized with disk.",
- _filename.c_str());
+ LOG(debug, "sync(%s): File synchronized with disk.", _filename.c_str());
} else {
- asciistream ost;
- ost << "sync(" << _filename << "): Failed, errno(" << errno << "): "
- << safeStrerror(errno);
- throw IoException(ost.str(), IoException::getErrorType(errno),
- VESPA_STRLOC);
+ LOG(warning, "fsync(%s): Failed to sync file. errno(%d): %s",
+ _filename.c_str(), errno, safeStrerror(errno).c_str());
}
} else {
- LOG(debug, "sync(%s): Called on closed file.",
- _filename.c_str());
+ LOG(debug, "sync(%s): Called on closed file.", _filename.c_str());
}
}
@@ -427,8 +413,7 @@ File::close()
{
if (_fd != -1) {
if (::close(_fd) == 0) {
- LOG(debug, "close(%s): Closed file with descriptor %i.",
- _filename.c_str(), _fd);
+ LOG(debug, "close(%s): Closed file with descriptor %i.", _filename.c_str(), _fd);
_fd = -1;
return true;
} else {
@@ -438,8 +423,7 @@ File::close()
return false;
}
} else {
- LOG(debug, "close(%s): Called on closed file.",
- _filename.c_str());
+ LOG(debug, "close(%s): Called on closed file.", _filename.c_str());
}
return true;
}
@@ -459,10 +443,8 @@ getCurrentDirectory()
return string(static_cast<char*>(ptr.get()));
}
asciistream ost;
- ost << "getCurrentDirectory(): Failed, errno(" << errno
- << "): " << safeStrerror(errno);
- throw IoException(ost.str(), IoException::getErrorType(errno),
- VESPA_STRLOC);
+ ost << "getCurrentDirectory(): Failed, errno(" << errno << "): " << safeStrerror(errno);
+ throw IoException(ost.str(), IoException::getErrorType(errno), VESPA_STRLOC);
}
bool
@@ -479,8 +461,7 @@ mkdir(const string & directory, bool recursive)
string superdir = directory.substr(0, slashpos);
mkdir(superdir, recursive);
if (::mkdir(directory.c_str(), 0777) == 0) {
- LOG(debug, "mkdir(%s): Created directory recursively",
- directory.c_str());
+ LOG(debug, "mkdir(%s): Created directory recursively", directory.c_str());
return true;
}
}
@@ -501,20 +482,17 @@ mkdir(const string & directory, bool recursive)
} else {
ost << " A file of some sort already exist.";
}
- throw IoException(
- ost.str(), IoException::ILLEGAL_PATH, VESPA_STRLOC);
+ throw IoException(ost.str(), IoException::ILLEGAL_PATH, VESPA_STRLOC);
}
}
asciistream ost;
ost << "mkdir(" << directory << (recursive ? ", recursive" : "")
<< "): Failed, errno(" << errno << "): " << safeStrerror(errno);
- throw IoException(ost.str(), IoException::getErrorType(errno),
- VESPA_STRLOC);
+ throw IoException(ost.str(), IoException::getErrorType(errno), VESPA_STRLOC);
}
void
-symlink(const string & oldPath,
- const string & newPath)
+symlink(const string & oldPath, const string & newPath)
{
if (::symlink(oldPath.c_str(), newPath.c_str())) {
asciistream ss;
@@ -548,8 +526,7 @@ chdir(const string & directory)
asciistream ost;
ost << "chdir(" << directory << "): Failed, errno(" << errno << "): "
<< safeStrerror(errno);
- throw IoException(ost.str(), IoException::getErrorType(errno),
- VESPA_STRLOC);
+ throw IoException(ost.str(), IoException::getErrorType(errno), VESPA_STRLOC);
}
LOG(debug, "chdir(%s): Working directory changed.", directory.c_str());
}
@@ -562,8 +539,7 @@ rmdir(const string & directory, bool recursive)
dirname.resize(dirname.size() - 1);
}
if (dirname.empty()) {
- LOG(debug, "rmdir(%s): Not allowing deletion of '/'.",
- directory.c_str());
+ LOG(debug, "rmdir(%s): Not allowing deletion of '/'.", directory.c_str());
return false;
}
if (recursive) {
@@ -591,20 +567,17 @@ rmdir(const string & directory, bool recursive)
}
}
if (::rmdir(dirname.c_str()) == 0) {
- LOG(debug, "rmdir(%s): Directory deleted.",
- directory.c_str());
+ LOG(debug, "rmdir(%s): Directory deleted.", directory.c_str());
return true;
}
if (errno == ENOENT) {
- LOG(debug, "rmdir(%s): No directory to delete.",
- directory.c_str());
+ LOG(debug, "rmdir(%s): No directory to delete.", directory.c_str());
return false;
}
asciistream ost;
ost << "rmdir(" << dirname << (recursive ? ", recursive" : "")
<< "): Failed, errno(" << errno << "): " << safeStrerror(errno);
- throw IoException(ost.str(), IoException::getErrorType(errno),
- VESPA_STRLOC);
+ throw IoException(ost.str(), IoException::getErrorType(errno), VESPA_STRLOC);
}
FileInfo::UP
@@ -636,8 +609,7 @@ unlink(const string & filename)
asciistream ost;
ost << "unlink(" << filename << "): Failed, errno(" << errno << "): "
<< safeStrerror(errno);
- throw IoException(ost.str(), IoException::getErrorType(errno),
- VESPA_STRLOC);
+ throw IoException(ost.str(), IoException::getErrorType(errno), VESPA_STRLOC);
}
LOG(debug, "unlink(%s): File deleted.", filename.c_str());
return true;
@@ -659,11 +631,9 @@ rename(const string & frompath, const string & topath,
if (pos != string::npos) {
string path(topath.substr(0, pos));
vespalib::mkdir(path);
- LOG(debug, "rename(%s, %s): Created target directory. "
- "Calling recursively.",
+ LOG(debug, "rename(%s, %s): Created target directory. Calling recursively.",
frompath.c_str(), topath.c_str());
- return rename(frompath, topath,
- copyDeleteBetweenFilesystems, false);
+ return rename(frompath, topath, copyDeleteBetweenFilesystems, false);
}
} else {
asciistream ost;
@@ -768,11 +738,11 @@ listDirectory(const string & path)
MallocAutoPtr
getAlignedBuffer(size_t size)
{
- void *ptr;
- int result = posix_memalign(&ptr, diskAlignmentSize, size);
- assert(result == 0);
- (void)result;
- return MallocAutoPtr(ptr);
+ void *ptr;
+ int result = posix_memalign(&ptr, diskAlignmentSize, size);
+ assert(result == 0);
+ (void)result;
+ return MallocAutoPtr(ptr);
}
string dirname(stringref name)
diff --git a/vespamalloc/src/tests/stacktrace/CMakeLists.txt b/vespamalloc/src/tests/stacktrace/CMakeLists.txt
index 491b0d4089e..0cc0107ce84 100644
--- a/vespamalloc/src/tests/stacktrace/CMakeLists.txt
+++ b/vespamalloc/src/tests/stacktrace/CMakeLists.txt
@@ -2,7 +2,6 @@
vespa_add_executable(vespamalloc_stacktrace_test_app TEST
SOURCES
stacktrace.cpp
- backtrace.c
DEPENDS
)
vespa_add_test(
diff --git a/vespamalloc/src/tests/stacktrace/backtrace.c b/vespamalloc/src/tests/stacktrace/backtrace.c
deleted file mode 100644
index 44b33ca4572..00000000000
--- a/vespamalloc/src/tests/stacktrace/backtrace.c
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include "backtrace.h"
-
-#if defined(__i386__)
-// use GLIBC version, hope it works
-extern int backtrace(void **buffer, int size);
-#define HAVE_BACKTRACE
-#endif
-
-#if defined(__x86_64__)
-
-/**
- Written by Arne H. J. based on docs:
-
- http://www.kernel.org/pub/linux/devel/gcc/unwind/
- http://www.codesourcery.com/public/cxx-abi/abi-eh.html
- http://refspecs.freestandards.org/LSB_3.1.0/LSB-Core-generic/LSB-Core-generic/libgcc-s-ddefs.html
-**/
-
-#include <unwind.h>
-
-struct trace_context {
- void **array;
- int size;
- int index;
-};
-
-static _Unwind_Reason_Code
-trace_fn(struct _Unwind_Context *ctxt, void *arg)
-{
- struct trace_context *tp = (struct trace_context *)arg;
- void *ip = (void *)_Unwind_GetIP(ctxt);
-
- if (ip == 0) {
- return _URC_END_OF_STACK;
- }
- if (tp->index <= tp->size) {
- // there's no point filling in the address of the backtrace()
- // function itself, that doesn't provide any extra information,
- // so skip one level
- if (tp->index > 0) {
- tp->array[tp->index - 1] = ip;
- }
- tp->index++;
- } else {
- return _URC_NORMAL_STOP;
- }
- return _URC_NO_REASON; // "This is not the destination frame" -> try next frame
-}
-
-#define HAVE_BACKTRACE
-int
-backtrace (void **array, int size)
-{
- struct trace_context t;
- t.array = array;
- t.size = size;
- t.index = 0;
- _Unwind_Backtrace(trace_fn, &t);
- return t.index - 1;
-}
-#endif // x86_64
-
-
-#ifdef HAVE_BACKTRACE
-
-int
-FastOS_backtrace (void **array, int size)
-{
- return backtrace(array, size);
-}
-
-#else
-
-# warning "backtrace not supported on this CPU"
-int
-FastOS_backtrace (void **array, int size)
-{
- (void) array;
- (void) size;
- return 0;
-}
-
-#endif
diff --git a/vespamalloc/src/tests/stacktrace/backtrace.h b/vespamalloc/src/tests/stacktrace/backtrace.h
deleted file mode 100644
index 3700319162e..00000000000
--- a/vespamalloc/src/tests/stacktrace/backtrace.h
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#pragma once
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-int FastOS_backtrace (void **array, int size);
-
-#if defined(__x86_64__)
-int backtrace (void **array, int size);
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
diff --git a/vsm/src/tests/textutil/textutil.cpp b/vsm/src/tests/textutil/textutil.cpp
index 42857b8430d..77d13ec3461 100644
--- a/vsm/src/tests/textutil/textutil.cpp
+++ b/vsm/src/tests/textutil/textutil.cpp
@@ -32,8 +32,10 @@ private:
void assertSkipSeparators(const char * input, size_t len, const UCS4V & expdstbuf, const SizeV & expoffsets);
void assertAnsiFold(const std::string & toFold, const std::string & exp);
void assertAnsiFold(char c, char exp);
+#ifdef __x86_64__
void assert_sse2_foldua(const std::string & toFold, size_t charFolded, const std::string & exp);
void assert_sse2_foldua(unsigned char c, unsigned char exp, size_t charFolded = 16);
+#endif
template <typename BW, bool OFF>
void testSkipSeparators();
@@ -41,7 +43,9 @@ private:
void testSeparatorCharacter();
void testAnsiFold();
void test_lfoldua();
+#ifdef __x86_64__
void test_sse2_foldua();
+#endif
public:
int Main() override;
@@ -91,6 +95,7 @@ TextUtilTest::assertAnsiFold(char c, char exp)
EXPECT_EQUAL((int32_t)folded, (int32_t)exp);
}
+#ifdef __x86_64__
void
TextUtilTest::assert_sse2_foldua(const std::string & toFold, size_t charFolded, const std::string & exp)
{
@@ -116,6 +121,7 @@ TextUtilTest::assert_sse2_foldua(unsigned char c, unsigned char exp, size_t char
EXPECT_EQUAL((int32_t)folded[i + alignedStart], (int32_t)exp);
}
}
+#endif
template <typename BW, bool OFF>
void
@@ -227,6 +233,7 @@ TextUtilTest::test_lfoldua()
EXPECT_EQUAL(std::string(folded + alignedStart, len), "abcdefghijklmnopqrstuvwxyz");
}
+#ifdef __x86_64__
void
TextUtilTest::test_sse2_foldua()
{
@@ -255,6 +262,7 @@ TextUtilTest::test_sse2_foldua()
assert_sse2_foldua(i, '?', 0);
}
}
+#endif
int
TextUtilTest::Main()
@@ -265,7 +273,9 @@ TextUtilTest::Main()
testSeparatorCharacter();
testAnsiFold();
test_lfoldua();
+#ifdef __x86_64__
test_sse2_foldua();
+#endif
TEST_DONE();
}
diff --git a/vsm/src/vespa/vsm/searcher/CMakeLists.txt b/vsm/src/vespa/vsm/searcher/CMakeLists.txt
index 22c89299721..3589e9e3baf 100644
--- a/vsm/src/vespa/vsm/searcher/CMakeLists.txt
+++ b/vsm/src/vespa/vsm/searcher/CMakeLists.txt
@@ -1,10 +1,17 @@
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+if(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
+ set(SSE2_FILES "fold.cpp")
+else()
+ unset(SSE2_FILES)
+endif()
+
vespa_add_library(vsm_vsmsearcher OBJECT
SOURCES
boolfieldsearcher.cpp
fieldsearcher.cpp
floatfieldsearcher.cpp
- fold.cpp
+ ${SSE2_FILES}
futf8strchrfieldsearcher.cpp
intfieldsearcher.cpp
strchrfieldsearcher.cpp
diff --git a/vsm/src/vespa/vsm/searcher/futf8strchrfieldsearcher.cpp b/vsm/src/vespa/vsm/searcher/futf8strchrfieldsearcher.cpp
index e58fab40070..3acf065a8f1 100644
--- a/vsm/src/vespa/vsm/searcher/futf8strchrfieldsearcher.cpp
+++ b/vsm/src/vespa/vsm/searcher/futf8strchrfieldsearcher.cpp
@@ -1,7 +1,9 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "futf8strchrfieldsearcher.h"
+#ifdef __x86_64__
#include "fold.h"
+#endif
#include <vespa/vespalib/util/size_literals.h>
using search::byte;
@@ -42,8 +44,9 @@ FUTF8StrChrFieldSearcher::ansiFold(const char * toFold, size_t sz, char * folded
bool
FUTF8StrChrFieldSearcher::lfoldaa(const char * toFold, size_t sz, char * folded, size_t & unalignedStart)
{
- bool retval(true);
unalignedStart = (size_t(toFold) & 0xF);
+#ifdef __x86_64__
+ bool retval(true);
size_t unalignedsz = std::min(sz, (16 - unalignedStart) & 0xF);
size_t foldedUnaligned = (size_t(folded) & 0xF);
@@ -65,14 +68,17 @@ FUTF8StrChrFieldSearcher::lfoldaa(const char * toFold, size_t sz, char * folded,
retval = ansiFold(toFold + unalignedsz + alignsz16, rest, folded+alignedStart+alignsz16);
}
return retval;
+#else
+ return ansiFold(toFold, sz, folded + unalignedStart);
+#endif
}
bool
FUTF8StrChrFieldSearcher::lfoldua(const char * toFold, size_t sz, char * folded, size_t & alignedStart)
{
- bool retval(true);
-
alignedStart = 0xF - (size_t(folded + 0xF) % 0x10);
+#ifdef __x86_64__
+ bool retval(true);
size_t alignsz16 = sz & 0xFFFFFFF0;
size_t rest = sz - alignsz16;
@@ -85,10 +91,14 @@ FUTF8StrChrFieldSearcher::lfoldua(const char * toFold, size_t sz, char * folded,
retval = ansiFold(toFold + alignsz16, rest, folded+alignedStart+alignsz16);
}
return retval;
+#else
+ return ansiFold(toFold, sz, folded + alignedStart);
+#endif
}
namespace {
+#ifdef __x86_64__
inline const char * advance(const char * n, const v16qi zero)
{
uint32_t charMap = 0;
@@ -136,12 +146,30 @@ inline const char * advance(const char * n, const v16qi zero)
}
return n + sum;
}
+#else
+inline const char* advance(const char* n)
+{
+ const char* p = n;
+ const char* zero = static_cast<const char *>(memchr(p, 0, 64_Ki));
+ while (zero == nullptr) {
+ p += 64_Ki;
+ zero = static_cast<const char *>(memchr(p, 0, 64_Ki));
+ }
+ p = zero;
+ while (*p == '\0') {
+ ++p;
+ }
+ return p;
+}
+#endif
}
size_t FUTF8StrChrFieldSearcher::match(const char *folded, size_t sz, QueryTerm & qt)
{
+#ifdef __x86_64__
const v16qi _G_zero = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+#endif
termcount_t words(0);
const char * term;
termsize_t tsz = qt.term(term);
@@ -177,7 +205,11 @@ size_t FUTF8StrChrFieldSearcher::match(const char *folded, size_t sz, QueryTerm
}
#endif
words++;
+#ifdef __x86_64__
n = advance(n, _G_zero);
+#else
+ n = advance(n);
+#endif
}
return words;
}
@@ -185,7 +217,9 @@ size_t FUTF8StrChrFieldSearcher::match(const char *folded, size_t sz, QueryTerm
size_t FUTF8StrChrFieldSearcher::match(const char *folded, size_t sz, size_t mintsz, QueryTerm ** qtl, size_t qtlSize)
{
(void) mintsz;
+#ifdef __x86_64__
const v16qi _G_zero = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+#endif
termcount_t words(0);
const char * n = folded;
const char *e = n + sz;
@@ -228,7 +262,11 @@ size_t FUTF8StrChrFieldSearcher::match(const char *folded, size_t sz, size_t min
}
#endif
words++;
+#ifdef __x86_64__
n = advance(n, _G_zero);
+#else
+ n = advance(n);
+#endif
}
return words;
}
diff --git a/zookeeper-server/zookeeper-server-common/src/main/java/com/yahoo/vespa/zookeeper/Configurator.java b/zookeeper-server/zookeeper-server-common/src/main/java/com/yahoo/vespa/zookeeper/Configurator.java
index f302798589c..9705c8c379e 100644
--- a/zookeeper-server/zookeeper-server-common/src/main/java/com/yahoo/vespa/zookeeper/Configurator.java
+++ b/zookeeper-server/zookeeper-server-common/src/main/java/com/yahoo/vespa/zookeeper/Configurator.java
@@ -40,6 +40,8 @@ public class Configurator {
System.setProperty(ZOOKEEPER_JUTE_MAX_BUFFER, Integer.valueOf(zookeeperServerConfig.juteMaxBuffer()).toString());
// Need to set this as a system property instead of config, config does not work
System.setProperty("zookeeper.authProvider.x509", "com.yahoo.vespa.zookeeper.VespaMtlsAuthenticationProvider");
+ // Need to set this as a system property, otherwise it will be parsed for _every_ packet and an exception will be thrown (and handled)
+ System.setProperty("zookeeper.globalOutstandingLimit", "1000");
}
void writeConfigToDisk() { writeConfigToDisk(VespaTlsConfig.fromSystem()); }