summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--cloud-tenant-base-dependencies-enforcer/pom.xml2
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RPCCommunicator.java2
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java2
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/SlobrokClient.java2
-rw-r--r--config-application-package/src/main/java/com/yahoo/config/application/Xml.java10
-rw-r--r--config-application-package/src/main/java/com/yahoo/config/model/application/provider/MockFileRegistry.java11
-rw-r--r--config-application-package/src/main/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistry.java5
-rw-r--r--config-application-package/src/test/java/com/yahoo/config/application/IncludeProcessorTest.java17
-rw-r--r--config-application-package/src/test/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistryTestCase.java12
-rw-r--r--config-model-api/abi-spec.json5
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/application/api/ApplicationPackage.java10
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/application/api/FileRegistry.java2
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/application/api/ValidationId.java4
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/application/api/ValidationOverrides.java9
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java6
-rw-r--r--config-model-api/src/test/java/com/yahoo/config/application/api/ValidationOverrideTest.java15
-rw-r--r--config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java8
-rw-r--r--config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java33
-rw-r--r--config-model/src/main/java/com/yahoo/documentmodel/NewDocumentType.java81
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/DistributableResource.java41
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java27
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/ImmutableSearch.java2
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/LargeRankExpressions.java38
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/RankExpressionBody.java10
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/RankExpressionFile.java35
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/RankExpressionFiles.java47
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/Search.java8
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/derived/RankProfileList.java21
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/derived/RawRankProfile.java31
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/AbstractService.java5
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/ConfigSentinel.java8
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java8
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java10
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java7
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java9
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/application/validation/RankSetupValidator.java4
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validation.java7
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validator.java2
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ChangeValidator.java1
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/RedundancyIncreaseValidator.java45
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidator.java44
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/ClusterResourceLimits.java7
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/Content.java20
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java6
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/DispatchTuning.java11
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java6
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomResourceLimitsBuilder.java18
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomTuningDispatchBuilder.java6
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorServerProducer.java16
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributor.java11
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/search/DispatchGroup.java7
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java16
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java15
-rw-r--r--config-model/src/main/resources/schema/content.rnc2
-rw-r--r--config-model/src/test/derived/namecollision/collision.sd8
-rw-r--r--config-model/src/test/derived/namecollision/collisionstruct.sd15
-rw-r--r--config-model/src/test/derived/namecollision/documentmanager.cfg55
-rw-r--r--config-model/src/test/java/com/yahoo/config/model/MockModelContext.java5
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/RankProfileTestCase.java3
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/derived/NameCollisionTestCase.java20
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionWithTransformerTokensTestCase.java2
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionsTestCase.java3
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/VespaModelFactoryTest.java1
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerClusterTest.java49
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyModelTester.java2
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ContentTypeRemovalValidatorTest.java8
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/RedundancyIncreaseValidatorTest.java64
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidatorTest.java64
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/content/ClusterResourceLimitsTest.java58
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/content/DispatchTuningTest.java5
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/content/FleetControllerClusterTest.java5
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/content/IndexedHierarchicDistributionTest.java1
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/content/IndexingAndDocprocRoutingTest.java154
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/content/StorageClusterTest.java338
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/content/cluster/ClusterTest.java3
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/content/cluster/DomDispatchTuningBuilderTest.java3
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/ApplicationLockException.java6
-rw-r--r--config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ProxyServer.java2
-rw-r--r--config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionAndUrlDownload.java6
-rw-r--r--config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionRpcServer.java6
-rw-r--r--config/src/main/java/com/yahoo/vespa/config/JRTConnectionPool.java12
-rw-r--r--config/src/main/java/com/yahoo/vespa/config/benchmark/LoadTester.java78
-rw-r--r--config/src/vespa/config/common/configvalue.cpp12
-rw-r--r--config/src/vespa/config/subscription/configsubscriptionset.cpp4
-rw-r--r--configd/src/apps/cmd/main.cpp98
-rw-r--r--configd/src/apps/sentinel/CMakeLists.txt2
-rw-r--r--configd/src/apps/sentinel/cc-result.h9
-rw-r--r--configd/src/apps/sentinel/config-owner.cpp31
-rw-r--r--configd/src/apps/sentinel/config-owner.h11
-rw-r--r--configd/src/apps/sentinel/connectivity.cpp199
-rw-r--r--configd/src/apps/sentinel/connectivity.h34
-rw-r--r--configd/src/apps/sentinel/env.cpp122
-rw-r--r--configd/src/apps/sentinel/env.h4
-rw-r--r--configd/src/apps/sentinel/manager.cpp3
-rw-r--r--configd/src/apps/sentinel/model-owner.cpp66
-rw-r--r--configd/src/apps/sentinel/model-owner.h32
-rw-r--r--configd/src/apps/sentinel/output-connection.cpp2
-rw-r--r--configd/src/apps/sentinel/outward-check.cpp23
-rw-r--r--configd/src/apps/sentinel/outward-check.h17
-rw-r--r--configd/src/apps/sentinel/peer-check.cpp12
-rw-r--r--configd/src/apps/sentinel/peer-check.h4
-rw-r--r--configd/src/apps/sentinel/report-connectivity.cpp53
-rw-r--r--configd/src/apps/sentinel/report-connectivity.h33
-rw-r--r--configd/src/apps/sentinel/rpchooks.cpp27
-rw-r--r--configd/src/apps/sentinel/rpchooks.h5
-rw-r--r--configd/src/apps/sentinel/rpcserver.cpp6
-rw-r--r--configd/src/apps/sentinel/rpcserver.h3
-rw-r--r--configd/src/apps/sentinel/sentinel.cpp24
-rw-r--r--configd/src/apps/sentinel/service.cpp2
-rw-r--r--configdefinitions/src/vespa/dispatch.def4
-rw-r--r--configdefinitions/src/vespa/sentinel.def8
-rw-r--r--configserver-client/src/test/java/ai/vespa/hosted/client/HttpConfigServerClientTest.java1
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java9
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/deploy/Deployment.java2
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java12
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClient.java14
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperDeployer.java2
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/AddFileInterface.java3
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/ApplicationFileManager.java27
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDBRegistry.java21
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionUtil.java6
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java4
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java7
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java6
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java7
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/rpc/GetConfigProcessor.java3
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java2
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java38
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantRepository.java12
-rw-r--r--configserver/src/test/apps/app_sdbundles/services.xml2
-rw-r--r--configserver/src/test/apps/deprecated-features-app/hosts.xml7
-rw-r--r--configserver/src/test/apps/deprecated-features-app/searchdefinitions/music.sd50
-rw-r--r--configserver/src/test/apps/deprecated-features-app/services.xml38
-rw-r--r--configserver/src/test/apps/hosted-no-write-access-control/services.xml2
-rw-r--r--configserver/src/test/apps/hosted/services.xml2
-rw-r--r--configserver/src/test/apps/zkapp/services.xml2
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java4
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/deploy/DeployTester.java2
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClientTest.java2
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java9
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/MockFileRegistry.java12
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java22
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionRepositoryTest.java49
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/zookeeper/ZKApplicationPackageTest.java4
-rw-r--r--configserver/src/test/resources/deploy/advancedapp/services.xml2
-rw-r--r--container-core/src/main/java/com/yahoo/container/jdisc/RequestHandlerTestDriver.java4
-rw-r--r--container-core/src/main/java/com/yahoo/container/logging/ConnectionLogEntry.java8
-rw-r--r--container-core/src/main/java/com/yahoo/container/logging/JsonConnectionLogWriter.java10
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactory.java5
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyConnectionLogger.java11
-rw-r--r--container-core/src/test/java/com/yahoo/container/logging/JsonConnectionLogWriterTest.java3
-rw-r--r--container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactoryTest.java81
-rw-r--r--container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java15
-rw-r--r--container-disc/src/main/java/com/yahoo/container/jdisc/ConfiguredApplication.java2
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/query/NearestNeighborItem.java2
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/searcher/FillSearcher.java1
-rw-r--r--container-search/src/main/java/com/yahoo/search/Searcher.java1
-rw-r--r--container-search/src/main/java/com/yahoo/search/cluster/BaseNodeMonitor.java2
-rw-r--r--container-search/src/main/java/com/yahoo/search/cluster/ClusterMonitor.java4
-rw-r--r--container-search/src/main/java/com/yahoo/search/cluster/MonitorConfiguration.java5
-rw-r--r--container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java5
-rw-r--r--container-search/src/main/java/com/yahoo/search/grouping/request/GroupingOperation.java7
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/Presentation.java4
-rw-r--r--container-search/src/main/java/com/yahoo/search/searchchain/Execution.java1
-rw-r--r--container-search/src/main/java/com/yahoo/search/searchers/ValidateNearestNeighborSearcher.java44
-rw-r--r--container-search/src/test/java/com/yahoo/search/dispatch/MockSearchCluster.java1
-rw-r--r--container-search/src/test/java/com/yahoo/search/searchers/ValidateNearestNeighborTestCase.java26
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/ServiceRegistry.java3
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobType.java15
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/HorizonClient.java25
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/HorizonResponse.java36
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/MockHorizonClient.java48
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/package-info.java5
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/NodeHistory.java24
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/user/User.java31
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java9
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/Policy.java6
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/RoleDefinition.java3
-rw-r--r--controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobTypeTest.java28
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java4
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java4
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java5
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeScheduler.java16
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgrader.java19
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java2
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/SupportAccessSerializer.java69
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java8
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/HorizonApiHandler.java121
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriter.java112
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java23
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/certificate/EndpointCertificatesTest.java3
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ServiceRegistryMock.java8
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeSchedulerTest.java11
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgraderTest.java12
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java30
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/SupportAccessSerializerTest.java36
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriterTest.java59
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/application-roles.json2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/tenant-roles.json2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-athenz.json3
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json3
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-applications.json3
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-trial-capacity-cloud.json3
-rw-r--r--controller-server/src/test/resources/horizon/filter-in-execution-graph.expected.json37
-rw-r--r--controller-server/src/test/resources/horizon/filter-in-execution-graph.expected.operator.json32
-rw-r--r--controller-server/src/test/resources/horizon/filter-in-execution-graph.json21
-rw-r--r--controller-server/src/test/resources/horizon/filters-complex.expected.json56
-rw-r--r--controller-server/src/test/resources/horizon/filters-complex.json46
-rw-r--r--controller-server/src/test/resources/horizon/filters-meta-query.expected.json39
-rw-r--r--controller-server/src/test/resources/horizon/filters-meta-query.json29
-rw-r--r--controller-server/src/test/resources/horizon/no-filters.expected.json32
-rw-r--r--controller-server/src/test/resources/horizon/no-filters.json16
-rw-r--r--default_build_settings.cmake4
-rw-r--r--dist/vespa.spec2
-rw-r--r--document/src/main/java/com/yahoo/document/StructDataType.java2
-rw-r--r--document/src/main/java/com/yahoo/document/StructuredDataType.java2
-rw-r--r--document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java3
-rwxr-xr-xdocumentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/ExternPolicy.java2
-rw-r--r--documentgen-test/src/test/java/com/yahoo/vespa/config/DocumentGenPluginTest.java92
-rw-r--r--eval/CMakeLists.txt1
-rw-r--r--eval/src/apps/analyze_onnx_model/.gitignore1
-rw-r--r--eval/src/apps/analyze_onnx_model/CMakeLists.txt8
-rw-r--r--eval/src/apps/analyze_onnx_model/analyze_onnx_model.cpp208
-rw-r--r--eval/src/tests/tensor/onnx_wrapper/onnx_wrapper_test.cpp20
-rw-r--r--eval/src/vespa/eval/onnx/CMakeLists.txt1
-rw-r--r--eval/src/vespa/eval/onnx/onnx_model_cache.cpp51
-rw-r--r--eval/src/vespa/eval/onnx/onnx_model_cache.h58
-rw-r--r--eval/src/vespa/eval/onnx/onnx_wrapper.cpp48
-rw-r--r--eval/src/vespa/eval/onnx/onnx_wrapper.h2
-rw-r--r--filedistribution/src/main/java/com/yahoo/vespa/filedistribution/Downloads.java141
-rw-r--r--filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileDownloader.java67
-rw-r--r--filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java26
-rw-r--r--filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java142
-rw-r--r--filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileDownloaderTest.java47
-rw-r--r--filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileReceiverTest.java6
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/Flags.java54
-rw-r--r--fnet/src/vespa/fnet/connection.cpp8
-rw-r--r--fnet/src/vespa/fnet/connection.h5
-rw-r--r--fnet/src/vespa/fnet/frt/supervisor.cpp4
-rw-r--r--fnet/src/vespa/fnet/frt/supervisor.h1
-rw-r--r--indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotator.java14
-rw-r--r--indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotatorTestCase.java27
-rw-r--r--jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/AthenzAuthorizationFilter.java79
-rw-r--r--jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/RequestResourceMapper.java13
-rw-r--r--jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/athenz/AthenzAuthorizationFilterTest.java4
-rw-r--r--jrt/src/com/yahoo/jrt/Buffer.java19
-rw-r--r--jrt/src/com/yahoo/jrt/Connection.java40
-rw-r--r--jrt/src/com/yahoo/jrt/Supervisor.java23
-rw-r--r--jrt/src/com/yahoo/jrt/TlsCryptoSocket.java8
-rw-r--r--jrt/src/com/yahoo/jrt/slobrok/server/Slobrok.java2
-rw-r--r--jrt/tests/com/yahoo/jrt/BufferTest.java58
-rw-r--r--linguistics/src/main/java/com/yahoo/language/process/Token.java8
-rw-r--r--linguistics/src/main/java/com/yahoo/language/process/Tokenizer.java11
-rw-r--r--linguistics/src/main/java/com/yahoo/language/simple/SimpleToken.java4
-rw-r--r--messagebus/src/main/java/com/yahoo/messagebus/StaticThrottlePolicy.java2
-rw-r--r--metrics-proxy/src/main/java/ai/vespa/metricsproxy/rpc/RpcConnector.java2
-rw-r--r--metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/ConfigSentinelClient.java2
-rw-r--r--metrics/src/tests/summetrictest.cpp41
-rw-r--r--metrics/src/vespa/metrics/countmetric.h2
-rw-r--r--metrics/src/vespa/metrics/metric.cpp7
-rw-r--r--metrics/src/vespa/metrics/metric.h2
-rw-r--r--metrics/src/vespa/metrics/metricvalueset.h9
-rw-r--r--metrics/src/vespa/metrics/metricvalueset.hpp8
-rw-r--r--metrics/src/vespa/metrics/summetric.h1
-rw-r--r--metrics/src/vespa/metrics/summetric.hpp13
-rw-r--r--model-integration/src/test/java/ai/vespa/modelintegration/evaluator/OnnxEvaluatorTest.java2
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeReports.java13
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java2
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java21
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Applications.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerService.java5
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerServiceMock.java14
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/PassthroughLoadBalancerService.java6
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainer.java (renamed from node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainer.java)30
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostEncrypter.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java10
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java6
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirer.java30
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Agent.java1
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java8
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java25
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java100
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java1
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java10
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainerTest.java (renamed from node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java)10
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java24
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirerTest.java50
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClientTest.java5
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningCompleteHostCalculatorTest.java14
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java14
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java4
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java7
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/maintenance.json6
-rw-r--r--parent/pom.xml2
-rw-r--r--searchcore/src/tests/proton/documentdb/documentdb_test.cpp124
-rw-r--r--searchcore/src/vespa/searchcore/grouping/groupingcontext.cpp10
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentdb.cpp14
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp37
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.h44
-rw-r--r--searchlib/abi-spec.json2
-rw-r--r--searchlib/src/main/java/com/yahoo/searchlib/aggregation/Group.java40
-rwxr-xr-xsearchlib/src/main/java/com/yahoo/searchlib/rankingexpression/RankingExpression.java14
-rw-r--r--searchlib/src/test/java/com/yahoo/searchlib/aggregation/GroupingTestCase.java2
-rwxr-xr-xsearchlib/src/test/java/com/yahoo/searchlib/rankingexpression/RankingExpressionTestCase.java18
-rw-r--r--searchlib/src/tests/predicate/predicate_bounds_posting_list_test.cpp4
-rw-r--r--searchlib/src/tests/predicate/predicate_index_test.cpp50
-rw-r--r--searchlib/src/tests/predicate/predicate_interval_posting_list_test.cpp4
-rw-r--r--searchlib/src/tests/predicate/predicate_zero_constraint_posting_list_test.cpp4
-rw-r--r--searchlib/src/tests/predicate/predicate_zstar_compressed_posting_list_test.cpp4
-rw-r--r--searchlib/src/tests/queryeval/predicate/predicate_blueprint_test.cpp56
-rw-r--r--searchlib/src/vespa/searchlib/aggregation/group.h2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp76
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h38
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enumstore.h10
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enumstore.hpp16
-rw-r--r--searchlib/src/vespa/searchlib/attribute/i_enum_store.h12
-rw-r--r--searchlib/src/vespa/searchlib/attribute/i_enum_store_dictionary.h20
-rw-r--r--searchlib/src/vespa/searchlib/attribute/postingstore.cpp5
-rw-r--r--searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp27
-rw-r--r--searchlib/src/vespa/searchlib/attribute/predicate_attribute.h1
-rw-r--r--searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp6
-rw-r--r--searchlib/src/vespa/searchlib/common/bitvectorcache.cpp4
-rw-r--r--searchlib/src/vespa/searchlib/common/bitvectorcache.h12
-rw-r--r--searchlib/src/vespa/searchlib/common/condensedbitvectors.cpp6
-rw-r--r--searchlib/src/vespa/searchlib/common/condensedbitvectors.h4
-rw-r--r--searchlib/src/vespa/searchlib/common/indexmetainfo.cpp54
-rw-r--r--searchlib/src/vespa/searchlib/features/onnx_feature.cpp15
-rw-r--r--searchlib/src/vespa/searchlib/features/onnx_feature.h8
-rw-r--r--searchlib/src/vespa/searchlib/predicate/document_features_store.cpp60
-rw-r--r--searchlib/src/vespa/searchlib/predicate/document_features_store.h16
-rw-r--r--searchlib/src/vespa/searchlib/predicate/predicate_bounds_posting_list.h6
-rw-r--r--searchlib/src/vespa/searchlib/predicate/predicate_index.cpp50
-rw-r--r--searchlib/src/vespa/searchlib/predicate/predicate_index.h24
-rw-r--r--searchlib/src/vespa/searchlib/predicate/predicate_interval.cpp6
-rw-r--r--searchlib/src/vespa/searchlib/predicate/predicate_interval_posting_list.h11
-rw-r--r--searchlib/src/vespa/searchlib/predicate/predicate_interval_store.cpp15
-rw-r--r--searchlib/src/vespa/searchlib/predicate/predicate_interval_store.h5
-rw-r--r--searchlib/src/vespa/searchlib/predicate/predicate_posting_list.h6
-rw-r--r--searchlib/src/vespa/searchlib/predicate/predicate_zstar_compressed_posting_list.h6
-rw-r--r--searchlib/src/vespa/searchlib/predicate/simple_index.cpp6
-rw-r--r--searchlib/src/vespa/searchlib/predicate/simple_index.h20
-rw-r--r--searchlib/src/vespa/searchlib/predicate/simple_index.hpp71
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.cpp114
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.h31
-rw-r--r--searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp4
-rw-r--r--service-monitor/src/main/java/com/yahoo/vespa/service/slobrok/SlobrokMonitorManagerImpl.java2
-rw-r--r--slobrok/src/tests/registerapi/registerapi.cpp2
-rw-r--r--slobrok/src/vespa/slobrok/sbmirror.cpp1
-rw-r--r--slobrok/src/vespa/slobrok/sbmirror.h1
-rw-r--r--standalone-container/src/main/java/com/yahoo/container/standalone/LocalFileDb.java11
-rw-r--r--storage/src/tests/distributor/CMakeLists.txt4
-rw-r--r--storage/src/tests/distributor/blockingoperationstartertest.cpp85
-rw-r--r--storage/src/tests/distributor/distributor_host_info_reporter_test.cpp68
-rw-r--r--storage/src/tests/distributor/distributor_message_sender_stub.h5
-rw-r--r--storage/src/tests/distributor/distributortestutil.cpp13
-rw-r--r--storage/src/tests/distributor/distributortestutil.h6
-rw-r--r--storage/src/tests/distributor/idealstatemanagertest.cpp76
-rw-r--r--storage/src/tests/distributor/legacy_bucket_db_updater_test.cpp (renamed from storage/src/tests/distributor/bucketdbupdatertest.cpp)186
-rw-r--r--storage/src/tests/distributor/legacy_distributor_test.cpp (renamed from storage/src/tests/distributor/distributortest.cpp)139
-rw-r--r--storage/src/tests/distributor/maintenancemocks.h2
-rw-r--r--storage/src/tests/distributor/mergeoperationtest.cpp28
-rw-r--r--storage/src/tests/distributor/simplemaintenancescannertest.cpp85
-rw-r--r--storage/src/tests/distributor/splitbuckettest.cpp18
-rw-r--r--storage/src/tests/storageserver/mergethrottlertest.cpp2
-rw-r--r--storage/src/vespa/storage/config/distributorconfiguration.h4
-rw-r--r--storage/src/vespa/storage/distributor/CMakeLists.txt4
-rw-r--r--storage/src/vespa/storage/distributor/blockingoperationstarter.cpp2
-rw-r--r--storage/src/vespa/storage/distributor/blockingoperationstarter.h12
-rw-r--r--storage/src/vespa/storage/distributor/bucket_spaces_stats_provider.cpp40
-rw-r--r--storage/src/vespa/storage/distributor/bucket_spaces_stats_provider.h21
-rw-r--r--storage/src/vespa/storage/distributor/distributor.cpp118
-rw-r--r--storage/src/vespa/storage/distributor/distributor.h24
-rw-r--r--storage/src/vespa/storage/distributor/distributor_operation_context.h2
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe.cpp7
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe.h9
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe_component.h3
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe_interface.h1
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe_operation_context.h3
-rw-r--r--storage/src/vespa/storage/distributor/distributor_total_metrics.cpp54
-rw-r--r--storage/src/vespa/storage/distributor/distributor_total_metrics.h29
-rw-r--r--storage/src/vespa/storage/distributor/distributormessagesender.h1
-rw-r--r--storage/src/vespa/storage/distributor/externaloperationhandler.cpp3
-rw-r--r--storage/src/vespa/storage/distributor/ideal_state_total_metrics.cpp51
-rw-r--r--storage/src/vespa/storage/distributor/ideal_state_total_metrics.h28
-rw-r--r--storage/src/vespa/storage/distributor/idealstatemanager.cpp29
-rw-r--r--storage/src/vespa/storage/distributor/idealstatemanager.h26
-rw-r--r--storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.cpp33
-rw-r--r--storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h7
-rw-r--r--storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp22
-rw-r--r--storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.h5
-rw-r--r--storage/src/vespa/storage/distributor/min_replica_provider.cpp19
-rw-r--r--storage/src/vespa/storage/distributor/min_replica_provider.h10
-rw-r--r--storage/src/vespa/storage/distributor/operationowner.h4
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp4
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.cpp48
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.h6
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp8
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.h2
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp8
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.h2
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp4
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.h2
-rw-r--r--storage/src/vespa/storage/distributor/operations/operation.h3
-rw-r--r--storage/src/vespa/storage/storageserver/mergethrottler.cpp2
-rw-r--r--storage/src/vespa/storage/storageserver/mergethrottler.h3
-rw-r--r--storageapi/src/vespa/storageapi/message/bucket.cpp9
-rw-r--r--storageapi/src/vespa/storageapi/message/bucket.h3
-rw-r--r--vespa-athenz/src/main/java/com/yahoo/vespa/athenz/api/AthenzAccessToken.java11
-rw-r--r--vespa-documentgen-plugin/src/main/java/com/yahoo/vespa/DocumentGenMojo.java2
-rw-r--r--vespa-feed-client-cli/CMakeLists.txt2
-rw-r--r--vespa-feed-client-cli/pom.xml37
-rw-r--r--vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/CliClient.java58
-rwxr-xr-xvespa-feed-client-cli/src/main/sh/vespa-feed-client-standalone.sh9
-rwxr-xr-xvespa-feed-client-cli/src/main/sh/vespa-feed-client.sh2
-rw-r--r--vespa-feed-client-cli/src/maven/create-zip.xml24
-rw-r--r--vespa-feed-client/abi-spec.json319
-rw-r--r--vespa-feed-client/pom.xml9
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/ApacheCluster.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpCluster.java)56
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/BenchmarkingCluster.java102
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/Cluster.java12
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/DocumentId.java2
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java8
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java118
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java9
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/GracePeriodCircuitBreaker.java33
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java111
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequest.java42
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java100
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpResponse.java16
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonFeeder.java472
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonParseException.java15
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonStreamFeeder.java364
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParameters.java2
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationStats.java96
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/RequestStrategy.java14
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/Result.java2
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/SslContextBuilder.java30
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/package-info.java9
-rw-r--r--vespa-feed-client/src/test/java/ai/vespa/feed/client/GracePeriodCircuitBreakerTest.java60
-rw-r--r--vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpFeedClientTest.java101
-rw-r--r--vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpRequestStrategyTest.java190
-rw-r--r--vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonFeederTest.java124
-rw-r--r--vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonStreamFeederTest.java69
-rw-r--r--vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java89
-rw-r--r--vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonStreamFeederExample.java117
-rw-r--r--vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/SimpleExample.java34
-rw-r--r--vespa-hadoop/pom.xml11
-rw-r--r--vespa-hadoop/src/main/java/ai/vespa/feed/client/DryrunResult.java18
-rw-r--r--vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/LegacyVespaRecordWriter.java235
-rw-r--r--vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaOutputFormat.java6
-rw-r--r--vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaRecordWriter.java291
-rw-r--r--vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/util/VespaConfiguration.java2
-rw-r--r--vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/MapReduceTest.java34
-rw-r--r--vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaDocumentOperationTest.java18
-rw-r--r--vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaQueryTest.java10
-rw-r--r--vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaStorageTest.java26
-rw-r--r--vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/util/TupleToolsTest.java4
-rw-r--r--vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java41
-rw-r--r--vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java115
-rwxr-xr-xvespaclient-java/src/main/sh/vespa-visit.sh13
-rw-r--r--vespalib/src/tests/datastore/datastore/datastore_test.cpp33
-rw-r--r--vespalib/src/vespa/vespalib/datastore/datastorebase.h6
-rw-r--r--vespalib/src/vespa/vespalib/datastore/entryref.h1
-rw-r--r--vespalib/src/vespa/vespalib/net/tls/crypto_codec_adapter.h2
-rw-r--r--vespalib/src/vespa/vespalib/util/exception.h3
471 files changed, 8917 insertions, 3611 deletions
diff --git a/cloud-tenant-base-dependencies-enforcer/pom.xml b/cloud-tenant-base-dependencies-enforcer/pom.xml
index 16045d5dc75..340123ae659 100644
--- a/cloud-tenant-base-dependencies-enforcer/pom.xml
+++ b/cloud-tenant-base-dependencies-enforcer/pom.xml
@@ -34,7 +34,7 @@
<jetty-alpn.version>1.1.3.v20160715</jetty-alpn.version>
<junit5.version>5.7.0</junit5.version>
<junit5.platform.version>1.7.0</junit5.platform.version>
- <onnxruntime.version>1.7.0</onnxruntime.version>
+ <onnxruntime.version>1.8.0</onnxruntime.version>
<org.lz4.version>1.7.1</org.lz4.version>
<org.json.version>20090211</org.json.version><!-- TODO Vespa 8: remove as provided dependency -->
<slf4j.version>1.7.30</slf4j.version>
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RPCCommunicator.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RPCCommunicator.java
index bb45de37ce3..ec05ac1ed29 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RPCCommunicator.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RPCCommunicator.java
@@ -54,7 +54,7 @@ public class RPCCommunicator implements Communicator {
private final int fleetControllerIndex;
public static Supervisor createRealSupervisor() {
- return new Supervisor(new Transport("rpc-communicator")).useSmallBuffers();
+ return new Supervisor(new Transport("rpc-communicator")).setDropEmptyBuffers(true);
}
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java
index 73597e995d4..ce710a29180 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java
@@ -99,7 +99,7 @@ public class RpcServer {
disconnect();
log.log(Level.FINE, () -> "Fleetcontroller " + fleetControllerIndex + ": Connecting RPC server.");
if (supervisor != null) disconnect();
- supervisor = new Supervisor(new Transport("rpc" + port)).useSmallBuffers();
+ supervisor = new Supervisor(new Transport("rpc" + port)).setDropEmptyBuffers(true);
addMethods();
log.log(Level.FINE, () -> "Fleetcontroller " + fleetControllerIndex + ": Attempting to bind to port " + port);
acceptor = supervisor.listen(new Spec(port));
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/SlobrokClient.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/SlobrokClient.java
index b4e9a760d8e..3fa1b32cada 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/SlobrokClient.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/SlobrokClient.java
@@ -53,7 +53,7 @@ public class SlobrokClient implements NodeLookup {
this.connectionSpecs = slobrokConnectionSpecs;
shutdown();
supervisor = new Supervisor(new Transport("slobrok-client"));
- supervisor.useSmallBuffers();
+ supervisor.setDropEmptyBuffers(true);
SlobrokList slist = new SlobrokList();
slist.setup(slobrokConnectionSpecs);
mirror = new Mirror(supervisor, slist);
diff --git a/config-application-package/src/main/java/com/yahoo/config/application/Xml.java b/config-application-package/src/main/java/com/yahoo/config/application/Xml.java
index c48a41083c7..f2a837026ea 100644
--- a/config-application-package/src/main/java/com/yahoo/config/application/Xml.java
+++ b/config-application-package/src/main/java/com/yahoo/config/application/Xml.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.application;
import com.yahoo.config.application.api.ApplicationPackage;
@@ -75,10 +75,6 @@ public class Xml {
return factory.newDocumentBuilder();
}
- static File getServices(File app) {
- return new File(app, "services.xml"); // TODO Do not hard-code
- }
-
static Document copyDocument(Document input) throws TransformerException {
Transformer transformer = TransformerFactory.newInstance().newTransformer();
DOMSource source = new DOMSource(input);
@@ -142,9 +138,7 @@ public class Xml {
List<Element> children = XML.getChildren(parent, name);
List<Element> allFromFiles = allElemsFromPath(app, pathFromAppRoot);
for (Element fromFile : allFromFiles) {
- for (Element inThatFile : XML.getChildren(fromFile, name)) {
- children.add(inThatFile);
- }
+ children.addAll(XML.getChildren(fromFile, name));
}
return children;
}
diff --git a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/MockFileRegistry.java b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/MockFileRegistry.java
index b3d2b061430..5d71376aa5b 100644
--- a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/MockFileRegistry.java
+++ b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/MockFileRegistry.java
@@ -4,7 +4,9 @@ package com.yahoo.config.model.application.provider;
import com.yahoo.config.FileReference;
import com.yahoo.config.application.api.FileRegistry;
import com.yahoo.net.HostName;
+import net.jpountz.xxhash.XXHashFactory;
+import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
@@ -33,4 +35,13 @@ public class MockFileRegistry implements FileRegistry {
throw new IllegalArgumentException("FileReference addUri(String uri) is not implemented for " + getClass().getCanonicalName());
}
+ @Override
+ public FileReference addBlob(ByteBuffer blob) {
+ long blobHash = XXHashFactory.fastestJavaInstance().hash64().hash(blob, 0);
+ String relativePath = Long.toHexString(blobHash) + ".blob";
+ FileReference fileReference = new FileReference(relativePath);
+ entries.add(new Entry(relativePath, fileReference));
+ return fileReference;
+ }
+
}
diff --git a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistry.java b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistry.java
index e779d59ba24..75482ded05d 100644
--- a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistry.java
+++ b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistry.java
@@ -7,6 +7,7 @@ import com.yahoo.config.application.api.FileRegistry;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.Reader;
+import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
@@ -81,6 +82,10 @@ public class PreGeneratedFileRegistry implements FileRegistry {
public FileReference addUri(String uri) {
return new FileReference(path2Hash.get(uri));
}
+ @Override
+ public FileReference addBlob(ByteBuffer blob) {
+ return new FileReference(path2Hash.get(blob));
+ }
@Override
public String fileSourceHost() {
diff --git a/config-application-package/src/test/java/com/yahoo/config/application/IncludeProcessorTest.java b/config-application-package/src/test/java/com/yahoo/config/application/IncludeProcessorTest.java
index 562970c266f..f8484a8e455 100644
--- a/config-application-package/src/test/java/com/yahoo/config/application/IncludeProcessorTest.java
+++ b/config-application-package/src/test/java/com/yahoo/config/application/IncludeProcessorTest.java
@@ -1,15 +1,16 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.application;
+import com.yahoo.config.application.api.ApplicationPackage;
import org.junit.Test;
import org.w3c.dom.Document;
import org.xml.sax.SAXException;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.ParserConfigurationException;
-import javax.xml.stream.XMLStreamException;
-import javax.xml.transform.*;
-import java.io.*;
+import javax.xml.transform.TransformerException;
+import java.io.File;
+import java.io.IOException;
import java.nio.file.NoSuchFileException;
/**
@@ -72,7 +73,7 @@ public class IncludeProcessorTest {
" </nodes>\n" +
"</container></services>";
- Document doc = new IncludeProcessor(app).process(docBuilder.parse(Xml.getServices(app)));
+ Document doc = new IncludeProcessor(app).process(docBuilder.parse(getServices(app)));
// System.out.println(Xml.documentAsString(doc));
TestBase.assertDocument(expected, doc);
}
@@ -81,7 +82,11 @@ public class IncludeProcessorTest {
public void testRequiredIncludeIsDefault() throws ParserConfigurationException, IOException, SAXException, TransformerException {
File app = new File("src/test/resources/multienvapp_failrequired");
DocumentBuilder docBuilder = Xml.getPreprocessDocumentBuilder();
- new IncludeProcessor(app).process(docBuilder.parse(Xml.getServices(app)));
+ new IncludeProcessor(app).process(docBuilder.parse(getServices(app)));
+ }
+
+ static File getServices(File app) {
+ return new File(app, ApplicationPackage.SERVICES);
}
}
diff --git a/config-application-package/src/test/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistryTestCase.java b/config-application-package/src/test/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistryTestCase.java
index 4b2f5890a4e..7996efaa60e 100644
--- a/config-application-package/src/test/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistryTestCase.java
+++ b/config-application-package/src/test/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistryTestCase.java
@@ -1,35 +1,37 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.model.application.provider;
-import com.yahoo.config.FileReference;
import com.yahoo.config.application.api.FileRegistry;
import org.junit.Test;
import java.io.StringReader;
-import java.util.List;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
import java.util.Set;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
/**
* @author Tony Vaagenes
*/
public class PreGeneratedFileRegistryTestCase {
+ private static final String BLOB = "Some blob";
@Test
public void importAndExport() {
FileRegistry fileRegistry = new MockFileRegistry();
fileRegistry.addFile("1234");
+ fileRegistry.addBlob(ByteBuffer.wrap(BLOB.getBytes(StandardCharsets.UTF_8)));
String serializedRegistry = PreGeneratedFileRegistry.exportRegistry(fileRegistry);
PreGeneratedFileRegistry importedRegistry = PreGeneratedFileRegistry.importRegistry(new StringReader(serializedRegistry));
- assertEquals(Set.of("1234"), importedRegistry.getPaths());
+ assertEquals(Set.of("1234", "c5674b55c15c9c95.blob"), importedRegistry.getPaths());
- assertEquals(1, importedRegistry.getPaths().size());
+ assertEquals(2, importedRegistry.getPaths().size());
checkConsistentEntry(fileRegistry.export().get(0), importedRegistry);
+ checkConsistentEntry(fileRegistry.export().get(1), importedRegistry);
assertEquals(fileRegistry.fileSourceHost(), importedRegistry.fileSourceHost());
}
diff --git a/config-model-api/abi-spec.json b/config-model-api/abi-spec.json
index 58d2693aace..735778f4d46 100644
--- a/config-model-api/abi-spec.json
+++ b/config-model-api/abi-spec.json
@@ -422,6 +422,7 @@
"methods": [
"public abstract com.yahoo.config.FileReference addFile(java.lang.String)",
"public abstract com.yahoo.config.FileReference addUri(java.lang.String)",
+ "public abstract com.yahoo.config.FileReference addBlob(java.nio.ByteBuffer)",
"public com.yahoo.config.FileReference addApplicationPackage()",
"public abstract java.lang.String fileSourceHost()",
"public abstract java.util.List export()"
@@ -538,7 +539,9 @@
"public static final enum com.yahoo.config.application.api.ValidationId configModelVersionMismatch",
"public static final enum com.yahoo.config.application.api.ValidationId skipOldConfigModels",
"public static final enum com.yahoo.config.application.api.ValidationId accessControl",
- "public static final enum com.yahoo.config.application.api.ValidationId globalEndpointChange"
+ "public static final enum com.yahoo.config.application.api.ValidationId globalEndpointChange",
+ "public static final enum com.yahoo.config.application.api.ValidationId redundancyIncrease",
+ "public static final enum com.yahoo.config.application.api.ValidationId redundancyOne"
]
},
"com.yahoo.config.application.api.ValidationOverrides$Allow": {
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/ApplicationPackage.java b/config-model-api/src/main/java/com/yahoo/config/application/api/ApplicationPackage.java
index bc7dbbe2069..2aefc985f4b 100644
--- a/config-model-api/src/main/java/com/yahoo/config/application/api/ApplicationPackage.java
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/ApplicationPackage.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.application.api;
import com.yahoo.component.Version;
@@ -87,14 +87,14 @@ public interface ApplicationPackage {
/**
* Contents of services.xml. Caller must close reader after use.
*
- * @return a Reader, or null if no services.xml/vespa-services.xml present
+ * @return a Reader, or null if no services.xml present
*/
Reader getServices();
/**
* Contents of hosts.xml. Caller must close reader after use.
*
- * @return a Reader, or null if no hosts.xml/vespa-hosts.xml present
+ * @return a Reader, or null if no hosts.xml present
*/
Reader getHosts();
@@ -146,10 +146,11 @@ public interface ApplicationPackage {
/** Returns the major version this application is valid for, or empty if it is valid for all versions */
default Optional<Integer> getMajorVersion() {
- if ( ! getDeployment().isPresent()) return Optional.empty();
+ if (getDeployment().isEmpty()) return Optional.empty();
Element deployElement = XML.getDocument(getDeployment().get()).getDocumentElement();
if (deployElement == null) return Optional.empty();
+
String majorVersionString = deployElement.getAttribute("major-version");
if (majorVersionString == null || majorVersionString.isEmpty())
return Optional.empty();
@@ -181,7 +182,6 @@ public interface ApplicationPackage {
/** Returns handle for the file containing client certificate authorities */
default ApplicationFile getClientSecurityFile() { return getFile(SECURITY_DIR.append("clients.pem")); }
- //For generating error messages
String getHostSource();
String getServicesSource();
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/FileRegistry.java b/config-model-api/src/main/java/com/yahoo/config/application/api/FileRegistry.java
index 8415781b827..9d049ae0847 100644
--- a/config-model-api/src/main/java/com/yahoo/config/application/api/FileRegistry.java
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/FileRegistry.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.application.api;
+import java.nio.ByteBuffer;
import java.util.List;
import com.yahoo.config.FileReference;
@@ -13,6 +14,7 @@ public interface FileRegistry {
FileReference addFile(String relativePath);
FileReference addUri(String uri);
+ FileReference addBlob(ByteBuffer blob);
default FileReference addApplicationPackage() { return addFile(""); }
/**
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationId.java b/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationId.java
index 4c76d42a17e..7aa6788b86d 100644
--- a/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationId.java
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationId.java
@@ -23,7 +23,9 @@ public enum ValidationId {
configModelVersionMismatch("config-model-version-mismatch"), // Internal use
skipOldConfigModels("skip-old-config-models"), // Internal use
accessControl("access-control"), // Internal use, used in zones where there should be no access-control
- globalEndpointChange("global-endpoint-change"); // Changing global endpoints
+ globalEndpointChange("global-endpoint-change"), // Changing global endpoints
+ redundancyIncrease("redundancy-increase"), // Increasing redundancy - may easily cause feed blocked
+ redundancyOne("redundancy-one"); // redundancy=1 requires a validation override on first deployment
private final String id;
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationOverrides.java b/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationOverrides.java
index 8845431c71b..3221df38d4f 100644
--- a/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationOverrides.java
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationOverrides.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.application.api;
import com.google.common.collect.ImmutableList;
@@ -67,7 +67,7 @@ public class ValidationOverrides {
public boolean allows(String validationIdString, Instant now) {
Optional<ValidationId> validationId = ValidationId.from(validationIdString);
- if ( ! validationId.isPresent()) return false; // unknown id -> not allowed
+ if (validationId.isEmpty()) return false; // unknown id -> not allowed
return allows(validationId.get(), now);
}
@@ -125,8 +125,8 @@ public class ValidationOverrides {
.atStartOfDay().atZone(ZoneOffset.UTC).toInstant()
.plus(Duration.ofDays(1)); // Make the override valid *on* the "until" date
Optional<ValidationId> validationId = ValidationId.from(XML.getValue(allow));
- if (validationId.isPresent()) // skip unknown ids as they may be valid for other model versions
- overrides.add(new ValidationOverrides.Allow(validationId.get(), until));
+ // skip unknown ids as they may be valid for other model versions
+ validationId.ifPresent(id -> overrides.add(new Allow(id, until)));
}
return new ValidationOverrides(overrides, xmlForm);
}
@@ -177,6 +177,7 @@ public class ValidationOverrides {
}
+ // TODO: Remove this class after June 2021
public static class AllowAllValidationOverrides extends ValidationOverrides {
private final DeployLogger logger;
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
index 34a880302ad..81ee0a4c4c3 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
@@ -81,11 +81,14 @@ public interface ModelContext {
@ModelFeatureFlag(owners = {"baldersheim"}) default boolean useAsyncMessageHandlingOnSchedule() { throw new UnsupportedOperationException("TODO specify default value"); }
@ModelFeatureFlag(owners = {"baldersheim"}) default double feedConcurrency() { throw new UnsupportedOperationException("TODO specify default value"); }
@ModelFeatureFlag(owners = {"baldersheim"}) default boolean useBucketExecutorForPruneRemoved() { return true; }
+ @ModelFeatureFlag(owners = {"baldersheim"}) default int largeRankExpressionLimit() { return 0x10000; }
@ModelFeatureFlag(owners = {"baldersheim"}) default boolean useExternalRankExpressions() { return false; }
@ModelFeatureFlag(owners = {"baldersheim"}) default boolean distributeExternalRankExpressions() { return false; }
+ @ModelFeatureFlag(owners = {"baldersheim"}) default int maxConcurrentMergesPerNode() { throw new UnsupportedOperationException("TODO specify default value"); }
+ @ModelFeatureFlag(owners = {"baldersheim"}) default int maxMergeQueueSize() { throw new UnsupportedOperationException("TODO specify default value"); }
@ModelFeatureFlag(owners = {"geirst"}) default boolean enableFeedBlockInDistributor() { return true; }
@ModelFeatureFlag(owners = {"hmusum"}, removeAfter = "7.406") default int clusterControllerMaxHeapSizeInMb() { return 128; }
- @ModelFeatureFlag(owners = {"hmusum"}) default int metricsProxyMaxHeapSizeInMb(ClusterSpec.Type type) { return 256; }
+ @ModelFeatureFlag(owners = {"hmusum"}, removeAfter = "7.422") default int metricsProxyMaxHeapSizeInMb(ClusterSpec.Type type) { return 256; }
@ModelFeatureFlag(owners = {"bjorncs", "tokle"}) default List<String> allowedAthenzProxyIdentities() { return List.of(); }
@ModelFeatureFlag(owners = {"tokle"}) default boolean tenantIamRole() { return false; }
@ModelFeatureFlag(owners = {"vekterli"}) default int maxActivationInhibitedOutOfSyncGroups() { return 0; }
@@ -94,6 +97,7 @@ public interface ModelContext {
@ModelFeatureFlag(owners = {"tokle", "bjorncs"}) default boolean enableCustomAclMapping() { return false; }
@ModelFeatureFlag(owners = {"geirst", "vekterli"}) default int numDistributorStripes() { return 0; }
@ModelFeatureFlag(owners = {"arnej"}) default boolean requireConnectivityCheck() { return false; }
+ @ModelFeatureFlag(owners = {"hmusum"}) default boolean throwIfResourceLimitsSpecified() { return false; }
}
/** Warning: As elsewhere in this package, do not make backwards incompatible changes that will break old config models! */
diff --git a/config-model-api/src/test/java/com/yahoo/config/application/api/ValidationOverrideTest.java b/config-model-api/src/test/java/com/yahoo/config/application/api/ValidationOverrideTest.java
index 2943b0bab34..f7ef059c5f2 100644
--- a/config-model-api/src/test/java/com/yahoo/config/application/api/ValidationOverrideTest.java
+++ b/config-model-api/src/test/java/com/yahoo/config/application/api/ValidationOverrideTest.java
@@ -4,16 +4,10 @@ package com.yahoo.config.application.api;
import com.yahoo.test.ManualClock;
import org.junit.Assert;
import org.junit.Test;
-import org.xml.sax.SAXException;
-
-import java.io.IOException;
import java.io.StringReader;
-import java.time.Clock;
import java.time.Instant;
-import java.util.Optional;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
/**
* @author bratseth
@@ -82,15 +76,6 @@ public class ValidationOverrideTest {
assertEquals(empty.xmlForm(), emptyReserialized.xmlForm());
}
- @Test
- public void testAll() {
- ValidationOverrides all = ValidationOverrides.all;
- assertTrue(all.allows(ValidationId.deploymentRemoval, Clock.systemUTC().instant()));
- assertTrue(all.allows(ValidationId.contentClusterRemoval, Clock.systemUTC().instant()));
- assertTrue(all.allows(ValidationId.indexModeChange, Clock.systemUTC().instant()));
- assertTrue(all.allows(ValidationId.fieldTypeChange, Clock.systemUTC().instant()));
- }
-
private void assertOverridden(String validationId, ValidationOverrides overrides, Instant now) {
overrides.invalid(ValidationId.from(validationId).get(), "message", now); // should not throw exception
}
diff --git a/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java b/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java
index 68924dde3e1..dd66861f2ce 100644
--- a/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java
+++ b/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java
@@ -142,12 +142,8 @@ public class DeployState implements ConfigDefinitionStore {
this.semanticRules = semanticRules; // TODO: Remove this by seeing how pagetemplates are propagated
this.importedModels = importMlModels(applicationPackage, modelImporters, deployLogger);
- ValidationOverrides suppliedValidationOverrides = applicationPackage.getValidationOverrides().map(ValidationOverrides::fromXml)
- .orElse(ValidationOverrides.empty);
- this.validationOverrides =
- zone.environment().isManuallyDeployed() // // Warn but allow in manually deployed zones
- ? new ValidationOverrides.AllowAllValidationOverrides(suppliedValidationOverrides, deployLogger)
- : suppliedValidationOverrides;
+ this.validationOverrides = applicationPackage.getValidationOverrides().map(ValidationOverrides::fromXml)
+ .orElse(ValidationOverrides.empty);
this.wantedNodeVespaVersion = wantedNodeVespaVersion;
this.now = now;
diff --git a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
index 304855e545d..fe1bf93f32b 100644
--- a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
+++ b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
@@ -42,6 +42,7 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
private double defaultTermwiseLimit = 1.0;
private String jvmGCOptions = null;
private String sequencerType = "LATENCY";
+ private boolean firstTimeDeployment = false;
private String responseSequencerType = "ADAPTIVE";
private int responseNumThreads = 2;
private Optional<EndpointCertificateSecrets> endpointCertificateSecrets = Optional.empty();
@@ -54,11 +55,13 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
private boolean enableFeedBlockInDistributor = true;
private boolean useExternalRankExpression = false;
private int clusterControllerMaxHeapSizeInMb = 128;
- private int metricsProxyMaxHeapSizeInMb = 256;
private int maxActivationInhibitedOutOfSyncGroups = 0;
private List<TenantSecretStore> tenantSecretStores = Collections.emptyList();
private String jvmOmitStackTraceInFastThrowOption;
private int numDistributorStripes = 0;
+ private int maxConcurrentMergesPerNode = 16;
+ private int maxMergeQueueSize = 1024;
+ private int largeRankExpressionLimit = 0x10000;
private boolean allowDisableMtls = true;
private List<X509Certificate> operatorCertificates = Collections.emptyList();
@@ -75,7 +78,7 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
@Override public String jvmGCOptions(Optional<ClusterSpec.Type> clusterType) { return jvmGCOptions; }
@Override public String feedSequencerType() { return sequencerType; }
@Override public boolean isBootstrap() { return false; }
- @Override public boolean isFirstTimeDeployment() { return false; }
+ @Override public boolean isFirstTimeDeployment() { return firstTimeDeployment; }
@Override public boolean useDedicatedNodeForLogserver() { return useDedicatedNodeForLogserver; }
@Override public Optional<EndpointCertificateSecrets> endpointCertificateSecrets() { return endpointCertificateSecrets; }
@Override public double defaultTermwiseLimit() { return defaultTermwiseLimit; }
@@ -93,7 +96,6 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
@Override public double feedConcurrency() { return feedConcurrency; }
@Override public boolean enableFeedBlockInDistributor() { return enableFeedBlockInDistributor; }
@Override public int clusterControllerMaxHeapSizeInMb() { return clusterControllerMaxHeapSizeInMb; }
- @Override public int metricsProxyMaxHeapSizeInMb(ClusterSpec.Type type) { return metricsProxyMaxHeapSizeInMb; }
@Override public int maxActivationInhibitedOutOfSyncGroups() { return maxActivationInhibitedOutOfSyncGroups; }
@Override public List<TenantSecretStore> tenantSecretStores() { return tenantSecretStores; }
@Override public String jvmOmitStackTraceInFastThrowOption(ClusterSpec.Type type) { return jvmOmitStackTraceInFastThrowOption; }
@@ -102,11 +104,18 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
@Override public List<X509Certificate> operatorCertificates() { return operatorCertificates; }
@Override public boolean useExternalRankExpressions() { return useExternalRankExpression; }
@Override public boolean distributeExternalRankExpressions() { return useExternalRankExpression; }
+ @Override public int largeRankExpressionLimit() { return largeRankExpressionLimit; }
+ @Override public int maxConcurrentMergesPerNode() { return maxConcurrentMergesPerNode; }
+ @Override public int maxMergeQueueSize() { return maxMergeQueueSize; }
public TestProperties useExternalRankExpression(boolean value) {
useExternalRankExpression = value;
return this;
}
+ public TestProperties largeRankExpressionLimit(int value) {
+ largeRankExpressionLimit = value;
+ return this;
+ }
public TestProperties setFeedConcurrency(double feedConcurrency) {
this.feedConcurrency = feedConcurrency;
return this;
@@ -129,11 +138,24 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
responseSequencerType = type;
return this;
}
+ public TestProperties setFirstTimeDeployment(boolean firstTimeDeployment) {
+ this.firstTimeDeployment = firstTimeDeployment;
+ return this;
+ }
public TestProperties setResponseNumThreads(int numThreads) {
responseNumThreads = numThreads;
return this;
}
+ public TestProperties setMaxConcurrentMergesPerNode(int maxConcurrentMergesPerNode) {
+ this.maxConcurrentMergesPerNode = maxConcurrentMergesPerNode;
+ return this;
+ }
+ public TestProperties setMaxMergeQueueSize(int maxMergeQueueSize) {
+ this.maxMergeQueueSize = maxMergeQueueSize;
+ return this;
+ }
+
public TestProperties setDefaultTermwiseLimit(double limit) {
defaultTermwiseLimit = limit;
return this;
@@ -209,11 +231,6 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
return this;
}
- public TestProperties metricsProxyMaxHeapSizeInMb(int heapSize) {
- metricsProxyMaxHeapSizeInMb = heapSize;
- return this;
- }
-
public TestProperties maxActivationInhibitedOutOfSyncGroups(int nGroups) {
maxActivationInhibitedOutOfSyncGroups = nGroups;
return this;
diff --git a/config-model/src/main/java/com/yahoo/documentmodel/NewDocumentType.java b/config-model/src/main/java/com/yahoo/documentmodel/NewDocumentType.java
index 38d831a0b28..da338ad3107 100644
--- a/config-model/src/main/java/com/yahoo/documentmodel/NewDocumentType.java
+++ b/config-model/src/main/java/com/yahoo/documentmodel/NewDocumentType.java
@@ -3,7 +3,6 @@ package com.yahoo.documentmodel;
import com.yahoo.document.DataType;
import com.yahoo.document.Document;
-import com.yahoo.document.DocumentId;
import com.yahoo.document.Field;
import com.yahoo.document.StructDataType;
import com.yahoo.document.StructuredDataType;
@@ -32,34 +31,6 @@ import static java.util.Collections.emptySet;
*/
public final class NewDocumentType extends StructuredDataType implements DataTypeCollection {
- public static final class Name {
-
- private final String name;
- private final int id;
-
- public Name(String name) {
- this(name.hashCode(), name);
- }
-
- public Name(int id, String name) {
- this.id = id;
- this.name = name;
- }
-
- public String toString() { return name; }
-
- public final String getName() { return name; }
-
- public final int getId() { return id; }
-
- public int hashCode() { return name.hashCode(); }
-
- public boolean equals(Object other) {
- if ( ! (other instanceof Name)) return false;
- return name.equals(((Name)other).getName());
- }
- }
-
private final Name name;
private final DataTypeRepo dataTypes = new DataTypeRepo();
private final Map<Integer, NewDocumentType> inherits = new LinkedHashMap<>();
@@ -139,7 +110,7 @@ public final class NewDocumentType extends StructuredDataType implements DataTyp
}
@Override
- public Class getValueClass() {
+ public Class<Document> getValueClass() {
return Document.class;
}
@@ -148,7 +119,8 @@ public final class NewDocumentType extends StructuredDataType implements DataTyp
if (!(value instanceof Document)) {
return false;
}
- /** Temporary disabled due to clash with document and covariant return type
+ /*
+ Temporary disabled due to clash with document and covariant return type
Document doc = (Document) value;
if (((NewDocumentType) doc.getDataType()).inherits(this)) {
//the value is of this type; or the supertype of the value is of this type, etc....
@@ -162,28 +134,31 @@ public final class NewDocumentType extends StructuredDataType implements DataTyp
for (Field f : getFields()) {
Field inhF = inherited.getField(f.getName());
if (inhF != null && !inhF.equals(f)) {
- throw new IllegalArgumentException("Inherited document '" + inherited.toString() + "' already contains field '" +
- inhF.getName() + "'. Can not override with '" + f.getName() + "'.");
+ throw new IllegalArgumentException("Inherited document '" + inherited + "' already contains field '" +
+ inhF.getName() + "'. Can not override with '" + f.getName() + "'.");
}
}
for (Field f : inherited.getAllFields()) {
for (NewDocumentType side : inherits.values()) {
Field sideF = side.getField(f.getName());
if (sideF != null && !sideF.equals(f)) {
- throw new IllegalArgumentException("Inherited document '" + side.toString() + "' already contains field '" +
- sideF.getName() + "'. Document '" + inherited.toString() + "' also defines field '" + f.getName() +
- "'.Multiple inheritance must be disjunctive.");
+ throw new IllegalArgumentException("Inherited document '" + side + "' already contains field '" +
+ sideF.getName() + "'. Document '" + inherited +
+ "' also defines field '" + f.getName() +
+ "'.Multiple inheritance must be disjunctive.");
}
}
}
return true;
}
+
public void inherit(NewDocumentType inherited) {
if ( ! inherits.containsKey(inherited.getId())) {
verifyInheritance(inherited);
inherits.put(inherited.getId(), inherited);
}
}
+
public boolean inherits(NewDocumentType superType) {
if (getId() == superType.getId()) return true;
for (NewDocumentType type : inherits.values()) {
@@ -243,7 +218,7 @@ public final class NewDocumentType extends StructuredDataType implements DataTyp
@Override
public Document createFieldValue() {
- return new Document(null, (DocumentId)null);
+ throw new RuntimeException("Cannot create an instance of " + this);
}
@Override
@@ -375,4 +350,36 @@ public final class NewDocumentType extends StructuredDataType implements DataTyp
return importedFieldNames;
}
+ public static final class Name {
+
+ private final String name;
+ private final int id;
+
+ public Name(String name) {
+ this(name.hashCode(), name);
+ }
+
+ public Name(int id, String name) {
+ this.id = id;
+ this.name = name;
+ }
+
+ @Override
+ public String toString() { return name; }
+
+ public final String getName() { return name; }
+
+ public final int getId() { return id; }
+
+ @Override
+ public int hashCode() { return name.hashCode(); }
+
+ @Override
+ public boolean equals(Object other) {
+ if ( ! (other instanceof Name)) return false;
+ return name.equals(((Name)other).getName());
+ }
+
+ }
+
}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/DistributableResource.java b/config-model/src/main/java/com/yahoo/searchdefinition/DistributableResource.java
index 77ce2dd41b5..ffa9cbe9ba5 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/DistributableResource.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/DistributableResource.java
@@ -5,15 +5,17 @@ import com.yahoo.path.Path;
import com.yahoo.vespa.model.AbstractService;
import com.yahoo.vespa.model.utils.FileSender;
+import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Objects;
public class DistributableResource {
- public enum PathType { FILE, URI };
+ public enum PathType { FILE, URI, BLOB };
/** The search definition-unique name of this constant */
private final String name;
- private String path = null;
+ private final ByteBuffer blob;
+ private String path;
private String fileReference = "";
private PathType pathType = PathType.FILE;
@@ -22,11 +24,20 @@ public class DistributableResource {
}
public DistributableResource(String name) {
- this(name, null);
+ this.name = name;
+ blob = null;
}
public DistributableResource(String name, String path) {
this.name = name;
this.path = path;
+ blob = null;
+ }
+ public DistributableResource(String name, ByteBuffer blob) {
+ Objects.requireNonNull(name, "Blob name cannot be null");
+ Objects.requireNonNull(blob, "Blob cannot be null");
+ this.name = name;
+ this.blob = blob;
+ pathType = PathType.BLOB;
}
public void setFileName(String fileName) {
@@ -41,16 +52,24 @@ public class DistributableResource {
this.pathType = PathType.URI;
}
- protected void setFileReference(String fileReference) { this.fileReference = fileReference; }
/** Initiate sending of this constant to some services over file distribution */
public void sendTo(Collection<? extends AbstractService> services) {
- FileReference reference = (pathType == PathType.FILE)
- ? FileSender.sendFileToServices(path, services)
- : FileSender.sendUriToServices(path, services);
- this.fileReference = reference.value();
+ fileReference = sendToServices(services).value();
+ }
+ private FileReference sendToServices(Collection<? extends AbstractService> services) {
+ switch (pathType) {
+ case FILE:
+ return FileSender.sendFileToServices(path, services);
+ case URI:
+ return FileSender.sendUriToServices(path, services);
+ case BLOB:
+ return FileSender.sendBlobToServices(blob, services);
+ }
+ throw new IllegalArgumentException("Unknown path type " + pathType);
}
public String getName() { return name; }
+ public ByteBuffer getBlob() { return blob; }
public String getFileName() { return path; }
public Path getFilePath() { return Path.fromString(path); }
public String getUri() { return path; }
@@ -63,10 +82,8 @@ public class DistributableResource {
public String toString() {
StringBuilder b = new StringBuilder();
- b.append("resource '").append(name)
- .append(pathType == PathType.FILE ? "' from file '" : " from uri ").append(path)
- .append("' with ref '").append(fileReference)
- .append("'");
+ b.append("resource '").append(name).append(" of type '").append(pathType)
+ .append("' with ref '").append(fileReference).append("'");
return b.toString();
}
}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java b/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java
index fed35382b21..9b752c4179f 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java
@@ -209,17 +209,13 @@ public class DocumentModelBuilder {
private static DataType resolveTemporariesRecurse(DataType type, DataTypeCollection repo,
Collection<NewDocumentType> docs) {
if (type instanceof TemporaryStructuredDataType) {
- NewDocumentType docType = getDocumentType(docs, type.getId());
- if (docType != null) {
- type = docType;
- return type;
- }
- DataType real = repo.getDataType(type.getId());
- if (real == null) {
- throw new NullPointerException("Can not find type '" + type.toString() + "', impossible.");
- }
- type = real;
- } else if (type instanceof StructDataType) {
+ DataType struct = repo.getDataType(type.getId());
+ if (struct != null)
+ type = struct;
+ else
+ type = getDocumentType(docs, type.getId());
+ }
+ else if (type instanceof StructDataType) {
StructDataType dt = (StructDataType) type;
for (com.yahoo.document.Field field : dt.getFields()) {
if (field.getDataType() != type) {
@@ -227,14 +223,17 @@ public class DocumentModelBuilder {
field.setDataType(resolveTemporariesRecurse(field.getDataType(), repo, docs));
}
}
- } else if (type instanceof MapDataType) {
+ }
+ else if (type instanceof MapDataType) {
MapDataType t = (MapDataType) type;
t.setKeyType(resolveTemporariesRecurse(t.getKeyType(), repo, docs));
t.setValueType(resolveTemporariesRecurse(t.getValueType(), repo, docs));
- } else if (type instanceof CollectionDataType) {
+ }
+ else if (type instanceof CollectionDataType) {
CollectionDataType t = (CollectionDataType) type;
t.setNestedType(resolveTemporariesRecurse(t.getNestedType(), repo, docs));
- } else if (type instanceof ReferenceDataType) {
+ }
+ else if (type instanceof ReferenceDataType) {
ReferenceDataType t = (ReferenceDataType) type;
if (t.getTargetType() instanceof TemporaryStructuredDataType) {
DataType targetType = resolveTemporariesRecurse(t.getTargetType(), repo, docs);
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/ImmutableSearch.java b/config-model/src/main/java/com/yahoo/searchdefinition/ImmutableSearch.java
index 6b40289e17d..24bc081aded 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/ImmutableSearch.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/ImmutableSearch.java
@@ -32,7 +32,7 @@ public interface ImmutableSearch {
DeployLogger getDeployLogger();
ModelContext.Properties getDeployProperties();
RankingConstants rankingConstants();
- RankExpressionFiles rankExpressionFiles();
+ LargeRankExpressions rankExpressionFiles();
OnnxModels onnxModels();
Stream<ImmutableSDField> allImportedFields();
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/LargeRankExpressions.java b/config-model/src/main/java/com/yahoo/searchdefinition/LargeRankExpressions.java
new file mode 100644
index 00000000000..6fadcb39d11
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/LargeRankExpressions.java
@@ -0,0 +1,38 @@
+package com.yahoo.searchdefinition;
+
+import com.yahoo.vespa.model.AbstractService;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+public class LargeRankExpressions {
+ private final Map<String, RankExpressionBody> expressions = new HashMap<>();
+
+ public void add(RankExpressionBody expression) {
+ expression.validate();
+ String name = expression.getName();
+ if (expressions.containsKey(name)) {
+ throw new IllegalArgumentException("Rank expression '" + name +
+ "' defined twice. Previous blob with " + expressions.get(name).getBlob().remaining() +
+ " bytes, while current has " + expression.getBlob().remaining() + " bytes");
+ }
+ expressions.put(name, expression);
+ }
+
+ /** Returns the rank expression with the given name, or null if not present */
+ public RankExpressionBody get(String name) {
+ return expressions.get(name);
+ }
+
+ /** Returns a read-only map of the rank expressions in this, indexed by name */
+ public Map<String, RankExpressionBody> asMap() {
+ return Collections.unmodifiableMap(expressions);
+ }
+
+ /** Initiate sending of these expressions to some services over file distribution */
+ public void sendTo(Collection<? extends AbstractService> services) {
+ expressions.values().forEach(constant -> constant.sendTo(services));
+ }
+}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/RankExpressionBody.java b/config-model/src/main/java/com/yahoo/searchdefinition/RankExpressionBody.java
new file mode 100644
index 00000000000..8c6830de815
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/RankExpressionBody.java
@@ -0,0 +1,10 @@
+package com.yahoo.searchdefinition;
+
+import java.nio.ByteBuffer;
+
+public class RankExpressionBody extends DistributableResource {
+
+ public RankExpressionBody(String name, ByteBuffer body) {
+ super(name, body);
+ }
+}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/RankExpressionFile.java b/config-model/src/main/java/com/yahoo/searchdefinition/RankExpressionFile.java
deleted file mode 100644
index 56385efeb0b..00000000000
--- a/config-model/src/main/java/com/yahoo/searchdefinition/RankExpressionFile.java
+++ /dev/null
@@ -1,35 +0,0 @@
-package com.yahoo.searchdefinition;
-
-import com.yahoo.config.application.api.ApplicationPackage;
-import com.yahoo.vespa.model.AbstractService;
-import com.yahoo.vespa.model.utils.FileSender;
-
-import java.util.Collection;
-
-public class RankExpressionFile extends DistributableResource {
-
- public RankExpressionFile(String name, String path) {
- super(name, path);
- validate();
- }
-
- @Override
- public void sendTo(Collection<? extends AbstractService> services) {
- /*
- * TODO This is a very dirty hack due to using both SEARCH_DEFINITIONS_DIR and SCHEMA_DIR
- * and doing so inconsistently, combined with using both fields from application package on disk and in zookeeper.
- * The mess is spread out nicely, but ZookeeperClient, and writeSearchDefinitions and ZkApplicationPackage and FilesApplicationPackage
- * should be consolidated
- */
- try {
- setFileReference(FileSender.sendFileToServices(ApplicationPackage.SCHEMAS_DIR + "/" + getFileName(), services).value());
- } catch (IllegalArgumentException e1) {
- try {
- setFileReference(FileSender.sendFileToServices(ApplicationPackage.SEARCH_DEFINITIONS_DIR + "/" + getFileName(), services).value());
- } catch (IllegalArgumentException e2) {
- throw new IllegalArgumentException("Failed to find expression file '" + getFileName() + "' in '"
- + ApplicationPackage.SEARCH_DEFINITIONS_DIR + "' or '" + ApplicationPackage.SCHEMAS_DIR + "'.", e2);
- }
- }
- }
-}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/RankExpressionFiles.java b/config-model/src/main/java/com/yahoo/searchdefinition/RankExpressionFiles.java
deleted file mode 100644
index 34ad912dd00..00000000000
--- a/config-model/src/main/java/com/yahoo/searchdefinition/RankExpressionFiles.java
+++ /dev/null
@@ -1,47 +0,0 @@
-package com.yahoo.searchdefinition;
-
-import com.yahoo.config.application.api.DeployLogger;
-import com.yahoo.vespa.model.AbstractService;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.logging.Level;
-
-public class RankExpressionFiles {
- private final Map<String, RankExpressionFile> expressions = new HashMap<>();
-
- //TODO Deploy logger should not be necessary, as redefinition is illegal, but legacy prevents enforcement starting now.
- public void add(RankExpressionFile expression, DeployLogger deployLogger) {
- expression.validate();
- String name = expression.getName();
- if (expressions.containsKey(name)) {
- if ( expressions.get(name).getFileName().equals(expression.getFileName()) ) {
- //TODO Throw instead, No later than Vespa 8
- deployLogger.logApplicationPackage(Level.WARNING, "Rank expression file '" + name +
- "' defined twice with identical expression (illegal and will be enforced soon) '" + expression.getFileName() + "'.");
- } else {
- throw new IllegalArgumentException("Rank expression file '" + name +
- "' defined twice (illegal but not enforced), but redefinition is not matching (illegal and enforced), " +
- "previous = '" + expressions.get(name).getFileName() + "', new = '" + expression.getFileName() + "'.");
- }
- }
- expressions.put(name, expression);
- }
-
- /** Returns the ranking constant with the given name, or null if not present */
- public RankExpressionFile get(String name) {
- return expressions.get(name);
- }
-
- /** Returns a read-only map of the ranking constants in this indexed by name */
- public Map<String, RankExpressionFile> asMap() {
- return Collections.unmodifiableMap(expressions);
- }
-
- /** Initiate sending of these constants to some services over file distribution */
- public void sendTo(Collection<? extends AbstractService> services) {
- expressions.values().forEach(constant -> constant.sendTo(services));
- }
-}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/Search.java b/config-model/src/main/java/com/yahoo/searchdefinition/Search.java
index f11afef0eb2..9ce1b8bb330 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/Search.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/Search.java
@@ -83,7 +83,7 @@ public class Search implements ImmutableSearch {
private final Map<String, DocumentSummary> summaries = new LinkedHashMap<>();
/** External rank expression files of this */
- private final RankExpressionFiles rankExpressionFiles = new RankExpressionFiles();
+ private final LargeRankExpressions largeRankExpressions = new LargeRankExpressions();
/** Ranking constants of this */
private final RankingConstants rankingConstants = new RankingConstants();
@@ -98,7 +98,7 @@ public class Search implements ImmutableSearch {
private final DeployLogger deployLogger;
private final ModelContext.Properties properties;
- /** Testin only */
+ /** Testing only */
public Search(String name) {
this(name, null, new BaseDeployLogger(), new TestProperties());
}
@@ -188,7 +188,7 @@ public class Search implements ImmutableSearch {
}
@Override
- public RankExpressionFiles rankExpressionFiles() { return rankExpressionFiles; }
+ public LargeRankExpressions rankExpressionFiles() { return largeRankExpressions; }
@Override
public RankingConstants rankingConstants() { return rankingConstants; }
@@ -198,7 +198,7 @@ public class Search implements ImmutableSearch {
public void sendTo(Collection<? extends AbstractService> services) {
rankingConstants.sendTo(services);
- rankExpressionFiles.sendTo(services);
+ largeRankExpressions.sendTo(services);
onnxModels.sendTo(services);
}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/derived/RankProfileList.java b/config-model/src/main/java/com/yahoo/searchdefinition/derived/RankProfileList.java
index d414b9ed79f..7c533cce006 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/derived/RankProfileList.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/derived/RankProfileList.java
@@ -6,8 +6,7 @@ import com.yahoo.config.model.api.ModelContext;
import com.yahoo.search.query.profile.QueryProfileRegistry;
import com.yahoo.searchdefinition.OnnxModel;
import com.yahoo.searchdefinition.OnnxModels;
-import com.yahoo.searchdefinition.RankExpressionFile;
-import com.yahoo.searchdefinition.RankExpressionFiles;
+import com.yahoo.searchdefinition.LargeRankExpressions;
import com.yahoo.searchdefinition.RankProfileRegistry;
import com.yahoo.searchdefinition.RankingConstant;
import com.yahoo.searchdefinition.RankingConstants;
@@ -34,14 +33,14 @@ public class RankProfileList extends Derived implements RankProfilesConfig.Produ
private final Map<String, RawRankProfile> rankProfiles = new java.util.LinkedHashMap<>();
private final RankingConstants rankingConstants;
- private final RankExpressionFiles rankExpressionFiles;
+ private final LargeRankExpressions largeRankExpressions;
private final OnnxModels onnxModels;
public static RankProfileList empty = new RankProfileList();
private RankProfileList() {
rankingConstants = new RankingConstants();
- rankExpressionFiles = new RankExpressionFiles();
+ largeRankExpressions = new LargeRankExpressions();
onnxModels = new OnnxModels();
}
@@ -53,7 +52,7 @@ public class RankProfileList extends Derived implements RankProfilesConfig.Produ
*/
public RankProfileList(Search search,
RankingConstants rankingConstants,
- RankExpressionFiles rankExpressionFiles,
+ LargeRankExpressions largeRankExpressions,
AttributeFields attributeFields,
RankProfileRegistry rankProfileRegistry,
QueryProfileRegistry queryProfiles,
@@ -61,7 +60,7 @@ public class RankProfileList extends Derived implements RankProfilesConfig.Produ
ModelContext.Properties deployProperties) {
setName(search == null ? "default" : search.getName());
this.rankingConstants = rankingConstants;
- this.rankExpressionFiles = rankExpressionFiles;
+ this.largeRankExpressions = largeRankExpressions;
onnxModels = search == null ? new OnnxModels() : search.onnxModels(); // as ONNX models come from parsing rank expressions
deriveRankProfiles(rankProfileRegistry, queryProfiles, importedModels, search, attributeFields, deployProperties);
}
@@ -74,7 +73,8 @@ public class RankProfileList extends Derived implements RankProfilesConfig.Produ
ModelContext.Properties deployProperties) {
if (search != null) { // profiles belonging to a search have a default profile
RawRankProfile defaultProfile = new RawRankProfile(rankProfileRegistry.get(search, "default"),
- queryProfiles, importedModels, attributeFields, deployProperties);
+ largeRankExpressions, queryProfiles, importedModels,
+ attributeFields, deployProperties);
rankProfiles.put(defaultProfile.getName(), defaultProfile);
}
@@ -84,7 +84,8 @@ public class RankProfileList extends Derived implements RankProfilesConfig.Produ
this.onnxModels.add(rank.onnxModels());
}
- RawRankProfile rawRank = new RawRankProfile(rank, queryProfiles, importedModels, attributeFields, deployProperties);
+ RawRankProfile rawRank = new RawRankProfile(rank, largeRankExpressions, queryProfiles, importedModels,
+ attributeFields, deployProperties);
rankProfiles.put(rawRank.getName(), rawRank);
}
}
@@ -100,7 +101,7 @@ public class RankProfileList extends Derived implements RankProfilesConfig.Produ
public void sendTo(Collection<? extends AbstractService> services) {
rankingConstants.sendTo(services);
- rankExpressionFiles.sendTo(services);
+ largeRankExpressions.sendTo(services);
onnxModels.sendTo(services);
}
@@ -115,7 +116,7 @@ public class RankProfileList extends Derived implements RankProfilesConfig.Produ
}
public void getConfig(RankingExpressionsConfig.Builder builder) {
- rankExpressionFiles.asMap().values().forEach((expr) -> builder.expression.add(new RankingExpressionsConfig.Expression.Builder().name(expr.getName()).fileref(expr.getFileReference())));
+ largeRankExpressions.asMap().values().forEach((expr) -> builder.expression.add(new RankingExpressionsConfig.Expression.Builder().name(expr.getName()).fileref(expr.getFileReference())));
}
public void getConfig(RankingConstantsConfig.Builder builder) {
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/derived/RawRankProfile.java b/config-model/src/main/java/com/yahoo/searchdefinition/derived/RawRankProfile.java
index 6b589a22de5..97d695cead9 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/derived/RawRankProfile.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/derived/RawRankProfile.java
@@ -9,6 +9,8 @@ import com.yahoo.config.model.api.ModelContext;
import com.yahoo.config.model.deploy.TestProperties;
import com.yahoo.search.query.profile.QueryProfileRegistry;
import com.yahoo.searchdefinition.OnnxModel;
+import com.yahoo.searchdefinition.LargeRankExpressions;
+import com.yahoo.searchdefinition.RankExpressionBody;
import com.yahoo.searchdefinition.document.RankType;
import com.yahoo.searchdefinition.RankProfile;
import com.yahoo.searchdefinition.expressiontransforms.OnnxModelTransformer;
@@ -20,6 +22,7 @@ import com.yahoo.searchlib.rankingexpression.rule.SerializationContext;
import com.yahoo.tensor.TensorType;
import com.yahoo.vespa.config.search.RankProfilesConfig;
+import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashSet;
@@ -27,6 +30,7 @@ import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
+import java.util.ListIterator;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
@@ -55,19 +59,20 @@ public class RawRankProfile implements RankProfilesConfig.Producer {
/**
* Creates a raw rank profile from the given rank profile
*/
- public RawRankProfile(RankProfile rankProfile, QueryProfileRegistry queryProfiles, ImportedMlModels importedModels,
+ public RawRankProfile(RankProfile rankProfile, LargeRankExpressions largeExpressions,
+ QueryProfileRegistry queryProfiles, ImportedMlModels importedModels,
AttributeFields attributeFields, ModelContext.Properties deployProperties) {
this.name = rankProfile.getName();
compressedProperties = compress(new Deriver(rankProfile.compile(queryProfiles, importedModels),
- attributeFields, deployProperties).derive());
+ attributeFields, deployProperties).derive(largeExpressions));
}
/**
* Only for testing
*/
- public RawRankProfile(RankProfile rankProfile, QueryProfileRegistry queryProfiles,
- ImportedMlModels importedModels, AttributeFields attributeFields) {
- this(rankProfile, queryProfiles, importedModels, attributeFields, new TestProperties());
+ public RawRankProfile(RankProfile rankProfile, QueryProfileRegistry queryProfiles, ImportedMlModels importedModels,
+ AttributeFields attributeFields) {
+ this(rankProfile, new LargeRankExpressions(), queryProfiles, importedModels, attributeFields, new TestProperties());
}
private Compressor.Compression compress(List<Pair<String, String>> properties) {
@@ -142,6 +147,9 @@ public class RawRankProfile implements RankProfilesConfig.Producer {
private final int numSearchPartitions;
private final double termwiseLimit;
private final double rankScoreDropLimit;
+ private final int largeRankExpressionLimit;
+ private final boolean distributeLargeRankExpressions;
+ private final boolean useDistributedRankExpressions;
/**
* The rank type definitions used to derive settings for the native rank features
@@ -150,6 +158,7 @@ public class RawRankProfile implements RankProfilesConfig.Producer {
private final Map<String, String> attributeTypes;
private final Map<String, String> queryFeatureTypes;
private final Set<String> filterFields = new java.util.LinkedHashSet<>();
+ private final String rankprofileName;
private RankingExpression firstPhaseRanking;
private RankingExpression secondPhaseRanking;
@@ -159,6 +168,7 @@ public class RawRankProfile implements RankProfilesConfig.Producer {
*/
Deriver(RankProfile compiled, AttributeFields attributeFields, ModelContext.Properties deployProperties)
{
+ rankprofileName = compiled.getName();
attributeTypes = compiled.getAttributeTypes();
queryFeatureTypes = compiled.getQueryFeatureTypes();
firstPhaseRanking = compiled.getFirstPhaseRanking();
@@ -174,6 +184,9 @@ public class RawRankProfile implements RankProfilesConfig.Producer {
keepRankCount = compiled.getKeepRankCount();
rankScoreDropLimit = compiled.getRankScoreDropLimit();
ignoreDefaultRankFeatures = compiled.getIgnoreDefaultRankFeatures();
+ largeRankExpressionLimit = deployProperties.featureFlags().largeRankExpressionLimit();
+ distributeLargeRankExpressions = deployProperties.featureFlags().distributeExternalRankExpressions();
+ useDistributedRankExpressions = deployProperties.featureFlags().useExternalRankExpressions();
rankProperties = new ArrayList<>(compiled.getRankProperties());
Map<String, RankProfile.RankingExpressionFunction> functions = compiled.getFunctions();
@@ -319,10 +332,10 @@ public class RawRankProfile implements RankProfilesConfig.Producer {
}
/** Derives the properties this produces */
- public List<Pair<String, String>> derive() {
+ public List<Pair<String, String>> derive(LargeRankExpressions largeRankExpressions) {
List<Pair<String, String>> properties = new ArrayList<>();
for (RankProfile.RankProperty property : rankProperties) {
- if (("rankingExpression(" + RankProfile.FIRST_PHASE + ").rankingScript").equals(property.getName())) {
+ if (RankingExpression.propertyName(RankProfile.FIRST_PHASE).equals(property.getName())) {
// Could have been set by function expansion. Set expressions, then skip this property.
try {
firstPhaseRanking = new RankingExpression(property.getValue());
@@ -330,7 +343,7 @@ public class RawRankProfile implements RankProfilesConfig.Producer {
throw new IllegalArgumentException("Could not parse first phase expression", e);
}
}
- else if (("rankingExpression(" + RankProfile.SECOND_PHASE + ").rankingScript").equals(property.getName())) {
+ else if (RankingExpression.propertyName(RankProfile.SECOND_PHASE).equals(property.getName())) {
try {
secondPhaseRanking = new RankingExpression(property.getValue());
} catch (ParseException e) {
@@ -419,7 +432,7 @@ public class RawRankProfile implements RankProfilesConfig.Producer {
properties.add(new Pair<>("vespa.rank." + phase, expression.getRoot().toString()));
} else {
properties.add(new Pair<>("vespa.rank." + phase, "rankingExpression(" + name + ")"));
- properties.add(new Pair<>("rankingExpression(" + name + ").rankingScript", expression.getRoot().toString()));
+ properties.add(new Pair<>(RankingExpression.propertyName(name), expression.getRoot().toString()));
}
return properties;
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/AbstractService.java b/config-model/src/main/java/com/yahoo/vespa/model/AbstractService.java
index 8dea1b65079..ea0452a6c49 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/AbstractService.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/AbstractService.java
@@ -8,6 +8,7 @@ import com.yahoo.config.model.api.ServiceInfo;
import com.yahoo.config.model.producer.AbstractConfigProducer;
import com.yahoo.vespa.defaults.Defaults;
+import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
@@ -470,6 +471,10 @@ public abstract class AbstractService extends AbstractConfigProducer<AbstractCon
return getRoot().getFileDistributor().sendUriToHost(uri, getHost().getHost());
}
+ public FileReference sendBlob(ByteBuffer blob) {
+ return getRoot().getFileDistributor().sendBlobToHost(blob, getHost().getHost());
+ }
+
/** The service HTTP port for health status */
public int getHealthPort() { return -1;}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/ConfigSentinel.java b/config-model/src/main/java/com/yahoo/vespa/model/ConfigSentinel.java
index 800bf73cdbb..6203f78fc0c 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/ConfigSentinel.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/ConfigSentinel.java
@@ -86,11 +86,11 @@ public class ConfigSentinel extends AbstractService implements SentinelConfig.Pr
private SentinelConfig.Connectivity.Builder getConnectivityConfig(boolean enable) {
var builder = new SentinelConfig.Connectivity.Builder();
if (enable) {
- builder.maxBadOutPercent(60);
- builder.maxBadReverseCount(3);
+ builder.minOkPercent(50);
+ builder.maxBadCount(2);
} else {
- builder.maxBadOutPercent(100);
- builder.maxBadReverseCount(Integer.MAX_VALUE);
+ builder.minOkPercent(0);
+ builder.maxBadCount(Integer.MAX_VALUE);
}
return builder;
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java b/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java
index ab00e9d295f..d20247b79fc 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java
@@ -33,7 +33,7 @@ import com.yahoo.container.QrConfig;
import com.yahoo.path.Path;
import com.yahoo.searchdefinition.OnnxModel;
import com.yahoo.searchdefinition.OnnxModels;
-import com.yahoo.searchdefinition.RankExpressionFiles;
+import com.yahoo.searchdefinition.LargeRankExpressions;
import com.yahoo.searchdefinition.RankProfile;
import com.yahoo.searchdefinition.RankProfileRegistry;
import com.yahoo.searchdefinition.RankingConstants;
@@ -131,7 +131,7 @@ public final class VespaModel extends AbstractConfigProducerRoot implements Seri
private final RankingConstants rankingConstants = new RankingConstants();
/** External rank expression files of this */
- private final RankExpressionFiles rankExpressionFiles = new RankExpressionFiles();
+ private final LargeRankExpressions largeRankExpressions = new LargeRankExpressions();
/** The validation overrides of this. This is never null. */
private final ValidationOverrides validationOverrides;
@@ -187,7 +187,7 @@ public final class VespaModel extends AbstractConfigProducerRoot implements Seri
deployState.rankProfileRegistry(), deployState.getQueryProfiles());
rankProfileList = new RankProfileList(null, // null search -> global
rankingConstants,
- rankExpressionFiles,
+ largeRankExpressions,
AttributeFields.empty,
deployState.rankProfileRegistry(),
deployState.getQueryProfiles().getRegistry(),
@@ -266,7 +266,7 @@ public final class VespaModel extends AbstractConfigProducerRoot implements Seri
/** Returns the global ranking constants of this */
public RankingConstants rankingConstants() { return rankingConstants; }
- public RankExpressionFiles rankExpressionFiles() { return rankExpressionFiles; }
+ public LargeRankExpressions rankExpressionFiles() { return largeRankExpressions; }
/** Creates a mutable model with no services instantiated */
public static VespaModel createIncomplete(DeployState deployState) throws IOException, SAXException {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java b/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java
index d6673cd49e9..b576d1cb5d2 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java
@@ -6,6 +6,7 @@ import com.google.inject.Inject;
import com.yahoo.component.Version;
import com.yahoo.component.provider.ComponentRegistry;
import com.yahoo.config.application.api.ApplicationPackage;
+import com.yahoo.config.application.api.ValidationOverrides;
import com.yahoo.config.model.ConfigModelRegistry;
import com.yahoo.config.model.MapConfigModelRegistry;
import com.yahoo.config.model.NullConfigModelRegistry;
@@ -23,6 +24,7 @@ import com.yahoo.config.provision.TransientException;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.config.VespaVersion;
import com.yahoo.vespa.model.application.validation.Validation;
+import com.yahoo.yolean.Exceptions;
import org.xml.sax.SAXException;
import java.io.IOException;
@@ -31,6 +33,7 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
+import java.util.logging.Level;
import java.util.logging.Logger;
/**
@@ -170,6 +173,13 @@ public class VespaModelFactory implements ModelFactory {
private List<ConfigChangeAction> validateModel(VespaModel model, DeployState deployState, ValidationParameters validationParameters) {
try {
return Validation.validate(model, validationParameters, deployState);
+ } catch (ValidationOverrides.ValidationException e) {
+ if (deployState.isHosted() && zone.environment().isManuallyDeployed())
+ deployState.getDeployLogger().logApplicationPackage(Level.WARNING,
+ "Auto-overriding validation which would be disallowed in production: " +
+ Exceptions.toMessageString(e));
+ else
+ rethrowUnlessIgnoreErrors(e, validationParameters.ignoreValidationErrors());
} catch (IllegalArgumentException | TransientException e) {
rethrowUnlessIgnoreErrors(e, validationParameters.ignoreValidationErrors());
} catch (Exception e) {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java
index 234892c5cc3..9dec27e17fe 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java
@@ -45,16 +45,13 @@ public class MetricsProxyContainer extends Container implements
final boolean isHostedVespa;
private final Optional<ClusterMembership> clusterMembership;
- private final ModelContext.FeatureFlags featureFlags;
private final MetricsProxyContainerCluster cluster;
private final String jvmGCOptions;
-
public MetricsProxyContainer(MetricsProxyContainerCluster cluster, HostResource host, int index, DeployState deployState) {
super(cluster, host.getHostname(), index, deployState);
this.isHostedVespa = deployState.isHosted();
this.clusterMembership = host.spec().membership();
- this.featureFlags = deployState.featureFlags();
this.cluster = cluster;
this.jvmGCOptions = deployState.getProperties().jvmGCOptions(clusterMembership.map(membership -> membership.cluster().type()));
setProp("clustertype", "admin");
@@ -157,7 +154,9 @@ public class MetricsProxyContainer extends Container implements
cluster.getConfig(builder);
if (clusterMembership.isPresent()) {
- int maxHeapSize = featureFlags.metricsProxyMaxHeapSizeInMb(clusterMembership.get().cluster().type());
+ int maxHeapSize = clusterMembership.get().cluster().type() == ClusterSpec.Type.admin
+ ? 128
+ : 256;
builder.jvm
.gcopts(jvmGCOptions)
.heapsize(maxHeapSize);
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
index c5aba8388b4..114a3e380ef 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
@@ -765,6 +765,15 @@ public class VespaMetricSet {
metrics.add(new Metric("vds.bouncer.clock_skew_aborts.count"));
+ metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.max"));
+ metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.sum"));
+ metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.count"));
+ metrics.add(new Metric("vds.mergethrottler.queuesize.max"));
+ metrics.add(new Metric("vds.mergethrottler.queuesize.sum"));
+ metrics.add(new Metric("vds.mergethrottler.queuesize.count"));
+ metrics.add(new Metric("vds.mergethrottler.bounced_due_to_back_pressure.rate"));
+ metrics.add(new Metric("vds.mergethrottler.locallyexecutedmerges.ok.rate"));
+ metrics.add(new Metric("vds.mergethrottler.mergechains.ok.rate"));
return metrics;
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/RankSetupValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/RankSetupValidator.java
index d87c6596fa4..52dccbe96b5 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/RankSetupValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/RankSetupValidator.java
@@ -8,7 +8,7 @@ import com.yahoo.log.InvalidLogFormatException;
import com.yahoo.log.LogMessage;
import com.yahoo.path.Path;
import com.yahoo.searchdefinition.OnnxModel;
-import com.yahoo.searchdefinition.RankExpressionFile;
+import com.yahoo.searchdefinition.RankExpressionBody;
import com.yahoo.vespa.config.search.core.RankingExpressionsConfig;
import com.yahoo.vespa.defaults.Defaults;
import com.yahoo.yolean.Exceptions;
@@ -165,7 +165,7 @@ public class RankSetupValidator extends Validator {
config.add(String.format("file[%d].path \"%s\"", config.size() / 2, modelPath));
}
- for (RankExpressionFile expr : db.getDerivedConfiguration().getSearch().rankExpressionFiles().asMap().values()) {
+ for (RankExpressionBody expr : db.getDerivedConfiguration().getSearch().rankExpressionFiles().asMap().values()) {
String modelPath = getFileRepositoryPath(expr.getFilePath(), expr.getFileReference());
config.add(String.format("file[%d].ref \"%s\"", config.size() / 2, expr.getFileReference()));
config.add(String.format("file[%d].path \"%s\"", config.size() / 2, modelPath));
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validation.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validation.java
index 55443d4b260..84c7a48a998 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validation.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validation.java
@@ -20,10 +20,12 @@ import com.yahoo.vespa.model.application.validation.change.GlobalDocumentChangeV
import com.yahoo.vespa.model.application.validation.change.IndexedSearchClusterChangeValidator;
import com.yahoo.vespa.model.application.validation.change.IndexingModeChangeValidator;
import com.yahoo.vespa.model.application.validation.change.NodeResourceChangeValidator;
+import com.yahoo.vespa.model.application.validation.change.RedundancyIncreaseValidator;
import com.yahoo.vespa.model.application.validation.change.ResourcesReductionValidator;
import com.yahoo.vespa.model.application.validation.change.StartupCommandChangeValidator;
import com.yahoo.vespa.model.application.validation.change.StreamingSearchClusterChangeValidator;
import com.yahoo.vespa.model.application.validation.first.AccessControlOnFirstDeploymentValidator;
+import com.yahoo.vespa.model.application.validation.first.RedundancyOnFirstDeploymentValidator;
import java.time.Instant;
import java.util.Arrays;
@@ -53,6 +55,7 @@ public class Validation {
* between the previous and current model
*
* @return a list of required changes needed to make this configuration live
+ * @throws ValidationOverrides.ValidationException if the change fails validation
*/
public static List<ConfigChangeAction> validate(VespaModel model, ValidationParameters validationParameters, DeployState deployState) {
if (validationParameters.checkRouting()) {
@@ -105,7 +108,8 @@ public class Validation {
new ClusterSizeReductionValidator(),
new ResourcesReductionValidator(),
new ContainerRestartValidator(),
- new NodeResourceChangeValidator()
+ new NodeResourceChangeValidator(),
+ new RedundancyIncreaseValidator()
};
List<ConfigChangeAction> actions = Arrays.stream(validators)
.flatMap(v -> v.validate(currentModel, nextModel, overrides, now).stream())
@@ -122,6 +126,7 @@ public class Validation {
private static void validateFirstTimeDeployment(VespaModel model, DeployState deployState) {
new AccessControlOnFirstDeploymentValidator().validate(model, deployState);
+ new RedundancyOnFirstDeploymentValidator().validate(model, deployState);
}
private static void deferConfigChangesForClustersToBeRestarted(List<ConfigChangeAction> actions, VespaModel model) {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validator.java
index f3bebbe7fb9..fee63828670 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validator.java
@@ -15,7 +15,7 @@ public abstract class Validator {
* Validates the input vespamodel
*
* @param model a VespaModel object
- * @param deployState The {@link DeployState} built from building the model
+ * @param deployState the {@link DeployState} built from building the model
*/
public abstract void validate(VespaModel model, DeployState deployState);
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ChangeValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ChangeValidator.java
index b720cc13f42..4222d22563d 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ChangeValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ChangeValidator.java
@@ -25,6 +25,7 @@ public interface ChangeValidator {
* @param now the instant to use as now
* @return a list of actions specifying what needs to be done in order to activate the new model.
* Return an empty list if nothing needs to be done
+ * @throws IllegalArgumentException if the change fails validation
*/
List<ConfigChangeAction> validate(VespaModel current, VespaModel next, ValidationOverrides overrides, Instant now);
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/RedundancyIncreaseValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/RedundancyIncreaseValidator.java
new file mode 100644
index 00000000000..dcf16222d35
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/RedundancyIncreaseValidator.java
@@ -0,0 +1,45 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.model.application.validation.change;
+
+import com.yahoo.config.application.api.ValidationId;
+import com.yahoo.config.application.api.ValidationOverrides;
+import com.yahoo.config.model.api.ConfigChangeAction;
+import com.yahoo.config.provision.Capacity;
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.vespa.model.VespaModel;
+import com.yahoo.vespa.model.content.cluster.ContentCluster;
+
+import java.time.Instant;
+import java.util.List;
+
+/**
+ * Checks that redundancy is not increased (without a validation override),
+ * as that may easily cause the cluster to run out of resources.
+ *
+ * @author bratseth
+ */
+public class RedundancyIncreaseValidator implements ChangeValidator {
+
+ @Override
+ public List<ConfigChangeAction> validate(VespaModel current, VespaModel next, ValidationOverrides overrides, Instant now) {
+ for (ContentCluster currentCluster : current.getContentClusters().values()) {
+ ContentCluster nextCluster = next.getContentClusters().get(currentCluster.getSubId());
+ if (nextCluster == null) continue;
+ if (redundancyOf(nextCluster) > redundancyOf(currentCluster)) {
+ overrides.invalid(ValidationId.redundancyIncrease,
+ "Increasing redundancy from " + redundancyOf(currentCluster) + " to " +
+ redundancyOf(nextCluster) + " in " + currentCluster + ". " +
+ "This is a safe operation but verify that you have room for a " +
+ redundancyOf(nextCluster) + "/" + redundancyOf(currentCluster) + "x increase " +
+ "in content size",
+ now);
+ }
+ }
+ return List.of();
+ }
+
+ private int redundancyOf(ContentCluster cluster) {
+ return cluster.redundancy().finalRedundancy();
+ }
+
+}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidator.java
new file mode 100644
index 00000000000..e6117299269
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidator.java
@@ -0,0 +1,44 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.model.application.validation.first;
+
+import com.yahoo.config.application.api.ValidationId;
+import com.yahoo.config.model.ConfigModelContext.ApplicationType;
+import com.yahoo.config.model.deploy.DeployState;
+import com.yahoo.vespa.model.VespaModel;
+import com.yahoo.vespa.model.application.validation.Validator;
+import com.yahoo.vespa.model.container.ApplicationContainerCluster;
+import com.yahoo.vespa.model.container.Container;
+import com.yahoo.vespa.model.container.ContainerCluster;
+import com.yahoo.vespa.model.content.cluster.ContentCluster;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static com.yahoo.collections.CollectionUtil.mkString;
+import static com.yahoo.config.provision.InstanceName.defaultName;
+import static com.yahoo.vespa.model.container.http.AccessControl.hasHandlerThatNeedsProtection;
+
+/**
+ * Validates that applications in prod zones do not have redundancy 1 (without a validation override).
+ *
+ * @author bratseth
+ */
+public class RedundancyOnFirstDeploymentValidator extends Validator {
+
+ @Override
+ public void validate(VespaModel model, DeployState deployState) {
+ if ( ! deployState.isHosted()) return;
+ if ( ! deployState.zone().environment().isProduction()) return;
+
+ for (ContentCluster cluster : model.getContentClusters().values()) {
+ if (cluster.redundancy().finalRedundancy() == 1
+ && cluster.redundancy().totalNodes() > cluster.redundancy().groups())
+ deployState.validationOverrides().invalid(ValidationId.redundancyOne,
+ cluster + " has redundancy 1, which will cause it to lose data " +
+ "if a node fails. This requires an override on first deployment " +
+ "in a production zone",
+ deployState.now());
+ }
+ }
+
+}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/ClusterResourceLimits.java b/config-model/src/main/java/com/yahoo/vespa/model/content/ClusterResourceLimits.java
index 70f2acd3c7b..638864d85bb 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/ClusterResourceLimits.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/ClusterResourceLimits.java
@@ -37,13 +37,16 @@ public class ClusterResourceLimits {
private final boolean enableFeedBlockInDistributor;
private final boolean hostedVespa;
+ private final boolean throwIfSpecified;
private final DeployLogger deployLogger;
+
private ResourceLimits.Builder ctrlBuilder = new ResourceLimits.Builder();
private ResourceLimits.Builder nodeBuilder = new ResourceLimits.Builder();
- public Builder(boolean enableFeedBlockInDistributor, boolean hostedVespa, DeployLogger deployLogger) {
+ public Builder(boolean enableFeedBlockInDistributor, boolean hostedVespa, boolean throwIfSpecified, DeployLogger deployLogger) {
this.enableFeedBlockInDistributor = enableFeedBlockInDistributor;
this.hostedVespa = hostedVespa;
+ this.throwIfSpecified = throwIfSpecified;
this.deployLogger = deployLogger;
}
@@ -58,7 +61,7 @@ public class ClusterResourceLimits {
private ResourceLimits.Builder createBuilder(ModelElement element) {
return element == null
? new ResourceLimits.Builder()
- : DomResourceLimitsBuilder.createBuilder(element, hostedVespa, deployLogger);
+ : DomResourceLimitsBuilder.createBuilder(element, hostedVespa, throwIfSpecified, deployLogger);
}
public void setClusterControllerBuilder(ResourceLimits.Builder builder) {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/Content.java b/config-model/src/main/java/com/yahoo/vespa/model/content/Content.java
index ea52f9689ff..4a8002ba3dc 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/Content.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/Content.java
@@ -111,13 +111,15 @@ public class Content extends ConfigModel {
return null;
}
- private static void checkThatExplicitIndexingChainInheritsCorrectly(ComponentRegistry<DocprocChain> allChains, ChainSpecification chainSpec) {
+ private static void checkThatExplicitIndexingChainInheritsCorrectly(ComponentRegistry<DocprocChain> allChains,
+ ChainSpecification chainSpec) {
ChainSpecification.Inheritance inheritance = chainSpec.inheritance;
for (ComponentSpecification componentSpec : inheritance.chainSpecifications) {
ChainSpecification parentSpec = getChainSpec(allChains, componentSpec);
if (containsIndexingChain(allChains, parentSpec)) return;
}
- throw new IllegalArgumentException("Docproc chain '" + chainSpec.componentId + "' does not inherit from 'indexing' chain.");
+ throw new IllegalArgumentException("Docproc chain '" + chainSpec.componentId +
+ "' must inherit from the 'indexing' chain");
}
public static List<Content> getContent(ConfigModelRepo pc) {
@@ -261,9 +263,17 @@ public class Content extends ConfigModel {
if (cluster.hasExplicitIndexingChain()) {
indexingChain = allChains.getComponent(cluster.getIndexingChainName());
if (indexingChain == null) {
- throw new RuntimeException("Indexing cluster " + cluster.getClusterName() + " refers to docproc " +
- "chain " + cluster.getIndexingChainName() + " for indexing, which does not exist.");
- } else {
+ throw new IllegalArgumentException(cluster + " refers to docproc " +
+ "chain '" + cluster.getIndexingChainName() +
+ "' for indexing, but this chain does not exist");
+ }
+ else if (indexingChain.getId().getName().equals("default")) {
+ throw new IllegalArgumentException(cluster + " specifies the chain " +
+ "'default' as indexing chain. As the 'default' chain is run by default, " +
+ "using it as the indexing chain will run it twice. " +
+ "Use a different name for the indexing chain.");
+ }
+ else {
checkThatExplicitIndexingChainInheritsCorrectly(allChains, indexingChain.getChainSpecification());
}
} else {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
index 51949e78838..efb47e97ccb 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
@@ -316,12 +316,8 @@ public class ContentSearchCluster extends AbstractConfigProducer<SearchCluster>
}
public void handleRedundancy(Redundancy redundancy) {
- if (hasIndexedCluster()) {
- if (usesHierarchicDistribution()) {
- indexedCluster.setMaxNodesDownPerFixedRow((redundancy.effectiveFinalRedundancy() / groupToSpecMap.size()) - 1);
- }
+ if (hasIndexedCluster())
indexedCluster.setSearchableCopies(redundancy.readyCopies());
- }
this.redundancy = redundancy;
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/DispatchTuning.java b/config-model/src/main/java/com/yahoo/vespa/model/content/DispatchTuning.java
index 3b694f8986c..786d032578f 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/DispatchTuning.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/DispatchTuning.java
@@ -15,7 +15,6 @@ public class DispatchTuning {
private final Integer maxHitsPerPartition;
private DispatchPolicy dispatchPolicy;
- private final Double minGroupCoverage;
private final Double minActiveDocsCoverage;
public Double getTopkProbability() {
@@ -27,7 +26,6 @@ public class DispatchTuning {
private DispatchTuning(Builder builder) {
maxHitsPerPartition = builder.maxHitsPerPartition;
dispatchPolicy = builder.dispatchPolicy;
- minGroupCoverage = builder.minGroupCoverage;
minActiveDocsCoverage = builder.minActiveDocsCoverage;
topkProbability = builder.topKProbability;
}
@@ -41,9 +39,6 @@ public class DispatchTuning {
@SuppressWarnings("unused")
public void setDispatchPolicy(DispatchPolicy dispatchPolicy) { this.dispatchPolicy = dispatchPolicy; }
- /** Returns the percentage of nodes in a group which must be up for that group to receive queries */
- public Double getMinGroupCoverage() { return minGroupCoverage; }
-
/** Returns the percentage of documents which must be available in a group for that group to receive queries */
public Double getMinActiveDocsCoverage() { return minActiveDocsCoverage; }
@@ -51,7 +46,6 @@ public class DispatchTuning {
private Integer maxHitsPerPartition;
private DispatchPolicy dispatchPolicy;
- private Double minGroupCoverage;
private Double minActiveDocsCoverage;
private Double topKProbability;
@@ -81,11 +75,6 @@ public class DispatchTuning {
}
}
- public Builder setMinGroupCoverage(Double minGroupCoverage) {
- this.minGroupCoverage = minGroupCoverage;
- return this;
- }
-
public Builder setMinActiveDocsCoverage(Double minCoverage) {
this.minActiveDocsCoverage = minCoverage;
return this;
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
index e0d311e6df6..c298b7f5f5a 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
@@ -123,6 +123,7 @@ public class ContentCluster extends AbstractConfigProducer<AbstractConfigProduce
boolean enableFeedBlockInDistributor = deployState.getProperties().featureFlags().enableFeedBlockInDistributor();
var resourceLimits = new ClusterResourceLimits.Builder(enableFeedBlockInDistributor,
stateIsHosted(deployState),
+ deployState.featureFlags().throwIfResourceLimitsSpecified(),
deployState.getDeployLogger())
.build(contentElement);
c.clusterControllerConfig = new ClusterControllerConfig.Builder(getClusterId(contentElement),
@@ -674,4 +675,9 @@ public class ContentCluster extends AbstractConfigProducer<AbstractConfigProduce
// TODO
}
+ @Override
+ public String toString() {
+ return "content cluster '" + clusterId + "'";
+ }
+
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomResourceLimitsBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomResourceLimitsBuilder.java
index 9f4852629d0..37adb73bc15 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomResourceLimitsBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomResourceLimitsBuilder.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.content.cluster;
import com.yahoo.config.application.api.DeployLogger;
@@ -14,18 +14,22 @@ import java.util.logging.Level;
*/
public class DomResourceLimitsBuilder {
- public static ResourceLimits.Builder createBuilder(ModelElement contentXml, boolean hostedVespa, DeployLogger deployLogger) {
+ public static ResourceLimits.Builder createBuilder(ModelElement contentXml,
+ boolean hostedVespa,
+ boolean throwIfSpecified,
+ DeployLogger deployLogger) {
ResourceLimits.Builder builder = new ResourceLimits.Builder();
ModelElement resourceLimits = contentXml.child("resource-limits");
if (resourceLimits == null) { return builder; }
if (hostedVespa) {
- deployLogger.logApplicationPackage(Level.WARNING, "Element " + resourceLimits +
- " is not allowed, default limits will be used");
- // TODO: Throw exception when we are sure nobody is using this
- //throw new IllegalArgumentException("Element " + element + " is not allowed to be set, default limits will be used");
- return builder;
+ String message = "Element '" + resourceLimits + "' is not allowed to be set";
+ if (throwIfSpecified)
+ throw new IllegalArgumentException(message);
+ else
+ deployLogger.logApplicationPackage(Level.WARNING, message);
}
+
if (resourceLimits.child("disk") != null) {
builder.setDiskLimit(resourceLimits.childAsDouble("disk"));
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomTuningDispatchBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomTuningDispatchBuilder.java
index 64911acae1f..f429e40baa9 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomTuningDispatchBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomTuningDispatchBuilder.java
@@ -25,12 +25,14 @@ public class DomTuningDispatchBuilder {
builder.setMaxHitsPerPartition(dispatchElement.childAsInteger("max-hits-per-partition"));
builder.setTopKProbability(dispatchElement.childAsDouble("top-k-probability"));
builder.setDispatchPolicy(dispatchElement.childAsString("dispatch-policy"));
- builder.setMinGroupCoverage(dispatchElement.childAsDouble("min-group-coverage"));
builder.setMinActiveDocsCoverage(dispatchElement.childAsDouble("min-active-docs-coverage"));
+ if (dispatchElement.child("min-group-coverage") != null)
+ logger.logApplicationPackage(Level.WARNING, "Attribute 'min-group-coverage' is deprecated and ignored: " +
+ "Use min-active-docs-coverage instead.");
if (dispatchElement.child("use-local-node") != null)
logger.logApplicationPackage(Level.WARNING, "Attribute 'use-local-node' is deprecated and ignored: " +
- "The local node will automatically be preferred when appropriate.");
+ "The local node will automatically be preferred when appropriate.");
return builder.build();
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorServerProducer.java b/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorServerProducer.java
index 40a634fbfe8..e89d45e8b83 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorServerProducer.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorServerProducer.java
@@ -6,6 +6,8 @@ import com.yahoo.vespa.config.content.core.StorServerConfig;
import com.yahoo.vespa.model.content.cluster.ContentCluster;
import com.yahoo.vespa.model.builder.xml.dom.ModelElement;
+import java.util.Optional;
+
/**
* Serves config for stor-server for storage clusters (clusters of storage nodes).
*/
@@ -14,7 +16,7 @@ public class StorServerProducer implements StorServerConfig.Producer {
StorServerProducer build(ModelContext.Properties properties, ModelElement element) {
ModelElement tuning = element.child("tuning");
- StorServerProducer producer = new StorServerProducer(ContentCluster.getClusterId(element));
+ StorServerProducer producer = new StorServerProducer(ContentCluster.getClusterId(element), properties.featureFlags());
if (tuning == null) return producer;
ModelElement merges = tuning.child("merges");
@@ -32,11 +34,15 @@ public class StorServerProducer implements StorServerConfig.Producer {
private Integer bucketDBStripeBits;
private StorServerProducer setMaxMergesPerNode(Integer value) {
- maxMergesPerNode = value;
+ if (value != null) {
+ maxMergesPerNode = value;
+ }
return this;
}
private StorServerProducer setMaxQueueSize(Integer value) {
- queueSize = value;
+ if (value != null) {
+ queueSize = value;
+ }
return this;
}
private StorServerProducer setBucketDBStripeBits(Integer value) {
@@ -44,8 +50,10 @@ public class StorServerProducer implements StorServerConfig.Producer {
return this;
}
- public StorServerProducer(String clusterName) {
+ StorServerProducer(String clusterName, ModelContext.FeatureFlags featureFlags) {
this.clusterName = clusterName;
+ maxMergesPerNode = featureFlags.maxConcurrentMergesPerNode();
+ queueSize = featureFlags.maxMergeQueueSize();
}
@Override
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributor.java b/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributor.java
index 5bb57f4ff6c..d8da911e32f 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributor.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributor.java
@@ -8,6 +8,7 @@ import com.yahoo.config.application.api.FileRegistry;
import com.yahoo.vespa.model.ConfigProxy;
import com.yahoo.vespa.model.Host;
+import java.nio.ByteBuffer;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
@@ -58,6 +59,16 @@ public class FileDistributor {
return addFileReference(fileRegistry.addUri(uri), host);
}
+ /**
+ * Adds the given blob to the associated application package's registry of files and marks the file
+ * for distribution to the given host.
+ *
+ * @return the reference to the file, created by the application package
+ */
+ public FileReference sendBlobToHost(ByteBuffer blob, Host host) {
+ return addFileReference(fileRegistry.addBlob(blob), host);
+ }
+
private FileReference addFileReference(FileReference reference, Host host) {
filesToHosts.computeIfAbsent(reference, k -> new HashSet<>()).add(host);
return reference;
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/search/DispatchGroup.java b/config-model/src/main/java/com/yahoo/vespa/model/search/DispatchGroup.java
index 384f77737c1..3e70bda216b 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/search/DispatchGroup.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/search/DispatchGroup.java
@@ -54,19 +54,18 @@ public class DispatchGroup {
public int getSearchableCopies() { return sc.getSearchableCopies(); }
- public int getMaxNodesDownPerFixedRow() {
- return sc.getMaxNodesDownPerFixedRow();
- }
-
static class Iterator implements java.util.Iterator<SearchInterface> {
+
private java.util.Iterator<Map<Integer, SearchInterface>> it1;
private java.util.Iterator<SearchInterface> it2;
+
Iterator(Map<Integer, Map<Integer, SearchInterface> > s) {
it1 = s.values().iterator();
if (it1.hasNext()) {
it2 = it1.next().values().iterator();
}
}
+
@Override
public boolean hasNext() {
if (it2 == null) {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java
index 99f1b3ad34e..c99549e82e9 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java
@@ -46,7 +46,6 @@ public class IndexedSearchCluster extends SearchCluster
private String routingSelector = null;
private final List<DocumentDatabase> documentDbs = new LinkedList<>();
private final UnionConfiguration unionCfg;
- private int maxNodesDownPerFixedRow = 0;
private int searchableCopies = 1;
@@ -261,13 +260,6 @@ public class IndexedSearchCluster extends SearchCluster
return false;
}
- int getMaxNodesDownPerFixedRow() {
- return maxNodesDownPerFixedRow;
- }
-
- public void setMaxNodesDownPerFixedRow(int value) {
- maxNodesDownPerFixedRow = value;
- }
public int getSearchableCopies() {
return searchableCopies;
}
@@ -305,8 +297,6 @@ public class IndexedSearchCluster extends SearchCluster
}
if (tuning.dispatch.getMinActiveDocsCoverage() != null)
builder.minActivedocsPercentage(tuning.dispatch.getMinActiveDocsCoverage());
- if (tuning.dispatch.getMinGroupCoverage() != null)
- builder.minGroupCoverage(tuning.dispatch.getMinGroupCoverage());
if (tuning.dispatch.getDispatchPolicy() != null) {
switch (tuning.dispatch.getDispatchPolicy()) {
case ADAPTIVE:
@@ -320,7 +310,6 @@ public class IndexedSearchCluster extends SearchCluster
if (tuning.dispatch.getMaxHitsPerPartition() != null)
builder.maxHitsPerNode(tuning.dispatch.getMaxHitsPerPartition());
- builder.maxNodesDownPerGroup(rootDispatch.getMaxNodesDownPerFixedRow());
builder.searchableCopies(rootDispatch.getSearchableCopies());
if (searchCoverage != null) {
if (searchCoverage.getMinimum() != null)
@@ -336,6 +325,11 @@ public class IndexedSearchCluster extends SearchCluster
@Override
public int getRowBits() { return 8; }
+ @Override
+ public String toString() {
+ return "Indexing cluster '" + getClusterName() + "'";
+ }
+
/**
* Class used to retrieve combined configuration from multiple document databases.
* It is not a {@link com.yahoo.config.ConfigInstance.Producer} of those configs,
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java b/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java
index 5e7ac0cabec..52edec7114b 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java
@@ -12,6 +12,7 @@ import com.yahoo.vespa.config.ConfigPayloadBuilder;
import com.yahoo.vespa.model.AbstractService;
import java.io.Serializable;
+import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
@@ -61,6 +62,20 @@ public class FileSender implements Serializable {
return fileref;
}
+ public static FileReference sendBlobToServices(ByteBuffer blob, Collection<? extends AbstractService> services) {
+ if (services.isEmpty()) {
+ throw new IllegalStateException("No service instances. Probably a standalone cluster setting up <nodes> " +
+ "using 'count' instead of <node> tags.");
+ }
+
+ FileReference fileref = null;
+ for (AbstractService service : services) {
+ // The same reference will be returned from each call.
+ fileref = service.sendBlob(blob);
+ }
+ return fileref;
+ }
+
/**
* Sends all user configured files for a producer to all given services.
*/
diff --git a/config-model/src/main/resources/schema/content.rnc b/config-model/src/main/resources/schema/content.rnc
index 7f52eae6da8..36db55c206c 100644
--- a/config-model/src/main/resources/schema/content.rnc
+++ b/config-model/src/main/resources/schema/content.rnc
@@ -83,7 +83,7 @@ ClusterControllerTuning = element cluster-controller {
DispatchTuning = element dispatch {
element max-hits-per-partition { xsd:nonNegativeInteger }? &
element dispatch-policy { string "round-robin" | string "adaptive" | string "random" }? &
- element min-group-coverage { xsd:double }? &
+ element min-group-coverage { xsd:double }? & # TODO: Ignored, remove on Vespa 8
element min-active-docs-coverage { xsd:double }? &
element top-k-probability { xsd:double }? &
element use-local-node { string "true" | string "false" }?
diff --git a/config-model/src/test/derived/namecollision/collision.sd b/config-model/src/test/derived/namecollision/collision.sd
new file mode 100644
index 00000000000..43dd4830204
--- /dev/null
+++ b/config-model/src/test/derived/namecollision/collision.sd
@@ -0,0 +1,8 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+search collision {
+
+ document collision {
+
+ }
+
+}
diff --git a/config-model/src/test/derived/namecollision/collisionstruct.sd b/config-model/src/test/derived/namecollision/collisionstruct.sd
new file mode 100644
index 00000000000..c98efb0b582
--- /dev/null
+++ b/config-model/src/test/derived/namecollision/collisionstruct.sd
@@ -0,0 +1,15 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+search collisionstruct {
+
+ document collisionstruct {
+
+ struct collision {
+ }
+
+ field structarray type array<collision> {
+ indexing: summary
+ }
+
+ }
+
+}
diff --git a/config-model/src/test/derived/namecollision/documentmanager.cfg b/config-model/src/test/derived/namecollision/documentmanager.cfg
new file mode 100644
index 00000000000..8d0d89dde35
--- /dev/null
+++ b/config-model/src/test/derived/namecollision/documentmanager.cfg
@@ -0,0 +1,55 @@
+enablecompression false
+datatype[].id 1381038251
+datatype[].structtype[].name "position"
+datatype[].structtype[].version 0
+datatype[].structtype[].compresstype NONE
+datatype[].structtype[].compresslevel 0
+datatype[].structtype[].compressthreshold 95
+datatype[].structtype[].compressminsize 800
+datatype[].structtype[].field[].name "x"
+datatype[].structtype[].field[].datatype 0
+datatype[].structtype[].field[].detailedtype ""
+datatype[].structtype[].field[].name "y"
+datatype[].structtype[].field[].datatype 0
+datatype[].structtype[].field[].detailedtype ""
+datatype[].id -379118517
+datatype[].structtype[].name "collision.header"
+datatype[].structtype[].version 0
+datatype[].structtype[].compresstype NONE
+datatype[].structtype[].compresslevel 0
+datatype[].structtype[].compressthreshold 95
+datatype[].structtype[].compressminsize 800
+datatype[].id 1557022836
+datatype[].documenttype[].name "collision"
+datatype[].documenttype[].version 0
+datatype[].documenttype[].inherits[].name "document"
+datatype[].documenttype[].inherits[].version 0
+datatype[].documenttype[].headerstruct -379118517
+datatype[].documenttype[].bodystruct 0
+datatype[].id 1557022836
+datatype[].structtype[].name "collision"
+datatype[].structtype[].version 0
+datatype[].structtype[].compresstype NONE
+datatype[].structtype[].compresslevel 0
+datatype[].structtype[].compressthreshold 95
+datatype[].structtype[].compressminsize 800
+datatype[].id -1730522993
+datatype[].arraytype[].datatype 1557022836
+datatype[].id -1270379114
+datatype[].structtype[].name "collisionstruct.header"
+datatype[].structtype[].version 0
+datatype[].structtype[].compresstype NONE
+datatype[].structtype[].compresslevel 0
+datatype[].structtype[].compressthreshold 95
+datatype[].structtype[].compressminsize 800
+datatype[].structtype[].field[].name "structarray"
+datatype[].structtype[].field[].datatype -1730522993
+datatype[].structtype[].field[].detailedtype ""
+datatype[].id -1723079287
+datatype[].documenttype[].name "collisionstruct"
+datatype[].documenttype[].version 0
+datatype[].documenttype[].inherits[].name "document"
+datatype[].documenttype[].inherits[].version 0
+datatype[].documenttype[].headerstruct -1270379114
+datatype[].documenttype[].bodystruct 0
+datatype[].documenttype[].fieldsets{[]}.fields[] "structarray"
diff --git a/config-model/src/test/java/com/yahoo/config/model/MockModelContext.java b/config-model/src/test/java/com/yahoo/config/model/MockModelContext.java
index f8469aa6fa1..59af3193b79 100644
--- a/config-model/src/test/java/com/yahoo/config/model/MockModelContext.java
+++ b/config-model/src/test/java/com/yahoo/config/model/MockModelContext.java
@@ -20,8 +20,8 @@ import com.yahoo.config.model.test.MockApplicationPackage;
import java.util.Optional;
/**
-* @author hmusum
-*/
+ * @author hmusum
+ */
public class MockModelContext implements ModelContext {
private final ApplicationPackage applicationPackage;
@@ -82,4 +82,5 @@ public class MockModelContext implements ModelContext {
public Properties properties() {
return new TestProperties();
}
+
}
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/RankProfileTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/RankProfileTestCase.java
index 91e8640308a..d5ef3779493 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/RankProfileTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/RankProfileTestCase.java
@@ -102,7 +102,8 @@ public class RankProfileTestCase extends SchemaTestCase {
assertEquals(8, rankProfile.getNumThreadsPerSearch());
assertEquals(70, rankProfile.getMinHitsPerThread());
assertEquals(1200, rankProfile.getNumSearchPartitions());
- RawRankProfile rawRankProfile = new RawRankProfile(rankProfile, new QueryProfileRegistry(), new ImportedMlModels(), attributeFields, deployProperties);
+ RawRankProfile rawRankProfile = new RawRankProfile(rankProfile, new LargeRankExpressions(), new QueryProfileRegistry(),
+ new ImportedMlModels(), attributeFields, deployProperties);
if (expectedTermwiseLimit != null) {
assertTrue(findProperty(rawRankProfile.configProperties(), "vespa.matching.termwise_limit").isPresent());
assertEquals(String.valueOf(expectedTermwiseLimit), findProperty(rawRankProfile.configProperties(), "vespa.matching.termwise_limit").get());
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/derived/NameCollisionTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/derived/NameCollisionTestCase.java
new file mode 100644
index 00000000000..fda9e6327ce
--- /dev/null
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/derived/NameCollisionTestCase.java
@@ -0,0 +1,20 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.searchdefinition.derived;
+
+import org.junit.Test;
+
+/**
+ * Verifies that a struct in a document type is preferred over another document type
+ * of the same name.
+ *
+ * @author bratseth
+ */
+public class NameCollisionTestCase extends AbstractExportingTestCase {
+
+ @Test
+ public void testNameCollision() throws Exception {
+ assertCorrectDeriving("namecollision", "collisionstruct", new TestableDeployLogger());
+ }
+
+}
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionWithTransformerTokensTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionWithTransformerTokensTestCase.java
index 2d8630e3ff7..e285d796882 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionWithTransformerTokensTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionWithTransformerTokensTestCase.java
@@ -91,7 +91,7 @@ public class RankingExpressionWithTransformerTokensTestCase {
searchBuilder.build();
Search search = searchBuilder.getSearch();
RankProfile rp = rankProfileRegistry.get(search, "my_profile");
- return new RankProfileTransformContext(rp, queryProfileRegistry, Collections.EMPTY_MAP, null, Collections.EMPTY_MAP, Collections.EMPTY_MAP);
+ return new RankProfileTransformContext(rp, queryProfileRegistry, Collections.emptyMap(), null, Collections.emptyMap(), Collections.emptyMap());
}
}
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionsTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionsTestCase.java
index 85ef70132b5..021d2931414 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionsTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionsTestCase.java
@@ -7,6 +7,7 @@ import com.yahoo.config.model.api.ModelContext;
import com.yahoo.config.model.application.provider.BaseDeployLogger;
import com.yahoo.config.model.deploy.TestProperties;
import com.yahoo.search.query.profile.QueryProfileRegistry;
+import com.yahoo.searchdefinition.LargeRankExpressions;
import com.yahoo.searchdefinition.RankProfile;
import com.yahoo.searchdefinition.RankProfileRegistry;
import com.yahoo.searchdefinition.SchemaTestCase;
@@ -49,7 +50,7 @@ public class RankingExpressionsTestCase extends SchemaTestCase {
functions.get("artistmatch").function().getBody().getRoot().toString());
assertEquals(0, functions.get("artistmatch").function().arguments().size());
- RawRankProfile rawRankProfile = new RawRankProfile(functionsRankProfile, new QueryProfileRegistry(),
+ RawRankProfile rawRankProfile = new RawRankProfile(functionsRankProfile, new LargeRankExpressions(), new QueryProfileRegistry(),
new ImportedMlModels(), new AttributeFields(search), deployProperties);
List<Pair<String, String>> rankProperties = rawRankProfile.configProperties();
assertEquals(6, rankProperties.size());
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/VespaModelFactoryTest.java b/config-model/src/test/java/com/yahoo/vespa/model/VespaModelFactoryTest.java
index e3e0edd7896..a3e3a768b05 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/VespaModelFactoryTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/VespaModelFactoryTest.java
@@ -19,7 +19,6 @@ import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.ProvisionLogger;
-import com.yahoo.vespa.model.builder.xml.dom.NodesSpecification;
import org.junit.Before;
import org.junit.Test;
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerClusterTest.java
index 1a7258db7e2..413daefdf75 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerClusterTest.java
@@ -1,8 +1,4 @@
-// Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-/*
- * Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
- */
-
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.admin.metricsproxy;
import ai.vespa.metricsproxy.http.application.ApplicationMetricsHandler;
@@ -13,14 +9,10 @@ import ai.vespa.metricsproxy.http.yamas.YamasHandler;
import ai.vespa.metricsproxy.metric.dimensions.ApplicationDimensionsConfig;
import ai.vespa.metricsproxy.metric.dimensions.PublicDimensions;
import com.yahoo.component.ComponentSpecification;
-import com.yahoo.config.model.api.HostInfo;
-import com.yahoo.config.model.deploy.DeployState;
-import com.yahoo.config.model.deploy.TestProperties;
import com.yahoo.config.model.test.MockApplicationPackage;
import com.yahoo.config.provision.Zone;
import com.yahoo.container.core.ApplicationMetadataConfig;
import com.yahoo.container.di.config.PlatformBundlesConfig;
-import com.yahoo.search.config.QrStartConfig;
import com.yahoo.vespa.model.VespaModel;
import com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyContainerCluster.AppDimensionNames;
import com.yahoo.vespa.model.container.component.Component;
@@ -40,14 +32,11 @@ import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.T
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getApplicationDimensionsConfig;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getMetricsNodesConfig;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getModel;
-import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getQrStartConfig;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.servicesWithAdminOnly;
-import static com.yahoo.vespa.model.container.ContainerCluster.G1GC;
import static java.util.stream.Collectors.toList;
import static org.hamcrest.CoreMatchers.endsWith;
import static org.hamcrest.CoreMatchers.hasItem;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
@@ -77,42 +66,6 @@ public class MetricsProxyContainerClusterTest {
assertEquals(MockApplicationPackage.DEPLOYED_BY_USER, config.user());
}
- private void metrics_proxy_has_expected_qr_start_options(MetricsProxyModelTester.TestMode mode) {
- metrics_proxy_has_expected_qr_start_options(mode, 0);
- }
-
- private void metrics_proxy_has_expected_qr_start_options(MetricsProxyModelTester.TestMode mode, int maxHeapForAdminClusterNodes) {
- DeployState.Builder builder = new DeployState.Builder();
- if (maxHeapForAdminClusterNodes > 0) {
- builder.properties(new TestProperties().metricsProxyMaxHeapSizeInMb(maxHeapForAdminClusterNodes));
- }
-
- VespaModel model = getModel(servicesWithAdminOnly(), mode, builder);
- for (HostInfo host : model.getHosts()) {
- QrStartConfig qrStartConfig = getQrStartConfig(model, host.getHostname());
- assertEquals(32, qrStartConfig.jvm().minHeapsize());
- assertEquals(maxHeapForAdminClusterNodes > 0 ? maxHeapForAdminClusterNodes : 512, qrStartConfig.jvm().heapsize());
- assertEquals(0, qrStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory());
- assertEquals(2, qrStartConfig.jvm().availableProcessors());
- assertFalse(qrStartConfig.jvm().verbosegc());
- assertEquals(G1GC, qrStartConfig.jvm().gcopts());
- assertEquals(512, qrStartConfig.jvm().stacksize());
- assertEquals(0, qrStartConfig.jvm().directMemorySizeCache());
- assertEquals(32, qrStartConfig.jvm().compressedClassSpaceSize());
- assertEquals(75, qrStartConfig.jvm().baseMaxDirectMemorySize());
- }
- }
-
- @Test
- public void metrics_proxy_has_expected_qr_start_options() {
- metrics_proxy_has_expected_qr_start_options(self_hosted);
- metrics_proxy_has_expected_qr_start_options(hosted);
-
- // With max heap from feature flag
- metrics_proxy_has_expected_qr_start_options(self_hosted, 123);
- metrics_proxy_has_expected_qr_start_options(hosted, 123);
- }
-
@Test
public void http_handlers_are_set_up() {
VespaModel model = getModel(servicesWithAdminOnly(), self_hosted);
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyModelTester.java b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyModelTester.java
index 7c31802fb4d..01167e40411 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyModelTester.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyModelTester.java
@@ -15,8 +15,6 @@ import com.yahoo.vespa.model.admin.monitoring.Metric;
import com.yahoo.vespa.model.admin.monitoring.MetricsConsumer;
import com.yahoo.vespa.model.test.VespaModelTester;
-import java.util.Optional;
-
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.TestMode.hosted;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.TestMode.self_hosted;
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ContentTypeRemovalValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ContentTypeRemovalValidatorTest.java
index c8fdb8348c3..45f3b0fcf60 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ContentTypeRemovalValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ContentTypeRemovalValidatorTest.java
@@ -45,14 +45,6 @@ public class ContentTypeRemovalValidatorTest {
tester.deploy(previous, getServices("book"), Environment.prod, removalOverride); // Allowed due to override
}
- @Test
- public void testNoOverrideNeededinDev() {
- ValidationTester tester = new ValidationTester();
-
- VespaModel previous = tester.deploy(null, getServices("music"), Environment.prod, null).getFirst();
- tester.deploy(previous, getServices("book"), Environment.dev, null);
- }
-
private static String getServices(String documentType) {
return "<services version='1.0'>" +
" <content id='test' version='1.0'>" +
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/RedundancyIncreaseValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/RedundancyIncreaseValidatorTest.java
new file mode 100644
index 00000000000..ddeada8b33f
--- /dev/null
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/RedundancyIncreaseValidatorTest.java
@@ -0,0 +1,64 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.model.application.validation.change;
+
+import com.yahoo.config.application.api.ValidationId;
+import com.yahoo.config.application.api.ValidationOverrides;
+import com.yahoo.config.provision.Environment;
+import com.yahoo.vespa.model.VespaModel;
+import com.yahoo.vespa.model.application.validation.ValidationTester;
+import com.yahoo.yolean.Exceptions;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+/**
+ * @author bratseth
+ */
+public class RedundancyIncreaseValidatorTest {
+
+ private final ValidationTester tester = new ValidationTester(7);
+
+ @Test
+ public void testRedundancyIncreaseValidation() {
+ VespaModel previous = tester.deploy(null, getServices(2), Environment.prod, null).getFirst();
+ try {
+ tester.deploy(previous, getServices(3), Environment.prod, null);
+ fail("Expected exception due to redundancy increase");
+ }
+ catch (IllegalArgumentException expected) {
+ assertEquals("redundancy-increase: " +
+ "Increasing redundancy from 2 to 3 in 'content cluster 'contentClusterId'. " +
+ "This is a safe operation but verify that you have room for a 3/2x increase in content size. " +
+ ValidationOverrides.toAllowMessage(ValidationId.redundancyIncrease),
+ Exceptions.toMessageString(expected));
+ }
+ }
+
+ @Test
+ public void testOverridingContentRemovalValidation() {
+ VespaModel previous = tester.deploy(null, getServices(2), Environment.prod, null).getFirst();
+ tester.deploy(previous, getServices(3), Environment.prod, redundancyIncreaseOverride); // Allowed due to override
+ }
+
+ private static String getServices(int redundancy) {
+ return "<services version='1.0'>" +
+ " <content id='contentClusterId' version='1.0'>" +
+ " <redundancy>" + redundancy + "</redundancy>" +
+ " <engine>" +
+ " <proton/>" +
+ " </engine>" +
+ " <documents>" +
+ " <document type='music' mode='index'/>" +
+ " </documents>" +
+ " <nodes count='3'/>" +
+ " </content>" +
+ "</services>";
+ }
+
+ private static final String redundancyIncreaseOverride =
+ "<validation-overrides>\n" +
+ " <allow until='2000-01-03'>redundancy-increase</allow>\n" +
+ "</validation-overrides>\n";
+
+}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidatorTest.java
new file mode 100644
index 00000000000..d59b2f7227c
--- /dev/null
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidatorTest.java
@@ -0,0 +1,64 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.model.application.validation.first;
+
+import com.yahoo.config.application.api.ValidationId;
+import com.yahoo.config.application.api.ValidationOverrides;
+import com.yahoo.config.model.deploy.TestProperties;
+import com.yahoo.config.provision.Environment;
+import com.yahoo.vespa.model.application.validation.ValidationTester;
+import com.yahoo.yolean.Exceptions;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+/**
+ * @author bratseth
+ */
+public class RedundancyOnFirstDeploymentValidatorTest {
+
+ private final ValidationTester tester = new ValidationTester(7, false,
+ new TestProperties().setFirstTimeDeployment(true)
+ .setHostedVespa(true));
+
+ @Test
+ public void testRedundancyOnFirstDeploymentValidation() {
+ try {
+ tester.deploy(null, getServices(1), Environment.prod, null);
+ fail("Expected exception due to redundancy 1");
+ }
+ catch (IllegalArgumentException expected) {
+ assertEquals("redundancy-one: " +
+ "content cluster 'contentClusterId' has redundancy 1, which will cause it to lose data if a node fails. " +
+ "This requires an override on first deployment in a production zone. " +
+ ValidationOverrides.toAllowMessage(ValidationId.redundancyOne),
+ Exceptions.toMessageString(expected));
+ }
+ }
+
+ @Test
+ public void testOverridingRedundancyOnFirstDeploymentValidation() {
+ tester.deploy(null, getServices(1), Environment.prod, redundancyOneOverride); // Allowed due to override
+ }
+
+ private static String getServices(int redundancy) {
+ return "<services version='1.0'>" +
+ " <content id='contentClusterId' version='1.0'>" +
+ " <redundancy>" + redundancy + "</redundancy>" +
+ " <engine>" +
+ " <proton/>" +
+ " </engine>" +
+ " <documents>" +
+ " <document type='music' mode='index'/>" +
+ " </documents>" +
+ " <nodes count='3'/>" +
+ " </content>" +
+ "</services>";
+ }
+
+ private static final String redundancyOneOverride =
+ "<validation-overrides>\n" +
+ " <allow until='2000-01-03'>redundancy-one</allow>\n" +
+ "</validation-overrides>\n";
+
+}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/ClusterResourceLimitsTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/ClusterResourceLimitsTest.java
index 469e4649c14..4324f257922 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/ClusterResourceLimitsTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/ClusterResourceLimitsTest.java
@@ -6,10 +6,14 @@ import com.yahoo.config.model.application.provider.BaseDeployLogger;
import com.yahoo.searchdefinition.derived.TestableDeployLogger;
import com.yahoo.text.XML;
import com.yahoo.vespa.model.builder.xml.dom.ModelElement;
+import org.junit.Rule;
import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.w3c.dom.Document;
import java.util.Optional;
+import static org.hamcrest.CoreMatchers.containsString;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -48,13 +52,19 @@ public class ClusterResourceLimitsTest {
return this;
}
public ClusterResourceLimits build() {
- var builder = new ClusterResourceLimits.Builder(enableFeedBlockInDistributor, false, new BaseDeployLogger());
+ var builder = new ClusterResourceLimits.Builder(enableFeedBlockInDistributor,
+ false,
+ false,
+ new BaseDeployLogger());
builder.setClusterControllerBuilder(ctrlBuilder);
builder.setContentNodeBuilder(nodeBuilder);
return builder.build();
}
}
+ @Rule
+ public ExpectedException expectedException = ExpectedException.none();
+
@Test
public void content_node_limits_are_derived_from_cluster_controller_limits_if_not_set() {
assertLimits(0.4, 0.7, 0.7, 0.85,
@@ -120,26 +130,40 @@ public class ClusterResourceLimitsTest {
}
@Test
- // TODO: Change to expect exception being thrown when no one uses this in hosted
- public void default_resource_limits_when_hosted_and_warning_is_logged() {
+ public void exception_is_thrown_when_resource_limits_are_specified() {
TestableDeployLogger logger = new TestableDeployLogger();
- final boolean hosted = true;
- ClusterResourceLimits.Builder builder = new ClusterResourceLimits.Builder(true, hosted, logger);
- ClusterResourceLimits limits = builder.build(new ModelElement(XML.getDocument("<cluster id=\"test\">" +
- " <tuning>\n" +
- " <resource-limits>\n" +
- " <memory>0.92</memory>\n" +
- " </resource-limits>\n" +
- " </tuning>\n" +
- "</cluster>")
- .getDocumentElement()));
+ buildClusterResourceLimitsAndLogIfSpecified(logger);
+ assertEquals(1, logger.warnings.size());
+ assertEquals("Element 'resource-limits' is not allowed to be set", logger.warnings.get(0));
- assertLimits(0.8, 0.8, limits.getClusterControllerLimits());
- assertLimits(0.9, 0.9, limits.getContentNodeLimits());
+ expectedException.expect(IllegalArgumentException.class);
+ expectedException.expectMessage(containsString("Element 'resource-limits' is not allowed to be set"));
+ buildClusterResourceLimitsAndThrowIfSpecified(logger);
+ }
- assertEquals(1, logger.warnings.size());
- assertEquals("Element resource-limits is not allowed, default limits will be used", logger.warnings.get(0));
+ private void buildClusterResourceLimitsAndThrowIfSpecified(DeployLogger deployLogger) {
+ buildClusterResourceLimits(true, deployLogger);
+ }
+
+ private void buildClusterResourceLimitsAndLogIfSpecified(DeployLogger deployLogger) {
+ buildClusterResourceLimits(false, deployLogger);
+ }
+
+ private void buildClusterResourceLimits(boolean throwIfSpecified, DeployLogger deployLogger) {
+ Document clusterXml = XML.getDocument("<cluster id=\"test\">" +
+ " <tuning>\n" +
+ " <resource-limits>\n" +
+ " <memory>0.92</memory>\n" +
+ " </resource-limits>\n" +
+ " </tuning>\n" +
+ "</cluster>");
+
+ ClusterResourceLimits.Builder builder = new ClusterResourceLimits.Builder(true,
+ true,
+ throwIfSpecified,
+ deployLogger);
+ builder.build(new ModelElement(clusterXml.getDocumentElement()));
}
private void assertLimits(Double expCtrlDisk, Double expCtrlMemory, Double expNodeDisk, Double expNodeMemory, Fixture f) {
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/DispatchTuningTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/DispatchTuningTest.java
index 8a46aaaa230..27a01750210 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/DispatchTuningTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/DispatchTuningTest.java
@@ -17,12 +17,10 @@ public class DispatchTuningTest {
DispatchTuning dispatch = new DispatchTuning.Builder()
.setMaxHitsPerPartition(69)
.setDispatchPolicy("round-robin")
- .setMinGroupCoverage(7.5)
.setMinActiveDocsCoverage(12.5)
.setTopKProbability(18.3)
.build();
assertEquals(69, dispatch.getMaxHitsPerPartition().intValue());
- assertEquals(7.5, dispatch.getMinGroupCoverage().doubleValue(), 0.0);
assertEquals(12.5, dispatch.getMinActiveDocsCoverage().doubleValue(), 0.0);
assertTrue(DispatchTuning.DispatchPolicy.ROUNDROBIN == dispatch.getDispatchPolicy());
assertEquals(18.3, dispatch.getTopkProbability(), 0.0);
@@ -33,7 +31,6 @@ public class DispatchTuningTest {
.setDispatchPolicy("random")
.build();
assertTrue(DispatchTuning.DispatchPolicy.ADAPTIVE == dispatch.getDispatchPolicy());
- assertNull(dispatch.getMinGroupCoverage());
assertNull(dispatch.getMinActiveDocsCoverage());
}
@@ -43,7 +40,6 @@ public class DispatchTuningTest {
.setDispatchPolicy("adaptive")
.build();
assertTrue(DispatchTuning.DispatchPolicy.ADAPTIVE == dispatch.getDispatchPolicy());
- assertNull(dispatch.getMinGroupCoverage());
assertNull(dispatch.getMinActiveDocsCoverage());
}
@@ -53,7 +49,6 @@ public class DispatchTuningTest {
assertNull(dispatch.getMaxHitsPerPartition());
assertNull(dispatch.getDispatchPolicy());
assertNull(dispatch.getMinActiveDocsCoverage());
- assertNull(dispatch.getMinGroupCoverage());
assertNull(dispatch.getTopkProbability());
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/FleetControllerClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/FleetControllerClusterTest.java
index 22e38b30959..10bb00168bb 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/FleetControllerClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/FleetControllerClusterTest.java
@@ -4,9 +4,9 @@ package com.yahoo.vespa.model.content;
import com.yahoo.config.model.application.provider.BaseDeployLogger;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.model.deploy.TestProperties;
-import com.yahoo.vespa.config.content.FleetcontrollerConfig;
import com.yahoo.config.model.test.MockRoot;
import com.yahoo.text.XML;
+import com.yahoo.vespa.config.content.FleetcontrollerConfig;
import com.yahoo.vespa.model.builder.xml.dom.ModelElement;
import org.junit.Test;
import org.w3c.dom.Document;
@@ -26,6 +26,7 @@ public class FleetControllerClusterTest {
clusterElement,
new ClusterResourceLimits.Builder(enableFeedBlockInDistributor,
false,
+ false,
new BaseDeployLogger())
.build(clusterElement).getClusterControllerLimits())
.build(root.getDeployState(), root, clusterElement.getXml());
@@ -115,7 +116,7 @@ public class FleetControllerClusterTest {
assertLimits(0.8, 0.7, getConfigForResourceLimitsTuning(null, 0.7));
}
- private static double DELTA = 0.00001;
+ private static final double DELTA = 0.00001;
private void assertLimits(double expDisk, double expMemory, FleetcontrollerConfig config) {
var limits = config.cluster_feed_block_limit();
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/IndexedHierarchicDistributionTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/IndexedHierarchicDistributionTest.java
index 3be592e54e7..6c8cb393d3f 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/IndexedHierarchicDistributionTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/IndexedHierarchicDistributionTest.java
@@ -201,7 +201,6 @@ public class IndexedHierarchicDistributionTest {
assertEquals(8, dg.getRowBits());
assertEquals(3, dg.getNumPartitions());
assertEquals(true, dg.useFixedRowInDispatch());
- assertEquals(1, dg.getMaxNodesDownPerFixedRow());
ArrayList<SearchInterface> list = new ArrayList<>();
for(SearchInterface si : dg.getSearchersIterable()) {
list.add(si);
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/IndexingAndDocprocRoutingTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/IndexingAndDocprocRoutingTest.java
index 177b86c953e..e16862230fc 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/IndexingAndDocprocRoutingTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/IndexingAndDocprocRoutingTest.java
@@ -6,7 +6,6 @@ import com.yahoo.messagebus.routing.HopBlueprint;
import com.yahoo.messagebus.routing.PolicyDirective;
import com.yahoo.messagebus.routing.Route;
import com.yahoo.messagebus.routing.RoutingTable;
-import com.yahoo.searchdefinition.parser.ParseException;
import com.yahoo.vespa.model.VespaModel;
import com.yahoo.vespa.model.container.ContainerCluster;
import com.yahoo.vespa.model.container.docproc.ContainerDocproc;
@@ -17,50 +16,48 @@ import com.yahoo.vespa.model.routing.Routing;
import com.yahoo.vespa.model.test.utils.ApplicationPackageUtils;
import com.yahoo.vespa.model.test.utils.VespaModelCreatorWithMockPkg;
import org.junit.Test;
-import org.xml.sax.SAXException;
-import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.hamcrest.Matchers.*;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
/**
* @author Einar M R Rosenvinge
*/
public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
+
@Test
- public void oneContentOneDoctypeImplicitIndexingClusterImplicitIndexingChain()
- throws IOException, SAXException, ParseException {
+ public void oneContentOneDoctypeImplicitIndexingClusterImplicitIndexingChain() {
final String CLUSTERNAME = "musiccluster";
SearchClusterSpec searchCluster = new SearchClusterSpec(CLUSTERNAME, null, null);
searchCluster.searchDefs.add(new SearchDefSpec("music", "artist", "album"));
- VespaModel model = getIndexedContentVespaModel(Collections.<DocprocClusterSpec>emptyList(), Arrays.asList(searchCluster));
+ VespaModel model = getIndexedContentVespaModel(List.of(), List.of(searchCluster));
assertIndexing(model, new DocprocClusterSpec("container", new DocprocChainSpec("container/chain.indexing")));
assertFeedingRoute(model, CLUSTERNAME, "container/chain.indexing");
}
@Test
- public void oneContentTwoDoctypesImplicitIndexingClusterImplicitIndexingChain()
- throws IOException, SAXException, ParseException {
+ public void oneContentTwoDoctypesImplicitIndexingClusterImplicitIndexingChain() {
final String CLUSTERNAME = "musicandbookscluster";
SearchClusterSpec searchCluster = new SearchClusterSpec(CLUSTERNAME, null, null);
searchCluster.searchDefs.add(new SearchDefSpec("music", "artist", "album"));
searchCluster.searchDefs.add(new SearchDefSpec("book", "author", "title"));
- VespaModel model = getIndexedContentVespaModel(Collections.<DocprocClusterSpec>emptyList(), Arrays.asList(searchCluster));
+ VespaModel model = getIndexedContentVespaModel(List.of(), List.of(searchCluster));
assertIndexing(model, new DocprocClusterSpec("container", new DocprocChainSpec("container/chain.indexing")));
assertFeedingRoute(model, CLUSTERNAME, "container/chain.indexing");
}
@Test
- public void twoContentTwoDoctypesImplicitIndexingClusterImplicitIndexingChain()
- throws IOException, SAXException, ParseException {
+ public void twoContentTwoDoctypesImplicitIndexingClusterImplicitIndexingChain() {
final String MUSIC = "musiccluster";
SearchClusterSpec musicCluster = new SearchClusterSpec(MUSIC, null, null);
musicCluster.searchDefs.add(new SearchDefSpec("music", "artist", "album"));
@@ -69,10 +66,10 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
SearchClusterSpec booksCluster = new SearchClusterSpec(BOOKS, null, null);
booksCluster.searchDefs.add(new SearchDefSpec("book", "author", "title"));
- VespaModel model = getIndexedContentVespaModel(Collections.<DocprocClusterSpec>emptyList(), Arrays.asList(musicCluster, booksCluster));
+ VespaModel model = getIndexedContentVespaModel(List.of(), List.of(musicCluster, booksCluster));
assertIndexing(model,
- new DocprocClusterSpec("container", new DocprocChainSpec("container/chain.indexing")));
+ new DocprocClusterSpec("container", new DocprocChainSpec("container/chain.indexing")));
assertFeedingRoute(model, MUSIC, "container/chain.indexing");
assertFeedingRoute(model, BOOKS, "container/chain.indexing");
@@ -80,19 +77,17 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
@Test
- public void oneContentOneDoctypeExplicitIndexingClusterImplicitIndexingChain()
- throws IOException, SAXException, ParseException {
+ public void oneContentOneDoctypeExplicitIndexingClusterImplicitIndexingChain() {
final String CLUSTERNAME = "musiccluster";
SearchClusterSpec searchCluster = new SearchClusterSpec(CLUSTERNAME, "dpcluster", null);
searchCluster.searchDefs.add(new SearchDefSpec("music", "artist", "album"));
- VespaModel model = getIndexedContentVespaModel(Arrays.asList(new DocprocClusterSpec("dpcluster")), Arrays.asList(searchCluster));
+ VespaModel model = getIndexedContentVespaModel(List.of(new DocprocClusterSpec("dpcluster")), List.of(searchCluster));
assertIndexing(model, new DocprocClusterSpec("dpcluster", new DocprocChainSpec("dpcluster/chain.indexing")));
assertFeedingRoute(model, CLUSTERNAME, "dpcluster/chain.indexing");
}
@Test
- public void oneSearchOneDoctypeExplicitIndexingClusterExplicitIndexingChain()
- throws IOException, SAXException, ParseException {
+ public void oneSearchOneDoctypeExplicitIndexingClusterExplicitIndexingChain() {
String xml =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" +
"<services version=\"1.0\">\n" +
@@ -130,8 +125,7 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
}
@Test
- public void twoContentTwoDoctypesExplicitIndexingInSameIndexingCluster()
- throws IOException, SAXException, ParseException {
+ public void twoContentTwoDoctypesExplicitIndexingInSameIndexingCluster() {
final String MUSIC = "musiccluster";
SearchClusterSpec musicCluster = new SearchClusterSpec(MUSIC, "dpcluster", null);
musicCluster.searchDefs.add(new SearchDefSpec("music", "artist", "album"));
@@ -140,8 +134,8 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
SearchClusterSpec booksCluster = new SearchClusterSpec(BOOKS, "dpcluster", null);
booksCluster.searchDefs.add(new SearchDefSpec("book", "author", "title"));
- VespaModel model = getIndexedContentVespaModel(Arrays.asList(new DocprocClusterSpec("dpcluster")),
- Arrays.asList(musicCluster, booksCluster));
+ VespaModel model = getIndexedContentVespaModel(List.of(new DocprocClusterSpec("dpcluster")),
+ List.of(musicCluster, booksCluster));
assertIndexing(model, new DocprocClusterSpec("dpcluster", new DocprocChainSpec("dpcluster/chain.indexing")));
assertFeedingRoute(model, MUSIC, "dpcluster/chain.indexing");
@@ -165,14 +159,12 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
"</services>\n";
List<String> sds = ApplicationPackageUtils.generateSchemas("music", "title", "artist");
- VespaModel model = new VespaModelCreatorWithMockPkg(getHosts(),
- services, sds).create();
+ VespaModel model = new VespaModelCreatorWithMockPkg(getHosts(), services, sds).create();
assertIndexing(model, new DocprocClusterSpec("dokprok"));
}
@Test
- public void twoContentTwoDoctypesExplicitIndexingInDifferentIndexingClustersExplicitChain()
- throws IOException, SAXException, ParseException {
+ public void twoContentTwoDoctypesExplicitIndexingInDifferentIndexingClustersExplicitChain() {
final String MUSIC = "musiccluster";
SearchClusterSpec musicCluster = new SearchClusterSpec(MUSIC, "dpmusiccluster", "dpmusicchain");
musicCluster.searchDefs.add(new SearchDefSpec("music", "artist", "album"));
@@ -183,12 +175,8 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
DocprocClusterSpec dpMusicCluster = new DocprocClusterSpec("dpmusiccluster", new DocprocChainSpec("dpmusicchain", "indexing"));
DocprocClusterSpec dpBooksCluster = new DocprocClusterSpec("dpbookscluster", new DocprocChainSpec("dpbookschain", "indexing"));
- VespaModel model = getIndexedContentVespaModel(Arrays.asList(
- dpMusicCluster,
- dpBooksCluster),
- Arrays.asList(
- musicCluster,
- booksCluster));
+ VespaModel model = getIndexedContentVespaModel(List.of(dpMusicCluster, dpBooksCluster),
+ List.of(musicCluster, booksCluster));
//after we generated model, add indexing chains for validation:
dpMusicCluster.chains.clear();
@@ -204,52 +192,52 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
assertFeedingRoute(model, BOOKS, "dpbookscluster/chain.dpbookschain");
}
- @Test(expected = IllegalArgumentException.class)
- public void twoContentTwoDoctypesExplicitIndexingInDifferentIndexingClustersExplicitChainIncorrectInheritance()
- throws IOException, SAXException, ParseException {
- final String MUSIC = "musiccluster";
- SearchClusterSpec musicCluster = new SearchClusterSpec(MUSIC, "dpmusiccluster", "dpmusicchain");
- musicCluster.searchDefs.add(new SearchDefSpec("music", "artist", "album"));
-
- final String BOOKS = "bookscluster";
- SearchClusterSpec booksCluster = new SearchClusterSpec(BOOKS, "dpbookscluster", "dpbookschain");
- booksCluster.searchDefs.add(new SearchDefSpec("book", "author", "title"));
-
- DocprocClusterSpec dpMusicCluster = new DocprocClusterSpec("dpmusiccluster", new DocprocChainSpec("dpmusicchain"));
- DocprocClusterSpec dpBooksCluster = new DocprocClusterSpec("dpbookscluster", new DocprocChainSpec("dpbookschain"));
- VespaModel model = getIndexedContentVespaModel(Arrays.asList(
- dpMusicCluster,
- dpBooksCluster),
- Arrays.asList(
- musicCluster,
- booksCluster));
-
- //after we generated model, add indexing chains for validation:
- dpMusicCluster.chains.clear();
- dpMusicCluster.chains.add(new DocprocChainSpec("dpmusiccluster/chain.indexing"));
- dpMusicCluster.chains.add(new DocprocChainSpec("dpmusiccluster/chain.dpmusicchain"));
-
- dpBooksCluster.chains.clear();
- dpBooksCluster.chains.add(new DocprocChainSpec("dpbookscluster/chain.indexing"));
- dpBooksCluster.chains.add(new DocprocChainSpec("dpbookscluster/chain.dpbookschain"));
+ @Test
+ public void requiresIndexingInheritance() {
+ try {
+ SearchClusterSpec musicCluster = new SearchClusterSpec("musiccluster",
+ "dpmusiccluster",
+ "dpmusicchain");
+ musicCluster.searchDefs.add(new SearchDefSpec("music", "artist", "album"));
+
+ DocprocClusterSpec dpMusicCluster = new DocprocClusterSpec("dpmusiccluster", new DocprocChainSpec("dpmusicchain"));
+ getIndexedContentVespaModel(List.of(dpMusicCluster), List.of(musicCluster));
+ fail("Expected exception");
+ }
+ catch (IllegalArgumentException e) {
+ assertEquals("Docproc chain 'dpmusicchain' must inherit from the 'indexing' chain", e.getMessage());
+ }
+ }
- assertIndexing(model, dpMusicCluster, dpBooksCluster);
- assertFeedingRoute(model, MUSIC, "dpmusiccluster/chain.dpmusicchain");
- assertFeedingRoute(model, BOOKS, "dpbookscluster/chain.dpbookschain");
+ @Test
+ public void indexingChainShouldNotBeTheDefaultChain() {
+ try {
+ SearchClusterSpec musicCluster = new SearchClusterSpec("musiccluster",
+ "dpmusiccluster",
+ "default");
+ musicCluster.searchDefs.add(new SearchDefSpec("music", "artist", "album"));
+
+ DocprocClusterSpec dpMusicCluster = new DocprocClusterSpec("dpmusiccluster", new DocprocChainSpec("default", "indexing"));
+ getIndexedContentVespaModel(List.of(dpMusicCluster), List.of(musicCluster));
+ fail("Expected exception");
+ }
+ catch (IllegalArgumentException e) {
+ assertTrue(e.getMessage().startsWith("Indexing cluster 'musiccluster' specifies the chain 'default' as indexing chain"));
+ }
}
private void assertIndexing(VespaModel model, DocprocClusterSpec... expectedDocprocClusters) {
Map<String, ContainerCluster> docprocClusters = getDocprocClusters(model);
- assertThat(docprocClusters.size(), is(expectedDocprocClusters.length));
+ assertEquals(expectedDocprocClusters.length, docprocClusters.size());
for (DocprocClusterSpec expectedDocprocCluster : expectedDocprocClusters) {
ContainerCluster docprocCluster = docprocClusters.get(expectedDocprocCluster.name);
- assertThat(docprocCluster, not(nullValue()));
- assertThat(docprocCluster.getName(), is(expectedDocprocCluster.name));
+ assertNotNull(docprocCluster);
+ assertEquals(expectedDocprocCluster.name, docprocCluster.getName());
ContainerDocproc containerDocproc = docprocCluster.getDocproc();
- assertThat(containerDocproc, not(nullValue()));
+ assertNotNull(containerDocproc);
List<DocprocChain> chains = containerDocproc.getChains().allChains().allComponents();
- assertThat(chains.size(), is(expectedDocprocCluster.chains.size()));
+ assertEquals(expectedDocprocCluster.chains.size(), chains.size());
List<String> actualDocprocChains = new ArrayList<>();
for (DocprocChain chain : chains) {
actualDocprocChains.add(chain.getServiceName());
@@ -373,7 +361,8 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
return retval.toString();
}
- private String createVespaServicesWithContent(List<DocprocClusterSpec> docprocClusterSpecs, List<SearchClusterSpec> searchClusterSpecs) {
+ private String createVespaServicesWithContent(List<DocprocClusterSpec> docprocClusterSpecs,
+ List<SearchClusterSpec> searchClusterSpecs) {
String mainPre =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services version='1.0'>\n" +
@@ -393,7 +382,7 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
String docprocCluster = "";
docprocCluster += " <container version='1.0' id='" + docprocClusterSpec.name + "'>\n";
- if (docprocClusterSpec.chains != null && docprocClusterSpec.chains.size() > 0) {
+ if (docprocClusterSpec.chains.size() > 0) {
docprocCluster += " <document-processing>\n";
for (DocprocChainSpec chain : docprocClusterSpec.chains) {
if (chain.inherits.isEmpty()) {
@@ -465,11 +454,12 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
createVespaServicesWithContent(docprocClusterSpecs, searchClusterSpecs), sds).create();
}
- private class SearchClusterSpec {
+ private static class SearchClusterSpec {
+
private final String name;
- private List<SearchDefSpec> searchDefs = new ArrayList<>(2);
- private String indexingClusterName;
- private String indexingChainName;
+ private final List<SearchDefSpec> searchDefs = new ArrayList<>(2);
+ private final String indexingClusterName;
+ private final String indexingChainName;
private SearchClusterSpec(String name, String indexingClusterName, String indexingChainName) {
this.name = name;
@@ -478,10 +468,11 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
}
}
- private class SearchDefSpec {
- private String typeName;
- private String field1Name;
- private String field2Name;
+ private static class SearchDefSpec {
+
+ private final String typeName;
+ private final String field1Name;
+ private final String field2Name;
private SearchDefSpec(String typeName, String field1Name, String field2Name) {
this.typeName = typeName;
@@ -491,6 +482,7 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
}
private class DocprocClusterSpec {
+
private final String name;
private final List<DocprocChainSpec> chains = new ArrayList<>();
@@ -500,7 +492,8 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
}
}
- private class DocprocChainSpec {
+ private static class DocprocChainSpec {
+
private final String name;
private final List<String> inherits = new ArrayList<>();
@@ -509,4 +502,5 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
this.inherits.addAll(Arrays.asList(inherits));
}
}
+
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/StorageClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/StorageClusterTest.java
index 5cf57430f91..9a681003293 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/StorageClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/StorageClusterTest.java
@@ -17,6 +17,7 @@ import com.yahoo.vespa.config.content.PersistenceConfig;
import com.yahoo.config.model.test.MockRoot;
import com.yahoo.documentmodel.NewDocumentType;
import static com.yahoo.vespa.defaults.Defaults.getDefaults;
+import static com.yahoo.config.model.test.TestUtil.joinLines;
import com.yahoo.vespa.model.content.cluster.ContentCluster;
import com.yahoo.vespa.model.content.storagecluster.StorageCluster;
import com.yahoo.vespa.model.content.utils.ContentClusterUtils;
@@ -44,10 +45,17 @@ public class StorageClusterTest {
return parse(xml, root);
}
- StorageCluster parse(String xml) {
- MockRoot root = new MockRoot();
+ StorageCluster parse(String xml, ModelContext.Properties properties) {
+ MockRoot root = new MockRoot("",
+ new DeployState.Builder()
+ .properties(properties)
+ .applicationPackage(new MockApplicationPackage.Builder().build())
+ .build());
return parse(xml, root);
}
+ StorageCluster parse(String xml) {
+ return parse(xml, new TestProperties());
+ }
StorageCluster parse(String xml, MockRoot root) {
root.getDeployState().getDocumentModel().getDocumentManager().add(
new NewDocumentType(new NewDocumentType.Name("music"))
@@ -61,13 +69,23 @@ public class StorageClusterTest {
return cluster.getStorageNodes();
}
+ private static String group() {
+ return joinLines(
+ "<group>",
+ " <node distribution-key=\"0\" hostalias=\"mockhost\"/>",
+ "</group>");
+ }
+ private static String cluster(String clusterName, String insert) {
+ return joinLines(
+ "<content id=\"" + clusterName + "\">",
+ "<documents/>",
+ insert,
+ group(),
+ "</content>");
+ }
@Test
public void testBasics() {
- StorageCluster storage = parse("<content id=\"foofighters\"><documents/>\n" +
- " <group>" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
- " </group>" +
- "</content>\n");
+ StorageCluster storage = parse(cluster("foofighters", ""));
assertEquals(1, storage.getChildren().size());
StorServerConfig.Builder builder = new StorServerConfig.Builder();
@@ -79,11 +97,7 @@ public class StorageClusterTest {
}
@Test
public void testCommunicationManagerDefaults() {
- StorageCluster storage = parse("<content id=\"foofighters\"><documents/>\n" +
- " <group>" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
- " </group>" +
- "</content>\n");
+ StorageCluster storage = parse(cluster("foofighters", ""));
StorCommunicationmanagerConfig.Builder builder = new StorCommunicationmanagerConfig.Builder();
storage.getChildren().get("0").getConfig(builder);
StorCommunicationmanagerConfig config = new StorCommunicationmanagerConfig(builder);
@@ -97,40 +111,49 @@ public class StorageClusterTest {
}
@Test
+ public void testMergeDefaults() {
+ StorServerConfig.Builder builder = new StorServerConfig.Builder();
+ parse(cluster("foofighters", "")).getConfig(builder);
+
+ StorServerConfig config = new StorServerConfig(builder);
+ assertEquals(16, config.max_merges_per_node());
+ assertEquals(1024, config.max_merge_queue_size());
+ }
+
+ @Test
public void testMerges() {
StorServerConfig.Builder builder = new StorServerConfig.Builder();
- parse("" +
- "<content id=\"foofighters\">\n" +
- " <documents/>" +
- " <tuning>" +
- " <merges max-per-node=\"1K\" max-queue-size=\"10K\"/>\n" +
- " </tuning>" +
- " <group>" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
- " </group>" +
- "</content>"
+ parse(cluster("foofighters", joinLines(
+ "<tuning>",
+ " <merges max-per-node=\"1K\" max-queue-size=\"10K\"/>",
+ "</tuning>")),
+ new TestProperties().setMaxMergeQueueSize(1919).setMaxConcurrentMergesPerNode(37)
).getConfig(builder);
StorServerConfig config = new StorServerConfig(builder);
assertEquals(1024, config.max_merges_per_node());
assertEquals(1024*10, config.max_merge_queue_size());
}
+ @Test
+ public void testMergeFeatureFlags() {
+ StorServerConfig.Builder builder = new StorServerConfig.Builder();
+ parse(cluster("foofighters", ""), new TestProperties().setMaxMergeQueueSize(1919).setMaxConcurrentMergesPerNode(37)).getConfig(builder);
+
+ StorServerConfig config = new StorServerConfig(builder);
+ assertEquals(37, config.max_merges_per_node());
+ assertEquals(1919, config.max_merge_queue_size());
+ }
@Test
public void testVisitors() {
StorVisitorConfig.Builder builder = new StorVisitorConfig.Builder();
- parse(
- "<cluster id=\"bees\">\n" +
- " <documents/>" +
- " <tuning>\n" +
- " <visitors thread-count=\"7\" max-queue-size=\"1000\">\n" +
- " <max-concurrent fixed=\"42\" variable=\"100\"/>\n" +
- " </visitors>\n" +
- " </tuning>\n" +
- " <group>" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
- " </group>" +
- "</cluster>"
+ parse(cluster("bees",
+ joinLines(
+ "<tuning>",
+ " <visitors thread-count=\"7\" max-queue-size=\"1000\">",
+ " <max-concurrent fixed=\"42\" variable=\"100\"/>",
+ " </visitors>",
+ "</tuning>"))
).getConfig(builder);
StorVisitorConfig config = new StorVisitorConfig(builder);
@@ -143,16 +166,10 @@ public class StorageClusterTest {
@Test
public void testPersistenceThreads() {
- StorageCluster stc = parse(
- "<cluster id=\"bees\">\n" +
- " <documents/>" +
- " <tuning>\n" +
- " <persistence-threads count=\"7\"/>\n" +
- " </tuning>\n" +
- " <group>" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
- " </group>" +
- "</cluster>",
+ StorageCluster stc = parse(cluster("bees",joinLines(
+ "<tuning>",
+ " <persistence-threads count=\"7\"/>",
+ "</tuning>")),
new Flavor(new FlavorsConfig.Flavor.Builder().name("test-flavor").minCpuCores(9).build())
);
@@ -178,16 +195,10 @@ public class StorageClusterTest {
@Test
public void testResponseThreads() {
- StorageCluster stc = parse(
- "<cluster id=\"bees\">\n" +
- " <documents/>" +
- " <tuning>\n" +
- " <persistence-threads count=\"7\"/>\n" +
- " </tuning>\n" +
- " <group>" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
- " </group>" +
- "</cluster>",
+ StorageCluster stc = parse(cluster("bees",joinLines(
+ "<tuning>",
+ " <persistence-threads count=\"7\"/>",
+ "</tuning>")),
new Flavor(new FlavorsConfig.Flavor.Builder().name("test-flavor").minCpuCores(9).build())
);
StorFilestorConfig.Builder builder = new StorFilestorConfig.Builder();
@@ -201,20 +212,14 @@ public class StorageClusterTest {
@Test
public void testPersistenceThreadsOld() {
- StorageCluster stc = parse(
- "<cluster id=\"bees\">\n" +
- " <documents/>" +
- " <tuning>\n" +
- " <persistence-threads>\n" +
- " <thread lowest-priority=\"VERY_LOW\" count=\"2\"/>\n" +
- " <thread lowest-priority=\"VERY_HIGH\" count=\"1\"/>\n" +
- " <thread count=\"1\"/>\n" +
- " </persistence-threads>\n" +
- " </tuning>\n" +
- " <group>" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
- " </group>" +
- "</cluster>",
+ StorageCluster stc = parse(cluster("bees", joinLines(
+ "<tuning>",
+ " <persistence-threads>",
+ " <thread lowest-priority=\"VERY_LOW\" count=\"2\"/>",
+ " <thread lowest-priority=\"VERY_HIGH\" count=\"1\"/>",
+ " <thread count=\"1\"/>",
+ " </persistence-threads>",
+ "</tuning>")),
new Flavor(new FlavorsConfig.Flavor.Builder().name("test-flavor").minCpuCores(9).build())
);
@@ -238,15 +243,7 @@ public class StorageClusterTest {
@Test
public void testNoPersistenceThreads() {
- StorageCluster stc = parse(
- "<cluster id=\"bees\">\n" +
- " <documents/>" +
- " <tuning>\n" +
- " </tuning>\n" +
- " <group>" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
- " </group>" +
- "</cluster>",
+ StorageCluster stc = parse(cluster("bees", ""),
new Flavor(new FlavorsConfig.Flavor.Builder().name("test-flavor").minCpuCores(9).build())
);
@@ -267,13 +264,7 @@ public class StorageClusterTest {
}
private StorageCluster simpleCluster(ModelContext.Properties properties) {
- return parse(
- "<cluster id=\"bees\">\n" +
- " <documents/>" +
- " <group>" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
- " </group>" +
- "</cluster>",
+ return parse(cluster("bees", ""),
new Flavor(new FlavorsConfig.Flavor.Builder().name("test-flavor").minCpuCores(9).build()),
properties);
}
@@ -302,14 +293,7 @@ public class StorageClusterTest {
@Test
public void integrity_checker_explicitly_disabled_when_not_running_with_vds_provider() {
StorIntegritycheckerConfig.Builder builder = new StorIntegritycheckerConfig.Builder();
- parse(
- "<cluster id=\"bees\">\n" +
- " <documents/>" +
- " <group>" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
- " </group>" +
- "</cluster>"
- ).getConfig(builder);
+ parse(cluster("bees", "")).getConfig(builder);
StorIntegritycheckerConfig config = new StorIntegritycheckerConfig(builder);
// '-' --> don't run on the given week day
assertEquals("-------", config.weeklycycle());
@@ -317,15 +301,15 @@ public class StorageClusterTest {
@Test
public void testCapacity() {
- String xml =
- "<cluster id=\"storage\">\n" +
- " <documents/>" +
- " <group>\n" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
- " <node distribution-key=\"1\" hostalias=\"mockhost\" capacity=\"1.5\"/>\n" +
- " <node distribution-key=\"2\" hostalias=\"mockhost\" capacity=\"2.0\"/>\n" +
- " </group>\n" +
- "</cluster>";
+ String xml = joinLines(
+ "<cluster id=\"storage\">",
+ " <documents/>",
+ " <group>",
+ " <node distribution-key=\"0\" hostalias=\"mockhost\"/>",
+ " <node distribution-key=\"1\" hostalias=\"mockhost\" capacity=\"1.5\"/>",
+ " <node distribution-key=\"2\" hostalias=\"mockhost\" capacity=\"2.0\"/>",
+ " </group>",
+ "</cluster>");
ContentCluster cluster = ContentClusterUtils.createCluster(xml, new MockRoot());
@@ -341,15 +325,7 @@ public class StorageClusterTest {
@Test
public void testRootFolder() {
- String xml =
- "<cluster id=\"storage\">\n" +
- " <documents/>" +
- " <group>\n" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- "</cluster>";
-
- ContentCluster cluster = ContentClusterUtils.createCluster(xml, new MockRoot());
+ ContentCluster cluster = ContentClusterUtils.createCluster(cluster("storage", ""), new MockRoot());
StorageNode node = cluster.getStorageNodes().getChildren().get("0");
@@ -372,18 +348,18 @@ public class StorageClusterTest {
@Test
public void testGenericPersistenceTuning() {
- String xml =
- "<cluster id=\"storage\">\n" +
- "<documents/>" +
- "<engine>\n" +
- " <fail-partition-on-error>true</fail-partition-on-error>\n" +
- " <revert-time>34m</revert-time>\n" +
- " <recovery-time>5d</recovery-time>\n" +
- "</engine>" +
- " <group>\n" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- "</cluster>";
+ String xml = joinLines(
+ "<cluster id=\"storage\">",
+ " <documents/>",
+ " <engine>",
+ " <fail-partition-on-error>true</fail-partition-on-error>",
+ " <revert-time>34m</revert-time>",
+ " <recovery-time>5d</recovery-time>",
+ " </engine>",
+ " <group>",
+            "    <node distribution-key=\"0\" hostalias=\"mockhost\"/>",
+ " </group>",
+ "</cluster>");
ContentCluster cluster = ContentClusterUtils.createCluster(xml, new MockRoot());
@@ -398,21 +374,21 @@ public class StorageClusterTest {
@Test
public void requireThatUserDoesNotSpecifyBothGroupAndNodes() {
- String xml =
- "<cluster id=\"storage\">\n" +
- "<documents/>\n" +
- "<engine>\n" +
- " <fail-partition-on-error>true</fail-partition-on-error>\n" +
- " <revert-time>34m</revert-time>\n" +
- " <recovery-time>5d</recovery-time>\n" +
- "</engine>" +
- " <group>\n" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- " <nodes>\n" +
- " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" +
- " </nodes>\n" +
- "</cluster>";
+ String xml = joinLines(
+ "<cluster id=\"storage\">",
+ " <documents/>",
+ " <engine>",
+ " <fail-partition-on-error>true</fail-partition-on-error>",
+ " <revert-time>34m</revert-time>",
+ " <recovery-time>5d</recovery-time>",
+ " </engine>",
+ " <group>",
+ " <node distribution-key=\"0\" hostalias=\"mockhost\"/>",
+ " </group>",
+ " <nodes>",
+ " <node distribution-key=\"1\" hostalias=\"mockhost\"/>",
+ " </nodes>",
+ "</cluster>");
try {
final MockRoot root = new MockRoot();
@@ -429,20 +405,20 @@ public class StorageClusterTest {
@Test
public void requireThatGroupNamesMustBeUniqueAmongstSiblings() {
- String xml =
- "<cluster id=\"storage\">\n" +
- " <redundancy>2</redundancy>" +
- " <documents/>\n" +
- " <group>\n" +
- " <distribution partitions=\"*\"/>\n" +
- " <group distribution-key=\"0\" name=\"bar\">\n" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- " <group distribution-key=\"0\" name=\"bar\">\n" +
- " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- " </group>\n" +
- "</cluster>";
+ String xml = joinLines(
+ "<cluster id=\"storage\">",
+ " <redundancy>2</redundancy>",
+ " <documents/>",
+ " <group>",
+ " <distribution partitions=\"*\"/>",
+ " <group distribution-key=\"0\" name=\"bar\">",
+ " <node distribution-key=\"0\" hostalias=\"mockhost\"/>",
+ " </group>",
+ " <group distribution-key=\"0\" name=\"bar\">",
+ " <node distribution-key=\"1\" hostalias=\"mockhost\"/>",
+ " </group>",
+ " </group>",
+ "</cluster>");
try {
ContentClusterUtils.createCluster(xml, new MockRoot());
@@ -455,24 +431,24 @@ public class StorageClusterTest {
@Test
public void requireThatGroupNamesCanBeDuplicatedAcrossLevels() {
- String xml =
- "<cluster id=\"storage\">\n" +
- " <redundancy>2</redundancy>" +
- "<documents/>\n" +
- " <group>\n" +
- " <distribution partitions=\"*\"/>\n" +
- " <group distribution-key=\"0\" name=\"bar\">\n" +
- " <group distribution-key=\"0\" name=\"foo\">\n" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- " </group>\n" +
- " <group distribution-key=\"0\" name=\"foo\">\n" +
- " <group distribution-key=\"0\" name=\"bar\">\n" +
- " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- " </group>\n" +
- " </group>\n" +
- "</cluster>";
+ String xml = joinLines(
+ "<cluster id=\"storage\">",
+ " <redundancy>2</redundancy>",
+ " <documents/>",
+ " <group>",
+ " <distribution partitions=\"*\"/>",
+ " <group distribution-key=\"0\" name=\"bar\">",
+ " <group distribution-key=\"0\" name=\"foo\">",
+ " <node distribution-key=\"0\" hostalias=\"mockhost\"/>",
+ " </group>",
+ " </group>",
+ " <group distribution-key=\"0\" name=\"foo\">",
+ " <group distribution-key=\"0\" name=\"bar\">",
+ " <node distribution-key=\"1\" hostalias=\"mockhost\"/>",
+ " </group>",
+ " </group>",
+ " </group>",
+ "</cluster>");
// Should not throw.
ContentClusterUtils.createCluster(xml, new MockRoot());
@@ -480,18 +456,18 @@ public class StorageClusterTest {
@Test
public void requireThatNestedGroupsRequireDistribution() {
- String xml =
- "<cluster id=\"storage\">\n" +
- "<documents/>\n" +
- " <group>\n" +
- " <group distribution-key=\"0\" name=\"bar\">\n" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- " <group distribution-key=\"0\" name=\"baz\">\n" +
- " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- " </group>\n" +
- "</cluster>";
+ String xml = joinLines(
+ "<cluster id=\"storage\">",
+ " <documents/>",
+ " <group>",
+ " <group distribution-key=\"0\" name=\"bar\">",
+ " <node distribution-key=\"0\" hostalias=\"mockhost\"/>",
+ " </group>",
+ " <group distribution-key=\"0\" name=\"baz\">",
+ " <node distribution-key=\"1\" hostalias=\"mockhost\"/>",
+ " </group>",
+ " </group>",
+ "</cluster>");
try {
ContentClusterUtils.createCluster(xml, new MockRoot());
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/ClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/ClusterTest.java
index afeffbbc875..1426b094971 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/ClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/ClusterTest.java
@@ -73,7 +73,6 @@ public class ClusterTest {
joinLines(
"<max-hits-per-partition>77</max-hits-per-partition>",
"<dispatch-policy>round-robin</dispatch-policy>",
- "<min-group-coverage>13</min-group-coverage>",
"<min-active-docs-coverage>93</min-active-docs-coverage>",
"<top-k-probability>0.777</top-k-probability>"),
false);
@@ -82,7 +81,6 @@ public class ClusterTest {
DispatchConfig config = new DispatchConfig(builder);
assertEquals(2, config.searchableCopies());
assertEquals(93.0, config.minActivedocsPercentage(), DELTA);
- assertEquals(13.0, config.minGroupCoverage(), DELTA);
assertEquals(DispatchConfig.DistributionPolicy.ROUNDROBIN, config.distributionPolicy());
assertEquals(77, config.maxHitsPerNode());
assertEquals(0.777, config.topKProbability(), DELTA);
@@ -97,7 +95,6 @@ public class ClusterTest {
DispatchConfig config = new DispatchConfig(builder);
assertEquals(2, config.searchableCopies());
assertEquals(DispatchConfig.DistributionPolicy.ADAPTIVE, config.distributionPolicy());
- assertEquals(0, config.maxNodesDownPerGroup());
assertEquals(1.0, config.maxWaitAfterCoverageFactor(), DELTA);
assertEquals(0, config.minWaitAfterCoverageFactor(), DELTA);
assertEquals(8, config.numJrtConnectionsPerNode());
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/DomDispatchTuningBuilderTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/DomDispatchTuningBuilderTest.java
index abfb03e41dd..7533bf1ef9d 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/DomDispatchTuningBuilderTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/DomDispatchTuningBuilderTest.java
@@ -44,7 +44,6 @@ public class DomDispatchTuningBuilderTest {
" </tuning>" +
"</content>");
assertNull(dispatch.getMaxHitsPerPartition());
- assertNull(dispatch.getMinGroupCoverage());
assertNull(dispatch.getMinActiveDocsCoverage());
assertNull(dispatch.getDispatchPolicy());
assertNull(dispatch.getTopkProbability());
@@ -57,14 +56,12 @@ public class DomDispatchTuningBuilderTest {
" <tuning>" +
" <dispatch>" +
" <max-hits-per-partition>69</max-hits-per-partition>" +
- " <min-group-coverage>7.5</min-group-coverage>" +
" <min-active-docs-coverage>12.5</min-active-docs-coverage>" +
" <top-k-probability>0.999</top-k-probability>" +
" </dispatch>" +
" </tuning>" +
"</content>");
assertEquals(69, dispatch.getMaxHitsPerPartition().intValue());
- assertEquals(7.5, dispatch.getMinGroupCoverage().doubleValue(), 0.0);
assertEquals(12.5, dispatch.getMinActiveDocsCoverage().doubleValue(), 0.0);
assertEquals(0.999, dispatch.getTopkProbability().doubleValue(), 0.0);
}
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/ApplicationLockException.java b/config-provisioning/src/main/java/com/yahoo/config/provision/ApplicationLockException.java
index e9fe669269c..8f71d4f5061 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/ApplicationLockException.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/ApplicationLockException.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.provision;
/**
@@ -13,4 +13,8 @@ public class ApplicationLockException extends RuntimeException {
super(e);
}
+ public ApplicationLockException(String message) {
+ super(message);
+ }
+
}
diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ProxyServer.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ProxyServer.java
index 047cec87ed7..5ad9fabcb61 100644
--- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ProxyServer.java
+++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ProxyServer.java
@@ -56,7 +56,7 @@ public class ProxyServer implements Runnable {
ProxyServer(Spec spec, ConfigSourceSet source, MemoryCache memoryCache, ConfigSourceClient configClient) {
this.configSource = source;
- supervisor = new Supervisor(new Transport("proxy-server", JRT_TRANSPORT_THREADS)).useSmallBuffers();
+ supervisor = new Supervisor(new Transport("proxy-server", JRT_TRANSPORT_THREADS)).setDropEmptyBuffers(true);
log.log(Level.FINE, () -> "Using config source '" + source);
this.memoryCache = memoryCache;
this.rpcServer = createRpcServer(spec);
diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionAndUrlDownload.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionAndUrlDownload.java
index 2767d2c8027..1dba56805a5 100644
--- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionAndUrlDownload.java
+++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionAndUrlDownload.java
@@ -1,4 +1,4 @@
-// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.proxy.filedistribution;
import com.yahoo.concurrent.DaemonThreadFactory;
@@ -26,7 +26,9 @@ public class FileDistributionAndUrlDownload {
new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory("file references and downloads cleanup"));
public FileDistributionAndUrlDownload(Supervisor supervisor, ConfigSourceSet source) {
- fileDistributionRpcServer = new FileDistributionRpcServer(supervisor, new FileDownloader(new JRTConnectionPool(source)));
+ fileDistributionRpcServer =
+ new FileDistributionRpcServer(supervisor,
+ new FileDownloader(new JRTConnectionPool(source, "filedistribution-jrt-pool-")));
urlDownloadRpcServer = new UrlDownloadRpcServer(supervisor);
cleanupExecutor.scheduleAtFixedRate(new CachedFilesMaintainer(), delay.toSeconds(), delay.toSeconds(), TimeUnit.SECONDS);
}
diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionRpcServer.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionRpcServer.java
index a25e86926a1..ea3a69c54a2 100644
--- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionRpcServer.java
+++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionRpcServer.java
@@ -1,4 +1,4 @@
-// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.proxy.filedistribution;
import com.yahoo.concurrent.DaemonThreadFactory;
@@ -41,7 +41,7 @@ class FileDistributionRpcServer {
FileDistributionRpcServer(Supervisor supervisor, FileDownloader downloader) {
this.supervisor = supervisor;
this.downloader = downloader;
- declareFileDistributionMethods();
+ declareMethods();
}
void close() {
@@ -53,7 +53,7 @@ class FileDistributionRpcServer {
}
}
- private void declareFileDistributionMethods() {
+ private void declareMethods() {
// Legacy method, needs to be the same name as used in filedistributor
supervisor.addMethod(new Method("waitFor", "s", "s", this::getFile)
.methodDesc("get path to file reference")
diff --git a/config/src/main/java/com/yahoo/vespa/config/JRTConnectionPool.java b/config/src/main/java/com/yahoo/vespa/config/JRTConnectionPool.java
index 26eafb67c1b..b5147075972 100644
--- a/config/src/main/java/com/yahoo/vespa/config/JRTConnectionPool.java
+++ b/config/src/main/java/com/yahoo/vespa/config/JRTConnectionPool.java
@@ -29,6 +29,7 @@ public class JRTConnectionPool implements ConnectionPool {
private final Supervisor supervisor;
private final Map<String, JRTConnection> connections = new LinkedHashMap<>();
+ private final String poolName;
// The config sources used by this connection pool.
private ConfigSourceSet sourceSet = null;
@@ -37,11 +38,16 @@ public class JRTConnectionPool implements ConnectionPool {
private volatile JRTConnection currentConnection;
public JRTConnectionPool(ConfigSourceSet sourceSet) {
- supervisor = new Supervisor(new Transport("config-jrtpool-" + sourceSet.hashCode())).useSmallBuffers();
+ this(sourceSet, "config-jrt-pool-" + sourceSet.hashCode());
+ }
+
+ public JRTConnectionPool(ConfigSourceSet sourceSet, String poolName) {
+ this.poolName = poolName;
+ supervisor = new Supervisor(new Transport(poolName)).setDropEmptyBuffers(true);
addSources(sourceSet);
}
- public JRTConnectionPool(List<String> addresses) {
+ JRTConnectionPool(List<String> addresses) {
this(new ConfigSourceSet(addresses));
}
@@ -131,7 +137,7 @@ public class JRTConnectionPool implements ConnectionPool {
}
public String toString() {
- StringBuilder sb = new StringBuilder();
+ StringBuilder sb = new StringBuilder(poolName + ": ");
synchronized (connections) {
for (JRTConnection conn : connections.values()) {
sb.append(conn.toString());
diff --git a/config/src/main/java/com/yahoo/vespa/config/benchmark/LoadTester.java b/config/src/main/java/com/yahoo/vespa/config/benchmark/LoadTester.java
index 3a8d80e5ffe..43caf2d52fe 100644
--- a/config/src/main/java/com/yahoo/vespa/config/benchmark/LoadTester.java
+++ b/config/src/main/java/com/yahoo/vespa/config/benchmark/LoadTester.java
@@ -1,4 +1,4 @@
-// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.benchmark;
import com.yahoo.collections.Tuple2;
@@ -33,6 +33,8 @@ import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ThreadLocalRandom;
+import static com.yahoo.vespa.config.ConfigKey.createFull;
+
/**
* A config client for generating load against a config server or config proxy.
* <p>
@@ -69,8 +71,7 @@ public class LoadTester {
String configsList = parser.getBinarySwitches().get("-l");
String defPath = parser.getBinarySwitches().get("-dd");
debug = parser.getUnarySwitches().contains("-d");
- LoadTester loadTester = new LoadTester();
- loadTester.runLoad(host, port, iterations, threads, configsList, defPath);
+ new LoadTester().runLoad(host, port, iterations, threads, configsList, defPath);
}
private void runLoad(String host, int port, int iterations, int threads,
@@ -97,14 +98,17 @@ public class LoadTester {
private Map<ConfigDefinitionKey, Tuple2<String, String[]>> readDefs(String defPath) throws IOException {
Map<ConfigDefinitionKey, Tuple2<String, String[]>> ret = new HashMap<>();
if (defPath == null) return ret;
+
File defDir = new File(defPath);
if (!defDir.isDirectory()) {
- System.out.println("# Given def file dir is not a directory: " + defDir.getPath() + " , will not send def contents in requests.");
+ System.out.println("# Given def file dir is not a directory: " +
+ defDir.getPath() + " , will not send def contents in requests.");
return ret;
}
- final File[] files = defDir.listFiles();
+ File[] files = defDir.listFiles();
if (files == null) {
- System.out.println("# Given def file dir has no files: " + defDir.getPath() + " , will not send def contents in requests.");
+ System.out.println("# Given def file dir has no files: " +
+ defDir.getPath() + " , will not send def contents in requests.");
return ret;
}
for (File f : files) {
@@ -131,7 +135,7 @@ public class LoadTester {
sb.append((metrics.failedRequests));
sb.append("\n");
sb.append('#').append(TransportMetrics.getInstance().snapshot().toString()).append('\n');
- System.out.println(sb.toString());
+ System.out.println(sb);
}
private List<ConfigKey<?>> readConfigs(String configsList) throws IOException {
@@ -189,10 +193,10 @@ public class LoadTester {
private class LoadThread extends Thread {
- int iterations = 0;
- String host = "";
- int port = 0;
- Metrics metrics = new Metrics();
+ private final int iterations;
+ private final String host;
+ private final int port;
+ private final Metrics metrics = new Metrics();
LoadThread(int iterations, String host, int port) {
this.iterations = iterations;
@@ -204,44 +208,24 @@ public class LoadTester {
public void run() {
Spec spec = new Spec(host, port);
Target target = connect(spec);
- ConfigKey<?> reqKey;
- JRTClientConfigRequest request;
- int totConfs = configs.size();
- boolean reconnCycle = false; // to log reconn message only once, for instance at restart
+
for (int i = 0; i < iterations; i++) {
- reqKey = configs.get(ThreadLocalRandom.current().nextInt(totConfs));
+ ConfigKey<?> reqKey = configs.get(ThreadLocalRandom.current().nextInt(configs.size()));
ConfigDefinitionKey dKey = new ConfigDefinitionKey(reqKey);
Tuple2<String, String[]> defContent = defs.get(dKey);
if (defContent == null && defs.size() > 0) { // Only complain if we actually did run with a def dir
System.out.println("# No def found for " + dKey + ", not sending in request.");
}
- request = getRequest(ConfigKey.createFull(reqKey.getName(), reqKey.getConfigId(), reqKey.getNamespace(), defContent.first), defContent.second);
+ ConfigKey<?> configKey = createFull(reqKey.getName(), reqKey.getConfigId(), reqKey.getNamespace(), defContent.first);
+ JRTClientConfigRequest request = createRequest(configKey, defContent.second);
if (debug) System.out.println("# Requesting: " + reqKey);
long start = System.currentTimeMillis();
target.invokeSync(request.getRequest(), 10.0);
long end = System.currentTimeMillis();
if (request.isError()) {
- if ("Connection lost".equals(request.errorMessage()) || "Connection down".equals(request.errorMessage())) {
- try {
- Thread.sleep(100);
- } catch (InterruptedException e) {
- throw new RuntimeException(e);
- }
- if (!reconnCycle) {
- System.out.println("# Connection lost, reconnecting...");
- reconnCycle = true;
- }
- target.close();
- target = connect(spec);
- } else {
- System.err.println(request.errorMessage());
- }
- metrics.incFailedRequests();
+ target = handleError(request, spec, target);
} else {
- if (reconnCycle) {
- reconnCycle = false;
- System.out.println("# Connection OK");
- }
+ System.out.println("# Connection OK");
long duration = end - start;
if (debug) {
@@ -255,7 +239,7 @@ public class LoadTester {
}
}
- private JRTClientConfigRequest getRequest(ConfigKey<?> reqKey, String[] defContent) {
+ private JRTClientConfigRequest createRequest(ConfigKey<?> reqKey, String[] defContent) {
if (defContent == null) defContent = new String[0];
final long serverTimeout = 1000;
return JRTClientConfigRequestV3.createWithParams(reqKey, DefContent.fromList(Arrays.asList(defContent)),
@@ -266,6 +250,24 @@ public class LoadTester {
private Target connect(Spec spec) {
return supervisor.connect(spec);
}
+
+ private Target handleError(JRTClientConfigRequest request, Spec spec, Target target) {
+ if (List.of("Connection lost", "Connection down").contains(request.errorMessage())) {
+ try {
+ Thread.sleep(100);
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ System.out.println("# Connection lost, reconnecting...");
+ target.close();
+ target = connect(spec);
+ } else {
+ System.err.println(request.errorMessage());
+ }
+ metrics.incFailedRequests();
+ return target;
+ }
+
}
}
diff --git a/config/src/vespa/config/common/configvalue.cpp b/config/src/vespa/config/common/configvalue.cpp
index 8857eacc987..d5c0c2047df 100644
--- a/config/src/vespa/config/common/configvalue.cpp
+++ b/config/src/vespa/config/common/configvalue.cpp
@@ -57,8 +57,12 @@ ConfigValue::getLegacyFormat() const
const vespalib::string
ConfigValue::asJson() const {
- const vespalib::slime::Inspector & payload(_payload->getSlimePayload());
- return payload.toString();
+ if (_payload) {
+ const vespalib::slime::Inspector & payload(_payload->getSlimePayload());
+ return payload.toString();
+ } else {
+ return {};
+ }
}
void
@@ -74,7 +78,9 @@ ConfigValue::serializeV1(vespalib::slime::Cursor & cursor) const
void
ConfigValue::serializeV2(vespalib::slime::Cursor & cursor) const
{
- copySlimeObject(_payload->getSlimePayload(), cursor);
+ if (_payload) {
+ copySlimeObject(_payload->getSlimePayload(), cursor);
+ }
}
}
diff --git a/config/src/vespa/config/subscription/configsubscriptionset.cpp b/config/src/vespa/config/subscription/configsubscriptionset.cpp
index 949e36c046b..659458fb533 100644
--- a/config/src/vespa/config/subscription/configsubscriptionset.cpp
+++ b/config/src/vespa/config/subscription/configsubscriptionset.cpp
@@ -40,7 +40,7 @@ ConfigSubscriptionSet::acquireSnapshot(milliseconds timeoutInMillis, bool ignore
int64_t lastGeneration = _currentGeneration;
bool inSync = false;
- LOG(debug, "Going into nextConfig loop, time left is %" PRId64, timeLeft.count());
+ LOG(spam, "Going into nextConfig loop, time left is %" PRId64, timeLeft.count());
while (!isClosed() && (timeLeft.count() >= 0) && !inSync) {
size_t numChanged = 0;
size_t numGenerationChanged = 0;
@@ -62,7 +62,7 @@ ConfigSubscriptionSet::acquireSnapshot(milliseconds timeoutInMillis, bool ignore
} else {
LOG(spam, "Config subscription did not change, id(%s), defname(%s)", key.getConfigId().c_str(), key.getDefName().c_str());
}
- LOG(spam, "Previous generation is %" PRId64 ", updates is %" PRId64, generation, subscription->getGeneration());
+ LOG(spam, "Previous generation is %" PRId64 ", updates is %" PRId64, lastGeneration, subscription->getGeneration());
if (isGenerationNewer(subscription->getGeneration(), _currentGeneration)) {
numGenerationChanged++;
}
diff --git a/configd/src/apps/cmd/main.cpp b/configd/src/apps/cmd/main.cpp
index 33b4aa8111d..1b90483b65d 100644
--- a/configd/src/apps/cmd/main.cpp
+++ b/configd/src/apps/cmd/main.cpp
@@ -13,6 +13,24 @@
#include <vespa/log/log.h>
LOG_SETUP("vespa-sentinel-cmd");
+namespace {
+struct Method {
+ const char * name;
+ const char * rpcMethod;
+ bool noArgNeeded;
+ bool needsTimeoutArg;
+};
+const Method methods[] = {
+ { "list", "sentinel.ls", true, false },
+ { "restart", "sentinel.service.restart", false, false },
+ { "start", "sentinel.service.start", false, false },
+ { "stop", "sentinel.service.stop", false, false },
+ { "connectivity", "sentinel.report.connectivity", true, true }
+};
+
+}
+
+
class Cmd
{
private:
@@ -22,7 +40,7 @@ private:
public:
Cmd() : _server(), _target(nullptr) {}
~Cmd();
- int run(const char *cmd, const char *arg);
+ int run(const Method &cmd, const char *arg);
void initRPC(const char *spec);
void finiRPC();
};
@@ -41,6 +59,7 @@ void usage()
fprintf(stderr, " restart {service}\n");
fprintf(stderr, " start {service}\n");
fprintf(stderr, " stop {service}\n");
+ fprintf(stderr, " connectivity [milliseconds]\n");
}
void
@@ -63,7 +82,7 @@ Cmd::finiRPC()
int
-Cmd::run(const char *cmd, const char *arg)
+Cmd::run(const Method &cmd, const char *arg)
{
int retval = 0;
try {
@@ -74,33 +93,61 @@ Cmd::run(const char *cmd, const char *arg)
return 2;
}
FRT_RPCRequest *req = _server->supervisor().AllocRPCRequest();
- req->SetMethodName(cmd);
+ req->SetMethodName(cmd.rpcMethod);
- if (arg) {
+ int pingTimeoutMs = 5000;
+ if (cmd.needsTimeoutArg) {
+ if (arg) {
+ pingTimeoutMs = atoi(arg);
+ }
+ req->GetParams()->AddInt32(pingTimeoutMs);
+ } else if (arg) {
// one param
req->GetParams()->AddString(arg);
}
- _target->InvokeSync(req, 5.0);
+ _target->InvokeSync(req, 2 * pingTimeoutMs * 0.001);
if (req->IsError()) {
fprintf(stderr, "vespa-sentinel-cmd '%s' error %d: %s\n",
- cmd, req->GetErrorCode(), req->GetErrorMessage());
+ cmd.name, req->GetErrorCode(), req->GetErrorMessage());
retval = 1;
} else {
FRT_Values &answer = *(req->GetReturn());
const char *atypes = answer.GetTypeString();
- fprintf(stderr, "vespa-sentinel-cmd '%s' OK.\n", cmd);
- uint32_t idx = 0;
- while (atypes != nullptr && *atypes != '\0') {
- switch (*atypes) {
- case 's':
+ fprintf(stderr, "vespa-sentinel-cmd '%s' OK.\n", cmd.name);
+ if (atypes && (strcmp(atypes, "SS") == 0)) {
+ uint32_t numHosts = answer[0]._string_array._len;
+ uint32_t numStats = answer[1]._string_array._len;
+ FRT_StringValue *hosts = answer[0]._string_array._pt;
+ FRT_StringValue *stats = answer[1]._string_array._pt;
+ uint32_t ml = 0;
+ uint32_t j;
+ for (j = 0; j < numHosts; ++j) {
+ uint32_t hl = strlen(hosts[j]._str);
+ if (hl > ml) ml = hl;
+ }
+ for (j = 0; j < numHosts && j < numStats; ++j) {
+ printf("%-*s -> %s\n", ml, hosts[j]._str, stats[j]._str);
+ }
+ for (; j < numHosts; ++j) {
+ printf("Extra host: %s\n", hosts[j]._str);
+ }
+ for (; j < numStats; ++j) {
+ printf("Extra stat: %s\n", stats[j]._str);
+ }
+ } else {
+ uint32_t idx = 0;
+ while (atypes != nullptr && *atypes != '\0') {
+ switch (*atypes) {
+ case 's':
printf("%s\n", answer[idx]._string._str);
break;
- default:
+ default:
printf("BAD: unknown type %c\n", *atypes);
- }
- ++atypes;
+ }
+ ++atypes;
++idx;
+ }
}
}
req->SubRef();
@@ -108,19 +155,15 @@ Cmd::run(const char *cmd, const char *arg)
return retval;
}
-const char *
+const Method *
parseCmd(const char *arg)
{
- if (strcmp(arg, "list") == 0) {
- return "sentinel.ls";
- } else if (strcmp(arg, "restart") == 0) {
- return "sentinel.service.restart";
- } else if (strcmp(arg, "start") == 0) {
- return "sentinel.service.start";
- } else if (strcmp(arg, "stop") == 0) {
- return "sentinel.service.stop";
+ for (const auto & method : methods) {
+ if (strcmp(arg, method.name) == 0) {
+ return &method;
+ }
}
- return 0;
+ return nullptr;
}
void hookSignals() {
@@ -131,14 +174,15 @@ void hookSignals() {
int main(int argc, char** argv)
{
int retval = 1;
- const char *cmd = 0;
+ const Method *cmd = nullptr;
if (argc > 1) {
cmd = parseCmd(argv[1]);
}
- if (cmd) {
+ const char *extraArg = (argc > 2 ? argv[2] : nullptr);
+ if (cmd && (extraArg || cmd->noArgNeeded)) {
hookSignals();
Cmd runner;
- retval = runner.run(cmd, argc > 2 ? argv[2] : 0);
+ retval = runner.run(*cmd, extraArg);
} else {
usage();
}
diff --git a/configd/src/apps/sentinel/CMakeLists.txt b/configd/src/apps/sentinel/CMakeLists.txt
index 43b4f79a0b2..0323df2864f 100644
--- a/configd/src/apps/sentinel/CMakeLists.txt
+++ b/configd/src/apps/sentinel/CMakeLists.txt
@@ -9,9 +9,11 @@ vespa_add_executable(configd_config-sentinel_app
line-splitter.cpp
manager.cpp
metrics.cpp
+ model-owner.cpp
output-connection.cpp
outward-check.cpp
peer-check.cpp
+ report-connectivity.cpp
rpchooks.cpp
rpcserver.cpp
sentinel.cpp
diff --git a/configd/src/apps/sentinel/cc-result.h b/configd/src/apps/sentinel/cc-result.h
new file mode 100644
index 00000000000..3468cf4324a
--- /dev/null
+++ b/configd/src/apps/sentinel/cc-result.h
@@ -0,0 +1,9 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+namespace config::sentinel {
+
+enum class CcResult { UNKNOWN, CONN_FAIL, UNREACHABLE_UP, INDIRECT_PING_FAIL, INDIRECT_PING_UNAVAIL, ALL_OK };
+
+}
diff --git a/configd/src/apps/sentinel/config-owner.cpp b/configd/src/apps/sentinel/config-owner.cpp
index 26972911b29..89d5796ae79 100644
--- a/configd/src/apps/sentinel/config-owner.cpp
+++ b/configd/src/apps/sentinel/config-owner.cpp
@@ -6,11 +6,11 @@
#include <string>
#include <vespa/log/log.h>
-LOG_SETUP(".config-owner");
+LOG_SETUP(".sentinel.config-owner");
namespace config::sentinel {
-ConfigOwner::ConfigOwner() : _subscriber() {}
+ConfigOwner::ConfigOwner() = default;
ConfigOwner::~ConfigOwner() = default;
@@ -27,7 +27,7 @@ ConfigOwner::doConfigure()
_currGeneration = _subscriber.getGeneration();
const SentinelConfig& config(*_currConfig);
const auto & app = config.application;
- LOG(config, "Sentinel got %zd service elements [tenant(%s), application(%s), instance(%s)] for config generation %zd",
+ LOG(config, "Sentinel got %zd service elements [tenant(%s), application(%s), instance(%s)] for config generation %" PRId64,
config.service.size(), app.tenant.c_str(), app.name.c_str(), app.instance.c_str(), _currGeneration);
}
@@ -42,29 +42,4 @@ ConfigOwner::checkForConfigUpdate() {
return false;
}
-std::unique_ptr<ModelConfig>
-ConfigOwner::fetchModelConfig(std::chrono::milliseconds timeout)
-{
- std::unique_ptr<ModelConfig> modelConfig;
- ConfigSubscriber tempSubscriber;
- try {
- ConfigHandle<ModelConfig>::UP modelHandle =
- tempSubscriber.subscribe<ModelConfig>("admin/model", timeout);
- if (tempSubscriber.nextGenerationNow()) {
- modelConfig = modelHandle->getConfig();
- LOG(config, "Sentinel got model info [version %s] for %zd hosts [config generation %zd]",
- modelConfig->vespaVersion.c_str(), modelConfig->hosts.size(),
- tempSubscriber.getGeneration());
- }
- } catch (ConfigTimeoutException & ex) {
- LOG(warning, "Timeout getting model config: %s [skipping connectivity checks]", ex.getMessage().c_str());
- } catch (InvalidConfigException& ex) {
- LOG(warning, "Invalid model config: %s [skipping connectivity checks]", ex.getMessage().c_str());
- } catch (ConfigRuntimeException& ex) {
- LOG(warning, "Runtime exception getting model config: %s [skipping connectivity checks]", ex.getMessage().c_str());
-
- }
- return modelConfig;
-}
-
}
diff --git a/configd/src/apps/sentinel/config-owner.h b/configd/src/apps/sentinel/config-owner.h
index 2850e6b3904..b72aed59271 100644
--- a/configd/src/apps/sentinel/config-owner.h
+++ b/configd/src/apps/sentinel/config-owner.h
@@ -7,10 +7,6 @@
#include <vespa/config/config.h>
using cloud::config::SentinelConfig;
-using cloud::config::ModelConfig;
-
-using config::ConfigSubscriber;
-using config::ConfigHandle;
namespace config::sentinel {
@@ -19,9 +15,9 @@ namespace config::sentinel {
**/
class ConfigOwner {
private:
- ConfigSubscriber _subscriber;
- ConfigHandle<SentinelConfig>::UP _sentinelHandle;
-
+ config::ConfigSubscriber _subscriber;
+ config::ConfigHandle<SentinelConfig>::UP _sentinelHandle;
+
int64_t _currGeneration = -1;
std::unique_ptr<SentinelConfig> _currConfig;
@@ -37,7 +33,6 @@ public:
bool hasConfig() const { return _currConfig.get() != nullptr; }
const SentinelConfig& getConfig() const { return *_currConfig; }
int64_t getGeneration() const { return _currGeneration; }
- static std::unique_ptr<ModelConfig> fetchModelConfig(std::chrono::milliseconds timeout);
};
}
diff --git a/configd/src/apps/sentinel/connectivity.cpp b/configd/src/apps/sentinel/connectivity.cpp
index 9cced1d3475..5996d709c5d 100644
--- a/configd/src/apps/sentinel/connectivity.cpp
+++ b/configd/src/apps/sentinel/connectivity.cpp
@@ -1,5 +1,6 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include "config-owner.h"
#include "connectivity.h"
#include "outward-check.h"
#include <vespa/defaults.h>
@@ -9,47 +10,113 @@
#include <thread>
#include <chrono>
-LOG_SETUP(".connectivity");
+LOG_SETUP(".sentinel.connectivity");
using vespalib::make_string_short::fmt;
using namespace std::chrono_literals;
namespace config::sentinel {
-Connectivity::Connectivity(const SentinelConfig::Connectivity & config, RpcServer &rpcServer)
- : _config(config),
- _rpcServer(rpcServer)
-{
- LOG(config, "connectivity.maxBadReverseCount = %d", _config.maxBadReverseCount);
- LOG(config, "connectivity.maxBadOutPercent = %d", _config.maxBadOutPercent);
-}
-
+Connectivity::Connectivity() = default;
Connectivity::~Connectivity() = default;
namespace {
-const char *toString(CcResult value) {
+std::string toString(CcResult value) {
switch (value) {
case CcResult::UNKNOWN: return "BAD: missing result"; // very very bad
- case CcResult::REVERSE_FAIL: return "connect OK, but reverse check FAILED"; // very bad
+ case CcResult::INDIRECT_PING_FAIL: return "connect OK, but reverse check FAILED"; // very bad
+ case CcResult::UNREACHABLE_UP: return "unreachable from me, but up"; // very bad
case CcResult::CONN_FAIL: return "failed to connect"; // bad
- case CcResult::REVERSE_UNAVAIL: return "connect OK (but reverse check unavailable)"; // unfortunate
+ case CcResult::INDIRECT_PING_UNAVAIL: return "connect OK (but reverse check unavailable)"; // unfortunate
case CcResult::ALL_OK: return "OK: both ways connectivity verified"; // good
}
LOG(error, "Unknown CcResult enum value: %d", (int)value);
LOG_ABORT("Unknown CcResult enum value");
}
-std::map<std::string, std::string> specsFrom(const ModelConfig &model) {
- std::map<std::string, std::string> checkSpecs;
+using ConnectivityMap = std::map<std::string, OutwardCheck>;
+using HostAndPort = Connectivity::HostAndPort;
+using SpecMap = Connectivity::SpecMap;
+
+std::string spec(const SpecMap::value_type &host_and_port) {
+ return fmt("tcp/%s:%d", host_and_port.first.c_str(), host_and_port.second);
+}
+
+void classifyConnFails(ConnectivityMap &connectivityMap,
+ const SpecMap &specMap,
+ RpcServer &rpcServer)
+{
+ std::vector<HostAndPort> failedConnSpecs;
+ std::vector<HostAndPort> goodNeighborSpecs;
+ std::string myHostname = vespa::Defaults::vespaHostname();
+ for (auto & [hostname, check] : connectivityMap) {
+ if (hostname == myHostname) {
+ if (check.result() == CcResult::CONN_FAIL) {
+ check.classifyResult(CcResult::UNREACHABLE_UP);
+ }
+ } else {
+ auto iter = specMap.find(hostname);
+ LOG_ASSERT(iter != specMap.end());
+ if (check.result() == CcResult::ALL_OK) {
+ goodNeighborSpecs.push_back(*iter);
+ }
+ if (check.result() == CcResult::CONN_FAIL) {
+ failedConnSpecs.push_back(*iter);
+ }
+ }
+ }
+ if ((failedConnSpecs.size() == 0) || (goodNeighborSpecs.size() == 0)) {
+ return;
+ }
+ for (const auto & toClassify : failedConnSpecs) {
+ const auto & [ nameToCheck, portToCheck ] = toClassify;
+ auto cmIter = connectivityMap.find(nameToCheck);
+ LOG_ASSERT(cmIter != connectivityMap.end());
+ OutwardCheckContext cornerContext(goodNeighborSpecs.size(), nameToCheck, portToCheck, rpcServer.orb());
+ ConnectivityMap cornerProbes;
+ for (const auto & hp : goodNeighborSpecs) {
+ cornerProbes.try_emplace(hp.first, spec(hp), cornerContext);
+ }
+ cornerContext.latch.await();
+ size_t numReportsUp = 0;
+ size_t numReportsDown = 0;
+ for (const auto & [hostname, probe] : cornerProbes) {
+ if (probe.result() == CcResult::INDIRECT_PING_FAIL) ++numReportsDown;
+ if (probe.result() == CcResult::ALL_OK) ++numReportsUp;
+ }
+ if (numReportsUp > 0) {
+ LOG(debug, "Unreachable: %s is up according to %zd hosts (down according to me + %zd others)",
+ nameToCheck.c_str(), numReportsUp, numReportsDown);
+ OutwardCheckContext reverseContext(1,
+ myHostname,
+ rpcServer.getPort(),
+ rpcServer.orb());
+ OutwardCheck check(spec(toClassify), reverseContext);
+ reverseContext.latch.await();
+ auto secondResult = check.result();
+ if (secondResult == CcResult::CONN_FAIL) {
+ cmIter->second.classifyResult(CcResult::UNREACHABLE_UP);
+ } else {
+ LOG(debug, "Recheck %s gives new result: %s",
+ nameToCheck.c_str(), toString(secondResult).c_str());
+ cmIter->second.classifyResult(secondResult);
+ }
+ }
+ }
+}
+
+} // namespace <unnamed>
+
+SpecMap Connectivity::specsFrom(const ModelConfig &model) {
+ SpecMap checkSpecs;
for (const auto & h : model.hosts) {
bool foundSentinelPort = false;
for (const auto & s : h.services) {
if (s.name == "config-sentinel") {
for (const auto & p : s.ports) {
if (p.tags.find("rpc") != p.tags.npos) {
- auto spec = fmt("tcp/%s:%d", h.name.c_str(), p.number);
- checkSpecs[h.name] = spec;
+ checkSpecs[h.name] = p.number;
foundSentinelPort = true;
}
}
@@ -63,50 +130,84 @@ std::map<std::string, std::string> specsFrom(const ModelConfig &model) {
return checkSpecs;
}
+void Connectivity::configure(const SentinelConfig::Connectivity &config,
+ const ModelConfig &model)
+{
+ _config = config;
+ LOG(config, "connectivity.maxBadCount = %d", _config.maxBadCount);
+ LOG(config, "connectivity.minOkPercent = %d", _config.minOkPercent);
+ _checkSpecs = specsFrom(model);
}
-Connectivity::CheckResult
-Connectivity::checkConnectivity(const ModelConfig &model) {
- const auto checkSpecs = specsFrom(model);
- size_t clusterSize = checkSpecs.size();
+bool
+Connectivity::checkConnectivity(RpcServer &rpcServer) {
+ size_t clusterSize = _checkSpecs.size();
+ if (clusterSize == 0) {
+ LOG(warning, "could not get model config, skipping connectivity checks");
+ return true;
+ }
+ std::string myHostname = vespa::Defaults::vespaHostname();
OutwardCheckContext checkContext(clusterSize,
- vespa::Defaults::vespaHostname(),
- _rpcServer.getPort(),
- _rpcServer.orb());
- std::map<std::string, OutwardCheck> connectivityMap;
- for (const auto & [ hn, spec ] : checkSpecs) {
- connectivityMap.try_emplace(hn, spec, checkContext);
+ myHostname,
+ rpcServer.getPort(),
+ rpcServer.orb());
+ ConnectivityMap connectivityMap;
+ for (const auto &host_and_port : _checkSpecs) {
+ connectivityMap.try_emplace(host_and_port.first, spec(host_and_port), checkContext);
}
checkContext.latch.await();
- size_t numFailedConns = 0;
- size_t numFailedReverse = 0;
- bool allChecksOk = true;
+ classifyConnFails(connectivityMap, _checkSpecs, rpcServer);
+ Accumulator accumulated;
for (const auto & [hostname, check] : connectivityMap) {
+ std::string detail = toString(check.result());
+ std::string prev = _detailsPerHost[hostname];
+ if (prev != detail) {
+ LOG(info, "Connectivity check details: %s -> %s", hostname.c_str(), detail.c_str());
+ }
+ _detailsPerHost[hostname] = detail;
LOG_ASSERT(check.result() != CcResult::UNKNOWN);
- if (check.result() == CcResult::CONN_FAIL) ++numFailedConns;
- if (check.result() == CcResult::REVERSE_FAIL) ++numFailedReverse;
+ accumulated.handleResult(check.result());
}
- if (numFailedReverse > size_t(_config.maxBadReverseCount)) {
- LOG(warning, "%zu of %zu nodes report problems connecting to me (max is %d)",
- numFailedReverse, clusterSize, _config.maxBadReverseCount);
- allChecksOk = false;
+ return accumulated.enoughOk(_config);
+}
+
+void Connectivity::Accumulator::handleResult(CcResult value) {
+ ++_numHandled;
+ switch (value) {
+ case CcResult::UNKNOWN:
+ case CcResult::UNREACHABLE_UP:
+ case CcResult::INDIRECT_PING_FAIL:
+ ++_numBad;
+ break;
+ case CcResult::CONN_FAIL:
+ // not OK, but not a serious issue either
+ break;
+ case CcResult::INDIRECT_PING_UNAVAIL:
+ case CcResult::ALL_OK:
+ ++_numOk;
+ break;
}
- if (numFailedConns * 100.0 > _config.maxBadOutPercent * clusterSize) {
- double pct = numFailedConns * 100.0 / clusterSize;
- LOG(warning, "Problems connecting to %zu of %zu nodes, %.2f %% (max is %d)",
- numFailedConns, clusterSize, pct, _config.maxBadOutPercent);
- allChecksOk = false;
+}
+
+bool Connectivity::Accumulator::enoughOk(const SentinelConfig::Connectivity &config) const {
+ bool enough = true;
+ if (_numBad > size_t(config.maxBadCount)) {
+ LOG(warning, "%zu of %zu nodes up but with network connectivity problems (max is %d)",
+ _numBad, _numHandled, config.maxBadCount);
+ enough = false;
}
- std::vector<std::string> details;
- for (const auto & [hostname, check] : connectivityMap) {
- std::string detail = fmt("%s -> %s", hostname.c_str(), toString(check.result()));
- details.push_back(detail);
+ if (_numOk * 100.0 < config.minOkPercent * _numHandled) {
+ double pct = _numOk * 100.0 / _numHandled;
+ LOG(warning, "Only %zu of %zu nodes are up and OK, %.1f%% (min is %d%%)",
+ _numOk, _numHandled, pct, config.minOkPercent);
+ enough = false;
+ }
+ if (_numOk == _numHandled) {
+ LOG(info, "All connectivity checks OK, proceeding with service startup");
+ } else if (enough) {
+ LOG(info, "Enough connectivity checks OK, proceeding with service startup");
}
- CheckResult result{false, false, {}};
- result.enoughOk = allChecksOk;
- result.allOk = (numFailedConns == 0) && (numFailedReverse == 0);
- result.details = std::move(details);
- return result;
+ return enough;
}
}
diff --git a/configd/src/apps/sentinel/connectivity.h b/configd/src/apps/sentinel/connectivity.h
index 0e32b5243e0..8d923387ffa 100644
--- a/configd/src/apps/sentinel/connectivity.h
+++ b/configd/src/apps/sentinel/connectivity.h
@@ -3,10 +3,11 @@
#pragma once
#include "rpcserver.h"
+#include "cc-result.h"
#include <vespa/config-sentinel.h>
#include <vespa/config-model.h>
#include <string>
-#include <vector>
+#include <map>
using cloud::config::SentinelConfig;
using cloud::config::ModelConfig;
@@ -18,19 +19,28 @@ namespace config::sentinel {
**/
class Connectivity {
public:
- Connectivity(const SentinelConfig::Connectivity & config, RpcServer &rpcServer);
- ~Connectivity();
-
- struct CheckResult {
- bool enoughOk;
- bool allOk;
- std::vector<std::string> details;
- };
+ using SpecMap = std::map<std::string, int>;
+ using HostAndPort = SpecMap::value_type;
- CheckResult checkConnectivity(const ModelConfig &model);
+ Connectivity();
+ ~Connectivity();
+ void configure(const SentinelConfig::Connectivity &config,
+ const ModelConfig &model);
+ bool checkConnectivity(RpcServer &rpcServer);
+ static SpecMap specsFrom(const ModelConfig &model);
private:
- const SentinelConfig::Connectivity _config;
- RpcServer &_rpcServer;
+ class Accumulator {
+ private:
+ size_t _numOk = 0;
+ size_t _numBad = 0;
+ size_t _numHandled = 0;
+ public:
+ void handleResult(CcResult value);
+ bool enoughOk(const SentinelConfig::Connectivity &config) const;
+ };
+ SentinelConfig::Connectivity _config;
+ SpecMap _checkSpecs;
+ std::map<std::string, std::string> _detailsPerHost;
};
}
diff --git a/configd/src/apps/sentinel/env.cpp b/configd/src/apps/sentinel/env.cpp
index e4174ee450d..c345de1df36 100644
--- a/configd/src/apps/sentinel/env.cpp
+++ b/configd/src/apps/sentinel/env.cpp
@@ -2,27 +2,41 @@
#include "env.h"
#include "check-completion-handler.h"
-#include "outward-check.h"
+#include "connectivity.h"
#include <vespa/defaults.h>
#include <vespa/log/log.h>
#include <vespa/config/common/exceptions.h>
#include <vespa/vespalib/util/exceptions.h>
+#include <vespa/vespalib/util/signalhandler.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <thread>
#include <chrono>
-LOG_SETUP(".env");
+LOG_SETUP(".sentinel.env");
using vespalib::make_string_short::fmt;
using namespace std::chrono_literals;
namespace config::sentinel {
+namespace {
+
+void maybeStopNow() {
+ if (vespalib::SignalHandler::INT.check() ||
+ vespalib::SignalHandler::TERM.check())
+ {
+ throw vespalib::FatalException("got signal during boot()");
+ }
+}
+
constexpr std::chrono::milliseconds CONFIG_TIMEOUT_MS = 3min;
-constexpr std::chrono::milliseconds MODEL_TIMEOUT_MS = 1500ms;
+constexpr int maxConnectivityRetries = 100;
+
+} // namespace <unnamed>
Env::Env()
: _cfgOwner(),
+ _modelOwner("admin/model"),
_rpcCommandQueue(),
_rpcServer(),
_stateApi(),
@@ -31,6 +45,7 @@ Env::Env()
_statePort(0)
{
_startMetrics.startedTime = vespalib::steady_clock::now();
+ _stateApi.myHealth.setFailed("initializing...");
}
Env::~Env() = default;
@@ -38,17 +53,41 @@ Env::~Env() = default;
void Env::boot(const std::string &configId) {
LOG(debug, "Reading configuration for ID: %s", configId.c_str());
_cfgOwner.subscribe(configId, CONFIG_TIMEOUT_MS);
- bool ok = _cfgOwner.checkForConfigUpdate();
+ _modelOwner.start(CONFIG_TIMEOUT_MS, true);
// subscribe() should throw if something is not OK
- LOG_ASSERT(ok && _cfgOwner.hasConfig());
- const auto & cfg = _cfgOwner.getConfig();
- LOG(config, "Booting sentinel '%s' with [stateserver port %d] and [rpc port %d]",
- configId.c_str(), cfg.port.telnet, cfg.port.rpc);
- rpcPort(cfg.port.rpc);
- statePort(cfg.port.telnet);
- if (auto up = ConfigOwner::fetchModelConfig(MODEL_TIMEOUT_MS)) {
- waitForConnectivity(*up);
+ Connectivity checker;
+ for (int retry = 0; retry < maxConnectivityRetries; ++retry) {
+ bool changed = _cfgOwner.checkForConfigUpdate();
+ LOG_ASSERT(changed || retry > 0);
+ if (changed) {
+ LOG_ASSERT(_cfgOwner.hasConfig());
+ const auto & cfg = _cfgOwner.getConfig();
+ LOG(config, "Booting sentinel '%s' with [stateserver port %d] and [rpc port %d]",
+ configId.c_str(), cfg.port.telnet, cfg.port.rpc);
+ rpcPort(cfg.port.rpc);
+ statePort(cfg.port.telnet);
+ _modelOwner.checkForUpdates();
+ auto model = _modelOwner.getModelConfig();
+ if (model.has_value()) {
+ checker.configure(cfg.connectivity, model.value());
+ }
+ }
+ if (checker.checkConnectivity(*_rpcServer)) {
+ _stateApi.myHealth.setOk();
+ return;
+ } else {
+ _stateApi.myHealth.setFailed("FAILED connectivity check");
+ if ((retry % 10) == 0) {
+ LOG(warning, "Bad network connectivity (try %d)", 1+retry);
+ }
+ for (int i = 0; i < 5; ++i) {
+ respondAsEmpty();
+ maybeStopNow();
+ std::this_thread::sleep_for(600ms);
+ }
+ }
}
+ throw vespalib::FatalException("Giving up - too many connectivity check failures");
}
void Env::rpcPort(int port) {
@@ -61,7 +100,7 @@ void Env::rpcPort(int port) {
if (_rpcServer && port == _rpcServer->getPort()) {
return; // ok already
}
- _rpcServer = std::make_unique<RpcServer>(port, _rpcCommandQueue);
+ _rpcServer = std::make_unique<RpcServer>(port, _rpcCommandQueue, _modelOwner);
}
void Env::statePort(int port) {
@@ -83,7 +122,6 @@ void Env::statePort(int port) {
void Env::notifyConfigUpdated() {
vespalib::ComponentConfigProducer::Config current("sentinel", _cfgOwner.getGeneration(), "ok");
_stateApi.myComponents.addConfig(current);
-
}
void Env::respondAsEmpty() {
@@ -93,60 +131,4 @@ void Env::respondAsEmpty() {
}
}
-namespace {
-
-const char *toString(CcResult value) {
- switch (value) {
- case CcResult::UNKNOWN: return "unknown";
- case CcResult::CONN_FAIL: return "failed to connect";
- case CcResult::REVERSE_FAIL: return "connect OK, but reverse check FAILED";
- case CcResult::REVERSE_UNAVAIL: return "connect OK, but reverse check unavailable";
- case CcResult::ALL_OK: return "both ways connectivity OK";
- }
- LOG(error, "Unknown CcResult enum value: %d", (int)value);
- LOG_ABORT("Unknown CcResult enum value");
-}
-
-std::map<std::string, std::string> specsFrom(const ModelConfig &model) {
- std::map<std::string, std::string> checkSpecs;
- for (const auto & h : model.hosts) {
- bool foundSentinelPort = false;
- for (const auto & s : h.services) {
- if (s.name == "config-sentinel") {
- for (const auto & p : s.ports) {
- if (p.tags.find("rpc") != p.tags.npos) {
- auto spec = fmt("tcp/%s:%d", h.name.c_str(), p.number);
- checkSpecs[h.name] = spec;
- foundSentinelPort = true;
- }
- }
- }
- }
- if (! foundSentinelPort) {
- LOG(warning, "Did not find 'config-sentinel' RPC port in model for host %s [%zd services]",
- h.name.c_str(), h.services.size());
- }
- }
- return checkSpecs;
-}
-
-}
-
-void Env::waitForConnectivity(const ModelConfig &model) {
- auto checkSpecs = specsFrom(model);
- OutwardCheckContext checkContext(checkSpecs.size(),
- vespa::Defaults::vespaHostname(),
- _rpcServer->getPort(),
- _rpcServer->orb());
- std::map<std::string, OutwardCheck> connectivityMap;
- for (const auto & [ hn, spec ] : checkSpecs) {
- connectivityMap.try_emplace(hn, spec, checkContext);
- }
- checkContext.latch.await();
- for (const auto & [hostname, check] : connectivityMap) {
- LOG(info, "outward check status for host %s is: %s",
- hostname.c_str(), toString(check.result()));
- }
-}
-
}
diff --git a/configd/src/apps/sentinel/env.h b/configd/src/apps/sentinel/env.h
index f117854f006..1bd3a7380ba 100644
--- a/configd/src/apps/sentinel/env.h
+++ b/configd/src/apps/sentinel/env.h
@@ -5,6 +5,7 @@
#include "cmdq.h"
#include "config-owner.h"
#include "metrics.h"
+#include "model-owner.h"
#include "rpcserver.h"
#include "state-api.h"
#include <vespa/vespalib/net/state_server.h>
@@ -22,6 +23,7 @@ public:
~Env();
ConfigOwner &configOwner() { return _cfgOwner; }
+ ModelOwner &modelOwner() { return _modelOwner; }
CommandQueue &commandQueue() { return _rpcCommandQueue; }
StartMetrics &metrics() { return _startMetrics; }
@@ -32,8 +34,8 @@ public:
void notifyConfigUpdated();
private:
void respondAsEmpty();
- void waitForConnectivity(const ModelConfig &model);
ConfigOwner _cfgOwner;
+ ModelOwner _modelOwner;
CommandQueue _rpcCommandQueue;
std::unique_ptr<RpcServer> _rpcServer;
StateApi _stateApi;
diff --git a/configd/src/apps/sentinel/manager.cpp b/configd/src/apps/sentinel/manager.cpp
index 6e0ed78211c..839f7c96ae2 100644
--- a/configd/src/apps/sentinel/manager.cpp
+++ b/configd/src/apps/sentinel/manager.cpp
@@ -11,7 +11,7 @@
#include <sys/wait.h>
#include <vespa/log/log.h>
-LOG_SETUP(".manager");
+LOG_SETUP(".sentinel.manager");
namespace config::sentinel {
@@ -116,6 +116,7 @@ Manager::doWork()
if (_env.configOwner().checkForConfigUpdate()) {
doConfigure();
}
+ _env.modelOwner().checkForUpdates();
handleRestarts();
handleCommands();
handleOutputs();
diff --git a/configd/src/apps/sentinel/model-owner.cpp b/configd/src/apps/sentinel/model-owner.cpp
new file mode 100644
index 00000000000..cfa9f1f6bf5
--- /dev/null
+++ b/configd/src/apps/sentinel/model-owner.cpp
@@ -0,0 +1,66 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "model-owner.h"
+#include <vespa/vespalib/util/exceptions.h>
+#include <vespa/config/common/exceptions.h>
+#include <string>
+#include <chrono>
+#include <vespa/log/log.h>
+
+LOG_SETUP(".sentinel.model-owner");
+
+using namespace std::chrono_literals;
+
+namespace config::sentinel {
+
+std::optional<ModelConfig> ModelOwner::getModelConfig() {
+ std::lock_guard<std::mutex> guard(_lock);
+ if (_modelConfig) {
+ return ModelConfig(*_modelConfig);
+ } else {
+ return {};
+ }
+}
+
+
+ModelOwner::ModelOwner(const std::string &configId)
+ : _configId(configId)
+{}
+
+ModelOwner::~ModelOwner() = default;
+
+void
+ModelOwner::start(std::chrono::milliseconds timeout, bool firstTime) {
+ try {
+ _modelHandle =_subscriber.subscribe<ModelConfig>(_configId, timeout);
+ } catch (ConfigTimeoutException & ex) {
+ if (firstTime) {
+ LOG(warning, "Timeout getting model config: %s [skipping connectivity checks]", ex.message());
+ }
+ } catch (InvalidConfigException& ex) {
+ if (firstTime) {
+ LOG(warning, "Invalid model config: %s [skipping connectivity checks]", ex.message());
+ }
+ } catch (ConfigRuntimeException& ex) {
+ if (firstTime) {
+ LOG(warning, "Runtime exception getting model config: %s [skipping connectivity checks]", ex.message());
+ }
+ }
+}
+
+void
+ModelOwner::checkForUpdates() {
+ if (! _modelHandle) {
+ start(250ms, false);
+ }
+ if (_modelHandle && _subscriber.nextGenerationNow()) {
+ if (auto newModel = _modelHandle->getConfig()) {
+ LOG(config, "Sentinel got model info [version %s] for %zd hosts [config generation %" PRId64 "]",
+ newModel->vespaVersion.c_str(), newModel->hosts.size(), _subscriber.getGeneration());
+ std::lock_guard<std::mutex> guard(_lock);
+ _modelConfig = std::move(newModel);
+ }
+ }
+}
+
+}
diff --git a/configd/src/apps/sentinel/model-owner.h b/configd/src/apps/sentinel/model-owner.h
new file mode 100644
index 00000000000..0513463e955
--- /dev/null
+++ b/configd/src/apps/sentinel/model-owner.h
@@ -0,0 +1,32 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/config-model.h>
+#include <vespa/config/config.h>
+#include <optional>
+#include <mutex>
+
+using cloud::config::ModelConfig;
+
+namespace config::sentinel {
+
+/**
+ * Handles config subscription and has a snapshot of current config.
+ **/
+class ModelOwner {
+private:
+ std::string _configId;
+ config::ConfigSubscriber _subscriber;
+ config::ConfigHandle<ModelConfig>::UP _modelHandle;
+ std::mutex _lock;
+ std::unique_ptr<ModelConfig> _modelConfig;
+public:
+ ModelOwner(const std::string &configId);
+ ~ModelOwner();
+ void start(std::chrono::milliseconds timeout, bool firstTime);
+ void checkForUpdates();
+ std::optional<ModelConfig> getModelConfig();
+};
+
+}
diff --git a/configd/src/apps/sentinel/output-connection.cpp b/configd/src/apps/sentinel/output-connection.cpp
index 5dbe1c22f58..caf97c92eea 100644
--- a/configd/src/apps/sentinel/output-connection.cpp
+++ b/configd/src/apps/sentinel/output-connection.cpp
@@ -5,7 +5,7 @@
#include <cstring>
#include <vespa/log/log.h>
-LOG_SETUP("");
+LOG_SETUP(".sentinel.output-connection");
#include <vespa/log/llparser.h>
#include "output-connection.h"
diff --git a/configd/src/apps/sentinel/outward-check.cpp b/configd/src/apps/sentinel/outward-check.cpp
index 5fed69d0b6e..391e5fee8bf 100644
--- a/configd/src/apps/sentinel/outward-check.cpp
+++ b/configd/src/apps/sentinel/outward-check.cpp
@@ -3,10 +3,12 @@
#include "outward-check.h"
#include <vespa/log/log.h>
-LOG_SETUP(".outward-check");
+LOG_SETUP(".sentinel.outward-check");
namespace config::sentinel {
+OutwardCheckContext::~OutwardCheckContext() = default;
+
OutwardCheck::OutwardCheck(const std::string &spec, OutwardCheckContext &context)
: _spec(spec),
_context(context)
@@ -14,8 +16,8 @@ OutwardCheck::OutwardCheck(const std::string &spec, OutwardCheckContext &context
_target = context.orb.GetTarget(spec.c_str());
_req = context.orb.AllocRPCRequest();
_req->SetMethodName("sentinel.check.connectivity");
- _req->GetParams()->AddString(context.myHostname);
- _req->GetParams()->AddInt32(context.myPortnum);
+ _req->GetParams()->AddString(context.targetHostname.c_str());
+ _req->GetParams()->AddInt32(context.targetPortnum);
_req->GetParams()->AddInt32(500);
_target->InvokeAsync(_req, 1.500, this);
}
@@ -29,17 +31,21 @@ void OutwardCheck::RequestDone(FRT_RPCRequest *req) {
if (answer == "ok") {
LOG(debug, "ping to %s with reverse connectivity OK", _spec.c_str());
_result = CcResult::ALL_OK;
- } else {
+ } else if (answer == "bad") {
LOG(debug, "connected to %s, but reverse connectivity fails: %s",
_spec.c_str(), answer.c_str());
- _result = CcResult::REVERSE_FAIL;
+ _result = CcResult::INDIRECT_PING_FAIL;
+ } else {
+ LOG(warning, "connected to %s, but strange reverse connectivity: %s",
+ _spec.c_str(), answer.c_str());
+ _result = CcResult::INDIRECT_PING_UNAVAIL;
}
} else if (req->GetErrorCode() == FRTE_RPC_NO_SUCH_METHOD ||
req->GetErrorCode() == FRTE_RPC_WRONG_PARAMS ||
req->GetErrorCode() == FRTE_RPC_WRONG_RETURN)
{
LOG(debug, "Connected OK to %s but no reverse connectivity check available", _spec.c_str());
- _result = CcResult::REVERSE_UNAVAIL;
+ _result = CcResult::INDIRECT_PING_UNAVAIL;
} else {
LOG(debug, "error on request to %s : %s (%d)", _spec.c_str(),
req->GetErrorMessage(), req->GetErrorCode());
@@ -52,4 +58,9 @@ void OutwardCheck::RequestDone(FRT_RPCRequest *req) {
_context.latch.countDown();
}
+void OutwardCheck::classifyResult(CcResult value) {
+ LOG_ASSERT(_result == CcResult::CONN_FAIL);
+ _result = value;
+}
+
}
diff --git a/configd/src/apps/sentinel/outward-check.h b/configd/src/apps/sentinel/outward-check.h
index 01a298aee18..0e53b9010dc 100644
--- a/configd/src/apps/sentinel/outward-check.h
+++ b/configd/src/apps/sentinel/outward-check.h
@@ -1,5 +1,8 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include "cc-result.h"
#include <string>
#include <vespa/vespalib/util/count_down_latch.h>
#include <vespa/fnet/frt/supervisor.h>
@@ -12,22 +15,21 @@ namespace config::sentinel {
struct OutwardCheckContext {
vespalib::CountDownLatch latch;
- const char * myHostname;
- int myPortnum;
+ std::string targetHostname;
+ int targetPortnum;
FRT_Supervisor &orb;
OutwardCheckContext(size_t count,
- const char * hostname,
+ const std::string &hostname,
int portnumber,
FRT_Supervisor &supervisor)
: latch(count),
- myHostname(hostname),
- myPortnum(portnumber),
+ targetHostname(hostname),
+ targetPortnum(portnumber),
orb(supervisor)
{}
+ ~OutwardCheckContext();
};
-enum class CcResult { UNKNOWN, CONN_FAIL, REVERSE_FAIL, REVERSE_UNAVAIL, ALL_OK };
-
class OutwardCheck : public FRT_IRequestWait {
private:
CcResult _result = CcResult::UNKNOWN;
@@ -41,6 +43,7 @@ public:
void RequestDone(FRT_RPCRequest *req) override;
bool ok() const { return _result == CcResult::ALL_OK; }
CcResult result() const { return _result; }
+ void classifyResult(CcResult value);
};
}
diff --git a/configd/src/apps/sentinel/peer-check.cpp b/configd/src/apps/sentinel/peer-check.cpp
index 60c3d9c96c9..b8060742355 100644
--- a/configd/src/apps/sentinel/peer-check.cpp
+++ b/configd/src/apps/sentinel/peer-check.cpp
@@ -4,7 +4,7 @@
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/log/log.h>
-LOG_SETUP(".peer-check");
+LOG_SETUP(".sentinel.peer-check");
using vespalib::make_string_short::fmt;
@@ -15,7 +15,8 @@ PeerCheck::PeerCheck(StatusCallback &callback, const std::string &host, int port
_hostname(host),
_portnum(port),
_target(nullptr),
- _req(nullptr)
+ _req(nullptr),
+ _statusOk(false)
{
auto spec = fmt("tcp/%s:%d", _hostname.c_str(), _portnum);
_target = orb.GetTarget(spec.c_str());
@@ -31,20 +32,19 @@ PeerCheck::~PeerCheck() {
void PeerCheck::RequestDone(FRT_RPCRequest *req) {
LOG_ASSERT(req == _req);
- bool statusOk = false;
if (req->IsError()) {
- LOG(warning, "error on ping to %s [port %d]: %s (%d)", _hostname.c_str(), _portnum,
+ LOG(debug, "error on ping to %s [port %d]: %s (%d)", _hostname.c_str(), _portnum,
req->GetErrorMessage(), req->GetErrorCode());
} else {
LOG(debug, "OK ping to %s [port %d]", _hostname.c_str(), _portnum);
- statusOk = true;
+ _statusOk = true;
}
_req->SubRef();
_req = nullptr;
_target->SubRef();
_target = nullptr;
// Note: will delete this object, so must be called as final step:
- _callback.returnStatus(statusOk);
+ _callback.returnStatus(_statusOk);
}
}
diff --git a/configd/src/apps/sentinel/peer-check.h b/configd/src/apps/sentinel/peer-check.h
index 096f304467b..ac124106387 100644
--- a/configd/src/apps/sentinel/peer-check.h
+++ b/configd/src/apps/sentinel/peer-check.h
@@ -17,6 +17,9 @@ public:
PeerCheck(StatusCallback &callback, const std::string &host, int portnum, FRT_Supervisor &orb, int timeout_ms);
~PeerCheck();
+ bool okStatus() const { return _statusOk; }
+ const std::string& getHostname() const { return _hostname; }
+
PeerCheck(const PeerCheck &) = delete;
PeerCheck(PeerCheck &&) = delete;
PeerCheck& operator= (const PeerCheck &) = delete;
@@ -30,6 +33,7 @@ private:
int _portnum;
FRT_Target *_target;
FRT_RPCRequest *_req;
+ bool _statusOk;
};
}
diff --git a/configd/src/apps/sentinel/report-connectivity.cpp b/configd/src/apps/sentinel/report-connectivity.cpp
new file mode 100644
index 00000000000..1ea7365aa3f
--- /dev/null
+++ b/configd/src/apps/sentinel/report-connectivity.cpp
@@ -0,0 +1,53 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "report-connectivity.h"
+#include "connectivity.h"
+#include <vespa/config/common/exceptions.h>
+#include <vespa/log/log.h>
+#include <chrono>
+
+LOG_SETUP(".sentinel.report-connectivity");
+
+using cloud::config::ModelConfig;
+using namespace std::chrono_literals;
+
+namespace config::sentinel {
+
+ReportConnectivity::ReportConnectivity(FRT_RPCRequest *req, int timeout_ms, FRT_Supervisor &orb, ModelOwner &modelOwner)
+ : _parentRequest(req),
+ _checks()
+{
+ auto cfg = modelOwner.getModelConfig();
+ if (cfg.has_value()) {
+ auto map = Connectivity::specsFrom(cfg.value());
+ LOG(debug, "making connectivity report for %zd peers", map.size());
+ _remaining = map.size();
+ for (const auto & [ hostname, port ] : map) {
+ _checks.emplace_back(std::make_unique<PeerCheck>(*this, hostname, port, orb, timeout_ms));
+ }
+ } else {
+ _parentRequest->SetError(FRTE_RPC_METHOD_FAILED, "failed getting model config");
+ _parentRequest->Return();
+ }
+}
+
+ReportConnectivity::~ReportConnectivity() = default;
+
+void ReportConnectivity::returnStatus(bool) {
+ if (--_remaining == 0) {
+ finish();
+ }
+}
+
+void ReportConnectivity::finish() const {
+ FRT_Values *dst = _parentRequest->GetReturn();
+ FRT_StringValue *pt_hn = dst->AddStringArray(_checks.size());
+ FRT_StringValue *pt_ss = dst->AddStringArray(_checks.size());
+ for (const auto & peer : _checks) {
+ dst->SetString(pt_hn++, peer->getHostname().c_str());
+ dst->SetString(pt_ss++, peer->okStatus() ? "ok" : "ping failed");
+ }
+ _parentRequest->Return();
+}
+
+}
diff --git a/configd/src/apps/sentinel/report-connectivity.h b/configd/src/apps/sentinel/report-connectivity.h
new file mode 100644
index 00000000000..1f243b73028
--- /dev/null
+++ b/configd/src/apps/sentinel/report-connectivity.h
@@ -0,0 +1,33 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/fnet/frt/rpcrequest.h>
+#include <vespa/fnet/frt/supervisor.h>
+#include <vespa/config-model.h>
+#include <vespa/config/helper/configfetcher.h>
+#include "model-owner.h"
+#include "peer-check.h"
+#include "status-callback.h"
+
+#include <atomic>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace config::sentinel {
+
+class ReportConnectivity : public StatusCallback
+{
+public:
+ ReportConnectivity(FRT_RPCRequest *req, int timeout_ms, FRT_Supervisor &orb, ModelOwner &modelOwner);
+ virtual ~ReportConnectivity();
+ void returnStatus(bool ok) override;
+private:
+ void finish() const;
+ FRT_RPCRequest *_parentRequest;
+ std::vector<std::unique_ptr<PeerCheck>> _checks;
+ std::atomic<size_t> _remaining;
+};
+
+}
diff --git a/configd/src/apps/sentinel/rpchooks.cpp b/configd/src/apps/sentinel/rpchooks.cpp
index 24e3cd53509..3e5509bc8c3 100644
--- a/configd/src/apps/sentinel/rpchooks.cpp
+++ b/configd/src/apps/sentinel/rpchooks.cpp
@@ -1,20 +1,22 @@
// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "rpchooks.h"
-#include "cmdq.h"
#include "check-completion-handler.h"
+#include "cmdq.h"
#include "peer-check.h"
+#include "report-connectivity.h"
#include <vespa/fnet/frt/supervisor.h>
#include <vespa/fnet/frt/rpcrequest.h>
#include <vespa/log/log.h>
-LOG_SETUP(".rpchooks");
+LOG_SETUP(".sentinel.rpchooks");
namespace config::sentinel {
-RPCHooks::RPCHooks(CommandQueue &commands, FRT_Supervisor &supervisor)
+RPCHooks::RPCHooks(CommandQueue &commands, FRT_Supervisor &supervisor, ModelOwner &modelOwner)
: _commands(commands),
- _orb(supervisor)
+ _orb(supervisor),
+ _modelOwner(modelOwner)
{
initRPC(&_orb);
}
@@ -53,6 +55,13 @@ RPCHooks::initRPC(FRT_Supervisor *supervisor)
rb.ParamDesc("timeout", "Timeout for check in milliseconds");
rb.ReturnDesc("status", "Status (ok, bad, or unknown) for peer");
//-------------------------------------------------------------------------
+ rb.DefineMethod("sentinel.report.connectivity", "i", "SS",
+ FRT_METHOD(RPCHooks::rpc_reportConnectivity), this);
+ rb.MethodDesc("report connectivity for peer sentinels");
+ rb.ParamDesc("timeout", "Timeout for check in milliseconds");
+ rb.ReturnDesc("hostnames", "Names of peers checked");
+ rb.ReturnDesc("peerstatus", "Status description for each peer");
+ //-------------------------------------------------------------------------
}
void
@@ -106,4 +115,14 @@ RPCHooks::rpc_checkConnectivity(FRT_RPCRequest *req)
req->getStash().create<PeerCheck>(completionHandler, hostname, portnum, _orb, timeout);
}
+void
+RPCHooks::rpc_reportConnectivity(FRT_RPCRequest *req)
+{
+ LOG(debug, "got reportConnectivity");
+ FRT_Values &args = *req->GetParams();
+ int timeout = args[0]._intval32;
+ req->Detach();
+ req->getStash().create<ReportConnectivity>(req, timeout, _orb, _modelOwner);
+}
+
} // namespace slobrok
diff --git a/configd/src/apps/sentinel/rpchooks.h b/configd/src/apps/sentinel/rpchooks.h
index 67f5804dcf7..292e8198b55 100644
--- a/configd/src/apps/sentinel/rpchooks.h
+++ b/configd/src/apps/sentinel/rpchooks.h
@@ -2,6 +2,7 @@
#pragma once
+#include "model-owner.h"
#include <vespa/fnet/frt/invokable.h>
#include <memory>
@@ -25,8 +26,9 @@ class RPCHooks : public FRT_Invokable
private:
CommandQueue &_commands;
FRT_Supervisor &_orb;
+ ModelOwner &_modelOwner;
public:
- RPCHooks(CommandQueue &commands, FRT_Supervisor &supervisor);
+ RPCHooks(CommandQueue &commands, FRT_Supervisor &supervisor, ModelOwner &modelOwner);
~RPCHooks() override;
private:
void initRPC(FRT_Supervisor *supervisor);
@@ -36,6 +38,7 @@ private:
void rpc_stopService(FRT_RPCRequest *req);
void rpc_startService(FRT_RPCRequest *req);
void rpc_checkConnectivity(FRT_RPCRequest *req);
+ void rpc_reportConnectivity(FRT_RPCRequest *req);
};
} // namespace config::sentinel
diff --git a/configd/src/apps/sentinel/rpcserver.cpp b/configd/src/apps/sentinel/rpcserver.cpp
index 80c3c81c826..6c0de35a9e2 100644
--- a/configd/src/apps/sentinel/rpcserver.cpp
+++ b/configd/src/apps/sentinel/rpcserver.cpp
@@ -3,13 +3,13 @@
#include "rpcserver.h"
#include <vespa/log/log.h>
-LOG_SETUP(".rpcserver");
+LOG_SETUP(".sentinel.rpcserver");
namespace config::sentinel {
-RpcServer::RpcServer(int portNumber, CommandQueue &cmdQ)
+RpcServer::RpcServer(int portNumber, CommandQueue &cmdQ, ModelOwner &modelOwner)
: _server(),
- _rpcHooks(cmdQ, _server.supervisor()),
+ _rpcHooks(cmdQ, _server.supervisor(), modelOwner),
_port(portNumber)
{
if (_server.supervisor().Listen(portNumber)) {
diff --git a/configd/src/apps/sentinel/rpcserver.h b/configd/src/apps/sentinel/rpcserver.h
index 4c6dea00ddf..8f60acce1ca 100644
--- a/configd/src/apps/sentinel/rpcserver.h
+++ b/configd/src/apps/sentinel/rpcserver.h
@@ -5,6 +5,7 @@
#include <memory>
#include "cmdq.h"
+#include "model-owner.h"
#include "rpchooks.h"
#include <vespa/fnet/frt/supervisor.h>
@@ -18,7 +19,7 @@ private:
int _port;
public:
- RpcServer(int port, CommandQueue &cmdQ);
+ RpcServer(int port, CommandQueue &cmdQ, ModelOwner &modelOwner);
~RpcServer();
int getPort() const { return _port; }
diff --git a/configd/src/apps/sentinel/sentinel.cpp b/configd/src/apps/sentinel/sentinel.cpp
index 18d4dc28f8a..32f4708188c 100644
--- a/configd/src/apps/sentinel/sentinel.cpp
+++ b/configd/src/apps/sentinel/sentinel.cpp
@@ -11,12 +11,10 @@
#include <sys/time.h>
#include <vespa/log/log.h>
-LOG_SETUP("config-sentinel");
+LOG_SETUP("sentinel.config-sentinel");
using namespace config;
-constexpr std::chrono::milliseconds CONFIG_TIMEOUT_MS(3 * 60 * 1000);
-
static bool stop()
{
return (vespalib::SignalHandler::INT.check() ||
@@ -65,16 +63,20 @@ main(int argc, char **argv)
LOG(debug, "Reading configuration");
try {
environment.boot(configId);
+ } catch (vespalib::FatalException& ex) {
+ LOG(error, "Stopping before boot complete: %s", ex.message());
+ EV_STOPPING("config-sentinel", ex.message());
+ return EXIT_FAILURE;
} catch (ConfigTimeoutException & ex) {
- LOG(warning, "Timeout getting config, please check your setup. Will exit and restart: %s", ex.getMessage().c_str());
- EV_STOPPING("config-sentinel", ex.what());
+ LOG(warning, "Timeout getting config, please check your setup. Will exit and restart: %s", ex.message());
+ EV_STOPPING("config-sentinel", ex.message());
return EXIT_FAILURE;
} catch (InvalidConfigException& ex) {
- LOG(error, "Fatal: Invalid configuration, please check your setup: %s", ex.getMessage().c_str());
- EV_STOPPING("config-sentinel", ex.what());
+ LOG(error, "Fatal: Invalid configuration, please check your setup: %s", ex.message());
+ EV_STOPPING("config-sentinel", ex.message());
return EXIT_FAILURE;
} catch (ConfigRuntimeException& ex) {
- LOG(error, "Fatal: Could not get config, please check your setup: %s", ex.getMessage().c_str());
+ LOG(error, "Fatal: Could not get config, please check your setup: %s", ex.message());
EV_STOPPING("config-sentinel", ex.what());
return EXIT_FAILURE;
}
@@ -86,13 +88,13 @@ main(int argc, char **argv)
vespalib::SignalHandler::CHLD.clear();
manager.doWork(); // Check for child procs & commands
} catch (InvalidConfigException& ex) {
- LOG(warning, "Configuration problem: (ignoring): %s", ex.what());
+ LOG(warning, "Configuration problem: (ignoring): %s", ex.message());
} catch (vespalib::PortListenException& ex) {
- LOG(error, "Fatal: %s", ex.getMessage().c_str());
+ LOG(error, "Fatal: %s", ex.message());
EV_STOPPING("config-sentinel", ex.what());
return EXIT_FAILURE;
} catch (vespalib::FatalException& ex) {
- LOG(error, "Fatal: %s", ex.getMessage().c_str());
+ LOG(error, "Fatal: %s", ex.message());
EV_STOPPING("config-sentinel", ex.what());
return EXIT_FAILURE;
}
diff --git a/configd/src/apps/sentinel/service.cpp b/configd/src/apps/sentinel/service.cpp
index 9c78894f1a7..d1dab4bb26f 100644
--- a/configd/src/apps/sentinel/service.cpp
+++ b/configd/src/apps/sentinel/service.cpp
@@ -12,7 +12,7 @@
#include <sys/wait.h>
#include <vespa/log/log.h>
-LOG_SETUP(".service");
+LOG_SETUP(".sentinel.service");
#include <vespa/log/llparser.h>
static bool stop()
diff --git a/configdefinitions/src/vespa/dispatch.def b/configdefinitions/src/vespa/dispatch.def
index aa40c317d75..150fe2e1603 100644
--- a/configdefinitions/src/vespa/dispatch.def
+++ b/configdefinitions/src/vespa/dispatch.def
@@ -7,10 +7,10 @@ namespace=vespa.config.search
# for that group to be included in queries
minActivedocsPercentage double default=97.0
-# Minimum coverage for allowing a group to be considered for serving
+# Not used. TODO: Remove on Vespa 8
minGroupCoverage double default=100
-# Maximum number of nodes allowed to be down for group to be considered for serving
+# Not used. TODO: Remove on Vespa 8
maxNodesDownPerGroup int default=0
# Distribution policy for group selection
diff --git a/configdefinitions/src/vespa/sentinel.def b/configdefinitions/src/vespa/sentinel.def
index 45ef9b21cfd..cf19e701717 100644
--- a/configdefinitions/src/vespa/sentinel.def
+++ b/configdefinitions/src/vespa/sentinel.def
@@ -22,11 +22,11 @@ application.region string default="default"
# those that can connect back to us. We delay starting services
# if we have more problems than the following limits allow:
-## Percentage we fail to talk to, maximum
-connectivity.maxBadOutPercent int default=100
+## Percentage of nodes that must be up and fully OK, minimum
+connectivity.minOkPercent int default=0
-## Absolute number of nodes that fail to talk back to us, maximum
-connectivity.maxBadReverseCount int default=999999999
+## Absolute number of nodes with confirmed network connectivity problems, maximum
+connectivity.maxBadCount int default=999999999
## The command to run. This will be run by sh -c, and the following
## environment variables are defined: $ROOT, $VESPA_SERVICE_NAME,
diff --git a/configserver-client/src/test/java/ai/vespa/hosted/client/HttpConfigServerClientTest.java b/configserver-client/src/test/java/ai/vespa/hosted/client/HttpConfigServerClientTest.java
index 956bc90380f..90fd6203a21 100644
--- a/configserver-client/src/test/java/ai/vespa/hosted/client/HttpConfigServerClientTest.java
+++ b/configserver-client/src/test/java/ai/vespa/hosted/client/HttpConfigServerClientTest.java
@@ -92,6 +92,7 @@ class HttpConfigServerClientTest {
assertEquals("GET http://localhost:" + server.port() + "/ failed with status 409 and body 'hi'", thrown.getMessage());
server.verify(1, getRequestedFor(urlEqualTo("/")));
server.verify(1, anyRequestedFor(anyUrl()));
+ server.resetRequests();
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java b/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java
index 772c2bf5125..4c2aa33a886 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java
@@ -270,15 +270,20 @@ public class ConfigServerBootstrap extends AbstractComponent implements Runnable
}
});
if ( ! Duration.between(lastLogged, Instant.now()).minus(Duration.ofSeconds(10)).isNegative()) {
- log.log(Level.INFO, () -> finishedDeployments.size() + " of " + applicationCount + " apps redeployed " +
- "(" + failedDeployments.size() + " failed)");
+ logProgress(applicationCount, failedDeployments, finishedDeployments);
lastLogged = Instant.now();
}
} while (failedDeployments.size() + finishedDeployments.size() < applicationCount);
+ logProgress(applicationCount, failedDeployments, finishedDeployments);
return new ArrayList<>(failedDeployments);
}
+ private void logProgress(int applicationCount, Set<ApplicationId> failedDeployments, Set<ApplicationId> finishedDeployments) {
+ log.log(Level.INFO, () -> finishedDeployments.size() + " of " + applicationCount + " apps redeployed " +
+ "(" + failedDeployments.size() + " failed)");
+ }
+
private DeploymentStatus getDeploymentStatus(ApplicationId applicationId, Future<?> future) {
try {
future.get(1, TimeUnit.MILLISECONDS);
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/Deployment.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/Deployment.java
index 2d336267169..1ab667f8a01 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/Deployment.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/Deployment.java
@@ -148,6 +148,8 @@ public class Deployment implements com.yahoo.config.provision.Deployment {
provisioner.get().restart(applicationId, HostFilter.from(hostnames, Set.of(), Set.of(), Set.of()));
deployLogger.log(Level.INFO, String.format("Scheduled service restart of %d nodes: %s",
hostnames.size(), hostnames.stream().sorted().collect(Collectors.joining(", "))));
+ log.info(String.format("%sScheduled service restart of %d nodes: %s",
+ session.logPre(), hostnames.size(), restartActions.format()));
this.configChangeActions = new ConfigChangeActions(
new RestartActions(), configChangeActions.getRefeedActions(), configChangeActions.getReindexActions());
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
index d97442e4980..94cfba12453 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
@@ -178,6 +178,10 @@ public class ModelContextImpl implements ModelContext {
private final boolean distributeExternalRankExpressions;
private final int numDistributorStripes;
private final boolean requireConnectivityCheck;
+ private final int maxConcurrentMergesPerContentNode;
+ private final int maxMergeQueueSize;
+ private final int largeRankExpressionLimit;
+ private final boolean throwIfResourceLimitsSpecified;
public FeatureFlags(FlagSource source, ApplicationId appId) {
this.dedicatedClusterControllerFlavor = parseDedicatedClusterControllerFlavor(flagValue(source, appId, Flags.DEDICATED_CLUSTER_CONTROLLER_FLAVOR));
@@ -200,7 +204,11 @@ public class ModelContextImpl implements ModelContext {
this.numDistributorStripes = flagValue(source, appId, Flags.NUM_DISTRIBUTOR_STRIPES);
this.useExternalRankExpression = flagValue(source, appId, Flags.USE_EXTERNAL_RANK_EXPRESSION);
this.distributeExternalRankExpressions = flagValue(source, appId, Flags.DISTRIBUTE_EXTERNAL_RANK_EXPRESSION);
+ this.largeRankExpressionLimit = flagValue(source, appId, Flags.LARGE_RANK_EXPRESSION_LIMIT);
this.requireConnectivityCheck = flagValue(source, appId, Flags.REQUIRE_CONNECTIVITY_CHECK);
+ this.maxConcurrentMergesPerContentNode = flagValue(source, appId, Flags.MAX_CONCURRENT_MERGES_PER_NODE);
+ this.maxMergeQueueSize = flagValue(source, appId, Flags.MAX_MERGE_QUEUE_SIZE);
+ this.throwIfResourceLimitsSpecified = flagValue(source, appId, Flags.THROW_EXCEPTION_IF_RESOURCE_LIMITS_SPECIFIED);
}
@Override public Optional<NodeResources> dedicatedClusterControllerFlavor() { return Optional.ofNullable(dedicatedClusterControllerFlavor); }
@@ -225,7 +233,11 @@ public class ModelContextImpl implements ModelContext {
@Override public int numDistributorStripes() { return numDistributorStripes; }
@Override public boolean useExternalRankExpressions() { return useExternalRankExpression; }
@Override public boolean distributeExternalRankExpressions() { return distributeExternalRankExpressions; }
+ @Override public int largeRankExpressionLimit() { return largeRankExpressionLimit; }
@Override public boolean requireConnectivityCheck() { return requireConnectivityCheck; }
+ @Override public int maxConcurrentMergesPerNode() { return maxConcurrentMergesPerContentNode; }
+ @Override public int maxMergeQueueSize() { return maxMergeQueueSize; }
+ @Override public boolean throwIfResourceLimitsSpecified() { return throwIfResourceLimitsSpecified; }
private static <V> V flagValue(FlagSource source, ApplicationId appId, UnboundFlag<? extends V, ?, ?> flag) {
return flag.bindTo(source)
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClient.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClient.java
index 42f4dbd5762..f0a63757477 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClient.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClient.java
@@ -35,7 +35,7 @@ import static com.yahoo.vespa.config.server.zookeeper.ConfigCurator.USERAPP_ZK_S
import static com.yahoo.vespa.config.server.zookeeper.ConfigCurator.USER_DEFCONFIGS_ZK_SUBPATH;
/**
- * A class used for reading and writing application data to zookeeper.
+ * Reads and writes application package to and from ZooKeeper.
*
* @author hmusum
*/
@@ -76,13 +76,13 @@ public class ZooKeeperClient {
*
* @param app the application package to feed to zookeeper
*/
- void write(ApplicationPackage app) {
+ void writeApplicationPackage(ApplicationPackage app) {
try {
writeUserDefs(app);
writeSomeOf(app);
writeSchemas(app);
writeUserIncludeDirs(app, app.getUserIncludeDirs());
- write(app.getMetaData());
+ writeMetadata(app.getMetaData());
} catch (Exception e) {
throw new IllegalStateException("Unable to write vespa model to config server(s) " + System.getProperty("configsources") + "\n" +
"Please ensure that config server is started " +
@@ -153,7 +153,6 @@ public class ZooKeeperClient {
for (ApplicationFile file : listFiles(dir, filenameFilter)) {
String name = file.getPath().getName();
if (name.startsWith(".")) continue; //.svn , .git ...
- if ("CVS".equals(name)) continue;
if (file.isDirectory()) {
configCurator.createNode(path.append(name).getAbsolute());
if (recurse) {
@@ -198,7 +197,6 @@ public class ZooKeeperClient {
}
private void writeUserIncludeDirs(ApplicationPackage applicationPackage, List<String> userIncludeDirs) throws IOException {
- // User defined include directories
for (String userInclude : userIncludeDirs) {
ApplicationFile dir = applicationPackage.getFile(Path.fromString(userInclude));
final List<ApplicationFile> files = dir.listFiles();
@@ -238,12 +236,12 @@ public class ZooKeeperClient {
}
/**
- * Feeds application metadata to zookeeper. Used by vespamodel to create config
- * for application metadata (used by ApplicationStatusHandler)
+ * Feeds application metadata to zookeeper. Used by config model to create config
+ * for application metadata
*
* @param metaData The application metadata.
*/
- private void write(ApplicationMetaData metaData) {
+ private void writeMetadata(ApplicationMetaData metaData) {
configCurator.putData(getZooKeeperAppPath(META_ZK_PATH).getAbsolute(), metaData.asJsonBytes());
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperDeployer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperDeployer.java
index 12aa5b7cc35..8c7d6ea28dd 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperDeployer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperDeployer.java
@@ -33,7 +33,7 @@ public class ZooKeeperDeployer {
public void deploy(ApplicationPackage applicationPackage, Map<Version, FileRegistry> fileRegistryMap,
AllocatedHosts allocatedHosts) throws IOException {
zooKeeperClient.initialize();
- zooKeeperClient.write(applicationPackage);
+ zooKeeperClient.writeApplicationPackage(applicationPackage);
zooKeeperClient.write(fileRegistryMap);
zooKeeperClient.write(allocatedHosts);
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/AddFileInterface.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/AddFileInterface.java
index 2250f2dc579..163c19abe75 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/AddFileInterface.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/AddFileInterface.java
@@ -3,10 +3,13 @@ package com.yahoo.vespa.config.server.filedistribution;
import com.yahoo.config.FileReference;
+import java.nio.ByteBuffer;
+
/**
* @author baldersheim
*/
public interface AddFileInterface {
FileReference addUri(String uri, String relativePath);
FileReference addFile(String relativePath);
+ FileReference addBlob(ByteBuffer blob, String relativePath);
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/ApplicationFileManager.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/ApplicationFileManager.java
index 4152c92c289..a1907c01085 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/ApplicationFileManager.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/ApplicationFileManager.java
@@ -7,6 +7,7 @@ import java.io.FileOutputStream;
import java.io.IOException;
import java.net.SocketTimeoutException;
import java.net.URL;
+import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;
import java.nio.file.Files;
@@ -35,6 +36,32 @@ public class ApplicationFileManager implements AddFileInterface {
return addFile(relativePath);
}
+ @Override
+ public FileReference addBlob(ByteBuffer blob, String relativePath) {
+ writeBlob(blob, relativePath);
+ return addFile(relativePath);
+ }
+
+ private void writeBlob(ByteBuffer blob, String relativePath) {
+ File file = new File(applicationDir, relativePath);
+ FileOutputStream fos = null;
+ try {
+ Files.createDirectories(file.toPath().getParent());
+ fos = new FileOutputStream(file.getAbsolutePath());
+ fos.write(blob.array(), blob.arrayOffset(), blob.remaining());
+ } catch (IOException e) {
+ throw new IllegalArgumentException("Failed creating directory " + file.getParent(), e);
+ } finally {
+ try {
+ if (fos != null) {
+ fos.close();
+ }
+ } catch (IOException e) {
+ throw new IllegalArgumentException("Failed closing down after writing blob of size " + blob.remaining() + " to " + file.getAbsolutePath());
+ }
+ }
+ }
+
private void download(String uri, String relativePath) {
File file = new File(applicationDir, relativePath);
FileOutputStream fos = null;
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDBRegistry.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDBRegistry.java
index ce582a8a1a8..4605d5e5f5c 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDBRegistry.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDBRegistry.java
@@ -51,6 +51,22 @@ public class FileDBRegistry implements FileRegistry {
}
@Override
+ public FileReference addBlob(ByteBuffer blob) {
+ long blobHash = XXHashFactory.fastestJavaInstance().hash64().hash(blob, 0);
+ String blobName = Long.toHexString(blobHash);
+ String relativePath = blobToRelativeFile(blob, blobName);
+ synchronized (this) {
+ Optional<FileReference> cachedReference = Optional.ofNullable(fileReferenceCache.get(blobName));
+ return cachedReference.orElseGet(() -> {
+ FileReference newRef = manager.addBlob(blob, relativePath);
+ entries.add(new Entry(blobName, newRef));
+ fileReferenceCache.put(blobName, newRef);
+ return newRef;
+ });
+ }
+ }
+
+ @Override
public String fileSourceHost() {
return HostName.getLocalhost();
}
@@ -72,4 +88,9 @@ public class FileDBRegistry implements FileRegistry {
return relative;
}
+ private static String blobToRelativeFile(ByteBuffer blob, String blobName) {
+ String relative = "blob/" + blobName;
+ return relative;
+ }
+
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionUtil.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionUtil.java
index 33cd425d6aa..cfe7349a1c6 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionUtil.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionUtil.java
@@ -40,7 +40,7 @@ public class FileDistributionUtil {
/**
* Returns a connection pool with all config servers except this one, or an empty pool if there
- * is only one config server.
+ * is only one config server (no point in trying to download from yourself).
*/
public static ConnectionPool createConnectionPool(ConfigserverConfig configserverConfig) {
List<String> configServers = ConfigServerSpec.fromConfig(configserverConfig)
@@ -49,7 +49,9 @@ public class FileDistributionUtil {
.map(spec -> "tcp/" + spec.getHostName() + ":" + spec.getConfigServerPort())
.collect(Collectors.toList());
- return configServers.size() > 0 ? new JRTConnectionPool(new ConfigSourceSet(configServers)) : emptyConnectionPool();
+ return configServers.size() > 0
+ ? new JRTConnectionPool(new ConfigSourceSet(configServers), "filedistribution-jrt-pool-")
+ : emptyConnectionPool();
}
public static boolean fileReferenceExistsOnDisk(File downloadDirectory, FileReference applicationPackageReference) {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java
index 93fabd8d6c0..3c91cea91e1 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java
@@ -10,9 +10,9 @@ import com.yahoo.jrt.Request;
import com.yahoo.jrt.StringValue;
import com.yahoo.vespa.defaults.Defaults;
import com.yahoo.vespa.filedistribution.CompressedFileReference;
+import com.yahoo.vespa.filedistribution.EmptyFileReferenceData;
import com.yahoo.vespa.filedistribution.FileDownloader;
import com.yahoo.vespa.filedistribution.FileReferenceData;
-import com.yahoo.vespa.filedistribution.EmptyFileReferenceData;
import com.yahoo.vespa.filedistribution.FileReferenceDownload;
import com.yahoo.vespa.filedistribution.LazyFileReferenceData;
import com.yahoo.vespa.filedistribution.LazyTemporaryStorageFileReferenceData;
@@ -82,7 +82,7 @@ public class FileServer {
this.downloader = fileDownloader;
this.root = new FileDirectory(rootDir);
this.executor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()),
- new DaemonThreadFactory("file server push"));
+ new DaemonThreadFactory("file-server-"));
}
boolean hasFile(String fileReference) {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java
index 003b4fbb345..7352f71c032 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java
@@ -11,6 +11,7 @@ import com.yahoo.vespa.config.server.session.SessionRepository;
import com.yahoo.vespa.config.server.tenant.Tenant;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.defaults.Defaults;
+import com.yahoo.vespa.filedistribution.Downloads;
import com.yahoo.vespa.filedistribution.FileDownloader;
import com.yahoo.vespa.flags.FlagSource;
@@ -52,7 +53,7 @@ public class ApplicationPackageMaintainer extends ConfigServerMaintainer {
int attempts = 0;
int failures = 0;
- try (var fileDownloader = new FileDownloader(connectionPool, downloadDirectory)) {
+ try (var fileDownloader = new FileDownloader(connectionPool, downloadDirectory, new Downloads())) {
for (var applicationId : applicationRepository.listApplications()) {
log.fine(() -> "Verifying application package for " + applicationId);
Session session = applicationRepository.getActiveSession(applicationId);
@@ -65,11 +66,11 @@ public class ApplicationPackageMaintainer extends ConfigServerMaintainer {
if (applicationPackage != null) {
attempts++;
if (! fileReferenceExistsOnDisk(downloadDirectory, applicationPackage)) {
- log.fine(() -> "Downloading missing application package for application " + applicationId + " - session " + sessionId);
+ log.fine(() -> "Downloading missing application package for application " + applicationId + " (session " + sessionId + ")");
if (fileDownloader.getFile(applicationPackage).isEmpty()) {
failures++;
- log.warning("Failed to download application package for application " + applicationId + " - session " + sessionId);
+ log.warning("Failed to download application package for application " + applicationId + " (session " + sessionId + ")");
continue;
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java
index e1135063f97..d406cafc3b8 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java
@@ -127,8 +127,10 @@ public class ClusterDeploymentMetricsRetriever {
case VESPA_CONTAINER:
optionalDouble(values.field("query_latency.sum")).ifPresent(qlSum ->
aggregator.get()
- .addContainerLatency(qlSum, values.field("query_latency.count").asDouble())
- .addFeedLatency(values.field("feed.latency.sum").asDouble(), values.field("feed.latency.count").asDouble()));
+ .addContainerLatency(qlSum, values.field("query_latency.count").asDouble()));
+ optionalDouble(values.field("feed.latency.sum")).ifPresent(flSum ->
+ aggregator.get()
+ .addFeedLatency(flSum, values.field("feed.latency.count").asDouble()));
break;
case VESPA_QRSERVER:
optionalDouble(values.field("query_latency.sum")).ifPresent(qlSum ->
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java
index 2d4aa78bcf6..4c25708dca2 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java
@@ -208,10 +208,11 @@ public abstract class ModelsBuilder<MODELRESULT extends ModelResult> {
builtModelVersions.add(modelVersion);
} catch (RuntimeException e) {
// allow failure to create old config models if there is a validation override that allow skipping old
- // config models (which is always true for manually deployed zones)
- if (builtModelVersions.size() > 0 && builtModelVersions.get(0).getModel().skipOldConfigModels(now))
+ // config models or we're manually deploying
+ if (builtModelVersions.size() > 0 &&
+ ( builtModelVersions.get(0).getModel().skipOldConfigModels(now) || zone().environment().isManuallyDeployed()))
log.log(Level.INFO, applicationId + ": Failed to build version " + version +
- ", but allow failure due to validation override ´skipOldConfigModels´");
+ ", but allow failure due to validation override or manual deployment");
else {
log.log(Level.SEVERE, applicationId + ": Failed to build version " + version);
throw e;
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/GetConfigProcessor.java b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/GetConfigProcessor.java
index 637fc421457..820f5c15318 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/GetConfigProcessor.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/GetConfigProcessor.java
@@ -4,6 +4,7 @@ package com.yahoo.vespa.config.server.rpc;
import com.yahoo.cloud.config.SentinelConfig;
import com.yahoo.collections.Pair;
import com.yahoo.component.Version;
+import com.yahoo.config.ConfigInstance;
import com.yahoo.config.provision.TenantName;
import com.yahoo.jrt.Request;
import com.yahoo.net.HostName;
@@ -164,7 +165,7 @@ class GetConfigProcessor implements Runnable {
private void returnEmpty(JRTServerConfigRequest request) {
log.log(Level.FINE, () -> "Returning empty sentinel config for request from " + request.getClientHostName());
- ConfigPayload emptyPayload = ConfigPayload.empty();
+ var emptyPayload = ConfigPayload.fromInstance(new SentinelConfig.Builder().build());
String configMd5 = ConfigUtils.getMd5(emptyPayload);
ConfigResponse config = SlimeConfigResponse.fromConfigPayload(emptyPayload, 0, false, configMd5);
request.addOkResponse(request.payloadFromResponse(config), config.getGeneration(), false, config.getConfigMd5());
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java
index 1b43e57c01a..071a0dd8f0c 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java
@@ -306,7 +306,7 @@ public final class PrepareParams {
.athenzDomain(SlimeUtils.optionalString(params.field(ATHENZ_DOMAIN)).orElse(null))
.applicationRoles(ApplicationRoles.fromString(SlimeUtils.optionalString(params.field(APPLICATION_HOST_ROLE)).orElse(null), SlimeUtils.optionalString(params.field(APPLICATION_CONTAINER_ROLE)).orElse(null)))
.quota(deserialize(params.field(QUOTA_PARAM_NAME), Quota::fromSlime))
- .tenantSecretStores(SlimeUtils.optionalString(params.field(TENANT_SECRET_STORES_PARAM_NAME)).orElse(null))
+ .tenantSecretStores(deserialize(params.field(TENANT_SECRET_STORES_PARAM_NAME), TenantSecretStoreSerializer::listFromSlime, List.of()))
.force(booleanValue(params, FORCE_PARAM_NAME))
.waitForResourcesInPrepare(booleanValue(params, WAIT_FOR_RESOURCES_IN_PREPARE))
.withOperatorCertificates(deserialize(params.field(OPERATOR_CERTIFICATES), PrepareParams::readOperatorCertificates, Collections.emptyList()))
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java
index ac350db5c21..41d050025bf 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java
@@ -36,13 +36,14 @@ import com.yahoo.vespa.config.server.modelfactory.ModelFactoryRegistry;
import com.yahoo.vespa.config.server.monitoring.MetricUpdater;
import com.yahoo.vespa.config.server.monitoring.Metrics;
import com.yahoo.vespa.config.server.provision.HostProvisionerProvider;
-import com.yahoo.vespa.config.server.tenant.TenantListener;
import com.yahoo.vespa.config.server.tenant.TenantRepository;
import com.yahoo.vespa.config.server.zookeeper.ConfigCurator;
import com.yahoo.vespa.config.server.zookeeper.SessionCounter;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.defaults.Defaults;
+import com.yahoo.vespa.flags.BooleanFlag;
import com.yahoo.vespa.flags.FlagSource;
+import com.yahoo.vespa.flags.Flags;
import com.yahoo.yolean.Exceptions;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.ChildData;
@@ -58,6 +59,7 @@ import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
@@ -121,6 +123,7 @@ public class SessionRepository {
private final Zone zone;
private final ModelFactoryRegistry modelFactoryRegistry;
private final ConfigDefinitionRepo configDefinitionRepo;
+ private final BooleanFlag rewriteSearchDefinitions;
public SessionRepository(TenantName tenantName,
TenantApplications applicationRepo,
@@ -161,17 +164,19 @@ public class SessionRepository {
this.zone = zone;
this.modelFactoryRegistry = modelFactoryRegistry;
this.configDefinitionRepo = configDefinitionRepo;
+ this.rewriteSearchDefinitions = Flags.MOVE_SEARCH_DEFINITIONS_TO_SCHEMAS_DIR.bindTo(flagSource);
- loadSessions(); // Needs to be done before creating cache below
+ loadSessions(Flags.LOAD_LOCAL_SESSIONS_WHEN_BOOTSTRAPPING.bindTo(flagSource)); // Needs to be done before creating cache below
this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, zkCacheExecutor);
this.directoryCache.addListener(this::childEvent);
this.directoryCache.start();
}
- private void loadSessions() {
+ private void loadSessions(BooleanFlag loadLocalSessions) {
ExecutorService executor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()),
new DaemonThreadFactory("load-sessions-"));
- loadLocalSessions(executor);
+ if (loadLocalSessions.value())
+ loadLocalSessions(executor);
loadRemoteSessions(executor);
try {
executor.shutdown();
@@ -214,7 +219,7 @@ public class SessionRepository {
future.get();
log.log(Level.FINE, () -> "Local session " + sessionId + " loaded");
} catch (ExecutionException | InterruptedException e) {
- log.log(Level.WARNING, "Could not load session " + sessionId, e);
+ throw new RuntimeException("Could not load local session " + sessionId, e);
}
});
}
@@ -379,7 +384,7 @@ public class SessionRepository {
future.get();
log.log(Level.FINE, () -> "Remote session " + sessionId + " loaded");
} catch (ExecutionException | InterruptedException e) {
- log.log(Level.WARNING, "Could not load session " + sessionId, e);
+ throw new RuntimeException("Could not load remote session " + sessionId, e);
}
});
}
@@ -676,6 +681,9 @@ public class SessionRepository {
tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package");
log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath());
IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile());
+ if (rewriteSearchDefinitions.value())
+ moveSearchDefinitionsToSchemasDir(tempDestinationDir);
+
log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath());
Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE);
} finally {
@@ -685,6 +693,24 @@ public class SessionRepository {
}
}
+ // TODO: Remove in Vespa 8 (when we don't allow files in SEARCH_DEFINITIONS_DIR)
+ // Copies schemas from searchdefinitions/ to schemas/ if searchdefinitions/ exists
+ private void moveSearchDefinitionsToSchemasDir(java.nio.file.Path applicationDir) throws IOException {
+ File schemasDir = applicationDir.resolve(ApplicationPackage.SCHEMAS_DIR.getRelative()).toFile();
+ File sdDir = applicationDir.resolve(ApplicationPackage.SEARCH_DEFINITIONS_DIR.getRelative()).toFile();
+ if (sdDir.exists() && sdDir.isDirectory()) {
+ File[] sdFiles = sdDir.listFiles();
+ if (sdFiles != null) {
+ Files.createDirectories(schemasDir.toPath());
+ Arrays.asList(sdFiles).forEach(file -> Exceptions.uncheck(
+ () -> Files.move(file.toPath(),
+ schemasDir.toPath().resolve(file.toPath().getFileName()),
+ StandardCopyOption.REPLACE_EXISTING)));
+ }
+ Files.delete(sdDir.toPath());
+ }
+ }
+
/**
* Returns a new session instance for the given session id.
*/
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantRepository.java
index 21db290d5e8..2f7b397cbd9 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantRepository.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantRepository.java
@@ -467,12 +467,7 @@ public class TenantRepository {
*/
public static String logPre(ApplicationId app) {
if (DEFAULT_TENANT.equals(app.tenant())) return "";
- StringBuilder ret = new StringBuilder()
- .append(logPre(app.tenant()))
- .append("app:" + app.application().value())
- .append(":" + app.instance().value())
- .append(" ");
- return ret.toString();
+ return "app:" + app.toFullString() + " ";
}
/**
@@ -483,10 +478,7 @@ public class TenantRepository {
*/
public static String logPre(TenantName tenant) {
if (DEFAULT_TENANT.equals(tenant)) return "";
- StringBuilder ret = new StringBuilder()
- .append("tenant:" + tenant.value())
- .append(" ");
- return ret.toString();
+ return "tenant:" + tenant.value() + " ";
}
private void stateChanged(CuratorFramework framework, ConnectionState connectionState) {
diff --git a/configserver/src/test/apps/app_sdbundles/services.xml b/configserver/src/test/apps/app_sdbundles/services.xml
index f1eabb7d1ef..29c736fb41b 100644
--- a/configserver/src/test/apps/app_sdbundles/services.xml
+++ b/configserver/src/test/apps/app_sdbundles/services.xml
@@ -11,7 +11,7 @@
</admin>
<content version="1.0">
- <redundancy>1</redundancy>
+ <redundancy>2</redundancy>
<documents>
<document type="music" mode="index"/>
</documents>
diff --git a/configserver/src/test/apps/deprecated-features-app/hosts.xml b/configserver/src/test/apps/deprecated-features-app/hosts.xml
new file mode 100644
index 00000000000..f4256c9fc81
--- /dev/null
+++ b/configserver/src/test/apps/deprecated-features-app/hosts.xml
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
+<hosts>
+ <host name="mytesthost">
+ <alias>node1</alias>
+ </host>
+</hosts>
diff --git a/configserver/src/test/apps/deprecated-features-app/searchdefinitions/music.sd b/configserver/src/test/apps/deprecated-features-app/searchdefinitions/music.sd
new file mode 100644
index 00000000000..a2d4614c657
--- /dev/null
+++ b/configserver/src/test/apps/deprecated-features-app/searchdefinitions/music.sd
@@ -0,0 +1,50 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+# A basic search definition - called music, should be saved to music.sd
+search music {
+
+ # It contains one document type only - called music as well
+ document music {
+
+ field title type string {
+ indexing: summary | index # How this field should be indexed
+ # index-to: title, default # Create two indexes
+        weight: 75                       # Ranking importance of this field, used by the built-in nativeRank feature
+ }
+
+ field artist type string {
+ indexing: summary | attribute | index
+ # index-to: artist, default
+
+ weight: 25
+ }
+
+ field year type int {
+ indexing: summary | attribute
+ }
+
+      # Increase query relevance: added to nativeRank in the default rank profile below
+ field popularity type int {
+ indexing: summary | attribute
+ }
+
+ field url type uri {
+ indexing: summary | index
+ }
+
+ }
+
+ rank-profile default inherits default {
+ first-phase {
+ expression: nativeRank(title,artist) + attribute(popularity)
+ }
+
+ }
+
+ rank-profile textmatch inherits default {
+ first-phase {
+ expression: nativeRank(title,artist)
+ }
+
+ }
+
+}
diff --git a/configserver/src/test/apps/deprecated-features-app/services.xml b/configserver/src/test/apps/deprecated-features-app/services.xml
new file mode 100644
index 00000000000..509d7786be0
--- /dev/null
+++ b/configserver/src/test/apps/deprecated-features-app/services.xml
@@ -0,0 +1,38 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
+<services version="1.0">
+
+ <admin version="2.0">
+ <adminserver hostalias="node1"/>
+ <logserver hostalias="node1" />
+ </admin>
+
+ <content version="1.0">
+ <redundancy>2</redundancy>
+ <documents>
+ <document type="music" mode="index"/>
+ </documents>
+ <nodes>
+ <node hostalias="node1" distribution-key="0"/>
+ </nodes>
+
+ </content>
+
+ <container version="1.0">
+ <document-processing compressdocuments="true">
+ <chain id="ContainerWrapperTest">
+ <documentprocessor id="com.yahoo.vespa.config.AppleDocProc"/>
+ </chain>
+ </document-processing>
+
+ <config name="project.specific">
+ <value>someval</value>
+ </config>
+
+ <nodes>
+ <node hostalias="node1" />
+ </nodes>
+
+ </container>
+
+</services>
diff --git a/configserver/src/test/apps/hosted-no-write-access-control/services.xml b/configserver/src/test/apps/hosted-no-write-access-control/services.xml
index b12f630ef80..429995c03a4 100644
--- a/configserver/src/test/apps/hosted-no-write-access-control/services.xml
+++ b/configserver/src/test/apps/hosted-no-write-access-control/services.xml
@@ -15,7 +15,7 @@
</container>
<content id="music" version="1.0">
- <redundancy>1</redundancy>
+ <redundancy>2</redundancy>
<documents>
<document type="music" mode="index" />
</documents>
diff --git a/configserver/src/test/apps/hosted/services.xml b/configserver/src/test/apps/hosted/services.xml
index a5c8fa1d26f..456a41c6994 100644
--- a/configserver/src/test/apps/hosted/services.xml
+++ b/configserver/src/test/apps/hosted/services.xml
@@ -18,7 +18,7 @@
</container>
<content id="music" version="1.0">
- <redundancy>1</redundancy>
+ <redundancy>2</redundancy>
<documents>
<document type="music" mode="index" />
</documents>
diff --git a/configserver/src/test/apps/zkapp/services.xml b/configserver/src/test/apps/zkapp/services.xml
index 58ecf41707d..037c8e75677 100644
--- a/configserver/src/test/apps/zkapp/services.xml
+++ b/configserver/src/test/apps/zkapp/services.xml
@@ -19,7 +19,7 @@
</container>
<content version="1.0">
- <redundancy>1</redundancy>
+ <redundancy>2</redundancy>
<documents>
<document type="music" mode="index"/>
</documents>
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java
index 2cf4d7e7b69..e8dc08d4e8d 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java
@@ -615,7 +615,7 @@ public class ApplicationRepositoryTest {
applicationRepository.prepare(sessionId2, prepareParams());
exceptionRule.expect(ActivationConflictException.class);
- exceptionRule.expectMessage(containsString("tenant:test1 app:testapp:default Cannot activate session 3 because the currently active session (4) has changed since session 3 was created (was 2 at creation time)"));
+ exceptionRule.expectMessage(containsString("app:test1.testapp.default Cannot activate session 3 because the currently active session (4) has changed since session 3 was created (was 2 at creation time)"));
applicationRepository.activate(applicationRepository.getTenant(applicationId()), sessionId2, timeoutBudget, false);
}
@@ -629,7 +629,7 @@ public class ApplicationRepositoryTest {
applicationRepository.prepare(sessionId, prepareParams());
exceptionRule.expect(IllegalArgumentException.class);
- exceptionRule.expectMessage(containsString("tenant:test1 app:testapp:default Session 2 is already active"));
+ exceptionRule.expectMessage(containsString("app:test1.testapp.default Session 2 is already active"));
applicationRepository.activate(applicationRepository.getTenant(applicationId()), sessionId, timeoutBudget, false);
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/DeployTester.java b/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/DeployTester.java
index f2722fb49e1..cca26cbb4f1 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/DeployTester.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/DeployTester.java
@@ -209,7 +209,7 @@ public class DeployTester {
@Override
public ModelCreateResult createAndValidateModel(ModelContext modelContext, ValidationParameters validationParameters) {
if ( ! validationParameters.ignoreValidationErrors())
- throw new IllegalArgumentException("Validation fails");
+ throw new IllegalArgumentException("Model building fails");
return new ModelCreateResult(createModel(modelContext), Collections.emptyList());
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClientTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClientTest.java
index 7d14b1996b0..e20363af4e9 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClientTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClientTest.java
@@ -64,7 +64,7 @@ public class ZooKeeperClientTest {
Map<Version, FileRegistry> fileRegistries = createFileRegistries();
app.writeMetaData();
zkc.initialize();
- zkc.write(app);
+ zkc.writeApplicationPackage(app);
zkc.write(fileRegistries);
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java
index 6ce94aa8499..2b595992cdb 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java
@@ -5,6 +5,7 @@ import com.yahoo.cloud.config.ConfigserverConfig;
import com.yahoo.config.FileReference;
import com.yahoo.io.IOUtils;
import com.yahoo.net.HostName;
+import com.yahoo.vespa.filedistribution.Downloads;
import com.yahoo.vespa.filedistribution.FileDownloader;
import com.yahoo.vespa.filedistribution.FileReferenceData;
import com.yahoo.vespa.filedistribution.FileReferenceDownload;
@@ -85,7 +86,7 @@ public class FileServerTest {
.configServerDBDir(temporaryFolder.newFolder("serverdb").getAbsolutePath())
.configDefinitionsDir(temporaryFolder.newFolder("configdefinitions").getAbsolutePath());
FileServer fileServer = createFileServer(builder);
- assertEquals(0, fileServer.downloader().fileReferenceDownloader().connectionPool().getSize());
+ assertEquals(0, fileServer.downloader().connectionPool().getSize());
// Empty connection pool when only one server, no use in downloading from yourself
List<ConfigserverConfig.Zookeeperserver.Builder> servers = new ArrayList<>();
@@ -95,7 +96,7 @@ public class FileServerTest {
servers.add(serverBuilder);
builder.zookeeperserver(servers);
fileServer = createFileServer(builder);
- assertEquals(0, fileServer.downloader().fileReferenceDownloader().connectionPool().getSize());
+ assertEquals(0, fileServer.downloader().connectionPool().getSize());
// connection pool of size 1 when 2 servers
ConfigserverConfig.Zookeeperserver.Builder serverBuilder2 = new ConfigserverConfig.Zookeeperserver.Builder();
@@ -104,7 +105,7 @@ public class FileServerTest {
servers.add(serverBuilder2);
builder.zookeeperserver(servers);
fileServer = createFileServer(builder);
- assertEquals(1, fileServer.downloader().fileReferenceDownloader().connectionPool().getSize());
+ assertEquals(1, fileServer.downloader().connectionPool().getSize());
}
private void writeFile(String dir) throws IOException {
@@ -137,7 +138,7 @@ public class FileServerTest {
private static class MockFileDownloader extends FileDownloader {
public MockFileDownloader(File downloadDirectory) {
- super(emptyConnectionPool(), downloadDirectory, downloadDirectory, Duration.ofMillis(100), Duration.ofMillis(100));
+ super(emptyConnectionPool(), downloadDirectory, new Downloads(), Duration.ofMillis(100), Duration.ofMillis(100));
}
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/MockFileRegistry.java b/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/MockFileRegistry.java
index 890a31645fd..723adc1400b 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/MockFileRegistry.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/MockFileRegistry.java
@@ -4,8 +4,10 @@ package com.yahoo.vespa.config.server.filedistribution;
import com.yahoo.config.FileReference;
import com.yahoo.config.application.api.FileRegistry;
import com.yahoo.net.HostName;
+import net.jpountz.xxhash.XXHashFactory;
import java.io.File;
+import java.nio.ByteBuffer;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
@@ -45,4 +47,14 @@ public class MockFileRegistry implements FileRegistry {
throw new IllegalArgumentException("FileReference addUri(String uri) is not implemented for " + getClass().getCanonicalName());
}
+ @Override
+ public FileReference addBlob(ByteBuffer blob) {
+ long blobHash = XXHashFactory.fastestJavaInstance().hash64().hash(blob, 0);
+ String relativePath = "./" + Long.toHexString(blobHash) + ".blob";
+ FileReference fileReference = addFileInterface.addBlob(blob, relativePath);
+
+ entries.add(new Entry(relativePath, fileReference));
+ return fileReference;
+ }
+
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java
index 08794cf0b78..f68e79ae266 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java
@@ -4,6 +4,7 @@ package com.yahoo.vespa.config.server.session;
import com.yahoo.config.model.api.ApplicationRoles;
import com.yahoo.config.model.api.ContainerEndpoint;
import com.yahoo.config.model.api.EndpointCertificateMetadata;
+import com.yahoo.config.model.api.TenantSecretStore;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.TenantName;
import com.yahoo.container.jdisc.HttpRequest;
@@ -24,6 +25,7 @@ import com.yahoo.slime.SlimeInserter;
import com.yahoo.slime.SlimeUtils;
import com.yahoo.vespa.config.server.tenant.ContainerEndpointSerializer;
import com.yahoo.vespa.config.server.tenant.EndpointCertificateMetadataSerializer;
+import com.yahoo.vespa.config.server.tenant.TenantSecretStoreSerializer;
import org.junit.Test;
import javax.security.auth.x500.X500Principal;
@@ -203,6 +205,26 @@ public class PrepareParamsTest {
assertEquals(certificate, prepareParams.operatorCertificates().get(0));
}
+ @Test
+ public void testSecretStores() throws IOException {
+ List<TenantSecretStore> secretStores = List.of(new TenantSecretStore("name", "awsId", "role"));
+ Slime secretStoreSlime = TenantSecretStoreSerializer.toSlime(secretStores);
+ String secretStoreParam = new String(SlimeUtils.toJsonBytes(secretStoreSlime), StandardCharsets.UTF_8);
+
+ var prepareParams = createParams(request + "&" + PrepareParams.TENANT_SECRET_STORES_PARAM_NAME + "=" + URLEncoder.encode(secretStoreParam, StandardCharsets.UTF_8), TenantName.from("foo"));
+ assertEquals(1, prepareParams.tenantSecretStores().size());
+ TenantSecretStore tenantSecretStore = prepareParams.tenantSecretStores().get(0);
+ assertEquals("name", tenantSecretStore.getName());
+ assertEquals("awsId", tenantSecretStore.getAwsId());
+ assertEquals("role", tenantSecretStore.getRole());
+
+ // Verify using json object
+ var root = SlimeUtils.jsonToSlime(json);
+ new Injector().inject(secretStoreSlime.get(), new ObjectInserter(root.get(), PrepareParams.TENANT_SECRET_STORES_PARAM_NAME));
+ PrepareParams prepareParamsJson = PrepareParams.fromJson(SlimeUtils.toJsonBytes(root), TenantName.from("foo"), Duration.ofSeconds(60));
+ assertPrepareParamsEqual(prepareParams, prepareParamsJson);
+ }
+
private void assertPrepareParamsEqual(PrepareParams urlParams, PrepareParams jsonParams) {
assertEquals(urlParams.ignoreValidationErrors(), jsonParams.ignoreValidationErrors());
assertEquals(urlParams.isDryRun(), jsonParams.isDryRun());
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionRepositoryTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionRepositoryTest.java
index a3025cbf364..31d29e7b78e 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionRepositoryTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionRepositoryTest.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.config.server.session;
import com.yahoo.cloud.config.ConfigserverConfig;
import com.yahoo.component.Version;
+import com.yahoo.config.application.api.ApplicationFile;
import com.yahoo.config.application.api.ApplicationPackage;
import com.yahoo.config.model.NullConfigModelRegistry;
import com.yahoo.config.model.api.Model;
@@ -14,6 +15,8 @@ import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.model.test.MockApplicationPackage;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.TenantName;
+import com.yahoo.io.reader.NamedReader;
+import com.yahoo.path.Path;
import com.yahoo.text.Utf8;
import com.yahoo.vespa.config.server.ApplicationRepository;
import com.yahoo.vespa.config.server.MockProvisioner;
@@ -28,6 +31,8 @@ import com.yahoo.vespa.config.server.zookeeper.ConfigCurator;
import com.yahoo.vespa.config.util.ConfigUtils;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.curator.mock.MockCurator;
+import com.yahoo.vespa.flags.Flags;
+import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.model.VespaModel;
import com.yahoo.vespa.model.VespaModelFactory;
import org.junit.Rule;
@@ -42,15 +47,18 @@ import java.time.LocalDate;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.List;
import java.util.Optional;
import java.util.function.LongPredicate;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
/**
* @author Ulf Lilleengen
@@ -66,6 +74,7 @@ public class SessionRepositoryTest {
private TenantRepository tenantRepository;
private ApplicationRepository applicationRepository;
private SessionRepository sessionRepository;
+ private final InMemoryFlagSource flagSource = new InMemoryFlagSource();
@Rule
public TemporaryFolder temporaryFolder = new TemporaryFolder();
@@ -86,6 +95,7 @@ public class SessionRepositoryTest {
tenantRepository = new TestTenantRepository.Builder()
.withConfigserverConfig(configserverConfig)
.withCurator(curator)
+ .withFlagSource(flagSource)
.withFileDistributionFactory(new MockFileDistributionFactory(configserverConfig))
.withModelFactoryRegistry(modelFactoryRegistry)
.build();
@@ -94,6 +104,7 @@ public class SessionRepositoryTest {
.withTenantRepository(tenantRepository)
.withProvisioner(new MockProvisioner())
.withOrchestrator(new OrchestratorMock())
+ .withFlagSource(flagSource)
.build();
sessionRepository = tenantRepository.getTenant(tenantName).getSessionRepository();
}
@@ -113,6 +124,10 @@ public class SessionRepositoryTest {
assertEquals(applicationId.application(), applicationSet.getForVersionOrLatest(Optional.empty(), Instant.now()).getId().application());
assertNotNull(applicationSet.getForVersionOrLatest(Optional.empty(), Instant.now()).getModel());
+ LocalSession session = sessionRepository.getLocalSession(secondSessionId);
+ Collection<NamedReader> a = session.applicationPackage.get().getSchemas();
+ assertEquals(1, a.size());
+
sessionRepository.close();
// All created sessions are deleted
assertNull(sessionRepository.getLocalSession(firstSessionId));
@@ -241,6 +256,40 @@ public class SessionRepositoryTest {
// Does not cause an error because model version 3 is skipped
}
+ @Test
+ public void require_that_searchdefinitions_are_written_to_schemas_dir() throws Exception {
+ setup();
+
+ // App has schemas in searchdefinitions/, should NOT be moved to schemas/ on deploy
+ flagSource.withBooleanFlag(Flags.MOVE_SEARCH_DEFINITIONS_TO_SCHEMAS_DIR.id(), false);
+ long sessionId = deploy(applicationId, new File("src/test/apps/deprecated-features-app"));
+ LocalSession session = sessionRepository.getLocalSession(sessionId);
+
+ assertEquals(1, session.applicationPackage.get().getSchemas().size());
+
+ ApplicationFile schema = getSchema(session, "schemas");
+ assertFalse(schema.exists());
+ ApplicationFile sd = getSchema(session, "searchdefinitions");
+ assertTrue(sd.exists());
+
+
+ // App has schemas in searchdefinitions/, should be moved to schemas/ on deploy
+ flagSource.withBooleanFlag(Flags.MOVE_SEARCH_DEFINITIONS_TO_SCHEMAS_DIR.id(), true);
+ sessionId = deploy(applicationId, new File("src/test/apps/deprecated-features-app"));
+ session = sessionRepository.getLocalSession(sessionId);
+
+ assertEquals(1, session.applicationPackage.get().getSchemas().size());
+
+ schema = getSchema(session, "schemas");
+ assertTrue(schema.exists());
+ sd = getSchema(session, "searchdefinitions");
+ assertFalse(sd.exists());
+ }
+
+ ApplicationFile getSchema(Session session, String subDirectory) {
+ return session.applicationPackage.get().getFile(Path.fromString(subDirectory).append("music.sd"));
+ }
+
private void createSession(long sessionId, boolean wait) {
SessionZooKeeperClient zkc = new SessionZooKeeperClient(curator,
ConfigCurator.create(curator),
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/zookeeper/ZKApplicationPackageTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/zookeeper/ZKApplicationPackageTest.java
index a71f75f5035..458cdb82066 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/zookeeper/ZKApplicationPackageTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/zookeeper/ZKApplicationPackageTest.java
@@ -124,8 +124,7 @@ public class ZKApplicationPackageTest {
}
/**
- * Takes for instance the dir /app and puts the contents into the given ZK path. Ignores files starting with dot,
- * and dirs called CVS.
+ * Takes for instance the dir /app and puts the contents into the given ZK path. Ignores files starting with dot.
*
* @param dir directory which holds the summary class part files
* @param path zookeeper path
@@ -142,7 +141,6 @@ public class ZKApplicationPackageTest {
}
for (File file : listFiles(dir, filenameFilter)) {
if (file.getName().startsWith(".")) continue; //.svn , .git ...
- if ("CVS".equals(file.getName())) continue;
if (file.isFile()) {
String contents = IOUtils.readFile(file);
zk.putData(path, file.getName(), contents);
diff --git a/configserver/src/test/resources/deploy/advancedapp/services.xml b/configserver/src/test/resources/deploy/advancedapp/services.xml
index b8e93b14317..77fa426041f 100644
--- a/configserver/src/test/resources/deploy/advancedapp/services.xml
+++ b/configserver/src/test/resources/deploy/advancedapp/services.xml
@@ -18,7 +18,7 @@
</container>
<content version="1.0">
- <redundancy>1</redundancy>
+ <redundancy>2</redundancy>
<documents>
<document type="keyvalue" mode="index"/>
</documents>
diff --git a/container-core/src/main/java/com/yahoo/container/jdisc/RequestHandlerTestDriver.java b/container-core/src/main/java/com/yahoo/container/jdisc/RequestHandlerTestDriver.java
index faa30bd109d..e976caf3f9f 100644
--- a/container-core/src/main/java/com/yahoo/container/jdisc/RequestHandlerTestDriver.java
+++ b/container-core/src/main/java/com/yahoo/container/jdisc/RequestHandlerTestDriver.java
@@ -74,14 +74,14 @@ public class RequestHandlerTestDriver implements AutoCloseable {
}
public MockResponseHandler sendRequest(String uri, HttpRequest.Method method, ByteBuffer body) {
- responseHandler = new MockResponseHandler();
+ MockResponseHandler responseHandler = new MockResponseHandler();
Request request = HttpRequest.newServerRequest(driver, URI.create(uri), method);
request.context().put("contextVariable", 37); // TODO: Add a method for accepting a Request instead
ContentChannel requestContent = request.connect(responseHandler);
requestContent.write(body, null);
requestContent.close(null);
request.release();
- return responseHandler;
+ return this.responseHandler = responseHandler;
}
public MockResponseHandler sendRequest(String uri, HttpRequest.Method method, ByteBuffer body, String contentType) {
diff --git a/container-core/src/main/java/com/yahoo/container/logging/ConnectionLogEntry.java b/container-core/src/main/java/com/yahoo/container/logging/ConnectionLogEntry.java
index 5b30ce5963d..6f9d7840573 100644
--- a/container-core/src/main/java/com/yahoo/container/logging/ConnectionLogEntry.java
+++ b/container-core/src/main/java/com/yahoo/container/logging/ConnectionLogEntry.java
@@ -33,6 +33,7 @@ public class ConnectionLogEntry {
private final Instant sslPeerNotAfter;
private final String sslSniServerName;
private final SslHandshakeFailure sslHandshakeFailure;
+ private final List<String> sslSubjectAlternativeNames;
private final String httpProtocol;
private final String proxyProtocolVersion;
@@ -59,6 +60,7 @@ public class ConnectionLogEntry {
this.sslPeerNotAfter = builder.sslPeerNotAfter;
this.sslSniServerName = builder.sslSniServerName;
this.sslHandshakeFailure = builder.sslHandshakeFailure;
+ this.sslSubjectAlternativeNames = builder.sslSubjectAlternativeNames;
this.httpProtocol = builder.httpProtocol;
this.proxyProtocolVersion = builder.proxyProtocolVersion;
}
@@ -88,6 +90,7 @@ public class ConnectionLogEntry {
public Optional<Instant> sslPeerNotAfter() { return Optional.ofNullable(sslPeerNotAfter); }
public Optional<String> sslSniServerName() { return Optional.ofNullable(sslSniServerName); }
public Optional<SslHandshakeFailure> sslHandshakeFailure() { return Optional.ofNullable(sslHandshakeFailure); }
+ public List<String> sslSubjectAlternativeNames() { return sslSubjectAlternativeNames == null ? List.of() : sslSubjectAlternativeNames; }
public Optional<String> httpProtocol() { return Optional.ofNullable(httpProtocol); }
public Optional<String> proxyProtocolVersion() { return Optional.ofNullable(proxyProtocolVersion); }
@@ -139,6 +142,7 @@ public class ConnectionLogEntry {
private Instant sslPeerNotAfter;
private String sslSniServerName;
private SslHandshakeFailure sslHandshakeFailure;
+ private List<String> sslSubjectAlternativeNames;
private String httpProtocol;
private String proxyProtocolVersion;
@@ -225,6 +229,10 @@ public class ConnectionLogEntry {
this.sslHandshakeFailure = sslHandshakeFailure;
return this;
}
+ public Builder withSslSubjectAlternativeNames(List<String> sslSubjectAlternativeNames) {
+ this.sslSubjectAlternativeNames = sslSubjectAlternativeNames;
+ return this;
+ }
public Builder withHttpProtocol(String protocol) {
this.httpProtocol = protocol;
return this;
diff --git a/container-core/src/main/java/com/yahoo/container/logging/JsonConnectionLogWriter.java b/container-core/src/main/java/com/yahoo/container/logging/JsonConnectionLogWriter.java
index dfdc5f1b55a..53aa79b9f8c 100644
--- a/container-core/src/main/java/com/yahoo/container/logging/JsonConnectionLogWriter.java
+++ b/container-core/src/main/java/com/yahoo/container/logging/JsonConnectionLogWriter.java
@@ -11,6 +11,7 @@ import java.io.IOException;
import java.io.OutputStream;
import java.time.Instant;
import java.util.Arrays;
+import java.util.List;
import java.util.Objects;
import java.util.Optional;
@@ -68,6 +69,7 @@ class JsonConnectionLogWriter implements LogWriter<ConnectionLogEntry> {
Instant sslPeerNotAfter = unwrap(record.sslPeerNotAfter());
String sslSniServerName = unwrap(record.sslSniServerName());
ConnectionLogEntry.SslHandshakeFailure sslHandshakeFailure = unwrap(record.sslHandshakeFailure());
+ List<String> sslSubjectAlternativeNames = record.sslSubjectAlternativeNames();
if (isAnyValuePresent(
sslProtocol, sslSessionId, sslCipherSuite, sslPeerSubject, sslPeerNotBefore, sslPeerNotAfter,
@@ -95,7 +97,13 @@ class JsonConnectionLogWriter implements LogWriter<ConnectionLogEntry> {
generator.writeStringField("type", sslHandshakeFailure.type());
generator.writeEndObject();
}
-
+ if (!sslSubjectAlternativeNames.isEmpty()) {
+ generator.writeArrayFieldStart("san");
+ for (String sanEntry : sslSubjectAlternativeNames) {
+ generator.writeString(sanEntry);
+ }
+ generator.writeEndArray();
+ }
generator.writeEndObject();
}
}
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactory.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactory.java
index 4ad39f91a83..92d2cc5d1cd 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactory.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactory.java
@@ -8,6 +8,7 @@ import com.yahoo.jdisc.http.ssl.SslContextFactoryProvider;
import com.yahoo.security.tls.MixedMode;
import com.yahoo.security.tls.TransportSecurityUtils;
import org.eclipse.jetty.alpn.server.ALPNServerConnectionFactory;
+import org.eclipse.jetty.http2.parser.RateControl;
import org.eclipse.jetty.http2.server.HTTP2ServerConnectionFactory;
import org.eclipse.jetty.server.ConnectionFactory;
import org.eclipse.jetty.server.DetectorConnectionFactory;
@@ -75,7 +76,7 @@ public class ConnectorFactory {
connector.setName(connectorConfig.name());
connector.setAcceptQueueSize(connectorConfig.acceptQueueSize());
connector.setReuseAddress(connectorConfig.reuseAddress());
- connector.setIdleTimeout(toMillis(connector.getIdleTimeout()));
+ connector.setIdleTimeout(toMillis(connectorConfig.idleTimeout()));
return connector;
}
@@ -164,6 +165,8 @@ public class ConnectorFactory {
HTTP2ServerConnectionFactory factory = new HTTP2ServerConnectionFactory(newHttpConfiguration());
factory.setStreamIdleTimeout(toMillis(connectorConfig.http2().streamIdleTimeout()));
factory.setMaxConcurrentStreams(connectorConfig.http2().maxConcurrentStreams());
+ factory.setInitialSessionRecvWindow(1 << 24);
+ factory.setInitialStreamRecvWindow(1 << 20);
return factory;
}
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyConnectionLogger.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyConnectionLogger.java
index d337131b313..88e68e7f2e6 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyConnectionLogger.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyConnectionLogger.java
@@ -6,6 +6,8 @@ import com.yahoo.container.logging.ConnectionLogEntry;
import com.yahoo.container.logging.ConnectionLogEntry.SslHandshakeFailure.ExceptionEntry;
import com.yahoo.io.HexDump;
import com.yahoo.jdisc.http.ServerConfig;
+import com.yahoo.security.SubjectAlternativeName;
+import com.yahoo.security.X509CertificateUtils;
import org.eclipse.jetty.alpn.server.ALPNServerConnection;
import org.eclipse.jetty.http2.server.HTTP2ServerConnection;
import org.eclipse.jetty.io.Connection;
@@ -36,6 +38,7 @@ import java.util.List;
import java.util.UUID;
import java.util.logging.Level;
import java.util.logging.Logger;
+import java.util.stream.Collectors;
/**
* Jetty integration for jdisc connection log ({@link ConnectionLog}).
@@ -247,6 +250,7 @@ class JettyConnectionLogger extends AbstractLifeCycle implements Connection.List
private Date sslPeerNotAfter;
private List<SNIServerName> sslSniServerNames;
private SSLHandshakeException sslHandshakeException;
+ private List<String> sslSubjectAlternativeNames;
private String proxyProtocolVersion;
private String httpProtocol;
@@ -300,6 +304,10 @@ class JettyConnectionLogger extends AbstractLifeCycle implements Connection.List
X509Certificate peerCertificate = (X509Certificate) session.getPeerCertificates()[0];
this.sslPeerNotBefore = peerCertificate.getNotBefore();
this.sslPeerNotAfter = peerCertificate.getNotAfter();
+ this.sslSubjectAlternativeNames = X509CertificateUtils.getSubjectAlternativeNames(peerCertificate).stream()
+ .map(SubjectAlternativeName::getValue)
+ .collect(Collectors.toList());
+
} catch (SSLPeerUnverifiedException e) {
// Throw if peer is not authenticated (e.g when client auth is disabled)
// JSSE provides no means of checking for client authentication without catching this exception
@@ -362,6 +370,9 @@ class JettyConnectionLogger extends AbstractLifeCycle implements Connection.List
.withSslPeerNotAfter(sslPeerNotAfter.toInstant())
.withSslPeerNotBefore(sslPeerNotBefore.toInstant());
}
+ if (sslSubjectAlternativeNames != null && !sslSubjectAlternativeNames.isEmpty()) {
+ builder.withSslSubjectAlternativeNames(sslSubjectAlternativeNames);
+ }
if (sslHandshakeException != null) {
List<ExceptionEntry> exceptionChain = new ArrayList<>();
Throwable cause = sslHandshakeException;
diff --git a/container-core/src/test/java/com/yahoo/container/logging/JsonConnectionLogWriterTest.java b/container-core/src/test/java/com/yahoo/container/logging/JsonConnectionLogWriterTest.java
index 75bc0c915d3..66b3da06ff2 100644
--- a/container-core/src/test/java/com/yahoo/container/logging/JsonConnectionLogWriterTest.java
+++ b/container-core/src/test/java/com/yahoo/container/logging/JsonConnectionLogWriterTest.java
@@ -26,6 +26,7 @@ class JsonConnectionLogWriterTest {
List.of(
new ConnectionLogEntry.SslHandshakeFailure.ExceptionEntry("javax.net.ssl.SSLHandshakeException", "message"),
new ConnectionLogEntry.SslHandshakeFailure.ExceptionEntry("java.io.IOException", "cause message"))))
+ .withSslSubjectAlternativeNames(List.of("sandns", "sanemail"))
.build();
String expectedJson = "{" +
"\"id\":\""+id.toString()+"\"," +
@@ -34,7 +35,7 @@ class JsonConnectionLogWriterTest {
"\"ssl\":{\"handshake-failure\":{\"exception\":[" +
"{\"cause\":\"javax.net.ssl.SSLHandshakeException\",\"message\":\"message\"}," +
"{\"cause\":\"java.io.IOException\",\"message\":\"cause message\"}" +
- "],\"type\":\"UNKNOWN\"}}}";
+ "],\"type\":\"UNKNOWN\"},\"san\":[\"sandns\",\"sanemail\"]}}";
JsonConnectionLogWriter writer = new JsonConnectionLogWriter();
ByteArrayOutputStream out = new ByteArrayOutputStream();
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactoryTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactoryTest.java
index 93261a2401f..bb736122867 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactoryTest.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactoryTest.java
@@ -8,6 +8,8 @@ import com.yahoo.jdisc.http.ssl.impl.ConfiguredSslContextFactoryProvider;
import org.eclipse.jetty.server.Request;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.handler.AbstractHandler;
+import org.junit.After;
+import org.junit.Before;
import org.junit.Test;
import javax.servlet.http.HttpServletRequest;
@@ -16,46 +18,73 @@ import java.io.IOException;
import java.util.Map;
import static org.hamcrest.CoreMatchers.equalTo;
+import static org.junit.Assert.assertEquals;
/**
* @author Einar M R Rosenvinge
+ * @author bjorncs
*/
public class ConnectorFactoryTest {
- @Test
- public void requireThatServerCanBindChannel() throws Exception {
- Server server = new Server();
+ private Server server;
+
+ @Before
+ public void createServer() {
+ server = new Server();
+ }
+
+ @After
+ public void stopServer() {
try {
- ConnectorConfig config = new ConnectorConfig(new ConnectorConfig.Builder());
- ConnectorFactory factory = createConnectorFactory(config);
- JettyConnectionLogger connectionLogger = new JettyConnectionLogger(
- new ServerConfig.ConnectionLog.Builder().enabled(false).build(),
- new VoidConnectionLog());
- DummyMetric metric = new DummyMetric();
- var connectionMetricAggregator = new ConnectionMetricAggregator(new ServerConfig(new ServerConfig.Builder()), metric);
- JDiscServerConnector connector =
- (JDiscServerConnector)factory.createConnector(metric, server, connectionLogger, connectionMetricAggregator);
- server.addConnector(connector);
- server.setHandler(new HelloWorldHandler());
- server.start();
-
- SimpleHttpClient client = new SimpleHttpClient(null, connector.getLocalPort(), false);
- SimpleHttpClient.RequestExecutor ex = client.newGet("/blaasdfnb");
- SimpleHttpClient.ResponseValidator val = ex.execute();
- val.expectContent(equalTo("Hello world"));
- } finally {
- try {
- server.stop();
- } catch (Exception e) {
- //ignore
- }
+ server.stop();
+ server = null;
+ } catch (Exception e) {
+ //ignore
}
}
+ @Test
+ public void requireThatServerCanBindChannel() throws Exception {
+ ConnectorConfig config = new ConnectorConfig(new ConnectorConfig.Builder());
+ ConnectorFactory factory = createConnectorFactory(config);
+ JDiscServerConnector connector = createConnectorFromFactory(factory);
+ server.addConnector(connector);
+ server.setHandler(new HelloWorldHandler());
+ server.start();
+
+ SimpleHttpClient client = new SimpleHttpClient(null, connector.getLocalPort(), false);
+ SimpleHttpClient.RequestExecutor ex = client.newGet("/blaasdfnb");
+ SimpleHttpClient.ResponseValidator val = ex.execute();
+ val.expectContent(equalTo("Hello world"));
+ }
+
+ @Test
+ public void constructed_connector_is_based_on_jdisc_connector_config() {
+ ConnectorConfig config = new ConnectorConfig.Builder()
+ .idleTimeout(25)
+ .name("my-server-name")
+ .listenPort(12345)
+ .build();
+ ConnectorFactory factory = createConnectorFactory(config);
+ JDiscServerConnector connector = createConnectorFromFactory(factory);
+ assertEquals(25000, connector.getIdleTimeout());
+ assertEquals(12345, connector.listenPort());
+ assertEquals("my-server-name", connector.getName());
+ }
+
private static ConnectorFactory createConnectorFactory(ConnectorConfig config) {
return new ConnectorFactory(config, new ConfiguredSslContextFactoryProvider(config));
}
+ private JDiscServerConnector createConnectorFromFactory(ConnectorFactory factory) {
+ JettyConnectionLogger connectionLogger = new JettyConnectionLogger(
+ new ServerConfig.ConnectionLog.Builder().enabled(false).build(),
+ new VoidConnectionLog());
+ DummyMetric metric = new DummyMetric();
+ var connectionMetricAggregator = new ConnectionMetricAggregator(new ServerConfig(new ServerConfig.Builder()), metric);
+ return (JDiscServerConnector)factory.createConnector(metric, server, connectionLogger, connectionMetricAggregator);
+ }
+
private static class HelloWorldHandler extends AbstractHandler {
@Override
public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response) throws IOException {
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java
index 0dec711f4c0..0f625b5c3df 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java
@@ -41,6 +41,7 @@ import org.apache.hc.client5.http.entity.mime.FormBodyPart;
import org.apache.hc.client5.http.entity.mime.FormBodyPartBuilder;
import org.apache.hc.client5.http.entity.mime.StringBody;
import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient;
+import org.apache.hc.client5.http.impl.async.H2AsyncClientBuilder;
import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder;
import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder;
import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder;
@@ -969,14 +970,12 @@ public class HttpServerTest {
private static CloseableHttpAsyncClient createHttp2Client(JettyTestDriver driver) {
TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create()
- .setSslContext(driver.sslContext())
- .build();
- var client = HttpAsyncClientBuilder.create()
- .setVersionPolicy(HttpVersionPolicy.FORCE_HTTP_2)
- .disableConnectionState()
- .disableAutomaticRetries()
- .setConnectionManager(PoolingAsyncClientConnectionManagerBuilder.create().setTlsStrategy(tlsStrategy).build())
- .build();
+ .setSslContext(driver.sslContext())
+ .build();
+ var client = H2AsyncClientBuilder.create()
+ .disableAutomaticRetries()
+ .setTlsStrategy(tlsStrategy)
+ .build();
client.start();
return client;
}
diff --git a/container-disc/src/main/java/com/yahoo/container/jdisc/ConfiguredApplication.java b/container-disc/src/main/java/com/yahoo/container/jdisc/ConfiguredApplication.java
index fe8668427f4..0e4c4446778 100644
--- a/container-disc/src/main/java/com/yahoo/container/jdisc/ConfiguredApplication.java
+++ b/container-disc/src/main/java/com/yahoo/container/jdisc/ConfiguredApplication.java
@@ -155,7 +155,7 @@ public final class ConfiguredApplication implements Application {
if ( ! qrConfig.rpc().enabled()) return null;
// 1. Set up RPC server
- supervisor = new Supervisor(new Transport("slobrok")).useSmallBuffers();
+ supervisor = new Supervisor(new Transport("slobrok")).setDropEmptyBuffers(true);
Spec listenSpec = new Spec(qrConfig.rpc().port());
try {
acceptor = supervisor.listen(listenSpec);
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/NearestNeighborItem.java b/container-search/src/main/java/com/yahoo/prelude/query/NearestNeighborItem.java
index 1f4afed451c..4fe977bff2b 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/NearestNeighborItem.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/NearestNeighborItem.java
@@ -2,7 +2,6 @@
package com.yahoo.prelude.query;
-import com.google.common.annotations.Beta;
import com.yahoo.compress.IntegerCompressor;
import com.yahoo.prelude.query.textualrepresentation.Discloser;
@@ -17,7 +16,6 @@ import java.nio.ByteBuffer;
*
* @author arnej
*/
-@Beta
public class NearestNeighborItem extends SimpleTaggableItem {
private int targetNumHits = 0;
diff --git a/container-search/src/main/java/com/yahoo/prelude/searcher/FillSearcher.java b/container-search/src/main/java/com/yahoo/prelude/searcher/FillSearcher.java
index 45034482bb6..5390f202ef0 100644
--- a/container-search/src/main/java/com/yahoo/prelude/searcher/FillSearcher.java
+++ b/container-search/src/main/java/com/yahoo/prelude/searcher/FillSearcher.java
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.prelude.searcher;
-import com.yahoo.component.ComponentId;
import com.yahoo.search.Query;
import com.yahoo.search.Result;
import com.yahoo.search.Searcher;
diff --git a/container-search/src/main/java/com/yahoo/search/Searcher.java b/container-search/src/main/java/com/yahoo/search/Searcher.java
index 5fefe9d2468..cd6b7167f08 100644
--- a/container-search/src/main/java/com/yahoo/search/Searcher.java
+++ b/container-search/src/main/java/com/yahoo/search/Searcher.java
@@ -73,6 +73,7 @@ public abstract class Searcher extends Processor {
// Note to developers: If you think you should add something here you are probably wrong
// Create a subclass containing the new method instead.
+
private final Logger logger = Logger.getLogger(getClass().getName());
public Searcher() {}
diff --git a/container-search/src/main/java/com/yahoo/search/cluster/BaseNodeMonitor.java b/container-search/src/main/java/com/yahoo/search/cluster/BaseNodeMonitor.java
index 0d491d2f0c1..8eee7c11d3e 100644
--- a/container-search/src/main/java/com/yahoo/search/cluster/BaseNodeMonitor.java
+++ b/container-search/src/main/java/com/yahoo/search/cluster/BaseNodeMonitor.java
@@ -44,7 +44,7 @@ public abstract class BaseNodeMonitor<T> {
protected MonitorConfiguration configuration;
/** Is the node we monitor part of an internal Vespa cluster or not */
- private boolean internal;
+ private final boolean internal;
public BaseNodeMonitor(boolean internal) {
this.internal=internal;
diff --git a/container-search/src/main/java/com/yahoo/search/cluster/ClusterMonitor.java b/container-search/src/main/java/com/yahoo/search/cluster/ClusterMonitor.java
index 27d8bb27ee8..c9b8aeee417 100644
--- a/container-search/src/main/java/com/yahoo/search/cluster/ClusterMonitor.java
+++ b/container-search/src/main/java/com/yahoo/search/cluster/ClusterMonitor.java
@@ -25,9 +25,9 @@ import java.util.logging.Logger;
*/
public class ClusterMonitor<T> {
- private final MonitorConfiguration configuration = new MonitorConfiguration();
+ private static final Logger log = Logger.getLogger(ClusterMonitor.class.getName());
- private static Logger log = Logger.getLogger(ClusterMonitor.class.getName());
+ private final MonitorConfiguration configuration = new MonitorConfiguration();
private final NodeManager<T> nodeManager;
diff --git a/container-search/src/main/java/com/yahoo/search/cluster/MonitorConfiguration.java b/container-search/src/main/java/com/yahoo/search/cluster/MonitorConfiguration.java
index a2fb982e3c5..95f51b374d6 100644
--- a/container-search/src/main/java/com/yahoo/search/cluster/MonitorConfiguration.java
+++ b/container-search/src/main/java/com/yahoo/search/cluster/MonitorConfiguration.java
@@ -9,7 +9,7 @@ package com.yahoo.search.cluster;
public class MonitorConfiguration {
/** The interval in ms between consecutive checks of the monitored nodes */
- private long checkInterval=1000;
+ private long checkInterval = 1000;
/** The number of milliseconds to attempt to complete a request before giving up */
private final long requestTimeout = 980;
@@ -18,6 +18,7 @@ public class MonitorConfiguration {
private long failLimit = 5000;
/** Sets the interval between each ping of idle or failing nodes. Default is 1000 ms. */
+ @Deprecated // TODO: Remove on Vespa 8
public void setCheckInterval(long intervalMs) { this.checkInterval = intervalMs; }
/** Returns the interval between each ping of idle or failing nodes. Default is 1000 ms. */
@@ -59,6 +60,7 @@ public class MonitorConfiguration {
* Sets the number of milliseconds a node is allowed to fail before we
* mark it as not working
*/
+ @Deprecated // TODO: Remove on Vespa 8
public void setFailLimit(long failLimit) { this.failLimit=failLimit; }
/**
@@ -86,6 +88,7 @@ public class MonitorConfiguration {
@Deprecated // TODO: Remove on Vespa 8
public void setQuarantineTime(long quarantineTime) { }
+ @Override
public String toString() {
return "monitor configuration [" +
"checkInterval: " + checkInterval +
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java
index 159a42676ec..9ae25518969 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java
@@ -368,7 +368,7 @@ public class SearchCluster implements NodeManager<Node> {
*/
public boolean isPartialGroupCoverageSufficient(List<Node> nodes) {
if (orderedGroups().size() == 1)
- return nodes.size() >= wantedGroupSize() - dispatchConfig.maxNodesDownPerGroup();
+ return true;
long activeDocuments = nodes.stream().mapToLong(Node::getActiveDocuments).sum();
return isGroupCoverageSufficient(activeDocuments, medianDocumentsPerGroup());
}
@@ -378,7 +378,6 @@ public class SearchCluster implements NodeManager<Node> {
boolean changed = group.isFullCoverageStatusChanged(fullCoverage);
if (changed || (!fullCoverage && System.currentTimeMillis() > nextLogTime)) {
nextLogTime = System.currentTimeMillis() + 30 * 1000;
- int requiredNodes = group.nodes().size() - dispatchConfig.maxNodesDownPerGroup();
if (fullCoverage) {
log.info("Cluster " + clusterId + ": " + group + " has full coverage. " +
"Active documents: " + group.getActiveDocuments() + "/" + medianDocuments + ", " +
@@ -391,7 +390,7 @@ public class SearchCluster implements NodeManager<Node> {
}
log.warning("Cluster " + clusterId + ": " + group + " has reduced coverage: " +
"Active documents: " + group.getActiveDocuments() + "/" + medianDocuments + ", " +
- "working nodes: " + group.workingNodes() + "/" + group.nodes().size() + " required " + requiredNodes +
+ "working nodes: " + group.workingNodes() + "/" + group.nodes().size() +
", unresponsive nodes: " + (unresponsive.toString().isEmpty() ? " none" : unresponsive));
}
}
diff --git a/container-search/src/main/java/com/yahoo/search/grouping/request/GroupingOperation.java b/container-search/src/main/java/com/yahoo/search/grouping/request/GroupingOperation.java
index 8b73fa01128..499ed610d34 100644
--- a/container-search/src/main/java/com/yahoo/search/grouping/request/GroupingOperation.java
+++ b/container-search/src/main/java/com/yahoo/search/grouping/request/GroupingOperation.java
@@ -8,7 +8,12 @@ import com.yahoo.search.grouping.request.parser.GroupingParserInput;
import com.yahoo.search.grouping.request.parser.ParseException;
import com.yahoo.search.grouping.request.parser.TokenMgrException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
/**
* This class represents a single node in a grouping operation tree. You may manually construct this tree, or you may
diff --git a/container-search/src/main/java/com/yahoo/search/query/Presentation.java b/container-search/src/main/java/com/yahoo/search/query/Presentation.java
index b10e8442a5f..db2fbf525e0 100644
--- a/container-search/src/main/java/com/yahoo/search/query/Presentation.java
+++ b/container-search/src/main/java/com/yahoo/search/query/Presentation.java
@@ -23,7 +23,7 @@ import java.util.Set;
public class Presentation implements Cloneable {
/** The type representing the property arguments consumed by this */
- private static QueryProfileType argumentType;
+ private static final QueryProfileType argumentType;
public static final String PRESENTATION = "presentation";
public static final String BOLDING = "bolding";
@@ -48,7 +48,7 @@ public class Presentation implements Cloneable {
public static QueryProfileType getArgumentType() { return argumentType; }
/** How the result should be highlighted */
- private Highlight highlight= null;
+ private Highlight highlight = null;
/** The terms to highlight in the result (only used by BoldingSearcher, may be removed later). */
private List<IndexedItem> boldingData = null;
diff --git a/container-search/src/main/java/com/yahoo/search/searchchain/Execution.java b/container-search/src/main/java/com/yahoo/search/searchchain/Execution.java
index 0574fc660c3..fac0d35d509 100644
--- a/container-search/src/main/java/com/yahoo/search/searchchain/Execution.java
+++ b/container-search/src/main/java/com/yahoo/search/searchchain/Execution.java
@@ -523,7 +523,6 @@ public class Execution extends com.yahoo.processing.execution.Execution {
*
* @param result the result to fill
*/
- @SuppressWarnings("deprecation")
public void fillAttributes(Result result) {
fill(result, ATTRIBUTEPREFETCH);
}
diff --git a/container-search/src/main/java/com/yahoo/search/searchers/ValidateNearestNeighborSearcher.java b/container-search/src/main/java/com/yahoo/search/searchers/ValidateNearestNeighborSearcher.java
index ca9d17cb656..d22dd2e6af6 100644
--- a/container-search/src/main/java/com/yahoo/search/searchers/ValidateNearestNeighborSearcher.java
+++ b/container-search/src/main/java/com/yahoo/search/searchers/ValidateNearestNeighborSearcher.java
@@ -19,6 +19,7 @@ import com.yahoo.tensor.TensorType;
import com.yahoo.vespa.config.search.AttributesConfig;
import com.yahoo.yolean.chain.Before;
+import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -32,15 +33,17 @@ import java.util.Optional;
@Before(GroupingExecutor.COMPONENT_NAME) // Must happen before query.prepare()
public class ValidateNearestNeighborSearcher extends Searcher {
- private final Map<String, TensorType> validAttributes = new HashMap<>();
+ private final Map<String, List<TensorType>> validAttributes = new HashMap<>();
public ValidateNearestNeighborSearcher(AttributesConfig attributesConfig) {
for (AttributesConfig.Attribute a : attributesConfig.attribute()) {
- TensorType tt = null;
+ if (! validAttributes.containsKey(a.name())) {
+ validAttributes.put(a.name(), new ArrayList<TensorType>());
+ }
if (a.datatype() == AttributesConfig.Attribute.Datatype.TENSOR) {
- tt = TensorType.fromSpec(a.tensortype());
+ TensorType tt = TensorType.fromSpec(a.tensortype());
+ validAttributes.get(a.name()).add(tt);
}
- validAttributes.put(a.name(), tt);
}
}
@@ -60,10 +63,10 @@ public class ValidateNearestNeighborSearcher extends Searcher {
public Optional<ErrorMessage> errorMessage = Optional.empty();
- private final Map<String, TensorType> validAttributes;
+ private final Map<String, List<TensorType>> validAttributes;
private final Query query;
- public NNVisitor(RankProperties rankProperties, Map<String, TensorType> validAttributes, Query query) {
+ public NNVisitor(RankProperties rankProperties, Map<String, List<TensorType>> validAttributes, Query query) {
this.validAttributes = validAttributes;
this.query = query;
}
@@ -101,17 +104,26 @@ public class ValidateNearestNeighborSearcher extends Searcher {
if (queryTensor.isEmpty())
return item + " requires a tensor rank feature " + queryFeatureName + " but this is not present";
- if ( ! validAttributes.containsKey(item.getIndexName()))
+ if ( ! validAttributes.containsKey(item.getIndexName())) {
return item + " field is not an attribute";
- TensorType fieldType = validAttributes.get(item.getIndexName());
- if (fieldType == null) return item + " field is not a tensor";
- if ( ! isDenseVector(fieldType))
- return item + " tensor type " + fieldType + " is not a dense vector";
-
- if ( ! isCompatible(fieldType, queryTensor.get().type()))
- return item + " field type " + fieldType + " does not match query type " + queryTensor.get().type();
-
- return null;
+ }
+ List<TensorType> allTensorTypes = validAttributes.get(item.getIndexName());
+ for (TensorType fieldType : allTensorTypes) {
+ if (isDenseVector(fieldType) && isCompatible(fieldType, queryTensor.get().type())) {
+ return null;
+ }
+ }
+ for (TensorType fieldType : allTensorTypes) {
+ if (isDenseVector(fieldType) && ! isCompatible(fieldType, queryTensor.get().type())) {
+ return item + " field type " + fieldType + " does not match query type " + queryTensor.get().type();
+ }
+ }
+ for (TensorType fieldType : allTensorTypes) {
+ if (! isDenseVector(fieldType)) {
+ return item + " tensor type " + fieldType + " is not a dense vector";
+ }
+ }
+ return item + " field is not a tensor";
}
@Override
diff --git a/container-search/src/test/java/com/yahoo/search/dispatch/MockSearchCluster.java b/container-search/src/test/java/com/yahoo/search/dispatch/MockSearchCluster.java
index 9d96b2302d7..8db54218e56 100644
--- a/container-search/src/test/java/com/yahoo/search/dispatch/MockSearchCluster.java
+++ b/container-search/src/test/java/com/yahoo/search/dispatch/MockSearchCluster.java
@@ -119,7 +119,6 @@ public class MockSearchCluster extends SearchCluster {
DispatchConfig.Builder builder = new DispatchConfig.Builder();
builder.minActivedocsPercentage(88.0);
builder.minGroupCoverage(99.0);
- builder.maxNodesDownPerGroup(0);
builder.minSearchCoverage(minSearchCoverage);
builder.distributionPolicy(DispatchConfig.DistributionPolicy.Enum.ROUNDROBIN);
if (minSearchCoverage < 100.0) {
diff --git a/container-search/src/test/java/com/yahoo/search/searchers/ValidateNearestNeighborTestCase.java b/container-search/src/test/java/com/yahoo/search/searchers/ValidateNearestNeighborTestCase.java
index 72956b5b6eb..e5ed6f89fd4 100644
--- a/container-search/src/test/java/com/yahoo/search/searchers/ValidateNearestNeighborTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/searchers/ValidateNearestNeighborTestCase.java
@@ -51,10 +51,20 @@ public class ValidateNearestNeighborTestCase {
"attribute[3].tensortype tensor(x{})\n" +
"attribute[4].name matrix\n" +
"attribute[4].datatype TENSOR\n" +
- "attribute[4].tensortype tensor(x[3],y[1])\n"
+ "attribute[4].tensortype tensor(x[3],y[1])\n" +
+ "attribute[5].name threetypes\n" +
+ "attribute[5].datatype TENSOR\n" +
+ "attribute[5].tensortype tensor(x[42])\n" +
+ "attribute[6].name threetypes\n" +
+ "attribute[6].datatype TENSOR\n" +
+ "attribute[6].tensortype tensor(x[3])\n" +
+ "attribute[7].name threetypes\n" +
+ "attribute[7].datatype TENSOR\n" +
+ "attribute[7].tensortype tensor(x{})\n"
)));
}
+ private static TensorType tt_dense_dvector_42 = TensorType.fromSpec("tensor(x[42])");
private static TensorType tt_dense_dvector_3 = TensorType.fromSpec("tensor(x[3])");
private static TensorType tt_dense_dvector_2 = TensorType.fromSpec("tensor(x[2])");
private static TensorType tt_dense_fvector_3 = TensorType.fromSpec("tensor<float>(x[3])");
@@ -186,6 +196,20 @@ public class ValidateNearestNeighborTestCase {
}
@Test
+ public void testSeveralAttributesWithSameName() {
+ String q = makeQuery("threetypes", "qvector");
+ Tensor t1 = makeTensor(tt_dense_fvector_3);
+ Result r1 = doSearch(searcher, q, t1);
+ assertNull(r1.hits().getError());
+ Tensor t2 = makeTensor(tt_dense_dvector_42, 42);
+ Result r2 = doSearch(searcher, q, t2);
+ assertNull(r2.hits().getError());
+ Tensor t3 = makeTensor(tt_dense_dvector_2, 2);
+ Result r3 = doSearch(searcher, q, t3);
+ assertErrMsg(desc("threetypes", "qvector", 1, "field type tensor(x[42]) does not match query type tensor(x[2])"), r3);
+ }
+
+ @Test
public void testSparseTensor() {
String q = makeQuery("sparse", "qvector");
Tensor t = makeTensor(tt_sparse_vector_x);
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/ServiceRegistry.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/ServiceRegistry.java
index 0b17428296c..fdd66c037d9 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/ServiceRegistry.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/ServiceRegistry.java
@@ -16,6 +16,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.ArtifactRepo
import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterCloud;
import com.yahoo.vespa.hosted.controller.api.integration.dns.NameService;
import com.yahoo.vespa.hosted.controller.api.integration.entity.EntityService;
+import com.yahoo.vespa.hosted.controller.api.integration.horizon.HorizonClient;
import com.yahoo.vespa.hosted.controller.api.integration.organization.ContactRetriever;
import com.yahoo.vespa.hosted.controller.api.integration.organization.DeploymentIssues;
import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueHandler;
@@ -96,4 +97,6 @@ public interface ServiceRegistry {
ChangeRequestClient changeRequestClient();
AccessControlService accessControlService();
+
+ HorizonClient horizonClient();
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobType.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobType.java
index 35b1a238325..9bcb80f24ee 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobType.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobType.java
@@ -91,18 +91,22 @@ public enum JobType {
productionAwsApNortheast1a ("production-aws-ap-northeast-1a",
Map.of(Public, ZoneId.from("prod", "aws-ap-northeast-1a"))),
+ testAwsApNortheast1a ("test-aws-ap-northeast-1a",
+ Map.of(Public, ZoneId.from("prod", "aws-ap-northeast-1a")), true),
+
productionAwsEuWest1a ("production-aws-eu-west-1a",
Map.of(Public, ZoneId.from("prod", "aws-eu-west-1a"))),
- testAwsApNortheast1a ("test-aws-ap-northeast-1a",
- Map.of(Public, ZoneId.from("prod", "aws-ap-northeast-1a")), true),
+ testAwsEuWest1a ("test-aws-eu-west-1a",
+ Map.of(Public, ZoneId.from("prod", "aws-eu-west-1a")), true),
productionAwsUsWest2a ("production-aws-us-west-2a",
Map.of(main, ZoneId.from("prod", "aws-us-west-2a"),
Public, ZoneId.from("prod", "aws-us-west-2a"))),
testAwsUsWest2a ("test-aws-us-west-2a",
- Map.of(main, ZoneId.from("prod" , "aws-us-west-2a")), true),
+ Map.of(main, ZoneId.from("prod", "aws-us-west-2a"),
+ Public, ZoneId.from("prod", "aws-us-west-2a")), true),
productionAwsUsEast1b ("production-aws-us-east-1b",
Map.of(main, ZoneId.from("prod" , "aws-us-east-1b"))),
@@ -132,6 +136,9 @@ public enum JobType {
productionCdUsCentral2 ("production-cd-us-central-2",
Map.of(cd , ZoneId.from("prod" , "cd-us-central-2"))),
+ testCdUsCentral2 ("test-cd-us-central-2",
+ Map.of(cd , ZoneId.from("prod" , "cd-us-central-2")), true),
+
productionCdUsWest1 ("production-cd-us-west-1",
Map.of(cd , ZoneId.from("prod" , "cd-us-west-1"))),
@@ -155,7 +162,7 @@ public enum JobType {
Map.of(main, ZoneId.from("perf" , "us-east-3")));
private final String jobName;
- private final Map<SystemName, ZoneId> zones;
+ final Map<SystemName, ZoneId> zones;
private final boolean isProductionTest;
JobType(String jobName, Map<SystemName, ZoneId> zones, boolean isProductionTest) {
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/HorizonClient.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/HorizonClient.java
new file mode 100644
index 00000000000..554d3e5b7fa
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/HorizonClient.java
@@ -0,0 +1,25 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.api.integration.horizon;
+
+/**
+ * @author olaa
+ */
+public interface HorizonClient {
+
+ HorizonResponse getMetrics(byte[] query);
+
+ HorizonResponse getUser();
+
+ HorizonResponse getDashboard(String dashboardId);
+
+ HorizonResponse getFavorite(String userId);
+
+ HorizonResponse getTopFolders();
+
+ HorizonResponse getRecent(String userId);
+
+ HorizonResponse getClipboard(String dashboardId);
+
+ HorizonResponse getMetaData(byte[] query);
+
+}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/HorizonResponse.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/HorizonResponse.java
new file mode 100644
index 00000000000..5447b8c3b0b
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/HorizonResponse.java
@@ -0,0 +1,36 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.api.integration.horizon;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * @author valerijf
+ */
+public class HorizonResponse implements AutoCloseable {
+
+ private final int code;
+ private final InputStream inputStream;
+
+ public HorizonResponse(int code, InputStream inputStream) {
+ this.code = code;
+ this.inputStream = inputStream;
+ }
+
+ public int code() {
+ return code;
+ }
+
+ public InputStream inputStream() {
+ return inputStream;
+ }
+
+ public static HorizonResponse empty() {
+ return new HorizonResponse(200, InputStream.nullInputStream());
+ }
+
+ @Override
+ public void close() throws IOException {
+ inputStream.close();
+ }
+}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/MockHorizonClient.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/MockHorizonClient.java
new file mode 100644
index 00000000000..13a8c2ec079
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/MockHorizonClient.java
@@ -0,0 +1,48 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.api.integration.horizon;
+
+/**
+ * @author olaa
+ */
+public class MockHorizonClient implements HorizonClient {
+
+ @Override
+ public HorizonResponse getMetrics(byte[] query) {
+ return HorizonResponse.empty();
+ }
+
+ @Override
+ public HorizonResponse getUser() {
+ return HorizonResponse.empty();
+ }
+
+ @Override
+ public HorizonResponse getDashboard(String dashboardId) {
+ return HorizonResponse.empty();
+ }
+
+ @Override
+ public HorizonResponse getFavorite(String userId) {
+ return HorizonResponse.empty();
+ }
+
+ @Override
+ public HorizonResponse getTopFolders() {
+ return HorizonResponse.empty();
+ }
+
+ @Override
+ public HorizonResponse getRecent(String userId) {
+ return HorizonResponse.empty();
+ }
+
+ @Override
+ public HorizonResponse getClipboard(String dashboardId) {
+ return HorizonResponse.empty();
+ }
+
+ @Override
+ public HorizonResponse getMetaData(byte[] query) {
+ return HorizonResponse.empty();
+ }
+}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/package-info.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/package-info.java
new file mode 100644
index 00000000000..80bb635089c
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/package-info.java
@@ -0,0 +1,5 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+@ExportPackage
+package com.yahoo.vespa.hosted.controller.api.integration.horizon;
+
+import com.yahoo.osgi.annotation.ExportPackage;
\ No newline at end of file
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/NodeHistory.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/NodeHistory.java
index 624e4c61662..cf40ac00d64 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/NodeHistory.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/NodeHistory.java
@@ -17,7 +17,7 @@ public class NodeHistory {
@JsonProperty("at")
public Long at;
@JsonProperty("agent")
- public Agent agent;
+ public String agent;
@JsonProperty("event")
public String event;
@@ -25,7 +25,7 @@ public class NodeHistory {
return at;
}
- public Agent getAgent() {
+ public String getAgent() {
return agent;
}
@@ -33,24 +33,4 @@ public class NodeHistory {
return event;
}
- public enum Agent {
- operator,
- application,
- system,
- DirtyExpirer,
- DynamicProvisioningMaintainer,
- FailedExpirer,
- InactiveExpirer,
- NodeFailer,
- NodeHealthTracker,
- ProvisionedExpirer,
- Rebalancer,
- ReservationExpirer,
- RetiringUpgrader,
- RebuildingOsUpgrader,
- SpareCapacityMaintainer,
- SwitchRebalancer,
- HostEncrypter,
- }
-
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/user/User.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/user/User.java
index dcce25bda95..d3ed804e546 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/user/User.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/user/User.java
@@ -1,6 +1,7 @@
// Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.api.integration.user;
+import java.time.LocalDate;
import java.util.Objects;
/**
@@ -9,17 +10,34 @@ import java.util.Objects;
public class User {
public static final String ATTRIBUTE_NAME = "vespa.user.attributes";
+ public static final LocalDate NO_DATE = LocalDate.EPOCH;
private final String email;
private final String name;
private final String nickname;
private final String picture;
+ private final boolean isVerified;
+ private final int loginCount;
+ private final LocalDate lastLogin;
public User(String email, String name, String nickname, String picture) {
this.email = Objects.requireNonNull(email);
this.name = name;
this.nickname = nickname;
this.picture = picture;
+ this.isVerified = false;
+ this.loginCount = -1;
+ this.lastLogin = NO_DATE;
+ }
+
+ public User(String email, String name, String nickname, String picture, boolean isVerified, int loginCount, LocalDate lastLogin) {
+ this.email = Objects.requireNonNull(email);
+ this.name = name;
+ this.nickname = nickname;
+ this.picture = picture;
+ this.isVerified = isVerified;
+ this.loginCount = loginCount;
+ this.lastLogin = Objects.requireNonNull(lastLogin);
}
public String name() {
@@ -38,6 +56,12 @@ public class User {
return picture;
}
+ public LocalDate lastLogin() { return lastLogin; }
+
+ public boolean isVerified() { return isVerified; }
+
+ public int loginCount() { return loginCount; }
+
@Override
public boolean equals(Object o) {
if (this == o) return true;
@@ -46,11 +70,14 @@ public class User {
return Objects.equals(name, user.name) &&
Objects.equals(email, user.email) &&
Objects.equals(nickname, user.nickname) &&
- Objects.equals(picture, user.picture);
+ Objects.equals(picture, user.picture) &&
+ Objects.equals(lastLogin, user.lastLogin) &&
+ loginCount == user.loginCount &&
+ isVerified == user.isVerified;
}
@Override
public int hashCode() {
- return Objects.hash(name, email, nickname, picture);
+ return Objects.hash(name, email, nickname, picture, lastLogin, loginCount, isVerified);
}
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java
index 5e5dfcd6aed..327175c19ed 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java
@@ -202,10 +202,6 @@ enum PathGroup {
classifiedTenantInfo("/application/v4/",
"/application/v4/tenant/"),
- /** Paths which contain (not very strictly) classified information about, e.g., customers. */
- classifiedInfo("/",
- "/d/{*}"),
-
/** Paths providing public information. */
publicInfo("/user/v1/user", // Information about who you are.
"/badge/v1/{*}", // Badges for deployment jobs.
@@ -229,7 +225,10 @@ enum PathGroup {
endpointCertificateRequestInfo("/certificateRequests/"),
/** Path used for secret store management */
- secretStore(Matcher.tenant, "/application/v4/tenant/{tenant}/secret-store/{*}");
+ secretStore(Matcher.tenant, "/application/v4/tenant/{tenant}/secret-store/{*}"),
+
+ /** Paths used to proxy Horizon metric requests */
+ horizonProxy("/horizon/v1/{*}");
final List<String> pathSpecs;
final List<Matcher> matchers;
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/Policy.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/Policy.java
index ee5f1d806ab..eae5ad5b685 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/Policy.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/Policy.java
@@ -201,7 +201,11 @@ enum Policy {
/** Secret store operations */
secretStoreOperations(Privilege.grant(Action.all())
.on(PathGroup.secretStore)
- .in(SystemName.PublicCd, SystemName.Public));
+ .in(SystemName.PublicCd, SystemName.Public)),
+
+ horizonProxyOperations(Privilege.grant(Action.all())
+ .on(PathGroup.horizonProxy)
+ .in(SystemName.PublicCd));
private final Set<Privilege> privileges;
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/RoleDefinition.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/RoleDefinition.java
index a0ee0fe3548..3b0e7222cf1 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/RoleDefinition.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/RoleDefinition.java
@@ -44,7 +44,8 @@ public enum RoleDefinition {
Policy.publicRead,
Policy.paymentInstrumentRead,
Policy.paymentInstrumentDelete,
- Policy.billingInformationRead),
+ Policy.billingInformationRead,
+ Policy.horizonProxyOperations),
/** User — the dev.ops. role for normal Vespa tenant users */
developer(Policy.applicationCreate,
diff --git a/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobTypeTest.java b/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobTypeTest.java
new file mode 100644
index 00000000000..22486875a0b
--- /dev/null
+++ b/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobTypeTest.java
@@ -0,0 +1,28 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.api.integration.deployment;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertTrue;
+
+/**
+ * @author jonmv
+ */
+public class JobTypeTest {
+
+ @Test
+ public void test() {
+ for (JobType type : JobType.values()) {
+ if (type.isProduction()) {
+ boolean match = false;
+ for (JobType other : JobType.values())
+ match |= type != other
+ && type.isTest() == other.isDeployment()
+ && type.zones.equals(other.zones);
+
+ assertTrue(type + " should have matching job", match);
+ }
+ }
+ }
+
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
index cb3c84f5bd1..c867b97b544 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
@@ -90,7 +90,6 @@ import java.util.function.Consumer;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
-import java.util.stream.Stream;
import static com.yahoo.vespa.hosted.controller.api.integration.configserver.Node.State.active;
import static com.yahoo.vespa.hosted.controller.api.integration.configserver.Node.State.reserved;
@@ -443,6 +442,9 @@ public class ApplicationController {
if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod))
application = controller.routing().assignRotations(application, instance);
+ // Validate new deployment spec thoroughly before storing it.
+ controller.jobController().deploymentStatus(application.get());
+
store(application);
return application;
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
index 1b1df28c201..990549b6d8c 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
@@ -72,7 +72,6 @@ public class RoutingController {
private final RoutingPolicies routingPolicies;
private final RotationRepository rotationRepository;
private final BooleanFlag hideSharedRoutingEndpoint;
- private final BooleanFlag vespaAppDomainInCertificate;
public RoutingController(Controller controller, RotationsConfig rotationsConfig) {
this.controller = Objects.requireNonNull(controller, "controller must be non-null");
@@ -80,7 +79,6 @@ public class RoutingController {
this.rotationRepository = new RotationRepository(rotationsConfig, controller.applications(),
controller.curator());
this.hideSharedRoutingEndpoint = Flags.HIDE_SHARED_ROUTING_ENDPOINT.bindTo(controller.flagSource());
- this.vespaAppDomainInCertificate = Flags.VESPA_APP_DOMAIN_IN_CERTIFICATE.bindTo(controller.flagSource());
}
public RoutingPolicies policies() {
@@ -180,7 +178,7 @@ public class RoutingController {
builder = builder.routingMethod(RoutingMethod.exclusive)
.on(Port.tls());
Endpoint endpoint = builder.in(controller.system());
- if (controller.system().isPublic() && vespaAppDomainInCertificate.with(FetchVector.Dimension.APPLICATION_ID, deployment.applicationId().serializedForm()).value()) {
+ if (controller.system().isPublic()) {
Endpoint legacyEndpoint = builder.legacy().in(controller.system());
endpointDnsNames.add(legacyEndpoint.dnsName());
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
index c7270b6c426..10f96ff13cd 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
@@ -340,7 +340,10 @@ public class InternalStepRunner implements StepRunner {
.map(since -> since.isBefore(controller.clock().instant().minus(timeouts.noNodesDown())))
.orElse(false)) {
if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
- failureReason = "No nodes allowed to suspend to progress installation for " + timeouts.noNodesDown().toMinutes() + " minutes.";
+ failureReason = "Timed out after waiting " + timeouts.noNodesDown().toMinutes() + " minutes for " +
+ "nodes to suspend. This is normal if the cluster is excessively busy. " +
+ "Nodes will continue to attempt suspension to progress installation independently of " +
+ "this run.";
else
failureReason = "Nodes not able to start with new application package.";
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeScheduler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeScheduler.java
index 666d1c3b23a..a02937a03e3 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeScheduler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeScheduler.java
@@ -43,8 +43,10 @@ public class OsUpgradeScheduler extends ControllerMaintainer {
@Override
protected double maintain() {
+ Instant now = controller().clock().instant();
+ if (!canTriggerAt(now)) return 1.0;
for (var cloud : supportedClouds()) {
- Optional<Version> newTarget = newTargetIn(cloud);
+ Optional<Version> newTarget = newTargetIn(cloud, now);
if (newTarget.isEmpty()) continue;
controller().upgradeOsIn(cloud, newTarget.get(), upgradeBudget(), false);
}
@@ -52,14 +54,12 @@ public class OsUpgradeScheduler extends ControllerMaintainer {
}
/** Returns the new target version for given cloud, if any */
- private Optional<Version> newTargetIn(CloudName cloud) {
+ private Optional<Version> newTargetIn(CloudName cloud, Instant now) {
Optional<Version> currentTarget = controller().osVersionTarget(cloud)
.map(OsVersionTarget::osVersion)
.map(OsVersion::version);
if (currentTarget.isEmpty()) return Optional.empty();
if (!hasExpired(currentTarget.get())) return Optional.empty();
-
- Instant now = controller().clock().instant();
String qualifier = LocalDate.ofInstant(now.minus(AVAILABILITY_INTERVAL), ZoneOffset.UTC)
.format(VERSION_DATE_PATTERN);
return Optional.of(new Version(currentTarget.get().getMajor(),
@@ -88,6 +88,14 @@ public class OsUpgradeScheduler extends ControllerMaintainer {
.collect(Collectors.toUnmodifiableSet());
}
+ private boolean canTriggerAt(Instant instant) {
+ int hourOfDay = instant.atZone(ZoneOffset.UTC).getHour();
+ int dayOfWeek = instant.atZone(ZoneOffset.UTC).getDayOfWeek().getValue();
+ // Upgrade can only be scheduled between 07:00 and 12:59 UTC, Monday-Thursday
+ return hourOfDay >= 7 && hourOfDay <= 12 &&
+ dayOfWeek < 5;
+ }
+
private Duration upgradeBudget() {
return controller().system().isCd() ? Duration.ofHours(1) : Duration.ofDays(14);
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgrader.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgrader.java
index e71fcf12b23..203c8187c2c 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgrader.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgrader.java
@@ -3,7 +3,6 @@ package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.component.Version;
import com.yahoo.config.provision.CloudName;
-import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.zone.ZoneApi;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
@@ -89,12 +88,18 @@ public class OsUpgrader extends InfrastructureUpgrader<OsVersionTarget> {
/** Returns the available upgrade budget for given zone */
private Duration zoneBudgetOf(Duration totalBudget, ZoneApi zone) {
- if (!zone.getEnvironment().isProduction()) return Duration.ZERO;
- long consecutiveProductionZones = upgradePolicy.asList().stream()
- .filter(parallelZones -> parallelZones.stream().map(ZoneApi::getEnvironment)
- .anyMatch(Environment::isProduction))
- .count();
- return totalBudget.dividedBy(consecutiveProductionZones);
+ if (!spendBudget(zone)) return Duration.ZERO;
+ long consecutiveZones = upgradePolicy.asList().stream()
+ .filter(parallelZones -> parallelZones.stream().anyMatch(this::spendBudget))
+ .count();
+ return totalBudget.dividedBy(consecutiveZones);
+ }
+
+ /** Returns whether to spend upgrade budget on given zone */
+ private boolean spendBudget(ZoneApi zone) {
+ if (!zone.getEnvironment().isProduction()) return false;
+ if (controller().zoneRegistry().systemZone().getVirtualId().equals(zone.getVirtualId())) return false; // Controller zone
+ return true;
}
/** Returns whether node is in a state where it can be upgraded */
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
index 3736d18a01c..a90f10401aa 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
@@ -632,7 +632,7 @@ public class CuratorDb {
/** Take lock before reading before writing */
public void writeSupportAccess(DeploymentId deploymentId, SupportAccess supportAccess) {
- curator.set(supportAccessPath(deploymentId), asJson(SupportAccessSerializer.toSlime(supportAccess, true, Optional.empty())));
+ curator.set(supportAccessPath(deploymentId), asJson(SupportAccessSerializer.toSlime(supportAccess)));
}
// -------------- Paths ---------------------------------------------------
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/SupportAccessSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/SupportAccessSerializer.java
index 74e2bfbb471..33596fce2bd 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/SupportAccessSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/SupportAccessSerializer.java
@@ -45,33 +45,65 @@ public class SupportAccessSerializer {
private static final String certificateFieldName = "certificate";
- public static Slime toSlime(SupportAccess supportAccess, boolean includeCertificates, Optional<Instant> withCurrentState) {
+ public static Slime toSlime(SupportAccess supportAccess) {
Slime slime = new Slime();
Cursor root = slime.setObject();
- withCurrentState.ifPresent(now -> {
- Cursor status = root.setObject(stateFieldName);
- SupportAccess.CurrentStatus currentState = supportAccess.currentStatus(now);
- status.setString(supportAccessFieldName, currentState.state().name());
- if (currentState.state() == SupportAccess.State.ALLOWED) {
- status.setString(untilFieldName, serializeInstant(currentState.allowedUntil().orElseThrow()));
- status.setString(byFieldName, currentState.allowedBy().orElseThrow());
- }
- }
- );
-
- Cursor history = root.setArray(historyFieldName);
- for (SupportAccessChange change : supportAccess.changeHistory()) {
- Cursor historyObject = history.addObject();
+ serializeHistoricEvents(root, supportAccess.changeHistory(), List.of());
+ serializeGrants(root, supportAccess.grantHistory(), true);
+
+ return slime;
+ }
+
+ public static Slime serializeCurrentState(SupportAccess supportAccess, Instant currentTime) {
+ Slime slime = new Slime();
+ Cursor root = slime.setObject();
+
+ Cursor status = root.setObject(stateFieldName);
+ SupportAccess.CurrentStatus currentState = supportAccess.currentStatus(currentTime);
+ status.setString(supportAccessFieldName, currentState.state().name());
+ if (currentState.state() == SupportAccess.State.ALLOWED) {
+ status.setString(untilFieldName, serializeInstant(currentState.allowedUntil().orElseThrow()));
+ status.setString(byFieldName, currentState.allowedBy().orElseThrow());
+ }
+
+ List<SupportAccessGrant> inactiveGrants = supportAccess.grantHistory().stream()
+ .filter(grant -> currentTime.isAfter(grant.certificate().getNotAfter().toInstant()))
+ .collect(Collectors.toList());
+
+ serializeHistoricEvents(root, supportAccess.changeHistory(), inactiveGrants);
+
+ // Active grants should show up in the grant section
+ List<SupportAccessGrant> activeGrants = supportAccess.grantHistory().stream()
+ .filter(grant -> currentTime.isBefore(grant.certificate().getNotAfter().toInstant()))
+ .collect(Collectors.toList());
+ serializeGrants(root, activeGrants, false);
+ return slime;
+ }
+
+ private static void serializeHistoricEvents(Cursor root, List<SupportAccessChange> changeEvents, List<SupportAccessGrant> historicGrants) {
+ Cursor historyRoot = root.setArray(historyFieldName);
+ for (SupportAccessChange change : changeEvents) {
+ Cursor historyObject = historyRoot.addObject();
historyObject.setString(stateFieldName, change.accessAllowedUntil().isPresent() ? allowedStateName : disallowedStateName);
historyObject.setString(atFieldName, serializeInstant(change.changeTime()));
change.accessAllowedUntil().ifPresent(allowedUntil -> historyObject.setString(untilFieldName, serializeInstant(allowedUntil)));
historyObject.setString(byFieldName, change.madeBy());
}
- Cursor grants = root.setArray(grantFieldName);
- for (SupportAccessGrant grant : supportAccess.grantHistory()) {
- Cursor grantObject = grants.addObject();
+ for (SupportAccessGrant grant : historicGrants) {
+ Cursor historyObject = historyRoot.addObject();
+ historyObject.setString(stateFieldName, "grant");
+ historyObject.setString(atFieldName, serializeInstant(grant.certificate().getNotBefore().toInstant()));
+ historyObject.setString(untilFieldName, serializeInstant(grant.certificate().getNotAfter().toInstant()));
+ historyObject.setString(byFieldName, grant.requestor());
+ }
+ }
+
+ private static void serializeGrants(Cursor root, List<SupportAccessGrant> grants, boolean includeCertificates) {
+ Cursor grantsRoot = root.setArray(grantFieldName);
+ for (SupportAccessGrant grant : grants) {
+ Cursor grantObject = grantsRoot.addObject();
grantObject.setString(requestorFieldName, grant.requestor());
if (includeCertificates) {
grantObject.setString(certificateFieldName, X509CertificateUtils.toPem(grant.certificate()));
@@ -80,7 +112,6 @@ public class SupportAccessSerializer {
grantObject.setString(notAfterFieldName, serializeInstant(grant.certificate().getNotAfter().toInstant()));
}
- return slime;
}
private static String serializeInstant(Instant i) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
index d1cbe8e14b4..017da94facc 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
@@ -31,7 +31,6 @@ import com.yahoo.restapi.Path;
import com.yahoo.restapi.ResourceResponse;
import com.yahoo.restapi.SlimeJsonResponse;
import com.yahoo.security.KeyUtils;
-import com.yahoo.security.X509CertificateUtils;
import com.yahoo.slime.Cursor;
import com.yahoo.slime.Inspector;
import com.yahoo.slime.JsonParseException;
@@ -123,7 +122,6 @@ import java.net.URISyntaxException;
import java.security.DigestInputStream;
import java.security.Principal;
import java.security.PublicKey;
-import java.security.cert.X509Certificate;
import java.time.DayOfWeek;
import java.time.Duration;
import java.time.Instant;
@@ -982,7 +980,7 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment);
- return new SlimeJsonResponse(SupportAccessSerializer.toSlime(supportAccess, false, Optional.of(controller.clock().instant())));
+ return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant()));
}
// TODO support access: only let tenants (not operators!) allow access
@@ -992,14 +990,14 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
Principal principal = requireUserPrincipal(request);
Instant now = controller.clock().instant();
SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName());
- return new SlimeJsonResponse(SupportAccessSerializer.toSlime(allowed, false, Optional.of(now)));
+ return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
}
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
Principal principal = requireUserPrincipal(request);
SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
- return new SlimeJsonResponse(SupportAccessSerializer.toSlime(disallowed, false, Optional.of(controller.clock().instant())));
+ return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
}
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/HorizonApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/HorizonApiHandler.java
new file mode 100644
index 00000000000..6f5b1f30592
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/HorizonApiHandler.java
@@ -0,0 +1,121 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.restapi.horizon;
+
+import com.google.inject.Inject;
+import com.yahoo.config.provision.SystemName;
+import com.yahoo.container.jdisc.HttpRequest;
+import com.yahoo.container.jdisc.HttpResponse;
+import com.yahoo.container.jdisc.LoggingRequestHandler;
+import com.yahoo.restapi.ErrorResponse;
+import com.yahoo.restapi.Path;
+import com.yahoo.vespa.hosted.controller.Controller;
+import com.yahoo.vespa.hosted.controller.api.integration.horizon.HorizonClient;
+import com.yahoo.vespa.hosted.controller.api.integration.horizon.HorizonResponse;
+import com.yahoo.vespa.hosted.controller.api.role.SecurityContext;
+import com.yahoo.yolean.Exceptions;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Optional;
+import java.util.logging.Level;
+
+/**
+ * Proxies metrics requests from Horizon UI
+ *
+ * @author valerijf
+ */
+public class HorizonApiHandler extends LoggingRequestHandler {
+
+ private final SystemName systemName;
+ private final HorizonClient client;
+
+ @Inject
+ public HorizonApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller) {
+ super(parentCtx);
+ this.systemName = controller.system();
+ this.client = controller.serviceRegistry().horizonClient();
+ }
+
+ @Override
+ public HttpResponse handle(HttpRequest request) {
+ try {
+ switch (request.getMethod()) {
+ case GET: return get(request);
+ case POST: return post(request);
+ case PUT: return put(request);
+ default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
+ }
+ }
+ catch (IllegalArgumentException e) {
+ return ErrorResponse.badRequest(Exceptions.toMessageString(e));
+ }
+ catch (RuntimeException e) {
+ log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
+ return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
+ }
+ }
+
+ private HttpResponse get(HttpRequest request) {
+ Path path = new Path(request.getUri());
+ if (path.matches("/horizon/v1/config/dashboard/topFolders")) return new JsonInputStreamResponse(client.getTopFolders());
+ if (path.matches("/horizon/v1/config/dashboard/file/{id}")) return new JsonInputStreamResponse(client.getDashboard(path.get("id")));
+ if (path.matches("/horizon/v1/config/dashboard/favorite")) return new JsonInputStreamResponse(client.getFavorite(request.getProperty("user")));
+ if (path.matches("/horizon/v1/config/dashboard/recent")) return new JsonInputStreamResponse(client.getRecent(request.getProperty("user")));
+ return ErrorResponse.notFoundError("Nothing at " + path);
+ }
+
+ private HttpResponse post(HttpRequest request) {
+ Path path = new Path(request.getUri());
+ if (path.matches("/horizon/v1/tsdb/api/query/graph")) return tsdbQuery(request, true);
+ if (path.matches("/horizon/v1/meta/search/timeseries")) return tsdbQuery(request, false);
+ return ErrorResponse.notFoundError("Nothing at " + path);
+ }
+
+ private HttpResponse put(HttpRequest request) {
+ Path path = new Path(request.getUri());
+ if (path.matches("/horizon/v1/config/user")) return new JsonInputStreamResponse(client.getUser());
+ return ErrorResponse.notFoundError("Nothing at " + path);
+ }
+
+ private HttpResponse tsdbQuery(HttpRequest request, boolean isMetricQuery) {
+ SecurityContext securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
+ try {
+ byte[] data = TsdbQueryRewriter.rewrite(request.getData().readAllBytes(), securityContext.roles(), systemName);
+ return new JsonInputStreamResponse(isMetricQuery ? client.getMetrics(data) : client.getMetaData(data));
+ } catch (TsdbQueryRewriter.UnauthorizedException e) {
+ return ErrorResponse.forbidden("Access denied");
+ } catch (IOException e) {
+ return ErrorResponse.badRequest("Failed to parse request body: " + e.getMessage());
+ }
+ }
+
+ private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> clazz) {
+ return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
+ .filter(clazz::isInstance)
+ .map(clazz::cast)
+ .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
+ }
+
+ private static class JsonInputStreamResponse extends HttpResponse {
+
+ private final HorizonResponse response;
+
+ public JsonInputStreamResponse(HorizonResponse response) {
+ super(response.code());
+ this.response = response;
+ }
+
+ @Override
+ public String getContentType() {
+ return "application/json";
+ }
+
+ @Override
+ public void render(OutputStream outputStream) throws IOException {
+ try (InputStream inputStream = response.inputStream()) {
+ inputStream.transferTo(outputStream);
+ }
+ }
+ }
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriter.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriter.java
new file mode 100644
index 00000000000..e034be46063
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriter.java
@@ -0,0 +1,112 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.restapi.horizon;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ArrayNode;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.TenantName;
+import com.yahoo.vespa.hosted.controller.api.role.Role;
+import com.yahoo.vespa.hosted.controller.api.role.RoleDefinition;
+import com.yahoo.vespa.hosted.controller.api.role.TenantRole;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * @author valerijf
+ */
+public class TsdbQueryRewriter {
+
+ private static final ObjectMapper mapper = new ObjectMapper();
+ private static final EnumSet<RoleDefinition> operatorRoleDefinitions =
+ EnumSet.of(RoleDefinition.hostedOperator, RoleDefinition.hostedSupporter);
+
+ public static byte[] rewrite(byte[] data, Set<Role> roles, SystemName systemName) throws IOException {
+ boolean operator = roles.stream().map(Role::definition).anyMatch(operatorRoleDefinitions::contains);
+
+ // Anyone with any tenant relation can view metrics for apps within those tenants
+ Set<TenantName> authorizedTenants = roles.stream()
+ .filter(TenantRole.class::isInstance)
+ .map(role -> ((TenantRole) role).tenant())
+ .collect(Collectors.toUnmodifiableSet());
+ if (!operator && authorizedTenants.isEmpty())
+ throw new UnauthorizedException();
+
+ JsonNode root = mapper.readTree(data);
+ requireLegalType(root);
+ getField(root, "executionGraph", ArrayNode.class)
+ .ifPresent(graph -> rewriteQueryGraph(graph, authorizedTenants, operator, systemName));
+ getField(root, "filters", ArrayNode.class)
+ .ifPresent(filters -> rewriteFilters(filters, authorizedTenants, operator, systemName));
+ getField(root, "queries", ArrayNode.class)
+ .ifPresent(graph -> rewriteQueryGraph(graph, authorizedTenants, operator, systemName));
+
+ return mapper.writeValueAsBytes(root);
+ }
+
+ private static void rewriteQueryGraph(ArrayNode executionGraph, Set<TenantName> tenantNames, boolean operator, SystemName systemName) {
+ for (int i = 0; i < executionGraph.size(); i++) {
+ JsonNode execution = executionGraph.get(i);
+
+ // Will be handled by rewriteFilters()
+ if (execution.has("filterId")) continue;
+
+ rewriteFilter((ObjectNode) execution, tenantNames, operator, systemName);
+ }
+ }
+
+ private static void rewriteFilters(ArrayNode filters, Set<TenantName> tenantNames, boolean operator, SystemName systemName) {
+ for (int i = 0; i < filters.size(); i++)
+ rewriteFilter((ObjectNode) filters.get(i), tenantNames, operator, systemName);
+ }
+
+ private static void rewriteFilter(ObjectNode parent, Set<TenantName> tenantNames, boolean operator, SystemName systemName) {
+ ObjectNode prev = ((ObjectNode) parent.get("filter"));
+ ArrayNode filters;
+ // If we don't already have a filter object, or the object that we have is not an AND filter
+ if (prev == null || !"Chain".equals(prev.get("type").asText()) || prev.get("op") != null && !"AND".equals(prev.get("op").asText())) {
+ // Create new filter object
+ filters = parent.putObject("filter")
+ .put("type", "Chain")
+ .put("op", "AND")
+ .putArray("filters");
+
+ // Add the previous filter to the AND expression
+ if (prev != null) filters.add(prev);
+ } else filters = (ArrayNode) prev.get("filters");
+
+ // Make sure we only show metrics in the relevant system
+ ObjectNode systemFilter = filters.addObject();
+ systemFilter.put("type", "TagValueLiteralOr");
+ systemFilter.put("filter", systemName.name().toLowerCase());
+ systemFilter.put("tagKey", "system");
+
+ // Make sure non-operators cannot see metrics outside of their tenants
+ if (!operator) {
+ ObjectNode appFilter = filters.addObject();
+ appFilter.put("type", "TagValueRegex");
+ appFilter.put("filter",
+ tenantNames.stream().map(TenantName::value).sorted().collect(Collectors.joining("|", "^(", ")\\..*")));
+ appFilter.put("tagKey", "applicationId");
+ }
+ }
+
+ private static void requireLegalType(JsonNode root) {
+ Optional.ofNullable(root.get("type"))
+ .map(JsonNode::asText)
+ .filter(type -> !"TAG_KEYS_AND_VALUES".equals(type))
+ .ifPresent(type -> { throw new IllegalArgumentException("Illegal type " + type); });
+ }
+
+ private static <T extends JsonNode> Optional<T> getField(JsonNode object, String fieldName, Class<T> clazz) {
+ return Optional.ofNullable(object.get(fieldName)).filter(clazz::isInstance).map(clazz::cast);
+ }
+
+ static class UnauthorizedException extends RuntimeException { }
+
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java
index 6e069b2b5ec..e195401f03a 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java
@@ -40,6 +40,8 @@ import com.yahoo.vespa.hosted.controller.tenant.Tenant;
import com.yahoo.yolean.Exceptions;
import java.security.PublicKey;
+import java.time.LocalDateTime;
+import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
@@ -136,12 +138,16 @@ public class UserApiHandler extends LoggingRequestHandler {
RoleDefinition.hostedAccountant);
private HttpResponse userMetadata(HttpRequest request) {
- @SuppressWarnings("unchecked")
- Map<String, String> userAttributes = (Map<String, String>) getAttribute(request, User.ATTRIBUTE_NAME, Map.class);
- User user = new User(userAttributes.get("email"),
- userAttributes.get("name"),
- userAttributes.get("nickname"),
- userAttributes.get("picture"));
+ User user;
+ if (request.getJDiscRequest().context().get(User.ATTRIBUTE_NAME) instanceof User) {
+ user = getAttribute(request, User.ATTRIBUTE_NAME, User.class);
+ } else {
+ // Remove this after June 2021 (once all security filters are setting this)
+ @SuppressWarnings("unchecked")
+ Map<String, String> attr = (Map<String, String>) getAttribute(request, User.ATTRIBUTE_NAME, Map.class);
+ user = new User(attr.get("email"), attr.get("name"), attr.get("nickname"), attr.get("picture"));
+ }
+
Set<Role> roles = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class).roles();
Map<TenantName, List<TenantRole>> tenantRolesByTenantName = roles.stream()
@@ -241,6 +247,11 @@ public class UserApiHandler extends LoggingRequestHandler {
userObject.setString("email", user.email());
if (user.nickname() != null) userObject.setString("nickname", user.nickname());
if (user.picture() != null) userObject.setString("picture", user.picture());
+ userObject.setBool("verified", user.isVerified());
+ if (!user.lastLogin().equals(User.NO_DATE))
+ userObject.setString("lastLogin", user.lastLogin().format(DateTimeFormatter.ISO_DATE));
+ if (user.loginCount() > -1)
+ userObject.setLong("loginCount", user.loginCount());
}
private HttpResponse addTenantRoleMember(String tenantName, HttpRequest request) {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/certificate/EndpointCertificatesTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/certificate/EndpointCertificatesTest.java
index d8544ff3947..a3580a9fda3 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/certificate/EndpointCertificatesTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/certificate/EndpointCertificatesTest.java
@@ -13,8 +13,6 @@ import com.yahoo.security.SignatureAlgorithm;
import com.yahoo.security.X509CertificateBuilder;
import com.yahoo.security.X509CertificateUtils;
import com.yahoo.test.ManualClock;
-import com.yahoo.vespa.flags.Flags;
-import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificateMetadata;
@@ -133,7 +131,6 @@ public class EndpointCertificatesTest {
@Test
public void provisions_new_certificate_in_public_prod() {
ControllerTester tester = new ControllerTester(SystemName.Public);
- ((InMemoryFlagSource) tester.controller().flagSource()).withBooleanFlag(Flags.VESPA_APP_DOMAIN_IN_CERTIFICATE.id(), true);
EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
List<String> expectedSans = List.of(
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ServiceRegistryMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ServiceRegistryMock.java
index 4a068681a50..a4ce0316e25 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ServiceRegistryMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ServiceRegistryMock.java
@@ -23,6 +23,8 @@ import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCe
import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificateValidatorMock;
import com.yahoo.vespa.hosted.controller.api.integration.dns.MemoryNameService;
import com.yahoo.vespa.hosted.controller.api.integration.entity.MemoryEntityService;
+import com.yahoo.vespa.hosted.controller.api.integration.horizon.HorizonClient;
+import com.yahoo.vespa.hosted.controller.api.integration.horizon.MockHorizonClient;
import com.yahoo.vespa.hosted.controller.api.integration.organization.MockContactRetriever;
import com.yahoo.vespa.hosted.controller.api.integration.organization.MockIssueHandler;
import com.yahoo.vespa.hosted.controller.api.integration.resource.CostReportConsumerMock;
@@ -74,6 +76,7 @@ public class ServiceRegistryMock extends AbstractComponent implements ServiceReg
private final ArchiveService archiveService = new MockArchiveService();
private final MockChangeRequestClient changeRequestClient = new MockChangeRequestClient();
private final AccessControlService accessControlService = new MockAccessControlService();
+ private final HorizonClient horizonClient = new MockHorizonClient();
public ServiceRegistryMock(SystemName system) {
this.zoneRegistryMock = new ZoneRegistryMock(system);
@@ -235,6 +238,11 @@ public class ServiceRegistryMock extends AbstractComponent implements ServiceReg
return accessControlService;
}
+ @Override
+ public HorizonClient horizonClient() {
+ return horizonClient;
+ }
+
public ConfigServerMock configServerMock() {
return configServerMock;
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java
index 232521c9609..ffc82f90ad4 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java
@@ -70,7 +70,7 @@ public class DeploymentExpirerTest {
assertEquals(1, permanentDeployments(prodApp.instance()));
// Dev application expires when enough time has passed since most recent attempt
- tester.clock().advance(Duration.ofDays(12));
+ tester.clock().advance(Duration.ofDays(12).plus(Duration.ofSeconds(1)));
expirer.maintain();
assertEquals(0, permanentDeployments(devApp.instance()));
assertEquals(1, permanentDeployments(prodApp.instance()));
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeSchedulerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeSchedulerTest.java
index 7d512ba090c..7a0175845ca 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeSchedulerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeSchedulerTest.java
@@ -24,8 +24,8 @@ public class OsUpgradeSchedulerTest {
public void maintain() {
ControllerTester tester = new ControllerTester();
OsUpgradeScheduler scheduler = new OsUpgradeScheduler(tester.controller(), Duration.ofDays(1));
- Instant initialTime = Instant.parse("2021-01-23T00:00:00.00Z");
- tester.clock().setInstant(initialTime);
+ Instant t0 = Instant.parse("2021-01-23T00:00:00.00Z"); // Outside trigger period
+ tester.clock().setInstant(t0);
CloudName cloud = CloudName.from("cloud");
ZoneApi zone = zone("prod.us-west-1", cloud);
@@ -50,7 +50,12 @@ public class OsUpgradeSchedulerTest {
Version version1 = Version.fromString("7.0.0.20210302");
tester.clock().advance(Duration.ofDays(15).plus(Duration.ofSeconds(1)));
scheduler.maintain();
- assertEquals("New target set", version1, tester.controller().osVersionTarget(cloud).get().osVersion().version());
+ assertEquals("Target is unchanged because we're outside trigger period", version0,
+ tester.controller().osVersionTarget(cloud).get().osVersion().version());
+ tester.clock().advance(Duration.ofHours(7)); // Put us inside the trigger period
+ scheduler.maintain();
+ assertEquals("New target set", version1,
+ tester.controller().osVersionTarget(cloud).get().osVersion().version());
// A few days pass and target remains unchanged
tester.clock().advance(Duration.ofDays(2));
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgraderTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgraderTest.java
index 3e2fd4ec0b9..664a1fdc83c 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgraderTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgraderTest.java
@@ -119,11 +119,13 @@ public class OsUpgraderTest {
@Test
public void upgrade_os_with_budget() {
CloudName cloud = CloudName.from("cloud");
+ ZoneApi zone0 = zone("prod.us-north-42", "prod.controller", cloud);
ZoneApi zone1 = zone("dev.us-east-1", cloud);
ZoneApi zone2 = zone("prod.us-west-1", cloud);
ZoneApi zone3 = zone("prod.us-central-1", cloud);
ZoneApi zone4 = zone("prod.eu-west-1", cloud);
UpgradePolicy upgradePolicy = UpgradePolicy.create()
+ .upgrade(zone0)
.upgrade(zone1)
.upgradeInParallel(zone2, zone3)
.upgrade(zone4);
@@ -133,6 +135,7 @@ public class OsUpgraderTest {
List<SystemApplication> nodeTypes = List.of(SystemApplication.configServerHost, SystemApplication.tenantHost);
tester.configServer().bootstrap(List.of(zone1.getId(), zone2.getId(), zone3.getId(), zone4.getId()),
nodeTypes);
+ tester.configServer().addNodes(List.of(zone0.getVirtualId()), List.of(SystemApplication.controllerHost));
// Upgrade with budget
Version version = Version.fromString("7.1");
@@ -141,7 +144,16 @@ public class OsUpgraderTest {
statusUpdater.maintain();
osUpgrader.maintain();
+ // Controllers upgrade first
+ osUpgrader.maintain();
+ assertWanted(version, SystemApplication.controllerHost, zone0);
+ assertEquals("Controller zone gets a zero budget", Duration.ZERO, upgradeBudget(zone0, SystemApplication.controllerHost, version));
+ completeUpgrade(version, SystemApplication.controllerHost, zone0);
+ statusUpdater.maintain();
+ assertEquals(3, nodesOn(version).size());
+
// First zone upgrades
+ osUpgrader.maintain();
for (var nodeType : nodeTypes) {
assertEquals("Dev zone gets a zero budget", Duration.ZERO, upgradeBudget(zone1, nodeType, version));
completeUpgrade(version, nodeType, zone1);
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
index 484b471cbaa..326f4bf311e 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
@@ -164,7 +164,6 @@ public class UpgraderTest {
tester.triggerJobs();
assertEquals("Upgrade with error should retry", 1, tester.jobs().active().size());
-
// --- Failing application is repaired by changing the application, causing confidence to move above 'high' threshold
// Deploy application change
default0.submit(applicationPackage("default"));
@@ -1114,11 +1113,32 @@ public class UpgraderTest {
assertEquals("Upgrade orders are distinct", versions.size(), upgradeOrders.size());
}
+ private static final ApplicationPackage canaryApplicationPackage =
+ new ApplicationPackageBuilder().upgradePolicy("canary")
+ .region("us-west-1")
+ .region("us-east-3")
+ .build();
+
+ private static final ApplicationPackage defaultApplicationPackage =
+ new ApplicationPackageBuilder().upgradePolicy("default")
+ .region("us-west-1")
+ .region("us-east-3")
+ .build();
+
+ private static final ApplicationPackage conservativeApplicationPackage =
+ new ApplicationPackageBuilder().upgradePolicy("conservative")
+ .region("us-west-1")
+ .region("us-east-3")
+ .build();
+
+ /** Returns prebuilt application packages for efficiency */
private ApplicationPackage applicationPackage(String upgradePolicy) {
- return new ApplicationPackageBuilder().upgradePolicy(upgradePolicy)
- .region("us-west-1")
- .region("us-east-3")
- .build();
+ switch (upgradePolicy) {
+ case "canary" : return canaryApplicationPackage;
+ case "default" : return defaultApplicationPackage;
+ case "conservative" : return conservativeApplicationPackage;
+ default : throw new IllegalArgumentException("No upgrade policy '" + upgradePolicy + "'");
+ }
}
private DeploymentContext createAndDeploy(String applicationName, String upgradePolicy) {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/SupportAccessSerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/SupportAccessSerializerTest.java
index 2a43f8cc4f3..97cf53d7b89 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/SupportAccessSerializerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/SupportAccessSerializerTest.java
@@ -7,7 +7,7 @@ import com.yahoo.security.SignatureAlgorithm;
import com.yahoo.security.X509CertificateBuilder;
import com.yahoo.security.X509CertificateUtils;
import com.yahoo.slime.JsonFormat;
-import com.yahoo.slime.SlimeUtils;
+import com.yahoo.slime.Slime;
import com.yahoo.vespa.hosted.controller.support.access.SupportAccess;
import com.yahoo.vespa.hosted.controller.support.access.SupportAccessGrant;
import org.intellij.lang.annotations.Language;
@@ -21,7 +21,6 @@ import java.math.BigInteger;
import java.security.cert.X509Certificate;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
-import java.util.Optional;
import static org.junit.Assert.*;
@@ -80,7 +79,8 @@ public class SupportAccessSerializerTest {
@Test
public void serialize_default() {
- assertSerialized(SupportAccess.DISALLOWED_NO_HISTORY, true, Instant.EPOCH, "{\n" +
+ var slime = SupportAccessSerializer.serializeCurrentState(SupportAccess.DISALLOWED_NO_HISTORY, Instant.EPOCH);
+ assertSerialized(slime, "{\n" +
" \"state\": {\n" +
" \"supportAccess\": \"NOT_ALLOWED\"\n" +
" },\n" +
@@ -93,12 +93,14 @@ public class SupportAccessSerializerTest {
@Test
public void serialize_with_certificates() {
- assertSerialized(supportAccessExample, true, null, expectedWithCertificates);
+ var slime = SupportAccessSerializer.toSlime(supportAccessExample);
+ assertSerialized(slime, expectedWithCertificates);
}
@Test
public void serialize_with_status() {
- assertSerialized(supportAccessExample, false, hour(32),
+ var slime = SupportAccessSerializer.serializeCurrentState(supportAccessExample, hour(12));
+ assertSerialized(slime,
"{\n" +
" \"state\": {\n" +
" \"supportAccess\": \"ALLOWED\",\n" +
@@ -122,6 +124,12 @@ public class SupportAccessSerializerTest {
" \"at\": \"1970-01-01T02:00:00Z\",\n" +
" \"until\": \"1970-01-02T00:00:00Z\",\n" +
" \"by\": \"andreer\"\n" +
+ " },\n" +
+ " {\n" +
+ " \"state\": \"grant\",\n" +
+ " \"at\": \"1970-01-01T03:00:00Z\",\n" +
+ " \"until\": \"1970-01-01T04:00:00Z\",\n" +
+ " \"by\": \"mortent\"\n" +
" }\n" +
" ],\n" +
" \"grants\": [\n" +
@@ -129,28 +137,26 @@ public class SupportAccessSerializerTest {
" \"requestor\": \"mortent\",\n" +
" \"notBefore\": \"1970-01-01T07:00:00Z\",\n" +
" \"notAfter\": \"1970-01-01T19:00:00Z\"\n" +
- " },\n" +
- " {\n" +
- " \"requestor\": \"mortent\",\n" +
- " \"notBefore\": \"1970-01-01T03:00:00Z\",\n" +
- " \"notAfter\": \"1970-01-01T04:00:00Z\"\n" +
- " }\n" +
+ " }" +
+ "\n" +
" ]\n" +
"}\n");
}
@Test
public void deserialize() {
- assertEquals(supportAccessExample, SupportAccessSerializer.fromSlime(SlimeUtils.jsonToSlime(expectedWithCertificates)));
+ var slime = SupportAccessSerializer.toSlime(supportAccessExample);
+ assertSerialized(slime, expectedWithCertificates);
+
+ var deserialized = SupportAccessSerializer.fromSlime(slime);
+ assertEquals(supportAccessExample, deserialized);
}
private Instant hour(long h) {
return Instant.EPOCH.plus(h, ChronoUnit.HOURS);
}
- private void assertSerialized(SupportAccess supportAccess, boolean includeCertificates, Instant now, String expected) {
- var slime = SupportAccessSerializer.toSlime(supportAccess, includeCertificates, Optional.ofNullable(now));
-
+ private void assertSerialized(Slime slime, String expected) {
try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
new JsonFormat(false).encode(out, slime);
assertEquals(expected, out.toString());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriterTest.java
new file mode 100644
index 00000000000..ab9d50f8eae
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriterTest.java
@@ -0,0 +1,59 @@
+package com.yahoo.vespa.hosted.controller.restapi.horizon;
+
+import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.TenantName;
+import com.yahoo.slime.JsonFormat;
+import com.yahoo.slime.SlimeUtils;
+import com.yahoo.vespa.hosted.controller.api.role.Role;
+import org.junit.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.Set;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * @author valerijf
+ */
+public class TsdbQueryRewriterTest {
+
+ @Test
+ public void rewrites_query() throws IOException {
+ assertRewrite("filters-complex.json", "filters-complex.expected.json", Role.reader(TenantName.from("tenant2")));
+
+ assertRewrite("filter-in-execution-graph.json",
+ "filter-in-execution-graph.expected.json",
+ Role.reader(TenantName.from("tenant2")), Role.athenzTenantAdmin(TenantName.from("tenant3")));
+
+ assertRewrite("filter-in-execution-graph.json",
+ "filter-in-execution-graph.expected.operator.json",
+ Role.reader(TenantName.from("tenant2")), Role.athenzTenantAdmin(TenantName.from("tenant3")), Role.hostedOperator());
+
+ assertRewrite("no-filters.json",
+ "no-filters.expected.json",
+ Role.reader(TenantName.from("tenant2")), Role.athenzTenantAdmin(TenantName.from("tenant3")));
+
+ assertRewrite("filters-meta-query.json",
+ "filters-meta-query.expected.json",
+ Role.reader(TenantName.from("tenant2")), Role.athenzTenantAdmin(TenantName.from("tenant3")));
+ }
+
+ @Test(expected = TsdbQueryRewriter.UnauthorizedException.class)
+ public void throws_if_no_roles() throws IOException {
+ assertRewrite("filters-complex.json", "filters-complex.expected.json");
+ }
+
+ private static void assertRewrite(String initialFilename, String expectedFilename, Role... roles) throws IOException {
+ byte[] data = Files.readAllBytes(Paths.get("src/test/resources/horizon", initialFilename));
+ data = TsdbQueryRewriter.rewrite(data, Set.of(roles), SystemName.Public);
+
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ new JsonFormat(false).encode(baos, SlimeUtils.jsonToSlime(data));
+ String expectedJson = Files.readString(Paths.get("src/test/resources/horizon", expectedFilename));
+
+ assertEquals(expectedJson, baos.toString());
+ }
+} \ No newline at end of file
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/application-roles.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/application-roles.json
index 9bd66c16308..ca437dba761 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/application-roles.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/application-roles.json
@@ -6,11 +6,13 @@
{
"name": "administrator@tenant",
"email": "administrator@tenant",
+ "verified": false,
"roles": {}
},
{
"name": "developer@tenant",
"email": "developer@tenant",
+ "verified": false,
"roles": {}
}
]
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/tenant-roles.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/tenant-roles.json
index 6a1c4c88878..bc921e4bdf4 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/tenant-roles.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/tenant-roles.json
@@ -9,6 +9,7 @@
{
"name": "administrator@tenant",
"email": "administrator@tenant",
+ "verified": false,
"roles": {
"administrator": {
"explicit": true,
@@ -27,6 +28,7 @@
{
"name": "developer@tenant",
"email": "developer@tenant",
+ "verified": false,
"roles": {
"administrator": {
"explicit": false,
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-athenz.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-athenz.json
index 2ae3514bec3..5d3a38334ad 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-athenz.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-athenz.json
@@ -6,7 +6,8 @@
"user": {
"name": "Joe Developer",
"email": "dev@domail",
- "nickname": "dev"
+ "nickname": "dev",
+ "verified": false
},
"tenants": {
"sandbox": {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json
index 2d2a137c2ca..ae3dc68d9e3 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json
@@ -6,7 +6,8 @@
"user": {
"name": "Joe Developer",
"email": "dev@domail",
- "nickname": "dev"
+ "nickname": "dev",
+ "verified":false
},
"tenants": {
"sandbox": {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-applications.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-applications.json
index e03a18a1949..3bf999b490b 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-applications.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-applications.json
@@ -6,7 +6,8 @@
"user": {
"name": "Joe Developer",
"email": "dev@domail",
- "nickname": "dev"
+ "nickname": "dev",
+ "verified":false
},
"tenants": {},
"operator": [
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-trial-capacity-cloud.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-trial-capacity-cloud.json
index a7410b14850..27242424579 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-trial-capacity-cloud.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-trial-capacity-cloud.json
@@ -6,7 +6,8 @@
"user": {
"name": "Joe Developer",
"email": "dev@domail",
- "nickname": "dev"
+ "nickname": "dev",
+ "verified":false
},
"tenants": {}
} \ No newline at end of file
diff --git a/controller-server/src/test/resources/horizon/filter-in-execution-graph.expected.json b/controller-server/src/test/resources/horizon/filter-in-execution-graph.expected.json
new file mode 100644
index 00000000000..a71fd812de9
--- /dev/null
+++ b/controller-server/src/test/resources/horizon/filter-in-execution-graph.expected.json
@@ -0,0 +1,37 @@
+{
+ "start": 1619301600000,
+ "end": 1623161217471,
+ "executionGraph": [
+ {
+ "id": "q1_m1",
+ "type": "TimeSeriesDataSource",
+ "metric": {
+ "type": "MetricLiteral",
+ "metric": "Vespa.vespa.distributor.vds.distributor.docsstored.average"
+ },
+ "sourceId": null,
+ "fetchLast": false,
+ "filter": {
+ "type": "Chain",
+ "op": "AND",
+ "filters": [
+ {
+ "type": "TagValueLiteralOr",
+ "filter": "tenant1.application1.instance1",
+ "tagKey": "applicationId"
+ },
+ {
+ "type": "TagValueLiteralOr",
+ "filter": "public",
+ "tagKey": "system"
+ },
+ {
+ "type": "TagValueRegex",
+ "filter": "^(tenant2|tenant3)\\..*",
+ "tagKey": "applicationId"
+ }
+ ]
+ }
+ }
+ ]
+}
diff --git a/controller-server/src/test/resources/horizon/filter-in-execution-graph.expected.operator.json b/controller-server/src/test/resources/horizon/filter-in-execution-graph.expected.operator.json
new file mode 100644
index 00000000000..babf3219c6a
--- /dev/null
+++ b/controller-server/src/test/resources/horizon/filter-in-execution-graph.expected.operator.json
@@ -0,0 +1,32 @@
+{
+ "start": 1619301600000,
+ "end": 1623161217471,
+ "executionGraph": [
+ {
+ "id": "q1_m1",
+ "type": "TimeSeriesDataSource",
+ "metric": {
+ "type": "MetricLiteral",
+ "metric": "Vespa.vespa.distributor.vds.distributor.docsstored.average"
+ },
+ "sourceId": null,
+ "fetchLast": false,
+ "filter": {
+ "type": "Chain",
+ "op": "AND",
+ "filters": [
+ {
+ "type": "TagValueLiteralOr",
+ "filter": "tenant1.application1.instance1",
+ "tagKey": "applicationId"
+ },
+ {
+ "type": "TagValueLiteralOr",
+ "filter": "public",
+ "tagKey": "system"
+ }
+ ]
+ }
+ }
+ ]
+}
diff --git a/controller-server/src/test/resources/horizon/filter-in-execution-graph.json b/controller-server/src/test/resources/horizon/filter-in-execution-graph.json
new file mode 100644
index 00000000000..6a2512c3642
--- /dev/null
+++ b/controller-server/src/test/resources/horizon/filter-in-execution-graph.json
@@ -0,0 +1,21 @@
+{
+ "start": 1619301600000,
+ "end": 1623161217471,
+ "executionGraph": [
+ {
+ "id": "q1_m1",
+ "type": "TimeSeriesDataSource",
+ "metric": {
+ "type": "MetricLiteral",
+ "metric": "Vespa.vespa.distributor.vds.distributor.docsstored.average"
+ },
+ "sourceId": null,
+ "fetchLast": false,
+ "filter": {
+ "type": "TagValueLiteralOr",
+ "filter": "tenant1.application1.instance1",
+ "tagKey": "applicationId"
+ }
+ }
+ ]
+} \ No newline at end of file
diff --git a/controller-server/src/test/resources/horizon/filters-complex.expected.json b/controller-server/src/test/resources/horizon/filters-complex.expected.json
new file mode 100644
index 00000000000..b3416f8a410
--- /dev/null
+++ b/controller-server/src/test/resources/horizon/filters-complex.expected.json
@@ -0,0 +1,56 @@
+{
+ "start": 1623080040000,
+ "end": 1623166440000,
+ "executionGraph": [
+ {
+ "id": "q1_m1",
+ "type": "TimeSeriesDataSource",
+ "metric": {
+ "type": "MetricLiteral",
+ "metric": "Vespa.vespa.qrserver.documents_covered.count"
+ },
+ "sourceId": null,
+ "fetchLast": false,
+ "filterId": "filter-ni8"
+ }
+ ],
+ "filters": [
+ {
+ "filter": {
+ "type": "Chain",
+ "op": "AND",
+ "filters": [
+ {
+ "type": "NOT",
+ "filter": {
+ "type": "TagValueLiteralOr",
+ "filter": "tenant1.app1.instance1",
+ "tagKey": "applicationId"
+ }
+ },
+ {
+ "type": "TagValueLiteralOr",
+ "filter": "public",
+ "tagKey": "system"
+ },
+ {
+ "type": "TagValueRegex",
+ "filter": "^(tenant2)\\..*",
+ "tagKey": "applicationId"
+ }
+ ]
+ },
+ "id": "filter-ni8"
+ }
+ ],
+ "serdesConfigs": [
+ {
+ "id": "JsonV3QuerySerdes",
+ "filter": [
+ "summarizer"
+ ]
+ }
+ ],
+ "logLevel": "ERROR",
+ "cacheMode": null
+}
diff --git a/controller-server/src/test/resources/horizon/filters-complex.json b/controller-server/src/test/resources/horizon/filters-complex.json
new file mode 100644
index 00000000000..3acc7fe5044
--- /dev/null
+++ b/controller-server/src/test/resources/horizon/filters-complex.json
@@ -0,0 +1,46 @@
+{
+ "start": 1623080040000,
+ "end": 1623166440000,
+ "executionGraph": [
+ {
+ "id": "q1_m1",
+ "type": "TimeSeriesDataSource",
+ "metric": {
+ "type": "MetricLiteral",
+ "metric": "Vespa.vespa.qrserver.documents_covered.count"
+ },
+ "sourceId": null,
+ "fetchLast": false,
+ "filterId": "filter-ni8"
+ }
+ ],
+ "filters": [
+ {
+ "filter": {
+ "type": "Chain",
+ "op": "AND",
+ "filters": [
+ {
+ "type": "NOT",
+ "filter": {
+ "type": "TagValueLiteralOr",
+ "filter": "tenant1.app1.instance1",
+ "tagKey": "applicationId"
+ }
+ }
+ ]
+ },
+ "id": "filter-ni8"
+ }
+ ],
+ "serdesConfigs": [
+ {
+ "id": "JsonV3QuerySerdes",
+ "filter": [
+ "summarizer"
+ ]
+ }
+ ],
+ "logLevel": "ERROR",
+ "cacheMode": null
+}
diff --git a/controller-server/src/test/resources/horizon/filters-meta-query.expected.json b/controller-server/src/test/resources/horizon/filters-meta-query.expected.json
new file mode 100644
index 00000000000..6c8cab217fa
--- /dev/null
+++ b/controller-server/src/test/resources/horizon/filters-meta-query.expected.json
@@ -0,0 +1,39 @@
+{
+ "from": 0,
+ "to": 1,
+ "order": "ASCENDING",
+ "type": "TAG_KEYS_AND_VALUES",
+ "source": "",
+ "aggregationSize": 1000,
+ "queries": [
+ {
+ "id": "id-0",
+ "namespace": "Vespa",
+ "filter": {
+ "type": "Chain",
+ "filters": [
+ {
+ "type": "TagValueRegex",
+ "filter": ".*",
+ "tagKey": "applicationId"
+ },
+ {
+ "type": "MetricLiteral",
+ "metric": "vespa.distributor.vds.distributor.docsstored.average|vespa.searchnode.content.proton.resource_usage.disk.average|vespa.searchnode.content.proton.resource_usage.memory.average|vespa.container.peak_qps.max"
+ },
+ {
+ "type": "TagValueLiteralOr",
+ "filter": "public",
+ "tagKey": "system"
+ },
+ {
+ "type": "TagValueRegex",
+ "filter": "^(tenant2|tenant3)\\..*",
+ "tagKey": "applicationId"
+ }
+ ]
+ }
+ }
+ ],
+ "aggregationField": "applicationId"
+}
diff --git a/controller-server/src/test/resources/horizon/filters-meta-query.json b/controller-server/src/test/resources/horizon/filters-meta-query.json
new file mode 100644
index 00000000000..ed59bef5eaa
--- /dev/null
+++ b/controller-server/src/test/resources/horizon/filters-meta-query.json
@@ -0,0 +1,29 @@
+{
+ "from": 0,
+ "to": 1,
+ "order": "ASCENDING",
+ "type": "TAG_KEYS_AND_VALUES",
+ "source": "",
+ "aggregationSize": 1000,
+ "queries": [
+ {
+ "id": "id-0",
+ "namespace": "Vespa",
+ "filter": {
+ "type": "Chain",
+ "filters": [
+ {
+ "type": "TagValueRegex",
+ "filter": ".*",
+ "tagKey": "applicationId"
+ },
+ {
+ "type": "MetricLiteral",
+ "metric": "vespa.distributor.vds.distributor.docsstored.average|vespa.searchnode.content.proton.resource_usage.disk.average|vespa.searchnode.content.proton.resource_usage.memory.average|vespa.container.peak_qps.max"
+ }
+ ]
+ }
+ }
+ ],
+ "aggregationField": "applicationId"
+} \ No newline at end of file
diff --git a/controller-server/src/test/resources/horizon/no-filters.expected.json b/controller-server/src/test/resources/horizon/no-filters.expected.json
new file mode 100644
index 00000000000..35decea21db
--- /dev/null
+++ b/controller-server/src/test/resources/horizon/no-filters.expected.json
@@ -0,0 +1,32 @@
+{
+ "start": 1619301600000,
+ "end": 1623161217471,
+ "executionGraph": [
+ {
+ "id": "q1_m1",
+ "type": "TimeSeriesDataSource",
+ "metric": {
+ "type": "MetricLiteral",
+ "metric": "Vespa.vespa.distributor.vds.distributor.docsstored.average"
+ },
+ "sourceId": null,
+ "fetchLast": false,
+ "filter": {
+ "type": "Chain",
+ "op": "AND",
+ "filters": [
+ {
+ "type": "TagValueLiteralOr",
+ "filter": "public",
+ "tagKey": "system"
+ },
+ {
+ "type": "TagValueRegex",
+ "filter": "^(tenant2|tenant3)\\..*",
+ "tagKey": "applicationId"
+ }
+ ]
+ }
+ }
+ ]
+}
diff --git a/controller-server/src/test/resources/horizon/no-filters.json b/controller-server/src/test/resources/horizon/no-filters.json
new file mode 100644
index 00000000000..3ff80feba02
--- /dev/null
+++ b/controller-server/src/test/resources/horizon/no-filters.json
@@ -0,0 +1,16 @@
+{
+ "start": 1619301600000,
+ "end": 1623161217471,
+ "executionGraph": [
+ {
+ "id": "q1_m1",
+ "type": "TimeSeriesDataSource",
+ "metric": {
+ "type": "MetricLiteral",
+ "metric": "Vespa.vespa.distributor.vds.distributor.docsstored.average"
+ },
+ "sourceId": null,
+ "fetchLast": false
+ }
+ ]
+} \ No newline at end of file
diff --git a/default_build_settings.cmake b/default_build_settings.cmake
index a9c3f55dc61..a61410ebf31 100644
--- a/default_build_settings.cmake
+++ b/default_build_settings.cmake
@@ -298,8 +298,8 @@ function(vespa_use_default_cxx_compiler)
unset(DEFAULT_CMAKE_CXX_COMPILER)
if(NOT DEFINED VESPA_COMPILER_VARIANT OR VESPA_COMPILER_VARIANT STREQUAL "gcc")
if(APPLE)
- set(DEFAULT_CMAKE_C_COMPILER "/usr/local/bin/gcc-10")
- set(DEFAULT_CMAKE_CXX_COMPILER "/usr/local/bin/g++-10")
+ set(DEFAULT_CMAKE_C_COMPILER "/usr/local/bin/gcc-11")
+ set(DEFAULT_CMAKE_CXX_COMPILER "/usr/local/bin/g++-11")
elseif(VESPA_OS_DISTRO_COMBINED STREQUAL "amzn 2")
set(DEFAULT_CMAKE_C_COMPILER "/usr/bin/gcc10-gcc")
set(DEFAULT_CMAKE_CXX_COMPILER "/usr/bin/gcc10-g++")
diff --git a/dist/vespa.spec b/dist/vespa.spec
index f4a4759860a..125d4cea194 100644
--- a/dist/vespa.spec
+++ b/dist/vespa.spec
@@ -721,7 +721,7 @@ fi
%{_prefix}/bin/vespa-feed-client
%{_prefix}/conf/vespa-feed-client/logging.properties
%{_prefix}/lib/jars/vespa-http-client-jar-with-dependencies.jar
-%{_prefix}/lib/jars/vespa-feed-client-cli.jar
+%{_prefix}/lib/jars/vespa-feed-client-cli-jar-with-dependencies.jar
%files config-model-fat
%if %{_defattr_is_vespa_vespa}
diff --git a/document/src/main/java/com/yahoo/document/StructDataType.java b/document/src/main/java/com/yahoo/document/StructDataType.java
index 73fe580308e..8a153856eff 100644
--- a/document/src/main/java/com/yahoo/document/StructDataType.java
+++ b/document/src/main/java/com/yahoo/document/StructDataType.java
@@ -22,7 +22,7 @@ public class StructDataType extends BaseStructDataType {
super(name);
}
- public StructDataType(int id,String name) {
+ public StructDataType(int id, String name) {
super(id, name);
}
diff --git a/document/src/main/java/com/yahoo/document/StructuredDataType.java b/document/src/main/java/com/yahoo/document/StructuredDataType.java
index e4bb94a5465..8a5f344e79e 100644
--- a/document/src/main/java/com/yahoo/document/StructuredDataType.java
+++ b/document/src/main/java/com/yahoo/document/StructuredDataType.java
@@ -10,8 +10,6 @@ import java.util.Collection;
import java.util.List;
/**
- * TODO: What is this and why
- *
 * @author Håkon Humberset
*/
public abstract class StructuredDataType extends DataType {
diff --git a/document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java b/document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java
index 0449612da6f..f4139a597d2 100644
--- a/document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java
+++ b/document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java
@@ -2,7 +2,8 @@
package com.yahoo.document;
/**
- * Internal class, DO NOT USE!!&nbsp;Only public because it must be used from com.yahoo.searchdefinition.parser.
+ * Internal class, DO NOT USE!!
+ * Only public because it must be used from com.yahoo.searchdefinition.parser.
*
* @author Einar M R Rosenvinge
*/
diff --git a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/ExternPolicy.java b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/ExternPolicy.java
index 94dfabb2c4f..26b7cb71f2d 100755
--- a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/ExternPolicy.java
+++ b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/ExternPolicy.java
@@ -59,7 +59,7 @@ public class ExternPolicy implements DocumentProtocolRoutingPolicy {
pattern = args[1];
session = pattern.substring(pos);
orb = new Supervisor(new Transport("externpolicy"));
- orb.useSmallBuffers();
+ orb.setDropEmptyBuffers(true);
mirror = new Mirror(orb, slobroks);
error = null;
}
diff --git a/documentgen-test/src/test/java/com/yahoo/vespa/config/DocumentGenPluginTest.java b/documentgen-test/src/test/java/com/yahoo/vespa/config/DocumentGenPluginTest.java
index b3ae9d2bd0c..e4cddf0a606 100644
--- a/documentgen-test/src/test/java/com/yahoo/vespa/config/DocumentGenPluginTest.java
+++ b/documentgen-test/src/test/java/com/yahoo/vespa/config/DocumentGenPluginTest.java
@@ -61,7 +61,6 @@ import com.yahoo.vespa.documentgen.test.annotation.Person;
import org.junit.Ignore;
import org.junit.Test;
-import java.lang.Class;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.nio.ByteBuffer;
@@ -151,18 +150,18 @@ public class DocumentGenPluginTest {
assertEquals(music.getFieldValue(artist), bingoAstroburger);
assertEquals(music.getFieldValue("artist"), bingoAstroburger);
- assertEquals(music.getFieldValue(new Field("nonexisting")), null);
- assertEquals(music.getFieldValue("nono"), null);
- assertEquals(music.getField("nope"), null);
- assertEquals(music.getFieldValue(new Field("nada")), null);
- assertEquals(music.getFieldValue("zilch"), null);
- assertEquals(music.getFieldValue("zero"), null);
-
- assertEquals(music.removeFieldValue("nothere"), null);
- assertEquals(music.removeFieldValue(new Field("nothereno")), null);
- assertEquals(music.removeFieldValue(new Field("invalid")), null);
- assertEquals(music.removeFieldValue("goner"), null);
- assertEquals(music.removeFieldValue("absent"), null);
+ assertNull(music.getFieldValue(new Field("nonexisting")));
+ assertNull(music.getFieldValue("nono"));
+ assertNull(music.getField("nope"));
+ assertNull(music.getFieldValue(new Field("nada")));
+ assertNull(music.getFieldValue("zilch"));
+ assertNull(music.getFieldValue("zero"));
+
+ assertNull(music.removeFieldValue("nothere"));
+ assertNull(music.removeFieldValue(new Field("nothereno")));
+ assertNull(music.removeFieldValue(new Field("invalid")));
+ assertNull(music.removeFieldValue("goner"));
+ assertNull(music.removeFieldValue("absent"));
}
@Test
@@ -193,12 +192,12 @@ public class DocumentGenPluginTest {
Book book = getBook();
book.setAuthor(null);
Field a = new Field("author", DataType.STRING);
- assertEquals(book.getFieldValue("author"), null);
- assertEquals(book.getFieldValue(a), null);
+ assertNull(book.getFieldValue("author"));
+ assertNull(book.getFieldValue(a));
assertEquals(book.getField("author"), a);
- assertEquals(book.getFieldValue(a), null);
- assertEquals(book.getFieldValue("author"), null);
- assertEquals(book.getFieldValue("author"), null);
+ assertNull(book.getFieldValue(a));
+ assertNull(book.getFieldValue("author"));
+ assertNull(book.getFieldValue("author"));
book.removeFieldValue("isbn");
book.removeFieldValue(new Field("year", DataType.INT));
@@ -207,26 +206,26 @@ public class DocumentGenPluginTest {
assertEquals(old.get(0), new IntegerFieldValue(10));
book.removeFieldValue("stringmap");
book.removeFieldValue("mywsfloat");
- assertEquals(book.getIsbn(), null);
- assertEquals(book.getYear(), null);
- assertEquals(book.getDescription(), null);
- assertEquals(book.getStringmap(), null);
- assertEquals(book.getMyarrayint(), null);
- assertEquals(book.getMywsfloat(), null);
+ assertNull(book.getIsbn());
+ assertNull(book.getYear());
+ assertNull(book.getDescription());
+ assertNull(book.getStringmap());
+ assertNull(book.getMyarrayint());
+ assertNull(book.getMywsfloat());
Music music = getMusicBasic();
Field artist = music.getField("artist");
Field year = music.getField("year");
music.removeFieldValue(artist);
- assertEquals(music.getArtist(), null);
+ assertNull(music.getArtist());
music.removeFieldValue("disp_song");
- assertEquals(music.getDisp_song(), null);
+ assertNull(music.getDisp_song());
music.removeFieldValue(year);
- assertEquals(music.getYear(), null);
+ assertNull(music.getYear());
music.removeFieldValue("uri");
- assertEquals(music.getUri(), null);
+ assertNull(music.getUri());
music.removeFieldValue("weight_src");
- assertEquals(music.getWeight_src(), null);
+ assertNull(music.getWeight_src());
}
@Test
@@ -393,12 +392,12 @@ public class DocumentGenPluginTest {
Person p2 = new Person();
p2.setName("H. Melville");
descTree.annotate(p2);
- book.setDescriptionSpanTrees(new HashMap<String, SpanTree>(){{ put(descTree.getName(), descTree); }});
+ book.setDescriptionSpanTrees(new HashMap<>(){{ put(descTree.getName(), descTree); }});
assertEquals(((Person) ((StringFieldValue) book.getFieldValue(book.getField("description"))).getSpanTrees().iterator().next().iterator().next()).getName(),
"H. Melville");
assertEquals(((Person) ((StringFieldValue) book.removeFieldValue("description")).getSpanTrees().iterator().next().iterator().next()).getName(), "H. Melville");
- assertEquals(book.descriptionSpanTrees(), null);
- assertEquals((book.getFieldValue("description")), null);
+ assertNull(book.descriptionSpanTrees());
+ assertNull((book.getFieldValue("description")));
Artist a = new Artist();
assertTrue(Person.class.isInstance(a));
assertEquals(((StructDataType) a.getType().getDataType()).getField("name").getDataType(), DataType.STRING);
@@ -553,13 +552,13 @@ public class DocumentGenPluginTest {
private Book newBookConcrete(int i) {
Book book = new Book(new DocumentId("id:book:book::"+i));
book.setAuthor("Melville");
- Date date = new Date().setExacttime(99l);
- book.setTitleSpanTrees(new HashMap<String, SpanTree>());
+ Date date = new Date().setExacttime(99L);
+ book.setTitleSpanTrees(new HashMap<>());
SpanTree t = new SpanTree().annotate(date);
book.titleSpanTrees().put(t.getName(), t);
book.setTitle("Moby Dick");
book.setYear(1851);
- book.setMystruct(new Ss1().setSs01(new Ss0().setS0("My s0").setD0(99d)).setS1("My s1").setL1(89l));//.setAl1(myAs1));
+ book.setMystruct(new Ss1().setSs01(new Ss0().setS0("My s0").setD0(99d)).setS1("My s1").setL1(89L));//.setAl1(myAs1));
Map<Float, Integer> wsFloat = new HashMap<>();
wsFloat.put(56f, 55);
wsFloat.put(57f, 54);
@@ -587,7 +586,7 @@ public class DocumentGenPluginTest {
AnnotationType dateType = mgr.getAnnotationTypeRegistry().getType("date");
Struct dateStruct = new Struct(mgr.getAnnotationTypeRegistry().getType("date").getDataType());
- dateStruct.setFieldValue("exacttime", new LongFieldValue(99l));
+ dateStruct.setFieldValue("exacttime", new LongFieldValue(99L));
Annotation date = new Annotation(dateType);
date.setFieldValue(dateStruct);
titleTree.annotate(date);
@@ -637,7 +636,7 @@ public class DocumentGenPluginTest {
AnnotationType dateType = mgr.getAnnotationTypeRegistry().getType("date");
Struct dateStruct = new Struct(mgr.getAnnotationTypeRegistry().getType("date").getDataType());
- dateStruct.setFieldValue("exacttime", new LongFieldValue(99l));
+ dateStruct.setFieldValue("exacttime", new LongFieldValue(99L));
Annotation date = new Annotation(dateType);
date.setFieldValue(dateStruct);
titleTree.annotate(date);
@@ -647,7 +646,7 @@ public class DocumentGenPluginTest {
assertEquals(titleCheck.getWrappedValue(), "Moby Dick");
SpanTree treeCheck = titleCheck.getSpanTrees().iterator().next();
Annotation titleAnnCheck = treeCheck.iterator().next();
- assertEquals(((StructuredFieldValue) titleAnnCheck.getFieldValue()).getFieldValue("exacttime").getWrappedValue(), 99l);
+ assertEquals(((StructuredFieldValue) titleAnnCheck.getFieldValue()).getFieldValue("exacttime").getWrappedValue(), 99L);
bookGeneric.setFieldValue("year", new IntegerFieldValue(1851));
Struct myS0 = new Struct(mgr.getDataType("ss0"));
@@ -689,7 +688,7 @@ public class DocumentGenPluginTest {
assertEquals(book.getMystruct().getAs1().get(1), "as1_2");
treeCheck = book.titleSpanTrees().values().iterator().next();
titleAnnCheck = treeCheck.iterator().next();
- assertEquals(((StructuredFieldValue) titleAnnCheck.getFieldValue()).getFieldValue("exacttime").getWrappedValue(), 99l);
+ assertEquals(((StructuredFieldValue) titleAnnCheck.getFieldValue()).getFieldValue("exacttime").getWrappedValue(), 99L);
Book book2 = new Book(book, book.getId());
assertEquals(book2.getId(), bookGeneric.getId());
@@ -704,7 +703,7 @@ public class DocumentGenPluginTest {
assertEquals(book2.getMystruct().getAs1().get(1), "as1_2");
treeCheck = book2.titleSpanTrees().values().iterator().next();
titleAnnCheck = treeCheck.iterator().next();
- assertEquals(((StructuredFieldValue) titleAnnCheck.getFieldValue()).getFieldValue("exacttime").getWrappedValue(), 99l);
+ assertEquals(((StructuredFieldValue) titleAnnCheck.getFieldValue()).getFieldValue("exacttime").getWrappedValue(), 99L);
}
@Test
@@ -712,13 +711,13 @@ public class DocumentGenPluginTest {
Book b = (Book) ConcreteDocumentFactory.getDocument("book", new DocumentId("id:book:book::10"));
b.setAuthor("Per Ulv");
final Date d = (Date) ConcreteDocumentFactory.getAnnotation("date");
- d.setExacttime(79l);
- b.setAuthorSpanTrees(new HashMap<String, SpanTree>() {{ put("root", new SpanTree("root").annotate(d)); }});
+ d.setExacttime(79L);
+ b.setAuthorSpanTrees(new HashMap<>() {{ put("root", new SpanTree("root").annotate(d)); }});
StringFieldValue authorCheck=(StringFieldValue) b.getFieldValue("author");
assertEquals(authorCheck.getWrappedValue(), "Per Ulv");
SpanTree treeCheck = authorCheck.getSpanTrees().iterator().next();
Annotation authorAnnCheck = treeCheck.iterator().next();
- assertEquals(((Struct) authorAnnCheck.getFieldValue()).getFieldValue("exacttime").getWrappedValue(), 79l);
+ assertEquals(((Struct) authorAnnCheck.getFieldValue()).getFieldValue("exacttime").getWrappedValue(), 79L);
b.setMystruct(((Ss1) ConcreteDocumentFactory.getStruct("ss1")).setS1("Test s1!"));
assertEquals(((Struct) b.getFieldValue("mystruct")).getFieldValue("s1").getWrappedValue(), "Test s1!");
@@ -761,7 +760,7 @@ public class DocumentGenPluginTest {
}
private String className(String s) {
- return s.substring(0, 1).toUpperCase()+s.substring(1, s.length());
+ return s.substring(0, 1).toUpperCase()+s.substring(1);
}
private Music getMusicBasic() {
@@ -799,8 +798,8 @@ public class DocumentGenPluginTest {
myArrInt.add(30);
book.setMyarrayint(myArrInt);
- List<Integer> intL = new ArrayList<Integer>(){{add(1);add(2);add(3);}};
- List<Integer> intL2 = new ArrayList<Integer>(){{add(9);add(10);add(11);}};
+ List<Integer> intL = new ArrayList<>(){{add(1);add(2);add(3);}};
+ List<Integer> intL2 = new ArrayList<>(){{add(9);add(10);add(11);}};
List<List<Integer>> doubleIntL = new ArrayList<>();
doubleIntL.add(intL);
doubleIntL.add(intL2);
@@ -861,7 +860,6 @@ public class DocumentGenPluginTest {
}
@Test
- @SuppressWarnings("deprecation")
public void testSerialization() {
final Book book = getBook();
assertEquals(book.getMystruct().getD1(), (Double)56.777);
diff --git a/eval/CMakeLists.txt b/eval/CMakeLists.txt
index 302b6768cea..16c9c72d8a5 100644
--- a/eval/CMakeLists.txt
+++ b/eval/CMakeLists.txt
@@ -5,6 +5,7 @@ vespa_define_module(
staging_vespalib
APPS
+ src/apps/analyze_onnx_model
src/apps/eval_expr
src/apps/make_tensor_binary_format_test_spec
src/apps/tensor_conformance
diff --git a/eval/src/apps/analyze_onnx_model/.gitignore b/eval/src/apps/analyze_onnx_model/.gitignore
new file mode 100644
index 00000000000..12ce20b03ba
--- /dev/null
+++ b/eval/src/apps/analyze_onnx_model/.gitignore
@@ -0,0 +1 @@
+/vespa-analyze-onnx-model
diff --git a/eval/src/apps/analyze_onnx_model/CMakeLists.txt b/eval/src/apps/analyze_onnx_model/CMakeLists.txt
new file mode 100644
index 00000000000..47cbb6504f4
--- /dev/null
+++ b/eval/src/apps/analyze_onnx_model/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespa-analyze-onnx-model
+ SOURCES
+ analyze_onnx_model.cpp
+ INSTALL bin
+ DEPENDS
+ vespaeval
+)
diff --git a/eval/src/apps/analyze_onnx_model/analyze_onnx_model.cpp b/eval/src/apps/analyze_onnx_model/analyze_onnx_model.cpp
new file mode 100644
index 00000000000..3f56610dcaa
--- /dev/null
+++ b/eval/src/apps/analyze_onnx_model/analyze_onnx_model.cpp
@@ -0,0 +1,208 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/eval/onnx/onnx_wrapper.h>
+#include <vespa/eval/eval/tensor_spec.h>
+#include <vespa/eval/eval/value_codec.h>
+#include <vespa/eval/eval/fast_value.h>
+#include <vespa/vespalib/util/benchmark_timer.h>
+#include <vespa/vespalib/util/require.h>
+#include <vespa/vespalib/util/guard.h>
+#include <vespa/vespalib/util/stringfmt.h>
+
+using vespalib::make_string_short::fmt;
+
+using vespalib::FilePointer;
+using namespace vespalib::eval;
+
+bool read_line(FilePointer &file, vespalib::string &line) {
+ char line_buffer[1024];
+ char *res = fgets(line_buffer, sizeof(line_buffer), file.fp());
+ if (res == nullptr) {
+ line.clear();
+ return false;
+ }
+ line = line_buffer;
+ while (!line.empty() && isspace(line[line.size() - 1])) {
+ line.pop_back();
+ }
+ return true;
+}
+
+void extract(const vespalib::string &str, const vespalib::string &prefix, vespalib::string &dst) {
+ if (starts_with(str, prefix)) {
+ size_t pos = prefix.size();
+ while ((str.size() > pos) && isspace(str[pos])) {
+ ++pos;
+ }
+ dst = str.substr(pos);
+ }
+}
+
+void report_memory_usage(const vespalib::string &desc) {
+ vespalib::string vm_size = "unknown";
+ vespalib::string vm_rss = "unknown";
+ vespalib::string line;
+ FilePointer file(fopen("/proc/self/status", "r"));
+ while (read_line(file, line)) {
+ extract(line, "VmSize:", vm_size);
+ extract(line, "VmRSS:", vm_rss);
+ }
+ fprintf(stderr, "vm_size: %s, vm_rss: %s (%s)\n", vm_size.c_str(), vm_rss.c_str(), desc.c_str());
+}
+
+struct Options {
+ size_t pos = 0;
+ std::vector<vespalib::string> opt_list;
+ void add_option(const vespalib::string &opt) {
+ opt_list.push_back(opt);
+ }
+ vespalib::string get_option(const vespalib::string &desc, const vespalib::string &fallback) {
+ vespalib::string opt;
+ if (pos < opt_list.size()) {
+ opt = opt_list[pos];
+ fprintf(stderr, "option[%zu](%s): %s\n",
+ pos, desc.c_str(), opt.c_str());
+ } else {
+ opt = fallback;
+ fprintf(stderr, "unspecified option[%zu](%s), fallback: %s\n",
+ pos, desc.c_str(), fallback.c_str());
+ }
+ ++pos;
+ return opt;
+ }
+ bool get_bool_opt(const vespalib::string &desc, const vespalib::string &fallback) {
+ auto opt = get_option(desc, fallback);
+ REQUIRE((opt == "true") || (opt == "false"));
+ return (opt == "true");
+ }
+ size_t get_size_opt(const vespalib::string &desc, const vespalib::string &fallback) {
+ auto opt = get_option(desc, fallback);
+ size_t value = atoi(opt.c_str());
+ REQUIRE(value > 0);
+ return value;
+ }
+};
+
// Print the name and declared type of each model input and output
// (as reported by the loaded onnx model) to stderr.
void dump_model_info(const Onnx &model) {
    fprintf(stderr, "model meta-data:\n");
    for (size_t i = 0; i < model.inputs().size(); ++i) {
        fprintf(stderr, "  input[%zu]: '%s' %s\n", i, model.inputs()[i].name.c_str(), model.inputs()[i].type_as_string().c_str());
    }
    for (size_t i = 0; i < model.outputs().size(); ++i) {
        fprintf(stderr, "  output[%zu]: '%s' %s\n", i, model.outputs()[i].name.c_str(), model.outputs()[i].type_as_string().c_str());
    }
}
+
// Print how vespa value types are wired to onnx tensor types for each
// input and output of the planned test setup. The two sides of each
// wiring must have the same number of entries.
void dump_wire_info(const Onnx::WireInfo &wire) {
    fprintf(stderr, "test setup:\n");
    REQUIRE_EQ(wire.vespa_inputs.size(), wire.onnx_inputs.size());
    for (size_t i = 0; i < wire.vespa_inputs.size(); ++i) {
        fprintf(stderr, "  input[%zu]: %s -> %s\n", i, wire.vespa_inputs[i].to_spec().c_str(), wire.onnx_inputs[i].type_as_string().c_str());
    }
    REQUIRE_EQ(wire.onnx_outputs.size(), wire.vespa_outputs.size());
    for (size_t i = 0; i < wire.onnx_outputs.size(); ++i) {
        fprintf(stderr, "  output[%zu]: %s -> %s\n", i, wire.onnx_outputs[i].type_as_string().c_str(), wire.vespa_outputs[i].to_spec().c_str());
    }
}
+
/**
 * Functor creating a concrete vespa ValueType for an onnx model
 * input. Dimension sizes the model does not pin down are resolved by
 * consuming command-line options, in the order the dimensions are
 * encountered. Sizes chosen for symbolic dimensions are remembered,
 * so the same symbol maps to the same size across all inputs.
 **/
struct MakeInputType {
    Options &opts;
    // symbolic dimension name -> size chosen for it (absent/0 = not chosen yet)
    std::map<vespalib::string,int> symbolic_sizes;
    MakeInputType(Options &opts_in) : opts(opts_in), symbolic_sizes() {}
    ValueType operator()(const Onnx::TensorInfo &info) {
        int d = 0;
        std::vector<ValueType::Dimension> dim_list;
        for (const auto &dim: info.dimensions) {
            REQUIRE(d <= 9); // generated dimension names are limited to d0..d9
            size_t size = 0;
            if (dim.is_known()) {
                size = dim.value;
            } else if (dim.is_symbolic()) {
                // map operator[] inserts 0 on first lookup; 0 means
                // "ask for an option value and remember the answer"
                size = symbolic_sizes[dim.name];
                if (size == 0) {
                    size = opts.get_size_opt(fmt("symbolic size '%s'", dim.name.c_str()), "1");
                    symbolic_sizes[dim.name] = size;
                }
            } else {
                // fully unknown dimension: always ask
                size = opts.get_size_opt(fmt("size of input '%s' dimension %d", info.name.c_str(), d), "1");
            }
            dim_list.emplace_back(fmt("d%d", d), size);
            ++d;
        }
        return ValueType::make_type(Onnx::WirePlanner::best_cell_type(info.elements), std::move(dim_list));
    }
};
+
+Onnx::WireInfo make_plan(Options &opts, const Onnx &model) {
+ Onnx::WirePlanner planner;
+ MakeInputType make_input_type(opts);
+ for (const auto &input: model.inputs()) {
+ auto type = make_input_type(input);
+ REQUIRE(planner.bind_input_type(type, input));
+ }
+ for (const auto &output: model.outputs()) {
+ REQUIRE(!planner.make_output_type(output).is_error());
+ }
+ return planner.get_wire_info(model);
+}
+
+struct MyEval {
+ Onnx::EvalContext context;
+ std::vector<Value::UP> inputs;
+ MyEval(const Onnx &model, const Onnx::WireInfo &wire) : context(model, wire), inputs() {
+ for (const auto &input_type: wire.vespa_inputs) {
+ TensorSpec spec(input_type.to_spec());
+ inputs.push_back(value_from_spec(spec, FastValueBuilderFactory::get()));
+ }
+ }
+ void eval() {
+ for (size_t i = 0; i < inputs.size(); ++i) {
+ context.bind_param(i, *inputs[i]);
+ }
+ context.eval();
+ }
+};
+
/**
 * Print a usage message to stderr.
 * Always returns 1 so callers can write 'return usage(argv[0]);'.
 */
int usage(const char *self) {
    fprintf(stderr, "usage: %s <onnx-model> [options...]\n", self);
    const char *help[] = {
        "  load onnx model and report memory usage",
        "  options are used to specify unknown values, like dimension sizes",
        "  options are accepted in the order in which they are needed",
        "  tip: run without options first, to see which you need"
    };
    for (const char *line: help) {
        fprintf(stderr, "%s\n", line);
    }
    return 1;
}
+
// Load the given onnx model, report memory usage before/after, then
// simulate a number of concurrent evaluations (reporting memory as
// the count grows) and finally benchmark a single evaluation.
int main(int argc, char **argv) {
    // need at least the model file; remaining arguments become options
    if (argc < 2) {
        return usage(argv[0]);
    }
    Options opts;
    for (int i = 2; i < argc; ++i) {
        opts.add_option(argv[i]);
    }
    // first option consumed: whether to let onnxruntime optimize the model
    Onnx::Optimize optimize = opts.get_bool_opt("optimize model", "true")
                              ? Onnx::Optimize::ENABLE : Onnx::Optimize::DISABLE;
    report_memory_usage("before loading model");
    Onnx model(argv[1], optimize);
    report_memory_usage("after loading model");
    dump_model_info(model);
    // make_plan may consume further options (unknown dimension sizes)
    auto wire_info = make_plan(opts, model);
    dump_wire_info(wire_info);
    std::vector<std::unique_ptr<MyEval>> eval_list;
    // last option consumed: how many concurrent evaluations to simulate
    size_t max_concurrent = opts.get_size_opt("max concurrent evaluations", "1");
    report_memory_usage("no evaluations yet");
    for (size_t i = 1; i <= max_concurrent; ++i) {
        eval_list.push_back(std::make_unique<MyEval>(model, wire_info));
        eval_list.back()->eval();
        // report every 8 evaluations to limit output volume
        if ((i % 8) == 0) {
            report_memory_usage(fmt("concurrent evaluations: %zu", i));
        }
    }
    // make sure the final count is reported when the loop above did not
    if ((max_concurrent % 8) != 0) {
        report_memory_usage(fmt("concurrent evaluations: %zu", max_concurrent));
    }
    // keep a single evaluation alive and benchmark repeated eval calls
    eval_list.resize(1);
    double min_time_s = vespalib::BenchmarkTimer::benchmark([&e = *eval_list.back()](){ e.eval(); }, 10.0);
    fprintf(stderr, "estimated model evaluation time: %g ms\n", min_time_s * 1000.0);
    return 0;
}
diff --git a/eval/src/tests/tensor/onnx_wrapper/onnx_wrapper_test.cpp b/eval/src/tests/tensor/onnx_wrapper/onnx_wrapper_test.cpp
index 54f958f8111..6b45172ef80 100644
--- a/eval/src/tests/tensor/onnx_wrapper/onnx_wrapper_test.cpp
+++ b/eval/src/tests/tensor/onnx_wrapper/onnx_wrapper_test.cpp
@@ -3,6 +3,7 @@
#include <vespa/eval/eval/tensor_spec.h>
#include <vespa/eval/eval/int8float.h>
#include <vespa/eval/onnx/onnx_wrapper.h>
+#include <vespa/eval/onnx/onnx_model_cache.h>
#include <vespa/vespalib/util/bfloat16.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/gtest/gtest.h>
@@ -443,4 +444,23 @@ TEST(OnnxTest, default_allocator_type) {
fprintf(stderr, "default allocator type: %d\n", int(res));
}
TEST(OnnxModelCacheTest, share_and_evict_onnx_models) {
    {
        // loading the same file again must hand out a token sharing
        // the already-loaded model instance
        auto simple1 = OnnxModelCache::load(simple_model);
        auto simple2 = OnnxModelCache::load(simple_model);
        auto dynamic1 = OnnxModelCache::load(dynamic_model);
        auto dynamic2 = OnnxModelCache::load(dynamic_model);
        auto dynamic3 = OnnxModelCache::load(dynamic_model);
        // sanity: the models were actually loaded
        EXPECT_EQ(simple1->get().inputs().size(), 3);
        EXPECT_EQ(dynamic1->get().inputs().size(), 3);
        // tokens for the same file expose the same underlying object
        EXPECT_EQ(&(simple1->get()), &(simple2->get()));
        EXPECT_EQ(&(dynamic1->get()), &(dynamic2->get()));
        EXPECT_EQ(&(dynamic2->get()), &(dynamic3->get()));
        // two distinct models cached, five live tokens in total
        EXPECT_EQ(OnnxModelCache::num_cached(), 2);
        EXPECT_EQ(OnnxModelCache::count_refs(), 5);
    }
    // all tokens destroyed -> cache must be empty again
    EXPECT_EQ(OnnxModelCache::num_cached(), 0);
    EXPECT_EQ(OnnxModelCache::count_refs(), 0);
}
+
GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/eval/src/vespa/eval/onnx/CMakeLists.txt b/eval/src/vespa/eval/onnx/CMakeLists.txt
index 9b18557c036..40444936d02 100644
--- a/eval/src/vespa/eval/onnx/CMakeLists.txt
+++ b/eval/src/vespa/eval/onnx/CMakeLists.txt
@@ -2,5 +2,6 @@
vespa_add_library(eval_onnx OBJECT
SOURCES
+ onnx_model_cache.cpp
onnx_wrapper.cpp
)
diff --git a/eval/src/vespa/eval/onnx/onnx_model_cache.cpp b/eval/src/vespa/eval/onnx/onnx_model_cache.cpp
new file mode 100644
index 00000000000..01d5fdd9c84
--- /dev/null
+++ b/eval/src/vespa/eval/onnx/onnx_model_cache.cpp
@@ -0,0 +1,51 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "onnx_model_cache.h"
+
+namespace vespalib::eval {
+
// process-wide cache state; _lock guards all access to _cached
std::mutex OnnxModelCache::_lock{};
OnnxModelCache::Map OnnxModelCache::_cached{};
+
+void
+OnnxModelCache::release(Map::iterator entry)
+{
+ std::lock_guard<std::mutex> guard(_lock);
+ if (--(entry->second.num_refs) == 0) {
+ _cached.erase(entry);
+ }
+}
+
+OnnxModelCache::Token::UP
+OnnxModelCache::load(const vespalib::string &model_file)
+{
+ std::lock_guard<std::mutex> guard(_lock);
+ auto pos = _cached.find(model_file);
+ if (pos == _cached.end()) {
+ auto model = std::make_unique<Onnx>(model_file, Onnx::Optimize::ENABLE);
+ auto res = _cached.emplace(model_file, std::move(model));
+ assert(res.second);
+ pos = res.first;
+ }
+ return std::make_unique<Token>(pos, ctor_tag());
+}
+
+size_t
+OnnxModelCache::num_cached()
+{
+ std::lock_guard<std::mutex> guard(_lock);
+ return _cached.size();
+}
+
+size_t
+OnnxModelCache::count_refs()
+{
+ std::lock_guard<std::mutex> guard(_lock);
+ size_t refs = 0;
+ for (const auto &entry: _cached) {
+ refs += entry.second.num_refs;
+ }
+ return refs;
+}
+
+}
diff --git a/eval/src/vespa/eval/onnx/onnx_model_cache.h b/eval/src/vespa/eval/onnx/onnx_model_cache.h
new file mode 100644
index 00000000000..35d5fefa061
--- /dev/null
+++ b/eval/src/vespa/eval/onnx/onnx_model_cache.h
@@ -0,0 +1,58 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "onnx_wrapper.h"
+#include <vespa/vespalib/stllike/string.h>
+#include <memory>
+#include <mutex>
+#include <map>
+
+namespace vespalib::eval {
+
/**
 * Cache used to share loaded onnx models between users. The cache
 * itself will not keep anything alive, but will let you find loaded
 * models that are currently in use by others.
 *
 * Users hold a Token returned by load(); the token keeps the model
 * alive and hands its reference back to the cache when destroyed.
 **/
class OnnxModelCache
{
private:
    // tag type restricting Token construction to this class
    struct ctor_tag {};
    using Key = vespalib::string;  // the model file name
    struct Value {
        size_t num_refs;               // number of live Tokens for this entry
        std::unique_ptr<Onnx> model;
        Value(std::unique_ptr<Onnx> model_in) : num_refs(0), model(std::move(model_in)) {}
        const Onnx &get() { return *model; }
    };
    // std::map is required here: Tokens store iterators into it, and
    // map iterators stay valid when other entries are inserted/erased
    using Map = std::map<Key,Value>;
    static std::mutex _lock;  // guards _cached and all num_refs updates
    static Map _cached;

    // called from ~Token; drops one reference and erases the entry
    // when the last reference is gone
    static void release(Map::iterator entry);

public:
    /**
     * Shared handle to a cached model. Non-copyable and non-movable,
     * since destroying a Token returns exactly one cache reference.
     **/
    class Token
    {
    private:
        OnnxModelCache::Map::iterator _entry;
    public:
        Token(Token &&) = delete;
        Token(const Token &) = delete;
        Token &operator=(Token &&) = delete;
        Token &operator=(const Token &) = delete;
        using UP = std::unique_ptr<Token>;
        explicit Token(OnnxModelCache::Map::iterator entry, ctor_tag) : _entry(entry) {
            ++_entry->second.num_refs;
        }
        const Onnx &get() const { return _entry->second.get(); }
        ~Token() { OnnxModelCache::release(_entry); }
    };

    static Token::UP load(const vespalib::string &model_file);
    static size_t num_cached();  // distinct models currently cached
    static size_t count_refs();  // total live tokens across all models
};
+
+}
diff --git a/eval/src/vespa/eval/onnx/onnx_wrapper.cpp b/eval/src/vespa/eval/onnx/onnx_wrapper.cpp
index f848c421c9d..e2528fcb1c3 100644
--- a/eval/src/vespa/eval/onnx/onnx_wrapper.cpp
+++ b/eval/src/vespa/eval/onnx/onnx_wrapper.cpp
@@ -117,23 +117,6 @@ auto convert_optimize(Onnx::Optimize optimize) {
abort();
}
-CellType to_cell_type(Onnx::ElementType type) {
- switch (type) {
- case Onnx::ElementType::INT8: return CellType::INT8;
- case Onnx::ElementType::BFLOAT16: return CellType::BFLOAT16;
- case Onnx::ElementType::UINT8: [[fallthrough]];
- case Onnx::ElementType::INT16: [[fallthrough]];
- case Onnx::ElementType::UINT16: [[fallthrough]];
- case Onnx::ElementType::FLOAT: return CellType::FLOAT;
- case Onnx::ElementType::INT32: [[fallthrough]];
- case Onnx::ElementType::INT64: [[fallthrough]];
- case Onnx::ElementType::UINT32: [[fallthrough]];
- case Onnx::ElementType::UINT64: [[fallthrough]];
- case Onnx::ElementType::DOUBLE: return CellType::DOUBLE;
- }
- abort();
-}
-
Onnx::ElementType make_element_type(ONNXTensorElementDataType element_type) {
switch (element_type) {
case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8: return Onnx::ElementType::INT8;
@@ -245,12 +228,41 @@ Onnx::TensorInfo::type_as_string() const
Onnx::TensorInfo::~TensorInfo() = default;
// Render this tensor type as a human-readable string: the element
// type name followed by a rendering of each dimension size.
vespalib::string
Onnx::TensorType::type_as_string() const
{
    vespalib::string res = type_name(elements);
    for (const auto &size: dimensions) {
        res += DimSize(size).as_string();
    }
    return res;
}
+
//-----------------------------------------------------------------------------
Onnx::WireInfo::~WireInfo() = default;
Onnx::WirePlanner::~WirePlanner() = default;
// Pick the vespa cell type best matching an onnx element type.
// Types with an exact counterpart (INT8, BFLOAT16) map directly;
// the remaining integer/float types fall through to FLOAT or DOUBLE.
CellType
Onnx::WirePlanner::best_cell_type(Onnx::ElementType type)
{
    switch (type) {
    case Onnx::ElementType::INT8: return CellType::INT8;
    case Onnx::ElementType::BFLOAT16: return CellType::BFLOAT16;
    case Onnx::ElementType::UINT8: [[fallthrough]];
    case Onnx::ElementType::INT16: [[fallthrough]];
    case Onnx::ElementType::UINT16: [[fallthrough]];
    case Onnx::ElementType::FLOAT: return CellType::FLOAT;
    case Onnx::ElementType::INT32: [[fallthrough]];
    case Onnx::ElementType::INT64: [[fallthrough]];
    case Onnx::ElementType::UINT32: [[fallthrough]];
    case Onnx::ElementType::UINT64: [[fallthrough]];
    case Onnx::ElementType::DOUBLE: return CellType::DOUBLE;
    }
    // unreachable for valid enum values; guards against bad casts
    abort();
}
+
bool
Onnx::WirePlanner::bind_input_type(const ValueType &vespa_in, const TensorInfo &onnx_in)
{
@@ -309,7 +321,7 @@ Onnx::WirePlanner::make_output_type(const TensorInfo &onnx_out) const
}
dim_list.emplace_back(fmt("d%zu", dim_list.size()), dim_size);
}
- return ValueType::make_type(to_cell_type(elements), std::move(dim_list));
+ return ValueType::make_type(best_cell_type(elements), std::move(dim_list));
}
Onnx::WireInfo
diff --git a/eval/src/vespa/eval/onnx/onnx_wrapper.h b/eval/src/vespa/eval/onnx/onnx_wrapper.h
index 507d75efbd9..9392536eae7 100644
--- a/eval/src/vespa/eval/onnx/onnx_wrapper.h
+++ b/eval/src/vespa/eval/onnx/onnx_wrapper.h
@@ -68,6 +68,7 @@ public:
std::vector<int64_t> dimensions;
TensorType(ElementType elements_in, std::vector<int64_t> dimensions_in) noexcept
: elements(elements_in), dimensions(std::move(dimensions_in)) {}
+ vespalib::string type_as_string() const;
};
// how the model should be wired with inputs/outputs
@@ -88,6 +89,7 @@ public:
public:
WirePlanner() : _input_types(), _symbolic_sizes(), _bound_unknown_sizes() {}
~WirePlanner();
+ static CellType best_cell_type(Onnx::ElementType type);
bool bind_input_type(const ValueType &vespa_in, const TensorInfo &onnx_in);
ValueType make_output_type(const TensorInfo &onnx_out) const;
WireInfo get_wire_info(const Onnx &model) const;
diff --git a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/Downloads.java b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/Downloads.java
new file mode 100644
index 00000000000..eb0976edc40
--- /dev/null
+++ b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/Downloads.java
@@ -0,0 +1,141 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.filedistribution;
+
+import com.yahoo.config.FileReference;
+
+import java.io.File;
+import java.time.Instant;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import java.util.stream.Collectors;
+
+/**
+ * Keeps track of downloads and download status
+ *
+ * @author hmusum
+ */
+public class Downloads {
+
+ private static final Logger log = Logger.getLogger(Downloads.class.getName());
+
+ private final Map<FileReference, FileReferenceDownload> downloads = new ConcurrentHashMap<>();
+ private final DownloadStatuses downloadStatuses = new DownloadStatuses();
+
+ public DownloadStatuses downloadStatuses() { return downloadStatuses; }
+
+ void setDownloadStatus(FileReference fileReference, double completeness) {
+ Optional<Downloads.DownloadStatus> downloadStatus = downloadStatuses.get(fileReference);
+ if (downloadStatus.isPresent())
+ downloadStatus.get().setProgress(completeness);
+ else
+ downloadStatuses.add(fileReference, completeness);
+ }
+
+ void completedDownloading(FileReference fileReference, File file) {
+ Optional<FileReferenceDownload> download = get(fileReference);
+ if (download.isPresent()) {
+ downloadStatuses().get(fileReference).ifPresent(Downloads.DownloadStatus::finished);
+ downloads.remove(fileReference);
+ download.get().future().complete(Optional.of(file));
+ } else {
+ log.log(Level.FINE, () -> "Received '" + fileReference + "', which was not requested. Can be ignored if happening during upgrades/restarts");
+ }
+ }
+
+ void add(FileReferenceDownload fileReferenceDownload) {
+ downloads.put(fileReferenceDownload.fileReference(), fileReferenceDownload);
+ downloadStatuses.add(fileReferenceDownload.fileReference());
+ }
+
+ void remove(FileReference fileReference) {
+ downloadStatuses.get(fileReference).ifPresent(d -> d.setProgress(0.0));
+ downloads.remove(fileReference);
+ }
+
+ double downloadStatus(FileReference fileReference) {
+ double status = 0.0;
+ Optional<Downloads.DownloadStatus> downloadStatus = downloadStatuses.get(fileReference);
+ if (downloadStatus.isPresent()) {
+ status = downloadStatus.get().progress();
+ }
+ return status;
+ }
+
+ Map<FileReference, Double> downloadStatus() {
+ return downloadStatuses.all().values().stream().collect(Collectors.toMap(Downloads.DownloadStatus::fileReference, Downloads.DownloadStatus::progress));
+ }
+
+ Optional<FileReferenceDownload> get(FileReference fileReference) {
+ return Optional.ofNullable(downloads.get(fileReference));
+ }
+
+ /* Status for ongoing and completed downloads, keeps at most status for 100 last downloads */
+ static class DownloadStatuses {
+
+ private static final int maxEntries = 100;
+
+ private final Map<FileReference, DownloadStatus> downloadStatus = new ConcurrentHashMap<>();
+
+ void add(FileReference fileReference) {
+ add(fileReference, 0.0);
+ }
+
+ void add(FileReference fileReference, double progress) {
+ DownloadStatus ds = new DownloadStatus(fileReference);
+ ds.setProgress(progress);
+ downloadStatus.put(fileReference, ds);
+ if (downloadStatus.size() > maxEntries) {
+ Map.Entry<FileReference, DownloadStatus> oldest =
+ Collections.min(downloadStatus.entrySet(), Comparator.comparing(e -> e.getValue().created));
+ downloadStatus.remove(oldest.getKey());
+ }
+ }
+
+ Optional<DownloadStatus> get(FileReference fileReference) {
+ return Optional.ofNullable(downloadStatus.get(fileReference));
+ }
+
+ Map<FileReference, DownloadStatus> all() {
+ return Map.copyOf(downloadStatus);
+ }
+
+ }
+
+ static class DownloadStatus {
+ private final FileReference fileReference;
+ private double progress; // between 0 and 1
+ private final Instant created;
+
+ DownloadStatus(FileReference fileReference) {
+ this.fileReference = fileReference;
+ this.progress = 0.0;
+ this.created = Instant.now();
+ }
+
+ public FileReference fileReference() {
+ return fileReference;
+ }
+
+ public double progress() {
+ return progress;
+ }
+
+ public void setProgress(double progress) {
+ this.progress = progress;
+ }
+
+ public void finished() {
+ setProgress(1.0);
+ }
+
+ public Instant created() {
+ return created;
+ }
+ }
+
+}
diff --git a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileDownloader.java b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileDownloader.java
index b1e43e4cee1..292674497ed 100644
--- a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileDownloader.java
+++ b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileDownloader.java
@@ -30,23 +30,30 @@ public class FileDownloader implements AutoCloseable {
private final static Logger log = Logger.getLogger(FileDownloader.class.getName());
public static File defaultDownloadDirectory = new File(Defaults.getDefaults().underVespaHome("var/db/vespa/filedistribution"));
+ private final ConnectionPool connectionPool;
private final File downloadDirectory;
private final Duration timeout;
private final FileReferenceDownloader fileReferenceDownloader;
+ private final Downloads downloads;
public FileDownloader(ConnectionPool connectionPool) {
- this(connectionPool, defaultDownloadDirectory );
+ this(connectionPool, defaultDownloadDirectory, new Downloads());
}
- public FileDownloader(ConnectionPool connectionPool, File downloadDirectory) {
+ public FileDownloader(ConnectionPool connectionPool, File downloadDirectory, Downloads downloads) {
// TODO: Reduce timeout even more, timeout is so long that we might get starvation
- this(connectionPool, downloadDirectory, downloadDirectory, Duration.ofMinutes(5), Duration.ofSeconds(10));
+ this(connectionPool, downloadDirectory, downloads, Duration.ofMinutes(5), Duration.ofSeconds(10));
}
- public FileDownloader(ConnectionPool connectionPool, File downloadDirectory, File tmpDirectory, Duration timeout, Duration sleepBetweenRetries) {
+ public FileDownloader(ConnectionPool connectionPool, File downloadDirectory, Downloads downloads,
+ Duration timeout, Duration sleepBetweenRetries) {
+ this.connectionPool = connectionPool;
this.downloadDirectory = downloadDirectory;
this.timeout = timeout;
- this.fileReferenceDownloader = new FileReferenceDownloader(downloadDirectory, tmpDirectory, connectionPool, timeout, sleepBetweenRetries);
+ // Needed to receive RPC calls receiveFile* from server after asking for files
+ new FileReceiver(connectionPool.getSupervisor(), downloads, downloadDirectory);
+ this.fileReferenceDownloader = new FileReferenceDownloader(connectionPool, downloads, timeout, sleepBetweenRetries);
+ this.downloads = downloads;
}
public Optional<File> getFile(FileReference fileReference) {
@@ -74,39 +81,39 @@ public class FileDownloader implements AutoCloseable {
: download(fileReferenceDownload);
}
- double downloadStatus(FileReference fileReference) {
- return fileReferenceDownloader.downloadStatus(fileReference.value());
- }
+ public Map<FileReference, Double> downloadStatus() { return downloads.downloadStatus(); }
- public Map<FileReference, Double> downloadStatus() {
- return fileReferenceDownloader.downloadStatus();
- }
+ public ConnectionPool connectionPool() { return connectionPool; }
File downloadDirectory() {
return downloadDirectory;
}
- // Files are moved atomically, so if file reference exists and is accessible we can use it
private Optional<File> getFileFromFileSystem(FileReference fileReference) {
File[] files = new File(downloadDirectory, fileReference.value()).listFiles();
- if (downloadDirectory.exists() && downloadDirectory.isDirectory() && files != null && files.length > 0) {
- File file = files[0];
- if (!file.exists()) {
- throw new RuntimeException("File reference '" + fileReference.value() + "' does not exist");
- } else if (!file.canRead()) {
- throw new RuntimeException("File reference '" + fileReference.value() + "'exists, but unable to read it");
- } else {
- log.log(Level.FINE, () -> "File reference '" + fileReference.value() + "' found: " + file.getAbsolutePath());
- fileReferenceDownloader.setDownloadStatus(fileReference, 1.0);
- return Optional.of(file);
- }
+ if (files == null) return Optional.empty();
+ if (files.length == 0) return Optional.empty();
+ if (files.length > 1) throw new RuntimeException("More than one file reference found for " + fileReference);
+
+ File file = files[0];
+ if (!file.exists()) {
+ throw new RuntimeException("File reference '" + fileReference.value() + "' does not exist");
+ } else if (!file.canRead()) {
+ throw new RuntimeException("File reference '" + fileReference.value() + "' exists, but unable to read it");
+ } else {
+ log.log(Level.FINE, () -> "File reference '" + fileReference.value() + "' found: " + file.getAbsolutePath());
+ downloads.setDownloadStatus(fileReference, 1.0);
+ return Optional.of(file);
}
- return Optional.empty();
}
- private boolean alreadyDownloaded(FileReference fileReference) {
+ boolean isDownloading(FileReference fileReference) {
+ return downloads.get(fileReference).isPresent();
+ }
+
+ private boolean alreadyDownloaded(FileReferenceDownload fileReferenceDownload) {
try {
- return (getFileFromFileSystem(fileReference).isPresent());
+ return getFileFromFileSystem(fileReferenceDownload.fileReference()).isPresent();
} catch (RuntimeException e) {
return false;
}
@@ -114,8 +121,7 @@ public class FileDownloader implements AutoCloseable {
/** Start a download, don't wait for result */
public void downloadIfNeeded(FileReferenceDownload fileReferenceDownload) {
- FileReference fileReference = fileReferenceDownload.fileReference();
- if (alreadyDownloaded(fileReference)) return;
+ if (alreadyDownloaded(fileReferenceDownload)) return;
download(fileReferenceDownload);
}
@@ -125,11 +131,8 @@ public class FileDownloader implements AutoCloseable {
return fileReferenceDownloader.download(fileReferenceDownload);
}
- public FileReferenceDownloader fileReferenceDownloader() {
- return fileReferenceDownloader;
- }
-
public void close() {
fileReferenceDownloader.close();
}
+
}
diff --git a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java
index 1bc44e0bed2..e1a8cf92513 100644
--- a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java
+++ b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java
@@ -8,7 +8,6 @@ import com.yahoo.jrt.Int32Value;
import com.yahoo.jrt.Method;
import com.yahoo.jrt.Request;
import com.yahoo.jrt.Supervisor;
-import java.util.logging.Level;
import net.jpountz.xxhash.StreamingXXHash64;
import net.jpountz.xxhash.XXHashFactory;
@@ -22,6 +21,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.logging.Level;
import java.util.logging.Logger;
/**
@@ -39,11 +39,8 @@ public class FileReceiver {
public final static String RECEIVE_EOF_METHOD = "filedistribution.receiveFileEof";
private final Supervisor supervisor;
- private final FileReferenceDownloader downloader;
+ private final Downloads downloads;
private final File downloadDirectory;
- // Should be on same partition as downloadDirectory to make sure moving files from tmpDirectory
- // to downloadDirectory is atomic
- private final File tmpDirectory;
private final AtomicInteger nextSessionId = new AtomicInteger(1);
private final Map<Integer, Session> sessions = new HashMap<>();
@@ -61,7 +58,7 @@ public class FileReceiver {
private final File tmpDir;
private final File inprogressFile;
- Session(File downloadDirectory, File tmpDirectory, int sessionId, FileReference reference,
+ Session(File downloadDirectory, int sessionId, FileReference reference,
FileReferenceData.Type fileType, String fileName, long fileSize)
{
this.hasher = XXHashFactory.fastestInstance().newStreamingHash64(0);
@@ -74,12 +71,12 @@ public class FileReceiver {
currentPartId = 0;
currentHash = 0;
fileReferenceDir = new File(downloadDirectory, reference.value());
- this.tmpDir = tmpDirectory;
+ this.tmpDir = downloadDirectory;
try {
- inprogressFile = Files.createTempFile(tmpDirectory.toPath(), fileName, ".inprogress").toFile();
+ inprogressFile = Files.createTempFile(tmpDir.toPath(), fileName, ".inprogress").toFile();
} catch (IOException e) {
- String msg = "Failed creating temp file for inprogress file for " + fileName + " in '" + tmpDirectory.toPath() + "': ";
+ String msg = "Failed creating temp file for inprogress file for " + fileName + " in '" + tmpDir.toPath() + "': ";
log.log(Level.SEVERE, msg + e.getMessage(), e);
throw new RuntimeException(msg, e);
}
@@ -149,11 +146,10 @@ public class FileReceiver {
}
}
- FileReceiver(Supervisor supervisor, FileReferenceDownloader downloader, File downloadDirectory, File tmpDirectory) {
+ FileReceiver(Supervisor supervisor, Downloads downloads, File downloadDirectory) {
this.supervisor = supervisor;
- this.downloader = downloader;
+ this.downloads = downloads;
this.downloadDirectory = downloadDirectory;
- this.tmpDirectory = tmpDirectory;
registerMethods();
}
@@ -231,7 +227,7 @@ public class FileReceiver {
log.severe("Session id " + sessionId + " already exist, impossible. Request from(" + req.target() + ")");
} else {
try {
- sessions.put(sessionId, new Session(downloadDirectory, tmpDirectory, sessionId, reference,
+ sessions.put(sessionId, new Session(downloadDirectory, sessionId, reference,
FileReferenceData.Type.valueOf(type),fileName, fileSize));
} catch (Exception e) {
retval = 1;
@@ -260,7 +256,7 @@ public class FileReceiver {
}
double completeness = (double) session.currentFileSize / (double) session.fileSize;
log.log(Level.FINEST, () -> String.format("%.1f percent of '%s' downloaded", completeness * 100, reference.value()));
- downloader.setDownloadStatus(reference, completeness);
+ downloads.setDownloadStatus(reference, completeness);
}
req.returnValues().add(new Int32Value(retval));
}
@@ -273,7 +269,7 @@ public class FileReceiver {
Session session = getSession(sessionId);
int retval = verifySession(session, sessionId, reference);
File file = session.close(xxhash);
- downloader.completedDownloading(reference, file);
+ downloads.completedDownloading(reference, file);
synchronized (sessions) {
sessions.remove(sessionId);
}
diff --git a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java
index 0ce69c182ce..01240357fbe 100644
--- a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java
+++ b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java
@@ -12,23 +12,16 @@ import com.yahoo.vespa.config.ConnectionPool;
import java.io.File;
import java.time.Duration;
import java.time.Instant;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.Map;
import java.util.Optional;
-import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
-import java.util.stream.Collectors;
/**
* Downloads file reference using rpc requests to config server and keeps track of files being downloaded
- * <p>
- * Some methods are synchronized to make sure access to downloads is atomic
*
* @author hmusum
*/
@@ -40,20 +33,19 @@ public class FileReferenceDownloader {
Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()),
new DaemonThreadFactory("filereference downloader"));
private final ConnectionPool connectionPool;
- /* Ongoing downloads */
- private final Downloads downloads = new Downloads();
- /* Status for ongoing and finished downloads */
- private final DownloadStatuses downloadStatuses = new DownloadStatuses();
+ private final Downloads downloads;
private final Duration downloadTimeout;
private final Duration sleepBetweenRetries;
private final Duration rpcTimeout;
- FileReferenceDownloader(File downloadDirectory, File tmpDirectory, ConnectionPool connectionPool, Duration timeout, Duration sleepBetweenRetries) {
+ FileReferenceDownloader(ConnectionPool connectionPool,
+ Downloads downloads,
+ Duration timeout,
+ Duration sleepBetweenRetries) {
this.connectionPool = connectionPool;
+ this.downloads = downloads;
this.downloadTimeout = timeout;
this.sleepBetweenRetries = sleepBetweenRetries;
- // Needed to receive RPC calls receiveFile* from server after asking for files
- new FileReceiver(connectionPool.getSupervisor(), this, downloadDirectory, tmpDirectory);
String timeoutString = System.getenv("VESPA_CONFIGPROXY_FILEDOWNLOAD_RPC_TIMEOUT");
this.rpcTimeout = Duration.ofSeconds(timeoutString == null ? 30 : Integer.parseInt(timeoutString));
}
@@ -90,24 +82,11 @@ public class FileReferenceDownloader {
log.log(Level.FINE, () -> "Will download file reference '" + fileReference.value() + "' with timeout " + downloadTimeout);
downloads.add(fileReferenceDownload);
- downloadStatuses.add(fileReference);
downloadExecutor.submit(() -> startDownload(fileReferenceDownload));
return fileReferenceDownload.future();
}
- void completedDownloading(FileReference fileReference, File file) {
- Optional<FileReferenceDownload> download = downloads.get(fileReference);
- if (download.isPresent()) {
- downloadStatuses.get(fileReference).ifPresent(DownloadStatus::finished);
- downloads.remove(fileReference);
- download.get().future().complete(Optional.of(file));
- } else {
- log.log(Level.FINE, () -> "Received '" + fileReference + "', which was not requested. Can be ignored if happening during upgrades/restarts");
- }
- }
-
void failedDownloading(FileReference fileReference) {
- downloadStatuses.get(fileReference).ifPresent(d -> d.setProgress(0.0));
downloads.remove(fileReference);
}
@@ -139,10 +118,6 @@ public class FileReferenceDownloader {
}
}
- boolean isDownloading(FileReference fileReference) {
- return downloads.get(fileReference).isPresent();
- }
-
private boolean validateResponse(Request request) {
if (request.isError()) {
return false;
@@ -155,31 +130,6 @@ public class FileReferenceDownloader {
return true;
}
- double downloadStatus(String file) {
- double status = 0.0;
- Optional<DownloadStatus> downloadStatus = downloadStatuses.get(new FileReference(file));
- if (downloadStatus.isPresent()) {
- status = downloadStatus.get().progress();
- }
- return status;
- }
-
- void setDownloadStatus(FileReference fileReference, double completeness) {
- Optional<DownloadStatus> downloadStatus = downloadStatuses.get(fileReference);
- if (downloadStatus.isPresent())
- downloadStatus.get().setProgress(completeness);
- else
- downloadStatuses.add(fileReference, completeness);
- }
-
- Map<FileReference, Double> downloadStatus() {
- return downloadStatuses.all().values().stream().collect(Collectors.toMap(DownloadStatus::fileReference, DownloadStatus::progress));
- }
-
- public ConnectionPool connectionPool() {
- return connectionPool;
- }
-
public void close() {
downloadExecutor.shutdown();
try {
@@ -189,84 +139,4 @@ public class FileReferenceDownloader {
}
}
- private static class Downloads {
- private final Map<FileReference, FileReferenceDownload> downloads = new ConcurrentHashMap<>();
-
- void add(FileReferenceDownload fileReferenceDownload) {
- downloads.put(fileReferenceDownload.fileReference(), fileReferenceDownload);
- }
-
- void remove(FileReference fileReference) {
- downloads.remove(fileReference);
- }
-
- Optional<FileReferenceDownload> get(FileReference fileReference) {
- return Optional.ofNullable(downloads.get(fileReference));
- }
- }
-
- private static class DownloadStatus {
- private final FileReference fileReference;
- private double progress; // between 0 and 1
- private final Instant created;
-
- DownloadStatus(FileReference fileReference) {
- this.fileReference = fileReference;
- this.progress = 0.0;
- this.created = Instant.now();
- }
-
- public FileReference fileReference() {
- return fileReference;
- }
-
- public double progress() {
- return progress;
- }
-
- public void setProgress(double progress) {
- this.progress = progress;
- }
-
- public void finished() {
- setProgress(1.0);
- }
-
- public Instant created() {
- return created;
- }
- }
-
- /* Status for ongoing and completed downloads, keeps at most status for 100 last downloads */
- private static class DownloadStatuses {
-
- private static final int maxEntries = 100;
-
- private final Map<FileReference, DownloadStatus> downloadStatus = new ConcurrentHashMap<>();
-
- void add(FileReference fileReference) {
- add(fileReference, 0.0);
- }
-
- void add(FileReference fileReference, double progress) {
- DownloadStatus ds = new DownloadStatus(fileReference);
- ds.setProgress(progress);
- downloadStatus.put(fileReference, ds);
- if (downloadStatus.size() > maxEntries) {
- Map.Entry<FileReference, DownloadStatus> oldest =
- Collections.min(downloadStatus.entrySet(), Comparator.comparing(e -> e.getValue().created));
- downloadStatus.remove(oldest.getKey());
- }
- }
-
- Optional<DownloadStatus> get(FileReference fileReference) {
- return Optional.ofNullable(downloadStatus.get(fileReference));
- }
-
- Map<FileReference, DownloadStatus> all() {
- return Map.copyOf(downloadStatus);
- }
-
- }
-
}
diff --git a/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileDownloaderTest.java b/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileDownloaderTest.java
index 61575b650ce..d7700467494 100644
--- a/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileDownloaderTest.java
+++ b/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileDownloaderTest.java
@@ -1,5 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.filedistribution;
import com.yahoo.config.FileReference;
@@ -42,17 +41,17 @@ public class FileDownloaderTest {
private static final Duration sleepBetweenRetries = Duration.ofMillis(10);
private MockConnection connection;
+ private Downloads downloads;
private FileDownloader fileDownloader;
private File downloadDir;
- private File tempDir;
@Before
public void setup() {
try {
downloadDir = Files.createTempDirectory("filedistribution").toFile();
- tempDir = Files.createTempDirectory("download").toFile();
connection = new MockConnection();
- fileDownloader = new FileDownloader(connection, downloadDir, tempDir, Duration.ofSeconds(1), sleepBetweenRetries);
+ downloads = new Downloads();
+ fileDownloader = new FileDownloader(connection, downloadDir, downloads, Duration.ofSeconds(1), sleepBetweenRetries);
} catch (IOException e) {
e.printStackTrace();
fail(e.getMessage());
@@ -85,7 +84,7 @@ public class FileDownloaderTest {
assertEquals("content", IOUtils.readFile(pathToFile.get()));
// Verify download status when downloaded
- assertDownloadStatus(fileDownloader, fileReference, 1.0);
+ assertDownloadStatus(fileReference, 1.0);
}
{
@@ -98,7 +97,7 @@ public class FileDownloaderTest {
assertFalse(fileReferenceFullPath.getAbsolutePath(), fileDownloader.getFile(fileReference).isPresent());
// Verify download status when unable to download
- assertDownloadStatus(fileDownloader, fileReference, 0.0);
+ assertDownloadStatus(fileReference, 0.0);
}
{
@@ -109,7 +108,7 @@ public class FileDownloaderTest {
assertFalse(fileReferenceFullPath.getAbsolutePath(), fileDownloader.getFile(fileReference).isPresent());
// Verify download status
- assertDownloadStatus(fileDownloader, fileReference, 0.0);
+ assertDownloadStatus(fileReference, 0.0);
// Receives fileReference, should return and make it available to caller
String filename = "abc.jar";
@@ -122,7 +121,7 @@ public class FileDownloaderTest {
assertEquals("some other content", IOUtils.readFile(downloadedFile.get()));
// Verify download status when downloaded
- assertDownloadStatus(fileDownloader, fileReference, 1.0);
+ assertDownloadStatus(fileReference, 1.0);
}
{
@@ -133,7 +132,7 @@ public class FileDownloaderTest {
assertFalse(fileReferenceFullPath.getAbsolutePath(), fileDownloader.getFile(fileReference).isPresent());
// Verify download status
- assertDownloadStatus(fileDownloader, fileReference, 0.0);
+ assertDownloadStatus(fileReference, 0.0);
// Receives fileReference, should return and make it available to caller
String filename = "abc.tar.gz";
@@ -157,13 +156,13 @@ public class FileDownloaderTest {
assertEquals("bar", IOUtils.readFile(downloadedBar));
// Verify download status when downloaded
- assertDownloadStatus(fileDownloader, fileReference, 1.0);
+ assertDownloadStatus(fileReference, 1.0);
}
}
@Test
public void getFileWhenConnectionError() throws IOException {
- fileDownloader = new FileDownloader(connection, downloadDir, tempDir, Duration.ofSeconds(2), sleepBetweenRetries);
+ fileDownloader = new FileDownloader(connection, downloadDir, downloads, Duration.ofSeconds(2), sleepBetweenRetries);
File downloadDir = fileDownloader.downloadDirectory();
int timesToFail = 2;
@@ -175,8 +174,8 @@ public class FileDownloaderTest {
assertFalse(fileReferenceFullPath.getAbsolutePath(), fileDownloader.getFile(fileReference).isPresent());
// Getting file failed, verify download status and since there was an error is not downloading ATM
- assertDownloadStatus(fileDownloader, fileReference, 0.0);
- assertFalse(fileDownloader.fileReferenceDownloader().isDownloading(fileReference));
+ assertDownloadStatus(fileReference, 0.0);
+ assertFalse(fileDownloader.isDownloading(fileReference));
// Receives fileReference, should return and make it available to caller
String filename = "abc.jar";
@@ -188,7 +187,7 @@ public class FileDownloaderTest {
assertEquals("some other content", IOUtils.readFile(downloadedFile.get()));
// Verify download status when downloaded
- assertDownloadStatus(fileDownloader, fileReference, 1.0);
+ assertDownloadStatus(fileReference, 1.0);
assertEquals(timesToFail, responseHandler.failedTimes);
}
@@ -197,7 +196,7 @@ public class FileDownloaderTest {
public void getFileWhenDownloadInProgress() throws IOException, ExecutionException, InterruptedException {
ExecutorService executor = Executors.newFixedThreadPool(2);
String filename = "abc.jar";
- fileDownloader = new FileDownloader(connection, downloadDir, tempDir, Duration.ofSeconds(3), sleepBetweenRetries);
+ fileDownloader = new FileDownloader(connection, downloadDir, downloads, Duration.ofSeconds(3), sleepBetweenRetries);
File downloadDir = fileDownloader.downloadDirectory();
// Delay response so that we can make a second request while downloading the file from the first request
@@ -210,8 +209,8 @@ public class FileDownloaderTest {
Future<Future<Optional<File>>> future1 = executor.submit(() -> fileDownloader.getFutureFile(fileReferenceDownload));
do {
Thread.sleep(10);
- } while (! fileDownloader.fileReferenceDownloader().isDownloading(fileReference));
- assertTrue(fileDownloader.fileReferenceDownloader().isDownloading(fileReference));
+ } while (! fileDownloader.isDownloading(fileReference));
+ assertTrue(fileDownloader.isDownloading(fileReference));
// Request file while download is in progress
Future<Future<Optional<File>>> future2 = executor.submit(() -> fileDownloader.getFutureFile(fileReferenceDownload));
@@ -237,11 +236,11 @@ public class FileDownloaderTest {
Duration timeout = Duration.ofMillis(200);
MockConnection connectionPool = new MockConnection();
connectionPool.setResponseHandler(new MockConnection.WaitResponseHandler(timeout.plus(Duration.ofMillis(1000))));
- FileDownloader fileDownloader = new FileDownloader(connectionPool, downloadDir, tempDir, timeout, sleepBetweenRetries);
+ FileDownloader fileDownloader = new FileDownloader(connectionPool, downloadDir, downloads, timeout, sleepBetweenRetries);
FileReference foo = new FileReference("foo");
// Should download since we do not have the file on disk
fileDownloader.downloadIfNeeded(new FileReferenceDownload(foo));
- assertTrue(fileDownloader.fileReferenceDownloader().isDownloading(foo));
+ assertTrue(fileDownloader.isDownloading(foo));
assertFalse(fileDownloader.getFile(foo).isPresent());
// Receive files to simulate download
receiveFile();
@@ -271,8 +270,8 @@ public class FileDownloaderTest {
return new File(dir, fileReference.value());
}
- private void assertDownloadStatus(FileDownloader fileDownloader, FileReference fileReference, double expectedDownloadStatus) {
- double downloadStatus = fileDownloader.downloadStatus(fileReference);
+ private void assertDownloadStatus(FileReference fileReference, double expectedDownloadStatus) {
+ double downloadStatus = downloads.downloadStatus(fileReference);
assertEquals(expectedDownloadStatus, downloadStatus, 0.0001);
}
@@ -285,10 +284,10 @@ public class FileDownloaderTest {
FileReferenceData.Type type, byte[] content) {
XXHash64 hasher = XXHashFactory.fastestInstance().hash64();
FileReceiver.Session session =
- new FileReceiver.Session(downloadDir, tempDir, 1, fileReference, type, filename, content.length);
+ new FileReceiver.Session(downloadDir, 1, fileReference, type, filename, content.length);
session.addPart(0, content);
File file = session.close(hasher.hash(ByteBuffer.wrap(content), 0));
- fileDownloader.fileReferenceDownloader().completedDownloading(fileReference, file);
+ downloads.completedDownloading(fileReference, file);
}
private static class MockConnection implements ConnectionPool, com.yahoo.vespa.config.Connection {
diff --git a/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileReceiverTest.java b/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileReceiverTest.java
index a9ddff655e3..69d4344d246 100644
--- a/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileReceiverTest.java
+++ b/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileReceiverTest.java
@@ -21,7 +21,6 @@ import java.nio.file.Files;
public class FileReceiverTest {
private File root;
- private File tempDir;
private final XXHash64 hasher = XXHashFactory.fastestInstance().hash64();
@Rule
@@ -30,7 +29,6 @@ public class FileReceiverTest {
@Before
public void setup() throws IOException {
root = temporaryFolder.newFolder("root");
- tempDir = temporaryFolder.newFolder("tmp");
}
@Test
@@ -70,7 +68,7 @@ public class FileReceiverTest {
private void transferPartsAndAssert(FileReference ref, String fileName, String all, int numParts) throws IOException {
byte [] allContent = Utf8.toBytes(all);
- FileReceiver.Session session = new FileReceiver.Session(root, tempDir, 1, ref,
+ FileReceiver.Session session = new FileReceiver.Session(root, 1, ref,
FileReferenceData.Type.file, fileName, allContent.length);
int partSize = (allContent.length+(numParts-1))/numParts;
ByteBuffer bb = ByteBuffer.wrap(allContent);
@@ -91,7 +89,7 @@ public class FileReceiverTest {
private void transferCompressedData(FileReference ref, String fileName, byte[] data) {
FileReceiver.Session session =
- new FileReceiver.Session(root, tempDir, 1, ref, FileReferenceData.Type.compressed, fileName, data.length);
+ new FileReceiver.Session(root, 1, ref, FileReferenceData.Type.compressed, fileName, data.length);
session.addPart(0, data);
session.close(hasher.hash(ByteBuffer.wrap(data), 0));
}
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
index 0ce7ec2259d..4885f5c9ae5 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
@@ -139,13 +139,13 @@ public class Flags {
public static final UnboundBooleanFlag ENCRYPT_DISK = defineFeatureFlag(
"encrypt-disk", false,
- List.of("hakonhall"), "2021-05-05", "2021-06-05",
+ List.of("hakonhall"), "2021-05-05", "2021-08-05",
"Allow migrating an unencrypted data partition to being encrypted.",
"Takes effect on next host-admin tick.");
public static final UnboundBooleanFlag ENCRYPT_DIRTY_DISK = defineFeatureFlag(
"encrypt-dirty-disk", false,
- List.of("hakonhall"), "2021-05-14", "2021-06-05",
+ List.of("hakonhall"), "2021-05-14", "2021-08-05",
"Allow migrating an unencrypted data partition to being encrypted when (de)provisioned.",
"Takes effect on next host-admin tick.");
@@ -158,7 +158,7 @@ public class Flags {
public static final UnboundIntFlag METRICS_PROXY_MAX_HEAP_SIZE_IN_MB = defineIntFlag(
"metrics-proxy-max-heap-size-in-mb", 256,
- List.of("hmusum"), "2021-03-01", "2021-06-15",
+ List.of("hmusum"), "2021-03-01", "2021-07-01",
"JVM max heap size for metrics proxy in Mb",
"Takes effect when restarting metrics proxy",
CLUSTER_TYPE);
@@ -204,6 +204,20 @@ public class Flags {
"Takes effect after distributor restart",
ZONE_ID, APPLICATION_ID);
+ public static final UnboundIntFlag MAX_CONCURRENT_MERGES_PER_NODE = defineIntFlag(
+ "max-concurrent-merges-per-node", 16,
+ List.of("balder", "vekterli"), "2021-06-06", "2021-08-01",
+ "Specifies max concurrent merges per content node.",
+ "Takes effect at redeploy",
+ ZONE_ID, APPLICATION_ID);
+
+ public static final UnboundIntFlag MAX_MERGE_QUEUE_SIZE = defineIntFlag(
+ "max-merge-queue-size", 1024,
+ List.of("balder", "vekterli"), "2021-06-06", "2021-08-01",
+ "Specifies max size of merge queue.",
+ "Takes effect at redeploy",
+ ZONE_ID, APPLICATION_ID);
+
public static final UnboundBooleanFlag USE_EXTERNAL_RANK_EXPRESSION = defineFeatureFlag(
"use-external-rank-expression", false,
List.of("baldersheim"), "2021-05-24", "2021-07-01",
@@ -218,6 +232,13 @@ public class Flags {
"Takes effect on next internal redeployment",
APPLICATION_ID);
+ public static final UnboundIntFlag LARGE_RANK_EXPRESSION_LIMIT = defineIntFlag(
+ "large-rank-expression-limit", 0x10000,
+ List.of("baldersheim"), "2021-06-09", "2021-07-01",
+ "Limit for size of rank expressions distributed by filedistribution",
+ "Takes effect on next internal redeployment",
+ APPLICATION_ID);
+
public static final UnboundBooleanFlag ENABLE_ROUTING_CORE_DUMP = defineFeatureFlag(
"enable-routing-core-dumps", false,
List.of("tokle"), "2021-04-16", "2021-08-01",
@@ -232,13 +253,6 @@ public class Flags {
"Takes effect immediately",
APPLICATION_ID);
- public static final UnboundBooleanFlag VESPA_APP_DOMAIN_IN_CERTIFICATE = defineFeatureFlag(
- "new-domain-in-certificate", false,
- List.of("mpolden"), "2021-05-25", "2021-09-01",
- "Whether to include the vespa-app.cloud names in certificate requests",
- "Takes effect on next deployment through controller",
- APPLICATION_ID);
-
public static final UnboundIntFlag MAX_ENCRYPTING_HOSTS = defineIntFlag(
"max-encrypting-hosts", 0,
List.of("mpolden", "hakonhall"), "2021-05-27", "2021-10-01",
@@ -252,6 +266,26 @@ public class Flags {
"Takes effect on next restart",
ZONE_ID, APPLICATION_ID);
+ public static final UnboundBooleanFlag THROW_EXCEPTION_IF_RESOURCE_LIMITS_SPECIFIED = defineFeatureFlag(
+ "throw-exception-if-resource-limits-specified", false,
+ List.of("hmusum"), "2021-06-07", "2021-08-07",
+ "Whether to throw an exception in hosted Vespa if the application specifies resource limits in services.xml",
+ "Takes effect on next deployment through controller",
+ APPLICATION_ID);
+
+ public static final UnboundBooleanFlag MOVE_SEARCH_DEFINITIONS_TO_SCHEMAS_DIR = defineFeatureFlag(
+ "move-search-definitions-to-schemas-dir", false,
+ List.of("hmusum"), "2021-06-09", "2021-08-09",
+ "Whether to move files in searchdefinitions/ to schemas/ when deploying an application",
+ "Takes effect on next deployment",
+ ZONE_ID, APPLICATION_ID);
+
+ public static final UnboundBooleanFlag LOAD_LOCAL_SESSIONS_WHEN_BOOTSTRAPPING = defineFeatureFlag(
+ "load-local-sessions-when-bootstrapping", true,
+ List.of("hmusum"), "2021-06-15", "2021-07-15",
+ "Whether to load local sessions when bootstrapping config server",
+ "Takes effect on restart of config server");
+
/** WARNING: public for testing: All flags should be defined in {@link Flags}. */
public static UnboundBooleanFlag defineFeatureFlag(String flagId, boolean defaultValue, List<String> owners,
String createdAt, String expiresAt, String description,
diff --git a/fnet/src/vespa/fnet/connection.cpp b/fnet/src/vespa/fnet/connection.cpp
index 47d6a1e429a..4315e76f7ef 100644
--- a/fnet/src/vespa/fnet/connection.cpp
+++ b/fnet/src/vespa/fnet/connection.cpp
@@ -491,10 +491,10 @@ FNET_Connection::FNET_Connection(FNET_TransportThread *owner,
_packetCHID(0),
_writeWork(0),
_currentID(1), // <-- NB
- _input(FNET_READ_SIZE * 2),
+ _input(0),
_queue(256),
_myQueue(256),
- _output(FNET_WRITE_SIZE * 2),
+ _output(0),
_channels(),
_callbackTarget(nullptr),
_cleanup(nullptr)
@@ -525,10 +525,10 @@ FNET_Connection::FNET_Connection(FNET_TransportThread *owner,
_packetCHID(0),
_writeWork(0),
_currentID(0),
- _input(FNET_READ_SIZE * 2),
+ _input(0),
_queue(256),
_myQueue(256),
- _output(FNET_WRITE_SIZE * 2),
+ _output(0),
_channels(),
_callbackTarget(nullptr),
_cleanup(nullptr)
diff --git a/fnet/src/vespa/fnet/connection.h b/fnet/src/vespa/fnet/connection.h
index 3da9b58f928..532bd7c6638 100644
--- a/fnet/src/vespa/fnet/connection.h
+++ b/fnet/src/vespa/fnet/connection.h
@@ -11,6 +11,7 @@
#include <vespa/vespalib/net/socket_handle.h>
#include <vespa/vespalib/net/async_resolver.h>
#include <vespa/vespalib/net/crypto_socket.h>
+#include <vespa/vespalib/util/size_literals.h>
#include <atomic>
class FNET_IPacketStreamer;
@@ -60,9 +61,9 @@ public:
};
enum {
- FNET_READ_SIZE = 32768,
+ FNET_READ_SIZE = 16_Ki,
FNET_READ_REDO = 10,
- FNET_WRITE_SIZE = 32768,
+ FNET_WRITE_SIZE = 16_Ki,
FNET_WRITE_REDO = 10
};
diff --git a/fnet/src/vespa/fnet/frt/supervisor.cpp b/fnet/src/vespa/fnet/frt/supervisor.cpp
index 388d754ece4..d992567f776 100644
--- a/fnet/src/vespa/fnet/frt/supervisor.cpp
+++ b/fnet/src/vespa/fnet/frt/supervisor.cpp
@@ -430,4 +430,8 @@ StandaloneFRT::~StandaloneFRT()
_transport->ShutDown(true);
}
+void StandaloneFRT::shutdown() {
+ _transport->ShutDown(true);
+}
+
}
diff --git a/fnet/src/vespa/fnet/frt/supervisor.h b/fnet/src/vespa/fnet/frt/supervisor.h
index 1332bbe3ddb..2743cafae26 100644
--- a/fnet/src/vespa/fnet/frt/supervisor.h
+++ b/fnet/src/vespa/fnet/frt/supervisor.h
@@ -133,6 +133,7 @@ public:
explicit StandaloneFRT(std::shared_ptr<vespalib::CryptoEngine> crypto);
~StandaloneFRT();
FRT_Supervisor & supervisor() { return *_supervisor; }
+ void shutdown();
private:
std::unique_ptr<FastOS_ThreadPool> _threadPool;
std::unique_ptr<FNET_Transport> _transport;
diff --git a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotator.java b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotator.java
index 8b6ef83f05e..81a5305a778 100644
--- a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotator.java
+++ b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotator.java
@@ -115,9 +115,7 @@ public class LinguisticsAnnotator {
}
return;
}
- if ( ! token.isIndexable()) {
- return;
- }
+ if ( ! token.isIndexable()) return;
}
String orig = token.getOrig();
int pos = (int)token.getOffset();
@@ -138,9 +136,6 @@ public class LinguisticsAnnotator {
String lowercasedTerm = lowercasedOrig;
String term = token.getTokenString();
if (term != null) {
- term = tokenizer.getReplacementTerm(term);
- }
- if (term != null) {
lowercasedTerm = toLowerCase(term);
}
if (! lowercasedOrig.equals(lowercasedTerm)) {
@@ -155,12 +150,7 @@ public class LinguisticsAnnotator {
}
} else {
String term = token.getTokenString();
- if (term != null) {
- term = tokenizer.getReplacementTerm(term);
- }
- if (term == null || term.trim().isEmpty()) {
- return;
- }
+ if (term == null || term.trim().isEmpty()) return;
if (termOccurrences.termCountBelowLimit(term)) {
parent.span(pos, len).annotate(lowerCaseTermAnnotation(term, token.getOrig()));
}
diff --git a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotatorTestCase.java b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotatorTestCase.java
index afbcf597a46..5f436720990 100644
--- a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotatorTestCase.java
+++ b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotatorTestCase.java
@@ -19,6 +19,7 @@ import org.junit.Test;
import org.mockito.Mockito;
import java.util.*;
+import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@@ -30,12 +31,6 @@ public class LinguisticsAnnotatorTestCase {
private static final AnnotatorConfig CONFIG = new AnnotatorConfig();
- // --------------------------------------------------------------------------------
- //
- // Tests
- //
- // --------------------------------------------------------------------------------
-
@Test
public void requireThatAnnotateFailsWithZeroTokens() {
assertAnnotations(null, "foo");
@@ -145,7 +140,7 @@ public class LinguisticsAnnotatorTestCase {
continue;
}
assertAnnotations(expected, "foo",
- newLinguistics(Arrays.asList(newToken("foo", "foo", type, specialToken)),
+ newLinguistics(List.of(newToken("foo", "foo", type, specialToken)),
Collections.singletonMap("foo", "bar")));
}
}
@@ -159,7 +154,7 @@ public class LinguisticsAnnotatorTestCase {
StringFieldValue val = new StringFieldValue("foo");
val.setSpanTree(spanTree);
- Linguistics linguistics = newLinguistics(Arrays.asList(newToken("foo", "bar", TokenType.ALPHABETIC, false)),
+ Linguistics linguistics = newLinguistics(List.of(newToken("foo", "bar", TokenType.ALPHABETIC, false)),
Collections.<String, String>emptyMap());
new LinguisticsAnnotator(linguistics, CONFIG).annotate(val);
@@ -253,11 +248,15 @@ public class LinguisticsAnnotatorTestCase {
private static class MyTokenizer implements Tokenizer {
final List<Token> tokens;
- final Map<String, String> replacementTerms;
public MyTokenizer(List<? extends Token> tokens, Map<String, String> replacementTerms) {
- this.tokens = new ArrayList<>(tokens);
- this.replacementTerms = replacementTerms;
+ this.tokens = tokens.stream().map(token -> replace(token, replacementTerms)).collect(Collectors.toList());
+ }
+
+ private Token replace(Token token, Map<String, String> replacementTerms) {
+ var simpleToken = (SimpleToken)token;
+ simpleToken.setTokenString(replacementTerms.getOrDefault(token.getTokenString(), token.getTokenString()));
+ return simpleToken;
}
@Override
@@ -265,10 +264,6 @@ public class LinguisticsAnnotatorTestCase {
return tokens;
}
- @Override
- public String getReplacementTerm(String term) {
- String replacement = replacementTerms.get(term);
- return replacement != null ? replacement : term;
- }
}
+
}
diff --git a/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/AthenzAuthorizationFilter.java b/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/AthenzAuthorizationFilter.java
index dd4b62ee494..6136bcdfd3a 100644
--- a/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/AthenzAuthorizationFilter.java
+++ b/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/AthenzAuthorizationFilter.java
@@ -3,12 +3,14 @@ package com.yahoo.jdisc.http.filter.security.athenz;
import com.google.inject.Inject;
import com.yahoo.jdisc.Metric;
+import com.yahoo.jdisc.http.HttpRequest;
import com.yahoo.jdisc.http.filter.DiscFilterRequest;
import com.yahoo.jdisc.http.filter.security.athenz.RequestResourceMapper.ResourceNameAndAction;
import com.yahoo.jdisc.http.filter.security.base.JsonSecurityRequestFilterBase;
import com.yahoo.vespa.athenz.api.AthenzAccessToken;
import com.yahoo.vespa.athenz.api.AthenzIdentity;
import com.yahoo.vespa.athenz.api.AthenzPrincipal;
+import com.yahoo.vespa.athenz.api.AthenzRole;
import com.yahoo.vespa.athenz.api.ZToken;
import com.yahoo.vespa.athenz.tls.AthenzX509CertificateUtils;
import com.yahoo.vespa.athenz.utils.AthenzIdentities;
@@ -20,6 +22,7 @@ import java.security.cert.X509Certificate;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.logging.Level;
@@ -56,16 +59,20 @@ public class AthenzAuthorizationFilter extends JsonSecurityRequestFilterBase {
private final RequestResourceMapper requestResourceMapper;
private final Metric metric;
private final Set<AthenzIdentity> allowedProxyIdentities;
+ private final Optional<AthenzRole> readRole;
+ private final Optional<AthenzRole> writeRole;
@Inject
public AthenzAuthorizationFilter(AthenzAuthorizationFilterConfig config, RequestResourceMapper resourceMapper, Metric metric) {
- this(config, resourceMapper, new DefaultZpe(), metric);
+ this(config, resourceMapper, new DefaultZpe(), metric, null, null);
}
public AthenzAuthorizationFilter(AthenzAuthorizationFilterConfig config,
RequestResourceMapper resourceMapper,
Zpe zpe,
- Metric metric) {
+ Metric metric,
+ AthenzRole readRole,
+ AthenzRole writeRole) {
this.roleTokenHeaderName = config.roleTokenHeaderName();
List<EnabledCredentials.Enum> enabledCredentials = config.enabledCredentials();
this.enabledCredentials = enabledCredentials.isEmpty()
@@ -77,6 +84,8 @@ public class AthenzAuthorizationFilter extends JsonSecurityRequestFilterBase {
this.allowedProxyIdentities = config.allowedProxyIdentities().stream()
.map(AthenzIdentities::from)
.collect(Collectors.toSet());
+ this.readRole = Optional.ofNullable(readRole);
+ this.writeRole = Optional.ofNullable(writeRole);
}
@Override
@@ -86,7 +95,7 @@ public class AthenzAuthorizationFilter extends JsonSecurityRequestFilterBase {
requestResourceMapper.getResourceNameAndAction(request);
log.log(Level.FINE, () -> String.format("Resource mapping for '%s': %s", request, resourceMapping));
if (resourceMapping.isEmpty()) {
- incrementAcceptedMetrics(request, false);
+ incrementAcceptedMetrics(request, false, Optional.empty());
return Optional.empty();
}
Result result = checkAccessAllowed(request, resourceMapping.get());
@@ -94,15 +103,15 @@ public class AthenzAuthorizationFilter extends JsonSecurityRequestFilterBase {
setAttribute(request, RESULT_ATTRIBUTE, resultType.name());
if (resultType == AuthorizationResult.Type.ALLOW) {
populateRequestWithResult(request, result);
- incrementAcceptedMetrics(request, true);
+ incrementAcceptedMetrics(request, true, Optional.of(result));
return Optional.empty();
}
log.log(Level.FINE, () -> String.format("Forbidden (403) for '%s': %s", request, resultType.name()));
- incrementRejectedMetrics(request, FORBIDDEN, resultType.name());
+ incrementRejectedMetrics(request, FORBIDDEN, resultType.name(), Optional.of(result));
return Optional.of(new ErrorResponse(FORBIDDEN, "Access forbidden: " + resultType.getDescription()));
} catch (IllegalArgumentException e) {
log.log(Level.FINE, () -> String.format("Unauthorized (401) for '%s': %s", request, e.getMessage()));
- incrementRejectedMetrics(request, UNAUTHORIZED, "Unauthorized");
+ incrementRejectedMetrics(request, UNAUTHORIZED, "Unauthorized", Optional.empty());
return Optional.of(new ErrorResponse(UNAUTHORIZED, e.getMessage()));
}
}
@@ -130,33 +139,53 @@ public class AthenzAuthorizationFilter extends JsonSecurityRequestFilterBase {
X509Certificate identityCertificate = getClientCertificate(request).get();
AthenzIdentity peerIdentity = AthenzIdentities.from(identityCertificate);
if (allowedProxyIdentities.contains(peerIdentity)) {
- return checkAccessWithProxiedAccessToken(resourceAndAction, accessToken, identityCertificate);
+ return checkAccessWithProxiedAccessToken(request, resourceAndAction, accessToken, identityCertificate);
} else {
var zpeResult = zpe.checkAccessAllowed(
accessToken, identityCertificate, resourceAndAction.resourceName(), resourceAndAction.action());
- return new Result(ACCESS_TOKEN, peerIdentity, zpeResult);
+ return getResult(ACCESS_TOKEN, peerIdentity, zpeResult, request, resourceAndAction, mapToRequestPrivileges(accessToken.roles()));
}
}
- private Result checkAccessWithProxiedAccessToken(ResourceNameAndAction resourceAndAction, AthenzAccessToken accessToken, X509Certificate identityCertificate) {
+ private Result getResult(EnabledCredentials.Enum credentialType, AthenzIdentity identity, AuthorizationResult zpeResult, DiscFilterRequest request, ResourceNameAndAction resourceAndAction, List<String> privileges) {
+ String currentAction = resourceAndAction.action();
+ String futureAction = resourceAndAction.futureAction();
+ return new Result(credentialType, identity, zpeResult, privileges, currentAction, futureAction);
+ }
+
+ private List<String> mapToRequestPrivileges(List<AthenzRole> roles) {
+ return roles.stream()
+ .map(this::rolePrivilege)
+ .filter(Objects::nonNull)
+ .collect(Collectors.toList());
+ }
+
+ private String rolePrivilege(AthenzRole role) {
+ if (readRole.stream().anyMatch(role::equals)) return "read";
+ if (writeRole.stream().anyMatch(role::equals)) return "write";
+ return null;
+ }
+
+ private Result checkAccessWithProxiedAccessToken(DiscFilterRequest request, ResourceNameAndAction resourceAndAction, AthenzAccessToken accessToken, X509Certificate identityCertificate) {
AthenzIdentity proxyIdentity = AthenzIdentities.from(identityCertificate);
log.log(Level.FINE,
() -> String.format("Checking proxied access token. Proxy identity: '%s'. Allowed identities: %s", proxyIdentity, allowedProxyIdentities));
var zpeResult = zpe.checkAccessAllowed(accessToken, resourceAndAction.resourceName(), resourceAndAction.action());
- return new Result(ACCESS_TOKEN, AthenzIdentities.from(identityCertificate), zpeResult);
+ return getResult(ACCESS_TOKEN, AthenzIdentities.from(identityCertificate), zpeResult, request, resourceAndAction, mapToRequestPrivileges(accessToken.roles()));
}
private Result checkAccessWithRoleCertificate(DiscFilterRequest request, ResourceNameAndAction resourceAndAction) {
X509Certificate roleCertificate = getClientCertificate(request).get();
var zpeResult = zpe.checkAccessAllowed(roleCertificate, resourceAndAction.resourceName(), resourceAndAction.action());
AthenzIdentity identity = AthenzX509CertificateUtils.getIdentityFromRoleCertificate(roleCertificate);
- return new Result(ROLE_CERTIFICATE, identity, zpeResult);
+ AthenzX509CertificateUtils.getRolesFromRoleCertificate(roleCertificate).roleName();
+ return getResult(ROLE_CERTIFICATE, identity, zpeResult, request, resourceAndAction, mapToRequestPrivileges(List.of(AthenzX509CertificateUtils.getRolesFromRoleCertificate(roleCertificate))));
}
private Result checkAccessWithRoleToken(DiscFilterRequest request, ResourceNameAndAction resourceAndAction) {
ZToken roleToken = getRoleToken(request);
var zpeResult = zpe.checkAccessAllowed(roleToken, resourceAndAction.resourceName(), resourceAndAction.action());
- return new Result(ROLE_TOKEN, roleToken.getIdentity(), zpeResult);
+ return getResult(ROLE_TOKEN, roleToken.getIdentity(), zpeResult, request, resourceAndAction, mapToRequestPrivileges(roleToken.getRoles()));
}
private static boolean isAccessTokenPresent(DiscFilterRequest request) {
@@ -246,20 +275,30 @@ public class AthenzAuthorizationFilter extends JsonSecurityRequestFilterBase {
request.setAttribute(name, value);
}
- private void incrementAcceptedMetrics(DiscFilterRequest request, boolean authzRequired) {
+ private void incrementAcceptedMetrics(DiscFilterRequest request, boolean authzRequired, Optional<Result> result) {
String hostHeader = request.getHeader("Host");
Metric.Context context = metric.createContext(Map.of(
"endpoint", hostHeader != null ? hostHeader : "",
- "authz-required", Boolean.toString(authzRequired)));
+ "authz-required", Boolean.toString(authzRequired),
+ "httpMethod", HttpRequest.Method.valueOf(request.getMethod()).name(),
+ "requestPrivileges", result.map(r -> String.join(",", r.requestPrivileges)).orElse(""),
+ "currentRequestMapping", result.map(r -> r.currentAction).orElse(""),
+ "futureRequestMapping", result.map(r -> r.futureAction).orElse("")
+ ));
metric.add(ACCEPTED_METRIC_NAME, 1L, context);
}
- private void incrementRejectedMetrics(DiscFilterRequest request, int statusCode, String zpeCode) {
+ private void incrementRejectedMetrics(DiscFilterRequest request, int statusCode, String zpeCode, Optional<Result> result) {
String hostHeader = request.getHeader("Host");
Metric.Context context = metric.createContext(Map.of(
"endpoint", hostHeader != null ? hostHeader : "",
"status-code", Integer.toString(statusCode),
- "zpe-status", zpeCode));
+ "zpe-status", zpeCode,
+ "httpMethod", HttpRequest.Method.valueOf(request.getMethod()).name(),
+ "requestPrivileges", result.map(r -> String.join(",", r.requestPrivileges)).orElse(""),
+ "currentRequestMapping", result.map(r -> r.currentAction).orElse(""),
+ "futureRequestMapping", result.map(r -> r.futureAction).orElse("")
+ ));
metric.add(REJECTED_METRIC_NAME, 1L, context);
}
@@ -267,11 +306,17 @@ public class AthenzAuthorizationFilter extends JsonSecurityRequestFilterBase {
final EnabledCredentials.Enum credentialType;
final AthenzIdentity identity;
final AuthorizationResult zpeResult;
+ final List<String> requestPrivileges;
+ final String currentAction;
+ final String futureAction;
- Result(EnabledCredentials.Enum credentialType, AthenzIdentity identity, AuthorizationResult zpeResult) {
+ public Result(EnabledCredentials.Enum credentialType, AthenzIdentity identity, AuthorizationResult zpeResult, List<String> requestPrivileges, String currentAction, String futureAction) {
this.credentialType = credentialType;
this.identity = identity;
this.zpeResult = zpeResult;
+ this.requestPrivileges = requestPrivileges;
+ this.currentAction = currentAction;
+ this.futureAction = futureAction;
}
}
}
diff --git a/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/RequestResourceMapper.java b/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/RequestResourceMapper.java
index 56c52bd71c4..c962e973959 100644
--- a/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/RequestResourceMapper.java
+++ b/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/RequestResourceMapper.java
@@ -28,10 +28,15 @@ public interface RequestResourceMapper {
class ResourceNameAndAction {
private final AthenzResourceName resourceName;
private final String action;
+ private final String futureAction;
public ResourceNameAndAction(AthenzResourceName resourceName, String action) {
+ this(resourceName, action, action);
+ }
+ public ResourceNameAndAction(AthenzResourceName resourceName, String action, String futureAction) {
this.resourceName = resourceName;
this.action = action;
+ this.futureAction = futureAction;
}
public AthenzResourceName resourceName() {
@@ -42,6 +47,14 @@ public interface RequestResourceMapper {
return action;
}
+ public ResourceNameAndAction withFutureAction(String futureAction) {
+ return new ResourceNameAndAction(resourceName, action, futureAction);
+ }
+
+ public String futureAction() {
+ return futureAction;
+ }
+
@Override
public String toString() {
return "ResourceNameAndAction{" +
diff --git a/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/athenz/AthenzAuthorizationFilterTest.java b/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/athenz/AthenzAuthorizationFilterTest.java
index bfe02d1f279..137e4653670 100644
--- a/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/athenz/AthenzAuthorizationFilterTest.java
+++ b/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/athenz/AthenzAuthorizationFilterTest.java
@@ -296,7 +296,9 @@ public class AthenzAuthorizationFilterTest {
.allowedProxyIdentities(allowedProxyIdentities)),
new StaticRequestResourceMapper(RESOURCE_NAME, ACTION),
zpe,
- metric);
+ metric,
+ new AthenzRole("domain","reader"),
+ new AthenzRole("domain", "writer"));
}
private static void assertAuthorizationResult(DiscFilterRequest request, Type expectedResult) {
diff --git a/jrt/src/com/yahoo/jrt/Buffer.java b/jrt/src/com/yahoo/jrt/Buffer.java
index 937666a28ae..06a658740e5 100644
--- a/jrt/src/com/yahoo/jrt/Buffer.java
+++ b/jrt/src/com/yahoo/jrt/Buffer.java
@@ -6,9 +6,6 @@ import java.nio.ByteBuffer;
class Buffer {
-
- static final int MAX_IO = 65000;
-
private ByteBuffer buf;
private int readPos;
private int writePos;
@@ -111,20 +108,4 @@ class Buffer {
ensureFree(minFree);
return buf;
}
-
- public ByteBuffer getChannelReadable() {
- ByteBuffer bb = getReadable();
- if (bb.remaining() > MAX_IO) {
- bb.limit(bb.position() + MAX_IO);
- }
- return bb;
- }
-
- public ByteBuffer getChannelWritable(int minFree) {
- ByteBuffer bb = getWritable(minFree);
- if (bb.remaining() > MAX_IO) {
- bb.limit(bb.position() + MAX_IO);
- }
- return bb;
- }
}
diff --git a/jrt/src/com/yahoo/jrt/Connection.java b/jrt/src/com/yahoo/jrt/Connection.java
index 891558684ed..6158576348a 100644
--- a/jrt/src/com/yahoo/jrt/Connection.java
+++ b/jrt/src/com/yahoo/jrt/Connection.java
@@ -19,9 +19,9 @@ class Connection extends Target {
private static final Logger log = Logger.getLogger(Connection.class.getName());
- private static final int READ_SIZE = 32768;
+ private static final int READ_SIZE = 16*1024;
private static final int READ_REDO = 10;
- private static final int WRITE_SIZE = 32768;
+ private static final int WRITE_SIZE = 16*1024;
private static final int WRITE_REDO = 10;
private static final int INITIAL = 0;
@@ -32,11 +32,11 @@ class Connection extends Target {
private int state = INITIAL;
private final Queue queue = new Queue();
private final Queue myQueue = new Queue();
- private final Buffer input = new Buffer(0x1000); // Start off with small buffer.
- private final Buffer output = new Buffer(0x1000); // Start off with small buffer.
- private int maxInputSize = 64*1024;
- private int maxOutputSize = 64*1024;
- private boolean dropEmptyBuffers = false;
+ private final Buffer input = new Buffer(0); // Start off with empty buffer.
+ private final Buffer output = new Buffer(0); // Start off with empty buffer.
+ private final int maxInputSize;
+ private final int maxOutputSize;
+ private final boolean dropEmptyBuffers;
private final boolean tcpNoDelay;
private final Map<Integer, ReplyHandler> replyMap = new HashMap<>();
private final Map<TargetWatcher, TargetWatcher> watchers = new IdentityHashMap<>();
@@ -98,6 +98,9 @@ class Connection extends Target {
this.socket = parent.transport().createServerCryptoSocket(channel);
this.spec = null;
this.tcpNoDelay = tcpNoDelay;
+ maxInputSize = owner.getMaxInputBufferSize();
+ maxOutputSize = owner.getMaxOutputBufferSize();
+ dropEmptyBuffers = owner.getDropEmptyBuffers();
server = true;
owner.sessionInit(this);
}
@@ -108,22 +111,13 @@ class Connection extends Target {
this.owner = owner;
this.spec = spec;
this.tcpNoDelay = tcpNoDelay;
+ maxInputSize = owner.getMaxInputBufferSize();
+ maxOutputSize = owner.getMaxOutputBufferSize();
+ dropEmptyBuffers = owner.getDropEmptyBuffers();
server = false;
owner.sessionInit(this);
}
- public void setMaxInputSize(int bytes) {
- maxInputSize = bytes;
- }
-
- public void setMaxOutputSize(int bytes) {
- maxOutputSize = bytes;
- }
-
- public void setDropEmptyBuffers(boolean value) {
- dropEmptyBuffers = value;
- }
-
public TransportThread transportThread() {
return parent;
}
@@ -235,7 +229,7 @@ class Connection extends Target {
readSize = socket.getMinimumReadBufferSize();
}
setState(CONNECTED);
- while (socket.drain(input.getChannelWritable(readSize)) > 0) {
+ while (socket.drain(input.getWritable(readSize)) > 0) {
handlePackets();
}
break;
@@ -302,14 +296,14 @@ class Connection extends Target {
private void read() throws IOException {
boolean doneRead = false;
for (int i = 0; !doneRead && i < READ_REDO; i++) {
- ByteBuffer wb = input.getChannelWritable(readSize);
+ ByteBuffer wb = input.getWritable(readSize);
if (socket.read(wb) == -1) {
throw new IOException("jrt: Connection closed by peer");
}
doneRead = (wb.remaining() > 0);
handlePackets();
}
- while (socket.drain(input.getChannelWritable(readSize)) > 0) {
+ while (socket.drain(input.getWritable(readSize)) > 0) {
handlePackets();
}
if (dropEmptyBuffers) {
@@ -346,7 +340,7 @@ class Connection extends Target {
owner.writePacket(info);
info.encodePacket(packet, wb);
}
- ByteBuffer rb = output.getChannelReadable();
+ ByteBuffer rb = output.getReadable();
if (rb.remaining() == 0) {
break;
}
diff --git a/jrt/src/com/yahoo/jrt/Supervisor.java b/jrt/src/com/yahoo/jrt/Supervisor.java
index d7c2c83ea69..b82664b2f56 100644
--- a/jrt/src/com/yahoo/jrt/Supervisor.java
+++ b/jrt/src/com/yahoo/jrt/Supervisor.java
@@ -21,8 +21,8 @@ public class Supervisor {
private SessionHandler sessionHandler = null;
private final Object methodMapLock = new Object();
private final AtomicReference<HashMap<String, Method>> methodMap = new AtomicReference<>(new HashMap<>());
- private int maxInputBufferSize = 0;
- private int maxOutputBufferSize = 0;
+ private int maxInputBufferSize = 64*1024;
+ private int maxOutputBufferSize = 64*1024;
private boolean dropEmptyBuffers = false;
/**
@@ -37,16 +37,6 @@ public class Supervisor {
}
/**
- * Will optimize buffers size for small memory footprint
- * Use this when you have many connections with very little traffic.
- **/
- public Supervisor useSmallBuffers() {
- setMaxInputBufferSize(SMALL_INPUT_BUFFER_SIZE);
- setMaxOutputBufferSize(SMALL_OUTPUT_BUFFER_SIZE);
- return this;
- }
-
- /**
* Drop empty buffers. This will reduce memory footprint for idle
* connections at the cost of extra allocations when buffer space
* is needed again.
@@ -57,6 +47,7 @@ public class Supervisor {
dropEmptyBuffers = value;
return this;
}
+ boolean getDropEmptyBuffers() { return dropEmptyBuffers; }
/**
* Set maximum input buffer size. This value will only affect
@@ -71,6 +62,7 @@ public class Supervisor {
public void setMaxInputBufferSize(int bytes) {
maxInputBufferSize = bytes;
}
+ int getMaxInputBufferSize() { return maxInputBufferSize; }
/**
* Set maximum output buffer size. This value will only affect
@@ -85,6 +77,7 @@ public class Supervisor {
public void setMaxOutputBufferSize(int bytes) {
maxOutputBufferSize = bytes;
}
+ int getMaxOutputBufferSize() { return maxOutputBufferSize; }
/**
* Obtain the method map for this Supervisor
@@ -202,12 +195,6 @@ public class Supervisor {
* @param target the target
**/
void sessionInit(Target target) {
- if (target instanceof Connection) {
- Connection conn = (Connection) target;
- conn.setMaxInputSize(maxInputBufferSize);
- conn.setMaxOutputSize(maxOutputBufferSize);
- conn.setDropEmptyBuffers(dropEmptyBuffers);
- }
SessionHandler handler = sessionHandler;
if (handler != null) {
handler.handleSessionInit(target);
diff --git a/jrt/src/com/yahoo/jrt/TlsCryptoSocket.java b/jrt/src/com/yahoo/jrt/TlsCryptoSocket.java
index 7ba83d6718e..09bb584c983 100644
--- a/jrt/src/com/yahoo/jrt/TlsCryptoSocket.java
+++ b/jrt/src/com/yahoo/jrt/TlsCryptoSocket.java
@@ -51,11 +51,13 @@ public class TlsCryptoSocket implements CryptoSocket {
public TlsCryptoSocket(SocketChannel channel, SSLEngine sslEngine) {
this.channel = channel;
this.sslEngine = sslEngine;
+ this.wrapBuffer = new Buffer(0);
+ this.unwrapBuffer = new Buffer(0);
SSLSession nullSession = sslEngine.getSession();
- this.wrapBuffer = new Buffer(Math.max(0x8000, nullSession.getPacketBufferSize()));
- this.unwrapBuffer = new Buffer(Math.max(0x8000, nullSession.getPacketBufferSize()));
+ sessionApplicationBufferSize = nullSession.getApplicationBufferSize();
+ sessionPacketBufferSize = nullSession.getPacketBufferSize();
// Note: Dummy buffer as unwrap requires a full size application buffer even though no application data is unwrapped
- this.handshakeDummyBuffer = ByteBuffer.allocate(nullSession.getApplicationBufferSize());
+ this.handshakeDummyBuffer = ByteBuffer.allocate(sessionApplicationBufferSize);
this.handshakeState = HandshakeState.NOT_STARTED;
log.fine(() -> "Initialized with " + sslEngine.toString());
}
diff --git a/jrt/src/com/yahoo/jrt/slobrok/server/Slobrok.java b/jrt/src/com/yahoo/jrt/slobrok/server/Slobrok.java
index 854cd973e4d..6d66a38406a 100644
--- a/jrt/src/com/yahoo/jrt/slobrok/server/Slobrok.java
+++ b/jrt/src/com/yahoo/jrt/slobrok/server/Slobrok.java
@@ -39,7 +39,7 @@ public class Slobrok {
public Slobrok(int port) throws ListenFailedException {
// NB: rpc must be single-threaded
- orb = new Supervisor(new Transport("slobrok-" + port, 1)).useSmallBuffers();
+ orb = new Supervisor(new Transport("slobrok-" + port, 1)).setDropEmptyBuffers(true);
registerMethods();
try {
listener = orb.listen(new Spec(port));
diff --git a/jrt/tests/com/yahoo/jrt/BufferTest.java b/jrt/tests/com/yahoo/jrt/BufferTest.java
index 7f3145365d9..10f1fbc17d3 100644
--- a/jrt/tests/com/yahoo/jrt/BufferTest.java
+++ b/jrt/tests/com/yahoo/jrt/BufferTest.java
@@ -28,7 +28,7 @@ public class BufferTest {
@org.junit.Test
public void testBuffer() {
- int size = Buffer.MAX_IO + (Buffer.MAX_IO / 10);
+ int size = 70*1024;
Buffer buf = new Buffer(1024);
ByteBuffer b = null;
@@ -118,62 +118,6 @@ public class BufferTest {
}
@org.junit.Test
- public void testBufferMax() {
- int size = Buffer.MAX_IO + (Buffer.MAX_IO / 10);
- Buffer buf = new Buffer(1024);
- ByteBuffer b = null;
-
- byte[] x = new byte[size];
- byte[] y = new byte[size];
-
- Arrays.fill(x, (byte) 10);
- Arrays.fill(y, (byte) 55);
-
- assertEquals(buf.bytes(), 0);
- assertFalse(Arrays.equals(x, y));
-
- b = buf.getChannelWritable(size);
- assertEquals(b.remaining(), Buffer.MAX_IO);
- assertTrue(b.remaining() < size);
- assertEquals(buf.bytes(), 0);
- b.put(x, 0, Buffer.MAX_IO);
- assertEquals(buf.bytes(), Buffer.MAX_IO);
- assertEquals(b.remaining(), 0);
-
- b = buf.getChannelWritable(size - Buffer.MAX_IO);
- assertTrue(b.remaining() >= size - Buffer.MAX_IO);
- assertEquals(buf.bytes(), Buffer.MAX_IO);
- b.put(x, Buffer.MAX_IO, x.length - Buffer.MAX_IO);
- assertEquals(buf.bytes(), size);
-
- b = buf.getChannelReadable();
- assertEquals(buf.bytes(), size);
-
- b = buf.getChannelWritable(512);
- assertEquals(buf.bytes(), size);
- b.put((byte)42);
- assertEquals(buf.bytes(), size + 1);
-
- b = buf.getChannelReadable();
- assertEquals(buf.bytes(), size + 1);
- assertEquals(b.remaining(), Buffer.MAX_IO);
- b.get(y, 0, Buffer.MAX_IO);
- assertEquals(buf.bytes(), size - Buffer.MAX_IO + 1);
-
- b = buf.getChannelReadable();
- assertEquals(buf.bytes(), size - Buffer.MAX_IO + 1);
- assertEquals(b.remaining(), size - Buffer.MAX_IO + 1);
- b.get(y, Buffer.MAX_IO, y.length - Buffer.MAX_IO);
- assertEquals(buf.bytes(), 1);
- assertEquals(b.remaining(), 1);
- assertEquals(b.get(), 42);
- assertEquals(buf.bytes(), 0);
- assertEquals(b.remaining(), 0);
-
- assertTrue(Arrays.equals(x, y));
- }
-
- @org.junit.Test
public void testBufferShrink() {
Buffer buf = new Buffer(500);
ByteBuffer b = null;
diff --git a/linguistics/src/main/java/com/yahoo/language/process/Token.java b/linguistics/src/main/java/com/yahoo/language/process/Token.java
index 73c0ac857ab..70b78ef1a92 100644
--- a/linguistics/src/main/java/com/yahoo/language/process/Token.java
+++ b/linguistics/src/main/java/com/yahoo/language/process/Token.java
@@ -38,12 +38,12 @@ public interface Token {
TokenScript getScript();
/**
- * Returns token string in a form suitable for indexing: The
- * most lowercased variant of the most processed token form available.
+ * Returns the token string in a form suitable for indexing: The
+ * most lowercased variant of the most processed token form available.
* If called on a compound token this returns a lowercased form of the
* entire word.
- *
- * @return token string value
+ * If this is a special token with a configured replacement,
+ * this will return the replacement token.
*/
String getTokenString();
diff --git a/linguistics/src/main/java/com/yahoo/language/process/Tokenizer.java b/linguistics/src/main/java/com/yahoo/language/process/Tokenizer.java
index 7e61cd885a8..5be0a6fa635 100644
--- a/linguistics/src/main/java/com/yahoo/language/process/Tokenizer.java
+++ b/linguistics/src/main/java/com/yahoo/language/process/Tokenizer.java
@@ -23,16 +23,11 @@ public interface Tokenizer {
Iterable<Token> tokenize(String input, Language language, StemMode stemMode, boolean removeAccents);
/**
- * Return a replacement for an input token string.
- * This accepts strings returned by Token.getTokenString
- * and returns a replacement which will be used as the index token.
- * The input token string is returned if there is no replacement.
- * <p>
- * This default implementation always returns the input token string.
+ * Not used.
*
- * @param tokenString the token string of the term to lookup a replacement for
- * @return the replacement, if any, or the argument token string if not
+ * @deprecated replacements are already applied in tokens returned by tokenize
*/
+ @Deprecated // Remove on Vespa 8
default String getReplacementTerm(String tokenString) { return tokenString; }
}
diff --git a/linguistics/src/main/java/com/yahoo/language/simple/SimpleToken.java b/linguistics/src/main/java/com/yahoo/language/simple/SimpleToken.java
index 122b9b6dff6..7b63650fa94 100644
--- a/linguistics/src/main/java/com/yahoo/language/simple/SimpleToken.java
+++ b/linguistics/src/main/java/com/yahoo/language/simple/SimpleToken.java
@@ -25,6 +25,10 @@ public class SimpleToken implements Token {
this.orig = orig;
}
+ public SimpleToken(String orig, String tokenString) {
+ this.orig = orig;
+ }
+
@Override
public String getOrig() {
return orig;
diff --git a/messagebus/src/main/java/com/yahoo/messagebus/StaticThrottlePolicy.java b/messagebus/src/main/java/com/yahoo/messagebus/StaticThrottlePolicy.java
index aa1bc1ce624..52b92737bb9 100644
--- a/messagebus/src/main/java/com/yahoo/messagebus/StaticThrottlePolicy.java
+++ b/messagebus/src/main/java/com/yahoo/messagebus/StaticThrottlePolicy.java
@@ -7,7 +7,7 @@ package com.yahoo.messagebus;
* way of {@link #setMaxPendingCount(int)}), the total size of pending messages (by way of {@link
* #setMaxPendingSize(long)}), or some combination thereof.
*
- * <b>NOTE:</b> By context, "pending" is refering to the number of sent messages that have not been replied to yet.
+ * <b>NOTE:</b> By context, "pending" refers to the number of sent messages that have not been replied to yet.
*
* @author Simon Thoresen Hult
*/
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/rpc/RpcConnector.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/rpc/RpcConnector.java
index 9d93b440a1d..881ed19ce0c 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/rpc/RpcConnector.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/rpc/RpcConnector.java
@@ -27,7 +27,7 @@ public class RpcConnector extends AbstractComponent {
private final Acceptor acceptor;
public RpcConnector(RpcConnectorConfig config) {
- supervisor = new Supervisor(new Transport("rpc-" + config.port())).useSmallBuffers();
+ supervisor = new Supervisor(new Transport("rpc-" + config.port())).setDropEmptyBuffers(true);
Spec spec = new Spec(config.port());
try {
acceptor = supervisor.listen(spec);
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/ConfigSentinelClient.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/ConfigSentinelClient.java
index ab05e778ea6..d07a52f42bd 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/ConfigSentinelClient.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/ConfigSentinelClient.java
@@ -28,7 +28,7 @@ public class ConfigSentinelClient extends AbstractComponent {
@Inject
public ConfigSentinelClient() {
- supervisor = new Supervisor(new Transport("sentinel-client")).useSmallBuffers();
+ supervisor = new Supervisor(new Transport("sentinel-client")).setDropEmptyBuffers(true);
}
@Override
diff --git a/metrics/src/tests/summetrictest.cpp b/metrics/src/tests/summetrictest.cpp
index e3d58659daf..d0380a630f1 100644
--- a/metrics/src/tests/summetrictest.cpp
+++ b/metrics/src/tests/summetrictest.cpp
@@ -125,4 +125,45 @@ TEST(SumMetricTest, test_start_value)
EXPECT_EQ(int64_t(60), sum.getLongValue("value"));
}
+namespace {
+
+struct MetricSetWithSum : public MetricSet
+{
+ LongValueMetric _v1;
+ LongValueMetric _v2;
+ SumMetric<LongValueMetric> _sum;
+ MetricSetWithSum();
+ ~MetricSetWithSum() override;
+};
+
+MetricSetWithSum::MetricSetWithSum()
+ : MetricSet("MetricSetWithSum", {}, ""),
+ _v1("v1", {}, "", this),
+ _v2("v2", {}, "", this),
+ _sum("sum", {}, "", this)
+{
+ _sum.addMetricToSum(_v1);
+ _sum.addMetricToSum(_v2);
+}
+
+MetricSetWithSum::~MetricSetWithSum() = default;
+
+}
+
+TEST(SumMetricTest, test_nested_sum)
+{
+ MetricSetWithSum w1;
+ MetricSetWithSum w2;
+ MetricSetWithSum sum;
+ w1._v1.addValue(10);
+ w1._v2.addValue(13);
+ w2._v1.addValue(27);
+ w2._v2.addValue(29);
+ w1.addToPart(sum);
+ w2.addToPart(sum);
+ EXPECT_EQ(int64_t(37), sum._v1.getLongValue("value"));
+ EXPECT_EQ(int64_t(42), sum._v2.getLongValue("value"));
+ EXPECT_EQ(int64_t(79), sum._sum.getLongValue("value"));
+}
+
}
diff --git a/metrics/src/vespa/metrics/countmetric.h b/metrics/src/vespa/metrics/countmetric.h
index 02a6827d1ce..1701071104e 100644
--- a/metrics/src/vespa/metrics/countmetric.h
+++ b/metrics/src/vespa/metrics/countmetric.h
@@ -105,7 +105,7 @@ public:
void addToSnapshot(Metric&, std::vector<Metric::UP> &) const override;
};
-typedef CountMetric<uint64_t, true> LongCountMetric;
+using LongCountMetric = CountMetric<uint64_t, true>;
} // metrics
diff --git a/metrics/src/vespa/metrics/metric.cpp b/metrics/src/vespa/metrics/metric.cpp
index a8d8194b26d..50fc36c62cb 100644
--- a/metrics/src/vespa/metrics/metric.cpp
+++ b/metrics/src/vespa/metrics/metric.cpp
@@ -232,4 +232,11 @@ Metric::assignValues(const Metric& m) {
assert(ownerList.empty());
return this;
}
+
+bool
+Metric::is_sum_metric() const
+{
+ return false;
+}
+
} // metrics
diff --git a/metrics/src/vespa/metrics/metric.h b/metrics/src/vespa/metrics/metric.h
index 10b74a2da22..c8fb3031278 100644
--- a/metrics/src/vespa/metrics/metric.h
+++ b/metrics/src/vespa/metrics/metric.h
@@ -247,6 +247,8 @@ public:
virtual bool isMetricSet() const { return false; }
+ virtual bool is_sum_metric() const;
+
private:
/**
diff --git a/metrics/src/vespa/metrics/metricvalueset.h b/metrics/src/vespa/metrics/metricvalueset.h
index 2463990378e..c522876f5b1 100644
--- a/metrics/src/vespa/metrics/metricvalueset.h
+++ b/metrics/src/vespa/metrics/metricvalueset.h
@@ -76,12 +76,6 @@ public:
*/
bool setValues(const ValueClass& values);
- /**
- * Retrieve and reset in a single operation, to minimize chance of
- * alteration in the process.
- */
- ValueClass getValuesAndReset();
-
void reset() {
setFlag(RESET);
}
@@ -105,9 +99,6 @@ public:
_flags.store(_flags.load(std::memory_order_relaxed) & ~flags,
std::memory_order_relaxed);
}
- uint32_t getFlags() const {
- return _flags.load(std::memory_order_relaxed);
- }
};
} // metrics
diff --git a/metrics/src/vespa/metrics/metricvalueset.hpp b/metrics/src/vespa/metrics/metricvalueset.hpp
index 8c5b32afcf8..57b3e7f9901 100644
--- a/metrics/src/vespa/metrics/metricvalueset.hpp
+++ b/metrics/src/vespa/metrics/metricvalueset.hpp
@@ -70,14 +70,6 @@ MetricValueSet<ValueClass>::setValues(const ValueClass& values) {
}
template<typename ValueClass>
-ValueClass
-MetricValueSet<ValueClass>::getValuesAndReset() {
- ValueClass result(getValues());
- setFlag(RESET);
- return result;
-}
-
-template<typename ValueClass>
std::string
MetricValueSet<ValueClass>::toString() {
std::ostringstream ost;
diff --git a/metrics/src/vespa/metrics/summetric.h b/metrics/src/vespa/metrics/summetric.h
index f04c1696638..7b60c968e5b 100644
--- a/metrics/src/vespa/metrics/summetric.h
+++ b/metrics/src/vespa/metrics/summetric.h
@@ -69,6 +69,7 @@ public:
void printDebug(std::ostream&, const std::string& indent="") const override;
void addToPart(Metric&) const override;
void addToSnapshot(Metric&, std::vector<Metric::UP> &) const override;
+ bool is_sum_metric() const override;
private:
friend struct MetricManagerTest;
diff --git a/metrics/src/vespa/metrics/summetric.hpp b/metrics/src/vespa/metrics/summetric.hpp
index 9520456a974..e067b9643c2 100644
--- a/metrics/src/vespa/metrics/summetric.hpp
+++ b/metrics/src/vespa/metrics/summetric.hpp
@@ -142,8 +142,17 @@ template<typename AddendMetric>
void
SumMetric<AddendMetric>::addToPart(Metric& m) const
{
- std::pair<std::vector<Metric::UP>, Metric::UP> sum(generateSum());
- sum.second->addToPart(m);
+ if (!m.is_sum_metric()) {
+ std::pair<std::vector<Metric::UP>, Metric::UP> sum(generateSum());
+ sum.second->addToPart(m);
+ }
+}
+
+template<typename AddendMetric>
+bool
+SumMetric<AddendMetric>::is_sum_metric() const
+{
+ return true;
}
template<typename AddendMetric>
diff --git a/model-integration/src/test/java/ai/vespa/modelintegration/evaluator/OnnxEvaluatorTest.java b/model-integration/src/test/java/ai/vespa/modelintegration/evaluator/OnnxEvaluatorTest.java
index 4b42e18d75e..a7186aae5fe 100644
--- a/model-integration/src/test/java/ai/vespa/modelintegration/evaluator/OnnxEvaluatorTest.java
+++ b/model-integration/src/test/java/ai/vespa/modelintegration/evaluator/OnnxEvaluatorTest.java
@@ -74,7 +74,7 @@ public class OnnxEvaluatorTest {
assertEvaluate("cast_int8_float.onnx", "tensor<float>(d0[1]):[-128]", "tensor<int8>(d0[1]):[128]");
assertEvaluate("cast_float_int8.onnx", "tensor<int8>(d0[1]):[-1]", "tensor<float>(d0[1]):[255]");
- // ONNX Runtime 1.7.0 does not support much of bfloat16 yet
+ // ONNX Runtime 1.8.0 does not support much of bfloat16 yet
// assertEvaluate("cast_bfloat16_float.onnx", "tensor<float>(d0[1]):[1]", "tensor<bfloat16>(d0[1]):[1]");
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeReports.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeReports.java
index 70ce548916a..d3ab6464822 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeReports.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeReports.java
@@ -42,6 +42,8 @@ public class NodeReports {
reports.put(reportId, jsonNode);
}
+ public boolean hasReport(String reportId) { return reports.containsKey(reportId); }
+
public <T> Optional<T> getReport(String reportId, Class<T> jacksonClass) {
return Optional.ofNullable(reports.get(reportId)).map(r -> uncheck(() -> mapper.treeToValue(r, jacksonClass)));
}
@@ -75,6 +77,17 @@ public class NodeReports {
return new TreeMap<>(reports);
}
+ /** Applies the given override to this set of reports; a null value removes the report with that ID. */
+ public void updateFromRawMap(Map<String, JsonNode> override) {
+ override.forEach((reportId, jsonNode) -> {
+ if (jsonNode == null) {
+ reports.remove(reportId);
+ } else {
+ reports.put(reportId, jsonNode);
+ }
+ });
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) return true;
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java
index 7408041462c..fa1f8528b31 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java
@@ -604,7 +604,7 @@ public class NodeSpec {
attributes.getCurrentOsVersion().ifPresent(this::currentOsVersion);
attributes.getRebootGeneration().ifPresent(this::currentRebootGeneration);
attributes.getRestartGeneration().ifPresent(this::currentRestartGeneration);
- NodeReports.fromMap(attributes.getReports());
+ this.reports.updateFromRawMap(attributes.getReports());
return this;
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java
index ef9520969af..5d7ab48753f 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java
@@ -15,11 +15,13 @@ import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
import java.util.HashSet;
+import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;
import java.util.stream.Collectors;
+import java.util.stream.Stream;
/**
 * Administers a host (for now only docker hosts) and its nodes (docker container nodes).
@@ -130,7 +132,7 @@ public class NodeAdminImpl implements NodeAdmin {
}
// Use filter with count instead of allMatch() because allMatch() will short circuit on first non-match
- boolean allNodeAgentsConverged = nodeAgentWithSchedulerByHostname.values().parallelStream()
+ boolean allNodeAgentsConverged = parallelStreamOfNodeAgentWithScheduler()
.filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, freezeTimeout))
.count() == 0;
@@ -158,9 +160,7 @@ public class NodeAdminImpl implements NodeAdmin {
@Override
public void stopNodeAgentServices() {
// Each container may spend 1-1:30 minutes stopping
- nodeAgentWithSchedulerByHostname.values()
- .parallelStream()
- .forEach(NodeAgentWithScheduler::stopForHostSuspension);
+ parallelStreamOfNodeAgentWithScheduler().forEach(NodeAgentWithScheduler::stopForHostSuspension);
}
@Override
@@ -171,7 +171,18 @@ public class NodeAdminImpl implements NodeAdmin {
@Override
public void stop() {
// Stop all node-agents in parallel, will block until the last NodeAgent is stopped
- nodeAgentWithSchedulerByHostname.values().parallelStream().forEach(NodeAgentWithScheduler::stopForRemoval);
+ parallelStreamOfNodeAgentWithScheduler().forEach(NodeAgentWithScheduler::stopForRemoval);
+ }
+
+ /**
+ * Returns a parallel stream of NodeAgentWithScheduler.
+ *
+ * <p>Why not just call nodeAgentWithSchedulerByHostname.values().parallelStream()? Experiments
+ * with Java 11 have shown that with 10 nodes and forEach(), there are a maximum of 3 concurrent
+ * threads. With HashMap it produces 5. With List it produces 10 concurrent threads.</p>
+ */
+ private Stream<NodeAgentWithScheduler> parallelStreamOfNodeAgentWithScheduler() {
+ return List.copyOf(nodeAgentWithSchedulerByHostname.values()).parallelStream();
}
// Set-difference. Returns minuend minus subtrahend.
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
index df3f075e8d9..05c765c9d78 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.node.admin.nodeagent;
import com.yahoo.config.provision.ApplicationId;
@@ -357,7 +357,7 @@ public class NodeAgentImpl implements NodeAgent {
}
try {
- if (context.node().state() != NodeState.dirty) {
+ if (context.node().state() == NodeState.active) {
suspend(context);
}
stopServices(context);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
index d113ca68d01..f084b83bf97 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
@@ -109,7 +109,7 @@ public class NodeRepository extends AbstractComponent {
"dynamicProvisioning property must be 1-to-1 with availability of HostProvisioner, was: dynamicProvisioning=%s, hostProvisioner=%s",
zone.getCloud().dynamicProvisioning(), provisionServiceProvider.getHostProvisioner().map(__ -> "present").orElse("empty")));
- this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize);
+ this.db = new CuratorDatabaseClient(flavors, curator, clock, useCuratorClientCache, nodeCacheSize);
this.zone = zone;
this.clock = clock;
this.nodes = new Nodes(db, zone, clock);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Applications.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Applications.java
index ccd5af1cb64..fe363bf3786 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Applications.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Applications.java
@@ -2,8 +2,8 @@
package com.yahoo.vespa.hosted.provision.applications;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ApplicationLockException;
import com.yahoo.config.provision.ApplicationTransaction;
-import com.yahoo.config.provision.ProvisionLock;
import com.yahoo.transaction.Mutex;
import com.yahoo.transaction.NestedTransaction;
import com.yahoo.vespa.hosted.provision.persistence.CuratorDatabaseClient;
@@ -28,6 +28,8 @@ public class Applications {
for (ApplicationId id : ids()) {
try (Mutex lock = db.lock(id)) {
get(id).ifPresent(application -> put(application, lock));
+ } catch (ApplicationLockException e) {
+ throw new ApplicationLockException(e.getMessage()); // No need for stack trace here
}
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerService.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerService.java
index fba0993f2f9..40f9b330634 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerService.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerService.java
@@ -29,10 +29,7 @@ public interface LoadBalancerService {
Protocol protocol();
/** Returns whether load balancers created by this service can forward traffic to given node and cluster type */
- default boolean canForwardTo(NodeType nodeType, ClusterSpec.Type clusterType) {
- return (nodeType == NodeType.tenant && clusterType.isContainer()) ||
- nodeType.isConfigServerLike();
- }
+ boolean supports(NodeType nodeType, ClusterSpec.Type clusterType);
/** Load balancer protocols */
enum Protocol {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerServiceMock.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerServiceMock.java
index b912087da46..f752cbc4349 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerServiceMock.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerServiceMock.java
@@ -5,6 +5,7 @@ import com.google.common.collect.ImmutableSet;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.HostName;
+import com.yahoo.config.provision.NodeType;
import java.util.Collections;
import java.util.HashMap;
@@ -18,6 +19,7 @@ public class LoadBalancerServiceMock implements LoadBalancerService {
private final Map<LoadBalancerId, LoadBalancerInstance> instances = new HashMap<>();
private boolean throwOnCreate = false;
+ private boolean supportsProvisioning = true;
public Map<LoadBalancerId, LoadBalancerInstance> instances() {
return Collections.unmodifiableMap(instances);
@@ -28,6 +30,18 @@ public class LoadBalancerServiceMock implements LoadBalancerService {
return this;
}
+ public LoadBalancerServiceMock supportsProvisioning(boolean supportsProvisioning) {
+ this.supportsProvisioning = supportsProvisioning;
+ return this;
+ }
+
+ @Override
+ public boolean supports(NodeType nodeType, ClusterSpec.Type clusterType) {
+ if (!supportsProvisioning) return false;
+ return (nodeType == NodeType.tenant && clusterType.isContainer()) ||
+ nodeType.isConfigServerLike();
+ }
+
@Override
public Protocol protocol() {
return Protocol.ipv4;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/PassthroughLoadBalancerService.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/PassthroughLoadBalancerService.java
index 7667672e470..9a6a65eca69 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/PassthroughLoadBalancerService.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/PassthroughLoadBalancerService.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.provision.lb;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.NodeType;
import java.util.Comparator;
import java.util.Optional;
@@ -35,4 +36,9 @@ public class PassthroughLoadBalancerService implements LoadBalancerService {
return Protocol.ipv4;
}
+ @Override
+ public boolean supports(NodeType nodeType, ClusterSpec.Type clusterType) {
+ return true;
+ }
+
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java
index 33a3c138d70..e17e5a5a449 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java
@@ -66,7 +66,7 @@ public class SharedLoadBalancerService implements LoadBalancerService {
}
@Override
- public boolean canForwardTo(NodeType nodeType, ClusterSpec.Type clusterType) {
+ public boolean supports(NodeType nodeType, ClusterSpec.Type clusterType) {
// Shared routing layer only supports routing to tenant nodes
return nodeType == NodeType.tenant && clusterType.isContainer();
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainer.java
index 47337518a65..39183688340 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainer.java
@@ -20,19 +20,22 @@ import java.util.Set;
import java.util.stream.Collectors;
/**
- * The application maintainer detects manual operator changes to nodes and redeploys affected applications.
- * The purpose of this is to redeploy affected applications faster than achieved by the regular application
- * maintenance to reduce the time period where the node repository and the application model is out of sync.
+ * This maintainer detects changes to nodes that must be expedited, and redeploys affected applications.
+ *
+ * The purpose of this is to redeploy affected applications faster than achieved by
+ * {@link PeriodicApplicationMaintainer}, to reduce the time period where the node repository and the application model
+ * is out of sync.
*
* Why can't the manual change directly make the application redeployment?
- * Because the redeployment must run at the right config server, while the node state change may be running
- * at any config server.
+ *
+ * Because we want to queue redeployments to avoid overloading config servers.
*
* @author bratseth
+ * @author mpolden
*/
-public class OperatorChangeApplicationMaintainer extends ApplicationMaintainer {
+public class ExpeditedChangeApplicationMaintainer extends ApplicationMaintainer {
- OperatorChangeApplicationMaintainer(Deployer deployer, Metric metric, NodeRepository nodeRepository, Duration interval) {
+ ExpeditedChangeApplicationMaintainer(Deployer deployer, Metric metric, NodeRepository nodeRepository, Duration interval) {
super(deployer, metric, nodeRepository, interval);
}
@@ -57,7 +60,7 @@ public class OperatorChangeApplicationMaintainer extends ApplicationMaintainer {
boolean deployed = deployWithLock(application);
if (deployed)
log.info("Redeployed application " + application.toShortString() +
- " as a manual change was made to its nodes");
+ " as an expedited change was made to its nodes");
}
private boolean hasNodesWithChanges(ApplicationId applicationId, NodeList nodes) {
@@ -66,7 +69,7 @@ public class OperatorChangeApplicationMaintainer extends ApplicationMaintainer {
return nodes.stream()
.flatMap(node -> node.history().events().stream())
- .filter(event -> event.agent() == Agent.operator)
+ .filter(event -> expediteChangeBy(event.agent()))
.map(History.Event::at)
.anyMatch(e -> lastDeployTime.get().isBefore(e));
}
@@ -84,5 +87,14 @@ public class OperatorChangeApplicationMaintainer extends ApplicationMaintainer {
.groupingBy(node -> node.allocation().get().owner());
}
+ /** Returns whether to expedite changes performed by agent */
+ private boolean expediteChangeBy(Agent agent) {
+ switch (agent) {
+ case operator:
+ case RebuildingOsUpgrader:
+ case HostEncrypter: return true;
+ }
+ return false;
+ }
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostEncrypter.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostEncrypter.java
index 80f74a011c0..6d88e43630a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostEncrypter.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostEncrypter.java
@@ -48,6 +48,8 @@ public class HostEncrypter extends NodeRepositoryMaintainer {
NodeList allNodes = nodeRepository().nodes().list();
for (var nodeType : NodeType.values()) {
if (!nodeType.isHost()) continue;
+ // TODO: Require a minimum number of proxies in Orchestrator. For now skip proxy hosts.
+ if (nodeType == NodeType.proxyhost) continue;
if (upgradingVespa(allNodes, nodeType)) continue;
unencryptedHosts(allNodes, nodeType).forEach(host -> encrypt(host, now));
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java
index ac9d8d6671a..c3d6f5c42b8 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java
@@ -65,7 +65,7 @@ public class LoadBalancerExpirer extends NodeRepositoryMaintainer {
Instant now = nodeRepository().clock().instant();
Instant expiry = now.minus(reservedExpiry);
patchLoadBalancers(lb -> lb.state() == State.reserved && lb.changedAt().isBefore(expiry),
- lb -> db.writeLoadBalancer(lb.with(State.inactive, now)));
+ lb -> db.writeLoadBalancer(lb.with(State.inactive, now), lb.state()));
}
/** Deprovision inactive load balancers that have expired */
@@ -114,7 +114,7 @@ public class LoadBalancerExpirer extends NodeRepositoryMaintainer {
attempts.add(1);
LOG.log(Level.INFO, () -> "Removing reals from inactive load balancer " + lb.id() + ": " + Sets.difference(lb.instance().get().reals(), reals));
service.create(new LoadBalancerSpec(lb.id().application(), lb.id().cluster(), reals), true);
- db.writeLoadBalancer(lb.with(lb.instance().map(instance -> instance.withReals(reals))));
+ db.writeLoadBalancer(lb.with(lb.instance().map(instance -> instance.withReals(reals))), lb.state());
} catch (Exception e) {
failed.add(lb.id());
lastException.set(e);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java
index 3990c5099eb..d8bbf305b57 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java
@@ -27,6 +27,7 @@ import com.yahoo.vespa.service.monitor.ServiceModel;
import com.yahoo.vespa.service.monitor.ServiceMonitor;
import java.time.Duration;
+import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -66,7 +67,9 @@ public class MetricsReporter extends NodeRepositoryMaintainer {
@Override
public double maintain() {
- NodeList nodes = nodeRepository().nodes().list();
+ // Sort by hostname to get deterministic metric reporting order (and hopefully avoid changes
+ // to metric reporting time that cause double reporting or no reporting within a minute)
+ NodeList nodes = nodeRepository().nodes().list().sortedBy(Comparator.comparing(Node::hostname));
ServiceModel serviceModel = serviceMonitor.getServiceModelSnapshot();
updateZoneMetrics();
@@ -200,6 +203,11 @@ public class MetricsReporter extends NodeRepositoryMaintainer {
metric.set("wantToDeprovision", node.status().wantToDeprovision() ? 1 : 0, context);
metric.set("failReport", NodeFailer.reasonsToFailParentHost(node).isEmpty() ? 0 : 1, context);
+ if (node.type().isHost()) {
+ metric.set("wantToEncrypt", node.reports().getReport("wantToEncrypt").isPresent() ? 1 : 0, context);
+ metric.set("diskEncrypted", node.reports().getReport("diskEncrypted").isPresent() ? 1 : 0, context);
+ }
+
HostName hostname = new HostName(node.hostname());
serviceModel.getApplication(hostname)
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
index 44ee8b5a8b3..96373bd764f 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
@@ -48,7 +48,7 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
maintainers.add(new NodeFailer(deployer, nodeRepository, defaults.failGrace, defaults.nodeFailerInterval, orchestrator, defaults.throttlePolicy, metric));
maintainers.add(new NodeHealthTracker(hostLivenessTracker, serviceMonitor, nodeRepository, defaults.nodeFailureStatusUpdateInterval, metric));
- maintainers.add(new OperatorChangeApplicationMaintainer(deployer, metric, nodeRepository, defaults.operatorChangeRedeployInterval));
+ maintainers.add(new ExpeditedChangeApplicationMaintainer(deployer, metric, nodeRepository, defaults.expeditedChangeRedeployInterval));
maintainers.add(new ReservationExpirer(nodeRepository, defaults.reservationExpiry, metric));
maintainers.add(new RetiredExpirer(nodeRepository, orchestrator, deployer, metric, defaults.retiredInterval, defaults.retiredExpiry));
maintainers.add(new InactiveExpirer(nodeRepository, defaults.inactiveExpiry, Map.of(NodeType.config, defaults.inactiveConfigServerExpiry,
@@ -91,7 +91,7 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
/** Time between each run of maintainer that does periodic redeployment */
private final Duration redeployMaintainerInterval;
/** Applications are redeployed after manual operator changes within this time period */
- private final Duration operatorChangeRedeployInterval;
+ private final Duration expeditedChangeRedeployInterval;
/** The time a node must be continuously unresponsive before it is failed */
private final Duration failGrace;
@@ -133,7 +133,7 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
nodeFailerInterval = Duration.ofMinutes(15);
nodeFailureStatusUpdateInterval = Duration.ofMinutes(2);
nodeMetricsCollectionInterval = Duration.ofMinutes(1);
- operatorChangeRedeployInterval = Duration.ofMinutes(3);
+ expeditedChangeRedeployInterval = Duration.ofMinutes(3);
// Vespa upgrade frequency is higher in CD so (de)activate OS upgrades more frequently as well
osUpgradeActivatorInterval = zone.system().isCd() ? Duration.ofSeconds(30) : Duration.ofMinutes(5);
periodicRedeployInterval = Duration.ofMinutes(60);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirer.java
index 856d534bbd2..76c8210338e 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirer.java
@@ -4,15 +4,18 @@ package com.yahoo.vespa.hosted.provision.maintenance;
import com.yahoo.config.provision.NodeType;
import com.yahoo.jdisc.Metric;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.History;
import java.time.Duration;
+import java.time.Instant;
import java.util.List;
/**
* This moves nodes of type {@link NodeType#host} from provisioned to parked if they have been in provisioned too long.
+ * Parked hosts are deprovisioned as well, if too many hosts are being expired.
*
* Only {@link NodeType#host} is moved because any number of nodes of that type can exist. Other node types such as
* {@link NodeType#confighost} have a fixed number and thus cannot be replaced while the fixed number of nodes exist in
@@ -22,17 +25,40 @@ import java.util.List;
*/
public class ProvisionedExpirer extends Expirer {
+ private final NodeRepository nodeRepository;
+ private static final int MAXIMUM_ALLOWED_EXPIRED_HOSTS = 20;
+
ProvisionedExpirer(NodeRepository nodeRepository, Duration dirtyTimeout, Metric metric) {
super(Node.State.provisioned, History.Event.Type.provisioned, nodeRepository, dirtyTimeout, metric);
+ this.nodeRepository = nodeRepository;
}
@Override
protected void expire(List<Node> expired) {
+ int previouslyExpired = numberOfPreviouslyExpired();
for (Node expiredNode : expired) {
- if (expiredNode.type() == NodeType.host) {
- nodeRepository().nodes().parkRecursively(expiredNode.hostname(), Agent.ProvisionedExpirer, "Node is stuck in provisioned");
+ if (expiredNode.type() != NodeType.host)
+ continue;
+ nodeRepository().nodes().parkRecursively(expiredNode.hostname(), Agent.ProvisionedExpirer, "Node is stuck in provisioned");
+ if (MAXIMUM_ALLOWED_EXPIRED_HOSTS < ++previouslyExpired) {
+ nodeRepository.nodes().deprovision(expiredNode.hostname(), Agent.ProvisionedExpirer, nodeRepository.clock().instant());
}
}
}
+ private int numberOfPreviouslyExpired() {
+ return nodeRepository.nodes()
+ .list(Node.State.parked)
+ .nodeType(NodeType.host)
+ .matching(this::parkedByProvisionedExpirer)
+ .not().deprovisioning()
+ .size();
+ }
+
+ private boolean parkedByProvisionedExpirer(Node node) {
+ return node.history().event(History.Event.Type.parked)
+ .map(History.Event::agent)
+ .map(Agent.ProvisionedExpirer::equals)
+ .orElse(false);
+ }
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Agent.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Agent.java
index d1c3f00ddca..ed82470fa42 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Agent.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Agent.java
@@ -21,6 +21,7 @@ public enum Agent {
InactiveExpirer,
ProvisionedExpirer,
ReservationExpirer,
+ ParkedExpirer,
DynamicProvisioningMaintainer,
RetiringUpgrader,
RebuildingOsUpgrader,
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
index 3afe5824af5..ec1bfba6996 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
@@ -161,6 +161,14 @@ public class Nodes {
node = node.with(node.status().withFailCount(existing.get().status().failCount()));
if (existing.get().status().firmwareVerifiedAt().isPresent())
node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
+ // Preserve wantToRebuild/wantToRetire when rebuilding as the fields shouldn't be cleared until the
+ // host is readied (i.e. we know it is up and rebuild completed)
+ boolean rebuilding = existing.get().status().wantToRebuild();
+ if (rebuilding) {
+ node = node.with(node.status().withWantToRetire(existing.get().status().wantToRetire(),
+ false,
+ rebuilding));
+ }
nodesToRemove.add(existing.get());
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java
index d1f881f8b7a..0205cc6c818 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java
@@ -11,7 +11,6 @@ import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.TenantName;
-import com.yahoo.config.provision.Zone;
import com.yahoo.path.Path;
import com.yahoo.transaction.NestedTransaction;
import com.yahoo.transaction.Transaction;
@@ -72,18 +71,15 @@ public class CuratorDatabaseClient {
private static final Path firmwareCheckPath = root.append("firmwareCheck");
private static final Path archiveUrisPath = root.append("archiveUris");
- private static final Duration defaultLockTimeout = Duration.ofMinutes(6);
+ private static final Duration defaultLockTimeout = Duration.ofMinutes(10);
private final NodeSerializer nodeSerializer;
private final CuratorDatabase db;
private final Clock clock;
- private final Zone zone;
private final CuratorCounter provisionIndexCounter;
- public CuratorDatabaseClient(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, boolean useCache,
- long nodeCacheSize) {
+ public CuratorDatabaseClient(NodeFlavors flavors, Curator curator, Clock clock, boolean useCache, long nodeCacheSize) {
this.nodeSerializer = new NodeSerializer(flavors, nodeCacheSize);
- this.zone = zone;
this.db = new CuratorDatabase(curator, root, useCache);
this.clock = clock;
this.provisionIndexCounter = new CuratorCounter(curator, root.append("provisionIndexCounter").getAbsolute());
@@ -486,18 +482,29 @@ public class CuratorDatabaseClient {
return read(loadBalancerPath(id), LoadBalancerSerializer::fromJson);
}
- public void writeLoadBalancer(LoadBalancer loadBalancer) {
+ public void writeLoadBalancer(LoadBalancer loadBalancer, LoadBalancer.State fromState) {
NestedTransaction transaction = new NestedTransaction();
- writeLoadBalancers(List.of(loadBalancer), transaction);
+ writeLoadBalancers(List.of(loadBalancer), fromState, transaction);
transaction.commit();
}
- public void writeLoadBalancers(Collection<LoadBalancer> loadBalancers, NestedTransaction transaction) {
+ public void writeLoadBalancers(Collection<LoadBalancer> loadBalancers, LoadBalancer.State fromState, NestedTransaction transaction) {
CuratorTransaction curatorTransaction = db.newCuratorTransactionIn(transaction);
loadBalancers.forEach(loadBalancer -> {
curatorTransaction.add(createOrSet(loadBalancerPath(loadBalancer.id()),
LoadBalancerSerializer.toJson(loadBalancer)));
});
+ transaction.onCommitted(() -> {
+ for (var lb : loadBalancers) {
+ if (lb.state() == fromState) continue;
+ if (fromState == null) {
+ log.log(Level.INFO, () -> "Creating " + lb.id() + " in " + lb.state());
+ } else {
+ log.log(Level.INFO, () -> "Moving " + lb.id() + " from " + fromState +
+ " to " + lb.state());
+ }
+ }
+ });
}
public void removeLoadBalancer(LoadBalancerId loadBalancer) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
index d83f21e5fec..dff4a66bd42 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
@@ -482,6 +482,7 @@ public class NodeSerializer {
case "SpareCapacityMaintainer": return Agent.SpareCapacityMaintainer;
case "SwitchRebalancer": return Agent.SwitchRebalancer;
case "HostEncrypter": return Agent.HostEncrypter;
+ case "ParkedExpirer": return Agent.ParkedExpirer;
}
throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'");
}
@@ -504,6 +505,7 @@ public class NodeSerializer {
case SpareCapacityMaintainer: return "SpareCapacityMaintainer";
case SwitchRebalancer: return "SwitchRebalancer";
case HostEncrypter: return "HostEncrypter";
+ case ParkedExpirer: return "ParkedExpirer";
}
throw new IllegalArgumentException("Serialized form of '" + agent + "' not defined");
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
index c114aa58a05..1a01fac247e 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
@@ -7,7 +7,6 @@ import com.yahoo.config.provision.ApplicationTransaction;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.NodeType;
-import com.yahoo.config.provision.ProvisionLock;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.exception.LoadBalancerServiceException;
import com.yahoo.transaction.NestedTransaction;
@@ -61,7 +60,7 @@ public class LoadBalancerProvisioner {
for (var id : db.readLoadBalancerIds()) {
try (var lock = db.lock(id.application())) {
var loadBalancer = db.readLoadBalancer(id);
- loadBalancer.ifPresent(db::writeLoadBalancer);
+ loadBalancer.ifPresent(lb -> db.writeLoadBalancer(lb, lb.state()));
}
}
}
@@ -77,15 +76,12 @@ public class LoadBalancerProvisioner {
* Calling this for irrelevant node or cluster types is a no-op.
*/
public void prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
- if (!service.canForwardTo(requestedNodes.type(), cluster.type())) return; // Nothing to provision for this node and cluster type
- if (application.instance().isTester()) return; // Do not provision for tester instances
+ if (!shouldProvision(application, requestedNodes.type(), cluster.type())) return;
try (var lock = db.lock(application)) {
ClusterSpec.Id clusterId = effectiveId(cluster);
- NodeList nodes = nodesOf(clusterId, application);
LoadBalancerId loadBalancerId = requireNonClashing(new LoadBalancerId(application, clusterId));
- ApplicationTransaction transaction = new ApplicationTransaction(new ProvisionLock(application, lock), new NestedTransaction());
- provision(transaction, loadBalancerId, nodes, false);
- transaction.nested().commit();
+ NodeList nodes = nodesOf(clusterId, application);
+ prepare(loadBalancerId, nodes);
}
}
@@ -100,14 +96,18 @@ public class LoadBalancerProvisioner {
* Calling this when no load balancer has been prepared for given cluster is a no-op.
*/
public void activate(Set<ClusterSpec> clusters, ApplicationTransaction transaction) {
+ Set<ClusterSpec.Id> activatingClusters = clusters.stream()
+ .map(LoadBalancerProvisioner::effectiveId)
+ .collect(Collectors.toSet());
for (var cluster : loadBalancedClustersOf(transaction.application()).entrySet()) {
- // Provision again to ensure that load balancer instance is re-configured with correct nodes
- provision(transaction, cluster.getKey(), cluster.getValue());
+ if (!activatingClusters.contains(cluster.getKey())) continue;
+
+ Node clusterNode = cluster.getValue().first().get();
+ if (!shouldProvision(transaction.application(), clusterNode.type(), clusterNode.allocation().get().membership().cluster().type())) continue;
+ activate(transaction, cluster.getKey(), cluster.getValue());
}
// Deactivate any surplus load balancers, i.e. load balancers for clusters that have been removed
- var surplusLoadBalancers = surplusLoadBalancersOf(transaction.application(), clusters.stream()
- .map(LoadBalancerProvisioner::effectiveId)
- .collect(Collectors.toSet()));
+ var surplusLoadBalancers = surplusLoadBalancersOf(transaction.application(), activatingClusters);
deactivate(surplusLoadBalancers, transaction.nested());
}
@@ -119,6 +119,13 @@ public class LoadBalancerProvisioner {
deactivate(nodeRepository.loadBalancers().list(transaction.application()).asList(), transaction.nested());
}
+ /** Returns whether to provision a load balancer for given application */
+ private boolean shouldProvision(ApplicationId application, NodeType nodeType, ClusterSpec.Type clusterType) {
+ if (application.instance().isTester()) return false; // Do not provision for tester instances
+ if (!service.supports(nodeType, clusterType)) return false; // Nothing to provision for this node and cluster type
+ return true;
+ }
+
/** Returns load balancers of given application that are no longer referenced by given clusters */
private List<LoadBalancer> surplusLoadBalancersOf(ApplicationId application, Set<ClusterSpec.Id> activeClusters) {
var activeLoadBalancersByCluster = nodeRepository.loadBalancers().list(application)
@@ -140,7 +147,7 @@ public class LoadBalancerProvisioner {
var deactivatedLoadBalancers = loadBalancers.stream()
.map(lb -> lb.with(LoadBalancer.State.inactive, now))
.collect(Collectors.toList());
- db.writeLoadBalancers(deactivatedLoadBalancers, transaction);
+ db.writeLoadBalancers(deactivatedLoadBalancers, LoadBalancer.State.active, transaction);
}
/** Find all load balancer IDs owned by given tenant and application */
@@ -165,52 +172,41 @@ public class LoadBalancerProvisioner {
return loadBalancerId;
}
- /** Idempotently provision a load balancer for given application and cluster */
- private void provision(ApplicationTransaction transaction, LoadBalancerId id, NodeList nodes, boolean activate) {
+ private void prepare(LoadBalancerId id, NodeList nodes) {
Instant now = nodeRepository.clock().instant();
Optional<LoadBalancer> loadBalancer = db.readLoadBalancer(id);
- if (loadBalancer.isEmpty() && activate) return; // Nothing to activate as this load balancer was never prepared
-
- Set<Real> reals = realsOf(nodes);
- Optional<LoadBalancerInstance> instance = provisionInstance(id, reals, loadBalancer);
+ Optional<LoadBalancerInstance> instance = provisionInstance(id, nodes, loadBalancer);
LoadBalancer newLoadBalancer;
+ LoadBalancer.State fromState = null;
if (loadBalancer.isEmpty()) {
newLoadBalancer = new LoadBalancer(id, instance, LoadBalancer.State.reserved, now);
} else {
- LoadBalancer.State state = activate && instance.isPresent()
- ? LoadBalancer.State.active
- : loadBalancer.get().state();
- newLoadBalancer = loadBalancer.get().with(instance).with(state, now);
- if (loadBalancer.get().state() != newLoadBalancer.state()) {
- log.log(Level.INFO, () -> "Moving " + newLoadBalancer.id() + " from " + loadBalancer.get().state() +
- " to " + newLoadBalancer.state());
- }
- }
-
- if (activate) {
- db.writeLoadBalancers(List.of(newLoadBalancer), transaction.nested());
- } else {
- // Always store load balancer so that LoadBalancerExpirer can expire partially provisioned load balancers
- db.writeLoadBalancer(newLoadBalancer);
- }
-
- // Signal that load balancer is not ready yet
- if (instance.isEmpty()) {
- throw new LoadBalancerServiceException("Could not (re)configure " + id + ", targeting: " +
- reals + ". The operation will be retried on next deployment",
- null);
+ newLoadBalancer = loadBalancer.get().with(instance);
+ fromState = newLoadBalancer.state();
}
+ // Always store load balancer so that LoadBalancerExpirer can expire partially provisioned load balancers
+ db.writeLoadBalancer(newLoadBalancer, fromState);
+ requireInstance(id, instance);
}
- private void provision(ApplicationTransaction transaction, ClusterSpec.Id clusterId, NodeList nodes) {
- provision(transaction, new LoadBalancerId(transaction.application(), clusterId), nodes, true);
+ private void activate(ApplicationTransaction transaction, ClusterSpec.Id cluster, NodeList nodes) {
+ Instant now = nodeRepository.clock().instant();
+ LoadBalancerId id = new LoadBalancerId(transaction.application(), cluster);
+ Optional<LoadBalancer> loadBalancer = db.readLoadBalancer(id);
+ if (loadBalancer.isEmpty()) throw new IllegalArgumentException("Could not activate load balancer that was never prepared: " + id);
+
+ Optional<LoadBalancerInstance> instance = provisionInstance(id, nodes, loadBalancer);
+ LoadBalancer.State state = instance.isPresent() ? LoadBalancer.State.active : loadBalancer.get().state();
+ LoadBalancer newLoadBalancer = loadBalancer.get().with(instance).with(state, now);
+ db.writeLoadBalancers(List.of(newLoadBalancer), loadBalancer.get().state(), transaction.nested());
+ requireInstance(id, instance);
}
/** Provision or reconfigure a load balancer instance, if necessary */
- private Optional<LoadBalancerInstance> provisionInstance(LoadBalancerId id, Set<Real> reals,
- Optional<LoadBalancer> currentLoadBalancer) {
+ private Optional<LoadBalancerInstance> provisionInstance(LoadBalancerId id, NodeList nodes, Optional<LoadBalancer> currentLoadBalancer) {
+ Set<Real> reals = realsOf(nodes);
if (hasReals(currentLoadBalancer, reals)) return currentLoadBalancer.get().instance();
- log.log(Level.INFO, () -> "Creating " + id + ", targeting: " + reals);
+ log.log(Level.INFO, () -> "Provisioning instance for " + id + ", targeting: " + reals);
try {
return Optional.of(service.create(new LoadBalancerSpec(id.application(), id.cluster(), reals),
allowEmptyReals(currentLoadBalancer)));
@@ -241,7 +237,7 @@ public class LoadBalancerProvisioner {
/** Returns real servers for given nodes */
private Set<Real> realsOf(NodeList nodes) {
- var reals = new LinkedHashSet<Real>();
+ Set<Real> reals = new LinkedHashSet<Real>();
for (var node : nodes) {
for (var ip : reachableIpAddresses(node)) {
reals.add(new Real(HostName.from(node.hostname()), ip));
@@ -289,6 +285,14 @@ public class LoadBalancerProvisioner {
return reachable;
}
+ private static void requireInstance(LoadBalancerId id, Optional<LoadBalancerInstance> instance) {
+ if (instance.isEmpty()) {
+ // Signal that load balancer is not ready yet
+ throw new LoadBalancerServiceException("Could not (re)configure " + id + ". The operation will be retried on next deployment",
+ null);
+ }
+ }
+
private static ClusterSpec.Id effectiveId(ClusterSpec cluster) {
return cluster.combinedId().orElse(cluster.id());
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
index 4e74104c25b..d23b3c782c8 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
@@ -64,6 +64,7 @@ public class NodeResourceLimits {
private double minAdvertisedVcpu(ClusterSpec.Type clusterType) {
if (zone().environment() == Environment.dev && !zone().getCloud().dynamicProvisioning()) return 0.1;
+ if (clusterType.isContent() && zone().environment().isProduction()) return 1.0;
if (clusterType == ClusterSpec.Type.admin) return 0.1;
return 0.5;
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java
index 351f9fe44ee..4bfe01375c1 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java
@@ -197,8 +197,16 @@ public class NodeRepositoryTest {
}
tester.nodeRepository().nodes().removeRecursively("host1");
+ // Set host 2 properties and deprovision it
+ try (var lock = tester.nodeRepository().nodes().lockAndGetRequired("host2")) {
+ Node host2 = lock.node().withWantToRetire(true, false, true, Agent.system, tester.nodeRepository().clock().instant());
+ tester.nodeRepository().nodes().write(host2, lock);
+ }
+ tester.nodeRepository().nodes().removeRecursively("host2");
+
// Host 1 is deprovisioned and unwanted properties are cleared
Node host1 = tester.nodeRepository().nodes().node("host1").get();
+ Node host2 = tester.nodeRepository().nodes().node("host2").get();
assertEquals(Node.State.deprovisioned, host1.state());
assertTrue(host1.history().hasEventAfter(History.Event.Type.deprovisioned, testStart));
@@ -214,6 +222,8 @@ public class NodeRepositoryTest {
assertTrue("Transferred from deprovisioned host", host1.status().firmwareVerifiedAt().isPresent());
assertEquals("Transferred from deprovisioned host", 1, host1.status().failCount());
assertEquals("Transferred from deprovisioned host", 1, host1.reports().getReports().size());
+ assertTrue("Transferred from rebuilt host", host2.status().wantToRetire());
+ assertTrue("Transferred from rebuilt host", host2.status().wantToRebuild());
}
@Test
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainerTest.java
index db6aebacddc..62d09c99f16 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainerTest.java
@@ -26,7 +26,7 @@ import static org.junit.Assert.assertEquals;
/**
* @author bratseth
*/
-public class OperatorChangeApplicationMaintainerTest {
+public class ExpeditedChangeApplicationMaintainerTest {
@Test
public void test_application_maintenance() {
@@ -42,10 +42,10 @@ public class OperatorChangeApplicationMaintainerTest {
// Create applications
fixture.activate();
assertEquals("Initial applications are deployed", 3, fixture.deployer.redeployments);
- OperatorChangeApplicationMaintainer maintainer = new OperatorChangeApplicationMaintainer(fixture.deployer,
- new TestMetric(),
- nodeRepository,
- Duration.ofMinutes(1));
+ ExpeditedChangeApplicationMaintainer maintainer = new ExpeditedChangeApplicationMaintainer(fixture.deployer,
+ new TestMetric(),
+ nodeRepository,
+ Duration.ofMinutes(1));
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java
index 968427f0781..7fa6810c1ba 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java
@@ -43,6 +43,7 @@ import java.util.TreeMap;
import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
@@ -112,6 +113,8 @@ public class MetricsReporterTest {
expectedMetrics.put("wantToRetire", 0);
expectedMetrics.put("wantToDeprovision", 0);
expectedMetrics.put("failReport", 0);
+
+
expectedMetrics.put("allowedToBeDown", 1);
expectedMetrics.put("suspended", 1);
expectedMetrics.put("suspendedSeconds", 123L);
@@ -146,6 +149,27 @@ public class MetricsReporterTest {
assertEquals(expectedMetrics, new TreeMap<>(metric.values));
}
+ @Test
+ public void test_registered_metrics_for_host() {
+ NodeFlavors nodeFlavors = FlavorConfigBuilder.createDummies("default");
+ Orchestrator orchestrator = mock(Orchestrator.class);
+ when(orchestrator.getHostInfo(eq(reference), any())).thenReturn(
+ HostInfo.createSuspended(HostStatus.ALLOWED_TO_BE_DOWN, Instant.ofEpochSecond(1)));
+ ProvisioningTester tester = new ProvisioningTester.Builder().flavors(nodeFlavors.getFlavors()).orchestrator(orchestrator).build();
+ tester.makeProvisionedNodes(1, "default", NodeType.host, 0);
+
+ tester.clock().setInstant(Instant.ofEpochSecond(124));
+
+ TestMetric metric = new TestMetric();
+ MetricsReporter metricsReporter = metricsReporter(metric, tester);
+ metricsReporter.maintain();
+
+ // Only verify metrics that are set for hosts
+ TreeMap<String, Number> metrics = new TreeMap<>(metric.values);
+ assertTrue(metrics.containsKey("wantToEncrypt"));
+ assertTrue(metrics.containsKey("diskEncrypted"));
+ }
+
private void verifyAndRemoveIntegerMetricSum(TestMetric metric, String key, int expected) {
assertEquals(expected, (int) metric.sumNumberValues(key));
metric.remove(key);
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirerTest.java
new file mode 100644
index 00000000000..786faae24b4
--- /dev/null
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirerTest.java
@@ -0,0 +1,50 @@
+package com.yahoo.vespa.hosted.provision.maintenance;
+
+import com.yahoo.config.provision.Cloud;
+import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.Flavor;
+import com.yahoo.config.provision.NodeResources;
+import com.yahoo.config.provision.NodeType;
+import com.yahoo.config.provision.RegionName;
+import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.Zone;
+import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.node.Agent;
+import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
+import com.yahoo.vespa.hosted.provision.testutils.MockHostProvisioner;
+import org.junit.Test;
+
+import java.time.Duration;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import static org.junit.Assert.*;
+
+/**
+ * @author olaa
+ */
+public class ProvisionedExpirerTest {
+
+ private ProvisioningTester tester;
+
+ @Test
+ public void deprovisions_hosts_if_excessive_expiry() {
+ tester = new ProvisioningTester.Builder().build();
+ populateNodeRepo();
+
+ tester.clock().advance(Duration.ofMinutes(5));
+ new ProvisionedExpirer(tester.nodeRepository(), Duration.ofMinutes(4), new TestMetric()).maintain();
+
+ assertEquals(5, tester.nodeRepository().nodes().list().deprovisioning().size());
+ assertEquals(20, tester.nodeRepository().nodes().list().not().deprovisioning().size());
+ }
+
+ private void populateNodeRepo() {
+ var nodes = IntStream.range(0, 25)
+ .mapToObj(i -> Node.create("id-" + i, "host-" + i, new Flavor(NodeResources.unspecified()), Node.State.provisioned, NodeType.host).build())
+ .collect(Collectors.toList());
+ tester.nodeRepository().database().addNodesInState(nodes, Node.State.provisioned, Agent.system);
+ }
+
+}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClientTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClientTest.java
index f47fb7f23be..99f6ce4fb00 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClientTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClientTest.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.persistence;
import com.yahoo.config.provision.ApplicationId;
@@ -6,7 +6,6 @@ import com.yahoo.config.provision.ApplicationName;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.TenantName;
-import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.curator.mock.MockCurator;
import com.yahoo.vespa.hosted.provision.Node;
@@ -25,7 +24,7 @@ public class CuratorDatabaseClientTest {
private final Curator curator = new MockCurator();
private final CuratorDatabaseClient zkClient = new CuratorDatabaseClient(
- FlavorConfigBuilder.createDummies("default"), curator, Clock.systemUTC(), Zone.defaultZone(), true, 1000);
+ FlavorConfigBuilder.createDummies("default"), curator, Clock.systemUTC(), true, 1000);
@Test
public void can_read_stored_host_information() throws Exception {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningCompleteHostCalculatorTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningCompleteHostCalculatorTest.java
index 8b2febf37b1..afbd44a346f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningCompleteHostCalculatorTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningCompleteHostCalculatorTest.java
@@ -26,7 +26,7 @@ public class DockerProvisioningCompleteHostCalculatorTest {
@Test
public void changing_to_different_range_preserves_allocation() {
- Flavor hostFlavor = new Flavor(new NodeResources(20, 40, 1000, 4));
+ Flavor hostFlavor = new Flavor(new NodeResources(40, 40, 1000, 4));
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east")))
.resourcesCalculator(new CompleteResourcesCalculator(hostFlavor))
.flavors(List.of(hostFlavor))
@@ -36,25 +36,25 @@ public class DockerProvisioningCompleteHostCalculatorTest {
ApplicationId app1 = ProvisioningTester.applicationId("app1");
ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
- var initialResources = new NodeResources(2, 16, 50, 1);
+ var initialResources = new NodeResources(20, 16, 50, 1);
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, initialResources),
new ClusterResources(2, 1, initialResources)));
tester.assertNodes("Initial allocation",
- 2, 1, 2, 16, 50, 1.0,
+ 2, 1, 20, 16, 50, 1.0,
app1, cluster1);
- var newMinResources = new NodeResources(0.5, 4, 11, 1);
- var newMaxResources = new NodeResources(2.0, 10, 30, 1);
+ var newMinResources = new NodeResources( 5, 4, 11, 1);
+ var newMaxResources = new NodeResources(20, 10, 30, 1);
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(7, 1, newMinResources),
new ClusterResources(7, 1, newMaxResources)));
tester.assertNodes("New allocation preserves total resources",
- 7, 1, 0.7, 4.6, 14.3, 1.0,
+ 7, 1, 7, 4.6, 14.3, 1.0,
app1, cluster1);
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(7, 1, newMinResources),
new ClusterResources(7, 1, newMaxResources)));
tester.assertNodes("Redeploying the same ranges does not cause changes",
- 7, 1, 0.7, 4.6, 14.3, 1.0,
+ 7, 1, 7, 4.6, 14.3, 1.0,
app1, cluster1);
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
index f2ca993f4d7..fd8cf9ea00f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
@@ -295,7 +295,7 @@ public class DockerProvisioningTest {
@Test
public void changing_to_different_range_preserves_allocation() {
- Flavor hostFlavor = new Flavor(new NodeResources(20, 40, 100, 4));
+ Flavor hostFlavor = new Flavor(new NodeResources(40, 40, 100, 4));
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east")))
.resourcesCalculator(3, 0)
.flavors(List.of(hostFlavor))
@@ -305,25 +305,25 @@ public class DockerProvisioningTest {
ApplicationId app1 = ProvisioningTester.applicationId("app1");
ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
- var initialResources = new NodeResources(2, 16, 50, 1);
+ var initialResources = new NodeResources(20, 16, 50, 1);
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, initialResources),
new ClusterResources(2, 1, initialResources)));
tester.assertNodes("Initial allocation",
- 2, 1, 2, 16, 50, 1.0,
+ 2, 1, 20, 16, 50, 1.0,
app1, cluster1);
- var newMinResources = new NodeResources(0.5, 6, 11, 1);
- var newMaxResources = new NodeResources(2.0, 10, 30, 1);
+ var newMinResources = new NodeResources( 5, 6, 11, 1);
+ var newMaxResources = new NodeResources(20, 10, 30, 1);
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(7, 1, newMinResources),
new ClusterResources(7, 1, newMaxResources)));
tester.assertNodes("New allocation preserves total resources",
- 7, 1, 0.7, 6.7, 14.3, 1.0,
+ 7, 1, 7, 6.7, 14.3, 1.0,
app1, cluster1);
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(7, 1, newMinResources),
new ClusterResources(7, 1, newMaxResources)));
tester.assertNodes("Redeploying does not cause changes",
- 7, 1, 0.7, 6.7, 14.3, 1.0,
+ 7, 1, 7, 6.7, 14.3, 1.0,
app1, cluster1);
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
index f5cf9dbb471..029c9ffa559 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
@@ -286,7 +286,7 @@ public class DynamicDockerProvisionTest {
}
// Initial deployment
- tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 0.5, 5, 20),
+ tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 1, 5, 20),
resources(6, 3, 4, 20, 40)));
tester.assertNodes("Initial allocation at first actual flavor above min (except for disk)",
4, 2, 1, 10, 20,
@@ -316,7 +316,7 @@ public class DynamicDockerProvisionTest {
// Force 1 more groups: Reducing to 2 nodes per group to preserve node count is rejected
// since it will reduce total group memory from 60 to 40.
- tester.activate(app1, cluster1, Capacity.from(resources(6, 3, 0.5, 5, 10),
+ tester.activate(app1, cluster1, Capacity.from(resources(6, 3, 1, 5, 10),
resources(9, 3, 5, 20, 15)));
tester.assertNodes("Group size is preserved",
9, 3, 2, 20, 15,
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
index bdc3bdfd816..16fe5ef241a 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
@@ -300,6 +300,13 @@ public class LoadBalancerProvisionerTest {
assertTrue("Load balancer has instance", loadBalancers.get(0).instance().isPresent());
}
+ @Test
+ public void provisioning_load_balancer_for_unsupported_cluster_fails_gracefully() {
+ tester.loadBalancerService().supportsProvisioning(false);
+ tester.activate(app1, prepare(app1, clusterRequest(ClusterSpec.Type.container, ClusterSpec.Id.from("qrs"))));
+ assertTrue("No load balancer provisioned", tester.nodeRepository().loadBalancers().list(app1).asList().isEmpty());
+ }
+
private void dirtyNodesOf(ApplicationId application) {
tester.nodeRepository().nodes().deallocate(tester.nodeRepository().nodes().list().owner(application).asList(), Agent.system, this.getClass().getSimpleName());
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/maintenance.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/maintenance.json
index 2dcf2d0b838..72224ef3cba 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/maintenance.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/maintenance.json
@@ -7,6 +7,9 @@
"name": "DirtyExpirer"
},
{
+ "name": "ExpeditedChangeApplicationMaintainer"
+ },
+ {
"name": "FailedExpirer"
},
{
@@ -37,9 +40,6 @@
"name": "NodeRebooter"
},
{
- "name": "OperatorChangeApplicationMaintainer"
- },
- {
"name": "OsUpgradeActivator"
},
{
diff --git a/parent/pom.xml b/parent/pom.xml
index 6f1d6f23f51..2a763dcc6ac 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -885,7 +885,7 @@
<maven-site-plugin.version>3.3</maven-site-plugin.version>
<maven-source-plugin.version>3.0.1</maven-source-plugin.version>
<prometheus.client.version>0.6.0</prometheus.client.version>
- <onnxruntime.version>1.7.0</onnxruntime.version>
+ <onnxruntime.version>1.8.0</onnxruntime.version>
<protobuf.version>3.11.4</protobuf.version>
<spifly.version>1.3.3</spifly.version>
<surefire.version>2.22.0</surefire.version>
diff --git a/searchcore/src/tests/proton/documentdb/documentdb_test.cpp b/searchcore/src/tests/proton/documentdb/documentdb_test.cpp
index 463a7b164e1..3013e8f38d1 100644
--- a/searchcore/src/tests/proton/documentdb/documentdb_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/documentdb_test.cpp
@@ -19,6 +19,8 @@
#include <vespa/searchcore/proton/server/document_db_explorer.h>
#include <vespa/searchcore/proton/server/documentdb.h>
#include <vespa/searchcore/proton/server/documentdbconfigmanager.h>
+#include <vespa/searchcore/proton/server/feedhandler.h>
+#include <vespa/searchcore/proton/server/fileconfigmanager.h>
#include <vespa/searchcore/proton/server/memoryconfigstore.h>
#include <vespa/persistence/dummyimpl/dummy_bucket_executor.h>
#include <vespa/searchcorespi/index/indexflushtarget.h>
@@ -28,7 +30,10 @@
#include <vespa/vespalib/data/slime/slime.h>
#include <vespa/vespalib/util/size_literals.h>
#include <vespa/config-bucketspaces.h>
+#include <vespa/vespalib/io/fileutil.h>
+#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/vespalib/testkit/test_kit.h>
+#include <iostream>
using namespace cloud::config::filedistribution;
using namespace proton;
@@ -39,6 +44,7 @@ using document::DocumentType;
using document::DocumentTypeRepo;
using document::DocumenttypesConfig;
using document::test::makeBucketSpace;
+using search::SerialNum;
using search::TuneFileDocumentDB;
using search::index::DummyFileHeaderContext;
using search::index::Schema;
@@ -51,6 +57,24 @@ using vespalib::Slime;
namespace {
+void
+cleanup_dirs(bool file_config)
+{
+ vespalib::rmdir("typea", true);
+ vespalib::rmdir("tmp", true);
+ if (file_config) {
+ vespalib::rmdir("config", true);
+ }
+}
+
+vespalib::string
+config_subdir(SerialNum serialNum)
+{
+ vespalib::asciistream os;
+ os << "config/config-" << serialNum;
+ return os.str();
+}
+
struct MyDBOwner : public DummyDBOwner
{
std::shared_ptr<DocumentDBReferenceRegistry> _registry;
@@ -67,7 +91,30 @@ MyDBOwner::MyDBOwner()
{}
MyDBOwner::~MyDBOwner() = default;
-struct Fixture {
+struct FixtureBase {
+ bool _cleanup;
+ bool _file_config;
+ FixtureBase(bool file_config);
+ ~FixtureBase();
+ void disable_cleanup() { _cleanup = false; }
+};
+
+FixtureBase::FixtureBase(bool file_config)
+ : _cleanup(true),
+ _file_config(file_config)
+{
+ vespalib::mkdir("typea");
+}
+
+
+FixtureBase::~FixtureBase()
+{
+ if (_cleanup) {
+ cleanup_dirs(_file_config);
+ }
+}
+
+struct Fixture : public FixtureBase {
DummyWireService _dummy;
MyDBOwner _myDBOwner;
vespalib::ThreadStackExecutor _summaryExecutor;
@@ -79,12 +126,20 @@ struct Fixture {
matching::QueryLimiter _queryLimiter;
vespalib::Clock _clock;
+ std::unique_ptr<ConfigStore> make_config_store();
Fixture();
+ Fixture(bool file_config);
~Fixture();
};
Fixture::Fixture()
- : _dummy(),
+ : Fixture(false)
+{
+}
+
+Fixture::Fixture(bool file_config)
+ : FixtureBase(file_config),
+ _dummy(),
_myDBOwner(),
_summaryExecutor(8, 128_Ki),
_hwInfo(),
@@ -111,13 +166,25 @@ Fixture::Fixture()
_db = DocumentDB::create(".", mgr.getConfig(), "tcp/localhost:9014", _queryLimiter, _clock, DocTypeName("typea"),
makeBucketSpace(),
*b->getProtonConfigSP(), _myDBOwner, _summaryExecutor, _summaryExecutor, _bucketExecutor, _tls, _dummy,
- _fileHeaderContext, std::make_unique<MemoryConfigStore>(),
+ _fileHeaderContext, make_config_store(),
std::make_shared<vespalib::ThreadStackExecutor>(16, 128_Ki), _hwInfo);
_db->start();
_db->waitForOnlineState();
}
-Fixture::~Fixture() = default;
+Fixture::~Fixture()
+{
+}
+
+std::unique_ptr<ConfigStore>
+Fixture::make_config_store()
+{
+ if (_file_config) {
+ return std::make_unique<FileConfigManager>("config", "", "typea");
+ } else {
+ return std::make_unique<MemoryConfigStore>();
+ }
+}
const IFlushTarget *
extractRealFlushTarget(const IFlushTarget *target)
@@ -249,11 +316,56 @@ TEST_F("require that document db registers reference", Fixture)
EXPECT_EQUAL(search::attribute::BasicType::INT32, attrReadGuard->attribute()->getBasicType());
}
+TEST("require that normal restart works")
+{
+ {
+ Fixture f(true);
+ f.disable_cleanup();
+ }
+ {
+ Fixture f(true);
+ }
+}
+
+TEST("require that resume after interrupted save config works")
+{
+ SerialNum serialNum = 0;
+ {
+ Fixture f(true);
+ f.disable_cleanup();
+ serialNum = f._db->getFeedHandler().getSerialNum();
+ }
+ {
+ /*
+ * Simulate interrupted save config by copying best config to
+ * serial number after end of transaction log
+ */
+ std::cout << "Replay end serial num is " << serialNum << std::endl;
+ search::IndexMetaInfo info("config");
+ ASSERT_TRUE(info.load());
+ auto best_config_snapshot = info.getBestSnapshot();
+ ASSERT_TRUE(best_config_snapshot.valid);
+ std::cout << "Best config serial is " << best_config_snapshot.syncToken << std::endl;
+ auto old_config_subdir = config_subdir(best_config_snapshot.syncToken);
+ auto new_config_subdir = config_subdir(serialNum + 1);
+ vespalib::mkdir(new_config_subdir);
+ auto config_files = vespalib::listDirectory(old_config_subdir);
+ for (auto &config_file : config_files) {
+ vespalib::copy(old_config_subdir + "/" + config_file, new_config_subdir + "/" + config_file, false, false);
+ }
+ info.addSnapshot({true, serialNum + 1, new_config_subdir.substr(new_config_subdir.rfind('/') + 1)});
+ info.save();
+ }
+ {
+ Fixture f(true);
+ }
+}
+
} // namespace
TEST_MAIN() {
+ cleanup_dirs(true);
DummyFileHeaderContext::setCreator("documentdb_test");
- FastOS_File::MakeDirectory("typea");
TEST_RUN_ALL();
- FastOS_FileInterface::EmptyAndRemoveDirectory("typea");
+ cleanup_dirs(true);
}
diff --git a/searchcore/src/vespa/searchcore/grouping/groupingcontext.cpp b/searchcore/src/vespa/searchcore/grouping/groupingcontext.cpp
index 55e9ce16f70..01dd069b03c 100644
--- a/searchcore/src/vespa/searchcore/grouping/groupingcontext.cpp
+++ b/searchcore/src/vespa/searchcore/grouping/groupingcontext.cpp
@@ -4,17 +4,15 @@
#include <vespa/searchlib/aggregation/predicates.h>
#include <vespa/searchlib/aggregation/modifiers.h>
-namespace search {
+namespace search::grouping {
using aggregation::CountFS4Hits;
using aggregation::FS4HitSetDistributionKey;
-namespace grouping {
-
void
GroupingContext::deserialize(const char *groupSpec, uint32_t groupSpecLen)
{
- if ((groupSpec != NULL) && (groupSpecLen > 4)) {
+ if ((groupSpec != nullptr) && (groupSpecLen > 4)) {
vespalib::nbostream is(groupSpec, groupSpecLen);
vespalib::NBOSerializer nis(is);
uint32_t numGroupings = 0;
@@ -102,6 +100,4 @@ GroupingContext::needRanking() const
return true;
}
-
-} // namespace search::grouping
-} // namespace search
+}
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp b/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
index aa633536419..e53e817af8d 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
@@ -632,8 +632,9 @@ DocumentDB::saveInitialConfig(const DocumentDBConfig &configSnapshot)
// Only called from ctor
lock_guard guard(_configMutex);
- if (_config_store->getBestSerialNum() != 0)
+ if (_config_store->getBestSerialNum() != 0) {
return; // Initial config already present
+ }
SerialNum confSerial = _feedHandler->inc_replay_end_serial_num();
_feedHandler->setSerialNum(confSerial);
@@ -658,16 +659,17 @@ void
DocumentDB::resumeSaveConfig()
{
SerialNum bestSerial = _config_store->getBestSerialNum();
- if (bestSerial == 0)
- return;
- if (bestSerial != _feedHandler->get_replay_end_serial_num() + 1)
+ assert(bestSerial != 0);
+ if (bestSerial != _feedHandler->get_replay_end_serial_num() + 1) {
return;
+ }
+ LOG(warning, "DocumentDB(%s): resumeSaveConfig() resuming save config for serial %" PRIu64,
+ _docTypeName.toString().c_str(), bestSerial);
// proton was interrupted when saving later config.
SerialNum confSerial = _feedHandler->inc_replay_end_serial_num();
- _feedHandler->setSerialNum(confSerial);
+ assert(confSerial == bestSerial);
// resume operation, i.e. save config entry in transaction log
NewConfigOperation op(confSerial, *_config_store);
- op.setSerialNum(_feedHandler->inc_replay_end_serial_num());
(void) _feedHandler->storeOperationSync(op);
sync(op.getSerialNum());
}
diff --git a/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp b/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp
index 4b862b40896..04aea64fbd4 100644
--- a/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp
@@ -4,8 +4,6 @@
#include "bootstrapconfig.h"
#include <vespa/searchcore/proton/common/hw_info_sampler.h>
#include <vespa/config/print/fileconfigwriter.h>
-#include <vespa/config/print/fileconfigsnapshotreader.h>
-#include <vespa/config/print/fileconfigsnapshotwriter.h>
#include <vespa/config-bucketspaces.h>
#include <vespa/document/repo/document_type_repo_factory.h>
#include <vespa/searchcommon/common/schemaconfigurer.h>
@@ -42,7 +40,8 @@ using vespa::config::search::summary::JuniperrcConfig;
using vespa::config::content::core::BucketspacesConfig;
using vespalib::nbostream;
-typedef IndexMetaInfo::SnapshotList SnapshotList;
+using SnapshotList = IndexMetaInfo::SnapshotList;
+using Snapshot = IndexMetaInfo::Snapshot;
using namespace std::chrono_literals;
namespace proton {
@@ -74,9 +73,7 @@ fsyncFile(const vespalib::string &fileName)
template <class Config>
void
-saveHelper(const vespalib::string &snapDir,
- const vespalib::string &name,
- const Config &config)
+saveHelper(const vespalib::string &snapDir, const vespalib::string &name, const Config &config)
{
vespalib::string fileName(snapDir + "/" + name + ".cfg");
config::FileConfigWriter writer(fileName);
@@ -105,8 +102,7 @@ public:
ConfigFile();
~ConfigFile();
- ConfigFile(const vespalib::string &name,
- const vespalib::string &fullName);
+ ConfigFile(const vespalib::string &name, const vespalib::string &fullName);
nbostream &serialize(nbostream &stream) const;
nbostream &deserialize(nbostream &stream);
@@ -122,8 +118,7 @@ ConfigFile::ConfigFile()
ConfigFile::~ConfigFile() = default;
-ConfigFile::ConfigFile(const vespalib::string &name,
- const vespalib::string &fullName)
+ConfigFile::ConfigFile(const vespalib::string &name, const vespalib::string &fullName)
: _name(name),
_modTime(0),
_content()
@@ -142,7 +137,7 @@ ConfigFile::ConfigFile(const vespalib::string &name,
nbostream &
ConfigFile::serialize(nbostream &stream) const
{
- assert(strchr(_name.c_str(), '/') == NULL);
+ assert(strchr(_name.c_str(), '/') == nullptr);
stream << _name;
stream << static_cast<int64_t>(_modTime);;
uint32_t sz = _content.size();
@@ -155,7 +150,7 @@ nbostream &
ConfigFile::deserialize(nbostream &stream)
{
stream >> _name;
- assert(strchr(_name.c_str(), '/') == NULL);
+ assert(strchr(_name.c_str(), '/') == nullptr);
int64_t modTime;
stream >> modTime;
_modTime = modTime;
@@ -255,8 +250,7 @@ FileConfigManager::getOldestSerialNum() const
}
void
-FileConfigManager::saveConfig(const DocumentDBConfig &snapshot,
- SerialNum serialNum)
+FileConfigManager::saveConfig(const DocumentDBConfig &snapshot, SerialNum serialNum)
{
if (getBestSerialNum() >= serialNum) {
LOG(warning, "Config for serial >= %" PRIu64 " already saved",
@@ -318,8 +312,7 @@ void addEmptyFile(vespalib::string snapDir, vespalib::string fileName)
}
void
-FileConfigManager::loadConfig(const DocumentDBConfig &currentSnapshot,
- search::SerialNum serialNum,
+FileConfigManager::loadConfig(const DocumentDBConfig &currentSnapshot, search::SerialNum serialNum,
DocumentDBConfig::SP &loadedSnapshot)
{
vespalib::string snapDirBaseName(makeSnapDirBaseName(serialNum));
@@ -333,13 +326,14 @@ FileConfigManager::loadConfig(const DocumentDBConfig &currentSnapshot,
DocumentDBConfigHelper dbc(spec, _docTypeName);
- typedef DocumenttypesConfig DTC;
- typedef DocumentDBConfig::DocumenttypesConfigSP DTCSP;
- DTCSP docTypesCfg(config::ConfigGetter<DTC>::getConfig("", spec).release());
+ using DTC = DocumenttypesConfig;
+ using DTCSP = DocumentDBConfig::DocumenttypesConfigSP;
+ DTCSP docTypesCfg = config::ConfigGetter<DTC>::getConfig("", spec);
std::shared_ptr<const DocumentTypeRepo> repo;
if (currentSnapshot.getDocumenttypesConfigSP() &&
currentSnapshot.getDocumentTypeRepoSP() &&
- currentSnapshot.getDocumenttypesConfig() == *docTypesCfg) {
+ (currentSnapshot.getDocumenttypesConfig() == *docTypesCfg))
+ {
docTypesCfg = currentSnapshot.getDocumenttypesConfigSP();
repo = currentSnapshot.getDocumentTypeRepoSP();
} else {
@@ -462,8 +456,7 @@ FileConfigManager::serializeConfig(SerialNum serialNum, nbostream &stream)
uint32_t numConfigs = configs.size();
stream << numConfigs;
for (const auto &config : configs) {
- ConfigFile file(config,
- snapDir + "/" + config);
+ ConfigFile file(config, snapDir + "/" + config);
stream << file;
}
}
diff --git a/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.h b/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.h
index 1c477ffd3c8..d58d7920c67 100644
--- a/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.h
+++ b/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.h
@@ -10,17 +10,12 @@
namespace proton {
class FileConfigManager : public ConfigStore {
-public:
- typedef std::unique_ptr<FileConfigManager> UP;
- typedef std::shared_ptr<FileConfigManager> SP;
- typedef search::IndexMetaInfo::Snapshot Snapshot;
-
private:
- vespalib::string _baseDir;
- vespalib::string _configId;
- vespalib::string _docTypeName;
+ vespalib::string _baseDir;
+ vespalib::string _configId;
+ vespalib::string _docTypeName;
search::IndexMetaInfo _info;
- ProtonConfigSP _protonConfig;
+ ProtonConfigSP _protonConfig;
public:
/**
@@ -33,14 +28,12 @@ public:
const vespalib::string &configId,
const vespalib::string &docTypeName);
- virtual
- ~FileConfigManager();
+ ~FileConfigManager() override;
- virtual SerialNum getBestSerialNum() const override;
- virtual SerialNum getOldestSerialNum() const override;
+ SerialNum getBestSerialNum() const override;
+ SerialNum getOldestSerialNum() const override;
- virtual void saveConfig(const DocumentDBConfig &snapshot,
- SerialNum serialNum) override;
+ void saveConfig(const DocumentDBConfig &snapshot, SerialNum serialNum) override;
/**
* Load a config snapshot from disk corresponding to the given
@@ -53,23 +46,21 @@ public:
* @param loadedSnapshot the shared pointer in which to store the
* resulting config snapshot.
*/
- virtual void loadConfig(const DocumentDBConfig &currentSnapshot,
- SerialNum serialNum,
- DocumentDBConfig::SP &loadedSnapshot) override;
+ void loadConfig(const DocumentDBConfig &currentSnapshot, SerialNum serialNum,
+ DocumentDBConfig::SP &loadedSnapshot) override;
- virtual void removeInvalid() override;
- virtual void prune(SerialNum serialNum) override;
- virtual bool hasValidSerial(SerialNum serialNum) const override;
+ void removeInvalid() override;
+ void prune(SerialNum serialNum) override;
+ bool hasValidSerial(SerialNum serialNum) const override;
- virtual SerialNum getPrevValidSerial(SerialNum serialNum) const override;
+ SerialNum getPrevValidSerial(SerialNum serialNum) const override;
/**
* Serialize config files.
*
* Used for serializing config into transaction log.
*/
- virtual void
- serializeConfig(SerialNum serialNum, vespalib::nbostream &stream) override;
+ void serializeConfig(SerialNum serialNum, vespalib::nbostream &stream) override;
/**
@@ -80,10 +71,9 @@ public:
* takes precedence over the serialized config files in the
* transaction log.
*/
- virtual void
- deserializeConfig(SerialNum serialNum, vespalib::nbostream &stream) override;
+ void deserializeConfig(SerialNum serialNum, vespalib::nbostream &stream) override;
- virtual void setProtonConfig(const ProtonConfigSP &protonConfig) override;
+ void setProtonConfig(const ProtonConfigSP &protonConfig) override;
};
} // namespace proton
diff --git a/searchlib/abi-spec.json b/searchlib/abi-spec.json
index 2d006bbd973..5035a5f583f 100644
--- a/searchlib/abi-spec.json
+++ b/searchlib/abi-spec.json
@@ -336,6 +336,8 @@
"public java.util.Map getRankProperties(com.yahoo.searchlib.rankingexpression.rule.SerializationContext)",
"public java.util.Map getRankProperties(java.util.List)",
"public static java.lang.String propertyName(java.lang.String)",
+ "public static java.lang.String propertyExpressionName(java.lang.String)",
+ "public static java.lang.String extractScriptName(java.lang.String)",
"public com.yahoo.tensor.TensorType type(com.yahoo.tensor.evaluation.TypeContext)",
"public com.yahoo.searchlib.rankingexpression.evaluation.Value evaluate(com.yahoo.searchlib.rankingexpression.evaluation.Context)",
"public static com.yahoo.searchlib.rankingexpression.RankingExpression from(java.lang.String)"
diff --git a/searchlib/src/main/java/com/yahoo/searchlib/aggregation/Group.java b/searchlib/src/main/java/com/yahoo/searchlib/aggregation/Group.java
index c508296d739..c7080ec28d8 100644
--- a/searchlib/src/main/java/com/yahoo/searchlib/aggregation/Group.java
+++ b/searchlib/src/main/java/com/yahoo/searchlib/aggregation/Group.java
@@ -4,9 +4,17 @@ package com.yahoo.searchlib.aggregation;
import com.yahoo.searchlib.expression.AggregationRefNode;
import com.yahoo.searchlib.expression.ExpressionNode;
import com.yahoo.searchlib.expression.ResultNode;
-import com.yahoo.vespa.objects.*;
-
-import java.util.*;
+import com.yahoo.vespa.objects.Deserializer;
+import com.yahoo.vespa.objects.Identifiable;
+import com.yahoo.vespa.objects.ObjectOperation;
+import com.yahoo.vespa.objects.ObjectPredicate;
+import com.yahoo.vespa.objects.ObjectVisitor;
+import com.yahoo.vespa.objects.Serializer;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
public class Group extends Identifiable {
@@ -132,11 +140,7 @@ public class Group extends Identifiable {
if (sortType == SortType.BYID) {
return;
}
- Collections.sort(children, new Comparator<Group>() {
- public int compare(Group lhs, Group rhs) {
- return lhs.compareId(rhs);
- }
- });
+ Collections.sort(children, (Group lhs, Group rhs) -> lhs.compareId(rhs));
sortType = SortType.BYID;
}
@@ -145,11 +149,8 @@ public class Group extends Identifiable {
if (sortType == SortType.BYRANK) {
return;
}
- Collections.sort(children, new Comparator<Group>() {
- public int compare(Group lhs, Group rhs) {
- return lhs.compareRank(rhs);
- }
- });
+ Collections.sort(children, (Group lhs, Group rhs) -> lhs.compareRank(rhs) );
+
sortType = SortType.BYRANK;
}
@@ -403,22 +404,19 @@ public class Group extends Identifiable {
if (id != null) {
obj.id = (ResultNode)id.clone();
}
- obj.aggregationResults = new ArrayList<AggregationResult>();
+ obj.aggregationResults = new ArrayList<>();
for (AggregationResult result : aggregationResults) {
obj.aggregationResults.add(result.clone());
}
- obj.orderByIdx = new ArrayList<Integer>();
- for (Integer idx : orderByIdx) {
- obj.orderByIdx.add(idx);
- }
- obj.orderByExp = new ArrayList<ExpressionNode>();
+ obj.orderByIdx = new ArrayList<>(orderByIdx);
+ obj.orderByExp = new ArrayList<>();
RefResolver resolver = new RefResolver(obj);
for (ExpressionNode exp : orderByExp) {
exp = exp.clone();
exp.select(REF_LOCATOR, resolver);
obj.orderByExp.add(exp);
}
- obj.children = new ArrayList<Group>();
+ obj.children = new ArrayList<>();
for (Group child : children) {
obj.children.add(child.clone());
}
@@ -447,7 +445,7 @@ public class Group extends Identifiable {
}
}
- private static enum SortType {
+ private enum SortType {
UNSORTED,
BYRANK,
BYID
diff --git a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/RankingExpression.java b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/RankingExpression.java
index 3eb4f16a9dd..cdaaba34a44 100755
--- a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/RankingExpression.java
+++ b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/RankingExpression.java
@@ -81,6 +81,9 @@ public class RankingExpression implements Serializable {
private String name = "";
private ExpressionNode root;
+ private final static String RANKEXPRESSION = "rankingExpression(";
+ private final static String RANKINGSCRIPT = ").rankingScript";
+ private final static String EXPRESSION_NAME = ").expressionName";
/** Creates an anonymous ranking expression by consuming from the reader */
public RankingExpression(Reader reader) throws ParseException {
@@ -273,7 +276,16 @@ public class RankingExpression implements Serializable {
* @return the property name.
*/
public static String propertyName(String expressionName) {
- return "rankingExpression(" + expressionName + ").rankingScript";
+ return RANKEXPRESSION + expressionName + RANKINGSCRIPT;
+ }
+ public static String propertyExpressionName(String expressionName) {
+ return RANKEXPRESSION + expressionName + EXPRESSION_NAME;
+ }
+ public static String extractScriptName(String propertyName) {
+ if (propertyName.startsWith(RANKEXPRESSION) && propertyName.endsWith(RANKINGSCRIPT)) {
+ return propertyName.substring(RANKEXPRESSION.length(), propertyName.length() - RANKINGSCRIPT.length());
+ }
+ return null;
}
/**
diff --git a/searchlib/src/test/java/com/yahoo/searchlib/aggregation/GroupingTestCase.java b/searchlib/src/test/java/com/yahoo/searchlib/aggregation/GroupingTestCase.java
index ec379e5f8af..e6143a17523 100644
--- a/searchlib/src/test/java/com/yahoo/searchlib/aggregation/GroupingTestCase.java
+++ b/searchlib/src/test/java/com/yahoo/searchlib/aggregation/GroupingTestCase.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.searchlib.aggregation;
+import com.yahoo.searchlib.expression.FloatResultNode;
import com.yahoo.searchlib.expression.NullResultNode;
import com.yahoo.searchlib.expression.StringBucketResultNode;
import com.yahoo.vespa.objects.BufferSerializer;
@@ -186,6 +187,7 @@ public class GroupingTestCase {
public void requireThatNeedDeepResultCollectionWorks() {
assertFalse(new Grouping().addLevel(new GroupingLevel().setGroupPrototype(new Group())).needDeepResultCollection());
assertTrue(new Grouping().addLevel(new GroupingLevel().setGroupPrototype(new Group().addOrderBy(new CountAggregationResult(9), true))).needDeepResultCollection());
+ assertTrue(new Grouping().addLevel(new GroupingLevel().setGroupPrototype(new Group().addOrderBy(new AverageAggregationResult(), true))).needDeepResultCollection());
}
@Test
diff --git a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/RankingExpressionTestCase.java b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/RankingExpressionTestCase.java
index 092faa1934e..4214727eb5f 100755
--- a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/RankingExpressionTestCase.java
+++ b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/RankingExpressionTestCase.java
@@ -14,6 +14,7 @@ import com.yahoo.searchlib.rankingexpression.rule.TensorFunctionNode;
import com.yahoo.tensor.functions.Reduce;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertFalse;
@@ -23,10 +24,13 @@ import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
-import java.util.Collections;
import java.util.List;
import java.util.Map;
-import java.util.concurrent.*;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
/**
* @author Simon Thoresen Hult
@@ -208,6 +212,16 @@ public class RankingExpressionTestCase {
}
@Test
+ public void testPropertyName() {
+ assertEquals("rankingExpression(m4).rankingScript", RankingExpression.propertyName("m4"));
+ assertEquals("m4", RankingExpression.extractScriptName("rankingExpression(m4).rankingScript"));
+ assertNull(RankingExpression.extractScriptName("rankingexpression(m4).rankingScript"));
+ assertNull(RankingExpression.extractScriptName("rankingExpression(m4).rankingscript"));
+
+ assertEquals("rankingExpression(m4).expressionName", RankingExpression.propertyExpressionName("m4"));
+ }
+
+ @Test
public void testBug3464208() throws ParseException {
List<ExpressionFunction> functions = new ArrayList<>();
functions.add(new ExpressionFunction("log10tweetage", null, new RankingExpression("69")));
diff --git a/searchlib/src/tests/predicate/predicate_bounds_posting_list_test.cpp b/searchlib/src/tests/predicate/predicate_bounds_posting_list_test.cpp
index 191c7495271..31aebf95ea2 100644
--- a/searchlib/src/tests/predicate/predicate_bounds_posting_list_test.cpp
+++ b/searchlib/src/tests/predicate/predicate_bounds_posting_list_test.cpp
@@ -29,7 +29,7 @@ SimpleIndexConfig config;
const uint64_t hash = 0x123;
TEST("require that empty bounds posting list starts at 0.") {
- PredicateIndex index(generation_handler, generation_holder, limit_provider, config, 8);
+ PredicateIndex index(generation_holder, limit_provider, config, 8);
vespalib::datastore::EntryRef ref;
PredicateBoundsPostingList<PredicateIndex::BTreeIterator>
posting_list(index.getIntervalStore(),
@@ -54,7 +54,7 @@ void checkNext(PredicateBoundsPostingList<PredicateIndex::BTreeIterator> &postin
}
TEST("require that bounds posting list checks bounds.") {
- PredicateIndex index(generation_handler, generation_holder, limit_provider, config, 8);
+ PredicateIndex index(generation_holder, limit_provider, config, 8);
const auto &bounds_index = index.getBoundsIndex();
for (uint32_t id = 1; id < 100; ++id) {
PredicateTreeAnnotations annotations(id);
diff --git a/searchlib/src/tests/predicate/predicate_index_test.cpp b/searchlib/src/tests/predicate/predicate_index_test.cpp
index 669f70dd544..19ad0301b5c 100644
--- a/searchlib/src/tests/predicate/predicate_index_test.cpp
+++ b/searchlib/src/tests/predicate/predicate_index_test.cpp
@@ -33,7 +33,7 @@ DummyDocIdLimitProvider dummy_provider;
SimpleIndexConfig simple_index_config;
TEST("require that PredicateIndex can index empty documents") {
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 10);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
EXPECT_EQUAL(0u, index.getZeroConstraintDocs().size());
index.indexEmptyDocument(2);
index.commit();
@@ -41,7 +41,7 @@ TEST("require that PredicateIndex can index empty documents") {
}
TEST("require that indexDocument don't index empty documents") {
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 10);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
EXPECT_EQUAL(0u, index.getZeroConstraintDocs().size());
PredicateTreeAnnotations annotations;
index.indexDocument(3, annotations);
@@ -50,7 +50,7 @@ TEST("require that indexDocument don't index empty documents") {
}
TEST("require that PredicateIndex can remove empty documents") {
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 10);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
EXPECT_EQUAL(0u, index.getZeroConstraintDocs().size());
index.indexEmptyDocument(2);
index.commit();
@@ -61,7 +61,7 @@ TEST("require that PredicateIndex can remove empty documents") {
}
TEST("require that indexing the same empty document multiple times is ok") {
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 10);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
EXPECT_EQUAL(0u, index.getZeroConstraintDocs().size());
index.indexEmptyDocument(2);
index.commit();
@@ -109,11 +109,10 @@ const IntervalWithBounds bounds = {0x0001ffff, 0x03};
Interval single_buf;
TEST("require that PredicateIndex can index document") {
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 10);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
EXPECT_FALSE(index.getIntervalIndex().lookup(hash).valid());
indexFeature(index, doc_id, min_feature, {{hash, interval}}, {});
index.commit();
-
auto posting_it = lookupPosting(index, hash);
EXPECT_EQUAL(doc_id, posting_it.getKey());
uint32_t size;
@@ -123,8 +122,27 @@ TEST("require that PredicateIndex can index document") {
EXPECT_EQUAL(interval, interval_list[0]);
}
+TEST("require that bit vector cache is initialized correctly") {
+ BitVectorCache::KeyAndCountSet keySet;
+ keySet.emplace_back(hash, dummy_provider.getDocIdLimit()/2);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
+ EXPECT_FALSE(index.getIntervalIndex().lookup(hash).valid());
+ indexFeature(index, doc_id, min_feature, {{hash, interval}}, {});
+ index.requireCachePopulation();
+ index.populateIfNeeded(dummy_provider.getDocIdLimit());
+ EXPECT_TRUE(index.lookupCachedSet(keySet).empty());
+ index.commit();
+ EXPECT_TRUE(index.getIntervalIndex().lookup(hash).valid());
+ EXPECT_TRUE(index.lookupCachedSet(keySet).empty());
+
+ index.requireCachePopulation();
+ index.populateIfNeeded(dummy_provider.getDocIdLimit());
+ EXPECT_FALSE(index.lookupCachedSet(keySet).empty());
+}
+
+
TEST("require that PredicateIndex can index document with bounds") {
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 10);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
EXPECT_FALSE(index.getIntervalIndex().lookup(hash).valid());
indexFeature(index, doc_id, min_feature, {}, {{hash, bounds}});
index.commit();
@@ -149,7 +167,7 @@ TEST("require that PredicateIndex can index document with bounds") {
TEST("require that PredicateIndex can index multiple documents "
"with the same feature") {
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 10);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
EXPECT_FALSE(index.getIntervalIndex().lookup(hash).valid());
for (uint32_t id = 1; id < 100; ++id) {
indexFeature(index, id, min_feature, {{hash, interval}}, {});
@@ -171,7 +189,7 @@ TEST("require that PredicateIndex can index multiple documents "
}
TEST("require that PredicateIndex can remove indexed documents") {
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 10);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
EXPECT_FALSE(index.getIntervalIndex().lookup(hash).valid());
indexFeature(index, doc_id, min_feature,
{{hash, interval}}, {{hash2, bounds}});
@@ -187,7 +205,7 @@ TEST("require that PredicateIndex can remove indexed documents") {
}
TEST("require that PredicateIndex can remove multiple documents") {
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 10);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
const auto &interval_index = index.getIntervalIndex();
EXPECT_FALSE(interval_index.lookup(hash).valid());
for (uint32_t id = 1; id < 100; ++id) {
@@ -214,7 +232,7 @@ TEST("require that PredicateIndex can remove multiple documents with "
intervals.push_back(make_pair(hash + i, interval));
bounds_intervals.push_back(make_pair(hash2 + i, bounds));
}
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 10);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
const auto &interval_index = index.getIntervalIndex();
EXPECT_FALSE(interval_index.lookup(hash).valid());
for (uint32_t id = 1; id < 100; ++id) {
@@ -272,7 +290,7 @@ TEST("require that PredicateIndex can be (de)serialized") {
intervals.push_back(make_pair(hash + i, interval));
bounds_intervals.push_back(make_pair(hash2 + i, bounds));
}
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 8);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 8);
EXPECT_FALSE(index.getIntervalIndex().lookup(hash).valid());
for (uint32_t id = 1; id < 100; ++id) {
indexFeature(index, id, id, intervals, bounds_intervals);
@@ -284,7 +302,7 @@ TEST("require that PredicateIndex can be (de)serialized") {
index.serialize(buffer);
uint32_t doc_id_limit;
DocIdLimitFinder finder(doc_id_limit);
- PredicateIndex index2(generation_handler, generation_holder, dummy_provider, simple_index_config,
+ PredicateIndex index2(generation_holder, dummy_provider, simple_index_config,
buffer, finder, PredicateAttribute::PREDICATE_ATTRIBUTE_VERSION);
const PredicateIntervalStore &interval_store = index2.getIntervalStore();
EXPECT_EQUAL(199u, doc_id_limit);
@@ -322,7 +340,7 @@ TEST("require that PredicateIndex can be (de)serialized") {
}
TEST("require that DocumentFeaturesStore is restored on deserialization") {
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 10);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
EXPECT_FALSE(index.getIntervalIndex().lookup(hash).valid());
indexFeature(index, doc_id, min_feature,
{{hash, interval}}, {{hash2, bounds}});
@@ -330,7 +348,7 @@ TEST("require that DocumentFeaturesStore is restored on deserialization") {
index.serialize(buffer);
uint32_t doc_id_limit;
DocIdLimitFinder finder(doc_id_limit);
- PredicateIndex index2(generation_handler, generation_holder, dummy_provider, simple_index_config,
+ PredicateIndex index2(generation_holder, dummy_provider, simple_index_config,
buffer, finder, PredicateAttribute::PREDICATE_ATTRIBUTE_VERSION);
const auto &interval_index = index2.getIntervalIndex();
const auto &bounds_index = index2.getBoundsIndex();
@@ -351,7 +369,7 @@ TEST("require that DocumentFeaturesStore is restored on deserialization") {
}
TEST("require that hold lists are attempted emptied on destruction") {
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 10);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
indexFeature(index, doc_id, min_feature,
{{hash, interval}}, {{hash2, bounds}});
{
diff --git a/searchlib/src/tests/predicate/predicate_interval_posting_list_test.cpp b/searchlib/src/tests/predicate/predicate_interval_posting_list_test.cpp
index a77542f364e..660d8556b5c 100644
--- a/searchlib/src/tests/predicate/predicate_interval_posting_list_test.cpp
+++ b/searchlib/src/tests/predicate/predicate_interval_posting_list_test.cpp
@@ -28,7 +28,7 @@ SimpleIndexConfig config;
const uint64_t hash = 0x123;
TEST("require that empty posting list starts at 0.") {
- PredicateIndex index(generation_handler, generation_holder, limit_provider, config, 8);
+ PredicateIndex index(generation_holder, limit_provider, config, 8);
vespalib::datastore::EntryRef ref;
PredicateIntervalPostingList<PredicateIndex::BTreeIterator>
posting_list(index.getIntervalStore(), index.getIntervalIndex().getBTreePostingList(ref));
@@ -38,7 +38,7 @@ TEST("require that empty posting list starts at 0.") {
}
TEST("require that posting list can iterate.") {
- PredicateIndex index(generation_handler, generation_holder, limit_provider, config, 8);
+ PredicateIndex index(generation_holder, limit_provider, config, 8);
const auto &interval_index = index.getIntervalIndex();
for (uint32_t id = 1; id < 100; ++id) {
PredicateTreeAnnotations annotations(id);
diff --git a/searchlib/src/tests/predicate/predicate_zero_constraint_posting_list_test.cpp b/searchlib/src/tests/predicate/predicate_zero_constraint_posting_list_test.cpp
index e427c99c007..12de48b5d31 100644
--- a/searchlib/src/tests/predicate/predicate_zero_constraint_posting_list_test.cpp
+++ b/searchlib/src/tests/predicate/predicate_zero_constraint_posting_list_test.cpp
@@ -25,7 +25,7 @@ DummyDocIdLimitProvider limit_provider;
SimpleIndexConfig config;
TEST("require that empty posting list starts at 0.") {
- PredicateIndex index(generation_handler, generation_holder, limit_provider, config, 8);
+ PredicateIndex index(generation_holder, limit_provider, config, 8);
PredicateZeroConstraintPostingList posting_list(index.getZeroConstraintDocs().begin());
EXPECT_EQUAL(0u, posting_list.getDocId());
EXPECT_EQUAL(0x00010001u, posting_list.getInterval());
@@ -33,7 +33,7 @@ TEST("require that empty posting list starts at 0.") {
}
TEST("require that posting list can iterate.") {
- PredicateIndex index(generation_handler, generation_holder, limit_provider, config, 8);
+ PredicateIndex index(generation_holder, limit_provider, config, 8);
for (uint32_t id = 1; id < 100; ++id) {
index.indexEmptyDocument(id);
}
diff --git a/searchlib/src/tests/predicate/predicate_zstar_compressed_posting_list_test.cpp b/searchlib/src/tests/predicate/predicate_zstar_compressed_posting_list_test.cpp
index 4e86e996704..6d00b45a283 100644
--- a/searchlib/src/tests/predicate/predicate_zstar_compressed_posting_list_test.cpp
+++ b/searchlib/src/tests/predicate/predicate_zstar_compressed_posting_list_test.cpp
@@ -29,7 +29,7 @@ SimpleIndexConfig config;
const uint64_t hash = 0x123;
TEST("require that empty posting list starts at 0.") {
- PredicateIndex index(generation_handler, generation_holder, limit_provider, config, 8);
+ PredicateIndex index(generation_holder, limit_provider, config, 8);
vespalib::datastore::EntryRef ref;
PredicateZstarCompressedPostingList<PredicateIndex::BTreeIterator>
posting_list(index.getIntervalStore(), index.getIntervalIndex().getBTreePostingList(ref));
@@ -39,7 +39,7 @@ TEST("require that empty posting list starts at 0.") {
}
TEST("require that posting list can iterate.") {
- PredicateIndex index(generation_handler, generation_holder, limit_provider, config, 8);
+ PredicateIndex index(generation_holder, limit_provider, config, 8);
const auto &interval_index = index.getIntervalIndex();
vector<vector<Interval>> intervals =
{{{0x00010000}},
diff --git a/searchlib/src/tests/queryeval/predicate/predicate_blueprint_test.cpp b/searchlib/src/tests/queryeval/predicate/predicate_blueprint_test.cpp
index 3dd2ec26dea..5b8d5f5b9ce 100644
--- a/searchlib/src/tests/queryeval/predicate/predicate_blueprint_test.cpp
+++ b/searchlib/src/tests/queryeval/predicate/predicate_blueprint_test.cpp
@@ -86,8 +86,7 @@ TEST_F("require that blueprint with empty index estimates empty.", Fixture) {
EXPECT_EQUAL(0u, blueprint.getState().estimate().estHits);
}
-TEST_F("require that blueprint with zero-constraint doc estimates non-empty.",
- Fixture) {
+TEST_F("require that blueprint with zero-constraint doc estimates non-empty.", Fixture) {
f.indexEmptyDocument(42);
PredicateBlueprint blueprint(f.field, f.guard(), f.query);
EXPECT_FALSE(blueprint.getState().estimate().empty);
@@ -98,11 +97,9 @@ const int min_feature = 1;
const uint32_t doc_id = 2;
const uint32_t interval = 0x0001ffff;
-TEST_F("require that blueprint with posting list entry estimates non-empty.",
- Fixture) {
+TEST_F("require that blueprint with posting list entry estimates non-empty.", Fixture) {
PredicateTreeAnnotations annotations(min_feature);
- annotations.interval_map[PredicateHash::hash64("key=value")] =
- std::vector<Interval>{{interval}};
+ annotations.interval_map[PredicateHash::hash64("key=value")] = std::vector<Interval>{{interval}};
f.indexDocument(doc_id, annotations);
PredicateBlueprint blueprint(f.field, f.guard(), f.query);
@@ -110,8 +107,7 @@ TEST_F("require that blueprint with posting list entry estimates non-empty.",
EXPECT_EQUAL(0u, blueprint.getState().estimate().estHits);
}
-TEST_F("require that blueprint with 'bounds' posting list entry estimates "
- "non-empty.", Fixture) {
+TEST_F("require that blueprint with 'bounds' posting list entry estimates non-empty.", Fixture) {
PredicateTreeAnnotations annotations(min_feature);
annotations.bounds_map[PredicateHash::hash64("range_key=40")] =
std::vector<IntervalWithBounds>{{interval, 0x80000003}};
@@ -122,34 +118,50 @@ TEST_F("require that blueprint with 'bounds' posting list entry estimates "
EXPECT_EQUAL(0u, blueprint.getState().estimate().estHits);
}
-TEST_F("require that blueprint with zstar-compressed estimates non-empty.",
- Fixture) {
+TEST_F("require that blueprint with zstar-compressed estimates non-empty.", Fixture) {
PredicateTreeAnnotations annotations(1);
- annotations.interval_map[Constants::z_star_compressed_hash] =std::vector<Interval>{{0xfffe0000}};
+ annotations.interval_map[Constants::z_star_compressed_hash] = std::vector<Interval>{{0xfffe0000}};
f.indexDocument(doc_id, annotations);
PredicateBlueprint blueprint(f.field, f.guard(), f.query);
EXPECT_FALSE(blueprint.getState().estimate().empty);
EXPECT_EQUAL(0u, blueprint.getState().estimate().estHits);
}
-TEST_F("require that blueprint can create search", Fixture) {
- PredicateTreeAnnotations annotations(1);
- annotations.interval_map[PredicateHash::hash64("key=value")] =std::vector<Interval>{{interval}};
- f.indexDocument(doc_id, annotations);
-
+void
+runQuery(Fixture & f, std::vector<uint32_t> expected, bool expectCachedSize, uint32_t expectedKV) {
PredicateBlueprint blueprint(f.field, f.guard(), f.query);
blueprint.fetchPostings(ExecuteInfo::TRUE);
+ EXPECT_EQUAL(expectCachedSize, blueprint.getCachedFeatures().size());
+ for (uint32_t docId : expected) {
+ EXPECT_EQUAL(expectedKV, uint32_t(blueprint.getKV()[docId]));
+ }
TermFieldMatchDataArray tfmda;
SearchIterator::UP it = blueprint.createLeafSearch(tfmda, true);
ASSERT_TRUE(it.get());
it->initFullRange();
EXPECT_EQUAL(SearchIterator::beginId(), it->getDocId());
- EXPECT_FALSE(it->seek(doc_id - 1));
- EXPECT_EQUAL(doc_id, it->getDocId());
- EXPECT_TRUE(it->seek(doc_id));
- EXPECT_EQUAL(doc_id, it->getDocId());
- EXPECT_FALSE(it->seek(doc_id + 1));
- EXPECT_TRUE(it->isAtEnd());
+ std::vector<uint32_t> actual;
+ for (it->seek(1); ! it->isAtEnd(); it->seek(it->getDocId()+1)) {
+ actual.push_back(it->getDocId());
+ }
+ EXPECT_EQUAL(expected.size(), actual.size());
+ for (size_t i(0); i < expected.size(); i++) {
+ EXPECT_EQUAL(expected[i], actual[i]);
+ }
+}
+
+TEST_F("require that blueprint can create search", Fixture) {
+ PredicateTreeAnnotations annotations(1);
+ annotations.interval_map[PredicateHash::hash64("key=value")] = std::vector<Interval>{{interval}};
+ for (size_t i(0); i < 9; i++) {
+ f.indexDocument(doc_id + i, annotations);
+ }
+ runQuery(f, {2,3,4,5,6,7,8,9,10}, 0, 1);
+ f.indexDocument(doc_id+9, annotations);
+ runQuery(f, {2, 3,4,5,6,7,8,9,10,11}, 0, 1);
+ f.index().requireCachePopulation();
+ f.indexDocument(doc_id+10, annotations);
+ runQuery(f, {2,3,4,5,6,7,8,9,10,11,12}, 1, 1);
}
TEST_F("require that blueprint can create more advanced search", Fixture) {
diff --git a/searchlib/src/vespa/searchlib/aggregation/group.h b/searchlib/src/vespa/searchlib/aggregation/group.h
index 5b425de24e6..681cda43afa 100644
--- a/searchlib/src/vespa/searchlib/aggregation/group.h
+++ b/searchlib/src/vespa/searchlib/aggregation/group.h
@@ -232,7 +232,7 @@ public:
/**
* Recursively checks if any itself or any children needs a full resort.
- * Then all hits must be processed and should be doen before any hit sorting.
+ * Then all hits must be processed and should be done before any hit sorting.
*/
bool needResort() const { return _aggr.needResort(); }
diff --git a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp
index d867ae9f211..3b3fdd9bc5c 100644
--- a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp
@@ -1,13 +1,8 @@
// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "enum_store_dictionary.h"
-#include "enumstore.h"
#include <vespa/vespalib/btree/btree.hpp>
-#include <vespa/vespalib/btree/btreeiterator.hpp>
#include <vespa/vespalib/btree/btreenode.hpp>
-#include <vespa/vespalib/btree/btreenodeallocator.hpp>
-#include <vespa/vespalib/btree/btreeroot.hpp>
-#include <vespa/vespalib/datastore/datastore.hpp>
#include <vespa/vespalib/datastore/sharded_hash_map.h>
#include <vespa/vespalib/datastore/unique_store_dictionary.hpp>
#include <vespa/searchlib/util/bufferwriter.h>
@@ -15,7 +10,6 @@
#include <vespa/log/log.h>
LOG_SETUP(".searchlib.attribute.enum_store_dictionary");
-using vespalib::datastore::EntryComparator;
using vespalib::datastore::EntryRef;
using vespalib::datastore::UniqueStoreAddResult;
@@ -25,12 +19,8 @@ using vespalib::btree::BTreeNode;
template <typename BTreeDictionaryT, typename HashDictionaryT>
void
-EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::remove_unused_values(const IndexSet& unused,
- const vespalib::datastore::EntryComparator& cmp)
+EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::remove_unused_values(const IndexList & unused,const EntryComparator& cmp)
{
- if (unused.empty()) {
- return;
- }
for (const auto& ref : unused) {
this->remove(cmp, ref);
}
@@ -48,9 +38,9 @@ EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::~EnumStoreDictionary() =
template <typename BTreeDictionaryT, typename HashDictionaryT>
void
-EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::free_unused_values(const vespalib::datastore::EntryComparator& cmp)
+EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::free_unused_values(const EntryComparator& cmp)
{
- IndexSet unused;
+ IndexList unused;
// find unused enums
if constexpr (has_btree_dictionary) {
@@ -58,19 +48,26 @@ EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::free_unused_values(const
_enumStore.free_value_if_unused(iter.getKey(), unused);
}
} else {
- this->_hash_dict.foreach_key([this, &unused](EntryRef ref) { _enumStore.free_value_if_unused(ref, unused); });
+ this->_hash_dict.foreach_key([this, &unused](EntryRef ref) {
+ _enumStore.free_value_if_unused(ref, unused);
+ });
}
remove_unused_values(unused, cmp);
}
template <typename BTreeDictionaryT, typename HashDictionaryT>
void
-EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::free_unused_values(const IndexSet& to_remove,
- const vespalib::datastore::EntryComparator& cmp)
+EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::free_unused_values(const IndexList& to_remove, const EntryComparator& cmp)
{
- IndexSet unused;
+ IndexList unused;
+
+ EntryRef prev;
for (const auto& index : to_remove) {
- _enumStore.free_value_if_unused(index, unused);
+ assert(prev <= index);
+ if (index != prev) {
+ _enumStore.free_value_if_unused(index, unused);
+ prev = index;
+ }
}
remove_unused_values(unused, cmp);
}
@@ -96,8 +93,7 @@ EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::remove(const EntryCompar
template <typename BTreeDictionaryT, typename HashDictionaryT>
bool
-EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::find_index(const vespalib::datastore::EntryComparator& cmp,
- Index& idx) const
+EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::find_index(const EntryComparator& cmp, Index& idx) const
{
if constexpr (has_hash_dictionary) {
auto find_result = this->_hash_dict.find(cmp, EntryRef());
@@ -118,8 +114,7 @@ EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::find_index(const vespali
template <typename BTreeDictionaryT, typename HashDictionaryT>
bool
-EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::find_frozen_index(const vespalib::datastore::EntryComparator& cmp,
- Index& idx) const
+EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::find_frozen_index(const EntryComparator& cmp, Index& idx) const
{
if constexpr (has_hash_dictionary) {
auto find_result = this->_hash_dict.find(cmp, EntryRef());
@@ -140,7 +135,7 @@ EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::find_frozen_index(const
template <typename BTreeDictionaryT, typename HashDictionaryT>
std::vector<IEnumStore::EnumHandle>
-EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::find_matching_enums(const vespalib::datastore::EntryComparator& cmp) const
+EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::find_matching_enums(const EntryComparator& cmp) const
{
std::vector<IEnumStore::EnumHandle> result;
if constexpr (has_btree_dictionary) {
@@ -171,14 +166,14 @@ EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::get_frozen_root() const
template <>
std::pair<IEnumStore::Index, EntryRef>
-EnumStoreDictionary<EnumTree>::find_posting_list(const vespalib::datastore::EntryComparator&, EntryRef) const
+EnumStoreDictionary<EnumTree>::find_posting_list(const EntryComparator&, EntryRef) const
{
LOG_ABORT("should not be reached");
}
template <typename BTreeDictionaryT, typename HashDictionaryT>
std::pair<IEnumStore::Index, EntryRef>
-EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::find_posting_list(const vespalib::datastore::EntryComparator& cmp, EntryRef root) const
+EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::find_posting_list(const EntryComparator& cmp, EntryRef root) const
{
if constexpr (has_hash_dictionary) {
(void) root;
@@ -199,7 +194,7 @@ EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::find_posting_list(const
template <typename BTreeDictionaryT, typename HashDictionaryT>
void
-EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::collect_folded(Index idx, EntryRef, const std::function<void(vespalib::datastore::EntryRef)>& callback) const
+EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::collect_folded(Index idx, EntryRef, const std::function<void(EntryRef)>& callback) const
{
callback(idx);
}
@@ -244,14 +239,14 @@ EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::clear_all_posting_lists(
template <>
void
-EnumStoreDictionary<EnumTree>::update_posting_list(Index, const vespalib::datastore::EntryComparator&, std::function<EntryRef(EntryRef)>)
+EnumStoreDictionary<EnumTree>::update_posting_list(Index, const EntryComparator&, std::function<EntryRef(EntryRef)>)
{
LOG_ABORT("should not be reached");
}
template <typename BTreeDictionaryT, typename HashDictionaryT>
void
-EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::update_posting_list(Index idx, const vespalib::datastore::EntryComparator& cmp, std::function<EntryRef(EntryRef)> updater)
+EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::update_posting_list(Index idx, const EntryComparator& cmp, std::function<EntryRef(EntryRef)> updater)
{
if constexpr (has_btree_dictionary) {
auto& dict = this->_btree_dict;
@@ -336,7 +331,7 @@ EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::get_posting_dictionary()
return this->_btree_dict;
}
-EnumStoreFoldedDictionary::EnumStoreFoldedDictionary(IEnumStore& enumStore, std::unique_ptr<vespalib::datastore::EntryComparator> compare, std::unique_ptr<EntryComparator> folded_compare)
+EnumStoreFoldedDictionary::EnumStoreFoldedDictionary(IEnumStore& enumStore, std::unique_ptr<EntryComparator> compare, std::unique_ptr<EntryComparator> folded_compare)
: EnumStoreDictionary<EnumPostingTree>(enumStore, std::move(compare)),
_folded_compare(std::move(folded_compare))
{
@@ -389,7 +384,7 @@ EnumStoreFoldedDictionary::remove(const EntryComparator& comp, EntryRef ref)
}
void
-EnumStoreFoldedDictionary::collect_folded(Index idx, EntryRef root, const std::function<void(vespalib::datastore::EntryRef)>& callback) const
+EnumStoreFoldedDictionary::collect_folded(Index idx, EntryRef root, const std::function<void(EntryRef)>& callback) const
{
BTreeDictionaryType::ConstIterator itr(vespalib::btree::BTreeNode::Ref(), _btree_dict.getAllocator());
itr.lower_bound(root, idx, *_folded_compare);
@@ -421,6 +416,7 @@ namespace vespalib::btree {
using search::IEnumStore;
using search::EnumTreeTraits;
+using datastore::EntryComparatorWrapper;
template
class BTreeNodeT<IEnumStore::Index, EnumTreeTraits::INTERNAL_SLOTS>;
@@ -456,19 +452,19 @@ class BTreeNodeStore<IEnumStore::Index, uint32_t, NoAggregated,
template
class BTreeRoot<IEnumStore::Index, BTreeNoLeafData, NoAggregated,
- const vespalib::datastore::EntryComparatorWrapper, EnumTreeTraits>;
+ const EntryComparatorWrapper, EnumTreeTraits>;
template
class BTreeRoot<IEnumStore::Index, uint32_t, NoAggregated,
- const vespalib::datastore::EntryComparatorWrapper, EnumTreeTraits>;
+ const EntryComparatorWrapper, EnumTreeTraits>;
template
class BTreeRootT<IEnumStore::Index, BTreeNoLeafData, NoAggregated,
- const vespalib::datastore::EntryComparatorWrapper, EnumTreeTraits>;
+ const EntryComparatorWrapper, EnumTreeTraits>;
template
class BTreeRootT<IEnumStore::Index, uint32_t, NoAggregated,
- const vespalib::datastore::EntryComparatorWrapper, EnumTreeTraits>;
+ const EntryComparatorWrapper, EnumTreeTraits>;
template
class BTreeRootBase<IEnumStore::Index, BTreeNoLeafData, NoAggregated,
@@ -494,23 +490,23 @@ class BTreeIteratorBase<IEnumStore::Index, uint32_t, NoAggregated,
EnumTreeTraits::INTERNAL_SLOTS, EnumTreeTraits::LEAF_SLOTS, EnumTreeTraits::PATH_SIZE>;
template class BTreeConstIterator<IEnumStore::Index, BTreeNoLeafData, NoAggregated,
- const vespalib::datastore::EntryComparatorWrapper, EnumTreeTraits>;
+ const EntryComparatorWrapper, EnumTreeTraits>;
template class BTreeConstIterator<IEnumStore::Index, uint32_t, NoAggregated,
- const vespalib::datastore::EntryComparatorWrapper, EnumTreeTraits>;
+ const EntryComparatorWrapper, EnumTreeTraits>;
template
class BTreeIterator<IEnumStore::Index, BTreeNoLeafData, NoAggregated,
- const vespalib::datastore::EntryComparatorWrapper, EnumTreeTraits>;
+ const EntryComparatorWrapper, EnumTreeTraits>;
template
class BTreeIterator<IEnumStore::Index, uint32_t, NoAggregated,
- const vespalib::datastore::EntryComparatorWrapper, EnumTreeTraits>;
+ const EntryComparatorWrapper, EnumTreeTraits>;
template
class BTree<IEnumStore::Index, BTreeNoLeafData, NoAggregated,
- const vespalib::datastore::EntryComparatorWrapper, EnumTreeTraits>;
+ const EntryComparatorWrapper, EnumTreeTraits>;
template
class BTree<IEnumStore::Index, uint32_t, NoAggregated,
- const vespalib::datastore::EntryComparatorWrapper, EnumTreeTraits>;
+ const EntryComparatorWrapper, EnumTreeTraits>;
}
diff --git a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h
index a39ff524618..3626fb098d2 100644
--- a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h
+++ b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h
@@ -18,9 +18,10 @@ protected:
using EntryRef = IEnumStoreDictionary::EntryRef;
using Index = IEnumStoreDictionary::Index;
using BTreeDictionaryType = BTreeDictionaryT;
+ using EntryComparator = IEnumStoreDictionary::EntryComparator;
private:
using EnumVector = IEnumStoreDictionary::EnumVector;
- using IndexSet = IEnumStoreDictionary::IndexSet;
+ using IndexList = IEnumStoreDictionary::IndexList;
using IndexVector = IEnumStoreDictionary::IndexVector;
using ParentUniqueStoreDictionary = vespalib::datastore::UniqueStoreDictionary<BTreeDictionaryT, IEnumStoreDictionary, HashDictionaryT>;
using generation_t = IEnumStoreDictionary::generation_t;
@@ -30,31 +31,28 @@ protected:
private:
IEnumStore& _enumStore;
- void remove_unused_values(const IndexSet& unused,
- const vespalib::datastore::EntryComparator& cmp);
+ void remove_unused_values(const IndexList& unused, const EntryComparator& cmp);
public:
- EnumStoreDictionary(IEnumStore& enumStore, std::unique_ptr<vespalib::datastore::EntryComparator> compare);
+ EnumStoreDictionary(IEnumStore& enumStore, std::unique_ptr<EntryComparator> compare);
~EnumStoreDictionary() override;
- void free_unused_values(const vespalib::datastore::EntryComparator& cmp) override;
+ void free_unused_values(const EntryComparator& cmp) override;
+ void free_unused_values(const IndexList& to_remove, const EntryComparator& cmp) override;
- void free_unused_values(const IndexSet& to_remove,
- const vespalib::datastore::EntryComparator& cmp) override;
-
- void remove(const vespalib::datastore::EntryComparator& comp, vespalib::datastore::EntryRef ref) override;
- bool find_index(const vespalib::datastore::EntryComparator& cmp, Index& idx) const override;
- bool find_frozen_index(const vespalib::datastore::EntryComparator& cmp, Index& idx) const override;
+ void remove(const EntryComparator& comp, EntryRef ref) override;
+ bool find_index(const EntryComparator& cmp, Index& idx) const override;
+ bool find_frozen_index(const EntryComparator& cmp, Index& idx) const override;
std::vector<attribute::IAttributeVector::EnumHandle>
- find_matching_enums(const vespalib::datastore::EntryComparator& cmp) const override;
+ find_matching_enums(const EntryComparator& cmp) const override;
EntryRef get_frozen_root() const override;
- std::pair<Index, EntryRef> find_posting_list(const vespalib::datastore::EntryComparator& cmp, EntryRef root) const override;
- void collect_folded(Index idx, EntryRef root, const std::function<void(vespalib::datastore::EntryRef)>& callback) const override;
+ std::pair<Index, EntryRef> find_posting_list(const EntryComparator& cmp, EntryRef root) const override;
+ void collect_folded(Index idx, EntryRef root, const std::function<void(EntryRef)>& callback) const override;
Index remap_index(Index idx) override;
void clear_all_posting_lists(std::function<void(EntryRef)> clearer) override;
- void update_posting_list(Index idx, const vespalib::datastore::EntryComparator& cmp, std::function<EntryRef(EntryRef)> updater) override;
+ void update_posting_list(Index idx, const EntryComparator& cmp, std::function<EntryRef(EntryRef)> updater) override;
bool normalize_posting_lists(std::function<EntryRef(EntryRef)> normalize) override;
const EnumPostingTree& get_posting_dictionary() const override;
};
@@ -71,14 +69,14 @@ public:
class EnumStoreFoldedDictionary : public EnumStoreDictionary<EnumPostingTree>
{
private:
- std::unique_ptr<vespalib::datastore::EntryComparator> _folded_compare;
+ std::unique_ptr<EntryComparator> _folded_compare;
public:
- EnumStoreFoldedDictionary(IEnumStore& enumStore, std::unique_ptr<vespalib::datastore::EntryComparator> compare, std::unique_ptr<vespalib::datastore::EntryComparator> folded_compare);
+ EnumStoreFoldedDictionary(IEnumStore& enumStore, std::unique_ptr<EntryComparator> compare, std::unique_ptr<EntryComparator> folded_compare);
~EnumStoreFoldedDictionary() override;
- vespalib::datastore::UniqueStoreAddResult add(const vespalib::datastore::EntryComparator& comp, std::function<vespalib::datastore::EntryRef(void)> insertEntry) override;
- void remove(const vespalib::datastore::EntryComparator& comp, vespalib::datastore::EntryRef ref) override;
- void collect_folded(Index idx, EntryRef root, const std::function<void(vespalib::datastore::EntryRef)>& callback) const override;
+ vespalib::datastore::UniqueStoreAddResult add(const EntryComparator& comp, std::function<EntryRef(void)> insertEntry) override;
+ void remove(const EntryComparator& comp, EntryRef ref) override;
+ void collect_folded(Index idx, EntryRef root, const std::function<void(EntryRef)>& callback) const override;
Index remap_index(Index idx) override;
};
diff --git a/searchlib/src/vespa/searchlib/attribute/enumstore.h b/searchlib/src/vespa/searchlib/attribute/enumstore.h
index 326e0916039..59d77ea0558 100644
--- a/searchlib/src/vespa/searchlib/attribute/enumstore.h
+++ b/searchlib/src/vespa/searchlib/attribute/enumstore.h
@@ -63,7 +63,7 @@ private:
EnumStoreT(const EnumStoreT & rhs) = delete;
EnumStoreT & operator=(const EnumStoreT & rhs) = delete;
- void free_value_if_unused(Index idx, IndexSet &unused) override;
+ void free_value_if_unused(Index idx, IndexList &unused) override;
const vespalib::datastore::UniqueStoreEntryBase& get_entry_base(Index idx) const {
return _store.get_allocator().get_wrapped(idx);
@@ -153,7 +153,7 @@ public:
class BatchUpdater {
private:
EnumStoreType& _store;
- IndexSet _possibly_unused;
+ IndexList _possibly_unused;
public:
BatchUpdater(EnumStoreType& store)
@@ -168,11 +168,11 @@ public:
auto& entry = _store.get_entry_base(idx);
entry.dec_ref_count();
if (entry.get_ref_count() == 0) {
- _possibly_unused.insert(idx);
+ _possibly_unused.push_back(idx);
}
}
void commit() {
- _store.free_unused_values(_possibly_unused);
+ _store.free_unused_values(std::move(_possibly_unused));
}
};
@@ -198,7 +198,7 @@ public:
Index insert(EntryType value);
bool find_index(EntryType value, Index& idx) const;
void free_unused_values() override;
- void free_unused_values(const IndexSet& to_remove);
+ void free_unused_values(IndexList to_remove);
vespalib::MemoryUsage update_stat() override;
std::unique_ptr<EnumIndexRemapper> consider_compact_values(const CompactionStrategy& compaction_strategy) override;
std::unique_ptr<EnumIndexRemapper> compact_worst_values(bool compact_memory, bool compact_address_space) override;
diff --git a/searchlib/src/vespa/searchlib/attribute/enumstore.hpp b/searchlib/src/vespa/searchlib/attribute/enumstore.hpp
index 9885613f4e3..771da8ffa01 100644
--- a/searchlib/src/vespa/searchlib/attribute/enumstore.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/enumstore.hpp
@@ -30,11 +30,11 @@ make_enum_store_dictionary(IEnumStore &store, bool has_postings, const search::D
std::unique_ptr<EntryComparator> folded_compare);
template <typename EntryT>
-void EnumStoreT<EntryT>::free_value_if_unused(Index idx, IndexSet& unused)
+void EnumStoreT<EntryT>::free_value_if_unused(Index idx, IndexList& unused)
{
const auto& entry = get_entry_base(idx);
if (entry.get_ref_count() == 0) {
- unused.insert(idx);
+ unused.push_back(idx);
_store.get_allocator().hold(idx);
}
}
@@ -140,7 +140,7 @@ EnumStoreT<EntryT>::BatchUpdater::insert(EntryType value)
auto cmp = _store.make_comparator(value);
auto result = _store._dict->add(cmp, [this, &value]() -> EntryRef { return _store._store.get_allocator().allocate(value); });
if (result.inserted()) {
- _possibly_unused.insert(result.ref());
+ _possibly_unused.push_back(result.ref());
}
return result.ref();
}
@@ -191,8 +191,16 @@ EnumStoreT<EntryT>::free_unused_values()
template <typename EntryT>
void
-EnumStoreT<EntryT>::free_unused_values(const IndexSet& to_remove)
+EnumStoreT<EntryT>::free_unused_values(IndexList to_remove)
{
+ struct CompareEnumIndex {
+ using Index = IEnumStore::Index;
+
+ bool operator()(const Index &lhs, const Index &rhs) const {
+ return lhs.ref() < rhs.ref();
+ }
+ };
+ std::sort(to_remove.begin(), to_remove.end(), CompareEnumIndex());
_dict->free_unused_values(to_remove, get_comparator());
}
diff --git a/searchlib/src/vespa/searchlib/attribute/i_enum_store.h b/searchlib/src/vespa/searchlib/attribute/i_enum_store.h
index 6d714ec25ba..716609764f4 100644
--- a/searchlib/src/vespa/searchlib/attribute/i_enum_store.h
+++ b/searchlib/src/vespa/searchlib/attribute/i_enum_store.h
@@ -40,22 +40,14 @@ public:
using EnumIndexRemapper = vespalib::datastore::UniqueStoreRemapper<InternalIndex>;
using Enumerator = vespalib::datastore::UniqueStoreEnumerator<IEnumStore::InternalIndex>;
- struct CompareEnumIndex {
- using Index = IEnumStore::Index;
-
- bool operator()(const Index &lhs, const Index &rhs) const {
- return lhs.ref() < rhs.ref();
- }
- };
-
- using IndexSet = std::set<Index, CompareEnumIndex>;
+ using IndexList = std::vector<Index>;
virtual ~IEnumStore() = default;
virtual void write_value(BufferWriter& writer, Index idx) const = 0;
virtual ssize_t load_unique_values(const void* src, size_t available, IndexVector& idx) = 0;
virtual void set_ref_count(Index idx, uint32_t ref_count) = 0;
- virtual void free_value_if_unused(Index idx, IndexSet& unused) = 0;
+ virtual void free_value_if_unused(Index idx, IndexList& unused) = 0;
virtual void free_unused_values() = 0;
virtual bool is_folded_change(Index idx1, Index idx2) const = 0;
virtual IEnumStoreDictionary& get_dictionary() = 0;
diff --git a/searchlib/src/vespa/searchlib/attribute/i_enum_store_dictionary.h b/searchlib/src/vespa/searchlib/attribute/i_enum_store_dictionary.h
index f816177b06c..bef7384b0b7 100644
--- a/searchlib/src/vespa/searchlib/attribute/i_enum_store_dictionary.h
+++ b/searchlib/src/vespa/searchlib/attribute/i_enum_store_dictionary.h
@@ -29,29 +29,29 @@ using EnumPostingTree = vespalib::btree::BTree<IEnumStore::Index, uint32_t,
class IEnumStoreDictionary : public vespalib::datastore::IUniqueStoreDictionary {
public:
using EntryRef = vespalib::datastore::EntryRef;
+ using EntryComparator = vespalib::datastore::EntryComparator;
using EnumVector = IEnumStore::EnumVector;
using Index = IEnumStore::Index;
- using IndexSet = IEnumStore::IndexSet;
+ using IndexList = IEnumStore::IndexList;
using IndexVector = IEnumStore::IndexVector;
using generation_t = vespalib::GenerationHandler::generation_t;
public:
virtual ~IEnumStoreDictionary() = default;
- virtual void free_unused_values(const vespalib::datastore::EntryComparator& cmp) = 0;
- virtual void free_unused_values(const IndexSet& to_remove,
- const vespalib::datastore::EntryComparator& cmp) = 0;
- virtual bool find_index(const vespalib::datastore::EntryComparator& cmp, Index& idx) const = 0;
- virtual bool find_frozen_index(const vespalib::datastore::EntryComparator& cmp, Index& idx) const = 0;
+ virtual void free_unused_values(const EntryComparator& cmp) = 0;
+ virtual void free_unused_values(const IndexList& to_remove, const EntryComparator& cmp) = 0;
+ virtual bool find_index(const EntryComparator& cmp, Index& idx) const = 0;
+ virtual bool find_frozen_index(const EntryComparator& cmp, Index& idx) const = 0;
virtual std::vector<attribute::IAttributeVector::EnumHandle>
- find_matching_enums(const vespalib::datastore::EntryComparator& cmp) const = 0;
+ find_matching_enums(const EntryComparator& cmp) const = 0;
virtual EntryRef get_frozen_root() const = 0;
- virtual std::pair<Index, EntryRef> find_posting_list(const vespalib::datastore::EntryComparator& cmp, EntryRef root) const = 0;
- virtual void collect_folded(Index idx, EntryRef root, const std::function<void(vespalib::datastore::EntryRef)>& callback) const = 0;
+ virtual std::pair<Index, EntryRef> find_posting_list(const EntryComparator& cmp, EntryRef root) const = 0;
+ virtual void collect_folded(Index idx, EntryRef root, const std::function<void(EntryRef)>& callback) const = 0;
virtual Index remap_index(Index idx) = 0;
virtual void clear_all_posting_lists(std::function<void(EntryRef)> clearer) = 0;
- virtual void update_posting_list(Index idx, const vespalib::datastore::EntryComparator& cmp, std::function<EntryRef(EntryRef)> updater) = 0;
+ virtual void update_posting_list(Index idx, const EntryComparator& cmp, std::function<EntryRef(EntryRef)> updater) = 0;
virtual bool normalize_posting_lists(std::function<EntryRef(EntryRef)> normalize) = 0;
virtual const EnumPostingTree& get_posting_dictionary() const = 0;
};
diff --git a/searchlib/src/vespa/searchlib/attribute/postingstore.cpp b/searchlib/src/vespa/searchlib/attribute/postingstore.cpp
index 6c62e650345..477917debf0 100644
--- a/searchlib/src/vespa/searchlib/attribute/postingstore.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/postingstore.cpp
@@ -696,7 +696,10 @@ PostingStore<DataT>::move(EntryRef ref)
if (!_store.getCompacting(ref)) {
return ref;
}
- return allocBitVectorCopy(*bve).ref;
+ auto new_ref = allocBitVectorCopy(*bve).ref;
+ _bvs.erase(ref.ref());
+ _bvs.insert(new_ref.ref());
+ return new_ref;
} else {
if (!_store.getCompacting(ref)) {
return ref;
diff --git a/searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp b/searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp
index 7913e617d70..555117126a9 100644
--- a/searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp
@@ -26,7 +26,8 @@ constexpr uint8_t MAX_MIN_FEATURE = 255;
constexpr uint16_t MAX_INTERVAL_RANGE = static_cast<uint16_t>(predicate::MAX_INTERVAL);
-int64_t adjustBound(int32_t arity, int64_t bound) {
+int64_t
+adjustBound(int32_t arity, int64_t bound) {
int64_t adjusted = arity;
int64_t value = bound;
int64_t max = LLONG_MAX / arity;
@@ -39,7 +40,8 @@ int64_t adjustBound(int32_t arity, int64_t bound) {
return adjusted - 1;
}
-int64_t adjustLowerBound(int32_t arity, int64_t lower_bound) {
+int64_t
+adjustLowerBound(int32_t arity, int64_t lower_bound) {
if (lower_bound == LLONG_MIN) {
return lower_bound;
} else if (lower_bound > 0) {
@@ -49,7 +51,8 @@ int64_t adjustLowerBound(int32_t arity, int64_t lower_bound) {
}
}
-int64_t adjustUpperBound(int32_t arity, int64_t upper_bound) {
+int64_t
+adjustUpperBound(int32_t arity, int64_t upper_bound) {
if (upper_bound == LLONG_MAX) {
return upper_bound;
} else if (upper_bound < 0) {
@@ -66,13 +69,11 @@ SimpleIndexConfig createSimpleIndexConfig(const search::attribute::Config &confi
} // namespace
-PredicateAttribute::PredicateAttribute(const vespalib::string &base_file_name,
- const Config &config)
+PredicateAttribute::PredicateAttribute(const vespalib::string &base_file_name, const Config &config)
: NotImplementedAttribute(base_file_name, config),
- _base_file_name(base_file_name),
_limit_provider(*this),
- _index(new PredicateIndex(getGenerationHandler(), getGenerationHolder(),
- _limit_provider, createSimpleIndexConfig(config), config.predicateParams().arity())),
+ _index(std::make_unique<PredicateIndex>(getGenerationHolder(), _limit_provider,
+ createSimpleIndexConfig(config), config.predicateParams().arity())),
_lower_bound(adjustLowerBound(config.predicateParams().arity(), config.predicateParams().lower_bound())),
_upper_bound(adjustUpperBound(config.predicateParams().arity(), config.predicateParams().upper_bound())),
_min_feature(config.getGrowStrategy().to_generic_strategy(), getGenerationHolder()),
@@ -99,8 +100,8 @@ PredicateAttribute::getValueCount(DocId) const
void
PredicateAttribute::onCommit()
{
- populateIfNeeded();
_index->commit();
+ populateIfNeeded();
incGeneration();
}
@@ -183,7 +184,8 @@ struct DummyObserver : SimpleIndexDeserializeObserver<> {
}
-bool PredicateAttribute::onLoad()
+bool
+PredicateAttribute::onLoad()
{
auto loaded_buffer = attribute::LoadUtils::loadDAT(*this);
char *rawBuffer = const_cast<char *>(static_cast<const char *>(loaded_buffer->buffer()));
@@ -202,12 +204,12 @@ bool PredicateAttribute::onLoad()
DocId highest_doc_id;
if (version == 0) {
DocIdLimitFinderAndMinFeatureFiller<MinFeatureVector> observer(_min_feature, *_index);
- _index = std::make_unique<PredicateIndex>(getGenerationHandler(), getGenerationHolder(), _limit_provider,
+ _index = std::make_unique<PredicateIndex>(getGenerationHolder(), _limit_provider,
createSimpleIndexConfig(getConfig()), buffer, observer, 0);
highest_doc_id = observer._highest_doc_id;
} else {
DummyObserver observer;
- _index = std::make_unique<PredicateIndex>(getGenerationHandler(), getGenerationHolder(), _limit_provider,
+ _index = std::make_unique<PredicateIndex>(getGenerationHolder(), _limit_provider,
createSimpleIndexConfig(getConfig()), buffer, observer, version);
highest_doc_id = buffer.readInt32();
// Deserialize min feature vector
@@ -240,6 +242,7 @@ PredicateAttribute::addDoc(DocId &doc_id)
_min_feature.ensure_size(doc_id + 1);
return true;
}
+
uint32_t
PredicateAttribute::clearDoc(DocId doc_id)
{
diff --git a/searchlib/src/vespa/searchlib/attribute/predicate_attribute.h b/searchlib/src/vespa/searchlib/attribute/predicate_attribute.h
index 6e3f0c4399f..4d7fd3c235b 100644
--- a/searchlib/src/vespa/searchlib/attribute/predicate_attribute.h
+++ b/searchlib/src/vespa/searchlib/attribute/predicate_attribute.h
@@ -80,7 +80,6 @@ public:
void populateIfNeeded();
private:
- vespalib::string _base_file_name;
const AttributeVectorDocIdLimitProvider _limit_provider;
std::unique_ptr<predicate::PredicateIndex> _index;
int64_t _lower_bound;
diff --git a/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp b/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp
index bf75400b157..a9a94afb763 100644
--- a/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp
@@ -312,9 +312,9 @@ SingleValueEnumAttribute<B>::onShrinkLidSpace()
uint32_t default_value_ref_count = this->_enumStore.get_ref_count(default_value_ref);
assert(default_value_ref_count >= shrink_docs);
this->_enumStore.set_ref_count(default_value_ref, default_value_ref_count - shrink_docs);
- IEnumStore::IndexSet possibly_unused;
- possibly_unused.insert(default_value_ref);
- this->_enumStore.free_unused_values(possibly_unused);
+ IEnumStore::IndexList possibly_unused;
+ possibly_unused.push_back(default_value_ref);
+ this->_enumStore.free_unused_values(std::move(possibly_unused));
}
_enumIndices.shrink(committedDocIdLimit);
this->setNumDocs(committedDocIdLimit);
diff --git a/searchlib/src/vespa/searchlib/common/bitvectorcache.cpp b/searchlib/src/vespa/searchlib/common/bitvectorcache.cpp
index 81f01de0c33..8cfab1f64cf 100644
--- a/searchlib/src/vespa/searchlib/common/bitvectorcache.cpp
+++ b/searchlib/src/vespa/searchlib/common/bitvectorcache.cpp
@@ -87,10 +87,10 @@ BitVectorCache::lookupCachedSet(const KeyAndCountSet & keys)
BitVectorCache::SortedKeyMeta
BitVectorCache::getSorted(Key2Index & keys)
{
- std::vector<std::pair<Key, KeyMeta *>> sorted;
+ SortedKeyMeta sorted;
sorted.reserve(keys.size());
for (auto & e : keys) {
- sorted.push_back({e.first, &e.second});
+ sorted.emplace_back(e.first, &e.second);
}
std::sort(sorted.begin(), sorted.end(),
[&] (const auto & a, const auto & b) {
diff --git a/searchlib/src/vespa/searchlib/common/bitvectorcache.h b/searchlib/src/vespa/searchlib/common/bitvectorcache.h
index c1415d9130f..a642d66f42f 100644
--- a/searchlib/src/vespa/searchlib/common/bitvectorcache.h
+++ b/searchlib/src/vespa/searchlib/common/bitvectorcache.h
@@ -3,6 +3,7 @@
#include "condensedbitvectors.h"
#include <vespa/vespalib/stllike/hash_set.h>
+#include <vespa/vespalib/stllike/hash_map.h>
#include <vespa/fastos/dynamiclibrary.h>
#include <mutex>
@@ -40,6 +41,7 @@ public:
void adjustDocIdLimit(uint32_t docId);
void populate(uint32_t count, const PopulateInterface &);
bool needPopulation() const { return _needPopulation; }
+ void requirePopulation() { _needPopulation = true; }
private:
class KeyMeta {
public:
@@ -76,12 +78,12 @@ private:
VESPA_DLL_LOCAL static void populate(Key2Index & newKeys, CondensedBitVector & chunk, const PopulateInterface & lookup);
VESPA_DLL_LOCAL bool hasCostChanged(const std::lock_guard<std::mutex> &);
- uint64_t _lookupCount;
- bool _needPopulation;
+ uint64_t _lookupCount;
+ bool _needPopulation;
mutable std::mutex _lock;
- Key2Index _keys;
- ChunkV _chunks;
- GenerationHolder &_genHolder;
+ Key2Index _keys;
+ ChunkV _chunks;
+ GenerationHolder &_genHolder;
};
}
diff --git a/searchlib/src/vespa/searchlib/common/condensedbitvectors.cpp b/searchlib/src/vespa/searchlib/common/condensedbitvectors.cpp
index d6efc4fddc2..50b971f499f 100644
--- a/searchlib/src/vespa/searchlib/common/condensedbitvectors.cpp
+++ b/searchlib/src/vespa/searchlib/common/condensedbitvectors.cpp
@@ -129,9 +129,7 @@ void throwIllegalKey(size_t numKeys, size_t key)
}
-CondensedBitVector::~CondensedBitVector()
-{
-}
+CondensedBitVector::~CondensedBitVector() = default;
void
CondensedBitVector::addKey(Key key) const
@@ -144,7 +142,7 @@ CondensedBitVector::addKey(Key key) const
CondensedBitVector::UP
CondensedBitVector::create(size_t size, GenerationHolder &genHolder)
{
- return UP(new CondensedBitVectorT<uint32_t>(size, genHolder));
+ return std::make_unique<CondensedBitVectorT<uint32_t>>(size, genHolder);
}
}
diff --git a/searchlib/src/vespa/searchlib/common/condensedbitvectors.h b/searchlib/src/vespa/searchlib/common/condensedbitvectors.h
index 4bda29894cc..02355a61e40 100644
--- a/searchlib/src/vespa/searchlib/common/condensedbitvectors.h
+++ b/searchlib/src/vespa/searchlib/common/condensedbitvectors.h
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <vespa/vespalib/stllike/hash_map.h>
#include <vespa/vespalib/util/generationholder.h>
#include <vespa/vespalib/util/arrayref.h>
#include <set>
@@ -31,9 +30,6 @@ public:
bool hasKey(Key key) const { return key < getKeyCapacity(); }
void addKey(Key key) const;
static CondensedBitVector::UP create(size_t size, vespalib::GenerationHolder &genHolder);
-private:
- typedef vespalib::hash_map<Key, uint32_t> Key2Index;
- Key2Index _keys;
};
}
diff --git a/searchlib/src/vespa/searchlib/common/indexmetainfo.cpp b/searchlib/src/vespa/searchlib/common/indexmetainfo.cpp
index 837c38eb340..25bc754a86f 100644
--- a/searchlib/src/vespa/searchlib/common/indexmetainfo.cpp
+++ b/searchlib/src/vespa/searchlib/common/indexmetainfo.cpp
@@ -5,7 +5,6 @@
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/util/guard.h>
#include <cassert>
-#include <algorithm>
#include <vespa/log/log.h>
LOG_SETUP(".indexmetainfo");
@@ -14,13 +13,13 @@ namespace {
class Parser {
private:
- vespalib::string _name;
+ vespalib::string _name;
vespalib::FilePointer _file;
uint32_t _line;
char _buf[2048];
bool _error;
- vespalib::string _lastKey;
- vespalib::string _lastValue;
+ vespalib::string _lastKey;
+ vespalib::string _lastValue;
uint32_t _lastIdx;
bool _matched;
@@ -44,8 +43,7 @@ public:
return false;
}
bool illegalLine() {
- LOG(warning, "%s:%d: illegal line: %s",
- _name.c_str(), _line, _buf);
+ LOG(warning, "%s:%d: illegal line: %s", _name.c_str(), _line, _buf);
_error = true;
return false;
}
@@ -57,8 +55,7 @@ public:
}
bool illegalValue() {
LOG(warning, "%s:%d: illegal value for '%s': %s",
- _name.c_str(), _line, _lastKey.c_str(),
- _lastValue.c_str());
+ _name.c_str(), _line, _lastKey.c_str(), _lastValue.c_str());
_error = true;
return false;
}
@@ -79,7 +76,7 @@ public:
if (!_file.valid()) {
return openFailed();
}
- if (fgets(_buf, sizeof(_buf), _file) == NULL) {
+ if (fgets(_buf, sizeof(_buf), _file) == nullptr) {
return false; // EOF
}
++_line;
@@ -88,7 +85,7 @@ public:
_buf[--len] = '\0';
}
char *split = strchr(_buf, '=');
- if (split == NULL || (split - _buf) == 0) {
+ if (split == nullptr || (split - _buf) == 0) {
return illegalLine();
}
_lastKey = vespalib::string(_buf, split - _buf);
@@ -119,9 +116,9 @@ public:
void parseInt64(const vespalib::string &k, uint64_t &v) {
if (!_matched && !_error && _lastKey == k) {
_matched = true;
- char *end = NULL;
+ char *end = nullptr;
uint64_t val = strtoull(_lastValue.c_str(), &end, 10);
- if (end == NULL || *end != '\0' ||
+ if (end == nullptr || *end != '\0' ||
val == static_cast<uint64_t>(-1)) {
illegalValue();
return;
@@ -141,10 +138,10 @@ public:
if (dot2 == vespalib::string::npos) {
return illegalArrayKey();
}
- char *end = NULL;
+ char *end = nullptr;
const char *pt = _lastKey.c_str() + name.length() + 1;
uint32_t val = strtoul(pt, &end, 10);
- if (end == NULL || end == pt || *end != '.'
+ if (end == nullptr || end == pt || *end != '.'
|| val > size || size > val + 1)
{
return illegalArrayKey();
@@ -200,7 +197,7 @@ IndexMetaInfo::IndexMetaInfo(const vespalib::string &path)
{
}
-IndexMetaInfo::~IndexMetaInfo() {}
+IndexMetaInfo::~IndexMetaInfo() = default;
IndexMetaInfo::Snapshot
IndexMetaInfo::getBestSnapshot() const
@@ -209,11 +206,7 @@ IndexMetaInfo::getBestSnapshot() const
while (idx >= 0 && !_snapshots[idx].valid) {
--idx;
}
- if (idx >= 0) {
- return _snapshots[idx];
- } else {
- return Snapshot();
- }
+ return (idx >= 0) ? _snapshots[idx] : Snapshot();
}
@@ -233,7 +226,7 @@ bool
IndexMetaInfo::addSnapshot(const Snapshot &snap)
{
if (snap.dirName.empty()
- || findSnapshot(snap.syncToken) != _snapshots.end())
+ || (findSnapshot(snap.syncToken) != _snapshots.end()))
{
return false;
}
@@ -324,32 +317,23 @@ IndexMetaInfo::save(const vespalib::string &baseName)
fprintf(f, "snapshot.%d.dirName=%s\n", i, snap.dirName.c_str());
}
if (ferror(f) != 0) {
- LOG(error,
- "Could not write to file %s",
- newName.c_str());
+ LOG(error, "Could not write to file %s", newName.c_str());
return false;
}
if (fflush(f) != 0) {
- LOG(error,
- "Could not flush file %s",
- newName.c_str());
+ LOG(error, "Could not flush file %s", newName.c_str());
return false;
}
if (fsync(fileno(f)) != 0) {
- LOG(error,
- "Could not fsync file %s",
- newName.c_str());
+ LOG(error, "Could not fsync file %s", newName.c_str());
return false;
}
if (fclose(f.release()) != 0) {
- LOG(error,
- "Could not close file %s",
- newName.c_str());
+ LOG(error, "Could not close file %s", newName.c_str());
return false;
}
if (rename(newName.c_str(), fileName.c_str()) != 0) {
- LOG(warning, "could not rename: %s->%s",
- newName.c_str(), fileName.c_str());
+ LOG(warning, "could not rename: %s->%s", newName.c_str(), fileName.c_str());
return false;
}
vespalib::File::sync(vespalib::dirname(fileName));
diff --git a/searchlib/src/vespa/searchlib/features/onnx_feature.cpp b/searchlib/src/vespa/searchlib/features/onnx_feature.cpp
index 87e5ef2a5c2..e9fecb3578e 100644
--- a/searchlib/src/vespa/searchlib/features/onnx_feature.cpp
+++ b/searchlib/src/vespa/searchlib/features/onnx_feature.cpp
@@ -69,6 +69,8 @@ public:
OnnxBlueprint::OnnxBlueprint()
: Blueprint("onnxModel"),
+ _cache_token(),
+ _debug_model(),
_model(nullptr),
_wire_info()
{
@@ -80,15 +82,18 @@ bool
OnnxBlueprint::setup(const IIndexEnvironment &env,
const ParameterList &params)
{
- auto optimize = (env.getFeatureMotivation() == env.FeatureMotivation::VERIFY_SETUP)
- ? Onnx::Optimize::DISABLE
- : Onnx::Optimize::ENABLE;
auto model_cfg = env.getOnnxModel(params[0].getValue());
if (!model_cfg) {
return fail("no model with name '%s' found", params[0].getValue().c_str());
}
try {
- _model = std::make_unique<Onnx>(model_cfg->file_path(), optimize);
+ if (env.getFeatureMotivation() == env.FeatureMotivation::VERIFY_SETUP) {
+ _debug_model = std::make_unique<Onnx>(model_cfg->file_path(), Optimize::DISABLE);
+ _model = _debug_model.get();
+ } else {
+ _cache_token = OnnxModelCache::load(model_cfg->file_path());
+ _model = &(_cache_token->get());
+ }
} catch (std::exception &ex) {
return fail("model setup failed: %s", ex.what());
}
@@ -132,7 +137,7 @@ OnnxBlueprint::setup(const IIndexEnvironment &env,
FeatureExecutor &
OnnxBlueprint::createExecutor(const IQueryEnvironment &, Stash &stash) const
{
- assert(_model);
+ assert(_model != nullptr);
return stash.create<OnnxFeatureExecutor>(*_model, _wire_info);
}
diff --git a/searchlib/src/vespa/searchlib/features/onnx_feature.h b/searchlib/src/vespa/searchlib/features/onnx_feature.h
index 6a63e7276c2..ed0fbc502f0 100644
--- a/searchlib/src/vespa/searchlib/features/onnx_feature.h
+++ b/searchlib/src/vespa/searchlib/features/onnx_feature.h
@@ -3,7 +3,7 @@
#pragma once
#include <vespa/searchlib/fef/blueprint.h>
-#include <vespa/eval/onnx/onnx_wrapper.h>
+#include <vespa/eval/onnx/onnx_model_cache.h>
namespace search::features {
@@ -13,7 +13,11 @@ namespace search::features {
class OnnxBlueprint : public fef::Blueprint {
private:
using Onnx = vespalib::eval::Onnx;
- std::unique_ptr<Onnx> _model;
+ using Optimize = vespalib::eval::Onnx::Optimize;
+ using OnnxModelCache = vespalib::eval::OnnxModelCache;
+ OnnxModelCache::Token::UP _cache_token;
+ std::unique_ptr<Onnx> _debug_model;
+ const Onnx *_model;
Onnx::WireInfo _wire_info;
public:
OnnxBlueprint();
diff --git a/searchlib/src/vespa/searchlib/predicate/document_features_store.cpp b/searchlib/src/vespa/searchlib/predicate/document_features_store.cpp
index dcda13cac54..ad7d6fe3456 100644
--- a/searchlib/src/vespa/searchlib/predicate/document_features_store.cpp
+++ b/searchlib/src/vespa/searchlib/predicate/document_features_store.cpp
@@ -7,8 +7,6 @@
#include <vespa/vespalib/btree/btreeroot.hpp>
#include <vespa/vespalib/btree/btreenodeallocator.hpp>
-//#include "predicate_index.h"
-
using vespalib::btree::BTreeNoLeafData;
using vespalib::datastore::EntryRef;
using vespalib::DataBuffer;
@@ -38,10 +36,8 @@ DocumentFeaturesStore::DocumentFeaturesStore(uint32_t arity)
namespace {
template <typename KeyComp, typename WordIndex>
-void deserializeWords(DataBuffer &buffer,
- memoryindex::WordStore &word_store,
- WordIndex &word_index,
- vector<EntryRef> &word_refs) {
+void
+deserializeWords(DataBuffer &buffer, memoryindex::WordStore &word_store, WordIndex &word_index, vector<EntryRef> &word_refs) {
uint32_t word_list_size = buffer.readInt32();
word_refs.reserve(word_list_size);
vector<char> word;
@@ -57,8 +53,8 @@ void deserializeWords(DataBuffer &buffer,
}
template <typename RangeFeaturesMap>
-void deserializeRanges(DataBuffer &buffer, vector<EntryRef> &word_refs,
- RangeFeaturesMap &ranges, size_t &num_ranges) {
+void
+deserializeRanges(DataBuffer &buffer, vector<EntryRef> &word_refs, RangeFeaturesMap &ranges, size_t &num_ranges) {
typedef typename RangeFeaturesMap::mapped_type::value_type Range;
uint32_t ranges_size = buffer.readInt32();
for (uint32_t i = 0; i < ranges_size; ++i) {
@@ -78,8 +74,8 @@ void deserializeRanges(DataBuffer &buffer, vector<EntryRef> &word_refs,
}
template <typename DocumentFeaturesMap>
-void deserializeDocs(DataBuffer &buffer, DocumentFeaturesMap &docs,
- size_t &num_features) {
+void
+deserializeDocs(DataBuffer &buffer, DocumentFeaturesMap &docs, size_t &num_features) {
uint32_t docs_size = buffer.readInt32();
for (uint32_t i = 0; i < docs_size; ++i) {
uint32_t doc_id = buffer.readInt32();
@@ -111,7 +107,8 @@ DocumentFeaturesStore::~DocumentFeaturesStore() {
_word_index.clear();
}
-void DocumentFeaturesStore::insert(uint64_t featureId, uint32_t docId) {
+void
+DocumentFeaturesStore::insert(uint64_t featureId, uint32_t docId) {
assert(docId != 0);
if (_currDocId != docId) {
auto docsItr = _docs.find(docId);
@@ -125,8 +122,8 @@ void DocumentFeaturesStore::insert(uint64_t featureId, uint32_t docId) {
++_numFeatures;
}
-void DocumentFeaturesStore::insert(const PredicateTreeAnnotations &annotations,
- uint32_t doc_id) {
+void
+DocumentFeaturesStore::insert(const PredicateTreeAnnotations &annotations, uint32_t doc_id) {
assert(doc_id != 0);
if (!annotations.features.empty()) {
auto it = _docs.find(doc_id);
@@ -172,15 +169,15 @@ DocumentFeaturesStore::get(uint32_t docId) const {
if (rangeItr != _ranges.end()) {
for (auto range : rangeItr->second) {
const char *label = _word_store.getWord(range.label_ref);
- PredicateRangeExpander::expandRange(
- label, range.from, range.to, _arity,
- std::inserter(features, features.end()));
+ PredicateRangeExpander::expandRange(label, range.from, range.to, _arity,
+ std::inserter(features, features.end()));
}
}
return features;
}
-void DocumentFeaturesStore::remove(uint32_t doc_id) {
+void
+DocumentFeaturesStore::remove(uint32_t doc_id) {
auto itr = _docs.find(doc_id);
if (itr != _docs.end()) {
_numFeatures = _numFeatures >= itr->second.size() ?
@@ -198,7 +195,8 @@ void DocumentFeaturesStore::remove(uint32_t doc_id) {
}
}
-vespalib::MemoryUsage DocumentFeaturesStore::getMemoryUsage() const {
+vespalib::MemoryUsage
+DocumentFeaturesStore::getMemoryUsage() const {
vespalib::MemoryUsage usage;
usage.incAllocatedBytes(_docs.getMemoryConsumption());
usage.incUsedBytes(_docs.getMemoryUsed());
@@ -219,9 +217,11 @@ vespalib::MemoryUsage DocumentFeaturesStore::getMemoryUsage() const {
namespace {
template <typename RangeFeaturesMap>
-void findUsedWords(const RangeFeaturesMap &ranges,
- unordered_map<uint32_t, uint32_t> &word_map,
- vector<EntryRef> &word_list) {
+void
+findUsedWords(const RangeFeaturesMap &ranges,
+ unordered_map<uint32_t, uint32_t> &word_map,
+ vector<EntryRef> &word_list)
+{
for (const auto &range_features_entry : ranges) {
for (const auto &range : range_features_entry.second) {
if (!word_map.count(range.label_ref.ref())) {
@@ -232,8 +232,10 @@ void findUsedWords(const RangeFeaturesMap &ranges,
}
}
-void serializeWords(DataBuffer &buffer, const vector<EntryRef> &word_list,
- const memoryindex::WordStore &word_store) {
+void
+serializeWords(DataBuffer &buffer, const vector<EntryRef> &word_list,
+ const memoryindex::WordStore &word_store)
+{
buffer.writeInt32(word_list.size());
for (const auto &word_ref : word_list) {
const char *word = word_store.getWord(word_ref);
@@ -244,8 +246,10 @@ void serializeWords(DataBuffer &buffer, const vector<EntryRef> &word_list,
}
template <typename RangeFeaturesMap>
-void serializeRanges(DataBuffer &buffer, RangeFeaturesMap &ranges,
- unordered_map<uint32_t, uint32_t> &word_map) {
+void
+serializeRanges(DataBuffer &buffer, RangeFeaturesMap &ranges,
+ unordered_map<uint32_t, uint32_t> &word_map)
+{
buffer.writeInt32(ranges.size());
for (const auto &range_features_entry : ranges) {
buffer.writeInt32(range_features_entry.first); // doc id
@@ -259,7 +263,8 @@ void serializeRanges(DataBuffer &buffer, RangeFeaturesMap &ranges,
}
template <typename DocumentFeaturesMap>
-void serializeDocs(DataBuffer &buffer, DocumentFeaturesMap &docs) {
+void
+serializeDocs(DataBuffer &buffer, DocumentFeaturesMap &docs) {
buffer.writeInt32(docs.size());
for (const auto &doc_features_entry : docs) {
buffer.writeInt32(doc_features_entry.first); // doc id
@@ -271,7 +276,8 @@ void serializeDocs(DataBuffer &buffer, DocumentFeaturesMap &docs) {
}
} // namespace
-void DocumentFeaturesStore::serialize(DataBuffer &buffer) const {
+void
+DocumentFeaturesStore::serialize(DataBuffer &buffer) const {
vector<EntryRef> word_list;
unordered_map<uint32_t, uint32_t> word_map;
diff --git a/searchlib/src/vespa/searchlib/predicate/document_features_store.h b/searchlib/src/vespa/searchlib/predicate/document_features_store.h
index a45c7ba043a..442249d619a 100644
--- a/searchlib/src/vespa/searchlib/predicate/document_features_store.h
+++ b/searchlib/src/vespa/searchlib/predicate/document_features_store.h
@@ -54,14 +54,14 @@ class DocumentFeaturesStore {
vespalib::btree::NoAggregated, const KeyComp &> WordIndex;
DocumentFeaturesMap _docs;
- RangeFeaturesMap _ranges;
- WordStore _word_store;
- WordIndex _word_index;
- uint32_t _currDocId;
- FeatureVector *_currFeatures;
- size_t _numFeatures;
- size_t _numRanges;
- uint32_t _arity;
+ RangeFeaturesMap _ranges;
+ WordStore _word_store;
+ WordIndex _word_index;
+ uint32_t _currDocId;
+ FeatureVector *_currFeatures;
+ size_t _numFeatures;
+ size_t _numRanges;
+ uint32_t _arity;
void setCurrent(uint32_t docId, FeatureVector *features);
diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_bounds_posting_list.h b/searchlib/src/vespa/searchlib/predicate/predicate_bounds_posting_list.h
index 0ef2d81f094..9d2e90af7a5 100644
--- a/searchlib/src/vespa/searchlib/predicate/predicate_bounds_posting_list.h
+++ b/searchlib/src/vespa/searchlib/predicate/predicate_bounds_posting_list.h
@@ -53,7 +53,8 @@ namespace {
} // namespace
template<typename Iterator>
-bool PredicateBoundsPostingList<Iterator>::next(uint32_t doc_id) {
+bool
+PredicateBoundsPostingList<Iterator>::next(uint32_t doc_id) {
if (_iterator.valid() && _iterator.getKey() <= doc_id) {
_iterator.linearSeek(doc_id + 1);
}
@@ -74,7 +75,8 @@ bool PredicateBoundsPostingList<Iterator>::next(uint32_t doc_id) {
}
template<typename Iterator>
-bool PredicateBoundsPostingList<Iterator>::nextInterval() {
+bool
+PredicateBoundsPostingList<Iterator>::nextInterval() {
uint32_t next_bounds;
do {
if (__builtin_expect(_interval_count == 1, true)) {
diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_index.cpp b/searchlib/src/vespa/searchlib/predicate/predicate_index.cpp
index e9b1a6bd685..6cbe11e2240 100644
--- a/searchlib/src/vespa/searchlib/predicate/predicate_index.cpp
+++ b/searchlib/src/vespa/searchlib/predicate/predicate_index.cpp
@@ -17,16 +17,19 @@ using std::vector;
namespace search::predicate {
template <>
-void PredicateIndex::addPosting<Interval>(uint64_t feature, uint32_t doc_id, EntryRef ref) {
+void
+PredicateIndex::addPosting<Interval>(uint64_t feature, uint32_t doc_id, EntryRef ref) {
_interval_index.addPosting(feature, doc_id, ref);
}
template <>
-void PredicateIndex::addPosting<IntervalWithBounds>(uint64_t feature, uint32_t doc_id, EntryRef ref) {
+void
+PredicateIndex::addPosting<IntervalWithBounds>(uint64_t feature, uint32_t doc_id, EntryRef ref) {
_bounds_index.addPosting(feature, doc_id, ref);
}
template <typename IntervalT>
-void PredicateIndex::indexDocumentFeatures(uint32_t doc_id, const PredicateIndex::FeatureMap<IntervalT> &interval_map) {
+void
+PredicateIndex::indexDocumentFeatures(uint32_t doc_id, const PredicateIndex::FeatureMap<IntervalT> &interval_map) {
if (interval_map.empty()) {
return;
}
@@ -80,11 +83,10 @@ public:
} // namespace
-PredicateIndex::PredicateIndex(GenerationHandler &generation_handler, GenerationHolder &genHolder,
+PredicateIndex::PredicateIndex(GenerationHolder &genHolder,
const DocIdLimitProvider &limit_provider,
const SimpleIndexConfig &simple_index_config, uint32_t arity)
: _arity(arity),
- _generation_handler(generation_handler),
_limit_provider(limit_provider),
_interval_index(genHolder, limit_provider, simple_index_config),
_bounds_index(genHolder, limit_provider, simple_index_config),
@@ -95,12 +97,11 @@ PredicateIndex::PredicateIndex(GenerationHandler &generation_handler, Generation
{
}
-PredicateIndex::PredicateIndex(GenerationHandler &generation_handler, GenerationHolder &genHolder,
+PredicateIndex::PredicateIndex(GenerationHolder &genHolder,
const DocIdLimitProvider &limit_provider,
const SimpleIndexConfig &simple_index_config, DataBuffer &buffer,
SimpleIndexDeserializeObserver<> & observer, uint32_t version)
: _arity(0),
- _generation_handler(generation_handler),
_limit_provider(limit_provider),
_interval_index(genHolder, limit_provider, simple_index_config),
_bounds_index(genHolder, limit_provider, simple_index_config),
@@ -121,15 +122,15 @@ PredicateIndex::PredicateIndex(GenerationHandler &generation_handler, Generation
_zero_constraint_docs.assign(builder);
IntervalDeserializer<Interval> interval_deserializer(_interval_store);
_interval_index.deserialize(buffer, interval_deserializer, observer, version);
- IntervalDeserializer<IntervalWithBounds>
- bounds_deserializer(_interval_store);
+ IntervalDeserializer<IntervalWithBounds> bounds_deserializer(_interval_store);
_bounds_index.deserialize(buffer, bounds_deserializer, observer, version);
commit();
}
PredicateIndex::~PredicateIndex() = default;
-void PredicateIndex::serialize(DataBuffer &buffer) const {
+void
+PredicateIndex::serialize(DataBuffer &buffer) const {
_features_store.serialize(buffer);
buffer.writeInt16(_arity);
buffer.writeInt32(_zero_constraint_docs.size());
@@ -142,25 +143,29 @@ void PredicateIndex::serialize(DataBuffer &buffer) const {
_bounds_index.serialize(buffer, bounds_serializer);
}
-void PredicateIndex::onDeserializationCompleted() {
+void
+PredicateIndex::onDeserializationCompleted() {
_interval_index.promoteOverThresholdVectors();
_bounds_index.promoteOverThresholdVectors();
}
-void PredicateIndex::indexDocument(uint32_t doc_id, const PredicateTreeAnnotations &annotations) {
+void
+PredicateIndex::indexDocument(uint32_t doc_id, const PredicateTreeAnnotations &annotations) {
indexDocumentFeatures(doc_id, annotations.interval_map);
indexDocumentFeatures(doc_id, annotations.bounds_map);
_features_store.insert(annotations, doc_id);
}
-void PredicateIndex::indexEmptyDocument(uint32_t doc_id)
+void
+PredicateIndex::indexEmptyDocument(uint32_t doc_id)
{
_zero_constraint_docs.insert(doc_id, vespalib::btree::BTreeNoLeafData::_instance);
}
namespace {
-void removeFromIndex(
- uint64_t feature, uint32_t doc_id, SimpleIndex<vespalib::datastore::EntryRef> &index, PredicateIntervalStore &interval_store)
+void
+removeFromIndex(uint64_t feature, uint32_t doc_id, SimpleIndex<vespalib::datastore::EntryRef> &index,
+ PredicateIntervalStore &interval_store)
{
auto result = index.removeFromPostingList(feature, doc_id);
if (result.second) { // Posting was removed
@@ -189,7 +194,8 @@ private:
} // namespace
-void PredicateIndex::removeDocument(uint32_t doc_id) {
+void
+PredicateIndex::removeDocument(uint32_t doc_id) {
_zero_constraint_docs.remove(doc_id);
auto features = _features_store.get(doc_id);
@@ -203,27 +209,31 @@ void PredicateIndex::removeDocument(uint32_t doc_id) {
_features_store.remove(doc_id);
}
-void PredicateIndex::commit() {
+void
+PredicateIndex::commit() {
_interval_index.commit();
_bounds_index.commit();
_zero_constraint_docs.getAllocator().freeze();
}
-void PredicateIndex::trimHoldLists(generation_t used_generation) {
+void
+PredicateIndex::trimHoldLists(generation_t used_generation) {
_interval_index.trimHoldLists(used_generation);
_bounds_index.trimHoldLists(used_generation);
_interval_store.trimHoldLists(used_generation);
_zero_constraint_docs.getAllocator().trimHoldLists(used_generation);
}
-void PredicateIndex::transferHoldLists(generation_t generation) {
+void
+PredicateIndex::transferHoldLists(generation_t generation) {
_interval_index.transferHoldLists(generation);
_bounds_index.transferHoldLists(generation);
_interval_store.transferHoldLists(generation);
_zero_constraint_docs.getAllocator().transferHoldLists(generation);
}
-vespalib::MemoryUsage PredicateIndex::getMemoryUsage() const {
+vespalib::MemoryUsage
+PredicateIndex::getMemoryUsage() const {
// TODO Include bit vector cache memory usage
vespalib::MemoryUsage combined;
combined.merge(_interval_index.getMemoryUsage());
diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_index.h b/searchlib/src/vespa/searchlib/predicate/predicate_index.h
index d2ed70694a2..49bf77f2fcc 100644
--- a/searchlib/src/vespa/searchlib/predicate/predicate_index.h
+++ b/searchlib/src/vespa/searchlib/predicate/predicate_index.h
@@ -38,16 +38,15 @@ public:
using BTreeIterator = SimpleIndex<vespalib::datastore::EntryRef>::BTreeIterator;
using VectorIterator = SimpleIndex<vespalib::datastore::EntryRef>::VectorIterator;
private:
- uint32_t _arity;
- GenerationHandler &_generation_handler;
+ uint32_t _arity;
const DocIdLimitProvider &_limit_provider;
- IntervalIndex _interval_index;
- BoundsIndex _bounds_index;
- PredicateIntervalStore _interval_store;
- BTreeSet _zero_constraint_docs;
+ IntervalIndex _interval_index;
+ BoundsIndex _bounds_index;
+ PredicateIntervalStore _interval_store;
+ BTreeSet _zero_constraint_docs;
- DocumentFeaturesStore _features_store;
- mutable BitVectorCache _cache;
+ DocumentFeaturesStore _features_store;
+ mutable BitVectorCache _cache;
template <typename IntervalT>
void addPosting(uint64_t feature, uint32_t doc_id, vespalib::datastore::EntryRef ref);
@@ -55,15 +54,13 @@ private:
template <typename IntervalT>
void indexDocumentFeatures(uint32_t doc_id, const FeatureMap<IntervalT> &interval_map);
- PopulateInterface::Iterator::UP lookup(uint64_t key) const override;
-
public:
- PredicateIndex(GenerationHandler &generation_handler, GenerationHolder &genHolder,
+ PredicateIndex(GenerationHolder &genHolder,
const DocIdLimitProvider &limit_provider,
const SimpleIndexConfig &simple_index_config, uint32_t arity);
// deserializes PredicateIndex from buffer.
// The observer can be used to gain some insight into what has been added to the index..
- PredicateIndex(GenerationHandler &generation_handler, GenerationHolder &genHolder,
+ PredicateIndex(GenerationHolder &genHolder,
const DocIdLimitProvider &limit_provider,
const SimpleIndexConfig &simple_index_config, vespalib::DataBuffer &buffer,
SimpleIndexDeserializeObserver<> & observer, uint32_t version);
@@ -106,6 +103,9 @@ public:
* Adjust size of structures to have space for docId.
*/
void adjustDocIdLimit(uint32_t docId);
+ PopulateInterface::Iterator::UP lookup(uint64_t key) const override;
+ // Exposed for testing
+ void requireCachePopulation() const { _cache.requirePopulation(); }
};
extern template class SimpleIndex<vespalib::datastore::EntryRef>;
diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_interval.cpp b/searchlib/src/vespa/searchlib/predicate/predicate_interval.cpp
index a92c16de462..d98e8a151dc 100644
--- a/searchlib/src/vespa/searchlib/predicate/predicate_interval.cpp
+++ b/searchlib/src/vespa/searchlib/predicate/predicate_interval.cpp
@@ -5,14 +5,16 @@
namespace search::predicate {
-std::ostream &operator<<(std::ostream &out, const Interval &i) {
+std::ostream &
+operator<<(std::ostream &out, const Interval &i) {
std::ios_base::fmtflags flags = out.flags();
out << "0x" << std::hex << i.interval;
out.flags(flags);
return out;
}
-std::ostream &operator<<(std::ostream &out, const IntervalWithBounds &i) {
+std::ostream &
+operator<<(std::ostream &out, const IntervalWithBounds &i) {
std::ios_base::fmtflags flags = out.flags();
out << "0x" << std::hex << i.interval << ", 0x" << i.bounds;
out.flags(flags);
diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_interval_posting_list.h b/searchlib/src/vespa/searchlib/predicate/predicate_interval_posting_list.h
index f93d99b550b..33e15b2be33 100644
--- a/searchlib/src/vespa/searchlib/predicate/predicate_interval_posting_list.h
+++ b/searchlib/src/vespa/searchlib/predicate/predicate_interval_posting_list.h
@@ -14,10 +14,10 @@ namespace search::predicate {
template<typename Iterator>
class PredicateIntervalPostingList : public PredicatePostingList {
const PredicateIntervalStore &_interval_store;
- Iterator _iterator;
- const Interval *_current_interval;
- uint32_t _interval_count;
- Interval _single_buf;
+ Iterator _iterator;
+ const Interval *_current_interval;
+ uint32_t _interval_count;
+ Interval _single_buf;
public:
PredicateIntervalPostingList(const PredicateIntervalStore &interval_store, Iterator it);
@@ -46,7 +46,8 @@ PredicateIntervalPostingList<Iterator>::PredicateIntervalPostingList(
}
template<typename Iterator>
-bool PredicateIntervalPostingList<Iterator>::next(uint32_t doc_id) {
+bool
+PredicateIntervalPostingList<Iterator>::next(uint32_t doc_id) {
if (!_iterator.valid()) {
return false;
}
diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.cpp b/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.cpp
index 28c82cb7a97..13be0f0127b 100644
--- a/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.cpp
+++ b/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.cpp
@@ -21,7 +21,8 @@ PredicateIntervalStore::PredicateIntervalStore()
: _store(),
_size1Type(1, 1024u, RefType::offsetSize()),
_store_adapter(_store),
- _ref_cache(_store_adapter) {
+ _ref_cache(_store_adapter)
+{
// This order determines type ids.
_store.addType(&_size1Type);
@@ -46,7 +47,8 @@ PredicateIntervalStore::~PredicateIntervalStore() {
// anyway.
//
template <typename IntervalT>
-EntryRef PredicateIntervalStore::insert(const vector<IntervalT> &intervals) {
+EntryRef
+PredicateIntervalStore::insert(const vector<IntervalT> &intervals) {
const uint32_t size = entrySize<IntervalT>() * intervals.size();
if (size == 0) {
return EntryRef();
@@ -81,7 +83,8 @@ EntryRef PredicateIntervalStore::insert(const vector<Interval> &);
template
EntryRef PredicateIntervalStore::insert(const vector<IntervalWithBounds> &);
-void PredicateIntervalStore::remove(EntryRef ref) {
+void
+PredicateIntervalStore::remove(EntryRef ref) {
if (ref.valid()) {
uint32_t buffer_id = RefType(ref).bufferId();
if (buffer_id == 0) { // single interval optimization.
@@ -96,11 +99,13 @@ void PredicateIntervalStore::remove(EntryRef ref) {
}
}
-void PredicateIntervalStore::trimHoldLists(generation_t used_generation) {
+void
+PredicateIntervalStore::trimHoldLists(generation_t used_generation) {
_store.trimHoldLists(used_generation);
}
-void PredicateIntervalStore::transferHoldLists(generation_t generation) {
+void
+PredicateIntervalStore::transferHoldLists(generation_t generation) {
_store.transferHoldLists(generation);
}
diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.h b/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.h
index e4573866eb8..5f55a2d3d5f 100644
--- a/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.h
+++ b/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.h
@@ -34,7 +34,7 @@ class PredicateIntervalStore {
}
};
DataStoreAdapter _store_adapter;
- RefCacheType _ref_cache;
+ RefCacheType _ref_cache;
// Return type for private allocation functions
template <typename T>
@@ -89,7 +89,8 @@ public:
* single interval optimization.
*/
template <typename IntervalT>
- const IntervalT *get(vespalib::datastore::EntryRef btree_ref,
+ const IntervalT
+ *get(vespalib::datastore::EntryRef btree_ref,
uint32_t &size_out,
IntervalT *single_buf) const
{
diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_posting_list.h b/searchlib/src/vespa/searchlib/predicate/predicate_posting_list.h
index 93e671f603f..50024913dcb 100644
--- a/searchlib/src/vespa/searchlib/predicate/predicate_posting_list.h
+++ b/searchlib/src/vespa/searchlib/predicate/predicate_posting_list.h
@@ -16,9 +16,9 @@ class PredicatePostingList {
protected:
PredicatePostingList()
- : _docId(0),
- _subquery(UINT64_MAX) {
- }
+ : _docId(0),
+ _subquery(UINT64_MAX)
+ { }
void setDocId(uint32_t docId) { _docId = docId; }
diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_zstar_compressed_posting_list.h b/searchlib/src/vespa/searchlib/predicate/predicate_zstar_compressed_posting_list.h
index 965c4ad3042..0268d2bdb0c 100644
--- a/searchlib/src/vespa/searchlib/predicate/predicate_zstar_compressed_posting_list.h
+++ b/searchlib/src/vespa/searchlib/predicate/predicate_zstar_compressed_posting_list.h
@@ -41,7 +41,8 @@ PredicateZstarCompressedPostingList<Iterator>::PredicateZstarCompressedPostingLi
}
template<typename Iterator>
-bool PredicateZstarCompressedPostingList<Iterator>::next(uint32_t doc_id) {
+bool
+PredicateZstarCompressedPostingList<Iterator>::next(uint32_t doc_id) {
if (_iterator.valid() && _iterator.getKey() <= doc_id) {
_iterator.linearSeek(doc_id + 1);
}
@@ -57,7 +58,8 @@ bool PredicateZstarCompressedPostingList<Iterator>::next(uint32_t doc_id) {
}
template<typename Iterator>
-bool PredicateZstarCompressedPostingList<Iterator>::nextInterval() {
+bool
+PredicateZstarCompressedPostingList<Iterator>::nextInterval() {
uint32_t next_interval = UINT32_MAX;
if (_interval_count > 1) {
next_interval = _current_interval[1].interval;
diff --git a/searchlib/src/vespa/searchlib/predicate/simple_index.cpp b/searchlib/src/vespa/searchlib/predicate/simple_index.cpp
index 1b0db8f52d4..b0ef11e1c25 100644
--- a/searchlib/src/vespa/searchlib/predicate/simple_index.cpp
+++ b/searchlib/src/vespa/searchlib/predicate/simple_index.cpp
@@ -6,14 +6,12 @@
#include <vespa/vespalib/btree/btreeiterator.hpp>
#include <vespa/vespalib/btree/btreestore.hpp>
#include <vespa/vespalib/btree/btreenodeallocator.hpp>
-#include <vespa/vespalib/util/array.hpp>
#include <vespa/vespalib/datastore/buffer_type.hpp>
#include <vespa/log/log.h>
LOG_SETUP(".searchlib.predicate.simple_index");
-namespace search::predicate {
- namespace simpleindex {
+namespace search::predicate::simpleindex {
bool log_enabled() {
return LOG_WOULD_LOG(debug);
@@ -25,6 +23,8 @@ void log_debug(vespalib::string &str) {
} // namespace simpleindex
+namespace search::predicate {
+
template class SimpleIndex<vespalib::datastore::EntryRef>;
}
diff --git a/searchlib/src/vespa/searchlib/predicate/simple_index.h b/searchlib/src/vespa/searchlib/predicate/simple_index.h
index cfc288770c8..75dc540f787 100644
--- a/searchlib/src/vespa/searchlib/predicate/simple_index.h
+++ b/searchlib/src/vespa/searchlib/predicate/simple_index.h
@@ -141,12 +141,12 @@ private:
template <typename T>
using optional = std::optional<T>;
- Dictionary _dictionary;
- BTreeStore _btree_posting_lists;
- VectorStore _vector_posting_lists;
- GenerationHolder &_generation_holder;
- uint32_t _insert_remove_counter = 0;
- const SimpleIndexConfig _config;
+ Dictionary _dictionary;
+ BTreeStore _btree_posting_lists;
+ VectorStore _vector_posting_lists;
+ GenerationHolder &_generation_holder;
+ uint32_t _insert_remove_counter = 0;
+ const SimpleIndexConfig _config;
const DocIdLimitProvider &_limit_provider;
void insertIntoPosting(vespalib::datastore::EntryRef &ref, Key key, DocId doc_id, const Posting &posting);
@@ -164,12 +164,10 @@ private:
bool shouldRemoveVectorPosting(size_t size, double ratio) const;
size_t getVectorPostingSize(const PostingVector &vector) const {
return std::min(vector.size(),
- static_cast<size_t>(_limit_provider.getCommittedDocIdLimit()));
+ static_cast<size_t>(_limit_provider.getCommittedDocIdLimit()));
}
public:
- SimpleIndex(GenerationHolder &generation_holder, const DocIdLimitProvider &provider) :
- SimpleIndex(generation_holder, provider, SimpleIndexConfig()) {}
SimpleIndex(GenerationHolder &generation_holder,
const DocIdLimitProvider &provider, const SimpleIndexConfig &config)
: _generation_holder(generation_holder), _config(config), _limit_provider(provider) {}
@@ -219,8 +217,8 @@ public:
template<typename Posting, typename Key, typename DocId>
template<typename FunctionType>
-void SimpleIndex<Posting, Key, DocId>::foreach_frozen_key(
- vespalib::datastore::EntryRef ref, Key key, FunctionType func) const {
+void
+SimpleIndex<Posting, Key, DocId>::foreach_frozen_key(vespalib::datastore::EntryRef ref, Key key, FunctionType func) const {
auto it = _vector_posting_lists.getFrozenView().find(key);
double ratio = getDocumentRatio(getDocumentCount(ref), _limit_provider.getDocIdLimit());
if (it.valid() && ratio > _config.foreach_vector_threshold) {
diff --git a/searchlib/src/vespa/searchlib/predicate/simple_index.hpp b/searchlib/src/vespa/searchlib/predicate/simple_index.hpp
index b49218f1ba6..ada77b9fe38 100644
--- a/searchlib/src/vespa/searchlib/predicate/simple_index.hpp
+++ b/searchlib/src/vespa/searchlib/predicate/simple_index.hpp
@@ -13,8 +13,8 @@ namespace simpleindex {
}
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::insertIntoPosting(
- vespalib::datastore::EntryRef &ref, Key key, DocId doc_id, const Posting &posting) {
+void
+SimpleIndex<Posting, Key, DocId>::insertIntoPosting(vespalib::datastore::EntryRef &ref, Key key, DocId doc_id, const Posting &posting) {
bool ok = _btree_posting_lists.insert(ref, doc_id, posting);
if (!ok) {
_btree_posting_lists.remove(ref, doc_id);
@@ -26,8 +26,8 @@ void SimpleIndex<Posting, Key, DocId>::insertIntoPosting(
}
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::insertIntoVectorPosting(
- vespalib::datastore::EntryRef ref, Key key, DocId doc_id, const Posting &posting) {
+void
+SimpleIndex<Posting, Key, DocId>::insertIntoVectorPosting(vespalib::datastore::EntryRef ref, Key key, DocId doc_id, const Posting &posting) {
assert(doc_id < _limit_provider.getDocIdLimit());
auto it = _vector_posting_lists.find(key);
if (it.valid()) {
@@ -69,9 +69,8 @@ SimpleIndex<Posting, Key, DocId>::~SimpleIndex() {
}
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::serialize(
- vespalib::DataBuffer &buffer,
- const PostingSerializer<Posting> &serializer) const {
+void
+SimpleIndex<Posting, Key, DocId>::serialize(vespalib::DataBuffer &buffer, const PostingSerializer<Posting> &serializer) const {
assert(sizeof(Key) <= sizeof(uint64_t));
assert(sizeof(DocId) <= sizeof(uint32_t));
buffer.writeInt32(_dictionary.size());
@@ -90,10 +89,10 @@ void SimpleIndex<Posting, Key, DocId>::serialize(
}
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::deserialize(
- vespalib::DataBuffer &buffer,
- PostingDeserializer<Posting> &deserializer,
- SimpleIndexDeserializeObserver<Key, DocId> &observer, uint32_t version) {
+void
+SimpleIndex<Posting, Key, DocId>::deserialize(vespalib::DataBuffer &buffer, PostingDeserializer<Posting> &deserializer,
+ SimpleIndexDeserializeObserver<Key, DocId> &observer, uint32_t version)
+{
typename Dictionary::Builder builder(_dictionary.getAllocator());
uint32_t size = buffer.readInt32();
std::vector<vespalib::btree::BTreeKeyData<DocId, Posting>> postings;
@@ -128,8 +127,8 @@ void SimpleIndex<Posting, Key, DocId>::deserialize(
}
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::addPosting(Key key, DocId doc_id,
- const Posting &posting) {
+void
+SimpleIndex<Posting, Key, DocId>::addPosting(Key key, DocId doc_id, const Posting &posting) {
auto iter = _dictionary.find(key);
vespalib::datastore::EntryRef ref;
if (iter.valid()) {
@@ -178,8 +177,8 @@ SimpleIndex<Posting, Key, DocId>::removeFromPostingList(Key key, DocId doc_id) {
}
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::removeFromVectorPostingList(
- vespalib::datastore::EntryRef ref, Key key, DocId doc_id) {
+void
+SimpleIndex<Posting, Key, DocId>::removeFromVectorPostingList(vespalib::datastore::EntryRef ref, Key key, DocId doc_id) {
auto it = _vector_posting_lists.find(key);
if (it.valid()) {
if (!removeVectorIfBelowThreshold(ref, it)) {
@@ -189,7 +188,8 @@ void SimpleIndex<Posting, Key, DocId>::removeFromVectorPostingList(
};
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::pruneBelowThresholdVectors() {
+void
+SimpleIndex<Posting, Key, DocId>::pruneBelowThresholdVectors() {
// Check if it is time to prune any vector postings
if (++_insert_remove_counter % _config.vector_prune_frequency > 0) return;
@@ -204,7 +204,8 @@ void SimpleIndex<Posting, Key, DocId>::pruneBelowThresholdVectors() {
};
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::promoteOverThresholdVectors() {
+void
+SimpleIndex<Posting, Key, DocId>::promoteOverThresholdVectors() {
for (auto it = _dictionary.begin(); it.valid(); ++it) {
Key key = it.getKey();
if (!_vector_posting_lists.find(key).valid()) {
@@ -214,8 +215,8 @@ void SimpleIndex<Posting, Key, DocId>::promoteOverThresholdVectors() {
}
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::logVector(
- const char *action, Key key, size_t document_count, double ratio, size_t vector_length) const {
+void
+SimpleIndex<Posting, Key, DocId>::logVector(const char *action, Key key, size_t document_count, double ratio, size_t vector_length) const {
if (!simpleindex::log_enabled()) return;
auto msg = vespalib::make_string(
"%s vector for key '%016" PRIx64 "' with length %zu. Contains %zu documents "
@@ -227,7 +228,8 @@ void SimpleIndex<Posting, Key, DocId>::logVector(
}
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::createVectorIfOverThreshold(vespalib::datastore::EntryRef ref, Key key) {
+void
+SimpleIndex<Posting, Key, DocId>::createVectorIfOverThreshold(vespalib::datastore::EntryRef ref, Key key) {
uint32_t doc_id_limit = _limit_provider.getDocIdLimit();
size_t size = getDocumentCount(ref);
double ratio = getDocumentRatio(size, doc_id_limit);
@@ -242,8 +244,8 @@ void SimpleIndex<Posting, Key, DocId>::createVectorIfOverThreshold(vespalib::dat
}
template <typename Posting, typename Key, typename DocId>
-bool SimpleIndex<Posting, Key, DocId>::removeVectorIfBelowThreshold(
- vespalib::datastore::EntryRef ref, typename VectorStore::Iterator &it) {
+bool
+SimpleIndex<Posting, Key, DocId>::removeVectorIfBelowThreshold(vespalib::datastore::EntryRef ref, typename VectorStore::Iterator &it) {
size_t size = getDocumentCount(ref);
double ratio = getDocumentRatio(size, _limit_provider.getDocIdLimit());
if (shouldRemoveVectorPosting(size, ratio)) {
@@ -257,36 +259,41 @@ bool SimpleIndex<Posting, Key, DocId>::removeVectorIfBelowThreshold(
}
template <typename Posting, typename Key, typename DocId>
-double SimpleIndex<Posting, Key, DocId>::getDocumentRatio(size_t document_count,
- uint32_t doc_id_limit) const {
+double
+SimpleIndex<Posting, Key, DocId>::getDocumentRatio(size_t document_count, uint32_t doc_id_limit) const {
assert(doc_id_limit > 1);
return document_count / static_cast<double>(doc_id_limit - 1);
};
template <typename Posting, typename Key, typename DocId>
-size_t SimpleIndex<Posting, Key, DocId>::getDocumentCount(vespalib::datastore::EntryRef ref) const {
+size_t
+SimpleIndex<Posting, Key, DocId>::getDocumentCount(vespalib::datastore::EntryRef ref) const {
return _btree_posting_lists.size(ref);
};
template <typename Posting, typename Key, typename DocId>
-bool SimpleIndex<Posting, Key, DocId>::shouldRemoveVectorPosting(size_t size, double ratio) const {
+bool
+SimpleIndex<Posting, Key, DocId>::shouldRemoveVectorPosting(size_t size, double ratio) const {
return size < _config.lower_vector_size_threshold || ratio < _config.lower_docid_freq_threshold;
};
template <typename Posting, typename Key, typename DocId>
-bool SimpleIndex<Posting, Key, DocId>::shouldCreateVectorPosting(size_t size, double ratio) const {
+bool
+SimpleIndex<Posting, Key, DocId>::shouldCreateVectorPosting(size_t size, double ratio) const {
return size >= _config.upper_vector_size_threshold && ratio >= _config.upper_docid_freq_threshold;
};
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::commit() {
+void
+SimpleIndex<Posting, Key, DocId>::commit() {
_dictionary.getAllocator().freeze();
_btree_posting_lists.freeze();
_vector_posting_lists.getAllocator().freeze();
}
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::trimHoldLists(generation_t used_generation) {
+void
+SimpleIndex<Posting, Key, DocId>::trimHoldLists(generation_t used_generation) {
_btree_posting_lists.trimHoldLists(used_generation);
_dictionary.getAllocator().trimHoldLists(used_generation);
_vector_posting_lists.getAllocator().trimHoldLists(used_generation);
@@ -294,14 +301,16 @@ void SimpleIndex<Posting, Key, DocId>::trimHoldLists(generation_t used_generatio
}
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::transferHoldLists(generation_t generation) {
+void
+SimpleIndex<Posting, Key, DocId>::transferHoldLists(generation_t generation) {
_dictionary.getAllocator().transferHoldLists(generation);
_btree_posting_lists.transferHoldLists(generation);
_vector_posting_lists.getAllocator().transferHoldLists(generation);
}
template <typename Posting, typename Key, typename DocId>
-vespalib::MemoryUsage SimpleIndex<Posting, Key, DocId>::getMemoryUsage() const {
+vespalib::MemoryUsage
+SimpleIndex<Posting, Key, DocId>::getMemoryUsage() const {
vespalib::MemoryUsage combined;
combined.merge(_dictionary.getMemoryUsage());
combined.merge(_btree_posting_lists.getMemoryUsage());
diff --git a/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.cpp b/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.cpp
index 66acc2f0836..24d731156b3 100644
--- a/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.cpp
@@ -8,13 +8,13 @@
#include <vespa/searchlib/predicate/predicate_hash.h>
#include <vespa/searchlib/predicate/predicate_index.h>
#include <vespa/searchlib/query/tree/termnodes.h>
-#include <vespa/vespalib/btree/btree.hpp>
#include <vespa/vespalib/btree/btreeroot.hpp>
#include <vespa/vespalib/btree/btreeiterator.hpp>
#include <vespa/vespalib/btree/btreestore.hpp>
#include <vespa/vespalib/btree/btreenodeallocator.hpp>
#include <vespa/vespalib/util/memory_allocator.h>
#include <algorithm>
+
#include <vespa/log/log.h>
LOG_SETUP(".searchlib.predicate.predicate_blueprint");
#include <vespa/searchlib/predicate/predicate_range_term_expander.h>
@@ -54,7 +54,8 @@ struct MyRangeHandler {
vector<BoundsEntry> &bounds_entries;
uint64_t subquery_bitmap;
- void handleRange(const string &label) {
+ void
+ handleRange(const string &label) {
uint64_t feature = PredicateHash::hash64(label);
auto iterator = interval_index.lookup(feature);
if (iterator.valid()) {
@@ -62,7 +63,8 @@ struct MyRangeHandler {
interval_entries.push_back({iterator.getData(), subquery_bitmap, sz, feature});
}
}
- void handleEdge(const string &label, uint32_t value) {
+ void
+ handleEdge(const string &label, uint32_t value) {
uint64_t feature = PredicateHash::hash64(label);
auto iterator = bounds_index.lookup(feature);
if (iterator.valid()) {
@@ -73,18 +75,19 @@ struct MyRangeHandler {
};
template <typename Entry>
-void pushRangeDictionaryEntries(
- const Entry &entry,
- const PredicateIndex &index,
- vector<IntervalEntry> &interval_entries,
- vector<BoundsEntry> &bounds_entries) {
+void
+pushRangeDictionaryEntries(const Entry &entry, const PredicateIndex &index,
+ vector<IntervalEntry> &interval_entries,
+ vector<BoundsEntry> &bounds_entries)
+{
PredicateRangeTermExpander expander(index.getArity());
MyRangeHandler handler{index.getIntervalIndex(), index.getBoundsIndex(), interval_entries,
bounds_entries, entry.getSubQueryBitmap()};
expander.expand(entry.getKey(), entry.getValue(), handler);
}
-void pushZStarPostingList(const SimpleIndex<vespalib::datastore::EntryRef> &interval_index,
+void
+pushZStarPostingList(const SimpleIndex<vespalib::datastore::EntryRef> &interval_index,
vector<IntervalEntry> &interval_entries) {
uint64_t feature = Constants::z_star_hash;
auto iterator = interval_index.lookup(feature);
@@ -96,7 +99,8 @@ void pushZStarPostingList(const SimpleIndex<vespalib::datastore::EntryRef> &inte
} // namespace
-void PredicateBlueprint::addPostingToK(uint64_t feature)
+void
+PredicateBlueprint::addPostingToK(uint64_t feature)
{
const auto &interval_index = _index.getIntervalIndex();
auto tmp = interval_index.lookup(feature);
@@ -115,7 +119,8 @@ void PredicateBlueprint::addPostingToK(uint64_t feature)
}
}
-void PredicateBlueprint::addBoundsPostingToK(uint64_t feature)
+void
+PredicateBlueprint::addBoundsPostingToK(uint64_t feature)
{
const auto &bounds_index = _index.getBoundsIndex();
auto tmp = bounds_index.lookup(feature);
@@ -134,7 +139,8 @@ void PredicateBlueprint::addBoundsPostingToK(uint64_t feature)
}
}
-void PredicateBlueprint::addZeroConstraintToK()
+void
+PredicateBlueprint::addZeroConstraintToK()
{
uint8_t *kVBase = &_kV[0];
size_t kVSize = _kV.size();
@@ -174,15 +180,14 @@ PredicateBlueprint::PredicateBlueprint(const FieldSpecBase &field,
pushValueDictionaryEntry(entry, interval_index, _interval_dict_entries);
}
for (const auto &entry : term.getRangeFeatures()) {
- pushRangeDictionaryEntries(entry, _index, _interval_dict_entries,
- _bounds_dict_entries);
+ pushRangeDictionaryEntries(entry, _index, _interval_dict_entries,_bounds_dict_entries);
}
pushZStarPostingList(interval_index, _interval_dict_entries);
BitVectorCache::KeyAndCountSet keys;
keys.reserve(_interval_dict_entries.size());
for (const auto & e : _interval_dict_entries) {
- keys.push_back({e.feature, e.size});
+ keys.emplace_back(e.feature, e.size);
}
_cachedFeatures = _index.lookupCachedSet(keys);
@@ -202,40 +207,43 @@ PredicateBlueprint::PredicateBlueprint(const FieldSpecBase &field,
});
- if (zero_constraints_docs.size() == 0 &&
+ if ((zero_constraints_docs.size() == 0) &&
_interval_dict_entries.empty() && _bounds_dict_entries.empty() &&
- !_zstar_dict_entry.valid()) {
+ !_zstar_dict_entry.valid())
+ {
setEstimate(HitEstimate(0, true));
} else {
setEstimate(HitEstimate(static_cast<uint32_t>(zero_constraints_docs.size()), false));
}
}
-PredicateBlueprint::~PredicateBlueprint() {}
+PredicateBlueprint::~PredicateBlueprint() = default;
namespace {
- template<typename DictEntry, typename VectorIteratorEntry, typename BTreeIteratorEntry>
- void lookupPostingLists(const std::vector<DictEntry> &dict_entries,
- std::vector<VectorIteratorEntry> &vector_iterators,
- std::vector<BTreeIteratorEntry> &btree_iterators,
- const SimpleIndex<vespalib::datastore::EntryRef> &index)
- {
- for (const auto &entry : dict_entries) {
- auto vector_iterator = index.getVectorPostingList(entry.feature);
- if (vector_iterator) {
- vector_iterators.push_back(VectorIteratorEntry{*vector_iterator, entry});
- } else {
- auto btree_iterator = index.getBTreePostingList(entry.entry_ref);
- btree_iterators.push_back(BTreeIteratorEntry{btree_iterator, entry});
- }
+template<typename DictEntry, typename VectorIteratorEntry, typename BTreeIteratorEntry>
+void
+lookupPostingLists(const std::vector<DictEntry> &dict_entries,
+ std::vector<VectorIteratorEntry> &vector_iterators,
+ std::vector<BTreeIteratorEntry> &btree_iterators,
+ const SimpleIndex<vespalib::datastore::EntryRef> &index)
+{
+ for (const auto &entry : dict_entries) {
+ auto vector_iterator = index.getVectorPostingList(entry.feature);
+ if (vector_iterator) {
+ vector_iterators.push_back(VectorIteratorEntry{*vector_iterator, entry});
+ } else {
+ auto btree_iterator = index.getBTreePostingList(entry.entry_ref);
+ btree_iterators.push_back(BTreeIteratorEntry{btree_iterator, entry});
}
+ }
- };
+}
}
-void PredicateBlueprint::fetchPostings(const ExecuteInfo &) {
+void
+PredicateBlueprint::fetchPostings(const ExecuteInfo &) {
if (!_fetch_postings_done) {
const auto &interval_index = _index.getIntervalIndex();
const auto &bounds_index = _index.getBoundsIndex();
@@ -277,29 +285,31 @@ PredicateBlueprint::createLeafSearch(const fef::TermFieldMatchDataArray &tfmda,
PredicateAttribute::MinFeatureHandle mfh = attribute.getMinFeatureVector();
auto interval_range_vector = attribute.getIntervalRangeVector();
auto max_interval_range = attribute.getMaxIntervalRange();
- return SearchIterator::UP(new PredicateSearch(mfh.first, interval_range_vector, max_interval_range, _kV,
- createPostingLists(), tfmda));
+ return std::make_unique<PredicateSearch>(mfh.first, interval_range_vector, max_interval_range, _kV,
+ createPostingLists(), tfmda);
}
namespace {
- template<typename IteratorEntry, typename PostingListFactory>
- void createPredicatePostingLists(const std::vector<IteratorEntry> &iterator_entries,
- std::vector<PredicatePostingList::UP> &posting_lists,
- PostingListFactory posting_list_factory)
- {
- for (const auto &entry : iterator_entries) {
- if (entry.iterator.valid()) {
- auto posting_list = posting_list_factory(entry);
- posting_list->setSubquery(entry.entry.subquery);
- posting_lists.emplace_back(PredicatePostingList::UP(posting_list));
- }
+template<typename IteratorEntry, typename PostingListFactory>
+void
+createPredicatePostingLists(const std::vector<IteratorEntry> &iterator_entries,
+ std::vector<PredicatePostingList::UP> &posting_lists,
+ PostingListFactory posting_list_factory)
+{
+ for (const auto &entry : iterator_entries) {
+ if (entry.iterator.valid()) {
+ auto posting_list = posting_list_factory(entry);
+ posting_list->setSubquery(entry.entry.subquery);
+ posting_lists.emplace_back(PredicatePostingList::UP(posting_list));
}
}
+}
}
-std::vector<PredicatePostingList::UP> PredicateBlueprint::createPostingLists() const {
+std::vector<PredicatePostingList::UP>
+PredicateBlueprint::createPostingLists() const {
size_t total_size = _interval_btree_iterators.size() + _interval_vector_iterators.size() +
_bounds_btree_iterators.size() + _bounds_vector_iterators.size() + 2;
std::vector<PredicatePostingList::UP> posting_lists;
@@ -333,17 +343,15 @@ std::vector<PredicatePostingList::UP> PredicateBlueprint::createPostingLists() c
});
if (_zstar_vector_iterator && _zstar_vector_iterator->valid()) {
- auto posting_list = PredicatePostingList::UP(
- new PredicateZstarCompressedPostingList<VectorIterator>(interval_store, *_zstar_vector_iterator));
+ auto posting_list = std::make_unique<PredicateZstarCompressedPostingList<VectorIterator>>(interval_store, *_zstar_vector_iterator);
posting_lists.emplace_back(std::move(posting_list));
} else if (_zstar_btree_iterator && _zstar_btree_iterator->valid()) {
- auto posting_list = PredicatePostingList::UP(
- new PredicateZstarCompressedPostingList<BTreeIterator>(interval_store, *_zstar_btree_iterator));
+ auto posting_list = std::make_unique<PredicateZstarCompressedPostingList<BTreeIterator>>(interval_store, *_zstar_btree_iterator);
posting_lists.emplace_back(std::move(posting_list));
}
auto iterator = _index.getZeroConstraintDocs().begin();
if (iterator.valid()) {
- auto posting_list = PredicatePostingList::UP(new PredicateZeroConstraintPostingList(iterator));
+ auto posting_list = std::make_unique<PredicateZeroConstraintPostingList>(iterator);
posting_lists.emplace_back(std::move(posting_list));
}
return posting_lists;
diff --git a/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.h b/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.h
index 9609cd4f6c9..ef225e86c50 100644
--- a/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.h
+++ b/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.h
@@ -50,8 +50,11 @@ public:
void fetchPostings(const ExecuteInfo &execInfo) override;
SearchIterator::UP
- createLeafSearch(const fef::TermFieldMatchDataArray &tfmda,
- bool strict) const override;
+ createLeafSearch(const fef::TermFieldMatchDataArray &tfmda, bool strict) const override;
+
+ // Exposed for testing
+ const BitVectorCache::CountVector & getKV() const { return _kV; }
+ const BitVectorCache::KeySet & getCachedFeatures() const { return _cachedFeatures; }
private:
using BTreeIterator = predicate::SimpleIndex<vespalib::datastore::EntryRef>::BTreeIterator;
using VectorIterator = predicate::SimpleIndex<vespalib::datastore::EntryRef>::VectorIterator;
@@ -70,24 +73,24 @@ private:
void addZeroConstraintToK();
std::vector<predicate::PredicatePostingList::UP> createPostingLists() const;
- const PredicateAttribute & _attribute;
+ const PredicateAttribute & _attribute;
const predicate::PredicateIndex &_index;
- Alloc _kVBacking;
- BitVectorCache::CountVector _kV;
- BitVectorCache::KeySet _cachedFeatures;
+ Alloc _kVBacking;
+ BitVectorCache::CountVector _kV;
+ BitVectorCache::KeySet _cachedFeatures;
- std::vector<IntervalEntry> _interval_dict_entries;
- std::vector<BoundsEntry> _bounds_dict_entries;
- vespalib::datastore::EntryRef _zstar_dict_entry;
+ std::vector<IntervalEntry> _interval_dict_entries;
+ std::vector<BoundsEntry> _bounds_dict_entries;
+ vespalib::datastore::EntryRef _zstar_dict_entry;
- std::vector<IntervalIteratorEntry<BTreeIterator>> _interval_btree_iterators;
+ std::vector<IntervalIteratorEntry<BTreeIterator>> _interval_btree_iterators;
std::vector<IntervalIteratorEntry<VectorIterator>> _interval_vector_iterators;
- std::vector<BoundsIteratorEntry<BTreeIterator>> _bounds_btree_iterators;
- std::vector<BoundsIteratorEntry<VectorIterator>> _bounds_vector_iterators;
+ std::vector<BoundsIteratorEntry<BTreeIterator>> _bounds_btree_iterators;
+ std::vector<BoundsIteratorEntry<VectorIterator>> _bounds_vector_iterators;
// The zstar iterator is either a vector or a btree iterator.
- optional<BTreeIterator> _zstar_btree_iterator;
+ optional<BTreeIterator> _zstar_btree_iterator;
optional<VectorIterator> _zstar_vector_iterator;
- bool _fetch_postings_done;
+ bool _fetch_postings_done;
};
}
diff --git a/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp b/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp
index 465793739ff..545ee7cfa96 100644
--- a/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp
@@ -25,8 +25,8 @@ namespace {
// TODO: Move this to MemoryAllocator, with name PAGE_SIZE.
constexpr size_t small_page_size = 4_Ki;
-constexpr size_t min_num_arrays_for_new_buffer = 64_Ki;
-constexpr float alloc_grow_factor = 0.2;
+constexpr size_t min_num_arrays_for_new_buffer = 512_Ki;
+constexpr float alloc_grow_factor = 0.3;
// TODO: Adjust these numbers to what we accept as max in config.
constexpr size_t max_level_array_size = 16;
constexpr size_t max_link_array_size = 64;
diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/slobrok/SlobrokMonitorManagerImpl.java b/service-monitor/src/main/java/com/yahoo/vespa/service/slobrok/SlobrokMonitorManagerImpl.java
index 0c9148ad834..34e5fe49f69 100644
--- a/service-monitor/src/main/java/com/yahoo/vespa/service/slobrok/SlobrokMonitorManagerImpl.java
+++ b/service-monitor/src/main/java/com/yahoo/vespa/service/slobrok/SlobrokMonitorManagerImpl.java
@@ -50,7 +50,7 @@ public class SlobrokMonitorManagerImpl extends AbstractComponent implements Slob
private SlobrokMonitorManagerImpl(Transport transport, Supervisor orb, DuperModelManager duperModel) {
this(() -> new SlobrokMonitor(orb), transport, duperModel);
- orb.useSmallBuffers();
+ orb.setDropEmptyBuffers(true);
}
SlobrokMonitorManagerImpl(Supplier<SlobrokMonitor> slobrokMonitorFactory, Transport transport, DuperModelManager duperModel) {
diff --git a/slobrok/src/tests/registerapi/registerapi.cpp b/slobrok/src/tests/registerapi/registerapi.cpp
index 59bc4690985..696812e2a3d 100644
--- a/slobrok/src/tests/registerapi/registerapi.cpp
+++ b/slobrok/src/tests/registerapi/registerapi.cpp
@@ -6,6 +6,7 @@
#include <vespa/slobrok/sbregister.h>
#include <vespa/slobrok/server/slobrokserver.h>
#include <vespa/fnet/frt/supervisor.h>
+#include <vespa/fnet/transport.h>
#include <sstream>
#include <algorithm>
#include <thread>
@@ -217,5 +218,6 @@ Test::Main()
.add("F/y/w", myspec.c_str())));
mock.stop();
+ server.shutdown();
TEST_DONE();
}
diff --git a/slobrok/src/vespa/slobrok/sbmirror.cpp b/slobrok/src/vespa/slobrok/sbmirror.cpp
index 5f6a54504e5..8102e1fecbf 100644
--- a/slobrok/src/vespa/slobrok/sbmirror.cpp
+++ b/slobrok/src/vespa/slobrok/sbmirror.cpp
@@ -26,7 +26,6 @@ MirrorAPI::MirrorAPI(FRT_Supervisor &orb, const ConfiguratorFactory & config)
_configurator(config.create(_slobrokSpecs)),
_currSlobrok(""),
_rpc_ms(100),
- _idx(0),
_backOff(),
_target(0),
_req(0)
diff --git a/slobrok/src/vespa/slobrok/sbmirror.h b/slobrok/src/vespa/slobrok/sbmirror.h
index ad86daa56bb..ec1ce22194b 100644
--- a/slobrok/src/vespa/slobrok/sbmirror.h
+++ b/slobrok/src/vespa/slobrok/sbmirror.h
@@ -101,7 +101,6 @@ private:
Configurator::UP _configurator;
std::string _currSlobrok;
int _rpc_ms;
- uint32_t _idx;
BackOff _backOff;
FRT_Target *_target;
FRT_RPCRequest *_req;
diff --git a/standalone-container/src/main/java/com/yahoo/container/standalone/LocalFileDb.java b/standalone-container/src/main/java/com/yahoo/container/standalone/LocalFileDb.java
index 4d968914dfa..d21211bdffe 100644
--- a/standalone-container/src/main/java/com/yahoo/container/standalone/LocalFileDb.java
+++ b/standalone-container/src/main/java/com/yahoo/container/standalone/LocalFileDb.java
@@ -9,11 +9,11 @@ import com.yahoo.net.HostName;
import java.io.File;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
+import java.nio.ByteBuffer;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
@@ -78,12 +78,13 @@ public class LocalFileDb implements FileAcquirer, FileRegistry {
throw new RuntimeException("addUri(String uri) is not implemented here.");
}
- public String fileSourceHost() {
- return HostName.getLocalhost();
+ @Override
+ public FileReference addBlob(ByteBuffer blob) {
+ throw new RuntimeException("addBlob(ByteBuffer blob) is not implemented here.");
}
- public Set<String> allRelativePaths() {
- return fileReferenceToFile.values().stream().map(File::getPath).collect(Collectors.toSet());
+ public String fileSourceHost() {
+ return HostName.getLocalhost();
}
private static Constructor<FileReference> createFileReferenceConstructor() {
diff --git a/storage/src/tests/distributor/CMakeLists.txt b/storage/src/tests/distributor/CMakeLists.txt
index f43280a5b44..fad8ca0bb25 100644
--- a/storage/src/tests/distributor/CMakeLists.txt
+++ b/storage/src/tests/distributor/CMakeLists.txt
@@ -7,14 +7,12 @@ vespa_add_executable(storage_distributor_gtest_runner_app TEST
bucket_db_prune_elision_test.cpp
bucketdatabasetest.cpp
bucketdbmetricupdatertest.cpp
- bucketdbupdatertest.cpp
bucketgctimecalculatortest.cpp
bucketstateoperationtest.cpp
distributor_bucket_space_test.cpp
distributor_host_info_reporter_test.cpp
distributor_message_sender_stub.cpp
distributor_stripe_pool_test.cpp
- distributortest.cpp
distributortestutil.cpp
externaloperationhandlertest.cpp
garbagecollectiontest.cpp
@@ -22,6 +20,8 @@ vespa_add_executable(storage_distributor_gtest_runner_app TEST
gtest_runner.cpp
idealstatemanagertest.cpp
joinbuckettest.cpp
+ legacy_bucket_db_updater_test.cpp
+ legacy_distributor_test.cpp
maintenanceschedulertest.cpp
mergelimitertest.cpp
mergeoperationtest.cpp
diff --git a/storage/src/tests/distributor/blockingoperationstartertest.cpp b/storage/src/tests/distributor/blockingoperationstartertest.cpp
index 5203fec2462..861f8e72832 100644
--- a/storage/src/tests/distributor/blockingoperationstartertest.cpp
+++ b/storage/src/tests/distributor/blockingoperationstartertest.cpp
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/storage/frameworkimpl/component/storagecomponentregisterimpl.h>
#include <vespa/storage/distributor/blockingoperationstarter.h>
+#include <vespa/storage/distributor/distributor_stripe_operation_context.h>
#include <vespa/storage/distributor/pendingmessagetracker.h>
#include <vespa/storage/distributor/operation_sequencer.h>
#include <tests/distributor/maintenancemocks.h>
@@ -13,6 +14,86 @@ using namespace ::testing;
namespace storage::distributor {
+struct FakeDistributorStripeOperationContext : public DistributorStripeOperationContext {
+
+ PendingMessageTracker& _message_tracker;
+
+ explicit FakeDistributorStripeOperationContext(PendingMessageTracker& message_tracker)
+ : _message_tracker(message_tracker)
+ {}
+
+ ~FakeDistributorStripeOperationContext() override = default;
+
+ // From DistributorOperationContext:
+ api::Timestamp generate_unique_timestamp() override {
+ abort();
+ }
+ const DistributorBucketSpaceRepo& bucket_space_repo() const noexcept override {
+ abort();
+ }
+ DistributorBucketSpaceRepo& bucket_space_repo() noexcept override {
+ abort();
+ }
+ const DistributorBucketSpaceRepo& read_only_bucket_space_repo() const noexcept override {
+ abort();
+ }
+ DistributorBucketSpaceRepo& read_only_bucket_space_repo() noexcept override {
+ abort();
+ }
+ const DistributorConfiguration& distributor_config() const noexcept override {
+ abort();
+ }
+ // From DistributorStripeOperationContext:
+ void update_bucket_database(const document::Bucket&, const BucketCopy&, uint32_t) override {
+ abort();
+ }
+ void update_bucket_database(const document::Bucket&, const std::vector<BucketCopy>&, uint32_t) override {
+ abort();
+ }
+ void remove_node_from_bucket_database(const document::Bucket&, uint16_t) override {
+ abort();
+ }
+ void remove_nodes_from_bucket_database(const document::Bucket&, const std::vector<uint16_t>&) override {
+ abort();
+ }
+ document::BucketId make_split_bit_constrained_bucket_id(const document::DocumentId&) const override {
+ abort();
+ }
+ void recheck_bucket_info(uint16_t, const document::Bucket&) override {
+ abort();
+ }
+ document::BucketId get_sibling(const document::BucketId&) const override {
+ abort();
+ }
+ void send_inline_split_if_bucket_too_large(document::BucketSpace, const BucketDatabase::Entry&, uint8_t) override {
+ abort();
+ }
+ OperationRoutingSnapshot read_snapshot_for_bucket(const document::Bucket&) const override {
+ abort();
+ }
+ PendingMessageTracker& pending_message_tracker() noexcept override {
+ return _message_tracker;
+ }
+ const PendingMessageTracker& pending_message_tracker() const noexcept override {
+ return _message_tracker;
+ }
+ bool has_pending_message(uint16_t, const document::Bucket&, uint32_t) const override {
+ abort();
+ }
+ const lib::ClusterState* pending_cluster_state_or_null(const document::BucketSpace&) const override {
+ abort();
+ }
+ const lib::ClusterStateBundle& cluster_state_bundle() const override {
+ abort();
+ }
+ bool storage_node_is_up(document::BucketSpace, uint32_t) const override {
+ abort();
+ }
+ const BucketGcTimeCalculator::BucketIdHasher& bucket_id_hasher() const override {
+ abort();
+ }
+};
+
struct BlockingOperationStarterTest : Test {
std::shared_ptr<Operation> createMockOperation() {
return std::make_shared<MockOperation>(makeDocumentBucket(BucketId(16, 1)));
@@ -27,6 +108,7 @@ struct BlockingOperationStarterTest : Test {
std::unique_ptr<MockOperationStarter> _starterImpl;
std::unique_ptr<StorageComponentRegisterImpl> _compReg;
std::unique_ptr<PendingMessageTracker> _messageTracker;
+ std::unique_ptr<FakeDistributorStripeOperationContext> _fake_ctx;
std::unique_ptr<OperationSequencer> _operation_sequencer;
std::unique_ptr<BlockingOperationStarter> _operationStarter;
@@ -41,8 +123,9 @@ BlockingOperationStarterTest::SetUp()
_compReg->setClock(_clock);
_clock.setAbsoluteTimeInSeconds(1);
_messageTracker = std::make_unique<PendingMessageTracker>(*_compReg);
+ _fake_ctx = std::make_unique<FakeDistributorStripeOperationContext>(*_messageTracker);
_operation_sequencer = std::make_unique<OperationSequencer>();
- _operationStarter = std::make_unique<BlockingOperationStarter>(*_messageTracker, *_operation_sequencer, *_starterImpl);
+ _operationStarter = std::make_unique<BlockingOperationStarter>(*_fake_ctx, *_operation_sequencer, *_starterImpl);
}
TEST_F(BlockingOperationStarterTest, operation_not_blocked_when_no_messages_pending) {
diff --git a/storage/src/tests/distributor/distributor_host_info_reporter_test.cpp b/storage/src/tests/distributor/distributor_host_info_reporter_test.cpp
index e1010285dba..934ecc7456b 100644
--- a/storage/src/tests/distributor/distributor_host_info_reporter_test.cpp
+++ b/storage/src/tests/distributor/distributor_host_info_reporter_test.cpp
@@ -11,10 +11,12 @@
namespace storage::distributor {
-using PerNodeBucketSpacesStats = BucketSpacesStatsProvider::PerNodeBucketSpacesStats;
using End = vespalib::JsonStream::End;
using File = vespalib::File;
+using MinReplicaStats = std::unordered_map<uint16_t, uint32_t>;
using Object = vespalib::JsonStream::Object;
+using PerNodeBucketSpacesStats = BucketSpacesStatsProvider::PerNodeBucketSpacesStats;
+using BucketSpacesStats = BucketSpacesStatsProvider::BucketSpacesStats;
using namespace ::testing;
struct DistributorHostInfoReporterTest : Test {
@@ -35,7 +37,7 @@ namespace {
// My kingdom for GoogleMock!
struct MockedMinReplicaProvider : MinReplicaProvider
{
- std::unordered_map<uint16_t, uint32_t> minReplica;
+ MinReplicaStats minReplica;
std::unordered_map<uint16_t, uint32_t> getMinReplica() const override {
return minReplica;
@@ -121,7 +123,7 @@ struct Fixture {
TEST_F(DistributorHostInfoReporterTest, min_replica_stats_are_reported) {
Fixture f;
- std::unordered_map<uint16_t, uint32_t> minReplica;
+ MinReplicaStats minReplica;
minReplica[0] = 2;
minReplica[5] = 9;
f.minReplicaProvider.minReplica = minReplica;
@@ -133,10 +135,30 @@ TEST_F(DistributorHostInfoReporterTest, min_replica_stats_are_reported) {
EXPECT_EQ(9, getMinReplica(root, 5));
}
+TEST_F(DistributorHostInfoReporterTest, merge_min_replica_stats) {
+
+ MinReplicaStats min_replica_a;
+ min_replica_a[3] = 2;
+ min_replica_a[5] = 4;
+
+ MinReplicaStats min_replica_b;
+ min_replica_b[5] = 6;
+ min_replica_b[7] = 8;
+
+ MinReplicaStats result;
+ merge_min_replica_stats(result, min_replica_a);
+ merge_min_replica_stats(result, min_replica_b);
+
+ EXPECT_EQ(3, result.size());
+ EXPECT_EQ(2, result[3]);
+ EXPECT_EQ(4, result[5]);
+ EXPECT_EQ(8, result[7]);
+}
+
TEST_F(DistributorHostInfoReporterTest, generate_example_json) {
Fixture f;
- std::unordered_map<uint16_t, uint32_t> minReplica;
+ MinReplicaStats minReplica;
minReplica[0] = 2;
minReplica[5] = 9;
f.minReplicaProvider.minReplica = minReplica;
@@ -175,7 +197,7 @@ TEST_F(DistributorHostInfoReporterTest, no_report_generated_if_disabled) {
Fixture f;
f.reporter.enableReporting(false);
- std::unordered_map<uint16_t, uint32_t> minReplica;
+ MinReplicaStats minReplica;
minReplica[0] = 2;
minReplica[5] = 9;
f.minReplicaProvider.minReplica = minReplica;
@@ -210,5 +232,41 @@ TEST_F(DistributorHostInfoReporterTest, bucket_spaces_stats_are_reported) {
}
}
+TEST_F(DistributorHostInfoReporterTest, merge_per_node_bucket_spaces_stats) {
+
+ PerNodeBucketSpacesStats stats_a;
+ stats_a[3]["default"] = BucketSpaceStats(3, 2);
+ stats_a[3]["global"] = BucketSpaceStats(5, 4);
+ stats_a[5]["default"] = BucketSpaceStats(7, 6);
+ stats_a[5]["global"] = BucketSpaceStats(9, 8);
+
+ PerNodeBucketSpacesStats stats_b;
+ stats_b[5]["default"] = BucketSpaceStats(11, 10);
+ stats_b[5]["global"] = BucketSpaceStats(13, 12);
+ stats_b[7]["default"] = BucketSpaceStats(15, 14);
+
+ PerNodeBucketSpacesStats result;
+ merge_per_node_bucket_spaces_stats(result, stats_a);
+ merge_per_node_bucket_spaces_stats(result, stats_b);
+
+ PerNodeBucketSpacesStats exp;
+ exp[3]["default"] = BucketSpaceStats(3, 2);
+ exp[3]["global"] = BucketSpaceStats(5, 4);
+ exp[5]["default"] = BucketSpaceStats(7+11, 6+10);
+ exp[5]["global"] = BucketSpaceStats(9+13, 8+12);
+ exp[7]["default"] = BucketSpaceStats(15, 14);
+
+ EXPECT_EQ(exp, result);
}
+TEST_F(DistributorHostInfoReporterTest, merge_bucket_space_stats_maintains_valid_flag) {
+ BucketSpaceStats stats_a(5, 3);
+ BucketSpaceStats stats_b;
+
+ stats_a.merge(stats_b);
+ EXPECT_FALSE(stats_a.valid());
+ EXPECT_EQ(5, stats_a.bucketsTotal());
+ EXPECT_EQ(3, stats_a.bucketsPending());
+}
+
+}
diff --git a/storage/src/tests/distributor/distributor_message_sender_stub.h b/storage/src/tests/distributor/distributor_message_sender_stub.h
index 59a5a82b7df..18662fbce8f 100644
--- a/storage/src/tests/distributor/distributor_message_sender_stub.h
+++ b/storage/src/tests/distributor/distributor_message_sender_stub.h
@@ -87,6 +87,11 @@ public:
return dummy_cluster_context;
}
+ distributor::PendingMessageTracker& getPendingMessageTracker() override {
+ assert(_pending_message_tracker);
+ return *_pending_message_tracker;
+ }
+
const distributor::PendingMessageTracker& getPendingMessageTracker() const override {
assert(_pending_message_tracker);
return *_pending_message_tracker;
diff --git a/storage/src/tests/distributor/distributortestutil.cpp b/storage/src/tests/distributor/distributortestutil.cpp
index a2f32d8faa2..3ec1c95b206 100644
--- a/storage/src/tests/distributor/distributortestutil.cpp
+++ b/storage/src/tests/distributor/distributortestutil.cpp
@@ -101,6 +101,19 @@ DistributorTestUtil::triggerDistributionChange(lib::Distribution::SP distr)
}
void
+DistributorTestUtil::receive_set_system_state_command(const vespalib::string& state_str)
+{
+ auto state_cmd = std::make_shared<api::SetSystemStateCommand>(lib::ClusterState(state_str));
+ _distributor->handleMessage(state_cmd); // TODO move semantics
+}
+
+void
+DistributorTestUtil::handle_top_level_message(const std::shared_ptr<api::StorageMessage>& msg)
+{
+ _distributor->handleMessage(msg);
+}
+
+void
DistributorTestUtil::setTypeRepo(const std::shared_ptr<const document::DocumentTypeRepo> &repo)
{
_node->getComponentRegister().setDocumentTypeRepo(repo);
diff --git a/storage/src/tests/distributor/distributortestutil.h b/storage/src/tests/distributor/distributortestutil.h
index 6664b8d823d..533fd49811f 100644
--- a/storage/src/tests/distributor/distributortestutil.h
+++ b/storage/src/tests/distributor/distributortestutil.h
@@ -202,6 +202,12 @@ public:
void setSystemState(const lib::ClusterState& systemState);
+ // Invokes full cluster state transition pipeline rather than directly applying
+ // the state and just pretending everything has been completed.
+ void receive_set_system_state_command(const vespalib::string& state_str);
+
+ void handle_top_level_message(const std::shared_ptr<api::StorageMessage>& msg);
+
// Must be called prior to createLinks() to have any effect
void set_num_distributor_stripes(uint32_t n_stripes) noexcept {
_num_distributor_stripes = n_stripes;
diff --git a/storage/src/tests/distributor/idealstatemanagertest.cpp b/storage/src/tests/distributor/idealstatemanagertest.cpp
index 0a36e5cd0e5..e38e4b5b668 100644
--- a/storage/src/tests/distributor/idealstatemanagertest.cpp
+++ b/storage/src/tests/distributor/idealstatemanagertest.cpp
@@ -40,18 +40,18 @@ struct IdealStateManagerTest : Test, DistributorTestUtil {
bool checkBlock(const IdealStateOperation& op,
const document::Bucket& bucket,
- const PendingMessageTracker& tracker,
+ const DistributorStripeOperationContext& ctx,
const OperationSequencer& op_seq) const
{
- return op.checkBlock(bucket, tracker, op_seq);
+ return op.checkBlock(bucket, ctx, op_seq);
}
bool checkBlockForAllNodes(const IdealStateOperation& op,
const document::Bucket& bucket,
- const PendingMessageTracker& tracker,
+ const DistributorStripeOperationContext& ctx,
const OperationSequencer& op_seq) const
{
- return op.checkBlockForAllNodes(bucket, tracker, op_seq);
+ return op.checkBlockForAllNodes(bucket, ctx, op_seq);
}
std::vector<document::BucketSpace> _bucketSpaces;
@@ -170,92 +170,86 @@ TEST_F(IdealStateManagerTest, recheck_when_active) {
active_ideal_state_operations());
}
-TEST_F(IdealStateManagerTest, block_ideal_state_ops_on_full_request_bucket_info) {
+/**
+ * Don't schedule ideal state operations when there's a pending cluster state.
+ * This subsumes the legacy behavior of blocking ideal state ops when there is a
+ * zero-bucket RequestBucketInfoCommand pending towards a node (i.e. full bucket
+ * info fetch).
+ *
+ * This is for two reasons:
+ * - Avoids race conditions where we change the bucket set concurrently with
+ * requesting bucket info.
+ * - Once we get updated bucket info it's likely that the set of ideal state ops
+ * to execute will change anyway, so it makes sense to wait until it's ready.
+ */
+TEST_F(IdealStateManagerTest, block_ideal_state_ops_when_pending_cluster_state_is_present) {
+
+ setupDistributor(2, 10, "version:1 distributor:1 storage:1 .0.s:d");
+
+ // Trigger a pending cluster state with bucket info requests towards 1 node
+ receive_set_system_state_command("version:2 distributor:1 storage:1");
- setupDistributor(2, 10, "distributor:1 storage:2");
-
- framework::defaultimplementation::FakeClock clock;
- PendingMessageTracker tracker(_node->getComponentRegister());
OperationSequencer op_seq;
-
document::BucketId bid(16, 1234);
- std::vector<document::BucketId> buckets;
-
- // RequestBucketInfoCommand does not have a specific bucketid since it's
- // sent to the entire node. It will then use a null bucketid.
- {
- auto msg = std::make_shared<api::RequestBucketInfoCommand>(makeBucketSpace(), buckets);
- msg->setAddress(api::StorageMessageAddress::create(dummy_cluster_context.cluster_name_ptr(), lib::NodeType::STORAGE, 4));
- tracker.insert(msg);
- }
{
RemoveBucketOperation op(dummy_cluster_context,
BucketAndNodes(makeDocumentBucket(bid), toVector<uint16_t>(3, 4)));
- EXPECT_TRUE(op.isBlocked(tracker, op_seq));
- }
-
- {
- // Don't trigger on requests to other nodes.
- RemoveBucketOperation op(dummy_cluster_context,
- BucketAndNodes(makeDocumentBucket(bid), toVector<uint16_t>(3, 5)));
- EXPECT_FALSE(op.isBlocked(tracker, op_seq));
+ EXPECT_TRUE(op.isBlocked(operation_context(), op_seq));
}
- // Don't block on null-bucket messages that aren't RequestBucketInfo.
- {
- auto msg = std::make_shared<api::CreateVisitorCommand>(makeBucketSpace(), "foo", "bar", "baz");
- msg->setAddress(api::StorageMessageAddress::create(dummy_cluster_context.cluster_name_ptr(), lib::NodeType::STORAGE, 7));
- tracker.insert(msg);
+ // Clear pending by replying with zero buckets for all bucket spaces
+ ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
+ for (uint32_t i = 0; i < _sender.commands().size(); ++i) {
+ auto& bucket_req = dynamic_cast<api::RequestBucketInfoCommand&>(*_sender.command(i));
+ handle_top_level_message(bucket_req.makeReply());
}
{
RemoveBucketOperation op(dummy_cluster_context,
BucketAndNodes(makeDocumentBucket(bid), toVector<uint16_t>(7)));
- EXPECT_FALSE(op.isBlocked(tracker, op_seq));
+ EXPECT_FALSE(op.isBlocked(operation_context(), op_seq));
}
}
TEST_F(IdealStateManagerTest, block_check_for_all_operations_to_specific_bucket) {
setupDistributor(2, 10, "distributor:1 storage:2");
framework::defaultimplementation::FakeClock clock;
- PendingMessageTracker tracker(_node->getComponentRegister());
OperationSequencer op_seq;
document::BucketId bid(16, 1234);
{
auto msg = std::make_shared<api::JoinBucketsCommand>(makeDocumentBucket(bid));
msg->setAddress(api::StorageMessageAddress::create(dummy_cluster_context.cluster_name_ptr(), lib::NodeType::STORAGE, 4));
- tracker.insert(msg);
+ pending_message_tracker().insert(msg);
}
{
RemoveBucketOperation op(dummy_cluster_context,
BucketAndNodes(makeDocumentBucket(bid), toVector<uint16_t>(7)));
// Not blocked for exact node match.
- EXPECT_FALSE(checkBlock(op, makeDocumentBucket(bid), tracker, op_seq));
+ EXPECT_FALSE(checkBlock(op, makeDocumentBucket(bid), operation_context(), op_seq));
// But blocked for bucket match!
- EXPECT_TRUE(checkBlockForAllNodes(op, makeDocumentBucket(bid), tracker, op_seq));
+ EXPECT_TRUE(checkBlockForAllNodes(op, makeDocumentBucket(bid), operation_context(), op_seq));
}
}
TEST_F(IdealStateManagerTest, block_operations_with_locked_buckets) {
setupDistributor(2, 10, "distributor:1 storage:2");
framework::defaultimplementation::FakeClock clock;
- PendingMessageTracker tracker(_node->getComponentRegister());
OperationSequencer op_seq;
const auto bucket = makeDocumentBucket(document::BucketId(16, 1234));
{
auto msg = std::make_shared<api::JoinBucketsCommand>(bucket);
msg->setAddress(api::StorageMessageAddress::create(dummy_cluster_context.cluster_name_ptr(), lib::NodeType::STORAGE, 1));
- tracker.insert(msg);
+ pending_message_tracker().insert(msg);
}
auto token = op_seq.try_acquire(bucket, "foo");
EXPECT_TRUE(token.valid());
{
RemoveBucketOperation op(dummy_cluster_context, BucketAndNodes(bucket, toVector<uint16_t>(0)));
- EXPECT_TRUE(checkBlock(op, bucket, tracker, op_seq));
- EXPECT_TRUE(checkBlockForAllNodes(op, bucket, tracker, op_seq));
+ EXPECT_TRUE(checkBlock(op, bucket, operation_context(), op_seq));
+ EXPECT_TRUE(checkBlockForAllNodes(op, bucket, operation_context(), op_seq));
}
}
diff --git a/storage/src/tests/distributor/bucketdbupdatertest.cpp b/storage/src/tests/distributor/legacy_bucket_db_updater_test.cpp
index 7e8fec3b83a..e353e976081 100644
--- a/storage/src/tests/distributor/bucketdbupdatertest.cpp
+++ b/storage/src/tests/distributor/legacy_bucket_db_updater_test.cpp
@@ -57,12 +57,14 @@ getRequestBucketInfoStrings(uint32_t count)
}
-class BucketDBUpdaterTest : public Test,
- public DistributorTestUtil
+// TODO STRIPE: Add variant of this test for the new stripe mode.
+// TODO STRIPE: Remove this test when legacy mode is gone.
+class LegacyBucketDBUpdaterTest : public Test,
+ public DistributorTestUtil
{
public:
- BucketDBUpdaterTest();
- ~BucketDBUpdaterTest() override;
+ LegacyBucketDBUpdaterTest();
+ ~LegacyBucketDBUpdaterTest() override;
auto &defaultDistributorBucketSpace() { return getBucketSpaceRepo().get(makeBucketSpace()); }
@@ -501,7 +503,7 @@ public:
std::unique_ptr<PendingClusterState> state;
PendingClusterStateFixture(
- BucketDBUpdaterTest& owner,
+ LegacyBucketDBUpdaterTest& owner,
const std::string& oldClusterState,
const std::string& newClusterState)
{
@@ -520,7 +522,7 @@ public:
}
PendingClusterStateFixture(
- BucketDBUpdaterTest& owner,
+ LegacyBucketDBUpdaterTest& owner,
const std::string& oldClusterState)
{
ClusterInformation::CSP clusterInfo(
@@ -551,15 +553,15 @@ public:
}
};
-BucketDBUpdaterTest::BucketDBUpdaterTest()
+LegacyBucketDBUpdaterTest::LegacyBucketDBUpdaterTest()
: DistributorTestUtil(),
_bucketSpaces()
{
}
-BucketDBUpdaterTest::~BucketDBUpdaterTest() = default;
+LegacyBucketDBUpdaterTest::~LegacyBucketDBUpdaterTest() = default;
-TEST_F(BucketDBUpdaterTest, normal_usage) {
+TEST_F(LegacyBucketDBUpdaterTest, normal_usage) {
setSystemState(lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"));
ASSERT_EQ(messageCount(3), _sender.commands().size());
@@ -590,7 +592,7 @@ TEST_F(BucketDBUpdaterTest, normal_usage) {
ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(10, "distributor:2 storage:3"));
}
-TEST_F(BucketDBUpdaterTest, distributor_change) {
+TEST_F(LegacyBucketDBUpdaterTest, distributor_change) {
int numBuckets = 100;
// First sends request
@@ -620,7 +622,7 @@ TEST_F(BucketDBUpdaterTest, distributor_change) {
ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(numBuckets, "distributor:2 storage:3"));
}
-TEST_F(BucketDBUpdaterTest, distributor_change_with_grouping) {
+TEST_F(LegacyBucketDBUpdaterTest, distributor_change_with_grouping) {
std::string distConfig(getDistConfig6Nodes2Groups());
setDistribution(distConfig);
int numBuckets = 100;
@@ -651,7 +653,7 @@ TEST_F(BucketDBUpdaterTest, distributor_change_with_grouping) {
ASSERT_EQ(messageCount(6), _sender.commands().size());
}
-TEST_F(BucketDBUpdaterTest, normal_usage_initializing) {
+TEST_F(LegacyBucketDBUpdaterTest, normal_usage_initializing) {
setSystemState(lib::ClusterState("distributor:1 .0.s:i storage:1 .0.s:i"));
ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
@@ -688,7 +690,7 @@ TEST_F(BucketDBUpdaterTest, normal_usage_initializing) {
ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(20, "distributor:1 storage:1"));
}
-TEST_F(BucketDBUpdaterTest, failed_request_bucket_info) {
+TEST_F(LegacyBucketDBUpdaterTest, failed_request_bucket_info) {
setSystemState(lib::ClusterState("distributor:1 .0.s:i storage:1"));
// 2 messages sent up: 1 to the nodes, and one reply to the setsystemstate.
@@ -730,7 +732,7 @@ TEST_F(BucketDBUpdaterTest, failed_request_bucket_info) {
EXPECT_EQ(std::string("Set system state"), _senderDown.getCommands());
}
-TEST_F(BucketDBUpdaterTest, down_while_init) {
+TEST_F(LegacyBucketDBUpdaterTest, down_while_init) {
ASSERT_NO_FATAL_FAILURE(setStorageNodes(3));
ASSERT_NO_FATAL_FAILURE(fakeBucketReply(lib::ClusterState("distributor:1 storage:3"),
@@ -746,7 +748,7 @@ TEST_F(BucketDBUpdaterTest, down_while_init) {
}
bool
-BucketDBUpdaterTest::bucketExistsThatHasNode(int bucketCount, uint16_t node) const
+LegacyBucketDBUpdaterTest::bucketExistsThatHasNode(int bucketCount, uint16_t node) const
{
for (int i=1; i<bucketCount; i++) {
if (bucketHasNode(document::BucketId(16, i), node)) {
@@ -758,7 +760,7 @@ BucketDBUpdaterTest::bucketExistsThatHasNode(int bucketCount, uint16_t node) con
}
std::string
-BucketDBUpdaterTest::getNodeList(std::vector<uint16_t> nodes, size_t count)
+LegacyBucketDBUpdaterTest::getNodeList(std::vector<uint16_t> nodes, size_t count)
{
std::ostringstream ost;
bool first = true;
@@ -775,13 +777,13 @@ BucketDBUpdaterTest::getNodeList(std::vector<uint16_t> nodes, size_t count)
}
std::string
-BucketDBUpdaterTest::getNodeList(std::vector<uint16_t> nodes)
+LegacyBucketDBUpdaterTest::getNodeList(std::vector<uint16_t> nodes)
{
return getNodeList(std::move(nodes), _bucketSpaces.size());
}
std::vector<uint16_t>
-BucketDBUpdaterTest::expandNodeVec(const std::vector<uint16_t> &nodes)
+LegacyBucketDBUpdaterTest::expandNodeVec(const std::vector<uint16_t> &nodes)
{
std::vector<uint16_t> res;
size_t count = _bucketSpaces.size();
@@ -793,7 +795,7 @@ BucketDBUpdaterTest::expandNodeVec(const std::vector<uint16_t> &nodes)
return res;
}
-TEST_F(BucketDBUpdaterTest, node_down) {
+TEST_F(LegacyBucketDBUpdaterTest, node_down) {
ASSERT_NO_FATAL_FAILURE(setStorageNodes(3));
enableDistributorClusterState("distributor:1 storage:3");
@@ -808,7 +810,7 @@ TEST_F(BucketDBUpdaterTest, node_down) {
EXPECT_FALSE(bucketExistsThatHasNode(100, 1));
}
-TEST_F(BucketDBUpdaterTest, storage_node_in_maintenance_clears_buckets_for_node) {
+TEST_F(LegacyBucketDBUpdaterTest, storage_node_in_maintenance_clears_buckets_for_node) {
ASSERT_NO_FATAL_FAILURE(setStorageNodes(3));
enableDistributorClusterState("distributor:1 storage:3");
@@ -823,7 +825,7 @@ TEST_F(BucketDBUpdaterTest, storage_node_in_maintenance_clears_buckets_for_node)
EXPECT_FALSE(bucketExistsThatHasNode(100, 1));
}
-TEST_F(BucketDBUpdaterTest, node_down_copies_get_in_sync) {
+TEST_F(LegacyBucketDBUpdaterTest, node_down_copies_get_in_sync) {
ASSERT_NO_FATAL_FAILURE(setStorageNodes(3));
lib::ClusterState systemState("distributor:1 storage:3");
@@ -840,7 +842,7 @@ TEST_F(BucketDBUpdaterTest, node_down_copies_get_in_sync) {
dumpBucket(bid));
}
-TEST_F(BucketDBUpdaterTest, initializing_while_recheck) {
+TEST_F(LegacyBucketDBUpdaterTest, initializing_while_recheck) {
lib::ClusterState systemState("distributor:1 storage:2 .0.s:i .0.i:0.1");
setSystemState(systemState);
@@ -858,7 +860,7 @@ TEST_F(BucketDBUpdaterTest, initializing_while_recheck) {
EXPECT_EQ(MessageType::SETSYSTEMSTATE, _senderDown.command(0)->getType());
}
-TEST_F(BucketDBUpdaterTest, bit_change) {
+TEST_F(LegacyBucketDBUpdaterTest, bit_change) {
std::vector<document::BucketId> bucketlist;
{
@@ -957,7 +959,7 @@ TEST_F(BucketDBUpdaterTest, bit_change) {
}
};
-TEST_F(BucketDBUpdaterTest, recheck_node_with_failure) {
+TEST_F(LegacyBucketDBUpdaterTest, recheck_node_with_failure) {
ASSERT_NO_FATAL_FAILURE(initializeNodesAndBuckets(3, 5));
_sender.clear();
@@ -1000,7 +1002,7 @@ TEST_F(BucketDBUpdaterTest, recheck_node_with_failure) {
EXPECT_EQ(size_t(2), _sender.commands().size());
}
-TEST_F(BucketDBUpdaterTest, recheck_node) {
+TEST_F(LegacyBucketDBUpdaterTest, recheck_node) {
ASSERT_NO_FATAL_FAILURE(initializeNodesAndBuckets(3, 5));
_sender.clear();
@@ -1038,7 +1040,7 @@ TEST_F(BucketDBUpdaterTest, recheck_node) {
EXPECT_EQ(api::BucketInfo(20,10,12, 50, 60, true, true), copy->getBucketInfo());
}
-TEST_F(BucketDBUpdaterTest, notify_bucket_change) {
+TEST_F(LegacyBucketDBUpdaterTest, notify_bucket_change) {
enableDistributorClusterState("distributor:1 storage:1");
addNodesToBucketDB(document::BucketId(16, 1), "0=1234");
@@ -1101,7 +1103,7 @@ TEST_F(BucketDBUpdaterTest, notify_bucket_change) {
dumpBucket(document::BucketId(16, 2)));
}
-TEST_F(BucketDBUpdaterTest, notify_bucket_change_from_node_down) {
+TEST_F(LegacyBucketDBUpdaterTest, notify_bucket_change_from_node_down) {
enableDistributorClusterState("distributor:1 storage:2");
addNodesToBucketDB(document::BucketId(16, 1), "1=1234");
@@ -1155,7 +1157,7 @@ TEST_F(BucketDBUpdaterTest, notify_bucket_change_from_node_down) {
* distributor in the pending state but not by the current state would be
* discarded when attempted inserted into the bucket database.
*/
-TEST_F(BucketDBUpdaterTest, notify_change_with_pending_state_queues_bucket_info_requests) {
+TEST_F(LegacyBucketDBUpdaterTest, notify_change_with_pending_state_queues_bucket_info_requests) {
setSystemState(lib::ClusterState("distributor:1 storage:1"));
ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
@@ -1194,7 +1196,7 @@ TEST_F(BucketDBUpdaterTest, notify_change_with_pending_state_queues_bucket_info_
}
}
-TEST_F(BucketDBUpdaterTest, merge_reply) {
+TEST_F(LegacyBucketDBUpdaterTest, merge_reply) {
enableDistributorClusterState("distributor:1 storage:3");
addNodesToBucketDB(document::BucketId(16, 1234),
@@ -1236,7 +1238,7 @@ TEST_F(BucketDBUpdaterTest, merge_reply) {
dumpBucket(document::BucketId(16, 1234)));
};
-TEST_F(BucketDBUpdaterTest, merge_reply_node_down) {
+TEST_F(LegacyBucketDBUpdaterTest, merge_reply_node_down) {
enableDistributorClusterState("distributor:1 storage:3");
std::vector<api::MergeBucketCommand::Node> nodes;
@@ -1278,7 +1280,7 @@ TEST_F(BucketDBUpdaterTest, merge_reply_node_down) {
dumpBucket(document::BucketId(16, 1234)));
};
-TEST_F(BucketDBUpdaterTest, merge_reply_node_down_after_request_sent) {
+TEST_F(LegacyBucketDBUpdaterTest, merge_reply_node_down_after_request_sent) {
enableDistributorClusterState("distributor:1 storage:3");
std::vector<api::MergeBucketCommand::Node> nodes;
@@ -1321,7 +1323,7 @@ TEST_F(BucketDBUpdaterTest, merge_reply_node_down_after_request_sent) {
};
-TEST_F(BucketDBUpdaterTest, flush) {
+TEST_F(LegacyBucketDBUpdaterTest, flush) {
enableDistributorClusterState("distributor:1 storage:3");
_sender.clear();
@@ -1348,7 +1350,7 @@ TEST_F(BucketDBUpdaterTest, flush) {
}
std::string
-BucketDBUpdaterTest::getSentNodes(
+LegacyBucketDBUpdaterTest::getSentNodes(
const std::string& oldClusterState,
const std::string& newClusterState)
{
@@ -1372,7 +1374,7 @@ BucketDBUpdaterTest::getSentNodes(
}
std::string
-BucketDBUpdaterTest::getSentNodesDistributionChanged(
+LegacyBucketDBUpdaterTest::getSentNodesDistributionChanged(
const std::string& oldClusterState)
{
DistributorMessageSenderStub sender;
@@ -1399,7 +1401,7 @@ BucketDBUpdaterTest::getSentNodesDistributionChanged(
return ost.str();
}
-TEST_F(BucketDBUpdaterTest, pending_cluster_state_send_messages) {
+TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_send_messages) {
EXPECT_EQ(getNodeList({0, 1, 2}),
getSentNodes("cluster:d",
"distributor:1 storage:3"));
@@ -1496,7 +1498,7 @@ TEST_F(BucketDBUpdaterTest, pending_cluster_state_send_messages) {
"distributor:3 storage:3 .1.s:m"));
};
-TEST_F(BucketDBUpdaterTest, pending_cluster_state_receive) {
+TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_receive) {
DistributorMessageSenderStub sender;
auto cmd(std::make_shared<api::SetSystemStateCommand>(
@@ -1534,7 +1536,7 @@ TEST_F(BucketDBUpdaterTest, pending_cluster_state_receive) {
EXPECT_EQ(3, (int)pendingTransition.results().size());
}
-TEST_F(BucketDBUpdaterTest, pending_cluster_state_with_group_down) {
+TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_with_group_down) {
std::string config(getDistConfig6Nodes4Groups());
config += "distributor_auto_ownership_transfer_on_whole_group_down true\n";
setDistribution(config);
@@ -1553,7 +1555,7 @@ TEST_F(BucketDBUpdaterTest, pending_cluster_state_with_group_down) {
"distributor:6 .2.s:d storage:6"));
}
-TEST_F(BucketDBUpdaterTest, pending_cluster_state_with_group_down_and_no_handover) {
+TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_with_group_down_and_no_handover) {
std::string config(getDistConfig6Nodes4Groups());
config += "distributor_auto_ownership_transfer_on_whole_group_down false\n";
setDistribution(config);
@@ -1639,7 +1641,7 @@ struct BucketDumper : public BucketDatabase::EntryProcessor
};
std::string
-BucketDBUpdaterTest::mergeBucketLists(
+LegacyBucketDBUpdaterTest::mergeBucketLists(
const lib::ClusterState& oldState,
const std::string& existingData,
const lib::ClusterState& newState,
@@ -1694,7 +1696,7 @@ BucketDBUpdaterTest::mergeBucketLists(
}
std::string
-BucketDBUpdaterTest::mergeBucketLists(const std::string& existingData,
+LegacyBucketDBUpdaterTest::mergeBucketLists(const std::string& existingData,
const std::string& newData,
bool includeBucketInfo)
{
@@ -1706,7 +1708,7 @@ BucketDBUpdaterTest::mergeBucketLists(const std::string& existingData,
includeBucketInfo);
}
-TEST_F(BucketDBUpdaterTest, pending_cluster_state_merge) {
+TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_merge) {
// Simple initializing case - ask all nodes for info
EXPECT_EQ(
// Result is on the form: [bucket w/o count bits]:[node indexes]|..
@@ -1745,7 +1747,7 @@ TEST_F(BucketDBUpdaterTest, pending_cluster_state_merge) {
mergeBucketLists("", "0:5/0/0/0|1:5/2/3/4", true));
}
-TEST_F(BucketDBUpdaterTest, pending_cluster_state_merge_replica_changed) {
+TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_merge_replica_changed) {
// Node went from initializing to up and non-invalid bucket changed.
EXPECT_EQ(
std::string("2:0/2/3/4/t|3:0/2/4/6/t|"),
@@ -1757,7 +1759,7 @@ TEST_F(BucketDBUpdaterTest, pending_cluster_state_merge_replica_changed) {
true));
}
-TEST_F(BucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_current_state) {
+TEST_F(LegacyBucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_current_state) {
document::BucketId bucket(16, 3);
lib::ClusterState stateBefore("distributor:1 storage:1");
{
@@ -1786,7 +1788,7 @@ TEST_F(BucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_current_s
EXPECT_EQ(std::string("NONEXISTING"), dumpBucket(bucket));
}
-TEST_F(BucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_pending_state) {
+TEST_F(LegacyBucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_pending_state) {
document::BucketId bucket(16, 3);
lib::ClusterState stateBefore("distributor:1 storage:1");
{
@@ -1820,7 +1822,7 @@ TEST_F(BucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_pending_s
* will with a high likelihood end up not getting the complete view of the buckets in
* the cluster.
*/
-TEST_F(BucketDBUpdaterTest, cluster_state_always_sends_full_fetch_when_distribution_change_pending) {
+TEST_F(LegacyBucketDBUpdaterTest, cluster_state_always_sends_full_fetch_when_distribution_change_pending) {
lib::ClusterState stateBefore("distributor:6 storage:6");
{
uint32_t expectedMsgs = messageCount(6), dummyBucketsToReturn = 1;
@@ -1862,7 +1864,7 @@ TEST_F(BucketDBUpdaterTest, cluster_state_always_sends_full_fetch_when_distribut
EXPECT_EQ(size_t(0), _sender.commands().size());
}
-TEST_F(BucketDBUpdaterTest, changed_distribution_config_triggers_recovery_mode) {
+TEST_F(LegacyBucketDBUpdaterTest, changed_distribution_config_triggers_recovery_mode) {
ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:6 storage:6"), messageCount(6), 20));
_sender.clear();
EXPECT_TRUE(distributor_is_in_recovery_mode());
@@ -1911,7 +1913,7 @@ std::unique_ptr<BucketDatabase::EntryProcessor> func_processor(Func&& f) {
}
-TEST_F(BucketDBUpdaterTest, changed_distribution_config_does_not_elide_bucket_db_pruning) {
+TEST_F(LegacyBucketDBUpdaterTest, changed_distribution_config_does_not_elide_bucket_db_pruning) {
setDistribution(getDistConfig3Nodes1Group());
constexpr uint32_t n_buckets = 100;
@@ -1930,7 +1932,7 @@ TEST_F(BucketDBUpdaterTest, changed_distribution_config_does_not_elide_bucket_db
}));
}
-TEST_F(BucketDBUpdaterTest, newly_added_buckets_have_current_time_as_gc_timestamp) {
+TEST_F(LegacyBucketDBUpdaterTest, newly_added_buckets_have_current_time_as_gc_timestamp) {
getClock().setAbsoluteTimeInSeconds(101234);
lib::ClusterState stateBefore("distributor:1 storage:1");
{
@@ -1945,7 +1947,7 @@ TEST_F(BucketDBUpdaterTest, newly_added_buckets_have_current_time_as_gc_timestam
EXPECT_EQ(uint32_t(101234), e->getLastGarbageCollectionTime());
}
-TEST_F(BucketDBUpdaterTest, newer_mutations_not_overwritten_by_earlier_bucket_fetch) {
+TEST_F(LegacyBucketDBUpdaterTest, newer_mutations_not_overwritten_by_earlier_bucket_fetch) {
{
lib::ClusterState stateBefore("distributor:1 storage:1 .0.s:i");
uint32_t expectedMsgs = _bucketSpaces.size(), dummyBucketsToReturn = 0;
@@ -1992,7 +1994,7 @@ TEST_F(BucketDBUpdaterTest, newer_mutations_not_overwritten_by_earlier_bucket_fe
}
std::vector<uint16_t>
-BucketDBUpdaterTest::getSendSet() const
+LegacyBucketDBUpdaterTest::getSendSet() const
{
std::vector<uint16_t> nodes;
std::transform(_sender.commands().begin(),
@@ -2007,7 +2009,7 @@ BucketDBUpdaterTest::getSendSet() const
}
std::vector<uint16_t>
-BucketDBUpdaterTest::getSentNodesWithPreemption(
+LegacyBucketDBUpdaterTest::getSentNodesWithPreemption(
const std::string& oldClusterState,
uint32_t expectedOldStateMessages,
const std::string& preemptedClusterState,
@@ -2040,7 +2042,7 @@ using nodeVec = std::vector<uint16_t>;
* database modifications caused by intermediate states will not be
* accounted for (basically the ABA problem in a distributed setting).
*/
-TEST_F(BucketDBUpdaterTest, preempted_distributor_change_carries_node_set_over_to_next_state_fetch) {
+TEST_F(LegacyBucketDBUpdaterTest, preempted_distributor_change_carries_node_set_over_to_next_state_fetch) {
EXPECT_EQ(
expandNodeVec({0, 1, 2, 3, 4, 5}),
getSentNodesWithPreemption("version:1 distributor:6 storage:6",
@@ -2049,7 +2051,7 @@ TEST_F(BucketDBUpdaterTest, preempted_distributor_change_carries_node_set_over_t
"version:3 distributor:6 storage:6"));
}
-TEST_F(BucketDBUpdaterTest, preempted_storage_change_carries_node_set_over_to_next_state_fetch) {
+TEST_F(LegacyBucketDBUpdaterTest, preempted_storage_change_carries_node_set_over_to_next_state_fetch) {
EXPECT_EQ(
expandNodeVec({2, 3}),
getSentNodesWithPreemption(
@@ -2059,7 +2061,7 @@ TEST_F(BucketDBUpdaterTest, preempted_storage_change_carries_node_set_over_to_ne
"version:3 distributor:6 storage:6"));
}
-TEST_F(BucketDBUpdaterTest, preempted_storage_node_down_must_be_re_fetched) {
+TEST_F(LegacyBucketDBUpdaterTest, preempted_storage_node_down_must_be_re_fetched) {
EXPECT_EQ(
expandNodeVec({2}),
getSentNodesWithPreemption(
@@ -2069,7 +2071,7 @@ TEST_F(BucketDBUpdaterTest, preempted_storage_node_down_must_be_re_fetched) {
"version:3 distributor:6 storage:6"));
}
-TEST_F(BucketDBUpdaterTest, do_not_send_to_preempted_node_now_in_down_state) {
+TEST_F(LegacyBucketDBUpdaterTest, do_not_send_to_preempted_node_now_in_down_state) {
EXPECT_EQ(
nodeVec{},
getSentNodesWithPreemption(
@@ -2079,7 +2081,7 @@ TEST_F(BucketDBUpdaterTest, do_not_send_to_preempted_node_now_in_down_state) {
"version:3 distributor:6 storage:6 .2.s:d")); // 2 down again.
}
-TEST_F(BucketDBUpdaterTest, doNotSendToPreemptedNodeNotPartOfNewState) {
+TEST_F(LegacyBucketDBUpdaterTest, doNotSendToPreemptedNodeNotPartOfNewState) {
// Even though 100 nodes are preempted, not all of these should be part
// of the request afterwards when only 6 are part of the state.
EXPECT_EQ(
@@ -2091,7 +2093,7 @@ TEST_F(BucketDBUpdaterTest, doNotSendToPreemptedNodeNotPartOfNewState) {
"version:3 distributor:6 storage:6"));
}
-TEST_F(BucketDBUpdaterTest, outdated_node_set_cleared_after_successful_state_completion) {
+TEST_F(LegacyBucketDBUpdaterTest, outdated_node_set_cleared_after_successful_state_completion) {
lib::ClusterState stateBefore(
"version:1 distributor:6 storage:6 .1.t:1234");
uint32_t expectedMsgs = messageCount(6), dummyBucketsToReturn = 10;
@@ -2111,7 +2113,7 @@ TEST_F(BucketDBUpdaterTest, outdated_node_set_cleared_after_successful_state_com
// distribution config will follow very shortly after the config has been
// applied to the node. The new cluster state will then send out requests to
// the correct node set.
-TEST_F(BucketDBUpdaterTest, DISABLED_cluster_config_downsize_only_sends_to_available_nodes) {
+TEST_F(LegacyBucketDBUpdaterTest, DISABLED_cluster_config_downsize_only_sends_to_available_nodes) {
uint32_t expectedMsgs = 6, dummyBucketsToReturn = 20;
ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:6 storage:6"),
expectedMsgs, dummyBucketsToReturn));
@@ -2134,7 +2136,7 @@ TEST_F(BucketDBUpdaterTest, DISABLED_cluster_config_downsize_only_sends_to_avail
*
* See VESPA-790 for details.
*/
-TEST_F(BucketDBUpdaterTest, node_missing_from_config_is_treated_as_needing_ownership_transfer) {
+TEST_F(LegacyBucketDBUpdaterTest, node_missing_from_config_is_treated_as_needing_ownership_transfer) {
uint32_t expectedMsgs = messageCount(3), dummyBucketsToReturn = 1;
ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:3 storage:3"),
expectedMsgs, dummyBucketsToReturn));
@@ -2170,7 +2172,7 @@ TEST_F(BucketDBUpdaterTest, node_missing_from_config_is_treated_as_needing_owner
EXPECT_EQ(expandNodeVec({0, 1}), getSendSet());
}
-TEST_F(BucketDBUpdaterTest, changed_distributor_set_implies_ownership_transfer) {
+TEST_F(LegacyBucketDBUpdaterTest, changed_distributor_set_implies_ownership_transfer) {
auto fixture = createPendingStateFixtureForStateChange(
"distributor:2 storage:2", "distributor:1 storage:2");
EXPECT_TRUE(fixture->state->hasBucketOwnershipTransfer());
@@ -2180,7 +2182,7 @@ TEST_F(BucketDBUpdaterTest, changed_distributor_set_implies_ownership_transfer)
EXPECT_TRUE(fixture->state->hasBucketOwnershipTransfer());
}
-TEST_F(BucketDBUpdaterTest, unchanged_distributor_set_implies_no_ownership_transfer) {
+TEST_F(LegacyBucketDBUpdaterTest, unchanged_distributor_set_implies_no_ownership_transfer) {
auto fixture = createPendingStateFixtureForStateChange(
"distributor:2 storage:2", "distributor:2 storage:1");
EXPECT_FALSE(fixture->state->hasBucketOwnershipTransfer());
@@ -2190,26 +2192,26 @@ TEST_F(BucketDBUpdaterTest, unchanged_distributor_set_implies_no_ownership_trans
EXPECT_FALSE(fixture->state->hasBucketOwnershipTransfer());
}
-TEST_F(BucketDBUpdaterTest, changed_distribution_config_implies_ownership_transfer) {
+TEST_F(LegacyBucketDBUpdaterTest, changed_distribution_config_implies_ownership_transfer) {
auto fixture = createPendingStateFixtureForDistributionChange(
"distributor:2 storage:2");
EXPECT_TRUE(fixture->state->hasBucketOwnershipTransfer());
}
-TEST_F(BucketDBUpdaterTest, transition_time_tracked_for_single_state_change) {
+TEST_F(LegacyBucketDBUpdaterTest, transition_time_tracked_for_single_state_change) {
ASSERT_NO_FATAL_FAILURE(completeStateTransitionInSeconds("distributor:2 storage:2", 5, messageCount(2)));
EXPECT_EQ(uint64_t(5000), lastTransitionTimeInMillis());
}
-TEST_F(BucketDBUpdaterTest, transition_time_reset_across_non_preempting_state_changes) {
+TEST_F(LegacyBucketDBUpdaterTest, transition_time_reset_across_non_preempting_state_changes) {
ASSERT_NO_FATAL_FAILURE(completeStateTransitionInSeconds("distributor:2 storage:2", 5, messageCount(2)));
ASSERT_NO_FATAL_FAILURE(completeStateTransitionInSeconds("distributor:2 storage:3", 3, messageCount(1)));
EXPECT_EQ(uint64_t(3000), lastTransitionTimeInMillis());
}
-TEST_F(BucketDBUpdaterTest, transition_time_tracked_for_distribution_config_change) {
+TEST_F(LegacyBucketDBUpdaterTest, transition_time_tracked_for_distribution_config_change) {
lib::ClusterState state("distributor:2 storage:2");
ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(state, messageCount(2), 1));
@@ -2221,7 +2223,7 @@ TEST_F(BucketDBUpdaterTest, transition_time_tracked_for_distribution_config_chan
EXPECT_EQ(uint64_t(4000), lastTransitionTimeInMillis());
}
-TEST_F(BucketDBUpdaterTest, transition_time_tracked_across_preempted_transitions) {
+TEST_F(LegacyBucketDBUpdaterTest, transition_time_tracked_across_preempted_transitions) {
_sender.clear();
lib::ClusterState state("distributor:2 storage:2");
setSystemState(state);
@@ -2245,7 +2247,7 @@ TEST_F(BucketDBUpdaterTest, transition_time_tracked_across_preempted_transitions
* Yes, the order of node<->bucket id is reversed between the two, perhaps to make sure you're awake.
*/
-TEST_F(BucketDBUpdaterTest, batch_update_of_existing_diverging_replicas_does_not_mark_any_as_trusted) {
+TEST_F(LegacyBucketDBUpdaterTest, batch_update_of_existing_diverging_replicas_does_not_mark_any_as_trusted) {
// Replacing bucket information for content node 0 should not mark existing
// untrusted replica as trusted as a side effect.
EXPECT_EQ(
@@ -2257,32 +2259,32 @@ TEST_F(BucketDBUpdaterTest, batch_update_of_existing_diverging_replicas_does_not
"0:5/1/2/3|1:5/7/8/9", true));
}
-TEST_F(BucketDBUpdaterTest, batch_add_of_new_diverging_replicas_does_not_mark_any_as_trusted) {
+TEST_F(LegacyBucketDBUpdaterTest, batch_add_of_new_diverging_replicas_does_not_mark_any_as_trusted) {
EXPECT_EQ(std::string("5:1/7/8/9/u,0/1/2/3/u|"),
mergeBucketLists("", "0:5/1/2/3|1:5/7/8/9", true));
}
-TEST_F(BucketDBUpdaterTest, batch_add_with_single_resulting_replica_implicitly_marks_as_trusted) {
+TEST_F(LegacyBucketDBUpdaterTest, batch_add_with_single_resulting_replica_implicitly_marks_as_trusted) {
EXPECT_EQ(std::string("5:0/1/2/3/t|"),
mergeBucketLists("", "0:5/1/2/3", true));
}
-TEST_F(BucketDBUpdaterTest, identity_update_of_single_replica_does_not_clear_trusted) {
+TEST_F(LegacyBucketDBUpdaterTest, identity_update_of_single_replica_does_not_clear_trusted) {
EXPECT_EQ(std::string("5:0/1/2/3/t|"),
mergeBucketLists("0:5/1/2/3", "0:5/1/2/3", true));
}
-TEST_F(BucketDBUpdaterTest, identity_update_of_diverging_untrusted_replicas_does_not_mark_any_as_trusted) {
+TEST_F(LegacyBucketDBUpdaterTest, identity_update_of_diverging_untrusted_replicas_does_not_mark_any_as_trusted) {
EXPECT_EQ(std::string("5:1/7/8/9/u,0/1/2/3/u|"),
mergeBucketLists("0:5/1/2/3|1:5/7/8/9", "0:5/1/2/3|1:5/7/8/9", true));
}
-TEST_F(BucketDBUpdaterTest, adding_diverging_replica_to_existing_trusted_does_not_remove_trusted) {
+TEST_F(LegacyBucketDBUpdaterTest, adding_diverging_replica_to_existing_trusted_does_not_remove_trusted) {
EXPECT_EQ(std::string("5:1/2/3/4/u,0/1/2/3/t|"),
mergeBucketLists("0:5/1/2/3", "0:5/1/2/3|1:5/2/3/4", true));
}
-TEST_F(BucketDBUpdaterTest, batch_update_from_distributor_change_does_not_mark_diverging_replicas_as_trusted) {
+TEST_F(LegacyBucketDBUpdaterTest, batch_update_from_distributor_change_does_not_mark_diverging_replicas_as_trusted) {
// This differs from batch_update_of_existing_diverging_replicas_does_not_mark_any_as_trusted
// in that _all_ content nodes are considered outdated when distributor changes take place,
// and therefore a slightly different code path is taken. In particular, bucket info for
@@ -2298,7 +2300,7 @@ TEST_F(BucketDBUpdaterTest, batch_update_from_distributor_change_does_not_mark_d
}
// TODO remove on Vespa 8 - this is a workaround for https://github.com/vespa-engine/vespa/issues/8475
-TEST_F(BucketDBUpdaterTest, global_distribution_hash_falls_back_to_legacy_format_upon_request_rejection) {
+TEST_F(LegacyBucketDBUpdaterTest, global_distribution_hash_falls_back_to_legacy_format_upon_request_rejection) {
std::string distConfig(getDistConfig6Nodes2Groups());
setDistribution(distConfig);
@@ -2366,7 +2368,7 @@ void for_each_bucket(const DistributorBucketSpaceRepo& repo, Func&& f) {
}
-TEST_F(BucketDBUpdaterTest, non_owned_buckets_moved_to_read_only_db_on_ownership_change) {
+TEST_F(LegacyBucketDBUpdaterTest, non_owned_buckets_moved_to_read_only_db_on_ownership_change) {
getBucketDBUpdater().set_stale_reads_enabled(true);
lib::ClusterState initial_state("distributor:1 storage:4"); // All buckets owned by us by definition
@@ -2407,7 +2409,7 @@ TEST_F(BucketDBUpdaterTest, non_owned_buckets_moved_to_read_only_db_on_ownership
});
}
-TEST_F(BucketDBUpdaterTest, buckets_no_longer_available_are_not_moved_to_read_only_database) {
+TEST_F(LegacyBucketDBUpdaterTest, buckets_no_longer_available_are_not_moved_to_read_only_database) {
constexpr uint32_t n_buckets = 10;
// No ownership change, just node down. Test redundancy is 2, so removing 2 nodes will
// cause some buckets to be entirely unavailable.
@@ -2418,7 +2420,7 @@ TEST_F(BucketDBUpdaterTest, buckets_no_longer_available_are_not_moved_to_read_on
EXPECT_EQ(size_t(0), read_only_global_db().size());
}
-TEST_F(BucketDBUpdaterTest, non_owned_buckets_purged_when_read_only_support_is_config_disabled) {
+TEST_F(LegacyBucketDBUpdaterTest, non_owned_buckets_purged_when_read_only_support_is_config_disabled) {
getBucketDBUpdater().set_stale_reads_enabled(false);
lib::ClusterState initial_state("distributor:1 storage:4"); // All buckets owned by us by definition
@@ -2440,7 +2442,7 @@ TEST_F(BucketDBUpdaterTest, non_owned_buckets_purged_when_read_only_support_is_c
EXPECT_EQ(size_t(0), read_only_global_db().size());
}
-void BucketDBUpdaterTest::trigger_completed_but_not_yet_activated_transition(
+void LegacyBucketDBUpdaterTest::trigger_completed_but_not_yet_activated_transition(
vespalib::stringref initial_state_str,
uint32_t initial_buckets,
uint32_t initial_expected_msgs,
@@ -2463,7 +2465,7 @@ void BucketDBUpdaterTest::trigger_completed_but_not_yet_activated_transition(
_sender.clear();
}
-TEST_F(BucketDBUpdaterTest, deferred_activated_state_does_not_enable_state_until_activation_received) {
+TEST_F(LegacyBucketDBUpdaterTest, deferred_activated_state_does_not_enable_state_until_activation_received) {
getBucketDBUpdater().set_stale_reads_enabled(true);
constexpr uint32_t n_buckets = 10;
ASSERT_NO_FATAL_FAILURE(
@@ -2483,7 +2485,7 @@ TEST_F(BucketDBUpdaterTest, deferred_activated_state_does_not_enable_state_until
EXPECT_EQ(uint64_t(n_buckets), mutable_global_db().size());
}
-TEST_F(BucketDBUpdaterTest, read_only_db_cleared_once_pending_state_is_activated) {
+TEST_F(LegacyBucketDBUpdaterTest, read_only_db_cleared_once_pending_state_is_activated) {
getBucketDBUpdater().set_stale_reads_enabled(true);
constexpr uint32_t n_buckets = 10;
ASSERT_NO_FATAL_FAILURE(
@@ -2495,7 +2497,7 @@ TEST_F(BucketDBUpdaterTest, read_only_db_cleared_once_pending_state_is_activated
EXPECT_EQ(uint64_t(0), read_only_global_db().size());
}
-TEST_F(BucketDBUpdaterTest, read_only_db_is_populated_even_when_self_is_marked_down) {
+TEST_F(LegacyBucketDBUpdaterTest, read_only_db_is_populated_even_when_self_is_marked_down) {
getBucketDBUpdater().set_stale_reads_enabled(true);
constexpr uint32_t n_buckets = 10;
ASSERT_NO_FATAL_FAILURE(
@@ -2509,7 +2511,7 @@ TEST_F(BucketDBUpdaterTest, read_only_db_is_populated_even_when_self_is_marked_d
EXPECT_EQ(uint64_t(n_buckets), read_only_global_db().size());
}
-TEST_F(BucketDBUpdaterTest, activate_cluster_state_request_with_mismatching_version_returns_actual_version) {
+TEST_F(LegacyBucketDBUpdaterTest, activate_cluster_state_request_with_mismatching_version_returns_actual_version) {
getBucketDBUpdater().set_stale_reads_enabled(true);
constexpr uint32_t n_buckets = 10;
ASSERT_NO_FATAL_FAILURE(
@@ -2523,7 +2525,7 @@ TEST_F(BucketDBUpdaterTest, activate_cluster_state_request_with_mismatching_vers
ASSERT_NO_FATAL_FAILURE(assert_has_activate_cluster_state_reply_with_actual_version(5));
}
-TEST_F(BucketDBUpdaterTest, activate_cluster_state_request_without_pending_transition_passes_message_through) {
+TEST_F(LegacyBucketDBUpdaterTest, activate_cluster_state_request_without_pending_transition_passes_message_through) {
getBucketDBUpdater().set_stale_reads_enabled(true);
constexpr uint32_t n_buckets = 10;
ASSERT_NO_FATAL_FAILURE(
@@ -2539,7 +2541,7 @@ TEST_F(BucketDBUpdaterTest, activate_cluster_state_request_without_pending_trans
EXPECT_EQ(size_t(0), _sender.replies().size());
}
-TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_bulk_loading_into_empty_db) {
+TEST_F(LegacyBucketDBUpdaterTest, DISABLED_benchmark_bulk_loading_into_empty_db) {
// Need to trigger an initial edge to complete first bucket scan
ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:2 storage:1"),
messageCount(1), 0));
@@ -2586,7 +2588,7 @@ TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_bulk_loading_into_empty_db) {
EXPECT_EQ(size_t(0), mutable_global_db().size());
}
-uint32_t BucketDBUpdaterTest::populate_bucket_db_via_request_bucket_info_for_benchmarking() {
+uint32_t LegacyBucketDBUpdaterTest::populate_bucket_db_via_request_bucket_info_for_benchmarking() {
// Need to trigger an initial edge to complete first bucket scan
setAndEnableClusterState(lib::ClusterState("distributor:2 storage:1"), messageCount(1), 0);
_sender.clear();
@@ -2622,7 +2624,7 @@ uint32_t BucketDBUpdaterTest::populate_bucket_db_via_request_bucket_info_for_ben
return n_buckets;
}
-TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_removing_buckets_for_unavailable_storage_nodes) {
+TEST_F(LegacyBucketDBUpdaterTest, DISABLED_benchmark_removing_buckets_for_unavailable_storage_nodes) {
const uint32_t n_buckets = populate_bucket_db_via_request_bucket_info_for_benchmarking();
lib::ClusterState no_op_state("distributor:1 storage:1 .0.s:m"); // Removing all buckets via ownership
@@ -2633,7 +2635,7 @@ TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_removing_buckets_for_unavailable_
fprintf(stderr, "Took %g seconds to scan and remove %u buckets\n", timer.min_time(), n_buckets);
}
-TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_no_buckets_removed_during_node_remover_db_pass) {
+TEST_F(LegacyBucketDBUpdaterTest, DISABLED_benchmark_no_buckets_removed_during_node_remover_db_pass) {
const uint32_t n_buckets = populate_bucket_db_via_request_bucket_info_for_benchmarking();
// TODO this benchmark is void if we further restrict the pruning elision logic to allow
@@ -2646,7 +2648,7 @@ TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_no_buckets_removed_during_node_re
fprintf(stderr, "Took %g seconds to scan %u buckets with no-op action\n", timer.min_time(), n_buckets);
}
-TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_all_buckets_removed_during_node_remover_db_pass) {
+TEST_F(LegacyBucketDBUpdaterTest, DISABLED_benchmark_all_buckets_removed_during_node_remover_db_pass) {
const uint32_t n_buckets = populate_bucket_db_via_request_bucket_info_for_benchmarking();
lib::ClusterState no_op_state("distributor:1 storage:1 .0.s:m"); // Removing all buckets via all replicas gone
@@ -2657,7 +2659,7 @@ TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_all_buckets_removed_during_node_r
fprintf(stderr, "Took %g seconds to scan and remove %u buckets\n", timer.min_time(), n_buckets);
}
-TEST_F(BucketDBUpdaterTest, pending_cluster_state_getter_is_non_null_only_when_state_is_pending) {
+TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_getter_is_non_null_only_when_state_is_pending) {
auto initial_baseline = std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:d");
auto initial_default = std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:m");
@@ -2682,7 +2684,7 @@ TEST_F(BucketDBUpdaterTest, pending_cluster_state_getter_is_non_null_only_when_s
EXPECT_TRUE(state == nullptr);
}
-struct BucketDBUpdaterSnapshotTest : BucketDBUpdaterTest {
+struct BucketDBUpdaterSnapshotTest : LegacyBucketDBUpdaterTest {
lib::ClusterState empty_state;
std::shared_ptr<lib::ClusterState> initial_baseline;
std::shared_ptr<lib::ClusterState> initial_default;
@@ -2691,7 +2693,7 @@ struct BucketDBUpdaterSnapshotTest : BucketDBUpdaterTest {
Bucket global_bucket;
BucketDBUpdaterSnapshotTest()
- : BucketDBUpdaterTest(),
+ : LegacyBucketDBUpdaterTest(),
empty_state(),
initial_baseline(std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:d")),
initial_default(std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:m")),
@@ -2704,7 +2706,7 @@ struct BucketDBUpdaterSnapshotTest : BucketDBUpdaterTest {
~BucketDBUpdaterSnapshotTest() override;
void SetUp() override {
- BucketDBUpdaterTest::SetUp();
+ LegacyBucketDBUpdaterTest::SetUp();
getBucketDBUpdater().set_stale_reads_enabled(true);
};
diff --git a/storage/src/tests/distributor/distributortest.cpp b/storage/src/tests/distributor/legacy_distributor_test.cpp
index 3d1c6165946..3123b7fc91c 100644
--- a/storage/src/tests/distributor/distributortest.cpp
+++ b/storage/src/tests/distributor/legacy_distributor_test.cpp
@@ -33,9 +33,11 @@ using namespace ::testing;
namespace storage::distributor {
-struct DistributorTest : Test, DistributorTestUtil {
- DistributorTest();
- ~DistributorTest() override;
+// TODO STRIPE: Add variant of this test for the new stripe mode.
+// TODO STRIPE: Remove this test when legacy mode is gone.
+struct LegacyDistributorTest : Test, DistributorTestUtil {
+ LegacyDistributorTest();
+ ~LegacyDistributorTest() override;
// TODO handle edge case for window between getnodestate reply already
// sent and new request not yet received
@@ -222,7 +224,6 @@ struct DistributorTest : Test, DistributorTestUtil {
}
void configureMaxClusterClockSkew(int seconds);
- void sendDownClusterStateCommand();
void replyToSingleRequestBucketInfoCommandWith1Bucket();
void sendDownDummyRemoveCommand();
void assertSingleBouncedRemoveReplyPresent();
@@ -234,17 +235,17 @@ struct DistributorTest : Test, DistributorTestUtil {
void set_up_and_start_get_op_with_stale_reads_enabled(bool enabled);
};
-DistributorTest::DistributorTest()
+LegacyDistributorTest::LegacyDistributorTest()
: Test(),
DistributorTestUtil(),
_bucketSpaces()
{
}
-DistributorTest::~DistributorTest() = default;
+LegacyDistributorTest::~LegacyDistributorTest() = default;
// TODO -> stripe test
-TEST_F(DistributorTest, operation_generation) {
+TEST_F(LegacyDistributorTest, operation_generation) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
document::BucketId bid;
@@ -263,7 +264,7 @@ TEST_F(DistributorTest, operation_generation) {
}
// TODO -> stripe test
-TEST_F(DistributorTest, operations_generated_and_started_without_duplicates) {
+TEST_F(LegacyDistributorTest, operations_generated_and_started_without_duplicates) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
for (uint32_t i = 0; i < 6; ++i) {
@@ -279,7 +280,7 @@ TEST_F(DistributorTest, operations_generated_and_started_without_duplicates) {
// TODO -> stripe test
// TODO also need to impl/test cross-stripe cluster state changes
-TEST_F(DistributorTest, recovery_mode_on_cluster_state_change) {
+TEST_F(LegacyDistributorTest, recovery_mode_on_cluster_state_change) {
setupDistributor(Redundancy(1), NodeCount(2),
"storage:1 .0.s:d distributor:1");
enableDistributorClusterState("storage:1 distributor:1");
@@ -301,7 +302,7 @@ TEST_F(DistributorTest, recovery_mode_on_cluster_state_change) {
// TODO -> stripe test
// TODO how to throttle across stripes?
-TEST_F(DistributorTest, operations_are_throttled) {
+TEST_F(LegacyDistributorTest, operations_are_throttled) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
getConfig().setMinPendingMaintenanceOps(1);
getConfig().setMaxPendingMaintenanceOps(1);
@@ -314,7 +315,7 @@ TEST_F(DistributorTest, operations_are_throttled) {
}
// TODO -> stripe test
-TEST_F(DistributorTest, handle_unknown_maintenance_reply) {
+TEST_F(LegacyDistributorTest, handle_unknown_maintenance_reply) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
{
@@ -334,7 +335,7 @@ TEST_F(DistributorTest, handle_unknown_maintenance_reply) {
}
// TODO -> generic, non distr/stripe test
-TEST_F(DistributorTest, contains_time_statement) {
+TEST_F(LegacyDistributorTest, contains_time_statement) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
EXPECT_FALSE(getConfig().containsTimeStatement(""));
@@ -346,7 +347,7 @@ TEST_F(DistributorTest, contains_time_statement) {
}
// TODO -> stripe test
-TEST_F(DistributorTest, update_bucket_database) {
+TEST_F(LegacyDistributorTest, update_bucket_database) {
enableDistributorClusterState("distributor:1 storage:3");
EXPECT_EQ("BucketId(0x4000000000000001) : "
@@ -417,7 +418,7 @@ public:
// TODO -> stripe test
// TODO need to impl/test cross-stripe status requests
-TEST_F(DistributorTest, tick_processes_status_requests) {
+TEST_F(LegacyDistributorTest, tick_processes_status_requests) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t");
@@ -447,7 +448,7 @@ TEST_F(DistributorTest, tick_processes_status_requests) {
// TODO -> distributor test since it owns metric hook
// TODO need to impl/test cross-stripe metrics aggregation
-TEST_F(DistributorTest, metric_update_hook_updates_pending_maintenance_metrics) {
+TEST_F(LegacyDistributorTest, metric_update_hook_updates_pending_maintenance_metrics) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
// To ensure we count all operations, not just those fitting within the
// pending window.
@@ -494,7 +495,7 @@ TEST_F(DistributorTest, metric_update_hook_updates_pending_maintenance_metrics)
}
// TODO -> stripe test
-TEST_F(DistributorTest, bucket_db_memory_usage_metrics_only_updated_at_fixed_time_intervals) {
+TEST_F(LegacyDistributorTest, bucket_db_memory_usage_metrics_only_updated_at_fixed_time_intervals) {
getClock().setAbsoluteTimeInSeconds(1000);
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
@@ -533,7 +534,7 @@ TEST_F(DistributorTest, bucket_db_memory_usage_metrics_only_updated_at_fixed_tim
// TODO -> stripe test
// TODO need to impl/test cross-stripe config propagation
-TEST_F(DistributorTest, priority_config_is_propagated_to_distributor_configuration) {
+TEST_F(LegacyDistributorTest, priority_config_is_propagated_to_distributor_configuration) {
using namespace vespa::config::content::core;
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
@@ -570,7 +571,7 @@ TEST_F(DistributorTest, priority_config_is_propagated_to_distributor_configurati
}
// TODO -> stripe test
-TEST_F(DistributorTest, no_db_resurrection_for_bucket_not_owned_in_pending_state) {
+TEST_F(LegacyDistributorTest, no_db_resurrection_for_bucket_not_owned_in_pending_state) {
setupDistributor(Redundancy(1), NodeCount(10), "storage:2 distributor:2");
lib::ClusterState newState("storage:10 distributor:10");
auto stateCmd = std::make_shared<api::SetSystemStateCommand>(newState);
@@ -592,7 +593,7 @@ TEST_F(DistributorTest, no_db_resurrection_for_bucket_not_owned_in_pending_state
}
// TODO -> stripe test
-TEST_F(DistributorTest, added_db_buckets_without_gc_timestamp_implicitly_get_current_time) {
+TEST_F(LegacyDistributorTest, added_db_buckets_without_gc_timestamp_implicitly_get_current_time) {
setupDistributor(Redundancy(1), NodeCount(10), "storage:2 distributor:2");
getClock().setAbsoluteTimeInSeconds(101234);
document::BucketId bucket(16, 7654);
@@ -606,7 +607,7 @@ TEST_F(DistributorTest, added_db_buckets_without_gc_timestamp_implicitly_get_cur
}
// TODO -> stripe test
-TEST_F(DistributorTest, merge_stats_are_accumulated_during_database_iteration) {
+TEST_F(LegacyDistributorTest, merge_stats_are_accumulated_during_database_iteration) {
setupDistributor(Redundancy(2), NodeCount(3), "storage:3 distributor:1");
// Copies out of sync. Not possible for distributor to _reliably_ tell
// which direction(s) data will flow, so for simplicity assume that we
@@ -657,9 +658,9 @@ TEST_F(DistributorTest, merge_stats_are_accumulated_during_database_iteration) {
}
void
-DistributorTest::assertBucketSpaceStats(size_t expBucketPending, size_t expBucketTotal, uint16_t node,
- const vespalib::string& bucketSpace,
- const BucketSpacesStatsProvider::PerNodeBucketSpacesStats& stats)
+LegacyDistributorTest::assertBucketSpaceStats(size_t expBucketPending, size_t expBucketTotal, uint16_t node,
+ const vespalib::string& bucketSpace,
+ const BucketSpacesStatsProvider::PerNodeBucketSpacesStats& stats)
{
auto nodeItr = stats.find(node);
ASSERT_TRUE(nodeItr != stats.end());
@@ -678,7 +679,7 @@ DistributorTest::assertBucketSpaceStats(size_t expBucketPending, size_t expBucke
* operations for the bucket.
*/
// TODO -> stripe test
-TEST_F(DistributorTest, stats_generated_for_preempted_operations) {
+TEST_F(LegacyDistributorTest, stats_generated_for_preempted_operations) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
// For this test it suffices to have a single bucket with multiple aspects
// wrong about it. In this case, let a bucket be both out of sync _and_
@@ -703,7 +704,7 @@ TEST_F(DistributorTest, stats_generated_for_preempted_operations) {
}
// TODO -> distributor test
-TEST_F(DistributorTest, host_info_reporter_config_is_propagated_to_reporter) {
+TEST_F(LegacyDistributorTest, host_info_reporter_config_is_propagated_to_reporter) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
// Default is enabled=true.
@@ -717,13 +718,13 @@ TEST_F(DistributorTest, host_info_reporter_config_is_propagated_to_reporter) {
}
// TODO -> stripe test (though config is a bit of a special case...)
-TEST_F(DistributorTest, replica_counting_mode_is_configured_to_trusted_by_default) {
+TEST_F(LegacyDistributorTest, replica_counting_mode_is_configured_to_trusted_by_default) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
EXPECT_EQ(ConfigBuilder::MinimumReplicaCountingMode::TRUSTED, currentReplicaCountingMode());
}
// TODO -> stripe test
-TEST_F(DistributorTest, replica_counting_mode_config_is_propagated_to_metric_updater) {
+TEST_F(LegacyDistributorTest, replica_counting_mode_config_is_propagated_to_metric_updater) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
ConfigBuilder builder;
builder.minimumReplicaCountingMode = ConfigBuilder::MinimumReplicaCountingMode::ANY;
@@ -732,7 +733,7 @@ TEST_F(DistributorTest, replica_counting_mode_config_is_propagated_to_metric_upd
}
// TODO -> stripe test
-TEST_F(DistributorTest, max_consecutively_inhibited_maintenance_ticks_config_is_propagated_to_internal_config) {
+TEST_F(LegacyDistributorTest, max_consecutively_inhibited_maintenance_ticks_config_is_propagated_to_internal_config) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
ConfigBuilder builder;
builder.maxConsecutivelyInhibitedMaintenanceTicks = 123;
@@ -741,13 +742,13 @@ TEST_F(DistributorTest, max_consecutively_inhibited_maintenance_ticks_config_is_
}
// TODO -> stripe test
-TEST_F(DistributorTest, bucket_activation_is_enabled_by_default) {
+TEST_F(LegacyDistributorTest, bucket_activation_is_enabled_by_default) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
EXPECT_FALSE(getConfig().isBucketActivationDisabled());
}
// TODO -> stripe test
-TEST_F(DistributorTest, bucket_activation_config_is_propagated_to_distributor_configuration) {
+TEST_F(LegacyDistributorTest, bucket_activation_config_is_propagated_to_distributor_configuration) {
using namespace vespa::config::content::core;
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
@@ -760,7 +761,7 @@ TEST_F(DistributorTest, bucket_activation_config_is_propagated_to_distributor_co
}
void
-DistributorTest::configureMaxClusterClockSkew(int seconds) {
+LegacyDistributorTest::configureMaxClusterClockSkew(int seconds) {
using namespace vespa::config::content::core;
ConfigBuilder builder;
@@ -770,7 +771,7 @@ DistributorTest::configureMaxClusterClockSkew(int seconds) {
}
// TODO -> stripe test
-TEST_F(DistributorTest, max_clock_skew_config_is_propagated_to_distributor_config) {
+TEST_F(LegacyDistributorTest, max_clock_skew_config_is_propagated_to_distributor_config) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
configureMaxClusterClockSkew(5);
@@ -795,13 +796,7 @@ auto make_dummy_get_command_for_bucket_1() {
}
-void DistributorTest::sendDownClusterStateCommand() {
- lib::ClusterState newState("bits:1 storage:1 distributor:1");
- auto stateCmd = std::make_shared<api::SetSystemStateCommand>(newState);
- _distributor->handleMessage(stateCmd);
-}
-
-void DistributorTest::replyToSingleRequestBucketInfoCommandWith1Bucket() {
+void LegacyDistributorTest::replyToSingleRequestBucketInfoCommandWith1Bucket() {
ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
for (uint32_t i = 0; i < _sender.commands().size(); ++i) {
ASSERT_EQ(api::MessageType::REQUESTBUCKETINFO, _sender.command(i)->getType());
@@ -821,11 +816,11 @@ void DistributorTest::replyToSingleRequestBucketInfoCommandWith1Bucket() {
_sender.commands().clear();
}
-void DistributorTest::sendDownDummyRemoveCommand() {
+void LegacyDistributorTest::sendDownDummyRemoveCommand() {
_distributor->handleMessage(makeDummyRemoveCommand());
}
-void DistributorTest::assertSingleBouncedRemoveReplyPresent() {
+void LegacyDistributorTest::assertSingleBouncedRemoveReplyPresent() {
ASSERT_EQ(1, _sender.replies().size()); // Rejected remove
ASSERT_EQ(api::MessageType::REMOVE_REPLY, _sender.reply(0)->getType());
auto& reply(static_cast<api::RemoveReply&>(*_sender.reply(0)));
@@ -833,7 +828,7 @@ void DistributorTest::assertSingleBouncedRemoveReplyPresent() {
_sender.replies().clear();
}
-void DistributorTest::assertNoMessageBounced() {
+void LegacyDistributorTest::assertNoMessageBounced() {
ASSERT_EQ(0, _sender.replies().size());
}
@@ -841,13 +836,13 @@ void DistributorTest::assertNoMessageBounced() {
// reply once we have the "highest timestamp across all owned buckets" feature
// in place.
// TODO where does this truly belong?
-TEST_F(DistributorTest, configured_safe_time_point_rejection_works_end_to_end) {
+TEST_F(LegacyDistributorTest, configured_safe_time_point_rejection_works_end_to_end) {
setupDistributor(Redundancy(2), NodeCount(2),
"bits:1 storage:1 distributor:2");
getClock().setAbsoluteTimeInSeconds(1000);
configureMaxClusterClockSkew(10);
- sendDownClusterStateCommand();
+ receive_set_system_state_command("bits:1 storage:1 distributor:1");
ASSERT_NO_FATAL_FAILURE(replyToSingleRequestBucketInfoCommandWith1Bucket());
// SetSystemStateCommand sent down chain at this point.
sendDownDummyRemoveCommand();
@@ -861,7 +856,7 @@ TEST_F(DistributorTest, configured_safe_time_point_rejection_works_end_to_end) {
ASSERT_NO_FATAL_FAILURE(assertNoMessageBounced());
}
-void DistributorTest::configure_mutation_sequencing(bool enabled) {
+void LegacyDistributorTest::configure_mutation_sequencing(bool enabled) {
using namespace vespa::config::content::core;
ConfigBuilder builder;
@@ -871,7 +866,7 @@ void DistributorTest::configure_mutation_sequencing(bool enabled) {
}
// TODO -> stripe test
-TEST_F(DistributorTest, sequencing_config_is_propagated_to_distributor_config) {
+TEST_F(LegacyDistributorTest, sequencing_config_is_propagated_to_distributor_config) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
// Should be enabled by default
@@ -887,7 +882,7 @@ TEST_F(DistributorTest, sequencing_config_is_propagated_to_distributor_config) {
}
void
-DistributorTest::configure_merge_busy_inhibit_duration(int seconds) {
+LegacyDistributorTest::configure_merge_busy_inhibit_duration(int seconds) {
using namespace vespa::config::content::core;
ConfigBuilder builder;
@@ -897,7 +892,7 @@ DistributorTest::configure_merge_busy_inhibit_duration(int seconds) {
}
// TODO -> stripe test
-TEST_F(DistributorTest, merge_busy_inhibit_duration_config_is_propagated_to_distributor_config) {
+TEST_F(LegacyDistributorTest, merge_busy_inhibit_duration_config_is_propagated_to_distributor_config) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
configure_merge_busy_inhibit_duration(7);
@@ -905,7 +900,7 @@ TEST_F(DistributorTest, merge_busy_inhibit_duration_config_is_propagated_to_dist
}
// TODO -> stripe test
-TEST_F(DistributorTest, merge_busy_inhibit_duration_is_propagated_to_pending_message_tracker) {
+TEST_F(LegacyDistributorTest, merge_busy_inhibit_duration_is_propagated_to_pending_message_tracker) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:1 distributor:1");
addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t");
@@ -931,7 +926,7 @@ TEST_F(DistributorTest, merge_busy_inhibit_duration_is_propagated_to_pending_mes
}
// TODO -> stripe test
-TEST_F(DistributorTest, external_client_requests_are_handled_individually_in_priority_order) {
+TEST_F(LegacyDistributorTest, external_client_requests_are_handled_individually_in_priority_order) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t/a");
@@ -960,7 +955,7 @@ TEST_F(DistributorTest, external_client_requests_are_handled_individually_in_pri
}
// TODO -> stripe test
-TEST_F(DistributorTest, internal_messages_are_started_in_fifo_order_batch) {
+TEST_F(LegacyDistributorTest, internal_messages_are_started_in_fifo_order_batch) {
// To test internal request ordering, we use NotifyBucketChangeCommand
// for the reason that it explicitly updates the bucket database for
// each individual invocation.
@@ -990,7 +985,7 @@ TEST_F(DistributorTest, internal_messages_are_started_in_fifo_order_batch) {
// TODO -> stripe test
// TODO also test that closing distributor closes stripes
-TEST_F(DistributorTest, closing_aborts_priority_queued_client_requests) {
+TEST_F(LegacyDistributorTest, closing_aborts_priority_queued_client_requests) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
document::BucketId bucket(16, 1);
addNodesToBucketDB(bucket, "0=1/1/1/t");
@@ -1031,7 +1026,7 @@ void assert_invalid_stats_for_all_spaces(
// TODO -> stripe test
// TODO must impl/test cross-stripe bucket space stats
// TODO cross-stripe recovery mode handling how?
-TEST_F(DistributorTest, entering_recovery_mode_resets_bucket_space_stats) {
+TEST_F(LegacyDistributorTest, entering_recovery_mode_resets_bucket_space_stats) {
// Set up a cluster state + DB contents which implies merge maintenance ops
setupDistributor(Redundancy(2), NodeCount(2), "version:1 distributor:1 storage:2");
addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t/a");
@@ -1053,7 +1048,7 @@ TEST_F(DistributorTest, entering_recovery_mode_resets_bucket_space_stats) {
}
// TODO figure out interaction between stripes and distributors on this one
-TEST_F(DistributorTest, leaving_recovery_mode_immediately_sends_getnodestate_replies) {
+TEST_F(LegacyDistributorTest, leaving_recovery_mode_immediately_sends_getnodestate_replies) {
setupDistributor(Redundancy(2), NodeCount(2), "version:1 distributor:1 storage:2");
// Should not send explicit replies during init stage
ASSERT_EQ(0, explicit_node_state_reply_send_invocations());
@@ -1074,7 +1069,7 @@ TEST_F(DistributorTest, leaving_recovery_mode_immediately_sends_getnodestate_rep
EXPECT_EQ(1, explicit_node_state_reply_send_invocations());
}
-void DistributorTest::do_test_pending_merge_getnodestate_reply_edge(BucketSpace space) {
+void LegacyDistributorTest::do_test_pending_merge_getnodestate_reply_edge(BucketSpace space) {
setupDistributor(Redundancy(2), NodeCount(2), "version:1 distributor:1 storage:2");
EXPECT_TRUE(distributor_is_in_recovery_mode());
// 2 buckets with missing replicas triggering merge pending stats
@@ -1110,15 +1105,15 @@ void DistributorTest::do_test_pending_merge_getnodestate_reply_edge(BucketSpace
EXPECT_EQ(2, explicit_node_state_reply_send_invocations());
}
-TEST_F(DistributorTest, pending_to_no_pending_default_merges_edge_immediately_sends_getnodestate_replies) {
+TEST_F(LegacyDistributorTest, pending_to_no_pending_default_merges_edge_immediately_sends_getnodestate_replies) {
do_test_pending_merge_getnodestate_reply_edge(FixedBucketSpaces::default_space());
}
-TEST_F(DistributorTest, pending_to_no_pending_global_merges_edge_immediately_sends_getnodestate_replies) {
+TEST_F(LegacyDistributorTest, pending_to_no_pending_global_merges_edge_immediately_sends_getnodestate_replies) {
do_test_pending_merge_getnodestate_reply_edge(FixedBucketSpaces::global_space());
}
-TEST_F(DistributorTest, stale_reads_config_is_propagated_to_external_operation_handler) {
+TEST_F(LegacyDistributorTest, stale_reads_config_is_propagated_to_external_operation_handler) {
createLinks();
setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
@@ -1129,7 +1124,7 @@ TEST_F(DistributorTest, stale_reads_config_is_propagated_to_external_operation_h
EXPECT_FALSE(getExternalOperationHandler().concurrent_gets_enabled());
}
-TEST_F(DistributorTest, fast_path_on_consistent_gets_config_is_propagated_to_internal_config) {
+TEST_F(LegacyDistributorTest, fast_path_on_consistent_gets_config_is_propagated_to_internal_config) {
createLinks();
setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
@@ -1140,7 +1135,7 @@ TEST_F(DistributorTest, fast_path_on_consistent_gets_config_is_propagated_to_int
EXPECT_FALSE(getConfig().update_fast_path_restart_enabled());
}
-TEST_F(DistributorTest, merge_disabling_config_is_propagated_to_internal_config) {
+TEST_F(LegacyDistributorTest, merge_disabling_config_is_propagated_to_internal_config) {
createLinks();
setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
@@ -1151,7 +1146,7 @@ TEST_F(DistributorTest, merge_disabling_config_is_propagated_to_internal_config)
EXPECT_FALSE(getConfig().merge_operations_disabled());
}
-TEST_F(DistributorTest, metadata_update_phase_config_is_propagated_to_internal_config) {
+TEST_F(LegacyDistributorTest, metadata_update_phase_config_is_propagated_to_internal_config) {
createLinks();
setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
@@ -1162,7 +1157,7 @@ TEST_F(DistributorTest, metadata_update_phase_config_is_propagated_to_internal_c
EXPECT_FALSE(getConfig().enable_metadata_only_fetch_phase_for_inconsistent_updates());
}
-TEST_F(DistributorTest, weak_internal_read_consistency_config_is_propagated_to_internal_configs) {
+TEST_F(LegacyDistributorTest, weak_internal_read_consistency_config_is_propagated_to_internal_configs) {
createLinks();
setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
@@ -1175,7 +1170,7 @@ TEST_F(DistributorTest, weak_internal_read_consistency_config_is_propagated_to_i
EXPECT_FALSE(getExternalOperationHandler().use_weak_internal_read_consistency_for_gets());
}
-void DistributorTest::set_up_and_start_get_op_with_stale_reads_enabled(bool enabled) {
+void LegacyDistributorTest::set_up_and_start_get_op_with_stale_reads_enabled(bool enabled) {
createLinks();
setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
configure_stale_reads_enabled(enabled);
@@ -1185,7 +1180,7 @@ void DistributorTest::set_up_and_start_get_op_with_stale_reads_enabled(bool enab
_distributor->onDown(make_dummy_get_command_for_bucket_1());
}
-TEST_F(DistributorTest, gets_are_started_outside_main_distributor_logic_if_stale_reads_enabled) {
+TEST_F(LegacyDistributorTest, gets_are_started_outside_main_distributor_logic_if_stale_reads_enabled) {
set_up_and_start_get_op_with_stale_reads_enabled(true);
ASSERT_THAT(_sender.commands(), SizeIs(1));
EXPECT_THAT(_sender.replies(), SizeIs(0));
@@ -1197,7 +1192,7 @@ TEST_F(DistributorTest, gets_are_started_outside_main_distributor_logic_if_stale
EXPECT_THAT(_sender.replies(), SizeIs(1));
}
-TEST_F(DistributorTest, gets_are_not_started_outside_main_distributor_logic_if_stale_reads_disabled) {
+TEST_F(LegacyDistributorTest, gets_are_not_started_outside_main_distributor_logic_if_stale_reads_disabled) {
set_up_and_start_get_op_with_stale_reads_enabled(false);
// Get has been placed into distributor queue, so no external messages are produced.
EXPECT_THAT(_sender.commands(), SizeIs(0));
@@ -1207,21 +1202,21 @@ TEST_F(DistributorTest, gets_are_not_started_outside_main_distributor_logic_if_s
// There's no need or desire to track "lockfree" Gets in the main pending message tracker,
// as we only have to track mutations to inhibit maintenance ops safely. Furthermore,
// the message tracker is a multi-index and therefore has some runtime cost.
-TEST_F(DistributorTest, gets_started_outside_main_thread_are_not_tracked_by_main_pending_message_tracker) {
+TEST_F(LegacyDistributorTest, gets_started_outside_main_thread_are_not_tracked_by_main_pending_message_tracker) {
set_up_and_start_get_op_with_stale_reads_enabled(true);
Bucket bucket(FixedBucketSpaces::default_space(), BucketId(16, 1));
EXPECT_FALSE(pending_message_tracker().hasPendingMessage(
0, bucket, api::MessageType::GET_ID));
}
-TEST_F(DistributorTest, closing_aborts_gets_started_outside_main_distributor_thread) {
+TEST_F(LegacyDistributorTest, closing_aborts_gets_started_outside_main_distributor_thread) {
set_up_and_start_get_op_with_stale_reads_enabled(true);
_distributor->close();
ASSERT_EQ(1, _sender.replies().size());
EXPECT_EQ(api::ReturnCode::ABORTED, _sender.reply(0)->getResult().getResult());
}
-TEST_F(DistributorTest, prioritize_global_bucket_merges_config_is_propagated_to_internal_config) {
+TEST_F(LegacyDistributorTest, prioritize_global_bucket_merges_config_is_propagated_to_internal_config) {
createLinks();
setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
@@ -1232,7 +1227,7 @@ TEST_F(DistributorTest, prioritize_global_bucket_merges_config_is_propagated_to_
EXPECT_FALSE(getConfig().prioritize_global_bucket_merges());
}
-TEST_F(DistributorTest, max_activation_inhibited_out_of_sync_groups_config_is_propagated_to_internal_config) {
+TEST_F(LegacyDistributorTest, max_activation_inhibited_out_of_sync_groups_config_is_propagated_to_internal_config) {
createLinks();
setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
@@ -1243,7 +1238,7 @@ TEST_F(DistributorTest, max_activation_inhibited_out_of_sync_groups_config_is_pr
EXPECT_EQ(getConfig().max_activation_inhibited_out_of_sync_groups(), 0);
}
-TEST_F(DistributorTest, wanted_split_bit_count_is_lower_bounded) {
+TEST_F(LegacyDistributorTest, wanted_split_bit_count_is_lower_bounded) {
createLinks();
setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
@@ -1254,7 +1249,7 @@ TEST_F(DistributorTest, wanted_split_bit_count_is_lower_bounded) {
EXPECT_EQ(getConfig().getMinimalBucketSplit(), 8);
}
-TEST_F(DistributorTest, host_info_sent_immediately_once_all_stripes_first_reported) {
+TEST_F(LegacyDistributorTest, host_info_sent_immediately_once_all_stripes_first_reported) {
set_num_distributor_stripes(4);
createLinks();
getClock().setAbsoluteTimeInSeconds(1000);
@@ -1283,7 +1278,7 @@ TEST_F(DistributorTest, host_info_sent_immediately_once_all_stripes_first_report
}
// TODO STRIPE make delay configurable instead of hardcoded
-TEST_F(DistributorTest, non_bootstrap_host_info_send_request_delays_sending) {
+TEST_F(LegacyDistributorTest, non_bootstrap_host_info_send_request_delays_sending) {
set_num_distributor_stripes(4);
createLinks();
getClock().setAbsoluteTimeInSeconds(1000);
diff --git a/storage/src/tests/distributor/maintenancemocks.h b/storage/src/tests/distributor/maintenancemocks.h
index fff798d4413..1245c9bb15d 100644
--- a/storage/src/tests/distributor/maintenancemocks.h
+++ b/storage/src/tests/distributor/maintenancemocks.h
@@ -51,7 +51,7 @@ public:
}
void onStart(DistributorStripeMessageSender&) override {}
void onReceive(DistributorStripeMessageSender&, const std::shared_ptr<api::StorageReply>&) override {}
- bool isBlocked(const PendingMessageTracker&, const OperationSequencer&) const override {
+ bool isBlocked(const DistributorStripeOperationContext&, const OperationSequencer&) const override {
return _shouldBlock;
}
void setShouldBlock(bool shouldBlock) {
diff --git a/storage/src/tests/distributor/mergeoperationtest.cpp b/storage/src/tests/distributor/mergeoperationtest.cpp
index 1026ea2855e..52a8bfc41b6 100644
--- a/storage/src/tests/distributor/mergeoperationtest.cpp
+++ b/storage/src/tests/distributor/mergeoperationtest.cpp
@@ -18,13 +18,11 @@ using namespace ::testing;
namespace storage::distributor {
struct MergeOperationTest : Test, DistributorTestUtil {
- std::unique_ptr<PendingMessageTracker> _pendingTracker;
OperationSequencer _operation_sequencer;
void SetUp() override {
createLinks();
- _pendingTracker = std::make_unique<PendingMessageTracker>(getComponentRegister());
- _sender.setPendingMessageTracker(*_pendingTracker);
+ _sender.setPendingMessageTracker(pending_message_tracker());
_sender.set_operation_sequencer(_operation_sequencer);
}
@@ -256,7 +254,7 @@ TEST_F(MergeOperationTest, do_not_remove_copies_with_pending_messages) {
makeDocumentBucket(bucket), api::SetBucketStateCommand::ACTIVE);
vespalib::string storage("storage");
msg->setAddress(api::StorageMessageAddress::create(&storage, lib::NodeType::STORAGE, 1));
- _pendingTracker->insert(msg);
+ pending_message_tracker().insert(msg);
sendReply(op);
// Should not be a remove here!
@@ -400,19 +398,19 @@ TEST_F(MergeOperationTest, merge_operation_is_blocked_by_any_busy_target_node) {
op.setIdealStateManager(&getIdealStateManager());
// Should not block on nodes _not_ included in operation node set
- _pendingTracker->getNodeInfo().setBusy(3, std::chrono::seconds(10));
- EXPECT_FALSE(op.isBlocked(*_pendingTracker, _operation_sequencer));
+ pending_message_tracker().getNodeInfo().setBusy(3, std::chrono::seconds(10));
+ EXPECT_FALSE(op.isBlocked(operation_context(), _operation_sequencer));
// Node 1 is included in operation node set and should cause a block
- _pendingTracker->getNodeInfo().setBusy(0, std::chrono::seconds(10));
- EXPECT_TRUE(op.isBlocked(*_pendingTracker, _operation_sequencer));
+ pending_message_tracker().getNodeInfo().setBusy(0, std::chrono::seconds(10));
+ EXPECT_TRUE(op.isBlocked(operation_context(), _operation_sequencer));
getClock().addSecondsToTime(11);
- EXPECT_FALSE(op.isBlocked(*_pendingTracker, _operation_sequencer)); // No longer busy
+ EXPECT_FALSE(op.isBlocked(operation_context(), _operation_sequencer)); // No longer busy
// Should block on other operation nodes than the first listed as well
- _pendingTracker->getNodeInfo().setBusy(1, std::chrono::seconds(10));
- EXPECT_TRUE(op.isBlocked(*_pendingTracker, _operation_sequencer));
+ pending_message_tracker().getNodeInfo().setBusy(1, std::chrono::seconds(10));
+ EXPECT_TRUE(op.isBlocked(operation_context(), _operation_sequencer));
}
@@ -426,8 +424,8 @@ TEST_F(MergeOperationTest, global_bucket_merges_are_not_blocked_by_busy_nodes) {
op.setIdealStateManager(&getIdealStateManager());
// Node 1 is included in operation node set but should not cause a block of global bucket merge
- _pendingTracker->getNodeInfo().setBusy(0, std::chrono::seconds(10));
- EXPECT_FALSE(op.isBlocked(*_pendingTracker, _operation_sequencer));
+ pending_message_tracker().getNodeInfo().setBusy(0, std::chrono::seconds(10));
+ EXPECT_FALSE(op.isBlocked(operation_context(), _operation_sequencer));
}
TEST_F(MergeOperationTest, merge_operation_is_blocked_by_locked_bucket) {
@@ -437,10 +435,10 @@ TEST_F(MergeOperationTest, merge_operation_is_blocked_by_locked_bucket) {
MergeOperation op(BucketAndNodes(makeDocumentBucket(document::BucketId(16, 1)), toVector<uint16_t>(0, 1, 2)));
op.setIdealStateManager(&getIdealStateManager());
- EXPECT_FALSE(op.isBlocked(*_pendingTracker, _operation_sequencer));
+ EXPECT_FALSE(op.isBlocked(operation_context(), _operation_sequencer));
auto token = _operation_sequencer.try_acquire(makeDocumentBucket(document::BucketId(16, 1)), "foo");
EXPECT_TRUE(token.valid());
- EXPECT_TRUE(op.isBlocked(*_pendingTracker, _operation_sequencer));
+ EXPECT_TRUE(op.isBlocked(operation_context(), _operation_sequencer));
}
TEST_F(MergeOperationTest, missing_replica_is_included_in_limited_node_list) {
diff --git a/storage/src/tests/distributor/simplemaintenancescannertest.cpp b/storage/src/tests/distributor/simplemaintenancescannertest.cpp
index 58dc2430041..1bf3809b135 100644
--- a/storage/src/tests/distributor/simplemaintenancescannertest.cpp
+++ b/storage/src/tests/distributor/simplemaintenancescannertest.cpp
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <tests/distributor/maintenancemocks.h>
+#include <vespa/document/bucket/fixed_bucket_spaces.h>
#include <vespa/document/test/make_bucket_space.h>
#include <vespa/storage/distributor/distributor_bucket_space.h>
#include <vespa/storage/distributor/distributor_bucket_space_repo.h>
@@ -209,4 +210,88 @@ TEST_F(SimpleMaintenanceScannerTest, per_node_maintenance_stats_are_tracked) {
}
}
+TEST_F(SimpleMaintenanceScannerTest, merge_node_maintenance_stats) {
+
+ NodeMaintenanceStats stats_a;
+ stats_a.movingOut = 1;
+ stats_a.syncing = 2;
+ stats_a.copyingIn = 3;
+ stats_a.copyingOut = 4;
+ stats_a.total = 5;
+
+ NodeMaintenanceStats stats_b;
+ stats_b.movingOut = 10;
+ stats_b.syncing = 20;
+ stats_b.copyingIn = 30;
+ stats_b.copyingOut = 40;
+ stats_b.total = 50;
+
+ NodeMaintenanceStats result;
+ result.merge(stats_a);
+ result.merge(stats_b);
+
+ NodeMaintenanceStats exp;
+ exp.movingOut = 11;
+ exp.syncing = 22;
+ exp.copyingIn = 33;
+ exp.copyingOut = 44;
+ exp.total = 55;
+ EXPECT_EQ(exp, result);
+}
+
+TEST_F(SimpleMaintenanceScannerTest, merge_pending_maintenance_stats) {
+ auto default_space = document::FixedBucketSpaces::default_space();
+ auto global_space = document::FixedBucketSpaces::global_space();
+
+ PendingStats stats_a;
+ stats_a.global.pending[MaintenanceOperation::DELETE_BUCKET] = 1;
+ stats_a.global.pending[MaintenanceOperation::MERGE_BUCKET] = 2;
+ stats_a.global.pending[MaintenanceOperation::SPLIT_BUCKET] = 3;
+ stats_a.global.pending[MaintenanceOperation::JOIN_BUCKET] = 4;
+ stats_a.global.pending[MaintenanceOperation::SET_BUCKET_STATE] = 5;
+ stats_a.global.pending[MaintenanceOperation::GARBAGE_COLLECTION] = 6;
+ stats_a.perNodeStats.incMovingOut(3, default_space);
+ stats_a.perNodeStats.incSyncing(3, global_space);
+ stats_a.perNodeStats.incCopyingIn(5, default_space);
+ stats_a.perNodeStats.incCopyingOut(5, global_space);
+ stats_a.perNodeStats.incTotal(5, default_space);
+
+ PendingStats stats_b;
+ stats_b.global.pending[MaintenanceOperation::DELETE_BUCKET] = 10;
+ stats_b.global.pending[MaintenanceOperation::MERGE_BUCKET] = 20;
+ stats_b.global.pending[MaintenanceOperation::SPLIT_BUCKET] = 30;
+ stats_b.global.pending[MaintenanceOperation::JOIN_BUCKET] = 40;
+ stats_b.global.pending[MaintenanceOperation::SET_BUCKET_STATE] = 50;
+ stats_b.global.pending[MaintenanceOperation::GARBAGE_COLLECTION] = 60;
+ stats_b.perNodeStats.incMovingOut(7, default_space);
+ stats_b.perNodeStats.incSyncing(7, global_space);
+ stats_b.perNodeStats.incCopyingIn(5, default_space);
+ stats_b.perNodeStats.incCopyingOut(5, global_space);
+ stats_b.perNodeStats.incTotal(5, default_space);
+
+ PendingStats result;
+ result.merge(stats_a);
+ result.merge(stats_b);
+
+ PendingStats exp;
+ exp.global.pending[MaintenanceOperation::DELETE_BUCKET] = 11;
+ exp.global.pending[MaintenanceOperation::MERGE_BUCKET] = 22;
+ exp.global.pending[MaintenanceOperation::SPLIT_BUCKET] = 33;
+ exp.global.pending[MaintenanceOperation::JOIN_BUCKET] = 44;
+ exp.global.pending[MaintenanceOperation::SET_BUCKET_STATE] = 55;
+ exp.global.pending[MaintenanceOperation::GARBAGE_COLLECTION] = 66;
+ exp.perNodeStats.incMovingOut(3, default_space);
+ exp.perNodeStats.incSyncing(3, global_space);
+ exp.perNodeStats.incCopyingIn(5, default_space);
+ exp.perNodeStats.incCopyingIn(5, default_space);
+ exp.perNodeStats.incCopyingOut(5, global_space);
+ exp.perNodeStats.incCopyingOut(5, global_space);
+ exp.perNodeStats.incTotal(5, default_space);
+ exp.perNodeStats.incTotal(5, default_space);
+ exp.perNodeStats.incMovingOut(7, default_space);
+ exp.perNodeStats.incSyncing(7, global_space);
+ EXPECT_EQ(exp.global, result.global);
+ EXPECT_EQ(exp.perNodeStats, result.perNodeStats);
+}
+
}
diff --git a/storage/src/tests/distributor/splitbuckettest.cpp b/storage/src/tests/distributor/splitbuckettest.cpp
index 8c8da1bb197..ec58992ed3e 100644
--- a/storage/src/tests/distributor/splitbuckettest.cpp
+++ b/storage/src/tests/distributor/splitbuckettest.cpp
@@ -261,7 +261,6 @@ TEST_F(SplitOperationTest, operation_blocked_by_pending_join) {
framework::defaultimplementation::FakeClock clock;
compReg.setClock(clock);
clock.setAbsoluteTimeInSeconds(1);
- PendingMessageTracker tracker(compReg);
OperationSequencer op_seq;
enableDistributorClusterState("distributor:1 storage:2");
@@ -274,7 +273,7 @@ TEST_F(SplitOperationTest, operation_blocked_by_pending_join) {
joinCmd->getSourceBuckets() = joinSources;
joinCmd->setAddress(_Storage0Address);
- tracker.insert(joinCmd);
+ pending_message_tracker().insert(joinCmd);
insertBucketInfo(joinTarget, 0, 0xabc, 1000, 1234, true);
@@ -284,18 +283,18 @@ TEST_F(SplitOperationTest, operation_blocked_by_pending_join) {
splitCount,
splitByteSize);
- EXPECT_TRUE(op.isBlocked(tracker, op_seq));
+ EXPECT_TRUE(op.isBlocked(operation_context(), op_seq));
// Now, pretend there's a join for another node in the same bucket. This
// will happen when a join is partially completed.
- tracker.clearMessagesForNode(0);
- EXPECT_FALSE(op.isBlocked(tracker, op_seq));
+ pending_message_tracker().clearMessagesForNode(0);
+ EXPECT_FALSE(op.isBlocked(operation_context(), op_seq));
joinCmd->setAddress(api::StorageMessageAddress::create(dummy_cluster_context.cluster_name_ptr(),
lib::NodeType::STORAGE, 1));
- tracker.insert(joinCmd);
+ pending_message_tracker().insert(joinCmd);
- EXPECT_TRUE(op.isBlocked(tracker, op_seq));
+ EXPECT_TRUE(op.isBlocked(operation_context(), op_seq));
}
TEST_F(SplitOperationTest, split_is_blocked_by_locked_bucket) {
@@ -303,7 +302,6 @@ TEST_F(SplitOperationTest, split_is_blocked_by_locked_bucket) {
framework::defaultimplementation::FakeClock clock;
compReg.setClock(clock);
clock.setAbsoluteTimeInSeconds(1);
- PendingMessageTracker tracker(compReg);
OperationSequencer op_seq;
enableDistributorClusterState("distributor:1 storage:2");
@@ -314,10 +312,10 @@ TEST_F(SplitOperationTest, split_is_blocked_by_locked_bucket) {
SplitOperation op(dummy_cluster_context, BucketAndNodes(makeDocumentBucket(source_bucket), toVector<uint16_t>(0)),
maxSplitBits, splitCount, splitByteSize);
- EXPECT_FALSE(op.isBlocked(tracker, op_seq));
+ EXPECT_FALSE(op.isBlocked(operation_context(), op_seq));
auto token = op_seq.try_acquire(makeDocumentBucket(source_bucket), "foo");
EXPECT_TRUE(token.valid());
- EXPECT_TRUE(op.isBlocked(tracker, op_seq));
+ EXPECT_TRUE(op.isBlocked(operation_context(), op_seq));
}
} // storage::distributor
diff --git a/storage/src/tests/storageserver/mergethrottlertest.cpp b/storage/src/tests/storageserver/mergethrottlertest.cpp
index 12ed9ead1b6..dfeaee031ba 100644
--- a/storage/src/tests/storageserver/mergethrottlertest.cpp
+++ b/storage/src/tests/storageserver/mergethrottlertest.cpp
@@ -1220,6 +1220,7 @@ TEST_F(MergeThrottlerTest, unknown_merge_with_self_in_chain) {
TEST_F(MergeThrottlerTest, busy_returned_on_full_queue) {
size_t maxPending = _throttlers[0]->getThrottlePolicy().getMaxPendingCount();
size_t maxQueue = _throttlers[0]->getMaxQueueSize();
+ ASSERT_EQ(20, maxQueue);
ASSERT_LT(maxPending, 100);
for (std::size_t i = 0; i < maxPending + maxQueue; ++i) {
std::vector<MergeBucketCommand::Node> nodes;
@@ -1234,6 +1235,7 @@ TEST_F(MergeThrottlerTest, busy_returned_on_full_queue) {
// Wait till we have maxPending replies and maxQueue queued
_topLinks[0]->waitForMessages(maxPending, _messageWaitTime);
waitUntilMergeQueueIs(*_throttlers[0], maxQueue, _messageWaitTime);
+ EXPECT_EQ(maxQueue, _throttlers[0]->getMetrics().queueSize.getMaximum());
// Clear all forwarded merges
_topLinks[0]->getRepliesOnce();
diff --git a/storage/src/vespa/storage/config/distributorconfiguration.h b/storage/src/vespa/storage/config/distributorconfiguration.h
index 479298ff082..7aa10893b80 100644
--- a/storage/src/vespa/storage/config/distributorconfiguration.h
+++ b/storage/src/vespa/storage/config/distributorconfiguration.h
@@ -9,7 +9,7 @@
namespace storage {
-namespace distributor { struct DistributorTest; }
+namespace distributor { struct LegacyDistributorTest; }
class DistributorConfiguration {
public:
@@ -323,7 +323,7 @@ private:
DistrConfig::MinimumReplicaCountingMode _minimumReplicaCountingMode;
- friend struct distributor::DistributorTest;
+ friend struct distributor::LegacyDistributorTest;
void configureMaintenancePriorities(
const vespa::config::content::core::StorDistributormanagerConfig&);
};
diff --git a/storage/src/vespa/storage/distributor/CMakeLists.txt b/storage/src/vespa/storage/distributor/CMakeLists.txt
index 7b048e9f109..eba76c91af0 100644
--- a/storage/src/vespa/storage/distributor/CMakeLists.txt
+++ b/storage/src/vespa/storage/distributor/CMakeLists.txt
@@ -7,6 +7,7 @@ vespa_add_library(storage_distributor
bucket_space_distribution_configs.cpp
bucket_space_distribution_context.cpp
bucket_space_state_map.cpp
+ bucket_spaces_stats_provider.cpp
bucketdbupdater.cpp
bucketgctimecalculator.cpp
bucketlistmerger.cpp
@@ -22,13 +23,16 @@ vespa_add_library(storage_distributor
distributor_stripe_component.cpp
distributor_stripe_pool.cpp
distributor_stripe_thread.cpp
+ distributor_total_metrics.cpp
distributormessagesender.cpp
distributormetricsset.cpp
externaloperationhandler.cpp
ideal_service_layer_nodes_bundle.cpp
+ ideal_state_total_metrics.cpp
idealstatemanager.cpp
idealstatemetricsset.cpp
messagetracker.cpp
+ min_replica_provider.cpp
multi_threaded_stripe_access_guard.cpp
nodeinfo.cpp
operation_routing_snapshot.cpp
diff --git a/storage/src/vespa/storage/distributor/blockingoperationstarter.cpp b/storage/src/vespa/storage/distributor/blockingoperationstarter.cpp
index 25c38888098..e9b53e35b61 100644
--- a/storage/src/vespa/storage/distributor/blockingoperationstarter.cpp
+++ b/storage/src/vespa/storage/distributor/blockingoperationstarter.cpp
@@ -7,7 +7,7 @@ namespace storage::distributor {
bool
BlockingOperationStarter::start(const std::shared_ptr<Operation>& operation, Priority priority)
{
- if (operation->isBlocked(_messageTracker, _operation_sequencer)) {
+ if (operation->isBlocked(_operation_context, _operation_sequencer)) {
return true;
}
return _starterImpl.start(operation, priority);
diff --git a/storage/src/vespa/storage/distributor/blockingoperationstarter.h b/storage/src/vespa/storage/distributor/blockingoperationstarter.h
index e79ae6b4a79..180e617d08d 100644
--- a/storage/src/vespa/storage/distributor/blockingoperationstarter.h
+++ b/storage/src/vespa/storage/distributor/blockingoperationstarter.h
@@ -6,16 +6,16 @@
namespace storage::distributor {
-class PendingMessageTracker;
+class DistributorStripeOperationContext;
class OperationSequencer;
class BlockingOperationStarter : public OperationStarter
{
public:
- BlockingOperationStarter(PendingMessageTracker& messageTracker,
+ BlockingOperationStarter(DistributorStripeOperationContext& ctx,
OperationSequencer& operation_sequencer,
OperationStarter& starterImpl)
- : _messageTracker(messageTracker),
+ : _operation_context(ctx),
_operation_sequencer(operation_sequencer),
_starterImpl(starterImpl)
{}
@@ -24,9 +24,9 @@ public:
bool start(const std::shared_ptr<Operation>& operation, Priority priority) override;
private:
- PendingMessageTracker& _messageTracker;
- OperationSequencer& _operation_sequencer;
- OperationStarter& _starterImpl;
+ DistributorStripeOperationContext& _operation_context;
+ OperationSequencer& _operation_sequencer;
+ OperationStarter& _starterImpl;
};
}
diff --git a/storage/src/vespa/storage/distributor/bucket_spaces_stats_provider.cpp b/storage/src/vespa/storage/distributor/bucket_spaces_stats_provider.cpp
new file mode 100644
index 00000000000..2b12d437aaa
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/bucket_spaces_stats_provider.cpp
@@ -0,0 +1,40 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "bucket_spaces_stats_provider.h"
+
+namespace storage::distributor {
+
+std::ostream&
+operator<<(std::ostream& out, const BucketSpaceStats& stats)
+{
+ out << "{valid=" << stats.valid() << ", bucketsTotal=" << stats.bucketsTotal() << ", bucketsPending=" << stats.bucketsPending() << "}";
+ return out;
+}
+
+void
+merge_bucket_spaces_stats(BucketSpacesStatsProvider::BucketSpacesStats& dest,
+ const BucketSpacesStatsProvider::BucketSpacesStats& src)
+{
+ for (const auto& entry : src) {
+ const auto& bucket_space_name = entry.first;
+ auto itr = dest.find(bucket_space_name);
+ if (itr != dest.end()) {
+ itr->second.merge(entry.second);
+ } else {
+ // We need to explicitly handle this case to avoid creating an empty BucketSpaceStats that is not valid.
+ dest[bucket_space_name] = entry.second;
+ }
+ }
+}
+
+void
+merge_per_node_bucket_spaces_stats(BucketSpacesStatsProvider::PerNodeBucketSpacesStats& dest,
+ const BucketSpacesStatsProvider::PerNodeBucketSpacesStats& src)
+{
+ for (const auto& entry : src) {
+ auto node_index = entry.first;
+ merge_bucket_spaces_stats(dest[node_index], entry.second);
+ }
+}
+
+}
diff --git a/storage/src/vespa/storage/distributor/bucket_spaces_stats_provider.h b/storage/src/vespa/storage/distributor/bucket_spaces_stats_provider.h
index 3d7b60f4471..c8ba04ed1ab 100644
--- a/storage/src/vespa/storage/distributor/bucket_spaces_stats_provider.h
+++ b/storage/src/vespa/storage/distributor/bucket_spaces_stats_provider.h
@@ -3,6 +3,7 @@
#include <vespa/vespalib/stllike/string.h>
#include <map>
+#include <ostream>
#include <unordered_map>
namespace storage::distributor {
@@ -32,8 +33,22 @@ public:
bool valid() const noexcept { return _valid; }
size_t bucketsTotal() const noexcept { return _bucketsTotal; }
size_t bucketsPending() const noexcept { return _bucketsPending; }
+
+ bool operator==(const BucketSpaceStats& rhs) const {
+ return (_valid == rhs._valid) &&
+ (_bucketsTotal == rhs._bucketsTotal) &&
+ (_bucketsPending == rhs._bucketsPending);
+ }
+
+ void merge(const BucketSpaceStats& rhs) {
+ _valid = _valid && rhs._valid;
+ _bucketsTotal += rhs._bucketsTotal;
+ _bucketsPending += rhs._bucketsPending;
+ }
};
+std::ostream& operator<<(std::ostream& out, const BucketSpaceStats& stats);
+
/**
* Interface that provides snapshots of bucket spaces statistics per content node.
*/
@@ -48,4 +63,10 @@ public:
virtual PerNodeBucketSpacesStats getBucketSpacesStats() const = 0;
};
+void merge_bucket_spaces_stats(BucketSpacesStatsProvider::BucketSpacesStats& dest,
+ const BucketSpacesStatsProvider::BucketSpacesStats& src);
+
+void merge_per_node_bucket_spaces_stats(BucketSpacesStatsProvider::PerNodeBucketSpacesStats& dest,
+ const BucketSpacesStatsProvider::PerNodeBucketSpacesStats& src);
+
}
diff --git a/storage/src/vespa/storage/distributor/distributor.cpp b/storage/src/vespa/storage/distributor/distributor.cpp
index 65945b2c6ae..6f9cbf3b0f2 100644
--- a/storage/src/vespa/storage/distributor/distributor.cpp
+++ b/storage/src/vespa/storage/distributor/distributor.cpp
@@ -9,7 +9,7 @@
#include "distributor_stripe.h"
#include "distributor_stripe_pool.h"
#include "distributor_stripe_thread.h"
-#include "distributormetricsset.h"
+#include "distributor_total_metrics.h"
#include "idealstatemetricsset.h"
#include "multi_threaded_stripe_access_guard.h"
#include "operation_sequencer.h"
@@ -59,18 +59,29 @@ Distributor::Distributor(DistributorComponentRegister& compReg,
: StorageLink("distributor"),
framework::StatusReporter("distributor", "Distributor"),
_comp_reg(compReg),
+ _use_legacy_mode(num_distributor_stripes == 0),
_metrics(std::make_shared<DistributorMetricSet>()),
+ _total_metrics(_use_legacy_mode ? std::shared_ptr<DistributorTotalMetrics>() :
+ std::make_shared<DistributorTotalMetrics>(num_distributor_stripes)),
+ _ideal_state_metrics(_use_legacy_mode ? std::make_shared<IdealStateMetricSet>() : std::shared_ptr<IdealStateMetricSet>()),
+ _ideal_state_total_metrics(_use_legacy_mode ? std::shared_ptr<IdealStateTotalMetrics>() :
+ std::make_shared<IdealStateTotalMetrics>(num_distributor_stripes)),
_messageSender(messageSender),
- _use_legacy_mode(num_distributor_stripes == 0),
_n_stripe_bits(0),
- _stripe(std::make_unique<DistributorStripe>(compReg, *_metrics, node_identity, threadPool,
+ _stripe(std::make_unique<DistributorStripe>(compReg,
+ _use_legacy_mode ? *_metrics : _total_metrics->stripe(0),
+ _use_legacy_mode ? *_ideal_state_metrics : _ideal_state_total_metrics->stripe(0),
+ node_identity, threadPool,
doneInitHandler, *this, *this, _use_legacy_mode)),
_stripe_pool(stripe_pool),
_stripes(),
_stripe_accessor(),
+ _random_stripe_gen(),
+ _random_stripe_gen_mutex(),
_message_queue(),
_fetched_messages(),
_component(*this, compReg, "distributor"),
+ _ideal_state_component(compReg, "Ideal state manager"),
_total_config(_component.total_distributor_config_sp()),
_bucket_db_updater(),
_distributorStatusDelegate(compReg, *this, *this),
@@ -89,7 +100,9 @@ Distributor::Distributor(DistributorComponentRegister& compReg,
_next_distribution(),
_current_internal_config_generation(_component.internal_config_generation())
{
- _component.registerMetric(*_metrics);
+ _component.registerMetric(_use_legacy_mode ? *_metrics : *_total_metrics);
+ _ideal_state_component.registerMetric(_use_legacy_mode ? *_ideal_state_metrics :
+ *_ideal_state_total_metrics);
_component.registerMetricUpdateHook(_metricUpdateHook, framework::SecondTime(0));
if (!_use_legacy_mode) {
assert(num_distributor_stripes == adjusted_num_stripes(num_distributor_stripes));
@@ -103,7 +116,10 @@ Distributor::Distributor(DistributorComponentRegister& compReg,
*_stripe_accessor);
_stripes.emplace_back(std::move(_stripe));
for (size_t i = 1; i < num_distributor_stripes; ++i) {
- _stripes.emplace_back(std::make_unique<DistributorStripe>(compReg, *_metrics, node_identity, threadPool,
+ _stripes.emplace_back(std::make_unique<DistributorStripe>(compReg,
+ _total_metrics->stripe(i),
+ _ideal_state_total_metrics->stripe(i),
+ node_identity, threadPool,
doneInitHandler, *this, *this, _use_legacy_mode, i));
}
_stripe_scan_stats.resize(num_distributor_stripes);
@@ -122,16 +138,10 @@ Distributor::~Distributor()
closeNextLink();
}
-// TODO STRIPE remove
-DistributorStripe&
-Distributor::first_stripe() noexcept {
- return *_stripes[0];
-}
-
-// TODO STRIPE remove
-const DistributorStripe&
-Distributor::first_stripe() const noexcept {
- return *_stripes[0];
+DistributorMetricSet&
+Distributor::getMetrics()
+{
+ return _use_legacy_mode ? *_metrics : _total_metrics->bucket_db_updater_metrics();
}
// TODO STRIPE figure out how to handle inspection functions used by tests when legacy mode no longer exists.
@@ -320,6 +330,7 @@ namespace {
bool should_be_handled_by_top_level_bucket_db_updater(const api::StorageMessage& msg) noexcept {
switch (msg.getType().getId()) {
case api::MessageType::SETSYSTEMSTATE_ID:
+ case api::MessageType::GETNODESTATE_ID:
case api::MessageType::ACTIVATE_CLUSTER_STATE_VERSION_ID:
return true;
case api::MessageType::REQUESTBUCKETINFO_REPLY_ID:
@@ -342,15 +353,7 @@ get_bucket_id_for_striping(const api::StorageMessage& msg, const DistributorNode
case api::MessageType::REMOVE_ID:
return node_ctx.bucket_id_factory().getBucketId(dynamic_cast<const api::TestAndSetCommand&>(msg).getDocumentId());
case api::MessageType::REQUESTBUCKETINFO_REPLY_ID:
- {
- const auto& reply = dynamic_cast<const api::RequestBucketInfoReply&>(msg);
- if (!reply.getBucketInfo().empty()) {
- // Note: All bucket ids in this reply belong to the same distributor stripe, so we just use the first entry.
- return reply.getBucketInfo()[0]._bucketId;
- } else {
- return reply.getBucketId();
- }
- }
+ return dynamic_cast<const api::RequestBucketInfoReply&>(msg).super_bucket_id();
case api::MessageType::GET_ID:
return node_ctx.bucket_id_factory().getBucketId(dynamic_cast<const api::GetCommand&>(msg).getDocumentId());
case api::MessageType::VISITOR_CREATE_ID:
@@ -364,16 +367,31 @@ get_bucket_id_for_striping(const api::StorageMessage& msg, const DistributorNode
return msg.getBucketId();
}
+}
+
uint32_t
-stripe_of_bucket_id(const document::BucketId& bucketd_id, uint8_t n_stripe_bits)
+Distributor::random_stripe_idx()
{
- if (!bucketd_id.isSet()) {
- // TODO STRIPE: Messages with a non-set bucket id should be handled by the top-level distributor instead.
- return 0;
- }
- return storage::stripe_of_bucket_key(bucketd_id.toKey(), n_stripe_bits);
+ std::lock_guard lock(_random_stripe_gen_mutex);
+ return _random_stripe_gen.nextUint32() % _stripes.size();
}
+uint32_t
+Distributor::stripe_of_bucket_id(const document::BucketId& bucket_id, const api::StorageMessage& msg)
+{
+ if (!bucket_id.isSet()) {
+ LOG(error, "Message (%s) has a bucket id (%s) that is not set. Cannot route to stripe",
+ msg.toString(true).c_str(), bucket_id.toString().c_str());
+ }
+ assert(bucket_id.isSet());
+ if (bucket_id.getUsedBits() < spi::BucketLimits::MinUsedBits) {
+ if (msg.getType().getId() == api::MessageType::VISITOR_CREATE_ID) {
+ // This message will eventually be bounced with api::ReturnCode::WRONG_DISTRIBUTION,
+ // so we can just route it to a random distributor stripe.
+ return random_stripe_idx();
+ }
+ }
+ return storage::stripe_of_bucket_key(bucket_id.toKey(), _n_stripe_bits);
}
bool
@@ -389,7 +407,7 @@ Distributor::onDown(const std::shared_ptr<api::StorageMessage>& msg)
return true;
}
auto bucket_id = get_bucket_id_for_striping(*msg, _component);
- uint32_t stripe_idx = stripe_of_bucket_id(bucket_id, _n_stripe_bits);
+ uint32_t stripe_idx = stripe_of_bucket_id(bucket_id, *msg);
MBUS_TRACE(msg->getTrace(), 9,
vespalib::make_string("Distributor::onDown(): Dispatch message to stripe %u", stripe_idx));
bool handled = _stripes[stripe_idx]->handle_or_enqueue_message(msg);
@@ -506,44 +524,55 @@ Distributor::propagateDefaultDistribution(
std::unordered_map<uint16_t, uint32_t>
Distributor::getMinReplica() const
{
- // TODO STRIPE merged snapshot from all stripes
if (_use_legacy_mode) {
return _stripe->getMinReplica();
} else {
- return first_stripe().getMinReplica();
+ std::unordered_map<uint16_t, uint32_t> result;
+ for (const auto& stripe : _stripes) {
+ merge_min_replica_stats(result, stripe->getMinReplica());
+ }
+ return result;
}
}
BucketSpacesStatsProvider::PerNodeBucketSpacesStats
Distributor::getBucketSpacesStats() const
{
- // TODO STRIPE merged snapshot from all stripes
if (_use_legacy_mode) {
return _stripe->getBucketSpacesStats();
} else {
- return first_stripe().getBucketSpacesStats();
+ BucketSpacesStatsProvider::PerNodeBucketSpacesStats result;
+ for (const auto& stripe : _stripes) {
+ merge_per_node_bucket_spaces_stats(result, stripe->getBucketSpacesStats());
+ }
+ return result;
}
}
SimpleMaintenanceScanner::PendingMaintenanceStats
Distributor::pending_maintenance_stats() const {
- // TODO STRIPE merged snapshot from all stripes
if (_use_legacy_mode) {
return _stripe->pending_maintenance_stats();
} else {
- return first_stripe().pending_maintenance_stats();
+ SimpleMaintenanceScanner::PendingMaintenanceStats result;
+ for (const auto& stripe : _stripes) {
+ result.merge(stripe->pending_maintenance_stats());
+ }
+ return result;
}
}
void
Distributor::propagateInternalScanMetricsToExternal()
{
- // TODO STRIPE propagate to all stripes
- // TODO STRIPE reconsider metric wiring...
if (_use_legacy_mode) {
_stripe->propagateInternalScanMetricsToExternal();
} else {
- first_stripe().propagateInternalScanMetricsToExternal();
+ for (auto &stripe : _stripes) {
+ stripe->propagateInternalScanMetricsToExternal();
+ }
+ _total_metrics->aggregate();
+ _ideal_state_total_metrics->aggregate();
}
}
@@ -739,13 +768,8 @@ Distributor::getReportContentType(const framework::HttpUrlPath& path) const
std::string
Distributor::getActiveIdealStateOperations() const
{
- // TODO STRIPE need to aggregate status responses _across_ stripes..!
- if (_use_legacy_mode) {
- return _stripe->getActiveIdealStateOperations();
- } else {
- auto guard = _stripe_accessor->rendezvous_and_hold_all();
- return first_stripe().getActiveIdealStateOperations();
- }
+ assert(_use_legacy_mode);
+ return _stripe->getActiveIdealStateOperations();
}
bool
diff --git a/storage/src/vespa/storage/distributor/distributor.h b/storage/src/vespa/storage/distributor/distributor.h
index 61a1f06309d..41d88f5dba1 100644
--- a/storage/src/vespa/storage/distributor/distributor.h
+++ b/storage/src/vespa/storage/distributor/distributor.h
@@ -9,6 +9,7 @@
#include "distributor_interface.h"
#include "distributor_stripe_interface.h"
#include "externaloperationhandler.h"
+#include "ideal_state_total_metrics.h"
#include "idealstatemanager.h"
#include "min_replica_provider.h"
#include "pendingmessagetracker.h"
@@ -24,6 +25,7 @@
#include <vespa/storageapi/message/state.h>
#include <vespa/storageframework/generic/metric/metricupdatehook.h>
#include <vespa/storageframework/generic/thread/tickingthread.h>
+#include <vespa/vdslib/state/random.h>
#include <chrono>
#include <queue>
#include <unordered_map>
@@ -43,6 +45,7 @@ class DistributorBucketSpaceRepo;
class DistributorStatus;
class DistributorStripe;
class DistributorStripePool;
+class DistributorTotalMetrics;
class StripeAccessor;
class OperationSequencer;
class OwnershipTransferSafeTimePointCalculator;
@@ -77,7 +80,7 @@ public:
void sendUp(const std::shared_ptr<api::StorageMessage>&) override;
void sendDown(const std::shared_ptr<api::StorageMessage>&) override;
- DistributorMetricSet& getMetrics() { return *_metrics; }
+ DistributorMetricSet& getMetrics();
// Implements DistributorInterface and DistributorMessageSender.
DistributorMetricSet& metrics() override { return getMetrics(); }
@@ -122,14 +125,10 @@ public:
};
private:
- friend struct DistributorTest;
- friend class BucketDBUpdaterTest;
friend class DistributorTestUtil;
+ friend class LegacyBucketDBUpdaterTest;
friend class MetricUpdateHook;
-
- // TODO STRIPE remove
- DistributorStripe& first_stripe() noexcept;
- const DistributorStripe& first_stripe() const noexcept;
+ friend struct LegacyDistributorTest;
void setNodeStateUp();
bool handleMessage(const std::shared_ptr<api::StorageMessage>& msg);
@@ -189,6 +188,9 @@ private:
// Precondition: _stripe_scan_notify_mutex is held
[[nodiscard]] bool may_send_host_info_on_behalf_of_stripes(std::lock_guard<std::mutex>& held_lock) noexcept;
+ uint32_t random_stripe_idx();
+ uint32_t stripe_of_bucket_id(const document::BucketId& bucket_id, const api::StorageMessage& msg);
+
struct StripeScanStats {
bool wants_to_send_host_info = false;
bool has_reported_in_at_least_once = false;
@@ -197,18 +199,24 @@ private:
using MessageQueue = std::vector<std::shared_ptr<api::StorageMessage>>;
DistributorComponentRegister& _comp_reg;
+ const bool _use_legacy_mode;
std::shared_ptr<DistributorMetricSet> _metrics;
+ std::shared_ptr<DistributorTotalMetrics> _total_metrics;
+ std::shared_ptr<IdealStateMetricSet> _ideal_state_metrics;
+ std::shared_ptr<IdealStateTotalMetrics> _ideal_state_total_metrics;
ChainedMessageSender* _messageSender;
- const bool _use_legacy_mode;
// TODO STRIPE multiple stripes...! This is for proof of concept of wiring.
uint8_t _n_stripe_bits;
std::unique_ptr<DistributorStripe> _stripe;
DistributorStripePool& _stripe_pool;
std::vector<std::unique_ptr<DistributorStripe>> _stripes;
std::unique_ptr<StripeAccessor> _stripe_accessor;
+ storage::lib::RandomGen _random_stripe_gen;
+ std::mutex _random_stripe_gen_mutex;
MessageQueue _message_queue; // Queue for top-level ops
MessageQueue _fetched_messages;
distributor::DistributorComponent _component;
+ storage::DistributorComponent _ideal_state_component;
std::shared_ptr<const DistributorConfiguration> _total_config;
std::unique_ptr<BucketDBUpdater> _bucket_db_updater;
StatusReporterDelegate _distributorStatusDelegate;
diff --git a/storage/src/vespa/storage/distributor/distributor_operation_context.h b/storage/src/vespa/storage/distributor/distributor_operation_context.h
index aa598835cdb..e0d481a322a 100644
--- a/storage/src/vespa/storage/distributor/distributor_operation_context.h
+++ b/storage/src/vespa/storage/distributor/distributor_operation_context.h
@@ -20,7 +20,7 @@ public:
virtual ~DistributorOperationContext() {}
virtual api::Timestamp generate_unique_timestamp() = 0;
// TODO STRIPE: Access to bucket space repos is only temporary at this level.
- virtual const DistributorBucketSpaceRepo& bucket_space_repo() const noexcept= 0;
+ virtual const DistributorBucketSpaceRepo& bucket_space_repo() const noexcept = 0;
virtual DistributorBucketSpaceRepo& bucket_space_repo() noexcept = 0;
virtual const DistributorBucketSpaceRepo& read_only_bucket_space_repo() const noexcept = 0;
virtual DistributorBucketSpaceRepo& read_only_bucket_space_repo() noexcept = 0;
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe.cpp b/storage/src/vespa/storage/distributor/distributor_stripe.cpp
index bf78707cfd9..837193a1e7c 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_stripe.cpp
@@ -36,6 +36,7 @@ namespace storage::distributor {
*/
DistributorStripe::DistributorStripe(DistributorComponentRegister& compReg,
DistributorMetricSet& metrics,
+ IdealStateMetricSet& ideal_state_metrics,
const NodeIdentity& node_identity,
framework::TickingThreadPool& threadPool,
DoneInitializeHandler& doneInitHandler,
@@ -58,7 +59,7 @@ DistributorStripe::DistributorStripe(DistributorComponentRegister& compReg,
_bucketDBUpdater(_component, _component, *this, *this, use_legacy_mode),
_distributorStatusDelegate(compReg, *this, *this),
_bucketDBStatusDelegate(compReg, *this, _bucketDBUpdater),
- _idealStateManager(*this, *_bucketSpaceRepo, *_readOnlyBucketSpaceRepo, compReg, stripe_index),
+ _idealStateManager(_component, _component, ideal_state_metrics),
_messageSender(messageSender),
_stripe_host_info_notifier(stripe_host_info_notifier),
_externalOperationHandler(_component, _component, getMetrics(), getMessageSender(),
@@ -71,7 +72,7 @@ DistributorStripe::DistributorStripe(DistributorComponentRegister& compReg,
_bucketPriorityDb(std::make_unique<SimpleBucketPriorityDatabase>()),
_scanner(std::make_unique<SimpleMaintenanceScanner>(*_bucketPriorityDb, _idealStateManager, *_bucketSpaceRepo)),
_throttlingStarter(std::make_unique<ThrottlingOperationStarter>(_maintenanceOperationOwner)),
- _blockingStarter(std::make_unique<BlockingOperationStarter>(_pendingMessageTracker, *_operation_sequencer,
+ _blockingStarter(std::make_unique<BlockingOperationStarter>(_component, *_operation_sequencer,
*_throttlingStarter)),
_scheduler(std::make_unique<MaintenanceScheduler>(_idealStateManager, *_bucketPriorityDb, *_blockingStarter)),
_schedulingMode(MaintenanceScheduler::NORMAL_SCHEDULING_MODE),
@@ -748,7 +749,7 @@ void DistributorStripe::send_updated_host_info_if_required() {
if (_use_legacy_mode) {
_component.getStateUpdater().immediately_send_get_node_state_replies();
} else {
- _stripe_host_info_notifier.notify_stripe_wants_to_send_host_info(0); // TODO STRIPE correct stripe index!
+ _stripe_host_info_notifier.notify_stripe_wants_to_send_host_info(_stripe_index);
}
_must_send_updated_host_info = false;
}
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe.h b/storage/src/vespa/storage/distributor/distributor_stripe.h
index 347863b6d77..8f3de38aec7 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe.h
+++ b/storage/src/vespa/storage/distributor/distributor_stripe.h
@@ -59,6 +59,7 @@ class DistributorStripe final
public:
DistributorStripe(DistributorComponentRegister&,
DistributorMetricSet& metrics,
+ IdealStateMetricSet& ideal_state_metrics,
const NodeIdentity& node_identity,
framework::TickingThreadPool&,
DoneInitializeHandler&,
@@ -193,13 +194,13 @@ public:
bool tick() override;
private:
- // TODO reduce number of friends. DistributorStripe too popular for its own good.
- friend struct DistributorTest;
- friend class BucketDBUpdaterTest;
+ // TODO STRIPE: reduce number of friends. DistributorStripe too popular for its own good.
+ friend class Distributor;
friend class DistributorTestUtil;
+ friend class LegacyBucketDBUpdaterTest;
friend class MetricUpdateHook;
- friend class Distributor;
friend class MultiThreadedStripeAccessGuard;
+ friend struct LegacyDistributorTest;
bool handleMessage(const std::shared_ptr<api::StorageMessage>& msg);
bool isMaintenanceReply(const api::StorageReply& reply) const;
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_component.h b/storage/src/vespa/storage/distributor/distributor_stripe_component.h
index 31ee9ca88d2..e47d73cc4df 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe_component.h
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_component.h
@@ -140,6 +140,9 @@ public:
PendingMessageTracker& pending_message_tracker() noexcept override {
return getDistributor().getPendingMessageTracker();
}
+ const PendingMessageTracker& pending_message_tracker() const noexcept override {
+ return getDistributor().getPendingMessageTracker();
+ }
bool has_pending_message(uint16_t node_index,
const document::Bucket& bucket,
uint32_t message_type) const override;
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_interface.h b/storage/src/vespa/storage/distributor/distributor_stripe_interface.h
index bd9a4e1de57..24db212c120 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe_interface.h
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_interface.h
@@ -24,7 +24,6 @@ class PendingMessageTracker;
class DistributorStripeInterface : public DistributorStripeMessageSender
{
public:
- virtual PendingMessageTracker& getPendingMessageTracker() = 0;
virtual DistributorMetricSet& getMetrics() = 0;
virtual void enableClusterStateBundle(const lib::ClusterStateBundle& state) = 0;
virtual const lib::ClusterState* pendingClusterStateOrNull(const document::BucketSpace&) const = 0;
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_operation_context.h b/storage/src/vespa/storage/distributor/distributor_stripe_operation_context.h
index 8419abeadaa..518c83d7ffa 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe_operation_context.h
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_operation_context.h
@@ -22,7 +22,7 @@ class PendingMessageTracker;
*/
class DistributorStripeOperationContext : public DistributorOperationContext {
public:
- virtual ~DistributorStripeOperationContext() {}
+ virtual ~DistributorStripeOperationContext() = default;
virtual void update_bucket_database(const document::Bucket& bucket,
const BucketCopy& changed_node,
uint32_t update_flags = 0) = 0;
@@ -41,6 +41,7 @@ public:
uint8_t pri) = 0;
virtual OperationRoutingSnapshot read_snapshot_for_bucket(const document::Bucket& bucket) const = 0;
virtual PendingMessageTracker& pending_message_tracker() noexcept = 0;
+ virtual const PendingMessageTracker& pending_message_tracker() const noexcept = 0;
virtual bool has_pending_message(uint16_t node_index,
const document::Bucket& bucket,
uint32_t message_type) const = 0;
diff --git a/storage/src/vespa/storage/distributor/distributor_total_metrics.cpp b/storage/src/vespa/storage/distributor/distributor_total_metrics.cpp
new file mode 100644
index 00000000000..510b1df2ff3
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/distributor_total_metrics.cpp
@@ -0,0 +1,54 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "distributor_total_metrics.h"
+
+namespace storage::distributor {
+
+DistributorTotalMetrics::DistributorTotalMetrics(uint32_t num_distributor_stripes)
+ : DistributorMetricSet(),
+ _stripes_metrics(),
+ _bucket_db_updater_metrics()
+{
+ _stripes_metrics.reserve(num_distributor_stripes);
+ for (uint32_t i = 0; i < num_distributor_stripes; ++i) {
+ _stripes_metrics.emplace_back(std::make_shared<DistributorMetricSet>());
+ }
+}
+
+DistributorTotalMetrics::~DistributorTotalMetrics() = default;
+
+void
+DistributorTotalMetrics::aggregate_helper(DistributorMetricSet &total) const
+{
+ _bucket_db_updater_metrics.addToPart(total);
+ for (auto &stripe_metrics : _stripes_metrics) {
+ stripe_metrics->addToPart(total);
+ }
+}
+
+void
+DistributorTotalMetrics::aggregate()
+{
+ DistributorMetricSet::reset();
+ aggregate_helper(*this);
+}
+
+void
+DistributorTotalMetrics::addToSnapshot(Metric& m, std::vector<Metric::UP> &ownerList) const
+{
+ DistributorMetricSet total;
+ aggregate_helper(total);
+ total.addToSnapshot(m, ownerList);
+}
+
+void
+DistributorTotalMetrics::reset()
+{
+ DistributorMetricSet::reset();
+ _bucket_db_updater_metrics.reset();
+ for (auto &stripe_metrics : _stripes_metrics) {
+ stripe_metrics->reset();
+ }
+}
+
+}
diff --git a/storage/src/vespa/storage/distributor/distributor_total_metrics.h b/storage/src/vespa/storage/distributor/distributor_total_metrics.h
new file mode 100644
index 00000000000..f0457fe64c3
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/distributor_total_metrics.h
@@ -0,0 +1,29 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "distributormetricsset.h"
+
+namespace storage::distributor {
+
+/*
+ * Class presenting total metrics (as a DistributorMetricSet) to the
+ * metric framework, while managing a DistributorMetricSet for each
+ * stripe and an extra one for the top level bucket db updater.
+ */
+class DistributorTotalMetrics : public DistributorMetricSet
+{
+ std::vector<std::shared_ptr<DistributorMetricSet>> _stripes_metrics;
+ DistributorMetricSet _bucket_db_updater_metrics;
+ void aggregate_helper(DistributorMetricSet &total) const;
+public:
+ explicit DistributorTotalMetrics(uint32_t num_distributor_stripes);
+ ~DistributorTotalMetrics() override;
+ void aggregate();
+ void addToSnapshot(Metric& m, std::vector<Metric::UP> &ownerList) const override;
+ void reset() override;
+ DistributorMetricSet& stripe(uint32_t stripe_index) { return *_stripes_metrics[stripe_index]; }
+ DistributorMetricSet& bucket_db_updater_metrics() { return _bucket_db_updater_metrics; }
+};
+
+}
diff --git a/storage/src/vespa/storage/distributor/distributormessagesender.h b/storage/src/vespa/storage/distributor/distributormessagesender.h
index c39e3e8fe8a..c5a164ed036 100644
--- a/storage/src/vespa/storage/distributor/distributormessagesender.h
+++ b/storage/src/vespa/storage/distributor/distributormessagesender.h
@@ -26,6 +26,7 @@ public:
class DistributorStripeMessageSender : public DistributorMessageSender {
public:
+ virtual PendingMessageTracker& getPendingMessageTracker() = 0;
virtual const PendingMessageTracker& getPendingMessageTracker() const = 0;
virtual const OperationSequencer& operation_sequencer() const noexcept = 0;
};
diff --git a/storage/src/vespa/storage/distributor/externaloperationhandler.cpp b/storage/src/vespa/storage/distributor/externaloperationhandler.cpp
index e703c5bfdb8..2bc779aa47e 100644
--- a/storage/src/vespa/storage/distributor/externaloperationhandler.cpp
+++ b/storage/src/vespa/storage/distributor/externaloperationhandler.cpp
@@ -54,6 +54,9 @@ public:
const ClusterContext & cluster_context() const override {
return _node_ctx;
}
+ PendingMessageTracker& getPendingMessageTracker() override {
+ abort(); // Never called by the messages using this component.
+ }
const PendingMessageTracker& getPendingMessageTracker() const override {
abort(); // Never called by the messages using this component.
}
diff --git a/storage/src/vespa/storage/distributor/ideal_state_total_metrics.cpp b/storage/src/vespa/storage/distributor/ideal_state_total_metrics.cpp
new file mode 100644
index 00000000000..65dcad468fc
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/ideal_state_total_metrics.cpp
@@ -0,0 +1,51 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "ideal_state_total_metrics.h"
+
+namespace storage::distributor {
+
+void
+IdealStateTotalMetrics::aggregate_helper(IdealStateMetricSet& total) const
+{
+ for (auto& stripe_metrics : _stripes_metrics) {
+ stripe_metrics->addToPart(total);
+ }
+}
+
+IdealStateTotalMetrics::IdealStateTotalMetrics(uint32_t num_distributor_stripes)
+ : IdealStateMetricSet(),
+ _stripes_metrics()
+{
+ _stripes_metrics.reserve(num_distributor_stripes);
+ for (uint32_t i = 0; i < num_distributor_stripes; ++i) {
+ _stripes_metrics.emplace_back(std::make_shared<IdealStateMetricSet>());
+ }
+}
+
+IdealStateTotalMetrics::~IdealStateTotalMetrics() = default;
+
+void
+IdealStateTotalMetrics::aggregate()
+{
+ IdealStateMetricSet::reset();
+ aggregate_helper(*this);
+}
+
+void
+IdealStateTotalMetrics::addToSnapshot(Metric& m, std::vector<Metric::UP>& owner_list) const
+{
+ IdealStateMetricSet total;
+ aggregate_helper(total);
+ total.addToSnapshot(m, owner_list);
+}
+
+void
+IdealStateTotalMetrics::reset()
+{
+ IdealStateMetricSet::reset();
+ for (auto& stripe_metrics : _stripes_metrics) {
+ stripe_metrics->reset();
+ }
+}
+
+}
diff --git a/storage/src/vespa/storage/distributor/ideal_state_total_metrics.h b/storage/src/vespa/storage/distributor/ideal_state_total_metrics.h
new file mode 100644
index 00000000000..c3207baa2f0
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/ideal_state_total_metrics.h
@@ -0,0 +1,28 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "idealstatemetricsset.h"
+
+namespace storage::distributor {
+
+/*
+ * Class presenting total metrics (as an IdealStateMetricSet) to the metric framework,
+ * while managing an IdealStateMetricSet for each distributor stripe.
+ */
+class IdealStateTotalMetrics : public IdealStateMetricSet {
+private:
+ std::vector<std::shared_ptr<IdealStateMetricSet>> _stripes_metrics;
+
+ void aggregate_helper(IdealStateMetricSet& total) const;
+
+public:
+ explicit IdealStateTotalMetrics(uint32_t num_distributor_stripes);
+ ~IdealStateTotalMetrics() override;
+ void aggregate();
+ void addToSnapshot(Metric& m, std::vector<Metric::UP>& owner_list) const override;
+ void reset() override;
+ IdealStateMetricSet& stripe(uint32_t stripe_index) { return *_stripes_metrics[stripe_index]; }
+};
+
+}
diff --git a/storage/src/vespa/storage/distributor/idealstatemanager.cpp b/storage/src/vespa/storage/distributor/idealstatemanager.cpp
index 013551b8505..65e018765fe 100644
--- a/storage/src/vespa/storage/distributor/idealstatemanager.cpp
+++ b/storage/src/vespa/storage/distributor/idealstatemanager.cpp
@@ -25,21 +25,14 @@ namespace storage {
namespace distributor {
IdealStateManager::IdealStateManager(
- DistributorStripeInterface& owner,
- DistributorBucketSpaceRepo& bucketSpaceRepo,
- DistributorBucketSpaceRepo& readOnlyBucketSpaceRepo,
- DistributorComponentRegister& compReg,
- uint32_t stripe_index)
- : _metrics(new IdealStateMetricSet),
- _distributorComponent(owner, bucketSpaceRepo, readOnlyBucketSpaceRepo, compReg, "Ideal state manager"),
- _bucketSpaceRepo(bucketSpaceRepo),
+ const DistributorNodeContext& node_ctx,
+ DistributorStripeOperationContext& op_ctx,
+ IdealStateMetricSet& metrics)
+ : _metrics(metrics),
+ _node_ctx(node_ctx),
+ _op_ctx(op_ctx),
_has_logged_phantom_replica_warning(false)
{
- if (stripe_index == 0) {
- // TODO STRIPE: Add proper handling of metrics across distributor stripes
- _distributorComponent.registerMetric(*_metrics);
- }
-
LOG(debug, "Adding BucketStateStateChecker to state checkers");
_stateCheckers.push_back(StateChecker::SP(new BucketStateStateChecker()));
@@ -167,7 +160,7 @@ IdealStateManager::generateHighestPriority(
const document::Bucket &bucket,
NodeMaintenanceStatsTracker& statsTracker) const
{
- auto &distributorBucketSpace(_bucketSpaceRepo.get(bucket.getBucketSpace()));
+ auto& distributorBucketSpace = _op_ctx.bucket_space_repo().get(bucket.getBucketSpace());
StateChecker::Context c(node_context(), operation_context(), distributorBucketSpace, statsTracker, bucket);
fillParentAndChildBuckets(c);
fillSiblingBucket(c);
@@ -204,7 +197,7 @@ IdealStateManager::generateInterceptingSplit(BucketSpace bucketSpace,
{
NodeMaintenanceStatsTracker statsTracker;
document::Bucket bucket(bucketSpace, e.getBucketId());
- auto &distributorBucketSpace(_bucketSpaceRepo.get(bucket.getBucketSpace()));
+ auto& distributorBucketSpace = _op_ctx.bucket_space_repo().get(bucket.getBucketSpace());
StateChecker::Context c(node_context(), operation_context(), distributorBucketSpace, statsTracker, bucket);
if (e.valid()) {
c.entry = e;
@@ -239,7 +232,7 @@ std::vector<MaintenanceOperation::SP>
IdealStateManager::generateAll(const document::Bucket &bucket,
NodeMaintenanceStatsTracker& statsTracker) const
{
- auto &distributorBucketSpace(_bucketSpaceRepo.get(bucket.getBucketSpace()));
+ auto& distributorBucketSpace = _op_ctx.bucket_space_repo().get(bucket.getBucketSpace());
StateChecker::Context c(node_context(), operation_context(), distributorBucketSpace, statsTracker, bucket);
fillParentAndChildBuckets(c);
fillSiblingBucket(c);
@@ -291,7 +284,7 @@ IdealStateManager::getBucketStatus(
void IdealStateManager::dump_bucket_space_db_status(document::BucketSpace bucket_space, std::ostream& out) const {
StatusBucketVisitor proc(*this, bucket_space, out);
- auto &distributorBucketSpace(_bucketSpaceRepo.get(bucket_space));
+ auto& distributorBucketSpace = _op_ctx.bucket_space_repo().get(bucket_space);
distributorBucketSpace.getBucketDatabase().forEach(proc);
}
@@ -299,7 +292,7 @@ void IdealStateManager::getBucketStatus(std::ostream& out) const {
LOG(debug, "Dumping bucket database valid at cluster state version %u",
operation_context().cluster_state_bundle().getVersion());
- for (auto& space : _bucketSpaceRepo) {
+ for (auto& space : _op_ctx.bucket_space_repo()) {
out << "<h2>" << document::FixedBucketSpaces::to_string(space.first) << " - " << space.first << "</h2>\n";
dump_bucket_space_db_status(space.first, out);
}
diff --git a/storage/src/vespa/storage/distributor/idealstatemanager.h b/storage/src/vespa/storage/distributor/idealstatemanager.h
index 041e009ee9f..c0fa7dd70ab 100644
--- a/storage/src/vespa/storage/distributor/idealstatemanager.h
+++ b/storage/src/vespa/storage/distributor/idealstatemanager.h
@@ -33,11 +33,9 @@ class IdealStateManager : public MaintenancePriorityGenerator,
{
public:
- IdealStateManager(DistributorStripeInterface& owner,
- DistributorBucketSpaceRepo& bucketSpaceRepo,
- DistributorBucketSpaceRepo& readOnlyBucketSpaceRepo,
- DistributorComponentRegister& compReg,
- uint32_t stripe_index = 0);
+ IdealStateManager(const DistributorNodeContext& node_ctx,
+ DistributorStripeOperationContext& op_ctx,
+ IdealStateMetricSet& metrics);
~IdealStateManager() override;
@@ -66,18 +64,18 @@ public:
const BucketDatabase::Entry& e,
api::StorageMessage::Priority pri);
- IdealStateMetricSet& getMetrics() { return *_metrics; }
+ IdealStateMetricSet& getMetrics() { return _metrics; }
void dump_bucket_space_db_status(document::BucketSpace bucket_space, std::ostream& out) const;
void getBucketStatus(std::ostream& out) const;
- const DistributorNodeContext& node_context() const { return _distributorComponent; }
- DistributorStripeOperationContext& operation_context() { return _distributorComponent; }
- const DistributorStripeOperationContext& operation_context() const { return _distributorComponent; }
- DistributorBucketSpaceRepo &getBucketSpaceRepo() { return _bucketSpaceRepo; }
- const DistributorBucketSpaceRepo &getBucketSpaceRepo() const { return _bucketSpaceRepo; }
+ const DistributorNodeContext& node_context() const { return _node_ctx; }
+ DistributorStripeOperationContext& operation_context() { return _op_ctx; }
+ const DistributorStripeOperationContext& operation_context() const { return _op_ctx; }
+ DistributorBucketSpaceRepo &getBucketSpaceRepo() { return _op_ctx.bucket_space_repo(); }
+ const DistributorBucketSpaceRepo &getBucketSpaceRepo() const { return _op_ctx.bucket_space_repo(); }
private:
void verify_only_live_nodes_in_context(const StateChecker::Context& c) const;
@@ -90,7 +88,7 @@ private:
BucketDatabase::Entry* getEntryForPrimaryBucket(StateChecker::Context& c) const;
- std::shared_ptr<IdealStateMetricSet> _metrics;
+ IdealStateMetricSet& _metrics;
document::BucketId _lastPrioritizedBucket;
// Prioritized of state checkers that generate operations
@@ -98,8 +96,8 @@ private:
std::vector<StateChecker::SP> _stateCheckers;
SplitBucketStateChecker* _splitBucketStateChecker;
- DistributorStripeComponent _distributorComponent;
- DistributorBucketSpaceRepo& _bucketSpaceRepo;
+ const DistributorNodeContext& _node_ctx;
+ DistributorStripeOperationContext& _op_ctx;
mutable bool _has_logged_phantom_replica_warning;
bool iAmUp() const;
diff --git a/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.cpp b/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.cpp
index b954ef93c76..4e7f7d9d89d 100644
--- a/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.cpp
+++ b/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.cpp
@@ -7,6 +7,39 @@ namespace storage::distributor {
const NodeMaintenanceStats NodeMaintenanceStatsTracker::_emptyNodeMaintenanceStats;
+void
+NodeMaintenanceStats::merge(const NodeMaintenanceStats& rhs)
+{
+ movingOut += rhs.movingOut;
+ syncing += rhs.syncing;
+ copyingIn += rhs.copyingIn;
+ copyingOut += rhs.copyingOut;
+ total += rhs.total;
+}
+
+namespace {
+
+void
+merge_bucket_spaces_stats(NodeMaintenanceStatsTracker::BucketSpacesStats& dest,
+ const NodeMaintenanceStatsTracker::BucketSpacesStats& src)
+{
+ for (const auto& entry : src) {
+ auto bucket_space = entry.first;
+ dest[bucket_space].merge(entry.second);
+ }
+}
+
+}
+
+void
+NodeMaintenanceStatsTracker::merge(const NodeMaintenanceStatsTracker& rhs)
+{
+ for (const auto& entry : rhs._stats) {
+ auto node_index = entry.first;
+ merge_bucket_spaces_stats(_stats[node_index], entry.second);
+ }
+}
+
std::ostream&
operator<<(std::ostream& os, const NodeMaintenanceStats& stats)
{
diff --git a/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h b/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h
index faf253fc84c..6399e53089b 100644
--- a/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h
+++ b/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h
@@ -37,6 +37,8 @@ struct NodeMaintenanceStats
bool operator!=(const NodeMaintenanceStats& other) const noexcept {
return !(*this == other);
}
+
+ void merge(const NodeMaintenanceStats& rhs);
};
std::ostream& operator<<(std::ostream&, const NodeMaintenanceStats&);
@@ -93,6 +95,11 @@ public:
const PerNodeStats& perNodeStats() const {
return _stats;
}
+
+ bool operator==(const NodeMaintenanceStatsTracker& rhs) const {
+ return _stats == rhs._stats;
+ }
+ void merge(const NodeMaintenanceStatsTracker& rhs);
};
} // storage::distributor
diff --git a/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp b/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp
index 15a57c1e7ee..2bfce9569cc 100644
--- a/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp
+++ b/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp
@@ -19,6 +19,28 @@ SimpleMaintenanceScanner::SimpleMaintenanceScanner(BucketPriorityDatabase& bucke
SimpleMaintenanceScanner::~SimpleMaintenanceScanner() = default;
+bool
+SimpleMaintenanceScanner::GlobalMaintenanceStats::operator==(const GlobalMaintenanceStats& rhs) const
+{
+ return pending == rhs.pending;
+}
+
+void
+SimpleMaintenanceScanner::GlobalMaintenanceStats::merge(const GlobalMaintenanceStats& rhs)
+{
+ assert(pending.size() == rhs.pending.size());
+ for (size_t i = 0; i < pending.size(); ++i) {
+ pending[i] += rhs.pending[i];
+ }
+}
+
+void
+SimpleMaintenanceScanner::PendingMaintenanceStats::merge(const PendingMaintenanceStats& rhs)
+{
+ global.merge(rhs.global);
+ perNodeStats.merge(rhs.perNodeStats);
+}
+
SimpleMaintenanceScanner::PendingMaintenanceStats::PendingMaintenanceStats() = default;
SimpleMaintenanceScanner::PendingMaintenanceStats::~PendingMaintenanceStats() = default;
SimpleMaintenanceScanner::PendingMaintenanceStats::PendingMaintenanceStats(const PendingMaintenanceStats &) = default;
diff --git a/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.h b/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.h
index 254b3244171..69e63fd4c65 100644
--- a/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.h
+++ b/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.h
@@ -18,6 +18,9 @@ public:
GlobalMaintenanceStats()
: pending(MaintenanceOperation::OPERATION_COUNT)
{ }
+
+ bool operator==(const GlobalMaintenanceStats& rhs) const;
+ void merge(const GlobalMaintenanceStats& rhs);
};
struct PendingMaintenanceStats {
PendingMaintenanceStats();
@@ -26,6 +29,8 @@ public:
~PendingMaintenanceStats();
GlobalMaintenanceStats global;
NodeMaintenanceStatsTracker perNodeStats;
+
+ void merge(const PendingMaintenanceStats& rhs);
};
private:
BucketPriorityDatabase& _bucketPriorityDb;
diff --git a/storage/src/vespa/storage/distributor/min_replica_provider.cpp b/storage/src/vespa/storage/distributor/min_replica_provider.cpp
new file mode 100644
index 00000000000..c9929940560
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/min_replica_provider.cpp
@@ -0,0 +1,19 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "min_replica_provider.h"
+
+namespace storage::distributor {
+
+void
+merge_min_replica_stats(std::unordered_map<uint16_t, uint32_t>& dest,
+ const std::unordered_map<uint16_t, uint32_t>& src)
+{
+ for (const auto& entry : src) {
+ auto node_index = entry.first;
+ auto itr = dest.find(node_index);
+ auto new_min_replica = (itr != dest.end()) ? std::min(itr->second, entry.second) : entry.second;
+ dest[node_index] = new_min_replica;
+ }
+}
+
+}
diff --git a/storage/src/vespa/storage/distributor/min_replica_provider.h b/storage/src/vespa/storage/distributor/min_replica_provider.h
index 6d644f4e9d4..ba946cd5a7f 100644
--- a/storage/src/vespa/storage/distributor/min_replica_provider.h
+++ b/storage/src/vespa/storage/distributor/min_replica_provider.h
@@ -4,8 +4,7 @@
#include <stdint.h>
#include <unordered_map>
-namespace storage {
-namespace distributor {
+namespace storage::distributor {
class MinReplicaProvider
{
@@ -21,5 +20,8 @@ public:
virtual std::unordered_map<uint16_t, uint32_t> getMinReplica() const = 0;
};
-} // distributor
-} // storage
+void merge_min_replica_stats(std::unordered_map<uint16_t, uint32_t>& dest,
+ const std::unordered_map<uint16_t, uint32_t>& src);
+
+}
+
diff --git a/storage/src/vespa/storage/distributor/operationowner.h b/storage/src/vespa/storage/distributor/operationowner.h
index d3f46343ebc..c469b35a8dc 100644
--- a/storage/src/vespa/storage/distributor/operationowner.h
+++ b/storage/src/vespa/storage/distributor/operationowner.h
@@ -43,6 +43,10 @@ public:
return _sender.cluster_context();
}
+ PendingMessageTracker& getPendingMessageTracker() override {
+ return _sender.getPendingMessageTracker();
+ }
+
const PendingMessageTracker& getPendingMessageTracker() const override {
return _sender.getPendingMessageTracker();
}
diff --git a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
index 9077f3dc288..db30fcc7196 100644
--- a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
@@ -86,6 +86,10 @@ struct IntermediateMessageSender : DistributorStripeMessageSender {
return forward.cluster_context();
}
+ PendingMessageTracker& getPendingMessageTracker() override {
+ return forward.getPendingMessageTracker();
+ }
+
const PendingMessageTracker& getPendingMessageTracker() const override {
return forward.getPendingMessageTracker();
}
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.cpp
index 1a48df0fd7c..f11d1c26da2 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.cpp
@@ -156,41 +156,26 @@ public:
}
};
-// TODO STRIPE replace with check for pending cluster state transition.
-// Null-bucket messages are not intercepted nor observeable by stripes,
-// only by the top-level distributor.
-bool
-checkNullBucketRequestBucketInfoMessage(uint16_t node,
- document::BucketSpace bucketSpace,
- const PendingMessageTracker& tracker)
-{
- RequestBucketInfoChecker rchk;
- // Check messages sent to null-bucket (i.e. any bucket) for the node.
- document::Bucket nullBucket(bucketSpace, document::BucketId());
- tracker.checkPendingMessages(node, nullBucket, rchk);
- return rchk.blocked;
-}
-
}
bool
IdealStateOperation::checkBlock(const document::Bucket &bucket,
- const PendingMessageTracker& tracker,
+ const DistributorStripeOperationContext& ctx,
const OperationSequencer& seq) const
{
if (seq.is_blocked(bucket)) {
return true;
}
+ if (ctx.pending_cluster_state_or_null(bucket.getBucketSpace())) {
+ return true;
+ }
IdealStateOpChecker ichk(*this);
const std::vector<uint16_t>& nodes(getNodes());
for (auto node : nodes) {
- tracker.checkPendingMessages(node, bucket, ichk);
+ ctx.pending_message_tracker().checkPendingMessages(node, bucket, ichk);
if (ichk.blocked) {
return true;
}
- if (checkNullBucketRequestBucketInfoMessage(node, bucket.getBucketSpace(), tracker)) {
- return true;
- }
}
return false;
}
@@ -198,32 +183,25 @@ IdealStateOperation::checkBlock(const document::Bucket &bucket,
bool
IdealStateOperation::checkBlockForAllNodes(
const document::Bucket &bucket,
- const PendingMessageTracker& tracker,
+ const DistributorStripeOperationContext& ctx,
const OperationSequencer& seq) const
{
if (seq.is_blocked(bucket)) {
return true;
}
- IdealStateOpChecker ichk(*this);
- // Check messages sent to _any node_ for _this_ particular bucket.
- tracker.checkPendingMessages(bucket, ichk);
- if (ichk.blocked) {
+ if (ctx.pending_cluster_state_or_null(bucket.getBucketSpace())) {
return true;
}
- const std::vector<uint16_t>& nodes(getNodes());
- for (auto node : nodes) {
- if (checkNullBucketRequestBucketInfoMessage(node, bucket.getBucketSpace(), tracker)) {
- return true;
- }
- }
- return false;
+ IdealStateOpChecker ichk(*this);
+ // Check messages sent to _any node_ for _this_ particular bucket.
+ ctx.pending_message_tracker().checkPendingMessages(bucket, ichk);
+ return ichk.blocked;
}
-
bool
-IdealStateOperation::isBlocked(const PendingMessageTracker& tracker, const OperationSequencer& op_seq) const
+IdealStateOperation::isBlocked(const DistributorStripeOperationContext& ctx, const OperationSequencer& op_seq) const
{
- return checkBlock(getBucket(), tracker, op_seq);
+ return checkBlock(getBucket(), ctx, op_seq);
}
std::string
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.h b/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.h
index 0e45d7f3b3a..d41640b468e 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.h
@@ -182,7 +182,7 @@ public:
* Returns true if we are blocked to start this operation given
* the pending messages.
*/
- bool isBlocked(const PendingMessageTracker& pendingMessages, const OperationSequencer&) const override;
+ bool isBlocked(const DistributorStripeOperationContext& ctx, const OperationSequencer&) const override;
/**
Returns the priority we should send messages with.
@@ -234,10 +234,10 @@ protected:
* the set of messages checked.
*/
bool checkBlock(const document::Bucket& bucket,
- const PendingMessageTracker& tracker,
+ const DistributorStripeOperationContext& ctx,
const OperationSequencer&) const;
bool checkBlockForAllNodes(const document::Bucket& bucket,
- const PendingMessageTracker& tracker,
+ const DistributorStripeOperationContext& ctx,
const OperationSequencer&) const;
};
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp
index d9e411bc44e..15d3129b309 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp
@@ -161,10 +161,10 @@ JoinOperation::getJoinBucket(size_t idx) const
}
bool
-JoinOperation::isBlocked(const PendingMessageTracker& tracker, const OperationSequencer& op_seq) const
+JoinOperation::isBlocked(const DistributorStripeOperationContext& ctx, const OperationSequencer& op_seq) const
{
- return (checkBlock(getBucket(), tracker, op_seq) ||
- checkBlock(getJoinBucket(0), tracker, op_seq) ||
- (_bucketsToJoin.size() > 1 && checkBlock(getJoinBucket(1), tracker, op_seq)));
+ return (checkBlock(getBucket(), ctx, op_seq) ||
+ checkBlock(getJoinBucket(0), ctx, op_seq) ||
+ (_bucketsToJoin.size() > 1 && checkBlock(getJoinBucket(1), ctx, op_seq)));
}
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.h b/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.h
index 5796b8d3fa1..4515092cfef 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.h
@@ -35,7 +35,7 @@ public:
return JOIN_BUCKET;
}
- bool isBlocked(const PendingMessageTracker& pendingMessages,
+ bool isBlocked(const DistributorStripeOperationContext& ctx,
const OperationSequencer& op_seq) const override;
protected:
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp
index 27e203a9060..749787c51b9 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp
@@ -235,7 +235,7 @@ MergeOperation::deleteSourceOnlyNodes(
BucketAndNodes(getBucket(), sourceOnlyNodes));
// Must not send removes to source only copies if something has caused
// pending load to the copy after the merge was sent!
- if (_removeOperation->isBlocked(sender.getPendingMessageTracker(), sender.operation_sequencer())) {
+ if (_removeOperation->isBlocked(_manager->operation_context(), sender.operation_sequencer())) {
LOG(debug, "Source only removal for %s was blocked by a pending operation",
getBucketId().toString().c_str());
_ok = false;
@@ -324,7 +324,7 @@ bool MergeOperation::shouldBlockThisOperation(uint32_t messageType, uint8_t pri)
return IdealStateOperation::shouldBlockThisOperation(messageType, pri);
}
-bool MergeOperation::isBlocked(const PendingMessageTracker& pending_tracker,
+bool MergeOperation::isBlocked(const DistributorStripeOperationContext& ctx,
const OperationSequencer& op_seq) const {
// To avoid starvation of high priority global bucket merges, we do not consider
// these for blocking due to a node being "busy" (usually caused by a full merge
@@ -338,14 +338,14 @@ bool MergeOperation::isBlocked(const PendingMessageTracker& pending_tracker,
// 2. Global bucket merges have high priority and will most likely be allowed
// to enter the merge throttler queues, displacing lower priority merges.
if (!is_global_bucket_merge()) {
- const auto& node_info = pending_tracker.getNodeInfo();
+ const auto& node_info = ctx.pending_message_tracker().getNodeInfo();
for (auto node : getNodes()) {
if (node_info.isBusy(node)) {
return true;
}
}
}
- return IdealStateOperation::isBlocked(pending_tracker, op_seq);
+ return IdealStateOperation::isBlocked(ctx, op_seq);
}
bool MergeOperation::is_global_bucket_merge() const noexcept {
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.h b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.h
index 11b5494fd9b..945b9318482 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.h
@@ -47,7 +47,7 @@ public:
std::vector<MergeMetaData>&);
bool shouldBlockThisOperation(uint32_t messageType, uint8_t pri) const override;
- bool isBlocked(const PendingMessageTracker& pendingMessages, const OperationSequencer&) const override;
+ bool isBlocked(const DistributorStripeOperationContext& ctx, const OperationSequencer&) const override;
private:
static void addIdealNodes(
const std::vector<uint16_t>& idealNodes,
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp
index 437c4ed6033..d7f03740e4d 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp
@@ -143,9 +143,9 @@ SplitOperation::onReceive(DistributorStripeMessageSender&, const api::StorageRep
}
bool
-SplitOperation::isBlocked(const PendingMessageTracker& tracker, const OperationSequencer& op_seq) const
+SplitOperation::isBlocked(const DistributorStripeOperationContext& ctx, const OperationSequencer& op_seq) const
{
- return checkBlockForAllNodes(getBucket(), tracker, op_seq);
+ return checkBlockForAllNodes(getBucket(), ctx, op_seq);
}
bool
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.h b/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.h
index eccbdc69869..5581edf41bd 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.h
@@ -20,7 +20,7 @@ public:
void onReceive(DistributorStripeMessageSender& sender, const std::shared_ptr<api::StorageReply> &) override;
const char* getName() const override { return "split"; };
Type getType() const override { return SPLIT_BUCKET; }
- bool isBlocked(const PendingMessageTracker&, const OperationSequencer&) const override;
+ bool isBlocked(const DistributorStripeOperationContext&, const OperationSequencer&) const override;
bool shouldBlockThisOperation(uint32_t, uint8_t) const override;
protected:
MessageTracker _tracker;
diff --git a/storage/src/vespa/storage/distributor/operations/operation.h b/storage/src/vespa/storage/distributor/operations/operation.h
index 5099762fd6a..18f7214c498 100644
--- a/storage/src/vespa/storage/distributor/operations/operation.h
+++ b/storage/src/vespa/storage/distributor/operations/operation.h
@@ -16,6 +16,7 @@ class StorageComponent;
namespace distributor {
+class DistributorStripeOperationContext;
class PendingMessageTracker;
class OperationSequencer;
@@ -61,7 +62,7 @@ public:
* Returns true if we are blocked to start this operation given
* the pending messages.
*/
- virtual bool isBlocked(const PendingMessageTracker&, const OperationSequencer&) const {
+ virtual bool isBlocked(const DistributorStripeOperationContext&, const OperationSequencer&) const {
return false;
}
diff --git a/storage/src/vespa/storage/storageserver/mergethrottler.cpp b/storage/src/vespa/storage/storageserver/mergethrottler.cpp
index dc8457769a2..1761abfc097 100644
--- a/storage/src/vespa/storage/storageserver/mergethrottler.cpp
+++ b/storage/src/vespa/storage/storageserver/mergethrottler.cpp
@@ -71,6 +71,7 @@ MergeThrottler::ChainedMergeState::~ChainedMergeState() = default;
MergeThrottler::Metrics::Metrics(metrics::MetricSet* owner)
: metrics::MetricSet("mergethrottler", {}, "", owner),
averageQueueWaitingTime("averagequeuewaitingtime", {}, "Average time a merge spends in the throttler queue", this),
+ queueSize("queuesize", {}, "Length of merge queue", this),
bounced_due_to_back_pressure("bounced_due_to_back_pressure", {}, "Number of merges bounced due to resource exhaustion back-pressure", this),
chaining("mergechains", this),
local("locallyexecutedmerges", this)
@@ -416,6 +417,7 @@ MergeThrottler::enqueueMerge(
return;
}
_queue.insert(MergePriorityQueue::value_type(msg, _queueSequence++));
+ _metrics->queueSize.set(_queue.size());
}
bool
diff --git a/storage/src/vespa/storage/storageserver/mergethrottler.h b/storage/src/vespa/storage/storageserver/mergethrottler.h
index e8815eee680..0c608f29196 100644
--- a/storage/src/vespa/storage/storageserver/mergethrottler.h
+++ b/storage/src/vespa/storage/storageserver/mergethrottler.h
@@ -57,12 +57,13 @@ public:
MergeFailureMetrics failures;
MergeOperationMetrics(const std::string& name, metrics::MetricSet* owner);
- ~MergeOperationMetrics();
+ ~MergeOperationMetrics() override;
};
class Metrics : public metrics::MetricSet {
public:
metrics::DoubleAverageMetric averageQueueWaitingTime;
+ metrics::LongValueMetric queueSize;
metrics::LongCountMetric bounced_due_to_back_pressure;
MergeOperationMetrics chaining;
MergeOperationMetrics local;
diff --git a/storageapi/src/vespa/storageapi/message/bucket.cpp b/storageapi/src/vespa/storageapi/message/bucket.cpp
index 2e2ca82079d..2323a1ab0a4 100644
--- a/storageapi/src/vespa/storageapi/message/bucket.cpp
+++ b/storageapi/src/vespa/storageapi/message/bucket.cpp
@@ -476,6 +476,12 @@ RequestBucketInfoCommand::getBucket() const
return document::Bucket(_bucketSpace, document::BucketId());
}
+document::BucketId
+RequestBucketInfoCommand::super_bucket_id() const
+{
+ return _buckets.empty() ? document::BucketId() : _buckets[0];
+}
+
void
RequestBucketInfoCommand::print(std::ostream& out, bool verbose,
const std::string& indent) const
@@ -510,7 +516,8 @@ std::ostream& operator<<(std::ostream& out, const RequestBucketInfoReply::Entry&
RequestBucketInfoReply::RequestBucketInfoReply(const RequestBucketInfoCommand& cmd)
: StorageReply(cmd),
_buckets(),
- _full_bucket_fetch(cmd.hasSystemState())
+ _full_bucket_fetch(cmd.hasSystemState()),
+ _super_bucket_id(cmd.super_bucket_id())
{ }
RequestBucketInfoReply::~RequestBucketInfoReply() = default;
diff --git a/storageapi/src/vespa/storageapi/message/bucket.h b/storageapi/src/vespa/storageapi/message/bucket.h
index 61766fb1f11..98445745753 100644
--- a/storageapi/src/vespa/storageapi/message/bucket.h
+++ b/storageapi/src/vespa/storageapi/message/bucket.h
@@ -358,6 +358,7 @@ public:
const vespalib::string& getDistributionHash() const { return _distributionHash; }
document::BucketSpace getBucketSpace() const { return _bucketSpace; }
document::Bucket getBucket() const override;
+ document::BucketId super_bucket_id() const;
void print(std::ostream& out, bool verbose, const std::string& indent) const override;
@@ -388,6 +389,7 @@ public:
private:
EntryVector _buckets;
bool _full_bucket_fetch;
+ document::BucketId _super_bucket_id;
public:
@@ -396,6 +398,7 @@ public:
const EntryVector & getBucketInfo() const { return _buckets; }
EntryVector & getBucketInfo() { return _buckets; }
[[nodiscard]] bool full_bucket_fetch() const noexcept { return _full_bucket_fetch; }
+ const document::BucketId& super_bucket_id() const { return _super_bucket_id; }
void print(std::ostream& out, bool verbose, const std::string& indent) const override;
DECLARE_STORAGEREPLY(RequestBucketInfoReply, onRequestBucketInfoReply)
};
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/api/AthenzAccessToken.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/api/AthenzAccessToken.java
index c085be7c205..561b20a9c8a 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/api/AthenzAccessToken.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/api/AthenzAccessToken.java
@@ -6,7 +6,10 @@ import com.auth0.jwt.interfaces.DecodedJWT;
import com.yahoo.vespa.athenz.utils.AthenzIdentities;
import java.time.Instant;
+import java.util.List;
import java.util.Objects;
+import java.util.Optional;
+import java.util.stream.Collectors;
/**
* Represents an Athenz Access Token
@@ -18,6 +21,8 @@ public class AthenzAccessToken {
public static final String HTTP_HEADER_NAME = "Authorization";
private static final String BEARER_TOKEN_PREFIX = "Bearer ";
+ private static final String SCOPE_CLAIM = "scp";
+ private static final String AUDIENCE_CLAIM = "aud";
private final String value;
private volatile DecodedJWT jwt;
@@ -43,6 +48,12 @@ public class AthenzAccessToken {
return jwt().getExpiresAt().toInstant();
}
public AthenzIdentity getAthenzIdentity() { return AthenzIdentities.from(jwt().getClaim("client_id").asString()); }
+ public List<AthenzRole> roles() {
+ String domain = Optional.ofNullable(jwt().getClaim(AUDIENCE_CLAIM).asString()).orElse("");
+ return Optional.ofNullable(jwt().getClaim(SCOPE_CLAIM).asList(String.class)).orElse(List.of()).stream()
+ .map(role -> new AthenzRole(domain, role))
+ .collect(Collectors.toList());
+ }
private DecodedJWT jwt() {
if (jwt == null) {
diff --git a/vespa-documentgen-plugin/src/main/java/com/yahoo/vespa/DocumentGenMojo.java b/vespa-documentgen-plugin/src/main/java/com/yahoo/vespa/DocumentGenMojo.java
index 3faf47ccfa9..b086e0d8a26 100644
--- a/vespa-documentgen-plugin/src/main/java/com/yahoo/vespa/DocumentGenMojo.java
+++ b/vespa-documentgen-plugin/src/main/java/com/yahoo/vespa/DocumentGenMojo.java
@@ -957,7 +957,7 @@ public class DocumentGenMojo extends AbstractMojo {
if (DataType.BYTE.equals(dt)) return "com.yahoo.document.DataType.BYTE";
if (DataType.BOOL.equals(dt)) return "com.yahoo.document.DataType.BOOL";
if (DataType.TAG.equals(dt)) return "com.yahoo.document.DataType.TAG";
- if (dt instanceof StructDataType) return "new com.yahoo.document.StructDataType(\""+dt.getName()+"\")";
+ if (dt instanceof StructDataType) return className(dt.getName()) +".type";
if (dt instanceof WeightedSetDataType) return "new com.yahoo.document.WeightedSetDataType("+toJavaReference(((WeightedSetDataType)dt).getNestedType())+", "+
((WeightedSetDataType)dt).createIfNonExistent()+", "+ ((WeightedSetDataType)dt).removeIfZero()+","+dt.getId()+")";
if (dt instanceof ArrayDataType) return "new com.yahoo.document.ArrayDataType("+toJavaReference(((ArrayDataType)dt).getNestedType())+")";
diff --git a/vespa-feed-client-cli/CMakeLists.txt b/vespa-feed-client-cli/CMakeLists.txt
index 3967c135d1c..1d4966ac4a2 100644
--- a/vespa-feed-client-cli/CMakeLists.txt
+++ b/vespa-feed-client-cli/CMakeLists.txt
@@ -1,5 +1,5 @@
# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-install_java_artifact(vespa-feed-client-cli)
+install_fat_java_artifact(vespa-feed-client-cli)
vespa_install_script(src/main/sh/vespa-feed-client.sh vespa-feed-client bin)
install(FILES src/main/resources/logging.properties DESTINATION conf/vespa-feed-client)
diff --git a/vespa-feed-client-cli/pom.xml b/vespa-feed-client-cli/pom.xml
index 9fd59f1cfa4..ebbea35f4a4 100644
--- a/vespa-feed-client-cli/pom.xml
+++ b/vespa-feed-client-cli/pom.xml
@@ -71,25 +71,38 @@
</configuration>
</plugin>
<plugin>
+ <groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
- <configuration>
- <archive>
- <manifest>
- <mainClass>ai.vespa.feed.client.CliClient</mainClass>
- </manifest>
- </archive>
- <descriptorRefs>
- <descriptorRef>jar-with-dependencies</descriptorRef>
- </descriptorRefs>
- <appendAssemblyId>false</appendAssemblyId>
- </configuration>
<executions>
<execution>
- <id>make-assembly</id>
+ <id>make-fatjar</id>
+ <phase>prepare-package</phase>
+ <goals>
+ <goal>single</goal>
+ </goals>
+ <configuration>
+ <attach>false</attach>
+ <archive>
+ <manifest>
+ <mainClass>ai.vespa.feed.client.CliClient</mainClass>
+ </manifest>
+ </archive>
+ <descriptorRefs>
+ <descriptorRef>jar-with-dependencies</descriptorRef>
+ </descriptorRefs>
+ </configuration>
+ </execution>
+ <execution>
+ <id>make-zip</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
+ <configuration>
+ <descriptors>
+ <descriptor>src/maven/create-zip.xml</descriptor>
+ </descriptors>
+ </configuration>
</execution>
</executions>
</plugin>
diff --git a/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/CliClient.java b/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/CliClient.java
index e3f726eaf11..83abe0bb872 100644
--- a/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/CliClient.java
+++ b/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/CliClient.java
@@ -8,11 +8,14 @@ import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.SSLSession;
import java.io.IOException;
import java.io.InputStream;
+import java.io.OutputStream;
import java.io.PrintStream;
import java.nio.file.Files;
+import java.time.Duration;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
+import java.util.concurrent.atomic.AtomicInteger;
/**
* Main method for CLI interface
@@ -55,11 +58,12 @@ public class CliClient {
return 0;
}
try (InputStream in = createFeedInputStream(cliArgs);
- JsonStreamFeeder feeder = createJsonFeeder(cliArgs)) {
+ FeedClient feedClient = createFeedClient(cliArgs);
+ JsonFeeder feeder = createJsonFeeder(feedClient, cliArgs)) {
+ long startNanos = System.nanoTime();
+ feeder.feedMany(in).join();
if (cliArgs.benchmarkModeEnabled()) {
- printBenchmarkResult(feeder.benchmark(in));
- } else {
- feeder.feed(in);
+ printBenchmarkResult(System.nanoTime() - startNanos, feedClient.stats(), systemOut);
}
}
return 0;
@@ -80,14 +84,13 @@ public class CliClient {
builder.setHostnameVerifier(AcceptAllHostnameVerifier.INSTANCE);
}
cliArgs.certificateAndKey().ifPresent(c -> builder.setCertificate(c.certificateFile, c.privateKeyFile));
- cliArgs.caCertificates().ifPresent(builder::setCaCertificates);
+ cliArgs.caCertificates().ifPresent(builder::setCaCertificatesFile);
cliArgs.headers().forEach(builder::addRequestHeader);
return builder.build();
}
- private static JsonStreamFeeder createJsonFeeder(CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
- FeedClient feedClient = createFeedClient(cliArgs);
- JsonStreamFeeder.Builder builder = JsonStreamFeeder.builder(feedClient);
+ private static JsonFeeder createJsonFeeder(FeedClient feedClient, CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
+ JsonFeeder.Builder builder = JsonFeeder.builder(feedClient);
cliArgs.timeout().ifPresent(builder::withTimeout);
cliArgs.route().ifPresent(builder::withRoute);
cliArgs.traceLevel().ifPresent(builder::withTracelevel);
@@ -98,18 +101,6 @@ public class CliClient {
return cliArgs.readFeedFromStandardInput() ? systemIn : Files.newInputStream(cliArgs.inputFile().get());
}
- private void printBenchmarkResult(JsonStreamFeeder.BenchmarkResult result) throws IOException {
- JsonFactory factory = new JsonFactory();
- try (JsonGenerator generator = factory.createGenerator(systemOut).useDefaultPrettyPrinter()) {
- generator.writeStartObject();
- generator.writeNumberField("feeder.runtime", result.duration.toMillis());
- generator.writeNumberField("feeder.okcount", result.okCount);
- generator.writeNumberField("feeder.errorcount", result.errorCount);
- generator.writeNumberField("feeder.throughput", result.throughput);
- generator.writeEndObject();
- }
- }
-
private int handleException(boolean verbose, Exception e) { return handleException(verbose, e.getMessage(), e); }
private int handleException(boolean verbose, String message, Exception exception) {
@@ -131,4 +122,31 @@ public class CliClient {
static final AcceptAllHostnameVerifier INSTANCE = new AcceptAllHostnameVerifier();
@Override public boolean verify(String hostname, SSLSession session) { return true; }
}
+
+ static void printBenchmarkResult(long durationNanos, OperationStats stats, OutputStream systemOut) throws IOException {
+ JsonFactory factory = new JsonFactory();
+ long okCount = stats.successes();
+ long errorCount = stats.requests() - okCount;
+ double throughput = okCount * 1e9 / Math.max(1, durationNanos);
+ try (JsonGenerator generator = factory.createGenerator(systemOut).useDefaultPrettyPrinter()) {
+ generator.writeStartObject();
+ generator.writeNumberField("feeder.runtime", durationNanos / 1_000_000);
+ generator.writeNumberField("feeder.okcount", okCount);
+ generator.writeNumberField("feeder.errorcount", errorCount);
+ generator.writeNumberField("feeder.throughput", throughput);
+ generator.writeNumberField("feeder.minlatency", stats.minLatencyMillis());
+ generator.writeNumberField("feeder.avglatency", stats.averageLatencyMillis());
+ generator.writeNumberField("feeder.maxlatency", stats.maxLatencyMillis());
+ generator.writeNumberField("feeder.bytessent", stats.bytesSent());
+ generator.writeNumberField("feeder.bytesreceived", stats.bytesReceived());
+
+ generator.writeObjectFieldStart("feeder.responsecodes");
+ for (Map.Entry<Integer, Long> entry : stats.responsesByCode().entrySet())
+ generator.writeNumberField(Integer.toString(entry.getKey()), entry.getValue());
+ generator.writeEndObject();
+
+ generator.writeEndObject();
+ }
+ }
+
}
diff --git a/vespa-feed-client-cli/src/main/sh/vespa-feed-client-standalone.sh b/vespa-feed-client-cli/src/main/sh/vespa-feed-client-standalone.sh
new file mode 100755
index 00000000000..57077205d18
--- /dev/null
+++ b/vespa-feed-client-cli/src/main/sh/vespa-feed-client-standalone.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env sh
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+exec java \
+-Djava.awt.headless=true \
+-Xms128m -Xmx2048m \
+--add-opens=java.base/sun.security.ssl=ALL-UNNAMED \
+-Djava.util.logging.config.file=logging.properties \
+-cp vespa-feed-client-cli-jar-with-dependencies.jar ai.vespa.feed.client.CliClient "$@"
diff --git a/vespa-feed-client-cli/src/main/sh/vespa-feed-client.sh b/vespa-feed-client-cli/src/main/sh/vespa-feed-client.sh
index ab43fca2f67..43cde0894b9 100755
--- a/vespa-feed-client-cli/src/main/sh/vespa-feed-client.sh
+++ b/vespa-feed-client-cli/src/main/sh/vespa-feed-client.sh
@@ -81,4 +81,4 @@ exec java \
-Xms128m -Xmx2048m $(getJavaOptionsIPV46) \
--add-opens=java.base/sun.security.ssl=ALL-UNNAMED \
-Djava.util.logging.config.file=${VESPA_HOME}/conf/vespa-feed-client/logging.properties \
--cp ${VESPA_HOME}/lib/jars/vespa-feed-client-cli.jar ai.vespa.feed.client.CliClient "$@"
+-cp ${VESPA_HOME}/lib/jars/vespa-feed-client-cli-jar-with-dependencies.jar ai.vespa.feed.client.CliClient "$@"
diff --git a/vespa-feed-client-cli/src/maven/create-zip.xml b/vespa-feed-client-cli/src/maven/create-zip.xml
new file mode 100644
index 00000000000..45bbbea9f2d
--- /dev/null
+++ b/vespa-feed-client-cli/src/maven/create-zip.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2 http://maven.apache.org/xsd/assembly-1.1.2.xsd">
+ <id>zip</id>
+ <includeBaseDirectory>true</includeBaseDirectory>
+
+ <formats>
+ <format>zip</format>
+ </formats>
+ <files>
+ <file>
+ <source>${project.build.directory}/${project.artifactId}-jar-with-dependencies.jar</source>
+ </file>
+ <file>
+ <source>${project.basedir}/src/main/sh/vespa-feed-client-standalone.sh</source>
+ <destName>vespa-feed-client</destName>
+ </file>
+ <file>
+ <source>${project.basedir}/src/main/resources/logging.properties</source>
+ </file>
+ </files>
+</assembly>
diff --git a/vespa-feed-client/abi-spec.json b/vespa-feed-client/abi-spec.json
new file mode 100644
index 00000000000..db9c1ff1a02
--- /dev/null
+++ b/vespa-feed-client/abi-spec.json
@@ -0,0 +1,319 @@
+{
+ "ai.vespa.feed.client.BenchmarkingCluster": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "ai.vespa.feed.client.Cluster"
+ ],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(ai.vespa.feed.client.Cluster)",
+ "public void dispatch(ai.vespa.feed.client.HttpRequest, java.util.concurrent.CompletableFuture)",
+ "public ai.vespa.feed.client.OperationStats stats()",
+ "public void close()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.DocumentId": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public static ai.vespa.feed.client.DocumentId of(java.lang.String, java.lang.String, java.lang.String)",
+ "public static ai.vespa.feed.client.DocumentId of(java.lang.String, java.lang.String, long, java.lang.String)",
+ "public static ai.vespa.feed.client.DocumentId of(java.lang.String, java.lang.String, java.lang.String, java.lang.String)",
+ "public static ai.vespa.feed.client.DocumentId of(java.lang.String)",
+ "public java.lang.String documentType()",
+ "public java.lang.String namespace()",
+ "public java.util.OptionalLong number()",
+ "public java.util.Optional group()",
+ "public java.lang.String userSpecific()",
+ "public boolean equals(java.lang.Object)",
+ "public int hashCode()",
+ "public java.lang.String toString()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.FeedClient$CircuitBreaker$State": {
+ "superClass": "java.lang.Enum",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "final",
+ "enum"
+ ],
+ "methods": [
+ "public static ai.vespa.feed.client.FeedClient$CircuitBreaker$State[] values()",
+ "public static ai.vespa.feed.client.FeedClient$CircuitBreaker$State valueOf(java.lang.String)"
+ ],
+ "fields": [
+ "public static final enum ai.vespa.feed.client.FeedClient$CircuitBreaker$State CLOSED",
+ "public static final enum ai.vespa.feed.client.FeedClient$CircuitBreaker$State HALF_OPEN",
+ "public static final enum ai.vespa.feed.client.FeedClient$CircuitBreaker$State OPEN"
+ ]
+ },
+ "ai.vespa.feed.client.FeedClient$CircuitBreaker": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public abstract void success()",
+ "public abstract void failure()",
+ "public abstract ai.vespa.feed.client.FeedClient$CircuitBreaker$State state()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.FeedClient$OperationType": {
+ "superClass": "java.lang.Enum",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "final",
+ "enum"
+ ],
+ "methods": [
+ "public static ai.vespa.feed.client.FeedClient$OperationType[] values()",
+ "public static ai.vespa.feed.client.FeedClient$OperationType valueOf(java.lang.String)"
+ ],
+ "fields": [
+ "public static final enum ai.vespa.feed.client.FeedClient$OperationType PUT",
+ "public static final enum ai.vespa.feed.client.FeedClient$OperationType UPDATE",
+ "public static final enum ai.vespa.feed.client.FeedClient$OperationType REMOVE"
+ ]
+ },
+ "ai.vespa.feed.client.FeedClient$RetryStrategy": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public boolean retry(ai.vespa.feed.client.FeedClient$OperationType)",
+ "public int retries()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.FeedClient": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "java.io.Closeable"
+ ],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public abstract java.util.concurrent.CompletableFuture put(ai.vespa.feed.client.DocumentId, java.lang.String, ai.vespa.feed.client.OperationParameters)",
+ "public abstract java.util.concurrent.CompletableFuture update(ai.vespa.feed.client.DocumentId, java.lang.String, ai.vespa.feed.client.OperationParameters)",
+ "public abstract java.util.concurrent.CompletableFuture remove(ai.vespa.feed.client.DocumentId, ai.vespa.feed.client.OperationParameters)",
+ "public abstract ai.vespa.feed.client.OperationStats stats()",
+ "public ai.vespa.feed.client.FeedClient$CircuitBreaker$State circuitBreakerState()",
+ "public abstract void close(boolean)",
+ "public void close()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.FeedClientBuilder": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public static ai.vespa.feed.client.FeedClientBuilder create(java.net.URI)",
+ "public static ai.vespa.feed.client.FeedClientBuilder create(java.util.List)",
+ "public ai.vespa.feed.client.FeedClientBuilder setConnectionsPerEndpoint(int)",
+ "public ai.vespa.feed.client.FeedClientBuilder setMaxStreamPerConnection(int)",
+ "public ai.vespa.feed.client.FeedClientBuilder setSslContext(javax.net.ssl.SSLContext)",
+ "public ai.vespa.feed.client.FeedClientBuilder setHostnameVerifier(javax.net.ssl.HostnameVerifier)",
+ "public ai.vespa.feed.client.FeedClientBuilder setBenchmarkOn(boolean)",
+ "public ai.vespa.feed.client.FeedClientBuilder addRequestHeader(java.lang.String, java.lang.String)",
+ "public ai.vespa.feed.client.FeedClientBuilder addRequestHeader(java.lang.String, java.util.function.Supplier)",
+ "public ai.vespa.feed.client.FeedClientBuilder setRetryStrategy(ai.vespa.feed.client.FeedClient$RetryStrategy)",
+ "public ai.vespa.feed.client.FeedClientBuilder setCircuitBreaker(ai.vespa.feed.client.FeedClient$CircuitBreaker)",
+ "public ai.vespa.feed.client.FeedClientBuilder setCertificate(java.nio.file.Path, java.nio.file.Path)",
+ "public ai.vespa.feed.client.FeedClientBuilder setCertificate(java.util.Collection, java.security.PrivateKey)",
+ "public ai.vespa.feed.client.FeedClientBuilder setCertificate(java.security.cert.X509Certificate, java.security.PrivateKey)",
+ "public ai.vespa.feed.client.FeedClientBuilder setCaCertificatesFile(java.nio.file.Path)",
+ "public ai.vespa.feed.client.FeedClientBuilder setCaCertificates(java.util.Collection)",
+ "public ai.vespa.feed.client.FeedClient build()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.FeedException": {
+ "superClass": "java.lang.RuntimeException",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(java.lang.String)",
+ "public void <init>(java.lang.String, java.lang.Throwable)",
+ "public void <init>(java.lang.Throwable)"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.GracePeriodCircuitBreaker": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "ai.vespa.feed.client.FeedClient$CircuitBreaker"
+ ],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(java.time.Duration, java.time.Duration)",
+ "public void success()",
+ "public void failure()",
+ "public ai.vespa.feed.client.FeedClient$CircuitBreaker$State state()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.JsonFeeder$Builder": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public ai.vespa.feed.client.JsonFeeder$Builder withTimeout(java.time.Duration)",
+ "public ai.vespa.feed.client.JsonFeeder$Builder withRoute(java.lang.String)",
+ "public ai.vespa.feed.client.JsonFeeder$Builder withTracelevel(int)",
+ "public ai.vespa.feed.client.JsonFeeder build()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.JsonFeeder$ResultCallback": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public void onNextResult(ai.vespa.feed.client.Result, java.lang.Throwable)",
+ "public void onError(java.lang.Throwable)",
+ "public void onComplete()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.JsonFeeder": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "java.io.Closeable"
+ ],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public static ai.vespa.feed.client.JsonFeeder$Builder builder(ai.vespa.feed.client.FeedClient)",
+ "public java.util.concurrent.CompletableFuture feedSingle(java.lang.String)",
+ "public java.util.concurrent.CompletableFuture feedMany(java.io.InputStream, ai.vespa.feed.client.JsonFeeder$ResultCallback)",
+ "public java.util.concurrent.CompletableFuture feedMany(java.io.InputStream)",
+ "public void close()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.JsonParseException": {
+ "superClass": "ai.vespa.feed.client.FeedException",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(java.lang.String)",
+ "public void <init>(java.lang.String, java.lang.Throwable)"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.OperationParameters": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public static ai.vespa.feed.client.OperationParameters empty()",
+ "public ai.vespa.feed.client.OperationParameters createIfNonExistent(boolean)",
+ "public ai.vespa.feed.client.OperationParameters testAndSetCondition(java.lang.String)",
+ "public ai.vespa.feed.client.OperationParameters timeout(java.time.Duration)",
+ "public ai.vespa.feed.client.OperationParameters route(java.lang.String)",
+ "public ai.vespa.feed.client.OperationParameters tracelevel(int)",
+ "public boolean createIfNonExistent()",
+ "public java.util.Optional testAndSetCondition()",
+ "public java.util.Optional timeout()",
+ "public java.util.Optional route()",
+ "public java.util.OptionalInt tracelevel()",
+ "public boolean equals(java.lang.Object)",
+ "public int hashCode()",
+ "public java.lang.String toString()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.OperationStats": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(long, java.util.Map, long, long, long, long, long, long, long)",
+ "public long requests()",
+ "public long responses()",
+ "public long successes()",
+ "public java.util.Map responsesByCode()",
+ "public long exceptions()",
+ "public long inflight()",
+ "public long averageLatencyMillis()",
+ "public long minLatencyMillis()",
+ "public long maxLatencyMillis()",
+ "public long bytesSent()",
+ "public long bytesReceived()",
+ "public java.lang.String toString()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.Result$Type": {
+ "superClass": "java.lang.Enum",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "final",
+ "enum"
+ ],
+ "methods": [
+ "public static ai.vespa.feed.client.Result$Type[] values()",
+ "public static ai.vespa.feed.client.Result$Type valueOf(java.lang.String)"
+ ],
+ "fields": [
+ "public static final enum ai.vespa.feed.client.Result$Type success",
+ "public static final enum ai.vespa.feed.client.Result$Type conditionNotMet",
+ "public static final enum ai.vespa.feed.client.Result$Type failure"
+ ]
+ },
+ "ai.vespa.feed.client.Result": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public ai.vespa.feed.client.Result$Type type()",
+ "public ai.vespa.feed.client.DocumentId documentId()",
+ "public java.util.Optional resultMessage()",
+ "public java.util.Optional traceMessage()"
+ ],
+ "fields": []
+ }
+} \ No newline at end of file
diff --git a/vespa-feed-client/pom.xml b/vespa-feed-client/pom.xml
index 7759e9d2308..7d4938c6fb0 100644
--- a/vespa-feed-client/pom.xml
+++ b/vespa-feed-client/pom.xml
@@ -20,6 +20,11 @@
<dependencies>
<!-- compile scope -->
<dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>annotations</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcpkix-jdk15on</artifactId>
<scope>compile</scope>
@@ -83,6 +88,10 @@
</execution>
</executions>
</plugin>
+ <plugin>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>abi-check-plugin</artifactId>
+ </plugin>
</plugins>
</build>
</project>
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpCluster.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/ApacheCluster.java
index 34491d23c4b..e5d45a2f211 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpCluster.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/ApacheCluster.java
@@ -8,6 +8,7 @@ import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient;
import org.apache.hc.client5.http.impl.async.H2AsyncClientBuilder;
import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder;
import org.apache.hc.core5.concurrent.FutureCallback;
+import org.apache.hc.core5.http.ContentType;
import org.apache.hc.core5.http.message.BasicHeader;
import org.apache.hc.core5.http2.config.H2Config;
import org.apache.hc.core5.net.URIAuthority;
@@ -29,18 +30,23 @@ import static org.apache.hc.core5.http.ssl.TlsCiphers.excludeWeak;
/**
* @author jonmv
*/
-class HttpCluster implements Cluster {
+class ApacheCluster implements Cluster {
private final List<Endpoint> endpoints = new ArrayList<>();
- public HttpCluster(FeedClientBuilder builder) throws IOException {
+ ApacheCluster(FeedClientBuilder builder) throws IOException {
for (URI endpoint : builder.endpoints)
for (int i = 0; i < builder.connectionsPerEndpoint; i++)
endpoints.add(new Endpoint(createHttpClient(builder), endpoint));
}
@Override
- public void dispatch(SimpleHttpRequest request, CompletableFuture<SimpleHttpResponse> vessel) {
+ public void dispatch(HttpRequest wrapped, CompletableFuture<HttpResponse> vessel) {
+ SimpleHttpRequest request = new SimpleHttpRequest(wrapped.method(), wrapped.path());
+ wrapped.headers().forEach((name, value) -> request.setHeader(name, value.get()));
+ if (wrapped.body() != null)
+ request.setBody(wrapped.body(), ContentType.APPLICATION_JSON);
+
int index = 0;
int min = Integer.MAX_VALUE;
for (int i = 0; i < endpoints.size(); i++)
@@ -56,7 +62,7 @@ class HttpCluster implements Cluster {
request.setAuthority(new URIAuthority(endpoint.url.getHost(), endpoint.url.getPort()));
endpoint.client.execute(request,
new FutureCallback<SimpleHttpResponse>() {
- @Override public void completed(SimpleHttpResponse response) { vessel.complete(response); }
+ @Override public void completed(SimpleHttpResponse response) { vessel.complete(new ApacheHttpResponse(response)); }
@Override public void failed(Exception ex) { vessel.completeExceptionally(ex); }
@Override public void cancelled() { vessel.cancel(false); }
});
@@ -105,23 +111,23 @@ class HttpCluster implements Cluster {
.disableRedirectHandling()
.disableAutomaticRetries()
.setIOReactorConfig(IOReactorConfig.custom()
- .setIoThreadCount(1)
+ .setIoThreadCount(2)
.setTcpNoDelay(true)
.setSoTimeout(Timeout.ofSeconds(10))
.build())
- .setDefaultRequestConfig(
- RequestConfig.custom()
- .setConnectTimeout(Timeout.ofSeconds(10))
- .setConnectionRequestTimeout(Timeout.DISABLED)
- .setResponseTimeout(Timeout.ofMinutes(5))
- .build())
- .setH2Config(H2Config.initial()
+ .setDefaultRequestConfig(RequestConfig.custom()
+ .setConnectTimeout(Timeout.ofSeconds(10))
+ .setConnectionRequestTimeout(Timeout.DISABLED)
+ .setResponseTimeout(Timeout.ofMinutes(5))
+ .build())
+ .setH2Config(H2Config.custom()
.setMaxConcurrentStreams(builder.maxStreamsPerConnection)
.setCompressionEnabled(true)
.setPushEnabled(false)
+ .setInitialWindowSize(Integer.MAX_VALUE)
.build());
- SSLContext sslContext = constructSslContext(builder);
+ SSLContext sslContext = builder.constructSslContext();
String[] allowedCiphers = excludeH2Blacklisted(excludeWeak(sslContext.getSupportedSSLParameters().getCipherSuites()));
if (allowedCiphers.length == 0)
throw new IllegalStateException("No adequate SSL cipher suites supported by the JVM");
@@ -136,16 +142,24 @@ class HttpCluster implements Cluster {
.build();
}
- private static SSLContext constructSslContext(FeedClientBuilder builder) throws IOException {
- if (builder.sslContext != null) return builder.sslContext;
- SslContextBuilder sslContextBuilder = new SslContextBuilder();
- if (builder.certificate != null && builder.privateKey != null) {
- sslContextBuilder.withCertificateAndKey(builder.certificate, builder.privateKey);
+ private static class ApacheHttpResponse implements HttpResponse {
+
+ private final SimpleHttpResponse wrapped;
+
+ private ApacheHttpResponse(SimpleHttpResponse wrapped) {
+ this.wrapped = wrapped;
}
- if (builder.caCertificates != null) {
- sslContextBuilder.withCaCertificates(builder.caCertificates);
+
+ @Override
+ public int code() {
+ return wrapped.getCode();
}
- return sslContextBuilder.build();
+
+ @Override
+ public byte[] body() {
+ return wrapped.getBodyBytes();
+ }
+
}
}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/BenchmarkingCluster.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/BenchmarkingCluster.java
new file mode 100644
index 00000000000..840219a6bf1
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/BenchmarkingCluster.java
@@ -0,0 +1,102 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static java.util.Objects.requireNonNull;
+
+public class BenchmarkingCluster implements Cluster {
+
+ private final Cluster delegate;
+ private final ExecutorService executor = Executors.newSingleThreadExecutor(runnable -> {
+ Thread thread = new Thread(runnable, "cluster-stats-collector");
+ thread.setDaemon(true);
+ return thread;
+ });
+
+ private final AtomicLong requests = new AtomicLong();
+ private long results = 0;
+ private long responses = 0;
+ private final long[] responsesByCode = new long[600];
+ private long exceptions = 0;
+ private long totalLatencyMillis = 0;
+ private long minLatencyMillis = Long.MAX_VALUE;
+ private long maxLatencyMillis = 0;
+ private long bytesSent = 0;
+ private long bytesReceived = 0;
+
+ public BenchmarkingCluster(Cluster delegate) {
+ this.delegate = requireNonNull(delegate);
+ }
+
+ @Override
+ public void dispatch(HttpRequest request, CompletableFuture<HttpResponse> vessel) {
+ requests.incrementAndGet();
+ long startNanos = System.nanoTime();
+ delegate.dispatch(request, vessel);
+ vessel.whenCompleteAsync((response, thrown) -> {
+ results++;
+ if (thrown == null) {
+ responses++;
+ responsesByCode[response.code()]++;
+ long latency = (System.nanoTime() - startNanos) / 1_000_000;
+ totalLatencyMillis += latency;
+ minLatencyMillis = Math.min(minLatencyMillis, latency);
+ maxLatencyMillis = Math.max(maxLatencyMillis, latency);
+ bytesSent += request.body() == null ? 0 : request.body().length;
+ bytesReceived += response.body() == null ? 0 : response.body().length;
+ }
+ else
+ exceptions++;
+ },
+ executor);
+ }
+
+ @Override
+ public OperationStats stats() {
+ try {
+ try {
+ return executor.submit(this::getStats).get();
+ }
+ catch (RejectedExecutionException ignored) {
+ executor.awaitTermination(10, TimeUnit.SECONDS);
+ return getStats();
+ }
+ }
+ catch (InterruptedException | ExecutionException ignored) {
+ throw new RuntimeException(ignored);
+ }
+ }
+
+ private OperationStats getStats() {
+ Map<Integer, Long> responses = new HashMap<>();
+ for (int code = 0; code < responsesByCode.length; code++)
+ if (responsesByCode[code] > 0)
+ responses.put(code, responsesByCode[code]);
+
+ return new OperationStats(requests.get(),
+ responses,
+ exceptions,
+ requests.get() - results,
+ this.responses == 0 ? 0 : totalLatencyMillis / this.responses,
+ minLatencyMillis,
+ maxLatencyMillis,
+ bytesSent,
+ bytesReceived);
+ }
+
+ @Override
+ public void close() {
+ delegate.close();
+ executor.shutdown();
+ }
+
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Cluster.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/Cluster.java
index fde230d3ca4..f428fb567e6 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Cluster.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/Cluster.java
@@ -1,21 +1,21 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.feed.client;
-import org.apache.hc.client5.http.async.methods.SimpleHttpRequest;
-import org.apache.hc.client5.http.async.methods.SimpleHttpResponse;
-
import java.io.Closeable;
+import java.util.Collections;
import java.util.concurrent.CompletableFuture;
/**
- * Allows dispatch to a Vespa cluster. {@link #dispatch} should be called by a single thread, i.e., it is not thread-safe.
+ * Allows dispatch to a Vespa cluster.
*/
interface Cluster extends Closeable {
- /** Dispatch the request to the cluster, causing the response vessel to complete at a later time. */
- void dispatch(SimpleHttpRequest request, CompletableFuture<SimpleHttpResponse> vessel);
+ /** Dispatch the request to the cluster, causing the response vessel to complete at a later time. May not throw. */
+ void dispatch(HttpRequest request, CompletableFuture<HttpResponse> vessel);
@Override
default void close() { }
+ default OperationStats stats() { return new OperationStats(0, Collections.emptyMap(), 0, 0, 0, 0, 0, 0, 0); }
+
}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/DocumentId.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/DocumentId.java
index 21513a5dac2..39fc9fb28e0 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/DocumentId.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/DocumentId.java
@@ -8,6 +8,8 @@ import java.util.OptionalLong;
import static java.util.Objects.requireNonNull;
/**
 + * Represents a Vespa document id.
+ *
* @author jonmv
*/
public class DocumentId {
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java
index 2ac75a948d9..250809a48b9 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java
@@ -5,6 +5,8 @@ import java.io.Closeable;
import java.util.concurrent.CompletableFuture;
/**
 + * Asynchronous feed client accepting document operations as JSON.
+ *
* @author bjorncs
* @author jonmv
*/
@@ -19,6 +21,12 @@ public interface FeedClient extends Closeable {
/** Send a document remove with the given parameters, returning a future with the result of the operation. */
CompletableFuture<Result> remove(DocumentId documentId, OperationParameters params);
+ /** Returns a snapshot of the stats for this feed client, such as requests made, and responses by status. */
+ OperationStats stats();
+
+ /** Current state of the circuit breaker. */
+ default CircuitBreaker.State circuitBreakerState() { return CircuitBreaker.State.CLOSED; }
+
/** Shut down, and reject new operations. Operations in flight are allowed to complete normally if graceful. */
void close(boolean graceful);
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java
index da575a7cf6d..8b5eb9efea7 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java
@@ -7,9 +7,11 @@ import java.io.IOException;
import java.io.UncheckedIOException;
import java.net.URI;
import java.nio.file.Path;
-import java.time.Clock;
+import java.security.PrivateKey;
+import java.security.cert.X509Certificate;
import java.time.Duration;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
@@ -35,13 +37,19 @@ public class FeedClientBuilder {
int connectionsPerEndpoint = 4;
int maxStreamsPerConnection = 128;
FeedClient.RetryStrategy retryStrategy = defaultRetryStrategy;
- FeedClient.CircuitBreaker circuitBreaker = new GracePeriodCircuitBreaker(Clock.systemUTC(), Duration.ofSeconds(1), Duration.ofMinutes(10));
- Path certificate;
- Path privateKey;
- Path caCertificates;
-
+ FeedClient.CircuitBreaker circuitBreaker = new GracePeriodCircuitBreaker(Duration.ofSeconds(1), Duration.ofMinutes(10));
+ Path certificateFile;
+ Path privateKeyFile;
+ Path caCertificatesFile;
+ Collection<X509Certificate> certificate;
+ PrivateKey privateKey;
+ Collection<X509Certificate> caCertificates;
+ boolean benchmark;
+
 + /** Creates a builder for a single container endpoint. */
public static FeedClientBuilder create(URI endpoint) { return new FeedClientBuilder(Collections.singletonList(endpoint)); }
 + /** Creates a builder for multiple container endpoints. */
public static FeedClientBuilder create(List<URI> endpoints) { return new FeedClientBuilder(endpoints); }
private FeedClientBuilder(List<URI> endpoints) {
@@ -81,57 +89,137 @@ public class FeedClientBuilder {
return this;
}
+ /** Sets {@link SSLContext} instance. */
public FeedClientBuilder setSslContext(SSLContext context) {
- if (certificate != null || caCertificates != null || privateKey != null) {
- throw new IllegalArgumentException("Cannot set both SSLContext and certificate / CA certificates");
- }
this.sslContext = requireNonNull(context);
return this;
}
+ /** Sets {@link HostnameVerifier} instance (e.g for disabling default SSL hostname verification). */
public FeedClientBuilder setHostnameVerifier(HostnameVerifier verifier) {
this.hostnameVerifier = requireNonNull(verifier);
return this;
}
+ /** Turns on/off benchmarking, aggregated in {@link FeedClient#stats()}. */
+ public FeedClientBuilder setBenchmarkOn(boolean on) {
+ this.benchmark = on;
+ return this;
+ }
+
+ /** Adds HTTP request header to all client requests. */
public FeedClientBuilder addRequestHeader(String name, String value) {
return addRequestHeader(name, () -> requireNonNull(value));
}
+ /**
+ * Adds HTTP request header to all client requests. Value {@link Supplier} is invoked for each HTTP request,
+ * i.e. value can be dynamically updated during a feed.
+ */
public FeedClientBuilder addRequestHeader(String name, Supplier<String> valueSupplier) {
this.requestHeaders.put(requireNonNull(name), requireNonNull(valueSupplier));
return this;
}
+ /**
+ * Overrides default retry strategy.
+ * @see FeedClient.RetryStrategy
+ */
public FeedClientBuilder setRetryStrategy(FeedClient.RetryStrategy strategy) {
this.retryStrategy = requireNonNull(strategy);
return this;
}
+ /**
+ * Overrides default circuit breaker.
+ * @see FeedClient.CircuitBreaker
+ */
public FeedClientBuilder setCircuitBreaker(FeedClient.CircuitBreaker breaker) {
this.circuitBreaker = requireNonNull(breaker);
return this;
}
+ /** Sets path to client SSL certificate/key PEM files */
public FeedClientBuilder setCertificate(Path certificatePemFile, Path privateKeyPemFile) {
- if (sslContext != null) throw new IllegalArgumentException("Cannot set both SSLContext and certificate");
- this.certificate = certificatePemFile;
- this.privateKey = privateKeyPemFile;
+ this.certificateFile = certificatePemFile;
+ this.privateKeyFile = privateKeyPemFile;
return this;
}
- public FeedClientBuilder setCaCertificates(Path caCertificatesFile) {
- if (sslContext != null) throw new IllegalArgumentException("Cannot set both SSLContext and CA certificate");
- this.caCertificates = caCertificatesFile;
+ /** Sets client SSL certificates/key */
+ public FeedClientBuilder setCertificate(Collection<X509Certificate> certificate, PrivateKey privateKey) {
+ this.certificate = certificate;
+ this.privateKey = privateKey;
return this;
}
+ /** Sets client SSL certificate/key */
+ public FeedClientBuilder setCertificate(X509Certificate certificate, PrivateKey privateKey) {
+ return setCertificate(Collections.singletonList(certificate), privateKey);
+ }
+
+ /**
+ * Overrides JVM default SSL truststore
+ * @param caCertificatesFile Path to PEM encoded file containing trusted certificates
+ */
+ public FeedClientBuilder setCaCertificatesFile(Path caCertificatesFile) {
+ this.caCertificatesFile = caCertificatesFile;
+ return this;
+ }
+
+ /** Overrides JVM default SSL truststore */
+ public FeedClientBuilder setCaCertificates(Collection<X509Certificate> caCertificates) {
+ this.caCertificates = caCertificates;
+ return this;
+ }
+
+ /** Constructs instance of {@link ai.vespa.feed.client.FeedClient} from builder configuration */
public FeedClient build() {
try {
+ validateConfiguration();
return new HttpFeedClient(this);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
+ SSLContext constructSslContext() throws IOException {
+ if (sslContext != null) return sslContext;
+ SslContextBuilder sslContextBuilder = new SslContextBuilder();
+ if (certificateFile != null && privateKeyFile != null) {
+ sslContextBuilder.withCertificateAndKey(certificateFile, privateKeyFile);
+ } else if (certificate != null && privateKey != null) {
+ sslContextBuilder.withCertificateAndKey(certificate, privateKey);
+ }
+ if (caCertificatesFile != null) {
+ sslContextBuilder.withCaCertificates(caCertificatesFile);
+ } else if (caCertificates != null) {
+ sslContextBuilder.withCaCertificates(caCertificates);
+ }
+ return sslContextBuilder.build();
+ }
+
+ private void validateConfiguration() {
+ if (sslContext != null && (
+ certificateFile != null || caCertificatesFile != null || privateKeyFile != null ||
+ certificate != null || caCertificates != null || privateKey != null)) {
+ throw new IllegalArgumentException("Cannot set both SSLContext and certificate / CA certificates");
+ }
+ if (certificate != null && certificateFile != null) {
+ throw new IllegalArgumentException("Cannot set both certificate directly and as file");
+ }
+ if (privateKey != null && privateKeyFile != null) {
+ throw new IllegalArgumentException("Cannot set both private key directly and as file");
+ }
+ if (caCertificates != null && caCertificatesFile != null) {
+ throw new IllegalArgumentException("Cannot set both CA certificates directly and as file");
+ }
+ if (certificate != null && certificate.isEmpty()) {
+ throw new IllegalArgumentException("Certificate cannot be empty");
+ }
+ if (caCertificates != null && caCertificates.isEmpty()) {
+ throw new IllegalArgumentException("CA certificates cannot be empty");
+ }
+ }
+
}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java
index eb31d1aa808..e1c6c733e9c 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java
@@ -2,7 +2,16 @@
package ai.vespa.feed.client;
/**
 + * Signals that an error occurred during feeding.
+ *
* @author bjorncs
*/
public class FeedException extends RuntimeException {
+
+ public FeedException(String message) { super(message); }
+
+ public FeedException(String message, Throwable cause) { super(message, cause); }
+
+ public FeedException(Throwable cause) { super(cause); }
+
}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/GracePeriodCircuitBreaker.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/GracePeriodCircuitBreaker.java
index 974d18418ec..2c5c2dccf19 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/GracePeriodCircuitBreaker.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/GracePeriodCircuitBreaker.java
@@ -1,10 +1,10 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.feed.client;
-import java.time.Clock;
import java.time.Duration;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.LongSupplier;
import java.util.logging.Logger;
import static java.util.Objects.requireNonNull;
@@ -13,19 +13,26 @@ import static java.util.logging.Level.WARNING;
/**
* Breaks the circuit when no successes have been recorded for a specified time.
+ *
+ * @author jonmv
*/
public class GracePeriodCircuitBreaker implements FeedClient.CircuitBreaker {
private static final Logger log = Logger.getLogger(GracePeriodCircuitBreaker.class.getName());
+ private static final long NEVER = 1L << 60;
- private final AtomicLong lastSuccessMillis = new AtomicLong(0); // Trigger if first response is a failure.
+ private final AtomicLong failingSinceMillis = new AtomicLong(NEVER);
private final AtomicBoolean halfOpen = new AtomicBoolean(false);
private final AtomicBoolean open = new AtomicBoolean(false);
- private final Clock clock;
+ private final LongSupplier clock;
private final long graceMillis;
private final long doomMillis;
- GracePeriodCircuitBreaker(Clock clock, Duration grace, Duration doom) {
+ public GracePeriodCircuitBreaker(Duration grace, Duration doom) {
+ this(System::currentTimeMillis, grace, doom);
+ }
+
+ GracePeriodCircuitBreaker(LongSupplier clock, Duration grace, Duration doom) {
if (grace.isNegative())
throw new IllegalArgumentException("Grace delay must be non-negative");
@@ -39,23 +46,25 @@ public class GracePeriodCircuitBreaker implements FeedClient.CircuitBreaker {
@Override
public void success() {
- lastSuccessMillis.set(clock.millis());
- if (halfOpen.compareAndSet(true, false))
+ failingSinceMillis.set(NEVER);
+ if ( ! open.get() && halfOpen.compareAndSet(true, false))
log.log(INFO, "Circuit breaker is now closed");
}
@Override
public void failure() {
- long nowMillis = clock.millis();
- if (lastSuccessMillis.get() < nowMillis - doomMillis && open.compareAndSet(false, true))
- log.log(WARNING, "Circuit breaker is now open");
-
- if (lastSuccessMillis.get() < nowMillis - graceMillis && halfOpen.compareAndSet(false, true))
- log.log(INFO, "Circuit breaker is now half-open");
+ failingSinceMillis.compareAndSet(NEVER, clock.getAsLong());
}
@Override
public State state() {
+ long failingMillis = clock.getAsLong() - failingSinceMillis.get();
+ if (failingMillis > graceMillis && halfOpen.compareAndSet(false, true))
+ log.log(INFO, "Circuit breaker is now half-open");
+
+ if (failingMillis > doomMillis && open.compareAndSet(false, true))
+ log.log(WARNING, "Circuit breaker is now open");
+
return open.get() ? State.OPEN : halfOpen.get() ? State.HALF_OPEN : State.CLOSED;
}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java
index 9b89595db25..b160cced4b9 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java
@@ -4,25 +4,19 @@ package ai.vespa.feed.client;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;
-import org.apache.hc.client5.http.async.methods.SimpleHttpRequest;
-import org.apache.hc.client5.http.async.methods.SimpleHttpResponse;
-import org.apache.hc.core5.http.ContentType;
-import org.apache.hc.core5.net.URIBuilder;
-import java.io.ByteArrayOutputStream;
import java.io.IOException;
-import java.io.PrintStream;
import java.io.UncheckedIOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
+import java.io.UnsupportedEncodingException;
+import java.net.URLEncoder;
import java.util.HashMap;
-import java.util.List;
import java.util.Map;
+import java.util.StringJoiner;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Supplier;
+import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Objects.requireNonNull;
/**
@@ -64,6 +58,16 @@ class HttpFeedClient implements FeedClient {
}
@Override
+ public OperationStats stats() {
+ return requestStrategy.stats();
+ }
+
+ @Override
+ public CircuitBreaker.State circuitBreakerState() {
+ return requestStrategy.circuitBreakerState();
+ }
+
+ @Override
public void close(boolean graceful) {
closed.set(true);
if (graceful)
@@ -72,49 +76,33 @@ class HttpFeedClient implements FeedClient {
requestStrategy.destroy();
}
- private void ensureOpen() {
- if (requestStrategy.hasFailed())
- close();
-
- if (closed.get())
- throw new IllegalStateException("Client is closed, no further operations may be sent");
- }
-
private CompletableFuture<Result> send(String method, DocumentId documentId, String operationJson, OperationParameters params) {
- ensureOpen();
-
- String path = operationPath(documentId, params).toString();
- SimpleHttpRequest request = new SimpleHttpRequest(method, path);
- requestHeaders.forEach((name, value) -> request.setHeader(name, value.get()));
- if (operationJson != null)
- request.setBody(operationJson, ContentType.APPLICATION_JSON);
+ HttpRequest request = new HttpRequest(method,
+ getPath(documentId) + getQuery(params),
+ requestHeaders,
+ operationJson.getBytes(UTF_8)); // TODO: make it bytes all the way?
return requestStrategy.enqueue(documentId, request)
- .handle((response, thrown) -> {
- if (thrown != null) {
- // TODO: What to do with exceptions here? Ex on 400, 401, 403, etc, and wrap and throw?
- ByteArrayOutputStream buffer = new ByteArrayOutputStream();
- thrown.printStackTrace(new PrintStream(buffer));
- return new Result(Result.Type.failure, documentId, buffer.toString(), null);
- }
- return toResult(response, documentId);
- });
+ .thenApply(response -> toResult(request, response, documentId));
}
- static Result toResult(SimpleHttpResponse response, DocumentId documentId) {
+ static Result toResult(HttpRequest request, HttpResponse response, DocumentId documentId) {
Result.Type type;
- switch (response.getCode()) {
+ switch (response.code()) {
case 200: type = Result.Type.success; break;
case 412: type = Result.Type.conditionNotMet; break;
- default: type = Result.Type.failure;
+ case 502:
+ case 504:
+ case 507: type = Result.Type.failure; break;
+ default: type = null;
}
String message = null;
String trace = null;
try {
- JsonParser parser = factory.createParser(response.getBodyText());
+ JsonParser parser = factory.createParser(response.body());
if (parser.nextToken() != JsonToken.START_OBJECT)
- throw new IllegalArgumentException("Expected '" + JsonToken.START_OBJECT + "', but found '" + parser.currentToken() + "' in: " + response.getBodyText());
+ throw new IllegalArgumentException("Expected '" + JsonToken.START_OBJECT + "', but found '" + parser.currentToken() + "' in: " + new String(response.body(), UTF_8));
String name;
while ((name = parser.nextFieldName()) != null) {
@@ -126,53 +114,58 @@ class HttpFeedClient implements FeedClient {
}
if (parser.currentToken() != JsonToken.END_OBJECT)
- throw new IllegalArgumentException("Expected '" + JsonToken.END_OBJECT + "', but found '" + parser.currentToken() + "' in: " + response.getBodyText());
+ throw new IllegalArgumentException("Expected '" + JsonToken.END_OBJECT + "', but found '" + parser.currentToken() + "' in: " + new String(response.body(), UTF_8));
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
+ if (type == null) // Not a Vespa response, but a failure in the HTTP layer.
+ throw new FeedException("Status " + response.code() + " executing '" + request +
+ "': " + (message == null ? new String(response.body(), UTF_8) : message));
+
return new Result(type, documentId, message, trace);
}
- static List<String> toPath(DocumentId documentId) {
- List<String> path = new ArrayList<>();
+ static String getPath(DocumentId documentId) {
+ StringJoiner path = new StringJoiner("/", "/", "");
path.add("document");
path.add("v1");
- path.add(documentId.namespace());
- path.add(documentId.documentType());
+ path.add(encode(documentId.namespace()));
+ path.add(encode(documentId.documentType()));
if (documentId.number().isPresent()) {
path.add("number");
path.add(Long.toUnsignedString(documentId.number().getAsLong()));
}
else if (documentId.group().isPresent()) {
path.add("group");
- path.add(documentId.group().get());
+ path.add(encode(documentId.group().get()));
}
else {
path.add("docid");
}
- path.add(documentId.userSpecific());
+ path.add(encode(documentId.userSpecific()));
- return path;
+ return path.toString();
}
- static URI operationPath(DocumentId documentId, OperationParameters params) {
- URIBuilder url = new URIBuilder();
- url.setPathSegments(toPath(documentId));
-
- if (params.createIfNonExistent()) url.addParameter("create", "true");
- params.testAndSetCondition().ifPresent(condition -> url.addParameter("condition", condition));
- params.timeout().ifPresent(timeout -> url.addParameter("timeout", timeout.toMillis() + "ms"));
- params.route().ifPresent(route -> url.addParameter("route", route));
- params.tracelevel().ifPresent(tracelevel -> url.addParameter("tracelevel", Integer.toString(tracelevel)));
-
+ static String encode(String raw) {
try {
- return url.build();
+ return URLEncoder.encode(raw, UTF_8.name());
}
- catch (URISyntaxException e) {
+ catch (UnsupportedEncodingException e) {
throw new IllegalStateException(e);
}
}
+ static String getQuery(OperationParameters params) {
+ StringJoiner query = new StringJoiner("&", "?", "").setEmptyValue("");
+ if (params.createIfNonExistent()) query.add("create=true");
+ params.testAndSetCondition().ifPresent(condition -> query.add("condition=" + encode(condition)));
+ params.timeout().ifPresent(timeout -> query.add("timeout=" + timeout.toMillis() + "ms"));
+ params.route().ifPresent(route -> query.add("route=" + encode(route)));
+ params.tracelevel().ifPresent(tracelevel -> query.add("tracelevel=" + tracelevel));
+ return query.toString();
+ }
+
}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequest.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequest.java
new file mode 100644
index 00000000000..8da2f46def2
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequest.java
@@ -0,0 +1,42 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import java.util.Map;
+import java.util.function.Supplier;
+
+class HttpRequest {
+
+ private final String method;
+ private final String path;
+ private final Map<String, Supplier<String>> headers;
+ private final byte[] body;
+
+ public HttpRequest(String method, String path, Map<String, Supplier<String>> headers, byte[] body) {
+ this.method = method;
+ this.path = path;
+ this.headers = headers;
+ this.body = body;
+ }
+
+ public String method() {
+ return method;
+ }
+
+ public String path() {
+ return path;
+ }
+
+ public Map<String, Supplier<String>> headers() {
+ return headers;
+ }
+
+ public byte[] body() {
+ return body;
+ }
+
+ @Override
+ public String toString() {
+ return method + " " + path;
+ }
+
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java
index 7a6e2120be6..6b2aec5d8b3 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java
@@ -3,8 +3,6 @@ package ai.vespa.feed.client;
import ai.vespa.feed.client.FeedClient.CircuitBreaker;
import ai.vespa.feed.client.FeedClient.RetryStrategy;
-import org.apache.hc.client5.http.async.methods.SimpleHttpRequest;
-import org.apache.hc.client5.http.async.methods.SimpleHttpResponse;
import java.io.IOException;
import java.util.Map;
@@ -12,6 +10,8 @@ import java.util.Queue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.logging.Logger;
@@ -21,6 +21,7 @@ import static ai.vespa.feed.client.FeedClient.CircuitBreaker.State.HALF_OPEN;
import static ai.vespa.feed.client.FeedClient.CircuitBreaker.State.OPEN;
import static java.lang.Math.max;
import static java.lang.Math.min;
+import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.logging.Level.FINE;
import static java.util.logging.Level.WARNING;
@@ -51,27 +52,45 @@ class HttpRequestStrategy implements RequestStrategy {
private final AtomicBoolean destroyed = new AtomicBoolean(false);
private final AtomicLong delayedCount = new AtomicLong(0);
private final AtomicLong retries = new AtomicLong(0);
+ private final ExecutorService resultExecutor = Executors.newSingleThreadExecutor(runnable -> {
+ Thread thread = new Thread(runnable, "feed-client-result-executor");
+ thread.setDaemon(true);
+ return thread;
+ });
HttpRequestStrategy(FeedClientBuilder builder) throws IOException {
- this(builder, new HttpCluster(builder));
+ this(builder, new ApacheCluster(builder));
}
HttpRequestStrategy(FeedClientBuilder builder, Cluster cluster) {
- this.cluster = cluster;
+ this.cluster = builder.benchmark ? new BenchmarkingCluster(cluster) : cluster;
this.strategy = builder.retryStrategy;
this.breaker = builder.circuitBreaker;
this.maxInflight = builder.connectionsPerEndpoint * (long) builder.maxStreamsPerConnection;
this.minInflight = builder.connectionsPerEndpoint * (long) min(16, builder.maxStreamsPerConnection);
this.targetInflightX10 = new AtomicLong(10 * (long) (Math.sqrt(minInflight) * Math.sqrt(maxInflight)));
- new Thread(this::dispatch, "feed-client-dispatcher").start();
+
+ Thread dispatcher = new Thread(this::dispatch, "feed-client-dispatcher");
+ dispatcher.setDaemon(true);
+ dispatcher.start();
+ }
+
+ @Override
+ public OperationStats stats() {
+ return cluster.stats();
+ }
+
+ @Override
+ public CircuitBreaker.State circuitBreakerState() {
+ return breaker.state();
}
private void dispatch() {
try {
- while (breaker.state() != OPEN) {
+ while (breaker.state() != OPEN && ! destroyed.get()) {
while ( ! isInExcess() && poll() && breaker.state() == CLOSED);
// Sleep when circuit is half-open, nap when queue is empty, or we are throttled.
- Thread.sleep(breaker.state() == HALF_OPEN ? 1000 : 10);
+ Thread.sleep(breaker.state() == HALF_OPEN ? 1000 : 10); // TODO: Reduce throughput when turning half-open?
}
}
catch (InterruptedException e) {
@@ -98,15 +117,15 @@ class HttpRequestStrategy implements RequestStrategy {
return inflight.get() - delayedCount.get() > targetInflight();
}
- private boolean retry(SimpleHttpRequest request, int attempt) {
- if (attempt >= strategy.retries())
+ private boolean retry(HttpRequest request, int attempt) {
+ if (attempt > strategy.retries())
return false;
- switch (request.getMethod().toUpperCase()) {
+ switch (request.method().toUpperCase()) {
case "POST": return strategy.retry(FeedClient.OperationType.PUT);
case "PUT": return strategy.retry(FeedClient.OperationType.UPDATE);
case "DELETE": return strategy.retry(FeedClient.OperationType.REMOVE);
- default: throw new IllegalStateException("Unexpected HTTP method: " + request.getMethod());
+ default: throw new IllegalStateException("Unexpected HTTP method: " + request.method());
}
}
@@ -114,7 +133,7 @@ class HttpRequestStrategy implements RequestStrategy {
* Retries all IOExceptions, unless error rate has converged to a value higher than the threshold,
* or the user has turned off retries for this type of operation.
*/
- private boolean retry(SimpleHttpRequest request, Throwable thrown, int attempt) {
+ private boolean retry(HttpRequest request, Throwable thrown, int attempt) {
breaker.failure();
log.log(FINE, thrown, () -> "Failed attempt " + attempt + " at " + request);
@@ -137,23 +156,23 @@ class HttpRequestStrategy implements RequestStrategy {
}
/** Retries throttled requests (429, 503), adjusting the target inflight count, and server errors (500, 502). */
- private boolean retry(SimpleHttpRequest request, SimpleHttpResponse response, int attempt) {
- if (response.getCode() / 100 == 2) {
+ private boolean retry(HttpRequest request, HttpResponse response, int attempt) {
+ if (response.code() / 100 == 2) {
breaker.success();
incrementTargetInflight();
return false;
}
- log.log(FINE, () -> "Status code " + response.getCode() + " (" + response.getBodyText() +
+ log.log(FINE, () -> "Status code " + response.code() + " (" + new String(response.body(), UTF_8) +
") on attempt " + attempt + " at " + request);
- if (response.getCode() == 429 || response.getCode() == 503) { // Throttling; reduce target inflight.
+ if (response.code() == 429 || response.code() == 503) { // Throttling; reduce target inflight.
decreaseTargetInflight();
return true;
}
breaker.failure();
- if (response.getCode() == 500 || response.getCode() == 502 || response.getCode() == 504) // Hopefully temporary errors.
+ if (response.code() == 500 || response.code() == 502 || response.code() == 504) // Hopefully temporary errors.
return retry(request, attempt);
return false;
@@ -175,11 +194,6 @@ class HttpRequestStrategy implements RequestStrategy {
inflight.decrementAndGet();
}
- @Override
- public boolean hasFailed() {
- return breaker.state() == OPEN;
- }
-
public void await() {
try {
while (inflight.get() > 0)
@@ -191,9 +205,9 @@ class HttpRequestStrategy implements RequestStrategy {
}
@Override
- public CompletableFuture<SimpleHttpResponse> enqueue(DocumentId documentId, SimpleHttpRequest request) {
- CompletableFuture<SimpleHttpResponse> result = new CompletableFuture<>(); // Carries the aggregate result of the operation, including retries.
- CompletableFuture<SimpleHttpResponse> vessel = new CompletableFuture<>(); // Holds the computation of a single dispatch to the HTTP client.
+ public CompletableFuture<HttpResponse> enqueue(DocumentId documentId, HttpRequest request) {
+ CompletableFuture<HttpResponse> result = new CompletableFuture<>(); // Carries the aggregate result of the operation, including retries.
+ CompletableFuture<HttpResponse> vessel = new CompletableFuture<>(); // Holds the computation of a single dispatch to the HTTP client.
CompletableFuture<?> previous = inflightById.put(documentId, result);
if (destroyed.get()) {
result.cancel(true);
@@ -218,23 +232,24 @@ class HttpRequestStrategy implements RequestStrategy {
}
/** Handles the result of one attempt at the given operation, retrying if necessary. */
- private void handleAttempt(CompletableFuture<SimpleHttpResponse> vessel, SimpleHttpRequest request, CompletableFuture<SimpleHttpResponse> result, int attempt) {
- vessel.whenComplete((response, thrown) -> {
- // Retry the operation if it failed with a transient error ...
- if (thrown != null ? retry(request, thrown, attempt)
- : retry(request, response, attempt)) {
- retries.incrementAndGet();
- CircuitBreaker.State state = breaker.state();
- CompletableFuture<SimpleHttpResponse> retry = new CompletableFuture<>();
- offer(() -> cluster.dispatch(request, retry));
- handleAttempt(retry, request, result, attempt + (state == HALF_OPEN ? 0 : 1));
- }
- // ... or accept the outcome and mark the operation as complete.
- else {
- if (thrown == null) result.complete(response);
- else result.completeExceptionally(thrown);
- }
- });
+ private void handleAttempt(CompletableFuture<HttpResponse> vessel, HttpRequest request, CompletableFuture<HttpResponse> result, int attempt) {
+ vessel.whenCompleteAsync((response, thrown) -> {
+ // Retry the operation if it failed with a transient error ...
+ if (thrown != null ? retry(request, thrown, attempt)
+ : retry(request, response, attempt)) {
+ retries.incrementAndGet();
+ CircuitBreaker.State state = breaker.state();
+ CompletableFuture<HttpResponse> retry = new CompletableFuture<>();
+ offer(() -> cluster.dispatch(request, retry));
+ handleAttempt(retry, request, result, attempt + (state == HALF_OPEN ? 0 : 1));
+ }
+ // ... or accept the outcome and mark the operation as complete.
+ else {
+ if (thrown == null) result.complete(response);
+ else result.completeExceptionally(thrown);
+ }
+ },
+ resultExecutor);
}
@Override
@@ -243,6 +258,7 @@ class HttpRequestStrategy implements RequestStrategy {
inflightById.values().forEach(result -> result.cancel(true));
cluster.close();
+ resultExecutor.shutdown();
}
}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpResponse.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpResponse.java
new file mode 100644
index 00000000000..b1dd54240eb
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpResponse.java
@@ -0,0 +1,16 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+interface HttpResponse {
+
+ int code();
+ byte[] body();
+
+ static HttpResponse of(int code, byte[] body) {
+ return new HttpResponse() {
+ @Override public int code() { return code; }
+ @Override public byte[] body() { return body; }
+ };
+ }
+
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonFeeder.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonFeeder.java
new file mode 100644
index 00000000000..b3a7aca1808
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonFeeder.java
@@ -0,0 +1,472 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import ai.vespa.feed.client.FeedClient.OperationType;
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.core.JsonToken;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InterruptedIOException;
+import java.io.UncheckedIOException;
+import java.time.Duration;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static ai.vespa.feed.client.FeedClient.OperationType.PUT;
+import static ai.vespa.feed.client.FeedClient.OperationType.REMOVE;
+import static ai.vespa.feed.client.FeedClient.OperationType.UPDATE;
+import static com.fasterxml.jackson.core.JsonToken.END_ARRAY;
+import static com.fasterxml.jackson.core.JsonToken.START_ARRAY;
+import static com.fasterxml.jackson.core.JsonToken.START_OBJECT;
+import static com.fasterxml.jackson.core.JsonToken.VALUE_FALSE;
+import static com.fasterxml.jackson.core.JsonToken.VALUE_STRING;
+import static com.fasterxml.jackson.core.JsonToken.VALUE_TRUE;
+import static java.lang.Math.min;
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * @author jonmv
+ * @author bjorncs
+ */
+public class JsonFeeder implements Closeable {
+
+ private final ExecutorService resultExecutor = Executors.newSingleThreadExecutor(r -> {
+ Thread t = new Thread(r, "json-feeder-result-executor");
+ t.setDaemon(true);
+ return t;
+ });
+ private final FeedClient client;
+ private final OperationParameters protoParameters;
+
+ private JsonFeeder(FeedClient client, OperationParameters protoParameters) {
+ this.client = client;
+ this.protoParameters = protoParameters;
+ }
+
+ public interface ResultCallback {
+ /**
+ * Invoked after each operation has either completed successfully or failed
+ *
+ * @param result Non-null if operation completed successfully
+ * @param error Non-null if operation failed
+ */
+ default void onNextResult(Result result, Throwable error) { }
+
+ /**
+ * Invoked if an unrecoverable error occurred during feed processing,
+ * after which no other {@link ResultCallback} methods are invoked.
+ */
+ default void onError(Throwable error) { }
+
+ /**
+ * Invoked when all feed operations are either completed successfully or failed.
+ */
+ default void onComplete() { }
+ }
+
+ public static Builder builder(FeedClient client) { return new Builder(client); }
+
+ /** Feeds a single JSON feed operation of the form
+ * <pre>
+ * {
+ * "id": "id:ns:type::boo",
+ * "fields": { ... document fields ... }
+ * }
+ * </pre>
+ */
+ public CompletableFuture<Result> feedSingle(String json) {
+ CompletableFuture<Result> result = new CompletableFuture<>();
+ try {
+ SingleOperationParserAndExecutor parser = new SingleOperationParserAndExecutor(json.getBytes(UTF_8));
+ parser.next().whenCompleteAsync((operationResult, error) -> {
+ if (error != null) {
+ result.completeExceptionally(error);
+ } else {
+ result.complete(operationResult);
+ }
+ }, resultExecutor);
+ } catch (Exception e) {
+ resultExecutor.execute(() -> result.completeExceptionally(e));
+ }
+ return result;
+ }
+
+ /** Feeds a stream containing a JSON array of feed operations of the form
+ * <pre>
+ * [
+ * {
+ * "id": "id:ns:type::boo",
+ * "fields": { ... document fields ... }
+ * },
+ * {
+ * "put": "id:ns:type::foo",
+ * "fields": { ... document fields ... }
+ * },
+ * {
+ * "update": "id:ns:type:n=4:bar",
+ * "create": true,
+ * "fields": { ... partial update fields ... }
+ * },
+ * {
+ * "remove": "id:ns:type:g=foo:bar",
+ * "condition": "type.baz = \"bax\""
+ * },
+ * ...
+ * ]
+ * </pre>
+ * Note that {@code "id"} is an alias for the document put operation.
+ */
+ public CompletableFuture<Void> feedMany(InputStream jsonStream, ResultCallback resultCallback) {
+ return feedMany(jsonStream, 1 << 26, resultCallback);
+ }
+
+ public CompletableFuture<Void> feedMany(InputStream jsonStream) {
+ return feedMany(jsonStream, new ResultCallback() { });
+ }
+
+ CompletableFuture<Void> feedMany(InputStream jsonStream, int size, ResultCallback resultCallback) {
+ RingBufferStream buffer = new RingBufferStream(jsonStream, size);
+ CompletableFuture<Void> overallResult = new CompletableFuture<>();
+ CompletableFuture<Result> result;
+ AtomicInteger pending = new AtomicInteger(1); // The below dispatch loop itself is counted as a single pending operation
+ AtomicBoolean finalCallbackInvoked = new AtomicBoolean();
+ try {
+ while ((result = buffer.next()) != null) {
+ pending.incrementAndGet();
+ result.whenCompleteAsync((r, t) -> {
+ if (!finalCallbackInvoked.get()) {
+ resultCallback.onNextResult(r, t);
+ }
+ if (pending.decrementAndGet() == 0 && finalCallbackInvoked.compareAndSet(false, true)) {
+ resultCallback.onComplete();
+ overallResult.complete(null);
+ }
+ }, resultExecutor);
+ }
+ if (pending.decrementAndGet() == 0 && finalCallbackInvoked.compareAndSet(false, true)) {
+ resultExecutor.execute(() -> {
+ resultCallback.onComplete();
+ overallResult.complete(null);
+ });
+ }
+ } catch (Exception e) {
+ if (finalCallbackInvoked.compareAndSet(false, true)) {
+ resultExecutor.execute(() -> {
+ resultCallback.onError(e);
+ overallResult.completeExceptionally(e);
+ });
+ }
+ }
+ return overallResult;
+ }
+
+ private static final JsonFactory factory = new JsonFactory();
+
+ @Override public void close() throws IOException {
+ client.close();
+ resultExecutor.shutdown();
+ try {
+ if (!resultExecutor.awaitTermination(30, TimeUnit.SECONDS)) {
+ throw new IOException("Failed to close client in time");
+ }
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+ }
+
+ private class RingBufferStream extends InputStream {
+
+ private final byte[] b = new byte[1];
+ private final InputStream in;
+ private final byte[] data;
+ private final int size;
+ private final Object lock = new Object();
+ private Throwable thrown = null;
+ private long tail = 0;
+ private long pos = 0;
+ private long head = 0;
+ private boolean done = false;
+ private final OperationParserAndExecutor parserAndExecutor;
+
+ RingBufferStream(InputStream in, int size) {
+ this.in = in;
+ this.data = new byte[size];
+ this.size = size;
+
+ new Thread(this::fill, "feed-reader").start();
+
+ try { this.parserAndExecutor = new RingBufferBackedOperationParserAndExecutor(factory.createParser(this)); }
+ catch (IOException e) { throw new UncheckedIOException(e); }
+ }
+
+ @Override
+ public int read() throws IOException {
+ return read(b, 0, 1) == -1 ? -1 : b[0];
+ }
+
+ @Override
+ public int read(byte[] buffer, int off, int len) throws IOException {
+ try {
+ int ready;
+ synchronized (lock) {
+ while ((ready = (int) (head - pos)) == 0 && ! done)
+ lock.wait();
+ }
+ if (thrown != null) throw new RuntimeException("Error reading input", thrown);
+ if (ready == 0) return -1;
+
+ ready = min(ready, len);
+ int offset = (int) (pos % size);
+ int length = min(ready, size - offset);
+ System.arraycopy(data, offset, buffer, off, length);
+ if (length < ready)
+ System.arraycopy(data, 0, buffer, off + length, ready - length);
+
+ pos += ready;
+ return ready;
+ }
+ catch (InterruptedException e) {
+ throw new InterruptedIOException("Interrupted waiting for data: " + e.getMessage());
+ }
+ }
+
+ public CompletableFuture<Result> next() throws IOException {
+ return parserAndExecutor.next();
+ }
+
+ private final byte[] prefix = "{\"fields\":".getBytes(UTF_8);
+ private byte[] copy(long start, long end) {
+ int length = (int) (end - start);
+ byte[] buffer = new byte[prefix.length + length + 1];
+ System.arraycopy(prefix, 0, buffer, 0, prefix.length);
+
+ int offset = (int) (start % size);
+ int toWrite = min(length, size - offset);
+ System.arraycopy(data, offset, buffer, prefix.length, toWrite);
+ if (toWrite < length)
+ System.arraycopy(data, 0, buffer, prefix.length + toWrite, length - toWrite);
+
+ buffer[buffer.length - 1] = '}';
+ return buffer;
+ }
+
+
+ @Override
+ public void close() throws IOException {
+ synchronized (lock) {
+ done = true;
+ lock.notifyAll();
+ }
+ in.close();
+ }
+
+ private void fill() {
+ try {
+ while (true) {
+ int free;
+ synchronized (lock) {
+ while ((free = (int) (tail + size - head)) <= 0 && ! done)
+ lock.wait();
+ }
+ if (done) break;
+
+ int off = (int) (head % size);
+ int len = min(min(free, size - off), 1 << 13);
+ int read = in.read(data, off, len);
+
+ synchronized (lock) {
+ if (read < 0) done = true;
+ else head += read;
+ lock.notify();
+ }
+ }
+ }
+ catch (Throwable t) {
+ synchronized (lock) {
+ done = true;
+ thrown = t;
+ }
+ }
+ }
+
+ private class RingBufferBackedOperationParserAndExecutor extends OperationParserAndExecutor {
+
+ RingBufferBackedOperationParserAndExecutor(JsonParser parser) throws IOException { super(parser, true); }
+
+ @Override
+ String getDocumentJson(long start, long end) {
+ String payload = new String(copy(start, end), UTF_8);
+ synchronized (lock) {
+ tail = end;
+ lock.notify();
+ }
+ return payload;
+ }
+ }
+ }
+
+ private class SingleOperationParserAndExecutor extends OperationParserAndExecutor {
+
+ private final byte[] json;
+
+ SingleOperationParserAndExecutor(byte[] json) throws IOException {
+ super(factory.createParser(json), false);
+ this.json = json;
+ }
+
+ @Override
+ String getDocumentJson(long start, long end) {
+ return new String(json, (int) start, (int) (end - start), UTF_8);
+ }
+ }
+
+ private abstract class OperationParserAndExecutor {
+
+ private final JsonParser parser;
+ private final boolean multipleOperations;
+ private boolean arrayPrefixParsed;
+
+ protected OperationParserAndExecutor(JsonParser parser, boolean multipleOperations) throws IOException {
+ this.parser = parser;
+ this.multipleOperations = multipleOperations;
+ }
+
+ abstract String getDocumentJson(long start, long end);
+
+ CompletableFuture<Result> next() throws IOException {
+ try {
+ if (multipleOperations && !arrayPrefixParsed){
+ expect(START_ARRAY);
+ arrayPrefixParsed = true;
+ }
+
+ JsonToken token = parser.nextToken();
+ if (token == END_ARRAY && multipleOperations) return null;
+ else if (token == null && !multipleOperations) return null;
+ else if (token == START_OBJECT);
+ else throw new JsonParseException("Unexpected token '" + parser.currentToken() + "' at offset " + parser.getTokenLocation().getByteOffset());
+ long start = 0, end = -1;
+ OperationType type = null;
+ DocumentId id = null;
+ OperationParameters parameters = protoParameters;
+ loop: while (true) {
+ switch (parser.nextToken()) {
+ case FIELD_NAME:
+ switch (parser.getText()) {
+ case "id":
+ case "put": type = PUT; id = readId(); break;
+ case "update": type = UPDATE; id = readId(); break;
+ case "remove": type = REMOVE; id = readId(); break;
+ case "condition": parameters = parameters.testAndSetCondition(readString()); break;
+ case "create": parameters = parameters.createIfNonExistent(readBoolean()); break;
+ case "fields": {
+ expect(START_OBJECT);
+ start = parser.getTokenLocation().getByteOffset();
+ int depth = 1;
+ while (depth > 0) switch (parser.nextToken()) {
+ case START_OBJECT: ++depth; break;
+ case END_OBJECT: --depth; break;
+ }
+ end = parser.getTokenLocation().getByteOffset() + 1;
+ break;
+ }
+ default: throw new JsonParseException("Unexpected field name '" + parser.getText() + "' at offset " +
+ parser.getTokenLocation().getByteOffset());
+ }
+ break;
+
+ case END_OBJECT:
+ break loop;
+
+ default:
+ throw new JsonParseException("Unexpected token '" + parser.currentToken() + "' at offset " +
+ parser.getTokenLocation().getByteOffset());
+ }
+ }
+ if (id == null)
+ throw new JsonParseException("No document id for document at offset " + start);
+
+ if (end < start)
+ throw new JsonParseException("No 'fields' object for document at offset " + parser.getTokenLocation().getByteOffset());
+ String payload = getDocumentJson(start, end);
+ switch (type) {
+ case PUT: return client.put (id, payload, parameters);
+ case UPDATE: return client.update(id, payload, parameters);
+ case REMOVE: return client.remove(id, parameters);
+ default: throw new JsonParseException("Unexpected operation type '" + type + "'");
+ }
+ } catch (com.fasterxml.jackson.core.JacksonException e) {
+ throw new JsonParseException("Failed to parse JSON", e);
+ }
+ }
+
+ private void expect(JsonToken token) throws IOException {
+ if (parser.nextToken() != token)
+ throw new JsonParseException("Expected '" + token + "' at offset " + parser.getTokenLocation().getByteOffset() +
+ ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
+ }
+
+ private String readString() throws IOException {
+ String value = parser.nextTextValue();
+ if (value == null)
+ throw new JsonParseException("Expected '" + VALUE_STRING + "' at offset " + parser.getTokenLocation().getByteOffset() +
+ ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
+
+ return value;
+ }
+
+ private boolean readBoolean() throws IOException {
+ Boolean value = parser.nextBooleanValue();
+ if (value == null)
+ throw new JsonParseException("Expected '" + VALUE_FALSE + "' or '" + VALUE_TRUE + "' at offset " + parser.getTokenLocation().getByteOffset() +
+ ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
+
+ return value;
+
+ }
+
+ private DocumentId readId() throws IOException {
+ return DocumentId.of(readString());
+ }
+
+ }
+
+
+ public static class Builder {
+
+ final FeedClient client;
+ OperationParameters parameters = OperationParameters.empty();
+
+ private Builder(FeedClient client) {
+ this.client = requireNonNull(client);
+ }
+
+ public Builder withTimeout(Duration timeout) {
+ parameters = parameters.timeout(timeout);
+ return this;
+ }
+
+ public Builder withRoute(String route) {
+ parameters = parameters.route(route);
+ return this;
+ }
+
+ public Builder withTracelevel(int tracelevel) {
+ parameters = parameters.tracelevel(tracelevel);
+ return this;
+ }
+
+ public JsonFeeder build() {
+ return new JsonFeeder(client, parameters);
+ }
+
+ }
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonParseException.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonParseException.java
new file mode 100644
index 00000000000..8edf74ec275
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonParseException.java
@@ -0,0 +1,15 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+/**
+ * Signals that the supplied JSON is invalid
+ *
+ * @author bjorncs
+ */
+public class JsonParseException extends FeedException {
+
+ public JsonParseException(String message) { super(message); }
+
+ public JsonParseException(String message, Throwable cause) { super(message, cause); }
+
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonStreamFeeder.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonStreamFeeder.java
deleted file mode 100644
index 99d05a4bae8..00000000000
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonStreamFeeder.java
+++ /dev/null
@@ -1,364 +0,0 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
-
-import ai.vespa.feed.client.FeedClient.OperationType;
-import com.fasterxml.jackson.core.JsonFactory;
-import com.fasterxml.jackson.core.JsonParser;
-import com.fasterxml.jackson.core.JsonToken;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InterruptedIOException;
-import java.io.UncheckedIOException;
-import java.time.Duration;
-import java.util.Optional;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReference;
-
-import static ai.vespa.feed.client.FeedClient.OperationType.PUT;
-import static ai.vespa.feed.client.FeedClient.OperationType.REMOVE;
-import static ai.vespa.feed.client.FeedClient.OperationType.UPDATE;
-import static com.fasterxml.jackson.core.JsonToken.START_OBJECT;
-import static com.fasterxml.jackson.core.JsonToken.VALUE_FALSE;
-import static com.fasterxml.jackson.core.JsonToken.VALUE_STRING;
-import static com.fasterxml.jackson.core.JsonToken.VALUE_TRUE;
-import static java.lang.Math.min;
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static java.util.Objects.requireNonNull;
-
-/**
- * @author jonmv
- */
-public class JsonStreamFeeder implements Closeable {
-
- private final FeedClient client;
- private final OperationParameters protoParameters;
-
- private JsonStreamFeeder(FeedClient client, OperationParameters protoParameters) {
- this.client = client;
- this.protoParameters = protoParameters;
- }
-
- public static Builder builder(FeedClient client) { return new Builder(client); }
-
- /** Feeds a stream containing a JSON array of feed operations on the form
- * <pre>
- * [
- * {
- * "id": "id:ns:type::boo",
- * "fields": { ... document fields ... }
- * },
- * {
- * "put": "id:ns:type::foo",
- * "fields": { ... document fields ... }
- * },
- * {
- * "update": "id:ns:type:n=4:bar",
- * "create": true,
- * "fields": { ... partial update fields ... }
- * },
- * {
- * "remove": "id:ns:type:g=foo:bar",
- * "condition": "type.baz = \"bax\""
- * },
- * ...
- * ]
- * </pre>
- * Note that {@code "id"} is an alias for the document put operation.
- */
- public void feed(InputStream jsonStream) throws IOException {
- feed(jsonStream, 1 << 26, false);
- }
-
- BenchmarkResult benchmark(InputStream jsonStream) throws IOException {
- return feed(jsonStream, 1 << 26, true).get();
- }
-
- Optional<BenchmarkResult> feed(InputStream jsonStream, int size, boolean benchmark) throws IOException {
- RingBufferStream buffer = new RingBufferStream(jsonStream, size);
- buffer.expect(JsonToken.START_ARRAY);
- AtomicInteger okCount = new AtomicInteger();
- AtomicInteger failedCount = new AtomicInteger();
- long startTime = System.nanoTime();
- CompletableFuture<Result> result;
- AtomicReference<Throwable> thrown = new AtomicReference<>();
- while ((result = buffer.next()) != null) {
- result.whenComplete((r, t) -> {
- if (t != null) {
- failedCount.incrementAndGet();
- if (!benchmark) thrown.set(t);
- } else
- okCount.incrementAndGet();
- });
- if (thrown.get() != null)
- sneakyThrow(thrown.get());
- }
- if (!benchmark) return Optional.empty();
- Duration duration = Duration.ofNanos(System.nanoTime() - startTime);
- double throughPut = (double)okCount.get() / duration.toMillis() * 1000D;
- return Optional.of(new BenchmarkResult(okCount.get(), failedCount.get(), duration, throughPut));
- }
-
- @SuppressWarnings("unchecked")
- static <T extends Throwable> void sneakyThrow(Throwable thrown) throws T { throw (T) thrown; }
-
- private static final JsonFactory factory = new JsonFactory();
-
- @Override public void close() throws IOException { client.close(); }
-
- private class RingBufferStream extends InputStream {
-
- private final byte[] b = new byte[1];
- private final InputStream in;
- private final byte[] data;
- private final int size;
- private final Object lock = new Object();
- private final JsonParser parser;
- private Throwable thrown = null;
- private long tail = 0;
- private long pos = 0;
- private long head = 0;
- private boolean done = false;
-
- RingBufferStream(InputStream in, int size) {
- this.in = in;
- this.data = new byte[size];
- this.size = size;
-
- new Thread(this::fill, "feed-reader").start();
-
- try { this.parser = factory.createParser(this); }
- catch (IOException e) { throw new UncheckedIOException(e); }
- }
-
- @Override
- public int read() throws IOException {
- return read(b, 0, 1) == -1 ? -1 : b[0];
- }
-
- @Override
- public int read(byte[] buffer, int off, int len) throws IOException {
- try {
- int ready;
- synchronized (lock) {
- while ((ready = (int) (head - pos)) == 0 && ! done)
- lock.wait();
- }
- if (thrown != null) throw new RuntimeException("Error reading input", thrown);
- if (ready == 0) return -1;
-
- ready = min(ready, len);
- int offset = (int) (pos % size);
- int length = min(ready, size - offset);
- System.arraycopy(data, offset, buffer, off, length);
- if (length < ready)
- System.arraycopy(data, 0, buffer, off + length, ready - length);
-
- pos += ready;
- return ready;
- }
- catch (InterruptedException e) {
- throw new InterruptedIOException("Interrupted waiting for data: " + e.getMessage());
- }
- }
-
- void expect(JsonToken token) throws IOException {
- if (parser.nextToken() != token)
- throw new IllegalArgumentException("Expected '" + token + "' at offset " + parser.getTokenLocation().getByteOffset() +
- ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
- }
-
- public CompletableFuture<Result> next() throws IOException {
- long start = 0, end = -1;
- OperationType type = null;
- DocumentId id = null;
- OperationParameters parameters = protoParameters;
- switch (parser.nextToken()) {
- case END_ARRAY: return null;
- case START_OBJECT: break;
- default: throw new IllegalArgumentException("Unexpected token '" + parser.currentToken() + "' at offset " +
- parser.getTokenLocation().getByteOffset());
- }
-
- loop: while (true) {
- switch (parser.nextToken()) {
- case FIELD_NAME:
- switch (parser.getText()) {
- case "id":
- case "put": type = PUT; id = readId(); break;
- case "update": type = UPDATE; id = readId(); break;
- case "remove": type = REMOVE; id = readId(); break;
- case "condition": parameters = parameters.testAndSetCondition(readString()); break;
- case "create": parameters = parameters.createIfNonExistent(readBoolean()); break;
- case "fields": {
- expect(START_OBJECT);
- start = parser.getTokenLocation().getByteOffset();
- int depth = 1;
- while (depth > 0) switch (parser.nextToken()) {
- case START_OBJECT: ++depth; break;
- case END_OBJECT: --depth; break;
- }
- end = parser.getTokenLocation().getByteOffset() + 1;
- break;
- }
- default: throw new IllegalArgumentException("Unexpected field name '" + parser.getText() + "' at offset " +
- parser.getTokenLocation().getByteOffset());
- }
- break;
-
- case END_OBJECT:
- break loop;
-
- default:
- throw new IllegalArgumentException("Unexpected token '" + parser.currentToken() + "' at offset " +
- parser.getTokenLocation().getByteOffset());
- }
- }
-
- if (id == null)
- throw new IllegalArgumentException("No document id for document at offset " + start);
-
- if (end < start)
- throw new IllegalArgumentException("No 'fields' object for document at offset " + parser.getTokenLocation().getByteOffset());
-
- String payload = new String(copy(start, end), UTF_8);
- synchronized (lock) {
- tail = end;
- lock.notify();
- }
-
- switch (type) {
- case PUT: return client.put (id, payload, parameters);
- case UPDATE: return client.update(id, payload, parameters);
- case REMOVE: return client.remove(id, parameters);
- default: throw new IllegalStateException("Unexpected operation type '" + type + "'");
- }
- }
-
- private final byte[] prefix = "{\"fields\":".getBytes(UTF_8);
- private byte[] copy(long start, long end) {
- int length = (int) (end - start);
- byte[] buffer = new byte[prefix.length + length + 1];
- System.arraycopy(prefix, 0, buffer, 0, prefix.length);
-
- int offset = (int) (start % size);
- int toWrite = min(length, size - offset);
- System.arraycopy(data, offset, buffer, prefix.length, toWrite);
- if (toWrite < length)
- System.arraycopy(data, 0, buffer, prefix.length + toWrite, length - toWrite);
-
- buffer[buffer.length - 1] = '}';
- return buffer;
- }
-
- private String readString() throws IOException {
- String value = parser.nextTextValue();
- if (value == null)
- throw new IllegalArgumentException("Expected '" + VALUE_STRING + "' at offset " + parser.getTokenLocation().getByteOffset() +
- ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
-
- return value;
- }
-
- private boolean readBoolean() throws IOException {
- Boolean value = parser.nextBooleanValue();
- if (value == null)
- throw new IllegalArgumentException("Expected '" + VALUE_FALSE + "' or '" + VALUE_TRUE + "' at offset " + parser.getTokenLocation().getByteOffset() +
- ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
-
- return value;
-
- }
-
- private DocumentId readId() throws IOException {
- return DocumentId.of(readString());
- }
-
- @Override
- public void close() throws IOException {
- synchronized (lock) {
- done = true;
- lock.notifyAll();
- }
- in.close();
- }
-
- private void fill() {
- try {
- while (true) {
- int free;
- synchronized (lock) {
- while ((free = (int) (tail + size - head)) <= 0 && ! done)
- lock.wait();
- }
- if (done) break;
-
- int off = (int) (head % size);
- int len = min(min(free, size - off), 1 << 13);
- int read = in.read(data, off, len);
-
- synchronized (lock) {
- if (read < 0) done = true;
- else head += read;
- lock.notify();
- }
- }
- }
- catch (Throwable t) {
- synchronized (lock) {
- done = true;
- thrown = t;
- }
- }
- }
-
- }
-
-
- public static class Builder {
-
- final FeedClient client;
- OperationParameters parameters = OperationParameters.empty();
-
- private Builder(FeedClient client) {
- this.client = requireNonNull(client);
- }
-
- public Builder withTimeout(Duration timeout) {
- parameters = parameters.timeout(timeout);
- return this;
- }
-
- public Builder withRoute(String route) {
- parameters = parameters.route(route);
- return this;
- }
-
- public Builder withTracelevel(int tracelevel) {
- parameters = parameters.tracelevel(tracelevel);
- return this;
- }
-
- public JsonStreamFeeder build() {
- return new JsonStreamFeeder(client, parameters);
- }
-
- }
-
- static class BenchmarkResult {
- final int okCount;
- final int errorCount;
- final Duration duration;
- final double throughput;
-
- BenchmarkResult(int okCount, int errorCount, Duration duration, double throughput) {
- this.okCount = okCount;
- this.errorCount = errorCount;
- this.duration = duration;
- this.throughput = throughput;
- }
- }
-
-}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParameters.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParameters.java
index 22546f89ccb..8c20a37d224 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParameters.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParameters.java
@@ -7,6 +7,8 @@ import java.util.Optional;
import java.util.OptionalInt;
/**
+ * Per-operation feed parameters
+ *
* @author bjorncs
* @author jonmv
*/
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationStats.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationStats.java
new file mode 100644
index 00000000000..d36475a51fb
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationStats.java
@@ -0,0 +1,96 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import java.util.Map;
+
+/**
+ * Statistics for feed operations over HTTP against a Vespa cluster.
+ *
+ * @author jonmv
+ */
+public class OperationStats {
+
+ private final long requests;
+ private final Map<Integer, Long> responsesByCode;
+ private final long inflight;
+ private final long exceptions;
+ private final long averageLatencyMillis;
+ private final long minLatencyMillis;
+ private final long maxLatencyMillis;
+ private final long bytesSent;
+ private final long bytesReceived;
+
+ public OperationStats(long requests, Map<Integer, Long> responsesByCode, long exceptions, long inflight,
+ long averageLatencyMillis, long minLatencyMillis, long maxLatencyMillis,
+ long bytesSent, long bytesReceived) {
+ this.requests = requests;
+ this.responsesByCode = responsesByCode;
+ this.exceptions = exceptions;
+ this.inflight = inflight;
+ this.averageLatencyMillis = averageLatencyMillis;
+ this.minLatencyMillis = minLatencyMillis;
+ this.maxLatencyMillis = maxLatencyMillis;
+ this.bytesSent = bytesSent;
+ this.bytesReceived = bytesReceived;
+ }
+
+ public long requests() {
+ return requests;
+ }
+
+ public long responses() {
+ return requests - inflight;
+ }
+
+ public long successes() {
+ return responsesByCode.getOrDefault(200, 0L);
+ }
+
+ public Map<Integer, Long> responsesByCode() {
+ return responsesByCode;
+ }
+
+ public long exceptions() {
+ return exceptions;
+ }
+
+ public long inflight() {
+ return inflight;
+ }
+
+ public long averageLatencyMillis() {
+ return averageLatencyMillis;
+ }
+
+ public long minLatencyMillis() {
+ return minLatencyMillis;
+ }
+
+ public long maxLatencyMillis() {
+ return maxLatencyMillis;
+ }
+
+ public long bytesSent() {
+ return bytesSent;
+ }
+
+ public long bytesReceived() {
+ return bytesReceived;
+ }
+
+ @Override
+ public String toString() {
+ return "Stats{" +
+ "requests=" + requests +
+ ", responsesByCode=" + responsesByCode +
+ ", exceptions=" + exceptions +
+ ", inflight=" + inflight +
+ ", averageLatencyMillis=" + averageLatencyMillis +
+ ", minLatencyMillis=" + minLatencyMillis +
+ ", maxLatencyMillis=" + maxLatencyMillis +
+ ", bytesSent=" + bytesSent +
+ ", bytesReceived=" + bytesReceived +
+ '}';
+ }
+
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/RequestStrategy.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/RequestStrategy.java
index bda214405b5..a1101eb0ebb 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/RequestStrategy.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/RequestStrategy.java
@@ -1,12 +1,9 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.feed.client;
-import org.apache.hc.client5.http.async.methods.SimpleHttpRequest;
-import org.apache.hc.client5.http.async.methods.SimpleHttpResponse;
+import ai.vespa.feed.client.FeedClient.CircuitBreaker.State;
-import java.io.Closeable;
import java.util.concurrent.CompletableFuture;
-import java.util.function.BiConsumer;
/**
* Controls execution of feed operations.
@@ -15,8 +12,11 @@ import java.util.function.BiConsumer;
*/
interface RequestStrategy {
- /** Whether this has failed fatally, and we should cease sending further operations. */
- boolean hasFailed();
+ /** Stats for operations sent through this. */
+ OperationStats stats();
+
+ /** State of the circuit breaker. */
+ State circuitBreakerState();
/** Forcibly terminates this, causing all inflight operations to complete immediately. */
void destroy();
@@ -25,6 +25,6 @@ interface RequestStrategy {
void await();
/** Enqueue the given operation, returning its future result. This may block if the client send queue is full. */
- CompletableFuture<SimpleHttpResponse> enqueue(DocumentId documentId, SimpleHttpRequest request);
+ CompletableFuture<HttpResponse> enqueue(DocumentId documentId, HttpRequest request);
}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Result.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/Result.java
index 31a6cf6e893..b29d65e193b 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Result.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/Result.java
@@ -4,6 +4,8 @@ package ai.vespa.feed.client;
import java.util.Optional;
/**
+ * Result for a document operation
+ *
* @author bjorncs
* @author jonmv
*/
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/SslContextBuilder.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/SslContextBuilder.java
index 7200d5fd943..9114e22f4a6 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/SslContextBuilder.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/SslContextBuilder.java
@@ -20,11 +20,14 @@ import java.nio.file.Path;
import java.security.GeneralSecurityException;
import java.security.KeyFactory;
import java.security.KeyStore;
+import java.security.KeyStoreException;
import java.security.PrivateKey;
import java.security.cert.Certificate;
import java.security.cert.X509Certificate;
import java.security.spec.PKCS8EncodedKeySpec;
import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
import java.util.List;
/**
@@ -39,6 +42,9 @@ class SslContextBuilder {
private Path certificateFile;
private Path privateKeyFile;
private Path caCertificatesFile;
+ private Collection<X509Certificate> certificate;
+ private PrivateKey privateKey;
+ private Collection<X509Certificate> caCertificates;
SslContextBuilder withCertificateAndKey(Path certificate, Path privateKey) {
this.certificateFile = certificate;
@@ -46,20 +52,35 @@ class SslContextBuilder {
return this;
}
+ SslContextBuilder withCertificateAndKey(Collection<X509Certificate> certificate, PrivateKey privateKey) {
+ this.certificate = certificate;
+ this.privateKey = privateKey;
+ return this;
+ }
+
SslContextBuilder withCaCertificates(Path caCertificates) {
this.caCertificatesFile = caCertificates;
return this;
}
+ SslContextBuilder withCaCertificates(Collection<X509Certificate> caCertificates) {
+ this.caCertificates = caCertificates;
+ return this;
+ }
+
SSLContext build() throws IOException {
try {
KeyStore keystore = KeyStore.getInstance("PKCS12");
keystore.load(null);
if (certificateFile != null && privateKeyFile != null) {
keystore.setKeyEntry("cert", privateKey(privateKeyFile), new char[0], certificates(certificateFile));
+ } else if (certificate != null && privateKey != null) {
+ keystore.setKeyEntry("cert", privateKey, new char[0], certificate.toArray(new Certificate[0]));
}
if (caCertificatesFile != null) {
- keystore.setCertificateEntry("ca-cert", certificates(caCertificatesFile)[0]);
+ addCaCertificates(keystore, Arrays.asList(certificates(caCertificatesFile)));
+ } else if (caCertificates != null) {
+ addCaCertificates(keystore, caCertificates);
}
KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
kmf.init(keystore, new char[0]);
@@ -73,6 +94,13 @@ class SslContextBuilder {
}
}
+ private static void addCaCertificates(KeyStore keystore, Collection<? extends Certificate> certificates) throws KeyStoreException {
+ int i = 0;
+ for (Certificate cert : certificates) {
+ keystore.setCertificateEntry("ca-cert-" + ++i, cert);
+ }
+ }
+
private static Certificate[] certificates(Path file) throws IOException, GeneralSecurityException {
try (PEMParser parser = new PEMParser(Files.newBufferedReader(file))) {
List<X509Certificate> result = new ArrayList<>();
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/package-info.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/package-info.java
new file mode 100644
index 00000000000..e058b9b921e
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/package-info.java
@@ -0,0 +1,9 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+
+@PublicApi
+package ai.vespa.feed.client;
+
+import com.yahoo.api.annotations.PublicApi; \ No newline at end of file
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/GracePeriodCircuitBreakerTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/GracePeriodCircuitBreakerTest.java
new file mode 100644
index 00000000000..9b30ebfd0aa
--- /dev/null
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/GracePeriodCircuitBreakerTest.java
@@ -0,0 +1,60 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import ai.vespa.feed.client.FeedClient.CircuitBreaker;
+import org.junit.jupiter.api.Test;
+
+import java.time.Duration;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static ai.vespa.feed.client.FeedClient.CircuitBreaker.State.CLOSED;
+import static ai.vespa.feed.client.FeedClient.CircuitBreaker.State.HALF_OPEN;
+import static ai.vespa.feed.client.FeedClient.CircuitBreaker.State.OPEN;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+/**
+ * @author jonmv
+ */
+class GracePeriodCircuitBreakerTest {
+
+ @Test
+ void testCircuitBreaker() {
+ AtomicLong now = new AtomicLong(0);
+ long SECOND = 1000;
+ CircuitBreaker breaker = new GracePeriodCircuitBreaker(now::get, Duration.ofSeconds(1), Duration.ofMinutes(1));
+
+ assertEquals(CLOSED, breaker.state(), "Initial state is closed");
+
+ now.addAndGet(100 * SECOND);
+ assertEquals(CLOSED, breaker.state(), "State is closed after some time without activity");
+
+ breaker.success();
+ assertEquals(CLOSED, breaker.state(), "State is closed after a success");
+
+ now.addAndGet(100 * SECOND);
+ assertEquals(CLOSED, breaker.state(), "State is closed some time after a success");
+
+ breaker.failure();
+ assertEquals(CLOSED, breaker.state(), "State is closed right after a failure");
+
+ now.addAndGet(SECOND);
+ assertEquals(CLOSED, breaker.state(), "State is closed until grace period has passed");
+
+ now.addAndGet(1);
+ assertEquals(HALF_OPEN, breaker.state(), "State is half-open when grace period has passed");
+
+ breaker.success();
+ assertEquals(CLOSED, breaker.state(), "State is closed after a new success");
+
+ breaker.failure();
+ now.addAndGet(60 * SECOND);
+ assertEquals(HALF_OPEN, breaker.state(), "State is half-open until doom period has passed");
+
+ now.addAndGet(1);
+ assertEquals(OPEN, breaker.state(), "State is open when doom period has passed");
+
+ breaker.success();
+ assertEquals(OPEN, breaker.state(), "State remains open in spite of new successes");
+ }
+
+}
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpFeedClientTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpFeedClientTest.java
new file mode 100644
index 00000000000..d8090549420
--- /dev/null
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpFeedClientTest.java
@@ -0,0 +1,101 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import org.junit.jupiter.api.Test;
+
+import java.net.URI;
+import java.time.Duration;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.BiFunction;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+/**
+ * @author jonmv
+ */
+class HttpFeedClientTest {
+
+ @Test
+ void testFeeding() throws ExecutionException, InterruptedException {
+ DocumentId id = DocumentId.of("ns", "type", "0");
+ AtomicReference<BiFunction<DocumentId, HttpRequest, CompletableFuture<HttpResponse>>> dispatch = new AtomicReference<>();
+ class MockRequestStrategy implements RequestStrategy {
+ @Override public OperationStats stats() { throw new UnsupportedOperationException(); }
+ @Override public FeedClient.CircuitBreaker.State circuitBreakerState() { return FeedClient.CircuitBreaker.State.CLOSED; }
+ @Override public void destroy() { throw new UnsupportedOperationException(); }
+ @Override public void await() { throw new UnsupportedOperationException(); }
+ @Override public CompletableFuture<HttpResponse> enqueue(DocumentId documentId, HttpRequest request) { return dispatch.get().apply(documentId, request); }
+ }
+ FeedClient client = new HttpFeedClient(FeedClientBuilder.create(URI.create("https://dummy:123")), new MockRequestStrategy());
+
+ // Vespa error is an error result.
+ dispatch.set((documentId, request) -> {
+ try {
+ assertEquals(id, documentId);
+ assertEquals("/document/v1/ns/type/docid/0?create=true&condition=false&timeout=5000ms&route=route",
+ request.path());
+ assertEquals("json", new String(request.body(), UTF_8));
+
+ HttpResponse response = HttpResponse.of(502,
+ ("{\n" +
+ " \"pathId\": \"/document/v1/ns/type/docid/0\",\n" +
+ " \"id\": \"id:ns:type::0\",\n" +
+ " \"message\": \"Ooops! ... I did it again.\",\n" +
+ " \"trace\": \"I played with your heart. Got lost in the game.\"\n" +
+ "}").getBytes(UTF_8));
+ return CompletableFuture.completedFuture(response);
+ }
+ catch (Throwable thrown) {
+ CompletableFuture<HttpResponse> failed = new CompletableFuture<>();
+ failed.completeExceptionally(thrown);
+ return failed;
+ }
+ });
+ Result result = client.put(id,
+ "json",
+ OperationParameters.empty()
+ .createIfNonExistent(true)
+ .testAndSetCondition("false")
+ .route("route")
+ .timeout(Duration.ofSeconds(5)))
+ .get();
+ assertEquals("Ooops! ... I did it again.", result.resultMessage().get());
+ assertEquals("I played with your heart. Got lost in the game.", result.traceMessage().get());
+
+
+ // Handler error is a FeedException.
+ dispatch.set((documentId, request) -> {
+ try {
+ assertEquals(id, documentId);
+ assertEquals("/document/v1/ns/type/docid/0",
+ request.path());
+ assertEquals("json", new String(request.body(), UTF_8));
+
+ HttpResponse response = HttpResponse.of(500,
+ ("{\n" +
+ " \"pathId\": \"/document/v1/ns/type/docid/0\",\n" +
+ " \"id\": \"id:ns:type::0\",\n" +
+ " \"message\": \"Alla ska i jorden.\",\n" +
+ " \"trace\": \"Din tid den kom, och senn så for den. \"\n" +
+ "}").getBytes(UTF_8));
+ return CompletableFuture.completedFuture(response);
+ }
+ catch (Throwable thrown) {
+ CompletableFuture<HttpResponse> failed = new CompletableFuture<>();
+ failed.completeExceptionally(thrown);
+ return failed;
+ }
+ });
+ ExecutionException expected = assertThrows(ExecutionException.class,
+ () -> client.put(id,
+ "json",
+ OperationParameters.empty())
+ .get());
+ assertEquals("Status 500 executing 'POST /document/v1/ns/type/docid/0': Alla ska i jorden.", expected.getCause().getMessage());
+ }
+
+}
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpRequestStrategyTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpRequestStrategyTest.java
new file mode 100644
index 00000000000..d3005227184
--- /dev/null
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpRequestStrategyTest.java
@@ -0,0 +1,190 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import ai.vespa.feed.client.FeedClient.CircuitBreaker;
+import org.apache.hc.core5.http.ContentType;
+import org.junit.jupiter.api.Test;
+
+import java.io.IOException;
+import java.net.URI;
+import java.time.Duration;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.BiConsumer;
+
+import static ai.vespa.feed.client.FeedClient.CircuitBreaker.State.CLOSED;
+import static ai.vespa.feed.client.FeedClient.CircuitBreaker.State.HALF_OPEN;
+import static ai.vespa.feed.client.FeedClient.CircuitBreaker.State.OPEN;
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+class HttpRequestStrategyTest {
+
+ @Test
+ void testConcurrency() {
+ int documents = 1 << 16;
+ HttpRequest request = new HttpRequest("PUT", "/", null, null);
+ HttpResponse response = HttpResponse.of(200, "{}".getBytes(UTF_8));
+ ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
+ Cluster cluster = new BenchmarkingCluster((__, vessel) -> executor.schedule(() -> vessel.complete(response), 100, TimeUnit.MILLISECONDS));
+
+ HttpRequestStrategy strategy = new HttpRequestStrategy(FeedClientBuilder.create(URI.create("https://dummy.com:123"))
+ .setConnectionsPerEndpoint(1 << 12)
+ .setMaxStreamPerConnection(1 << 4),
+ cluster);
+ long startNanos = System.nanoTime();
+ for (int i = 0; i < documents; i++)
+ strategy.enqueue(DocumentId.of("ns", "type", Integer.toString(i)), request);
+
+ strategy.await();
+ executor.shutdown();
+ cluster.close();
+ OperationStats stats = cluster.stats();
+ long successes = stats.responsesByCode().get(200);
+ System.err.println(successes + " successes in " + (System.nanoTime() - startNanos) * 1e-9 + " seconds");
+ System.err.println(stats);
+
+ assertEquals(documents, stats.requests());
+ assertEquals(documents, stats.responses());
+ assertEquals(documents, stats.responsesByCode().get(200));
+ assertEquals(0, stats.inflight());
+ assertEquals(0, stats.exceptions());
+ assertEquals(0, stats.bytesSent());
+ assertEquals(2 * documents, stats.bytesReceived());
+ }
+
+ @Test
+ void testLogic() throws ExecutionException, InterruptedException {
+ int minStreams = 16; // Hard limit for minimum number of streams per connection.
+ MockCluster cluster = new MockCluster();
+ AtomicLong now = new AtomicLong(0);
+ CircuitBreaker breaker = new GracePeriodCircuitBreaker(now::get, Duration.ofSeconds(1), Duration.ofMinutes(10));
+ HttpRequestStrategy strategy = new HttpRequestStrategy(FeedClientBuilder.create(URI.create("https://dummy.com:123"))
+ .setRetryStrategy(new FeedClient.RetryStrategy() {
+ @Override public boolean retry(FeedClient.OperationType type) { return type == FeedClient.OperationType.PUT; }
+ @Override public int retries() { return 1; }
+ })
+ .setCircuitBreaker(breaker)
+ .setConnectionsPerEndpoint(1)
+ .setMaxStreamPerConnection(minStreams),
+ new BenchmarkingCluster(cluster));
+
+ DocumentId id1 = DocumentId.of("ns", "type", "1");
+ DocumentId id2 = DocumentId.of("ns", "type", "2");
+ HttpRequest request = new HttpRequest("POST", "/", null, null);
+
+ // Runtime exception is not retried.
+ cluster.expect((__, vessel) -> vessel.completeExceptionally(new RuntimeException("boom")));
+ ExecutionException expected = assertThrows(ExecutionException.class,
+ () -> strategy.enqueue(id1, request).get());
+ assertEquals("boom", expected.getCause().getMessage());
+ assertEquals(1, strategy.stats().requests());
+
+ // IOException is retried.
+ cluster.expect((__, vessel) -> vessel.completeExceptionally(new IOException("retry me")));
+ expected = assertThrows(ExecutionException.class,
+ () -> strategy.enqueue(id1, request).get());
+ assertEquals("retry me", expected.getCause().getMessage());
+ assertEquals(3, strategy.stats().requests());
+
+ // Successful response is returned
+ HttpResponse success = HttpResponse.of(200, null);
+ cluster.expect((__, vessel) -> vessel.complete(success));
+ assertEquals(success, strategy.enqueue(id1, request).get());
+ assertEquals(4, strategy.stats().requests());
+
+ // Throttled requests are retried. Concurrent operations to same ID (only) are serialised.
+ now.set(2000);
+ HttpResponse throttled = HttpResponse.of(429, null);
+ AtomicInteger count = new AtomicInteger(3);
+ CountDownLatch latch = new CountDownLatch(1);
+ AtomicReference<CompletableFuture<HttpResponse>> completion = new AtomicReference<>();
+ cluster.expect((req, vessel) -> {
+ if (req == request) {
+ if (count.decrementAndGet() > 0)
+ vessel.complete(throttled);
+ else {
+ completion.set(vessel);
+ latch.countDown();
+ }
+ }
+ else vessel.complete(success);
+ });
+ CompletableFuture<HttpResponse> delayed = strategy.enqueue(id1, request);
+ CompletableFuture<HttpResponse> serialised = strategy.enqueue(id1, new HttpRequest("PUT", "/", null, null));
+ assertEquals(success, strategy.enqueue(id2, new HttpRequest("DELETE", "/", null, null)).get());
+ latch.await();
+ assertEquals(8, strategy.stats().requests()); // 3 attempts at throttled and one at id2.
+ now.set(4000);
+ assertEquals(CLOSED, breaker.state()); // Circuit not broken due to throttled requests.
+ completion.get().complete(success);
+ assertEquals(success, delayed.get());
+ assertEquals(success, serialised.get());
+
+ // Some error responses are retried.
+ HttpResponse serverError = HttpResponse.of(500, null);
+ cluster.expect((__, vessel) -> vessel.complete(serverError));
+ assertEquals(serverError, strategy.enqueue(id1, request).get());
+ assertEquals(11, strategy.stats().requests());
+ assertEquals(CLOSED, breaker.state()); // Circuit not broken due to throttled requests.
+
+ // Error responses are not retried when not of appropriate type.
+ cluster.expect((__, vessel) -> vessel.complete(serverError));
+ assertEquals(serverError, strategy.enqueue(id1, new HttpRequest("PUT", "/", null, null)).get());
+ assertEquals(12, strategy.stats().requests());
+
+ // Some error responses are not retried.
+ HttpResponse badRequest = HttpResponse.of(400, null);
+ cluster.expect((__, vessel) -> vessel.complete(badRequest));
+ assertEquals(badRequest, strategy.enqueue(id1, request).get());
+ assertEquals(13, strategy.stats().requests());
+
+ // Circuit breaker opens some time after starting to fail.
+ now.set(6000);
+ assertEquals(HALF_OPEN, breaker.state()); // Circuit broken due to failed requests.
+ now.set(605000);
+ assertEquals(OPEN, breaker.state()); // Circuit broken due to failed requests.
+
+ Map<Integer, Long> codes = new HashMap<>();
+ codes.put(200, 4L);
+ codes.put(400, 1L);
+ codes.put(429, 2L);
+ codes.put(500, 3L);
+ assertEquals(codes, strategy.stats().responsesByCode());
+ assertEquals(3, strategy.stats().exceptions());
+ }
+
+ static class MockCluster implements Cluster {
+
+ final AtomicReference<BiConsumer<HttpRequest, CompletableFuture<HttpResponse>>> dispatch = new AtomicReference<>();
+
+ void expect(BiConsumer<HttpRequest, CompletableFuture<HttpResponse>> expected) {
+ dispatch.set(expected);
+ }
+
+ @Override
+ public void dispatch(HttpRequest request, CompletableFuture<HttpResponse> vessel) {
+ dispatch.get().accept(request, vessel);
+ }
+
+ @Override
+ public void close() { }
+
+ @Override
+ public OperationStats stats() {
+ return null;
+ }
+
+ }
+
+}
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonFeederTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonFeederTest.java
new file mode 100644
index 00000000000..03194e23d47
--- /dev/null
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonFeederTest.java
@@ -0,0 +1,124 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import org.junit.jupiter.api.Test;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.IntStream;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static java.util.stream.Collectors.joining;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+class JsonFeederTest {
+
+ @Test
+ void test() throws IOException {
+ int docs = 1 << 14;
+ String json = "[\n" +
+
+ IntStream.range(0, docs).mapToObj(i ->
+ " {\n" +
+ " \"id\": \"id:ns:type::abc" + i + "\",\n" +
+ " \"fields\": {\n" +
+ " \"lul\":\"lal\"\n" +
+ " }\n" +
+ " },\n"
+ ).collect(joining()) +
+
+ " {\n" +
+ " \"id\": \"id:ns:type::abc" + docs + "\",\n" +
+ " \"fields\": {\n" +
+ " \"lul\":\"lal\"\n" +
+ " }\n" +
+ " }\n" +
+ "]";
+ AtomicReference<Throwable> exceptionThrow = new AtomicReference<>();
+ Path tmpFile = Files.createTempFile(null, null);
+ Files.write(tmpFile, json.getBytes(UTF_8));
+ try (InputStream in = Files.newInputStream(tmpFile, StandardOpenOption.READ, StandardOpenOption.DELETE_ON_CLOSE)) {
+ AtomicInteger resultsReceived = new AtomicInteger();
+ AtomicBoolean completedSuccessfully = new AtomicBoolean();
+ long startNanos = System.nanoTime();
+ SimpleClient feedClient = new SimpleClient();
+ JsonFeeder.builder(feedClient).build()
+ .feedMany(in, 1 << 7,
+ new JsonFeeder.ResultCallback() { // TODO: hangs when buffer is smaller than largest document
+ @Override
+ public void onNextResult(Result result, Throwable error) { resultsReceived.incrementAndGet(); }
+
+ @Override
+ public void onError(Throwable error) { exceptionThrow.set(error); }
+
+ @Override
+ public void onComplete() { completedSuccessfully.set(true); }
+ })
+ .join();
+
+ System.err.println((json.length() / 1048576.0) + " MB in " + (System.nanoTime() - startNanos) * 1e-9 + " seconds");
+ assertEquals(docs + 1, feedClient.ids.size());
+ assertEquals(docs + 1, resultsReceived.get());
+ assertTrue(completedSuccessfully.get());
+ assertNull(exceptionThrow.get());
+ }
+ }
+
+ @Test
+ public void singleJsonOperationIsDispatchedToFeedClient() throws IOException, ExecutionException, InterruptedException {
+ try (JsonFeeder feeder = JsonFeeder.builder(new SimpleClient()).build()) {
+ String json = "{\"put\": \"id:ns:type::abc1\",\n" +
+ " \"fields\": {\n" +
+ " \"lul\":\"lal\"\n" +
+ " }\n" +
+ " }\n";
+ Result result = feeder.feedSingle(json).get();
+ assertEquals(DocumentId.of("id:ns:type::abc1"), result.documentId());
+ assertEquals(Result.Type.success, result.type());
+ assertEquals("success", result.resultMessage().get());
+ }
+ }
+
+ private static class SimpleClient implements FeedClient {
+ final Set<String> ids = new HashSet<>();
+
+ @Override
+ public CompletableFuture<Result> put(DocumentId documentId, String documentJson, OperationParameters params) {
+ ids.add(documentId.userSpecific());
+ return createSuccessResult(documentId);
+ }
+
+ @Override
+ public CompletableFuture<Result> update(DocumentId documentId, String updateJson, OperationParameters params) {
+ return createSuccessResult(documentId);
+ }
+
+ @Override
+ public CompletableFuture<Result> remove(DocumentId documentId, OperationParameters params) {
+ return createSuccessResult(documentId);
+ }
+
+ @Override
+ public OperationStats stats() { return null; }
+
+ @Override
+ public void close(boolean graceful) { }
+
+ private CompletableFuture<Result> createSuccessResult(DocumentId documentId) {
+ return CompletableFuture.completedFuture(new Result(Result.Type.success, documentId, "success", null));
+ }
+ }
+
+}
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonStreamFeederTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonStreamFeederTest.java
deleted file mode 100644
index 28a50b88396..00000000000
--- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonStreamFeederTest.java
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
-
-import org.junit.jupiter.api.Test;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.concurrent.CompletableFuture;
-import java.util.stream.IntStream;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static java.util.stream.Collectors.joining;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
-class JsonStreamFeederTest {
-
- @Test
- void test() throws IOException {
- int docs = 1 << 14;
- String json = "[\n" +
-
- IntStream.range(0, docs).mapToObj(i ->
- " {\n" +
- " \"id\": \"id:ns:type::abc" + i + "\",\n" +
- " \"fields\": {\n" +
- " \"lul\":\"lal\"\n" +
- " }\n" +
- " },\n"
- ).collect(joining()) +
-
- " {\n" +
- " \"id\": \"id:ns:type::abc" + docs + "\",\n" +
- " \"fields\": {\n" +
- " \"lul\":\"lal\"\n" +
- " }\n" +
- " }\n" +
- "]";
- ByteArrayInputStream in = new ByteArrayInputStream(json.getBytes(UTF_8));
- Set<String> ids = new HashSet<>();
- long startNanos = System.nanoTime();
- JsonStreamFeeder.builder(new FeedClient() {
-
- @Override
- public CompletableFuture<Result> put(DocumentId documentId, String documentJson, OperationParameters params) {
- ids.add(documentId.userSpecific());
- return new CompletableFuture<>();
- }
-
- @Override
- public CompletableFuture<Result> update(DocumentId documentId, String updateJson, OperationParameters params) {
- return new CompletableFuture<>();
- }
-
- @Override
- public CompletableFuture<Result> remove(DocumentId documentId, OperationParameters params) {
- return new CompletableFuture<>();
- }
-
- @Override
- public void close(boolean graceful) { }
-
- }).build().feed(in, 1 << 7, false); // TODO: hangs when buffer is smaller than largest document
- System.err.println((json.length() / 1048576.0) + " MB in " + (System.nanoTime() - startNanos) * 1e-9 + " seconds");
- assertEquals(docs + 1, ids.size());
- }
-
-}
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java
new file mode 100644
index 00000000000..579adf9048f
--- /dev/null
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java
@@ -0,0 +1,89 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client.examples;
+
+import ai.vespa.feed.client.FeedClient;
+import ai.vespa.feed.client.FeedClientBuilder;
+import ai.vespa.feed.client.JsonFeeder;
+import ai.vespa.feed.client.Result;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.time.Duration;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.logging.Logger;
+
+/**
+ * Sample feeder demonstrating how to programmatically feed to a Vespa cluster.
+ */
+class JsonFileFeederExample implements Closeable {
+
+ private final static Logger log = Logger.getLogger(JsonFileFeederExample.class.getName());
+
+ private final JsonFeeder jsonFeeder;
+ private final URI endpoint;
+
+ static class ResultCallBack implements JsonFeeder.ResultCallback {
+
+ final AtomicInteger resultsReceived = new AtomicInteger(0);
+ final AtomicInteger errorsReceived = new AtomicInteger(0);
+ final long startTimeMillis = System.currentTimeMillis();;
+
+ @Override
+ public void onNextResult(Result result, Throwable error) {
+ resultsReceived.incrementAndGet();
+ if (error != null) {
+ log.warning("Problems with feeding document");
+ errorsReceived.incrementAndGet();
+ } else if (result.type() == Result.Type.failure) {
+ log.warning("Problems with docID " + result.documentId() + ":" + error);
+ errorsReceived.incrementAndGet();
+ }
+ }
+
+ @Override
+ public void onError(Throwable error) {
+ log.severe("Feeding failed: " + error.getMessage());
+ }
+
+ @Override
+ public void onComplete() {
+ log.info("Feeding completed");
+ }
+
+ void dumpStatsToLog() {
+ log.info("Received in total " + resultsReceived.get() + ", " + errorsReceived.get() + " errors.");
+ log.info("Time spent receiving is " + (System.currentTimeMillis() - startTimeMillis) + " ms.");
+ }
+
+ }
+
+ JsonFileFeederExample(URI endpoint) {
+ this.endpoint = endpoint;
+ FeedClient feedClient = FeedClientBuilder.create(endpoint)
+ .build();
+ this.jsonFeeder = JsonFeeder.builder(feedClient)
+ .withTimeout(Duration.ofSeconds(30))
+ .build();
+ }
+
+ /**
+ * Feed all operations from a stream.
+ *
+ * @param stream The input stream to read operations from (JSON array containing one or more document operations).
+ */
+ void batchFeed(InputStream stream, String batchId) {
+ ResultCallBack callback = new ResultCallBack();
+ log.info("Starting feed to " + endpoint + " for batch '" + batchId + "'");
+ CompletableFuture<Void> promise = jsonFeeder.feedMany(stream, callback);
+ promise.join(); // wait for feeding to complete
+ callback.dumpStatsToLog();
+ }
+
+ @Override
+ public void close() throws IOException {
+ jsonFeeder.close();
+ }
+}
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonStreamFeederExample.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonStreamFeederExample.java
new file mode 100644
index 00000000000..5cee776b244
--- /dev/null
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonStreamFeederExample.java
@@ -0,0 +1,117 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client.examples;
+
+import ai.vespa.feed.client.DocumentId;
+import ai.vespa.feed.client.FeedClient;
+import ai.vespa.feed.client.FeedClientBuilder;
+import ai.vespa.feed.client.OperationParameters;
+import ai.vespa.feed.client.Result;
+
+import java.net.URI;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+/**
+ * Simple Streaming feeder implementation which will send operations to a Vespa endpoint.
+ * Other threads communicate with the feeder by adding new operations on the BlockingQueue
+ */
+
+class JsonStreamFeederExample extends Thread implements AutoCloseable {
+
+ static class Operation {
+ final String type;
+ final String documentId;
+ final String documentFieldsJson;
+
+ Operation(String type, String id, String fields) {
+ this.type = type;
+ this.documentId = id;
+ this.documentFieldsJson = fields;
+ }
+ }
+
+ private final static Logger log = Logger.getLogger(JsonStreamFeederExample.class.getName());
+
+ private final BlockingQueue<Operation> operations;
+ private final FeedClient feedClient;
+ private final AtomicBoolean drain = new AtomicBoolean(false);
+ private final CountDownLatch finishedDraining = new CountDownLatch(1);
+ private final AtomicInteger resultCounter = new AtomicInteger();
+
+ /**
+ * Constructor
+     * @param operations The shared blocking queue into which other threads can put document operations.
+ * @param endpoint The endpoint to feed to
+ */
+ JsonStreamFeederExample(BlockingQueue<JsonStreamFeederExample.Operation> operations, URI endpoint) {
+ this.operations = operations;
+ this.feedClient = FeedClientBuilder.create(endpoint).build();
+ }
+
+ /**
+     * Shuts down this feeder; waits until the operations queue is drained.
+ */
+ @Override
+ public void close() {
+ log.info("Shutdown initiated, awaiting operations queue to be drained. Queue size is " + operations.size());
+ drain.set(true);
+ try {
+ finishedDraining.await();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+ }
+
+ @Override
+ public void run() {
+ while (!drain.get() || !operations.isEmpty()) {
+ try {
+ JsonStreamFeederExample.Operation op = operations.poll(1, TimeUnit.SECONDS);
+ if(op == null) // no operations available
+ continue;
+ log.info("Put document " + op.documentId);
+ CompletableFuture<Result> promise;
+ DocumentId docId = DocumentId.of(op.documentId);
+ OperationParameters params = OperationParameters.empty();
+ String json = op.documentFieldsJson;
+ switch (op.type) {
+ case "put":
+ promise = feedClient.put(docId, json, params);
+ break;
+ case "remove":
+ promise = feedClient.remove(docId, params);
+ break;
+ case "update":
+ promise = feedClient.update(docId, json, params);
+ break;
+ default:
+ throw new IllegalArgumentException("Invalid operation: " + op.type);
+ }
+ promise.whenComplete((result, throwable) -> {
+ if (resultCounter.getAndIncrement() % 10 == 0) {
+ System.err.println(feedClient.stats());
+ }
+ if (throwable != null) {
+ System.err.printf("Failure for '%s': %s", docId, throwable);
+ throwable.printStackTrace();
+ } else if (result.type() == Result.Type.failure) {
+ System.err.printf("Failure for '%s': %s", docId, result.resultMessage().orElse("<no messsage>"));
+ }
+ });
+ } catch (InterruptedException e) {
+ log.log(Level.SEVERE, "Got interrupt exception.", e);
+ break;
+ }
+ }
+ log.info("Shutting down feeding thread");
+ this.feedClient.close();
+ finishedDraining.countDown();
+ }
+
+} \ No newline at end of file
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/SimpleExample.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/SimpleExample.java
new file mode 100644
index 00000000000..5ece9051e41
--- /dev/null
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/SimpleExample.java
@@ -0,0 +1,34 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client.examples;
+
+import ai.vespa.feed.client.DocumentId;
+import ai.vespa.feed.client.FeedClient;
+import ai.vespa.feed.client.FeedClientBuilder;
+import ai.vespa.feed.client.OperationParameters;
+import ai.vespa.feed.client.Result;
+
+import java.net.URI;
+import java.time.Duration;
+import java.util.concurrent.CompletableFuture;
+
+class SimpleExample {
+
+ public static void main(String[] args) {
+ try (FeedClient client = FeedClientBuilder.create(URI.create("https://my-container-endpoint-with-http2:8080/")).build()) {
+ DocumentId id = DocumentId.of("namespace", "documenttype", "1");
+ String json = "{\"fields\": {\"title\": \"hello world\"}}";
+ OperationParameters params = OperationParameters.empty()
+ .timeout(Duration.ofSeconds(5))
+ .route("myvesparoute");
+ CompletableFuture<Result> promise = client.put(id, json, params);
+ promise.whenComplete(((result, throwable) -> {
+ if (throwable != null) {
+ throwable.printStackTrace();
+ } else {
+ System.out.printf("'%s' for document '%s': %s%n", result.type(), result.documentId(), result.resultMessage());
+ }
+ }));
+ }
+ }
+
+}
diff --git a/vespa-hadoop/pom.xml b/vespa-hadoop/pom.xml
index 382c28dc884..39f10d84f9b 100644
--- a/vespa-hadoop/pom.xml
+++ b/vespa-hadoop/pom.xml
@@ -101,17 +101,22 @@
<scope>test</scope>
</dependency>
<dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
+ <groupId>org.junit.jupiter</groupId>
+ <artifactId>junit-jupiter</artifactId>
<scope>test</scope>
</dependency>
-
<!-- Vespa feeding dependencies -->
<dependency>
<groupId>com.yahoo.vespa</groupId>
<artifactId>vespa-http-client</artifactId>
<version>${project.version}</version>
</dependency>
+ <dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>vespa-feed-client</artifactId>
+ <version>${project.version}</version>
+ <scope>compile</scope>
+ </dependency>
</dependencies>
diff --git a/vespa-hadoop/src/main/java/ai/vespa/feed/client/DryrunResult.java b/vespa-hadoop/src/main/java/ai/vespa/feed/client/DryrunResult.java
new file mode 100644
index 00000000000..5974a8df271
--- /dev/null
+++ b/vespa-hadoop/src/main/java/ai/vespa/feed/client/DryrunResult.java
@@ -0,0 +1,18 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import ai.vespa.feed.client.Result.Type;
+
+/**
+ * Workaround for package-private {@link Result} constructor.
+ *
+ * @author bjorncs
+ */
+public class DryrunResult {
+
+ private DryrunResult() {}
+
+ public static Result create(Type type, DocumentId documentId, String resultMessage, String traceMessage) {
+ return new Result(type, documentId, resultMessage, traceMessage);
+ }
+}
diff --git a/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/LegacyVespaRecordWriter.java b/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/LegacyVespaRecordWriter.java
new file mode 100644
index 00000000000..b716c55beb5
--- /dev/null
+++ b/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/LegacyVespaRecordWriter.java
@@ -0,0 +1,235 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hadoop.mapreduce;
+
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonParseException;
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.core.JsonToken;
+import com.yahoo.vespa.hadoop.mapreduce.util.VespaConfiguration;
+import com.yahoo.vespa.hadoop.mapreduce.util.VespaCounters;
+import com.yahoo.vespa.hadoop.pig.VespaDocumentOperation;
+import com.yahoo.vespa.http.client.FeedClient;
+import com.yahoo.vespa.http.client.FeedClientFactory;
+import com.yahoo.vespa.http.client.Result;
+import com.yahoo.vespa.http.client.config.Cluster;
+import com.yahoo.vespa.http.client.config.ConnectionParams;
+import com.yahoo.vespa.http.client.config.Endpoint;
+import com.yahoo.vespa.http.client.config.FeedParams;
+import com.yahoo.vespa.http.client.config.FeedParams.DataFormat;
+import com.yahoo.vespa.http.client.config.SessionParams;
+import org.apache.hadoop.mapreduce.RecordWriter;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+
+import javax.xml.namespace.QName;
+import javax.xml.stream.FactoryConfigurationError;
+import javax.xml.stream.XMLEventReader;
+import javax.xml.stream.XMLInputFactory;
+import javax.xml.stream.XMLStreamException;
+import javax.xml.stream.events.StartElement;
+import javax.xml.stream.events.XMLEvent;
+import java.io.IOException;
+import java.io.StringReader;
+import java.time.Duration;
+import java.util.List;
+import java.util.StringTokenizer;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.logging.Logger;
+
+/**
+ * {@link LegacyVespaRecordWriter} sends the output &lt;key, value&gt; to one or more Vespa endpoints using vespa-http-client.
+ *
+ * @author lesters
+ */
+@SuppressWarnings("rawtypes")
+public class LegacyVespaRecordWriter extends RecordWriter {
+
+ private final static Logger log = Logger.getLogger(LegacyVespaRecordWriter.class.getCanonicalName());
+
+ private boolean initialized = false;
+ private FeedClient feedClient;
+ private final VespaCounters counters;
+ private final int progressInterval;
+
+ final VespaConfiguration configuration;
+
+ LegacyVespaRecordWriter(VespaConfiguration configuration, VespaCounters counters) {
+ this.counters = counters;
+ this.configuration = configuration;
+ this.progressInterval = configuration.progressInterval();
+ }
+
+
+ @Override
+ public void write(Object key, Object data) throws IOException, InterruptedException {
+ if (!initialized) {
+ initialize();
+ }
+
+ String doc = data.toString().trim();
+
+ // Parse data to find document id - if none found, skip this write
+ String docId = DataFormat.JSON_UTF8.equals(configuration.dataFormat()) ? findDocId(doc)
+ : findDocIdFromXml(doc);
+ if (docId != null && docId.length() >= 0) {
+ feedClient.stream(docId, doc);
+ counters.incrementDocumentsSent(1);
+ } else {
+ counters.incrementDocumentsSkipped(1);
+ }
+
+ if (counters.getDocumentsSent() % progressInterval == 0) {
+ String progress = String.format("Feed progress: %d / %d / %d / %d (sent, ok, failed, skipped)",
+ counters.getDocumentsSent(),
+ counters.getDocumentsOk(),
+ counters.getDocumentsFailed(),
+ counters.getDocumentsSkipped());
+ log.info(progress);
+ }
+
+ }
+
+
+ @Override
+ public void close(TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
+ if (feedClient != null) {
+ feedClient.close();
+ }
+ }
+
+ protected ConnectionParams.Builder configureConnectionParams() {
+ ConnectionParams.Builder connParamsBuilder = new ConnectionParams.Builder();
+ connParamsBuilder.setDryRun(configuration.dryrun());
+ connParamsBuilder.setUseCompression(configuration.useCompression());
+ connParamsBuilder.setNumPersistentConnectionsPerEndpoint(configuration.numConnections());
+ connParamsBuilder.setMaxRetries(configuration.numRetries());
+ if (configuration.proxyHost() != null) {
+ connParamsBuilder.setProxyHost(configuration.proxyHost());
+ }
+ if (configuration.proxyPort() >= 0) {
+ connParamsBuilder.setProxyPort(configuration.proxyPort());
+ }
+ return connParamsBuilder;
+ }
+
+ protected FeedParams.Builder configureFeedParams() {
+ FeedParams.Builder feedParamsBuilder = new FeedParams.Builder();
+ feedParamsBuilder.setDataFormat(configuration.dataFormat());
+ feedParamsBuilder.setRoute(configuration.route());
+ feedParamsBuilder.setMaxSleepTimeMs(configuration.maxSleepTimeMs());
+ feedParamsBuilder.setMaxInFlightRequests(configuration.maxInFlightRequests());
+ feedParamsBuilder.setLocalQueueTimeOut(Duration.ofMinutes(10).toMillis());
+ return feedParamsBuilder;
+ }
+
+ protected SessionParams.Builder configureSessionParams() {
+ SessionParams.Builder sessionParamsBuilder = new SessionParams.Builder();
+ sessionParamsBuilder.setThrottlerMinSize(configuration.throttlerMinSize());
+ sessionParamsBuilder.setClientQueueSize(configuration.maxInFlightRequests()*2);
+ return sessionParamsBuilder;
+ }
+
+ private void initialize() {
+ if (!configuration.dryrun() && configuration.randomStartupSleepMs() > 0) {
+ int delay = ThreadLocalRandom.current().nextInt(configuration.randomStartupSleepMs());
+ log.info("VespaStorage: Delaying startup by " + delay + " ms");
+ try {
+ Thread.sleep(delay);
+ } catch (Exception e) {}
+ }
+
+ ConnectionParams.Builder connParamsBuilder = configureConnectionParams();
+ FeedParams.Builder feedParamsBuilder = configureFeedParams();
+ SessionParams.Builder sessionParams = configureSessionParams();
+
+ sessionParams.setConnectionParams(connParamsBuilder.build());
+ sessionParams.setFeedParams(feedParamsBuilder.build());
+
+ String endpoints = configuration.endpoint();
+ StringTokenizer tokenizer = new StringTokenizer(endpoints, ",");
+ while (tokenizer.hasMoreTokens()) {
+ String endpoint = tokenizer.nextToken().trim();
+ sessionParams.addCluster(new Cluster.Builder().addEndpoint(
+ Endpoint.create(endpoint, configuration.defaultPort(), configuration.useSSL())
+ ).build());
+ }
+
+ ResultCallback resultCallback = new ResultCallback(counters);
+ feedClient = FeedClientFactory.create(sessionParams.build(), resultCallback);
+
+ initialized = true;
+ log.info("VespaStorage configuration:\n" + configuration.toString());
+ log.info(feedClient.getStatsAsJson());
+ }
+
+ private String findDocIdFromXml(String xml) {
+ try {
+ XMLEventReader eventReader = XMLInputFactory.newInstance().createXMLEventReader(new StringReader(xml));
+ while (eventReader.hasNext()) {
+ XMLEvent event = eventReader.nextEvent();
+ if (event.getEventType() == XMLEvent.START_ELEMENT) {
+ StartElement element = event.asStartElement();
+ String elementName = element.getName().getLocalPart();
+ if (VespaDocumentOperation.Operation.valid(elementName)) {
+ return element.getAttributeByName(QName.valueOf("documentid")).getValue();
+ }
+ }
+ }
+ } catch (XMLStreamException | FactoryConfigurationError e) {
+ // as json dude does
+ return null;
+ }
+ return null;
+ }
+
+ private String findDocId(String json) throws IOException {
+ JsonFactory factory = new JsonFactory();
+ try(JsonParser parser = factory.createParser(json)) {
+ if (parser.nextToken() != JsonToken.START_OBJECT) {
+ return null;
+ }
+ while (parser.nextToken() != JsonToken.END_OBJECT) {
+ String fieldName = parser.getCurrentName();
+ parser.nextToken();
+ if (VespaDocumentOperation.Operation.valid(fieldName)) {
+ String docId = parser.getText();
+ return docId;
+ } else {
+ parser.skipChildren();
+ }
+ }
+ } catch (JsonParseException ex) {
+ return null;
+ }
+ return null;
+ }
+
+
+ static class ResultCallback implements FeedClient.ResultCallback {
+ final VespaCounters counters;
+
+ public ResultCallback(VespaCounters counters) {
+ this.counters = counters;
+ }
+
+ @Override
+ public void onCompletion(String docId, Result documentResult) {
+ if (!documentResult.isSuccess()) {
+ counters.incrementDocumentsFailed(1);
+ StringBuilder sb = new StringBuilder();
+ sb.append("Problems with docid ");
+ sb.append(docId);
+ sb.append(": ");
+ List<Result.Detail> details = documentResult.getDetails();
+ for (Result.Detail detail : details) {
+ sb.append(detail.toString());
+ sb.append(" ");
+ }
+ log.warning(sb.toString());
+ return;
+ }
+ counters.incrementDocumentsOk(1);
+ }
+
+ }
+
+}
diff --git a/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaOutputFormat.java b/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaOutputFormat.java
index bef51e9ae08..97bc7dc838e 100644
--- a/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaOutputFormat.java
+++ b/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaOutputFormat.java
@@ -10,7 +10,7 @@ import java.util.Properties;
/**
* An output specification for writing to Vespa instances in a Map-Reduce job.
- * Mainly returns an instance of a {@link VespaRecordWriter} that does the
+ * Mainly returns an instance of a {@link LegacyVespaRecordWriter} that does the
* actual feeding to Vespa.
*
* @author lesters
@@ -35,7 +35,9 @@ public class VespaOutputFormat extends OutputFormat {
public RecordWriter getRecordWriter(TaskAttemptContext context) throws IOException, InterruptedException {
VespaCounters counters = VespaCounters.get(context);
VespaConfiguration configuration = VespaConfiguration.get(context.getConfiguration(), configOverride);
- return new VespaRecordWriter(configuration, counters);
+ return configuration.useLegacyClient()
+ ? new LegacyVespaRecordWriter(configuration, counters)
+ : new VespaRecordWriter(configuration, counters);
}
diff --git a/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaRecordWriter.java b/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaRecordWriter.java
index 4cc93bfd538..15b3d2e9d7d 100644
--- a/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaRecordWriter.java
+++ b/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaRecordWriter.java
@@ -1,83 +1,74 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hadoop.mapreduce;
-import com.fasterxml.jackson.core.JsonFactory;
-import com.fasterxml.jackson.core.JsonParseException;
-import com.fasterxml.jackson.core.JsonParser;
-import com.fasterxml.jackson.core.JsonToken;
+import ai.vespa.feed.client.DocumentId;
+import ai.vespa.feed.client.DryrunResult;
+import ai.vespa.feed.client.FeedClient;
+import ai.vespa.feed.client.FeedClientBuilder;
+import ai.vespa.feed.client.JsonFeeder;
+import ai.vespa.feed.client.JsonParseException;
+import ai.vespa.feed.client.OperationParameters;
+import ai.vespa.feed.client.OperationStats;
+import ai.vespa.feed.client.Result;
import com.yahoo.vespa.hadoop.mapreduce.util.VespaConfiguration;
import com.yahoo.vespa.hadoop.mapreduce.util.VespaCounters;
-import com.yahoo.vespa.hadoop.pig.VespaDocumentOperation;
-import com.yahoo.vespa.http.client.FeedClient;
-import com.yahoo.vespa.http.client.FeedClientFactory;
-import com.yahoo.vespa.http.client.Result;
-import com.yahoo.vespa.http.client.config.Cluster;
-import com.yahoo.vespa.http.client.config.ConnectionParams;
-import com.yahoo.vespa.http.client.config.Endpoint;
import com.yahoo.vespa.http.client.config.FeedParams;
-import com.yahoo.vespa.http.client.config.FeedParams.DataFormat;
-import com.yahoo.vespa.http.client.config.SessionParams;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import javax.xml.namespace.QName;
-import javax.xml.stream.FactoryConfigurationError;
-import javax.xml.stream.XMLEventReader;
-import javax.xml.stream.XMLInputFactory;
-import javax.xml.stream.XMLStreamException;
-import javax.xml.stream.events.StartElement;
-import javax.xml.stream.events.XMLEvent;
import java.io.IOException;
-import java.io.StringReader;
+import java.net.URI;
import java.time.Duration;
+import java.util.Arrays;
import java.util.List;
-import java.util.StringTokenizer;
+import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ThreadLocalRandom;
import java.util.logging.Logger;
+import static java.util.stream.Collectors.toList;
+
/**
- * VespaRecordWriter sends the output &lt;key, value&gt; to one or more Vespa endpoints.
+ * {@link VespaRecordWriter} sends the output &lt;key, value&gt; to one or more Vespa endpoints using vespa-feed-client.
*
- * @author lesters
+ * @author bjorncs
*/
-@SuppressWarnings("rawtypes")
-public class VespaRecordWriter extends RecordWriter {
+public class VespaRecordWriter extends RecordWriter<Object, Object> {
private final static Logger log = Logger.getLogger(VespaRecordWriter.class.getCanonicalName());
- private boolean initialized = false;
- private FeedClient feedClient;
private final VespaCounters counters;
- private final int progressInterval;
+ private final VespaConfiguration config;
- final VespaConfiguration configuration;
+ private boolean initialized = false;
+ private JsonFeeder feeder;
- VespaRecordWriter(VespaConfiguration configuration, VespaCounters counters) {
+ protected VespaRecordWriter(VespaConfiguration config, VespaCounters counters) {
this.counters = counters;
- this.configuration = configuration;
- this.progressInterval = configuration.progressInterval();
+ this.config = config;
}
-
@Override
- public void write(Object key, Object data) throws IOException, InterruptedException {
- if (!initialized) {
- initialize();
- }
-
- String doc = data.toString().trim();
-
- // Parse data to find document id - if none found, skip this write
- String docId = DataFormat.JSON_UTF8.equals(configuration.dataFormat()) ? findDocId(doc)
- : findDocIdFromXml(doc);
- if (docId != null && docId.length() >= 0) {
- feedClient.stream(docId, doc);
- counters.incrementDocumentsSent(1);
- } else {
- counters.incrementDocumentsSkipped(1);
- }
-
- if (counters.getDocumentsSent() % progressInterval == 0) {
+ public void write(Object key, Object data) throws IOException {
+ initializeOnFirstWrite();
+ String json = data.toString().trim();
+ feeder.feedSingle(json)
+ .whenComplete((result, error) -> {
+ if (error != null) {
+ if (error instanceof JsonParseException) {
+ counters.incrementDocumentsSkipped(1);
+ } else {
+ String msg = "Failed to feed single document: " + error;
+ System.out.println(msg);
+ System.err.println(msg);
+ log.warning(msg);
+ counters.incrementDocumentsFailed(1);
+ }
+ } else {
+ counters.incrementDocumentsOk(1);
+ }
+ });
+ counters.incrementDocumentsSent(1);
+ if (counters.getDocumentsSent() % config.progressInterval() == 0) {
String progress = String.format("Feed progress: %d / %d / %d / %d (sent, ok, failed, skipped)",
counters.getDocumentsSent(),
counters.getDocumentsOk(),
@@ -85,151 +76,115 @@ public class VespaRecordWriter extends RecordWriter {
counters.getDocumentsSkipped());
log.info(progress);
}
-
}
-
@Override
- public void close(TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
- if (feedClient != null) {
- feedClient.close();
+ public void close(TaskAttemptContext context) throws IOException {
+ if (feeder != null) {
+ feeder.close();
+ feeder = null;
+ initialized = false;
}
}
- protected ConnectionParams.Builder configureConnectionParams() {
- ConnectionParams.Builder connParamsBuilder = new ConnectionParams.Builder();
- connParamsBuilder.setDryRun(configuration.dryrun());
- connParamsBuilder.setUseCompression(configuration.useCompression());
- connParamsBuilder.setNumPersistentConnectionsPerEndpoint(configuration.numConnections());
- connParamsBuilder.setMaxRetries(configuration.numRetries());
- if (configuration.proxyHost() != null) {
- connParamsBuilder.setProxyHost(configuration.proxyHost());
- }
- if (configuration.proxyPort() >= 0) {
- connParamsBuilder.setProxyPort(configuration.proxyPort());
- }
- return connParamsBuilder;
- }
+ /** Override method to alter {@link FeedClient} configuration */
+ protected void onFeedClientInitialization(FeedClientBuilder builder) {}
- protected FeedParams.Builder configureFeedParams() {
- FeedParams.Builder feedParamsBuilder = new FeedParams.Builder();
- feedParamsBuilder.setDataFormat(configuration.dataFormat());
- feedParamsBuilder.setRoute(configuration.route());
- feedParamsBuilder.setMaxSleepTimeMs(configuration.maxSleepTimeMs());
- feedParamsBuilder.setMaxInFlightRequests(configuration.maxInFlightRequests());
- feedParamsBuilder.setLocalQueueTimeOut(Duration.ofMinutes(10).toMillis());
- return feedParamsBuilder;
+ private void initializeOnFirstWrite() {
+ if (initialized) return;
+ validateConfig();
+ useRandomizedStartupDelayIfEnabled();
+ feeder = createJsonStreamFeeder();
+ initialized = true;
}
- protected SessionParams.Builder configureSessionParams() {
- SessionParams.Builder sessionParamsBuilder = new SessionParams.Builder();
- sessionParamsBuilder.setThrottlerMinSize(configuration.throttlerMinSize());
- sessionParamsBuilder.setClientQueueSize(configuration.maxInFlightRequests()*2);
- return sessionParamsBuilder;
+ private void validateConfig() {
+ if (!config.useSSL()) {
+ throw new IllegalArgumentException("SSL is required for this feed client implementation");
+ }
+ if (config.dataFormat() != FeedParams.DataFormat.JSON_UTF8) {
+ throw new IllegalArgumentException("Only JSON is support by this feed client implementation");
+ }
+ if (config.proxyHost() != null) {
+ log.warning(String.format("Ignoring proxy config (host='%s', port=%d)", config.proxyHost(), config.proxyPort()));
+ }
}
-
- private void initialize() {
- if (!configuration.dryrun() && configuration.randomStartupSleepMs() > 0) {
- int delay = ThreadLocalRandom.current().nextInt(configuration.randomStartupSleepMs());
- log.info("VespaStorage: Delaying startup by " + delay + " ms");
+
+ private void useRandomizedStartupDelayIfEnabled() {
+ if (!config.dryrun() && config.randomStartupSleepMs() > 0) {
+ int delay = ThreadLocalRandom.current().nextInt(config.randomStartupSleepMs());
+ log.info("Delaying startup by " + delay + " ms");
try {
Thread.sleep(delay);
} catch (Exception e) {}
}
+ }
- ConnectionParams.Builder connParamsBuilder = configureConnectionParams();
- FeedParams.Builder feedParamsBuilder = configureFeedParams();
- SessionParams.Builder sessionParams = configureSessionParams();
-
- sessionParams.setConnectionParams(connParamsBuilder.build());
- sessionParams.setFeedParams(feedParamsBuilder.build());
- String endpoints = configuration.endpoint();
- StringTokenizer tokenizer = new StringTokenizer(endpoints, ",");
- while (tokenizer.hasMoreTokens()) {
- String endpoint = tokenizer.nextToken().trim();
- sessionParams.addCluster(new Cluster.Builder().addEndpoint(
- Endpoint.create(endpoint, configuration.defaultPort(), configuration.useSSL())
- ).build());
+ private JsonFeeder createJsonStreamFeeder() {
+ FeedClient feedClient = createFeedClient();
+ JsonFeeder.Builder builder = JsonFeeder.builder(feedClient)
+ .withTimeout(Duration.ofMinutes(10));
+ if (config.route() != null) {
+ builder.withRoute(config.route());
}
+ return builder.build();
- ResultCallback resultCallback = new ResultCallback(counters);
- feedClient = FeedClientFactory.create(sessionParams.build(), resultCallback);
-
- initialized = true;
- log.info("VespaStorage configuration:\n" + configuration.toString());
- log.info(feedClient.getStatsAsJson());
}
- private String findDocIdFromXml(String xml) {
- try {
- XMLEventReader eventReader = XMLInputFactory.newInstance().createXMLEventReader(new StringReader(xml));
- while (eventReader.hasNext()) {
- XMLEvent event = eventReader.nextEvent();
- if (event.getEventType() == XMLEvent.START_ELEMENT) {
- StartElement element = event.asStartElement();
- String elementName = element.getName().getLocalPart();
- if (VespaDocumentOperation.Operation.valid(elementName)) {
- return element.getAttributeByName(QName.valueOf("documentid")).getValue();
- }
- }
- }
- } catch (XMLStreamException | FactoryConfigurationError e) {
- // as json dude does
- return null;
+ private FeedClient createFeedClient() {
+ if (config.dryrun()) {
+ return new DryrunClient();
+ } else {
+ FeedClientBuilder feedClientBuilder = FeedClientBuilder.create(endpointUris(config))
+ .setConnectionsPerEndpoint(config.numConnections())
+ .setMaxStreamPerConnection(streamsPerConnection(config))
+ .setRetryStrategy(retryStrategy(config));
+
+ onFeedClientInitialization(feedClientBuilder);
+ return feedClientBuilder.build();
}
- return null;
}
-
- private String findDocId(String json) throws IOException {
- JsonFactory factory = new JsonFactory();
- try(JsonParser parser = factory.createParser(json)) {
- if (parser.nextToken() != JsonToken.START_OBJECT) {
- return null;
- }
- while (parser.nextToken() != JsonToken.END_OBJECT) {
- String fieldName = parser.getCurrentName();
- parser.nextToken();
- if (VespaDocumentOperation.Operation.valid(fieldName)) {
- String docId = parser.getText();
- return docId;
- } else {
- parser.skipChildren();
- }
- }
- } catch (JsonParseException ex) {
- return null;
- }
- return null;
+
+ private static FeedClient.RetryStrategy retryStrategy(VespaConfiguration config) {
+ int maxRetries = config.numRetries();
+ return new FeedClient.RetryStrategy() {
+ @Override public int retries() { return maxRetries; }
+ };
}
+ private static int streamsPerConnection(VespaConfiguration config) {
+ return Math.min(256, config.maxInFlightRequests() / config.numConnections());
+ }
+
+ private static List<URI> endpointUris(VespaConfiguration config) {
+ return Arrays.stream(config.endpoint().split(","))
+ .map(hostname -> URI.create(String.format("https://%s:%d/", hostname, config.defaultPort())))
+ .collect(toList());
+ }
- static class ResultCallback implements FeedClient.ResultCallback {
- final VespaCounters counters;
+ private static class DryrunClient implements FeedClient {
- public ResultCallback(VespaCounters counters) {
- this.counters = counters;
+ @Override
+ public CompletableFuture<Result> put(DocumentId documentId, String documentJson, OperationParameters params) {
+ return createSuccessResult(documentId);
}
@Override
- public void onCompletion(String docId, Result documentResult) {
- if (!documentResult.isSuccess()) {
- counters.incrementDocumentsFailed(1);
- StringBuilder sb = new StringBuilder();
- sb.append("Problems with docid ");
- sb.append(docId);
- sb.append(": ");
- List<Result.Detail> details = documentResult.getDetails();
- for (Result.Detail detail : details) {
- sb.append(detail.toString());
- sb.append(" ");
- }
- log.warning(sb.toString());
- return;
- }
- counters.incrementDocumentsOk(1);
+ public CompletableFuture<Result> update(DocumentId documentId, String updateJson, OperationParameters params) {
+ return createSuccessResult(documentId);
}
- }
+ @Override
+ public CompletableFuture<Result> remove(DocumentId documentId, OperationParameters params) {
+ return createSuccessResult(documentId);
+ }
+
+ @Override public OperationStats stats() { return null; }
+ @Override public void close(boolean graceful) {}
+ private static CompletableFuture<Result> createSuccessResult(DocumentId documentId) {
+ return CompletableFuture.completedFuture(DryrunResult.create(Result.Type.success, documentId, "ok", null));
+ }
+ }
}
diff --git a/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/util/VespaConfiguration.java b/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/util/VespaConfiguration.java
index 2a1179dbec6..7219e621486 100644
--- a/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/util/VespaConfiguration.java
+++ b/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/util/VespaConfiguration.java
@@ -27,6 +27,7 @@ public class VespaConfiguration {
public static final String MAX_IN_FLIGHT_REQUESTS = "vespa.feed.max.in.flight.requests";
public static final String RANDOM_STARTUP_SLEEP = "vespa.feed.random.startup.sleep.ms";
public static final String NUM_RETRIES = "vespa.feed.num.retries";
+ public static final String USE_LEGACY_CLIENT = "vespa.feed.uselegacyclient";
private final Configuration conf;
private final Properties override;
@@ -130,6 +131,7 @@ public class VespaConfiguration {
return getInt(PROGRESS_REPORT, 1000);
}
+ public boolean useLegacyClient() { return getBoolean(USE_LEGACY_CLIENT, true); }
public String getString(String name) {
if (override != null && override.containsKey(name)) {
diff --git a/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/MapReduceTest.java b/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/MapReduceTest.java
index ebb34dbc1b1..fa7965acbc1 100644
--- a/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/MapReduceTest.java
+++ b/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/MapReduceTest.java
@@ -4,10 +4,9 @@ package com.yahoo.vespa.hadoop.pig;
import com.fasterxml.jackson.core.JsonEncoding;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
+import com.yahoo.vespa.hadoop.mapreduce.VespaOutputFormat;
import com.yahoo.vespa.hadoop.mapreduce.util.VespaConfiguration;
import com.yahoo.vespa.hadoop.mapreduce.util.VespaCounters;
-import com.yahoo.vespa.hadoop.mapreduce.VespaOutputFormat;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
@@ -17,22 +16,25 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.test.PathUtils;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.*;
-import java.util.*;
+import java.io.BufferedInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.StringTokenizer;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
public class MapReduceTest {
@@ -44,7 +46,7 @@ public class MapReduceTest {
protected static Path metricsJsonPath;
protected static Path metricsCsvPath;
- @BeforeClass
+ @BeforeAll
public static void setUp() throws IOException {
hdfsBaseDir = new File(PathUtils.getTestDir(MapReduceTest.class).getCanonicalPath());
@@ -62,7 +64,7 @@ public class MapReduceTest {
copyToHdfs("src/test/resources/tabular_data.csv", metricsCsvPath, "data");
}
- @AfterClass
+ @AfterAll
public static void tearDown() throws IOException {
Path testDir = new Path(hdfsBaseDir.getParent());
hdfs.delete(testDir, true);
@@ -82,7 +84,7 @@ public class MapReduceTest {
FileInputFormat.setInputPaths(job, metricsJsonPath);
boolean success = job.waitForCompletion(true);
- assertTrue("Job Failed", success);
+ assertTrue(success, "Job Failed");
VespaCounters counters = VespaCounters.get(job);
assertEquals(10, counters.getDocumentsSent());
@@ -103,7 +105,7 @@ public class MapReduceTest {
FileInputFormat.setInputPaths(job, metricsJsonPath);
boolean success = job.waitForCompletion(true);
- assertTrue("Job Failed", success);
+ assertTrue(success, "Job Failed");
VespaCounters counters = VespaCounters.get(job);
assertEquals(10, counters.getDocumentsSent());
@@ -125,7 +127,7 @@ public class MapReduceTest {
FileInputFormat.setInputPaths(job, metricsCsvPath);
boolean success = job.waitForCompletion(true);
- assertTrue("Job Failed", success);
+ assertTrue(success, "Job Failed");
VespaCounters counters = VespaCounters.get(job);
assertEquals(10, counters.getDocumentsSent());
diff --git a/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaDocumentOperationTest.java b/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaDocumentOperationTest.java
index bafeb593e4f..db2fab9b05e 100644
--- a/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaDocumentOperationTest.java
+++ b/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaDocumentOperationTest.java
@@ -12,9 +12,9 @@ import org.apache.pig.impl.logicalLayer.FrontendException;
import org.apache.pig.impl.logicalLayer.schema.Schema;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
@@ -23,22 +23,22 @@ import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
@SuppressWarnings("serial")
public class VespaDocumentOperationTest {
private final ByteArrayOutputStream outContent = new ByteArrayOutputStream();
private final PrintStream originalOut = System.out;
- @Before
+ @BeforeEach
public void setUpStreams() {
System.setOut(new PrintStream(outContent));
}
- @After
+ @AfterEach
public void restoreStreams() {
System.setOut(originalOut);
}
diff --git a/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaQueryTest.java b/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaQueryTest.java
index 2d55017b13e..b0e2dd32c04 100644
--- a/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaQueryTest.java
+++ b/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaQueryTest.java
@@ -8,12 +8,16 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.pig.ExecType;
import org.apache.pig.PigServer;
import org.apache.pig.data.Tuple;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import java.net.InetSocketAddress;
-import java.util.*;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
public class VespaQueryTest {
diff --git a/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaStorageTest.java b/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaStorageTest.java
index 7ca401a0cc8..3565db37126 100644
--- a/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaStorageTest.java
+++ b/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaStorageTest.java
@@ -1,14 +1,8 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hadoop.pig;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.function.Supplier;
-
+import com.yahoo.vespa.hadoop.mapreduce.util.VespaConfiguration;
+import com.yahoo.vespa.hadoop.mapreduce.util.VespaCounters;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.mapred.Counters;
@@ -18,11 +12,14 @@ import org.apache.pig.backend.executionengine.ExecJob;
import org.apache.pig.tools.pigstats.JobStats;
import org.apache.pig.tools.pigstats.PigStats;
import org.apache.pig.tools.pigstats.mapreduce.MRJobStats;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
-import com.yahoo.vespa.hadoop.mapreduce.util.VespaConfiguration;
-import com.yahoo.vespa.hadoop.mapreduce.util.VespaCounters;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
public class VespaStorageTest {
@@ -51,6 +48,13 @@ public class VespaStorageTest {
assertAllDocumentsOk("src/test/pig/feed_operations_with_json_loader.pig");
}
+ @Test
+ public void requireThatPremadeOperationsWithJsonLoaderFeedAndNonLegacyClientSucceeds() throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ conf.set(VespaConfiguration.USE_SSL, Boolean.TRUE.toString());
+ conf.set(VespaConfiguration.USE_LEGACY_CLIENT, Boolean.FALSE.toString());
+ assertAllDocumentsOk("src/test/pig/feed_operations_with_json_loader.pig", conf);
+ }
@Test
public void requireThatCreateOperationsFeedSucceeds() throws Exception {
diff --git a/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/util/TupleToolsTest.java b/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/util/TupleToolsTest.java
index 27080a8b2af..93e6a0abfdd 100644
--- a/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/util/TupleToolsTest.java
+++ b/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/util/TupleToolsTest.java
@@ -6,11 +6,11 @@ import org.apache.pig.data.DataType;
import org.apache.pig.data.Tuple;
import org.apache.pig.data.TupleFactory;
import org.apache.pig.impl.logicalLayer.schema.Schema;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import java.io.IOException;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
public class TupleToolsTest {
diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java
index 58857d1d8e6..9db296e33cd 100644
--- a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java
+++ b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java
@@ -122,7 +122,7 @@ import static java.util.stream.Collectors.toUnmodifiableMap;
*/
public class DocumentV1ApiHandler extends AbstractRequestHandler {
- private static final Duration defaultTimeout = Duration.ofSeconds(175);
+ private static final Duration defaultTimeout = Duration.ofSeconds(180); // Match document API default timeout.
private static final Logger log = Logger.getLogger(DocumentV1ApiHandler.class.getName());
private static final Parser<Integer> integerParser = Integer::parseInt;
@@ -160,6 +160,7 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
private static final String TRACELEVEL = "tracelevel";
private final Clock clock;
+ private final Duration handlerTimeout;
private final Metric metric;
private final DocumentApiMetrics metrics;
private final DocumentOperationParser parser;
@@ -184,14 +185,15 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
ClusterListConfig clusterListConfig,
AllClustersBucketSpacesConfig bucketSpacesConfig,
DocumentOperationExecutorConfig executorConfig) {
- this(Clock.systemUTC(), metric, metricReceiver, documentAccess,
+ this(Clock.systemUTC(), Duration.ofSeconds(5), metric, metricReceiver, documentAccess,
documentManagerConfig, executorConfig, clusterListConfig, bucketSpacesConfig);
}
- DocumentV1ApiHandler(Clock clock, Metric metric, MetricReceiver metricReceiver, DocumentAccess access,
+ DocumentV1ApiHandler(Clock clock, Duration handlerTimeout, Metric metric, MetricReceiver metricReceiver, DocumentAccess access,
DocumentmanagerConfig documentmanagerConfig, DocumentOperationExecutorConfig executorConfig,
ClusterListConfig clusterListConfig, AllClustersBucketSpacesConfig bucketSpacesConfig) {
this.clock = clock;
+ this.handlerTimeout = handlerTimeout;
this.parser = new DocumentOperationParser(documentmanagerConfig);
this.metric = metric;
this.metrics = new DocumentApiMetrics(metricReceiver, "documentV1");
@@ -222,8 +224,9 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
HttpRequest request = (HttpRequest) rawRequest;
try {
- request.setTimeout(getProperty(request, TIMEOUT, timeoutMillisParser)
- .orElse(defaultTimeout.toMillis()),
+ // Set a higher HTTP layer timeout than the document API timeout, to prefer triggering the latter.
+ request.setTimeout( getProperty(request, TIMEOUT, timeoutMillisParser).orElse(defaultTimeout.toMillis())
+ + handlerTimeout.toMillis(),
TimeUnit.MILLISECONDS);
Path requestPath = new Path(request.getUri());
@@ -251,7 +254,7 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
@Override
public void handleTimeout(Request request, ResponseHandler responseHandler) {
- timeout((HttpRequest) request, "Request timeout after " + request.getTimeout(TimeUnit.MILLISECONDS) + "ms", responseHandler);
+ timeout((HttpRequest) request, "Timeout after " + (request.getTimeout(TimeUnit.MILLISECONDS) - handlerTimeout.toMillis()) + "ms", responseHandler);
}
@Override
@@ -743,11 +746,18 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
private static void serverError(HttpRequest request, Throwable t, ResponseHandler handler) {
loggingException(() -> {
- log.log(WARNING, "Uncaught exception handling request " + request.getMethod() + " " + request.getUri().getRawPath() + ":", t);
+ log.log(WARNING, "Uncaught exception handling request " + request.getMethod() + " " + request.getUri().getRawPath(), t);
JsonResponse.create(request, Exceptions.toMessageString(t), handler).respond(Response.Status.INTERNAL_SERVER_ERROR);
});
}
+ private static void badGateway(HttpRequest request, Throwable t, ResponseHandler handler) {
+ loggingException(() -> {
+ log.log(FINE, t, () -> "Document access error handling request " + request.getMethod() + " " + request.getUri().getRawPath());
+ JsonResponse.create(request, Exceptions.toMessageString(t), handler).respond(Response.Status.BAD_GATEWAY);
+ });
+ }
+
private static void timeout(HttpRequest request, String message, ResponseHandler handler) {
loggingException(() -> {
log.log(FINE, () -> "Timeout handling request " + request.getMethod() + " " + request.getUri().getRawPath() + ": " + message);
@@ -803,6 +813,9 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
catch (IllegalArgumentException e) {
badRequest(request, e, handler);
}
+ catch (DispatchException e) {
+ badGateway(request, e, handler);
+ }
catch (RuntimeException e) {
serverError(request, e, handler);
}
@@ -821,12 +834,16 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
return false;
if (result.type() == Result.ResultType.FATAL_ERROR)
- throw new RuntimeException(result.getError());
+ throw new DispatchException(result.getError());
outstanding.incrementAndGet();
return true;
}
+ private static class DispatchException extends RuntimeException {
+ private DispatchException(Throwable cause) { super(cause); }
+ }
+
/** Readable content channel which forwards data to a reader when closed. */
static class ForwardingContentChannel implements ContentChannel {
@@ -923,7 +940,7 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
log.log(WARNING, "Unexpected document API operation outcome '" + response.outcome() + "'");
case ERROR:
log.log(FINE, () -> "Exception performing document operation: " + response.getTextMessage());
- jsonResponse.commit(Response.Status.INTERNAL_SERVER_ERROR);
+ jsonResponse.commit(Response.Status.BAD_GATEWAY);
}
}
}
@@ -956,7 +973,7 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
parameters.setMaxTotalHits(wantedDocumentCount);
parameters.setThrottlePolicy(new StaticThrottlePolicy().setMaxPendingCount(concurrency));
parameters.visitInconsistentBuckets(true);
- parameters.setSessionTimeoutMs(Math.max(1, request.getTimeout(TimeUnit.MILLISECONDS) - 5000));
+ parameters.setSessionTimeoutMs(Math.max(1, request.getTimeout(TimeUnit.MILLISECONDS) - handlerTimeout.toMillis()));
return parameters;
}
@@ -966,7 +983,7 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
VisitorParameters parameters = parseCommonParameters(request, path, Optional.of(requireProperty(request, CLUSTER)));
parameters.setThrottlePolicy(new DynamicThrottlePolicy().setMinWindowSize(1).setWindowSizeIncrement(1));
long timeChunk = getProperty(request, TIME_CHUNK, timeoutMillisParser).orElse(60_000L);
- parameters.setSessionTimeoutMs(Math.max(1, Math.min(timeChunk, request.getTimeout(TimeUnit.MILLISECONDS) - 5000L)));
+ parameters.setSessionTimeoutMs(Math.max(1, Math.min(timeChunk, request.getTimeout(TimeUnit.MILLISECONDS) - handlerTimeout.toMillis())));
return parameters;
}
@@ -1118,7 +1135,7 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
if (getVisitorStatistics() != null)
response.writeDocumentCount(getVisitorStatistics().getDocumentsReturned());
- response.respond(Response.Status.INTERNAL_SERVER_ERROR);
+ response.respond(Response.Status.BAD_GATEWAY);
}
});
visitDispatcher.execute(() -> {
diff --git a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java
index 6f1b0466350..29ae7f52265 100644
--- a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java
+++ b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java
@@ -60,17 +60,20 @@ import org.junit.Test;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UncheckedIOException;
+import java.time.Duration;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.TreeMap;
+import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CopyOnWriteArraySet;
import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
-import java.util.concurrent.Phaser;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiFunction;
import java.util.function.Consumer;
@@ -83,6 +86,7 @@ import static com.yahoo.jdisc.http.HttpRequest.Method.POST;
import static com.yahoo.jdisc.http.HttpRequest.Method.PUT;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -130,7 +134,7 @@ public class DocumentV1ApiTest {
access = new MockDocumentAccess(docConfig);
metric = new NullMetric();
metrics = new MetricReceiver.MockReceiver();
- handler = new DocumentV1ApiHandler(clock, metric, metrics, access, docConfig, executorConfig, clusterConfig, bucketConfig);
+ handler = new DocumentV1ApiHandler(clock, Duration.ofMillis(1), metric, metrics, access, docConfig, executorConfig, clusterConfig, bucketConfig);
}
@After
@@ -176,7 +180,7 @@ public class DocumentV1ApiTest {
}
@Test
- public void testResponses() {
+ public void testResponses() throws InterruptedException {
RequestHandlerTestDriver driver = new RequestHandlerTestDriver(handler);
List<AckToken> tokens = List.of(new AckToken(null), new AckToken(null), new AckToken(null));
// GET at non-existent path returns 404 with available paths
@@ -204,7 +208,7 @@ public class DocumentV1ApiTest {
assertEquals(100, ((StaticThrottlePolicy) parameters.getThrottlePolicy()).getMaxPendingCount());
assertEquals("[id]", parameters.getFieldSet());
assertEquals("(all the things)", parameters.getDocumentSelection());
- assertEquals(1000, parameters.getSessionTimeoutMs());
+ assertEquals(6000, parameters.getSessionTimeoutMs());
// Put some documents in the response
parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc1)), tokens.get(0));
parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc2)), tokens.get(1));
@@ -269,7 +273,7 @@ public class DocumentV1ApiTest {
access.expect(parameters -> {
assertEquals("[Content:cluster=content]", parameters.getRemoteDataHandler());
assertEquals("[all]", parameters.fieldSet());
- assertEquals(55_000L, parameters.getSessionTimeoutMs());
+ assertEquals(60_000L, parameters.getSessionTimeoutMs());
parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.SUCCESS, "We made it!");
});
response = driver.sendRequest("http://localhost/document/v1/space/music/docid?destinationCluster=content&selection=true&cluster=content&timeout=60", POST);
@@ -330,6 +334,7 @@ public class DocumentV1ApiTest {
assertEquals(400, response.getStatus());
// DELETE with namespace and document type is a restricted visit which deletes visited documents.
+ // When visiting fails fatally, a 502 BAD GATEWAY is returned.
access.expect(tokens.subList(0, 1));
access.expect(parameters -> {
assertEquals("(false) and (music) and (id.namespace=='space')", parameters.getDocumentSelection());
@@ -351,7 +356,7 @@ public class DocumentV1ApiTest {
" \"pathId\": \"/document/v1/space/music/docid\"," +
" \"message\": \"boom\"" +
"}", response.readAll());
- assertEquals(500, response.getStatus());
+ assertEquals(502, response.getStatus());
// DELETE at the root is also a deletion visit. These also require a selection.
access.expect(parameters -> {
@@ -386,7 +391,7 @@ public class DocumentV1ApiTest {
" \"documents\": []," +
" \"message\": \"error\"" +
"}", response.readAll());
- assertEquals(500, response.getStatus());
+ assertEquals(502, response.getStatus());
// GET with namespace, document type and number is a restricted visit.
access.expect(parameters -> {
@@ -649,12 +654,12 @@ public class DocumentV1ApiTest {
" \"pathId\": \"/document/v1/space/music/number/1/two\"," +
" \"message\": \"error\"" +
"}", response1.readAll());
- assertEquals(500, response1.getStatus());
+ assertEquals(502, response1.getStatus());
assertSameJson("{" +
" \"pathId\": \"/document/v1/space/music/number/1/two\"," +
" \"message\": \"error\"" +
"}", response2.readAll());
- assertEquals(500, response2.getStatus());
+ assertEquals(502, response2.getStatus());
// Request response does not arrive before timeout has passed.
AtomicReference<ResponseHandler> handler = new AtomicReference<>();
@@ -662,15 +667,89 @@ public class DocumentV1ApiTest {
handler.set(parameters.responseHandler().get());
return new Result(Result.ResultType.SUCCESS, null);
});
- var response4 = driver.sendRequest("http://localhost/document/v1/space/music/docid/one?timeout=1ms");
- assertSameJson("{" +
- " \"pathId\": \"/document/v1/space/music/docid/one\"," +
- " \"message\": \"Request timeout after 1ms\"" +
- "}", response4.readAll());
- assertEquals(504, response4.getStatus());
- if (handler.get() != null) // Timeout may have occurred before dispatch, or ...
- handler.get().handleResponse(new Response(0)); // response may eventually arrive, but too late.
+ try {
+ var response4 = driver.sendRequest("http://localhost/document/v1/space/music/docid/one?timeout=1ms");
+ assertSameJson("{" +
+ " \"pathId\": \"/document/v1/space/music/docid/one\"," +
+ " \"message\": \"Timeout after 1ms\"" +
+ "}", response4.readAll());
+ assertEquals(504, response4.getStatus());
+ }
+ finally {
+ if (handler.get() != null) // Timeout may have occurred before dispatch, or ...
+ handler.get().handleResponse(new Response(0)); // response may eventually arrive, but too late.
+ }
+
+ driver.close();
+ }
+
+ @Test
+ public void testThroughput() throws InterruptedException {
+ DocumentOperationExecutorConfig executorConfig = new DocumentOperationExecutorConfig.Builder().build();
+ handler = new DocumentV1ApiHandler(clock, Duration.ofMillis(1), metric, metrics, access, docConfig, executorConfig, clusterConfig, bucketConfig);
+
+ int writers = 4;
+ int queueFill = executorConfig.maxThrottled() - writers;
+ RequestHandlerTestDriver driver = new RequestHandlerTestDriver(handler);
+ ScheduledExecutorService writer = Executors.newScheduledThreadPool(writers);
+ ScheduledExecutorService reader = Executors.newScheduledThreadPool(1);
+ ScheduledExecutorService replier = Executors.newScheduledThreadPool(writers);
+ BlockingQueue<RequestHandlerTestDriver.MockResponseHandler> responses = new LinkedBlockingQueue<>();
+
+ Response success = new Response(0, null, Response.Outcome.SUCCESS);
+ int docs = 1 << 14;
+ assertTrue(docs >= writers);
+ AtomicReference<com.yahoo.jdisc.Response> failed = new AtomicReference<>();
+
+ CountDownLatch latch = new CountDownLatch(docs);
+ reader.execute(() -> {
+ while ( ! reader.isShutdown()) {
+ try {
+ var response = responses.take();
+ response.awaitResponse().readAll();
+ if (response.getStatus() != 200)
+ failed.set(response.getResponse());
+ latch.countDown();
+ }
+ catch (InterruptedException e) { break; }
+ }
+ });
+
+ // Fill the handler resend queue.
+ long startNanos = System.nanoTime();
+ CountDownLatch setup = new CountDownLatch(queueFill);
+ access.session.expect((id, parameters) -> {
+ setup.countDown();
+ return new Result(Result.ResultType.TRANSIENT_ERROR, new Error());
+ });
+ for (int i = 0; i < queueFill; i++) {
+ int j = i;
+ writer.execute(() -> {
+ responses.add(driver.sendRequest("http://localhost/document/v1/ns/music/docid/" + j,
+ POST,
+ "{ \"fields\": { \"artist\": \"Sigrid\" } }"));
+ });
+ }
+ setup.await();
+
+ // Let "messagebus" start accepting messages.
+ access.session.expect((id, parameters) -> {
+ replier.schedule(() -> parameters.responseHandler().get().handleResponse(success), 10, TimeUnit.MILLISECONDS);
+ return new Result(0);
+ });
+ // Send the rest of the documents. Rely on resender to empty queue of throttled operations.
+ for (int i = queueFill; i < docs; i++) {
+ int j = i;
+ writer.execute(() -> {
+ responses.add(driver.sendRequest("http://localhost/document/v1/ns/music/docid/" + j,
+ POST,
+ "{ \"fields\": { \"artist\": \"Sigrid\" } }"));
+ });
+ }
+ latch.await();
+ System.err.println(docs + " requests in " + (System.nanoTime() - startNanos) * 1e-9 + " seconds");
+ assertNull(failed.get());
driver.close();
}
diff --git a/vespaclient-java/src/main/sh/vespa-visit.sh b/vespaclient-java/src/main/sh/vespa-visit.sh
index 92d6bc67f3f..e4fec2857fe 100755
--- a/vespaclient-java/src/main/sh/vespa-visit.sh
+++ b/vespaclient-java/src/main/sh/vespa-visit.sh
@@ -74,16 +74,25 @@ findhost
# END environment bootstrap section
+Xmx="-Xmx1024m"
+# Allow -Xmx to be specified in args
+for arg in "$@"; do
+ shift
+ case $arg in -Xmx*) Xmx=$arg ;;
+ *) set -- "$@" "$arg" ;;
+ esac
+done
+
if [ "${VESPA_LOG_LEVEL}" = "" ]; then
export VESPA_LOG_LEVEL=error,warning
fi
-export MALLOC_ARENA_MAX=1 #Does not need fast allocation
+export MALLOC_ARENA_MAX=1 # Does not need fast allocation
exec java \
-server -enableassertions \
-XX:ThreadStackSize=512 \
-XX:MaxJavaStackTraceDepth=1000000 \
-Djava.library.path=${VESPA_HOME}/libexec64/native:${VESPA_HOME}/lib64 \
-XX:MaxDirectMemorySize=32m -Djava.awt.headless=true \
--Xms128m -Xmx1024m $(getJavaOptionsIPV46) \
+-Xms128m $(getJavaOptionsIPV46) ${Xmx} \
-cp ${VESPA_HOME}/lib/jars/vespaclient-java-jar-with-dependencies.jar com.yahoo.vespavisit.VdsVisit "$@"
diff --git a/vespalib/src/tests/datastore/datastore/datastore_test.cpp b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
index 11b4f5e6631..90281acb0d3 100644
--- a/vespalib/src/tests/datastore/datastore/datastore_test.cpp
+++ b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
@@ -141,6 +141,38 @@ assertMemStats(const DataStoreBase::MemStats &exp,
EXPECT_EQ(exp._holdBuffers, act._holdBuffers);
}
+TEST(DataStoreTest, require_that_invalid_entry_ref_can_be_ordered) {
+ EntryRef inValid;
+ EntryRef a(1);
+ EXPECT_EQ(inValid, inValid);
+ EXPECT_EQ(a, a);
+ EXPECT_NE(inValid, a);
+ EXPECT_NE(a, inValid);
+ EXPECT_LT(inValid, a);
+ EXPECT_LE(inValid, a);
+}
+
+TEST(DataStoreTest, require_that_entry_ref_can_be_ordered) {
+ EntryRef a(1);
+ EntryRef b(2);
+ EntryRef c(3);
+ EXPECT_EQ(a, a);
+ EXPECT_EQ(b, b);
+ EXPECT_EQ(c, c);
+ EXPECT_NE(a, b);
+ EXPECT_NE(a, c);
+ EXPECT_NE(b, c);
+ EXPECT_LT(a, b);
+ EXPECT_LT(b, c);
+ EXPECT_LT(a, c);
+ EXPECT_LE(a, a);
+ EXPECT_LE(b, b);
+ EXPECT_LE(c, c);
+ EXPECT_LE(a, b);
+ EXPECT_LE(b, c);
+ EXPECT_LE(a, c);
+}
+
TEST(DataStoreTest, require_that_entry_ref_is_working)
{
using MyRefType = EntryRefT<22>;
@@ -643,6 +675,7 @@ TEST(DataStoreTest, control_static_sizes) {
EXPECT_EQ(0, bs.size());
}
+
}
GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.h b/vespalib/src/vespa/vespalib/datastore/datastorebase.h
index 13c242e85a7..5a50dba6a3c 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastorebase.h
+++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.h
@@ -27,11 +27,11 @@ public:
{
public:
EntryRef _ref;
- size_t _len; // Aligned length
+ size_t _len; // Aligned length
ElemHold1ListElem(EntryRef ref, size_t len)
- : _ref(ref),
- _len(len)
+ : _ref(ref),
+ _len(len)
{ }
};
diff --git a/vespalib/src/vespa/vespalib/datastore/entryref.h b/vespalib/src/vespa/vespalib/datastore/entryref.h
index 046d9089580..01f473fcf17 100644
--- a/vespalib/src/vespa/vespalib/datastore/entryref.h
+++ b/vespalib/src/vespa/vespalib/datastore/entryref.h
@@ -21,6 +21,7 @@ public:
bool operator==(const EntryRef &rhs) const noexcept { return _ref == rhs._ref; }
bool operator!=(const EntryRef &rhs) const noexcept { return _ref != rhs._ref; }
bool operator <(const EntryRef &rhs) const noexcept { return _ref < rhs._ref; }
+ bool operator <=(const EntryRef &rhs) const noexcept { return _ref <= rhs._ref; }
};
/**
diff --git a/vespalib/src/vespa/vespalib/net/tls/crypto_codec_adapter.h b/vespalib/src/vespa/vespalib/net/tls/crypto_codec_adapter.h
index 372a2191a88..11c87d3b7e9 100644
--- a/vespalib/src/vespa/vespalib/net/tls/crypto_codec_adapter.h
+++ b/vespalib/src/vespa/vespalib/net/tls/crypto_codec_adapter.h
@@ -33,7 +33,7 @@ private:
ssize_t flush_all(); // -1/0 -> error/ok
public:
CryptoCodecAdapter(SocketHandle socket, std::unique_ptr<CryptoCodec> codec)
- : _input(64_Ki), _output(64_Ki), _socket(std::move(socket)), _codec(std::move(codec)),
+ : _input(0), _output(0), _socket(std::move(socket)), _codec(std::move(codec)),
_got_tls_close(false), _encoded_tls_close(false) {}
void inject_read_data(const char *buf, size_t len) override;
int get_fd() const override { return _socket.get(); }
diff --git a/vespalib/src/vespa/vespalib/util/exception.h b/vespalib/src/vespa/vespalib/util/exception.h
index 097ecc131c7..6fb53c035eb 100644
--- a/vespalib/src/vespa/vespalib/util/exception.h
+++ b/vespalib/src/vespa/vespalib/util/exception.h
@@ -216,6 +216,9 @@ public:
/** @brief Returns the msg parameter that this Exception was constructed with */
const string &getMessage() const { return _msg; }
+ /** @brief Returns the message string */
+ const char *message() const { return _msg.c_str(); }
+
/** @brief Returns the location parameter that this Exception was constructed with */
const string &getLocation() const { return _location; }